repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀)
|---|---|---|---|---|
tafia/servo | refs/heads/master | tests/wpt/web-platform-tests/XMLHttpRequest/resources/upload.py | 232 |
def main(request, response):
content = []
for key, values in sorted(item for item in request.POST.items() if not hasattr(item[1][0], "filename")):
content.append("%s=%s," % (key, values[0]))
content.append("\n")
for key, values in sorted(item for item in request.POST.items() if hasattr(item[1][0], "filename")):
value = values[0]
content.append("%s=%s:%s:%s," % (key,
value.filename,
value.headers["Content-Type"],
len(value.file.read())))
return "".join(content)
|
zhimin711/nova | refs/heads/master | nova/tests/unit/servicegroup/test_api.py | 9 |
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test the base class for the servicegroup API
"""
import mock
from nova import servicegroup
from nova import test
class ServiceGroupApiTestCase(test.NoDBTestCase):
def setUp(self):
super(ServiceGroupApiTestCase, self).setUp()
self.flags(servicegroup_driver='db')
self.servicegroup_api = servicegroup.API()
self.driver = self.servicegroup_api._driver
def test_join(self):
""""""
member = {'host': "fake-host", "topic": "compute"}
group = "group"
self.driver.join = mock.MagicMock(return_value=None)
result = self.servicegroup_api.join(member, group)
self.assertIsNone(result)
self.driver.join.assert_called_with(member, group, None)
def test_service_is_up(self):
""""""
member = {"host": "fake-host",
"topic": "compute",
"forced_down": False}
for retval in (True, False):
driver = self.servicegroup_api._driver
driver.is_up = mock.MagicMock(return_value=retval)
result = self.servicegroup_api.service_is_up(member)
self.assertIs(result, retval)
driver.is_up.assert_called_with(member)
member["forced_down"] = True
driver = self.servicegroup_api._driver
driver.is_up = mock.MagicMock()
result = self.servicegroup_api.service_is_up(member)
self.assertIs(result, False)
driver.is_up.assert_not_called()
|
johankaito/fufuka | refs/heads/master | microblog/old-flask/lib/python2.7/site-packages/setuptools/command/install_lib.py | 396 |
import os
import imp
from itertools import product, starmap
import distutils.command.install_lib as orig
class install_lib(orig.install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
"""
Return a collections.Sized collections.Container of paths to be
excluded for single_version_externally_managed installations.
"""
all_packages = (
pkg
for ns_pkg in self._get_SVEM_NSPs()
for pkg in self._all_packages(ns_pkg)
)
excl_specs = product(all_packages, self._gen_exclusion_paths())
return set(starmap(self._exclude_pkg_path, excl_specs))
def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts)
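# Illustrative example (values assumed, not from the original source): with
# install_dir '/usr/lib/python2.7/site-packages', pkg 'foo.bar' and
# exclusion_path '__init__.pyc', this returns
# '/usr/lib/python2.7/site-packages/foo/bar/__init__.pyc'.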
@staticmethod
def _all_packages(pkg_name):
"""
>>> list(install_lib._all_packages('foo.bar.baz'))
['foo.bar.baz', 'foo.bar', 'foo']
"""
while pkg_name:
yield pkg_name
pkg_name, sep, child = pkg_name.rpartition('.')
def _get_SVEM_NSPs(self):
"""
Get namespace packages (list) but only for
single_version_externally_managed installations and empty otherwise.
"""
# TODO: is it necessary to short-circuit here? i.e. what's the cost
# if get_finalized_command is called even when namespace_packages is
# False?
if not self.distribution.namespace_packages:
return []
install_cmd = self.get_finalized_command('install')
svem = install_cmd.single_version_externally_managed
return self.distribution.namespace_packages if svem else []
@staticmethod
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(imp, 'get_tag'):
return
base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
yield base + '.pyc'
yield base + '.pyo'
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return orig.install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",
dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = orig.install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
|
snegovick/dswarm_simulator | refs/heads/master | emath/emath.py | 1 |
""" Convert from polar (r,w) to rectangular (x,y) x = r cos(w) y = r sin(w) """
def rect(r, w, deg=0): # radian if deg=0; degree if deg=1
from math import cos, sin, pi
if deg:
w = pi * w / 180.0
return r * cos(w), r * sin(w)
""" Convert from rectangular (x,y) to polar (r,w) r = sqrt(x^2 + y^2) w = arctan(y/x) = [-\pi,\pi] = [-180,180] """
def polar(x, y, deg=0): # radian if deg=0; degree if deg=1
from math import hypot, atan2, pi
if deg:
return hypot(x, y), 180.0 * atan2(y, x) / pi
return hypot(x, y), atan2(y, x)
""" cbrt(x) = x^{1/3}, if x >= 0 = -|x|^{1/3}, if x < 0 """
def cbrt(x):
from math import pow
if x >= 0:
return pow(x, 1.0/3.0)
return -pow(abs(x), 1.0/3.0)
""" x^2 + ax + b = 0 (or ax^2 + bx + c = 0) By substituting x = y-t and t = a/2, the equation reduces to y^2 + (b-t^2) = 0 which has easy solution y = +/- sqrt(t^2-b) """
def quadratic(a, b, c=None):
import math, cmath
if c: # (ax^2 + bx + c = 0)
a, b = b / float(a), c / float(a)
t = a / 2.0
r = t**2 - b
if r >= 0: # real roots
y1 = math.sqrt(r)
else: # complex roots
y1 = cmath.sqrt(r)
y2 = -y1
return y1 - t, y2 - t
""" x^3 + ax^2 + bx + c = 0 (or ax^3 + bx^2 + cx + d = 0) With substitution x = y-t and t = a/3, the cubic equation reduces to y^3 + py + q = 0, where p = b-3t^2 and q = c-bt+2t^3. Then, one real root y1 = u+v can be determined by solving w^2 + qw - (p/3)^3 = 0 where w = u^3, v^3. From Vieta's theorem, y1 + y2 + y3 = 0 y1 y2 + y1 y3 + y2 y3 = p y1 y2 y3 = -q, the other two (real or complex) roots can be obtained by solving y^2 + (y1)y + (p+y1^2) = 0 """
def cubic(a, b, c, d=None):
from math import cos
if d: # (ax^3 + bx^2 + cx + d = 0)
a, b, c = b / float(a), c / float(a), d / float(a)
t = a / 3.0
p, q = b - 3 * t**2, c - b * t + 2 * t**3
u, v = quadratic(q, -(p/3.0)**3)
if type(u) == type(0j): # complex cubic root
r, w = polar(u.real, u.imag)
y1 = 2 * cbrt(r) * cos(w / 3.0)
else: # real root
y1 = cbrt(u) + cbrt(v)
y2, y3 = quadratic(y1, p + y1**2)
return y1 - t, y2 - t, y3 - t
if __name__ == "__main__":
print cubic(1, 3, -6, -8)
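# Worked example (added for illustration): the call above solves
# x^3 + 3x^2 - 6x - 8 = 0, whose exact roots are 2, -1 and -4, so the printed
# tuple should be (2.0, -1.0, -4.0) up to floating-point error.
# Likewise, quadratic(1, -3, 2) solves x^2 - 3x + 2 = 0 and returns (2.0, 1.0).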
|
ntt-sic/nova | refs/heads/master | nova/api/openstack/compute/contrib/hypervisors.py | 11 |
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hypervisors admin extension."""
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'hypervisors')
def make_hypervisor(elem, detail):
elem.set('hypervisor_hostname')
elem.set('id')
if detail:
elem.set('vcpus')
elem.set('memory_mb')
elem.set('local_gb')
elem.set('vcpus_used')
elem.set('memory_mb_used')
elem.set('local_gb_used')
elem.set('hypervisor_type')
elem.set('hypervisor_version')
elem.set('free_ram_mb')
elem.set('free_disk_gb')
elem.set('current_workload')
elem.set('running_vms')
elem.set('cpu_info')
elem.set('disk_available_least')
service = xmlutil.SubTemplateElement(elem, 'service',
selector='service')
service.set('id')
service.set('host')
class HypervisorIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisors')
elem = xmlutil.SubTemplateElement(root, 'hypervisor',
selector='hypervisors')
make_hypervisor(elem, False)
return xmlutil.MasterTemplate(root, 1)
class HypervisorDetailTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisors')
elem = xmlutil.SubTemplateElement(root, 'hypervisor',
selector='hypervisors')
make_hypervisor(elem, True)
return xmlutil.MasterTemplate(root, 1)
class HypervisorTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
make_hypervisor(root, True)
return xmlutil.MasterTemplate(root, 1)
class HypervisorUptimeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
make_hypervisor(root, False)
root.set('uptime')
return xmlutil.MasterTemplate(root, 1)
class HypervisorServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisors')
elem = xmlutil.SubTemplateElement(root, 'hypervisor',
selector='hypervisors')
make_hypervisor(elem, False)
servers = xmlutil.SubTemplateElement(elem, 'servers')
server = xmlutil.SubTemplateElement(servers, 'server',
selector='servers')
server.set('name')
server.set('uuid')
return xmlutil.MasterTemplate(root, 1)
class HypervisorStatisticsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisor_statistics',
selector='hypervisor_statistics')
root.set('count')
root.set('vcpus')
root.set('memory_mb')
root.set('local_gb')
root.set('vcpus_used')
root.set('memory_mb_used')
root.set('local_gb_used')
root.set('free_ram_mb')
root.set('free_disk_gb')
root.set('current_workload')
root.set('running_vms')
root.set('disk_available_least')
return xmlutil.MasterTemplate(root, 1)
class HypervisorsController(object):
"""The Hypervisors API controller for the OpenStack API."""
def __init__(self):
self.host_api = compute.HostAPI()
super(HypervisorsController, self).__init__()
def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
hyp_dict = {
'id': hypervisor['id'],
'hypervisor_hostname': hypervisor['hypervisor_hostname'],
}
if detail and not servers:
for field in ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
'hypervisor_type', 'hypervisor_version',
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least'):
hyp_dict[field] = hypervisor[field]
hyp_dict['service'] = {
'id': hypervisor['service_id'],
'host': hypervisor['service']['host'],
}
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
for serv in servers]
# Add any additional info
if kwargs:
hyp_dict.update(kwargs)
return hyp_dict
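# For an index view (detail=False, no servers) the returned dict contains only
# the two mandatory keys, e.g. {'id': 1, 'hypervisor_hostname': 'compute1'}
# (example values for illustration).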
@wsgi.serializers(xml=HypervisorIndexTemplate)
def index(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
return dict(hypervisors=[self._view_hypervisor(hyp, False)
for hyp in compute_nodes])
@wsgi.serializers(xml=HypervisorDetailTemplate)
def detail(self, req):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_get_all(context)
return dict(hypervisors=[self._view_hypervisor(hyp, True)
for hyp in compute_nodes])
@wsgi.serializers(xml=HypervisorTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
return dict(hypervisor=self._view_hypervisor(hyp, True))
@wsgi.serializers(xml=HypervisorUptimeTemplate)
def uptime(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
hyp = self.host_api.compute_node_get(context, id)
except (ValueError, exception.ComputeHostNotFound):
msg = _("Hypervisor with ID '%s' could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
# Get the uptime
try:
host = hyp['service']['host']
uptime = self.host_api.get_host_uptime(context, host)
except NotImplementedError:
msg = _("Virt driver does not implement uptime function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return dict(hypervisor=self._view_hypervisor(hyp, False,
uptime=uptime))
@wsgi.serializers(xml=HypervisorIndexTemplate)
def search(self, req, id):
context = req.environ['nova.context']
authorize(context)
hypervisors = self.host_api.compute_node_search_by_hypervisor(
context, id)
if hypervisors:
return dict(hypervisors=[self._view_hypervisor(hyp, False)
for hyp in hypervisors])
else:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
@wsgi.serializers(xml=HypervisorServersTemplate)
def servers(self, req, id):
context = req.environ['nova.context']
authorize(context)
compute_nodes = self.host_api.compute_node_search_by_hypervisor(
context, id)
if not compute_nodes:
msg = _("No hypervisor matching '%s' could be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
hypervisors = []
for compute_node in compute_nodes:
instances = self.host_api.instance_get_all_by_host(context,
compute_node['service']['host'])
hyp = self._view_hypervisor(compute_node, False, instances)
hypervisors.append(hyp)
return dict(hypervisors=hypervisors)
@wsgi.serializers(xml=HypervisorStatisticsTemplate)
def statistics(self, req):
context = req.environ['nova.context']
authorize(context)
stats = self.host_api.compute_node_statistics(context)
return dict(hypervisor_statistics=stats)
class Hypervisors(extensions.ExtensionDescriptor):
"""Admin-only hypervisor administration."""
name = "Hypervisors"
alias = "os-hypervisors"
namespace = "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1"
updated = "2012-06-21T00:00:00+00:00"
def get_resources(self):
resources = [extensions.ResourceExtension('os-hypervisors',
HypervisorsController(),
collection_actions={'detail': 'GET',
'statistics': 'GET'},
member_actions={'uptime': 'GET',
'search': 'GET',
'servers': 'GET'})]
return resources
|
barzan/dbseer | refs/heads/master | rs-sysmon2/plugins/dstat_sendmail.py | 8 |
### Author: Dag Wieers <dag@wieers.com>
### FIXME: Should read /var/log/mail/statistics or /etc/mail/statistics (format ?)
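### Note: like other dstat plugins, this file is executed inside dstat's own
### namespace, so os, glob and the dstat base class are expected to be provided
### by the host process rather than imported here.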
class dstat_plugin(dstat):
def __init__(self):
self.name = 'sendmail'
self.vars = ('queue',)
self.type = 'd'
self.width = 4
self.scale = 100
def check(self):
if not os.access('/var/spool/mqueue', os.R_OK):
raise Exception, 'Cannot access sendmail queue'
def extract(self):
self.val['queue'] = len(glob.glob('/var/spool/mqueue/qf*'))
# vim:ts=4:sw=4:et
|
jungla/ICOM-fluidity-toolbox | refs/heads/master | Detectors/offline_advection/plot_traj_23D.py | 1 |
#!~/python
import fluidity_tools
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
import myfun
import numpy as np
import os
import fio
import lagrangian_stats
import advect_functions
from matplotlib.patches import Ellipse
exp = 'm_25_2'
filename2D = 'traj_m_25_2_512_0_500_2D.csv'
filename3D = 'traj_m_25_2_512_0_500_3D.csv'
tt = 500 # IC + 24-48 included
#tt = 230 # IC + 24-48 included
x0 = range(3000,4010,10)
y0 = range(2000,3010,10)
#z0 = range(1,20,4)
z0 = [0,5,10,15]
xp = len(x0)
yp = len(y0)
zp = len(z0)
pt = xp*yp*zp
# read the particle trajectories; time0, par2Dz and par3Dz below depend on these
time2D, par2D = advect_functions.read_particles_csv(filename2D,pt,tt)
par2D = lagrangian_stats.periodicCoords(par2D,10000,4000)
time3D, par3D = advect_functions.read_particles_csv(filename3D,pt,tt)
par3D = lagrangian_stats.periodicCoords(par3D,10000,4000)
#
time2D = (time2D)*1440
time3D = (time3D)*1440
time0 = time2D
# horizontal
#depths = [1, 5, 11, 17, 26]
depths = [5, 10, 15]
depthsid = [1, 2, 3]
#depths = [1] #, 17, 1]
for z in range(len(depths)):
print 'depth', z
par2Dz = np.reshape(par2D,(xp,yp,zp,3,tt))
par2Dzr = par2Dz[:,:,depthsid[z],:,:]
par2Dz = np.reshape(par2Dzr,(xp*yp,3,tt))
par3Dz = np.reshape(par3D,(xp,yp,zp,3,tt))
par3Dzr = par3Dz[:,:,depthsid[z],:,:]
par3Dz = np.reshape(par3Dzr,(xp*yp,3,tt))
#
for t in range(70,75,5):
print 'time', time0[t]/24
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, aspect='equal')
#
plt.plot((3,3),(2,3),'k')
plt.plot((3,4),(2,2),'k')
plt.plot((4,4),(2,3),'k')
plt.plot((3,4),(3,3),'k')
plt.plot((0,0),(0,4),'k')
plt.plot((0,10),(0,0),'k')
plt.plot((10,10),(0,4),'k')
plt.plot((0,10),(4,4),'k')
#
s3D = plt.scatter(par3Dz[:,0,t]/1000, par3Dz[:,1,t]/1000, marker='.', s=35, facecolor='r', lw = 0)
s2D = plt.scatter(par2Dz[:,0,t]/1000, par2Dz[:,1,t]/1000, marker='.', s=35, facecolor='b', lw = 0)
#
plt.legend((s3D,s2D),('3D','2D'))
print 'Saving 0 to eps'
xt3 = par3Dz[:,0,t] - np.mean(par3Dz[:,0,t])
yt3 = par3Dz[:,1,t] - np.mean(par3Dz[:,1,t])
xt2 = par2Dz[:,0,t] - np.mean(par2Dz[:,0,t])
yt2 = par2Dz[:,1,t] - np.mean(par2Dz[:,1,t])
#
cov3 = np.cov(xt3/1000, yt3/1000)
lambda_3, v = np.linalg.eig(cov3)
lambda_3 = np.sqrt(lambda_3)
theta3 = np.rad2deg(0.5*np.arctan2(2*cov3[1,0],(cov3[0,0]-cov3[1,1])))
theta3 = np.rad2deg(np.arcsin(v[0, 0]))
#
cov2 = np.cov(xt2/1000, yt2/1000)
lambda_2, v = np.linalg.eig(cov2)
lambda_2 = np.sqrt(lambda_2)
theta2 = np.rad2deg(0.5*np.arctan2(2*cov2[1,0],(cov2[0,0]-cov2[1,1]))) + np.pi*0.5
theta2 = np.rad2deg(np.arcsin(v[0, 0]))
#
e0 = Ellipse(xy=(np.mean(par3Dz[:,0,t])/1000,np.mean(par3Dz[:,1,t])/1000),width=4*lambda_3[1],height=4*lambda_3[0],angle=theta3)
e1 = Ellipse(xy=(np.mean(par2Dz[:,0,t])/1000,np.mean(par2Dz[:,1,t])/1000),width=4*lambda_2[1],height=4*lambda_2[0],angle=theta2)
ax.add_artist(e0)
e0.set_facecolor('none')
e0.set_edgecolor('k')
e0.set_linewidth(2.5)
ax.add_artist(e1)
e1.set_facecolor('none')
e1.set_edgecolor('k')
e1.set_linewidth(2.5)
e1.set_linestyle('dashed')
plt.xlim([-1, 11])
plt.ylim([-5, 5])
plt.xlabel('X [km]',fontsize=18)
plt.ylabel('Y [km]',fontsize=18)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
#
# ax.text(0, 7, str(depths[z])+'m, '+str(time0[t]/3600)+'h', fontsize=18)
plt.xlabel('X [km]', fontsize=18)
plt.ylabel('Y [km]', fontsize=18)
plt.savefig('./plot/'+exp+'/traj_'+exp+'_z'+str(depths[z])+'_'+str(time0[t])+'_h.eps')
print './plot/'+exp+'/traj_'+exp+'_z'+str(depths[z])+'_'+str(time0[t])+'_h.eps'
plt.close()
# plot ellipse
# vertical
fig = plt.figure(figsize=(8,8))
plt.plot((3,3),(0,-50),'k')
plt.plot((4,4),(0,-50),'k')
#
s2D = plt.scatter(par2Dz[:,0,t]/1000, par2Dz[:,2,t], marker='.', s=35, facecolor='b', lw = 0)
s3D = plt.scatter(par3Dz[:,0,t]/1000, par3Dz[:,2,t], marker='.', s=35, facecolor='r', lw = 0)
plt.legend((s3D,s2D),('3D','2D'))
#
plt.xlim([-1, 11])
plt.ylim([-50, 0])
#
print 'Saving 0 to eps'
#
# plt.text(10, -40, str(depths[z])+'m, '+str(time0[t]/3600)+'h', fontsize=18)
plt.xlabel('X [km]', fontsize=18)
plt.ylabel('Z [m]', fontsize=18)
plt.savefig('./plot/'+exp+'/traj_'+exp+'_z'+str(depths[z])+'_'+str(time0[t])+'_v.eps')
print './plot/'+exp+'/traj_'+exp+'_z'+str(depths[z])+'_'+str(time0[t])+'_v.eps'
plt.close()
|
liorsion/django-avatar | refs/heads/master | tests/settings.py | 71 |
from django.conf.urls.defaults import patterns, include, handler500, handler404
DEFAULT_CHARSET = 'utf-8'
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = ':memory:'
ROOT_URLCONF = 'settings'
STATIC_URL = '/site_media/static/'
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sites',
'django.contrib.comments',
'avatar',
)
TEMPLATE_LOADERS = (
'django.template.loaders.app_directories.load_template_source',
)
AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
AVATAR_MAX_SIZE = 1024 * 1024
AVATAR_MAX_AVATARS_PER_USER = 20
urlpatterns = patterns('',
(r'^avatar/', include('avatar.urls')),
)
def __exported_functionality__():
return (handler500, handler404)
|
hurricup/intellij-community | refs/heads/master | python/testData/inspections/PyTypeCheckerInspection/MapReturnElementType.py | 48 |
def test():
xs = map(lambda x: x + 1, [1, 2, 3])
print('foo' + xs[0]) # Can be a str since map returns list[V] | str | unicode
ys = map(tuple, iter([1, 2, 3]))
print(1 + <warning descr="Expected type 'Number', got 'Union[tuple, str, unicode]' instead">ys[0]</warning>, 'bar' + ys[1])
|
ffurano/root5 | refs/heads/v5-34-00-patches | tutorials/pyroot/parse_CSV_file_with_TTree_ReadStream.py | 28 |
#!/usr/bin/env python
import ROOT
import sys
import os
def parse_CSV_file_with_TTree_ReadStream(tree_name, afile):
"""
parse_CSV_file_with_TTree_ReadStream
Michael Marino: mmarino@gmail.com
This function provides an example of how one might
massage a csv data file to read into a ROOT TTree
via TTree::ReadStream. This could be useful if the
data read out from some DAQ program doesn't 'quite'
match the formatting expected by ROOT (e.g. comma-
separated, tab-separated with white-space strings,
headers not matching the expected format, etc.)
This example is shipped with a data
file that looks like:
Date/Time Synchro Capacity Temp.Cold Head Temp. Electrode HV Supply Voltage Electrode 1 Electrode 2 Electrode 3 Electrode 4
# Example data to read out. Some data have oddities that might need to
# dealt with, including the 'NaN' in Electrode 4 and the empty string in Date/Time (last row)
08112010.160622 7 5.719000E-10 8.790500 24.237700 -0.008332 0 0 0 0
8112010.160626 7 5.710000E-10 8.828400 24.237500 -0.008818 0 0 0 0
08112010.160626 7 5.719000E-10 8.828400 24.237500 -0.008818 0 0 0 0
08112010.160627 7 5.719000E-10 9.014300 24.237400 -0.028564 0 0 0 NaN
08112010.160627 7 5.711000E-10 8.786000 24.237400 -0.008818 0 0 0 0
08112010.160628 7 5.702000E-10 8.786000 24.237400 -0.009141 0 0 0 0
08112010.160633 7 5.710000E-10 9.016200 24.237200 -0.008818 0 0 0 0
7 5.710000E-10 8.903400 24.237200 -0.008818 0 0 0 0
These data require some massaging, including:
- Date/Time has a blank ('') entry that must be handled
- The headers are not in the correct format
- Tab-separated entries with additional white space
- NaN entries
"""
ROOT.gROOT.SetBatch()
# The mapping dictionary defines the proper branch names and types given a header name.
header_mapping_dictionary = {
'Date/Time' : ('Datetime' , str) ,
'Synchro' : ('Synchro' , int) ,
'Capacity' : ('Capacitance' , float) ,
'Temp.Cold Head' : ('TempColdHead' , float) ,
'Temp. Electrode' : ('TempElectrode' , float) ,
'HV Supply Voltage' : ('HVSupplyVoltage', float) ,
'Electrode 1' : ('Electrode1' , int) ,
'Electrode 2' : ('Electrode2' , int) ,
'Electrode 3' : ('Electrode3' , int) ,
'Electrode 4' : ('Electrode4' , int) ,
}
type_mapping_dictionary = {
str : 'C',
int : 'I',
float : 'F'
}
# Grab the header row of the file. In this particular example,
# the data are separated using tabs, but some of the header names
# include spaces and are not generally in the ROOT expected format, e.g.
#
# FloatData/F:StringData/C:IntData/I
#
# etc. Therefore, we grab the header_row of the file, and use
# a python dictionary to set up the appropriate branch descriptor
# line.
# Open a file, grab the first line, strip the new lines
# and split it into a list along 'tab' boundaries
header_row = open(afile).readline().strip().split('\t')
# Create the branch descriptor
branch_descriptor = ':'.join([header_mapping_dictionary[row][0]+'/'+
type_mapping_dictionary[header_mapping_dictionary[row][1]]
for row in header_row])
#print branch_descriptor
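# For the example data shown in the docstring above, branch_descriptor should
# evaluate to:
# "Datetime/C:Synchro/I:Capacitance/F:TempColdHead/F:TempElectrode/F:HVSupplyVoltage/F:Electrode1/I:Electrode2/I:Electrode3/I:Electrode4/I"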
# Handling the input and output names. Using the same
# base name for the ROOT output file.
output_ROOT_file_name = os.path.splitext(afile)[0] + '.root'
output_file = ROOT.TFile(output_ROOT_file_name, 'recreate')
print "Outputting %s -> %s" % (afile, output_ROOT_file_name)
output_tree = ROOT.TTree(tree_name, tree_name)
file_lines = open(afile).readlines()
# Clean the data entries: remove the first (header) row.
# Ensure empty strings are tagged as such since
# ROOT doesn't differentiate between different types
# of white space. Therefore, we change all of these
# entries to 'empty'. Also, avoiding any lines that begin
# with '#'
file_lines = ['\t'.join([val if (val.find(' ') == -1 and val != '')
else 'empty' for val in line.split('\t')])
for line in file_lines[1:] if line[0] != '#' ]
# Removing NaN, setting these entries to 0.0.
# Also joining the list of strings into one large string.
file_as_string = ('\n'.join(file_lines)).replace('NaN', str(0.0))
#print file_as_string
# creating an istringstream to pass into ReadStream
istring = ROOT.istringstream(file_as_string)
# Now read the stream
output_tree.ReadStream(istring, branch_descriptor)
output_file.cd()
output_tree.Write()
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: %s file_to_parse.dat" % sys.argv[0]
sys.exit(1)
parse_CSV_file_with_TTree_ReadStream("example_tree", sys.argv[1])
|
krafczyk/spack | refs/heads/develop | var/spack/repos/builtin/packages/r-amap/package.py | 2 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAmap(RPackage):
"""Tools for Clustering and Principal Component Analysis
(With robust methods, and parallelized functions)."""
homepage = "http://mulcyber.toulouse.inra.fr/projects/amap/"
url = "https://cran.rstudio.com/src/contrib/amap_0.8-16.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/amap/"
version('0.8-16', sha256='d3775ad7f660581f7d2f070e426be95ae0d6743622943e6f5491988e5217d4e2')
depends_on('r@2.10.0:', type=('build', 'run'))
|
RichDijk/eXe | refs/heads/master | formless/webform.py | 10 |
# -*- test-case-name: formless.test.test_freeform -*-
# Copyright (c) 2004 Divmod.
# See LICENSE for details.
from __future__ import generators
import os
import os.path
import warnings
from nevow import inevow
from nevow.stan import slot, specialMatches
from nevow.tags import *
from nevow import util
from nevow import compy
from nevow.context import NodeNotFound
import formless
from formless import iformless
from formless.formutils import enumerate, FormDefaults, FormErrors, calculatePostURL, keyToXMLID, getError
def _locateDefaultCSS():
"""Calculate and return the full path to the default freeform CSS.
"""
dirname = os.path.abspath(os.path.dirname(__file__))
return os.path.join(dirname, 'freeform-default.css')
try:
from nevow.static import File
except ImportError:
class File(object):
__implements__ = inevow.IResource
def __init__(self, path, content_type='text/plain'):
self.path = path
self.content_type = content_type
def locateChild(self, *args):
from nevow import rend
return rend.NotFound
def renderHTTP(self, ctx):
inevow.IRequest(ctx).setHeader('Content-type', self.content_type)
return open(self.path).read()
defaultCSS = File(_locateDefaultCSS(), 'text/css')
class DefaultRenderer(object):
__implements__ = inevow.IRenderer, iformless.ITypedRenderer
complexType = False
def rend(self, context, data):
return StringRenderer(data)
defaultBindingRenderer = DefaultRenderer()
class BaseInputRenderer(compy.Adapter):
__implements__ = inevow.IRenderer, iformless.ITypedRenderer
complexType = False
def rend(self, context, data):
defaults = context.locate(iformless.IFormDefaults)
value = defaults.getDefault(context.key, context)
context.remember(data.typedValue, iformless.ITyped)
if data.typedValue.getAttribute('immutable'):
inp = span(id=keyToXMLID(context.key))[value]
else:
##value may be a deferred; make sure to wait on it properly before calling self.input
## TODO: If flattening this results in an empty string, return an empty string
inp = invisible(
render=lambda c, value: self.input( context, invisible(), data, data.name, value ),
data=value)
if data.typedValue.getAttribute('hidden') or data.typedValue.getAttribute('compact'):
return inp
context.fillSlots( 'label', data.label )
context.fillSlots( 'name', data.name )
context.fillSlots( 'input', inp )
context.fillSlots( 'error', getError(context) )
context.fillSlots( 'description', data.description )
context.fillSlots( 'id', keyToXMLID(context.key) )
context.fillSlots( 'value', value )
return context.tag
def input(self, context, slot, data, name, value):
raise NotImplementedError, "Implement in subclass"
class PasswordRenderer(BaseInputRenderer):
def input(self, context, slot, data, name, value):
return [
input(id=keyToXMLID(context.key), name=name, type="password", _class="freeform-input-password"),
" Again ",
input(name="%s____2" % name, type="password", _class="freeform-input-password"),
]
class PasswordEntryRenderer(BaseInputRenderer):
def input(self, context, slot, data, name, value):
return slot[
input(id=keyToXMLID(context.key), type='password', name=name,
_class='freeform-input-password')]
class StringRenderer(BaseInputRenderer):
def input(self, context, slot, data, name, value):
if data.typedValue.getAttribute('hidden'):
T="hidden"
else:
T="text"
return slot[
input(id=keyToXMLID(context.key), type=T, name=name, value=value,
_class='freeform-input-%s' % T)]
class TextRenderer(BaseInputRenderer):
def input(self, context, slot, data, name, value):
return slot[
textarea(id=keyToXMLID(context.key), name=name, _class="freeform-textarea", rows=8, cols=40)[
value or '']]
class BooleanRenderer(BaseInputRenderer):
def input(self, context, slot, data, name, value):
## The only difference here is the "checked" attribute; the value is still the same because
## we want true to be passed to the server when the checkbox is checked and the form
## is posted.
node = input(id=keyToXMLID(context.key), type="checkbox", name=name, value='True', _class="freeform-input-checkbox")
if value:
node(checked="checked")
# HTML forms are so weak. If the checkbox is not checked, no value at all will be
# in request.args with the name data.name. So let's force the value False to always
# be in request.args[data.name]. If the checkbox is checked, the value True will
# be first, and we will find that.
return slot[node, input(type="hidden", name=name, value="False")]
class FileUploadRenderer(BaseInputRenderer):
def input(self, context, slot, data, name, value):
return slot[input(id=keyToXMLID(context.key), type="file", name=name,
_class='freeform-input-file')]
class ICurrentlySelectedValue(compy.Interface):
"""The currently-selected-value for the ITypedRenderer being rendered.
"""
csv = ICurrentlySelectedValue
def valToKey(c, d):
return iformless.ITyped(c).valueToKey(d)
def isSelected(c, d):
if csv(c) == valToKey(c, d):
return c.tag(selected='selected')
return c.tag
class ChoiceRenderer(BaseInputRenderer):
default_select = select(id=slot('id'), name=slot('name'), render=directive('sequence'))[
option(pattern="item",
value=valToKey,
render=isSelected)[
lambda c, d: iformless.ITyped(c).stringify(d)]]
def input(self, context, slot, data, name, value):
tv = data.typedValue
choices = tv.choices
if value:
context.remember(value, csv)
else:
context.remember('', csv)
try:
selector = context.tag.patternGenerator( 'selector' )
except NodeNotFound:
selector = self.default_select
return selector(data=choices)
class RadioRenderer(ChoiceRenderer):
default_select = span(id=slot('id'), render=directive('sequence'))[
div(pattern="item", _class="freeform-radio-option")[
input(type="radio", name=slot('name'), value=valToKey, render=isSelected)[
lambda c, d: iformless.ITyped(c).stringify(d)]]]
class ObjectRenderer(compy.Adapter):
__implements__ = inevow.IRenderer, iformless.ITypedRenderer
complexType = True
def rend(self, context, data):
configurable = context.locate(iformless.IConfigurable)
return getattr(configurable, data.name)
class NullRenderer(compy.Adapter):
"""Use a NullRenderer as the ITypedRenderer adapter when nothing should
be included in the output.
"""
__implements__ = inevow.IRenderer, iformless.ITypedRenderer
def rend(self, context, data):
return ''
class GroupBindingRenderer(compy.Adapter):
__implements__ = inevow.IRenderer,
def rend(self, context, data):
context.remember(data, iformless.IBinding)
from formless import configurable as conf
configurable = conf.GroupConfigurable(data.boundTo, data.typedValue.interface)
context.remember(configurable, iformless.IConfigurable)
bindingNames = configurable.getBindingNames(context)
def generateBindings():
for name in bindingNames:
bnd = configurable.getBinding(context, name)
renderer = iformless.IBindingRenderer(bnd, defaultBindingRenderer, persist=False)
renderer.isGrouped = True
renderer.needsSkin = True
yield invisible(
data=bnd,
render=renderer,
key=name)
return getError(context), form(
id=keyToXMLID(context.key),
enctype="multipart/form-data",
action=calculatePostURL(context, data),
method="post",
**{'accept-charset':'utf-8'})[
fieldset[
legend(_class="freeform-form-label")[data.label],
input(type='hidden', name='_charset_'),
generateBindings(),
input(type="submit")]]
class BaseBindingRenderer(compy.Adapter):
__implements__ = inevow.IRenderer,
isGrouped = False
needsSkin = False
def calculateDefaultSkin(self, context):
if self.isGrouped:
frm = invisible
butt = ''
fld = invisible
else:
frm = form(
id=slot('form-id'),
action=slot('form-action'),
method="post",
enctype="multipart/form-data",
**{'accept-charset':'utf-8'}
)
butt = slot('form-button')
fld = fieldset[input(type='hidden', name='_charset_')]
## Provide default skin since no skin was provided for us.
context.tag.clear()[
frm[fld[legend(_class="freeform-form-label")[ slot('form-label') ],
div(_class="freeform-form-description")[slot('form-description')],
div(_class="freeform-form-error")[ slot('form-error') ],
slot('form-arguments'), butt ]]]
def fillForm(self, context, data):
context.fillSlots( 'form-id', keyToXMLID(context.key) )
context.fillSlots( 'form-action', calculatePostURL(context, data) )
context.fillSlots( 'form-name', data.name )
context.fillSlots( 'form-error', getError(context) )
class PropertyBindingRenderer(BaseBindingRenderer):
def rend(self, context, data):
context.remember(data, iformless.IBinding)
context.remember(data.typedValue, iformless.ITyped)
typedRenderer = iformless.ITypedRenderer(data.typedValue, defaultBindingRenderer, persist=False)
if typedRenderer.complexType:
return invisible(data=data, render=typedRenderer)
if self.needsSkin or not context.tag.children:
self.calculateDefaultSkin(context)
if self.isGrouped or data.typedValue.getAttribute('immutable'):
subm = ''
else:
subm = input(type="submit", name="change", value="Change")
self.fillForm(context, data)
context.fillSlots( 'form-label', '' )
context.fillSlots( 'form-description', '' )
try:
content_pattern = context.tag.patternGenerator( 'binding' )
except NodeNotFound:
content_pattern = freeformDefaultContentPattern
context.fillSlots(
'form-arguments',
content_pattern(
data=data, render=typedRenderer, key=data.name))
context.fillSlots('form-button', subm)
return context.tag
freeformDefaultContentPattern = invisible[
label(_class="freeform-label", _for=slot('id'))[ slot('label') ],
span(_class="freeform-input")[ slot('input') ],
div(_class="freeform-error")[ slot('error') ],
div(_class="freeform-description")[label(_for=slot('id'))[ slot('description') ]]].freeze()
class MethodBindingRenderer(BaseBindingRenderer):
def rend(self, context, data):
if data.getAttribute('invisible'):
return ''
context.remember(data, iformless.IBinding)
if self.needsSkin or not context.tag.children:
self.calculateDefaultSkin(context)
self.fillForm(context, data)
context.fillSlots( 'form-label', data.label )
context.fillSlots( 'form-description', data.description )
context.fillSlots( 'form-arguments', list(self.generateArguments(context, data.getArgs())))
if not self.isGrouped:
try:
button_pattern = context.tag.onePattern( 'form-button' )
except NodeNotFound:
button_pattern = invisible[ slot('input') ]
button_pattern.fillSlots( 'input', input(type='submit', value=data.action or data.label) )
context.fillSlots( 'form-button', button_pattern )
return context.tag(key=None)
def generateArguments(self, context, args):
default_content_pattern = None
content_pattern = None
for argument in args:
try:
content_pattern = context.tag.patternGenerator( 'argument!!%s' % argument.name )
except NodeNotFound:
if default_content_pattern is None:
try:
default_content_pattern = context.tag.patternGenerator( 'argument' )
except NodeNotFound:
default_content_pattern = freeformDefaultContentPattern
content_pattern = default_content_pattern
renderer = iformless.ITypedRenderer(
argument.typedValue, defaultBindingRenderer, persist=False)
pat = content_pattern(
key=argument.name,
data=argument,
render=renderer,
remember={iformless.ITyped: argument.typedValue})
context.fillSlots( 'argument!!%s' % argument.name, pat )
yield pat
class ButtonRenderer(compy.Adapter):
__implements__ = inevow.IRenderer,
def rend(self, context, data):
return input(type='submit', value=data.label)
freeformDefaultForm = div(_class="freeform-form").freeze()
def renderForms(configurableKey='', bindingNames=None, bindingDefaults=None):
"""Render forms for either the named configurable, or, if no configurableKey is given,
the main configurable. If no bindingNames are given, forms will be
rendered for all bindings described by the configurable.
@param configurableKey: The name of the configurable to render. The empty
string indicates ctx.locate(IRenderer).
@param bindingNames: The names of the bindings to render. None indicates
all bindings.
@param bindingDefaults: A dict mapping bindingName: bindingDefault. For example,
given the TypedInterface::
>>> class IMyForm(annotate.TypedInterface):
... def doSomething(self, name=annotate.String()):
... pass
... doSomething = annotate.autocallable(doSomething)
... def doNothing(self, name=annotate.String()):
... pass
... doNothing = annotate.autocallable(doNothing)
... def doMoreThings(self, name=annotate.String(), things=annotate.String()):
... pass
... doMoreThings = annotate.autocallable(doMoreThings)
One might call renderForms() like this::
return webform.renderForms(
'',
bindingDefaults={'doSomething': {'name': 'jimbo'},
# Change 'name' default, don't change 'things'
'doMoreThings': {'things': 'jimbo'}
})
This would cause a form to be rendered which will call doSomething when
submitted, and would have "jimbo" filled out as the default value for
the name field, as well as a form which will call doMoreThings (with no
default value filled in for 'name' but 'jimbo' filled in for 'things').
"""
assert bindingNames is None or bindingDefaults is None, "Only specify bindingNames or bindingDefaults"
if bindingNames is not None:
bindingDefaults = dict.fromkeys(bindingNames, {})
def formRenderer(ctx, data):
cf = ctx.locate(iformless.IConfigurableFactory)
return util.maybeDeferred(cf.locateConfigurable, ctx, configurableKey
).addCallback(_formRenderIt)
def _formRenderIt(configurable):
def _innerFormRenderIt(context, data):
tag = context.tag
# Remember the key for when the form post URL is generated.
context.remember(configurableKey, iformless.IConfigurableKey)
if configurable is None:
warnings.warn(
"No configurable was found which provides enough type information for freeform to be able to render forms; %r" % (cf, ))
yield ''
return
context.remember(configurable, iformless.IConfigurable)
formDefaults = iformless.IFormDefaults(context)
available = configurable.getBindingNames(context)
bindings = []
default_binding_pattern = None
for name in available:
if bindingDefaults is not None:
if name not in bindingDefaults:
continue
defs = formDefaults.getAllDefaults(name)
defs.update(bindingDefaults[name])
bnd = configurable.getBinding(context, name)
renderer = iformless.IBindingRenderer(bnd, defaultBindingRenderer, persist=False)
try:
binding_pattern = tag.patternGenerator( 'freeform-form!!%s' % name )
except NodeNotFound:
if default_binding_pattern is None:
try:
default_binding_pattern = tag.patternGenerator( 'freeform-form' )
except NodeNotFound:
default_binding_pattern = freeformDefaultForm
binding_pattern = default_binding_pattern
if binding_pattern is freeformDefaultForm:
renderer.needsSkin = True
yield binding_pattern(data = bnd, render = renderer, key = name)
return _innerFormRenderIt
return invisible(render=formRenderer)
|
membase/ep-engine | refs/heads/master | management/cli_auth_utils.py | 2 |
#!/usr/bin/env python
import clitool
import inspect
import mc_bin_client
import memcacheConstants
import sys
import os
def cmd_decorator(f):
"""Decorate a function with code to authenticate based on 1-3
additional arguments."""
def g(*args, **kwargs):
mc = args[0]
spec = inspect.getargspec(f)
max = len(spec.args)
defaults = len(spec.defaults) if spec.defaults else 0
min = max - defaults
if len(args) < min:
print >> sys.stderr, ("Error: too few arguments - command "
"expected a minimum of %s but was passed "
"%s: %s"
% (min - 1, len(args) - 1, list(args[1:])))
sys.exit(2)
if spec.varargs is None:
if len(args) > max:
print >> sys.stderr, ("Error: too many arguments - command "
"expected a maximum of %s but was passed "
"%s: %s"
% (max - 1, len(args) - 1, list(args[1:])))
sys.exit(2)
bucket = kwargs.pop('bucketName', None)
username = kwargs.pop('username', None) or bucket
password = kwargs.pop('password', None)
if username is not None or password is not None:
bucket = bucket or 'default'
username = username or bucket
password = password or ''
try:
mc.sasl_auth_plain(username, password)
except mc_bin_client.MemcachedError:
print ("Authentication error for user:{0} bucket:{1}"
.format(username, bucket))
sys.exit(1)
mc.enable_xerror()
mc.hello("{0} {1}".format(os.path.split(sys.argv[0])[1],
os.getenv("EP_ENGINE_VERSION",
"unknown version")))
try:
if kwargs.pop('allBuckets', None):
buckets = mc.list_buckets()
for bucket in buckets:
print '*' * 78
print bucket
print
mc.bucket_select(bucket)
f(*args, **kwargs)
elif bucket is not None:
mc.bucket_select(bucket)
f(*args, **kwargs)
else:
f(*args, **kwargs)
except mc_bin_client.ErrorEaccess:
print ("No access to bucket:{} - permission denied "
"or bucket does not exist.".format(bucket))
sys.exit(1)
return g
def get_authed_clitool():
c = clitool.CliTool()
c.addFlag('-a', 'allBuckets', 'iterate over all buckets')
c.addOption('-b', 'bucketName', 'the bucket to get stats from (Default: default)')
c.addOption('-u', 'username', 'the user as which to authenticate (Default: bucketName)')
c.addOption('-p', 'password', 'the password for the bucket if one exists')
return c
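# Usage sketch (not part of the original module; the command name and helper are
# hypothetical, and clitool's addCommand/execute helpers are assumed): a
# cbstats-style tool would typically be wired up as
#
# @cmd_decorator
# def cmd_stats(mc, key=None):
#     print mc.stats(key or "")
#
# if __name__ == '__main__':
#     c = get_authed_clitool()
#     c.addCommand('stats', cmd_stats, 'stats [key]')
#     c.execute()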
|
studiawan/pygraphc | refs/heads/master | pygraphc/clustering/MaxCliquesPercolation.py | 1 |
import networkx as nx
from itertools import combinations
from KCliquePercolation import KCliquePercolation
from ClusterUtility import ClusterUtility
class MaxCliquesPercolation(KCliquePercolation):
"""This class find maximal cliques and their percolation in a graph.
The procedure will find any intersection (percolation) between any maximal cliques found.
The cluster is defined as percolated maximal cliques [Reid2012]_.
References
----------
.. [Reid2012] Fergal Reid, Aaron McDaid, and Neil Hurley. Percolation computation in complex networks.
In Proceedings of the 2012 IEEE/ACM International Conference on Advances in Social Networks
Analysis and Mining, pp. 274-281, 2012.
"""
def __init__(self, graph, edges_weight, nodes_id):
"""This is the constructor of class MaxCliquesPercolation
Parameters
----------
graph : graph
Graph to be clustered.
edges_weight : list[tuple]
List of tuple containing (node1, node2, cosine similarity between these two).
nodes_id : list
List of all node identifier.
Notes
-----
max_cliques : list[frozenset]
List of frozenset containing node id for each maximal clique.
"""
super(MaxCliquesPercolation, self).__init__(graph, edges_weight, nodes_id)
def init_maxclique_percolation(self):
"""Initialization of maxial clique percolation method.
The first step is to build temporary graph (percolation graph). Then the procedure finds
all maximal cliques in the graph.
"""
super(MaxCliquesPercolation, self)._build_temp_graph()
maxcliques = self._find_maxcliques()
self.cliques = maxcliques
def get_maxcliques_percolation(self, k):
"""The main method to find clusters based on maximal clique percolation.
This method looks for percolation between cliques.
It then remove any edges that connecting two vertices but has different clusters.
Returns
-------
k : int
Number of percolation or intersection between an individual clique.
clusters : dict[frozenset]
List of frozenset containing node identifier (node id in integer).
"""
super(MaxCliquesPercolation, self)._get_percolation_graph(self.cliques, k)
super(MaxCliquesPercolation, self)._remove_outcluster()
clusters = super(MaxCliquesPercolation, self)._get_clusters()
return clusters
def _find_maxcliques(self):
"""Find maximal cliques using `find_clique` function from NetworkX.
Returns
-------
maxcliques : list[frozenset]
List of frozenset containing node id for each maximal clique.
"""
maxcliques = list(frozenset(c) for c in nx.find_cliques(self.graph))
self.cliques = maxcliques
return maxcliques
class MaxCliquesPercolationWeighted(MaxCliquesPercolation):
"""This is a class for maximal clique percolation with edge weight [Liu2009]_.
Edge weight is evaluated using intensity threshold or the geometric mean
for all edge weights in a maximal clique [Studiawan2016c]_. We then remove the overlapping nodes
where a node follows only the neighboring cluster with the larger total edge weight [Studiawan2016c]_.
References
----------
.. [Liu2009] Guimei Liu, Limsoon Wong, and Hon Nian Chua. Complex discovery from
weighted PPI networks. Bioinformatics, 25(15):1891-1897, 2009.
.. [Studiawan2016c] H. Studiawan, C. Payne, F. Sohel, SSH log clustering using weighted
maximal clique percolation (to be submitted).
"""
def __init__(self, graph, edges_weight, nodes_id):
"""This is the constructor of class MaxCliquePercolation Weighted.
The parameters are the same with its parent class but we add a threshold
for the intensity for maximal clique found.
Parameters
----------
graph : graph
Graph to be clustered.
edges_weight : list[tuple]
List of tuple containing (node1, node2, cosine similarity between these two).
nodes_id : list
List of all node identifier.
"""
super(MaxCliquesPercolationWeighted, self).__init__(graph, edges_weight, nodes_id)
self.percolation_dict = {}
def get_maxcliques_percolation_weighted(self, k, threshold):
"""This is the main method for maximal clique percolation for edge-weighted graph.
Parameters
----------
k : int
Number of percolation or intersection between an individual clique.
threshold : float
Threshold for intensity of maximal clique.
Returns
-------
clusters : dict[int, frozenset]
List of frozenset containing node identifier (node id in integer).
Notes
-----
weighted_maxcliques : list[frozenset]
List of frozenset containing node identifier for each weighted maximal clique.
"""
# get weighted maximal cliques and get percolation
weighted_maxcliques = ClusterUtility.get_weighted_cliques(self.graph, self.cliques, threshold)
clusters = {}
if weighted_maxcliques:
super(MaxCliquesPercolationWeighted, self)._get_percolation_graph(weighted_maxcliques, k)
self.__set_percolation_dict(self.clique_percolation)
# remove overlapping nodes
percolations = self.clique_percolation.values()
for p1, p2 in combinations(percolations, 2):
intersections = p1.intersection(p2)
if intersections:
for node in intersections:
node_neighbors = self.graph.neighbors(node)
p1_neighbors, p2_neighbors = p1.intersection(node_neighbors), p2.intersection(node_neighbors)
p1_neighbors_weight = self.__get_neighbors_weight(node, p1_neighbors)
p2_neighbors_weight = self.__get_neighbors_weight(node, p2_neighbors)
# follow the neighboring cluster which has bigger sum of edge-weight
self.percolation_dict[node] = self.percolation_dict[list(p1_neighbors)[0]] \
if p1_neighbors_weight > p2_neighbors_weight else \
self.percolation_dict[list(p2_neighbors)[0]]
self.__set_graph_cluster()
clusters = self.__get_clusters()
# remove intercluster edges (edges connecting nodes in different clusters)
super(MaxCliquesPercolationWeighted, self)._remove_outcluster()
return clusters
def __set_percolation_dict(self, percolations):
"""Set dictionary of node id and its index in percolations.
Parameters
----------
percolations : dict[frozenset]
Dictionary of frozenset containing nodes id for each maximal clique.
"""
nodes = self.graph.nodes()
percolations_merged = []
for index, percolation in percolations.iteritems():
for p in percolation:
self.percolation_dict[p] = index
# merge percolations list
percolations_merged += percolation
diff = set(nodes).difference(percolations_merged)
other_cluster = len(percolations)
if diff:
for i in diff:
self.percolation_dict[i] = other_cluster
other_cluster += 1
def __get_neighbors_weight(self, node, neighbors):
"""Get all weight of neighboring cluster.
Parameters
----------
node : int
Node identifier
neighbors : list[int]
List of node identifier of intersection between two clusters
Returns
-------
weight : int
Sum of all edge weight from specific neighboring cluster.
"""
weight = 0
for n in neighbors:
weight += self.graph[node][n][0]['weight']
return weight
def __set_graph_cluster(self):
"""Set cluster id in the given graph based on percolation dictionary.
Returns
-------
self.graph : graph
Graph with updated cluster identifier after cluster processing.
"""
for node in self.graph.nodes_iter(data=True):
self.graph.node[node[0]]['cluster'] = self.percolation_dict[node[0]]
return self.graph
def __get_clusters(self):
"""Get maximal clique percolation as clusters with incremental cluster id.
Returns
-------
clusters : dict[list]
Dictionary of list containing node identifier for each cluster found.
"""
cluster_ids = set(self.percolation_dict.values())
clusters = {}
cluster_idx = 0
for ids in cluster_ids:
cluster = []
for node, cluster_id in self.percolation_dict.iteritems():
if ids == cluster_id:
cluster.append(node)
clusters[cluster_idx] = cluster
cluster_idx += 1
return clusters
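# Usage sketch (illustrative, not part of the original module): given a weighted
# NetworkX graph plus the edges_weight and nodes_id structures described in the
# constructors above, clustering would roughly be driven as:
#
# mcp = MaxCliquesPercolation(graph, edges_weight, nodes_id)
# mcp.init_maxclique_percolation()
# clusters = mcp.get_maxcliques_percolation(k=3)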
|
nvoron23/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/context_processors/models.py | 431 |
# Models file for tests to run.
|
2947721120/curly-hockeypuck | refs/heads/master | examples/python/crossword2.py | 32 |
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Crosswords in Google CP Solver.
This is a standard example for constraint logic programming. See e.g.
http://www.cis.temple.edu/~ingargio/cis587/readings/constraints.html
'''
We are to complete the puzzle
1 2 3 4 5
+---+---+---+---+---+ Given the list of words:
1 | 1 | | 2 | | 3 | AFT LASER
+---+---+---+---+---+ ALE LEE
2 | # | # | | # | | EEL LINE
+---+---+---+---+---+ HEEL SAILS
3 | # | 4 | | 5 | | HIKE SHEET
+---+---+---+---+---+ HOSES STEER
4 | 6 | # | 7 | | | KEEL TIE
+---+---+---+---+---+ KNOT
5 | 8 | | | | |
+---+---+---+---+---+
6 | | # | # | | # | The numbers 1,2,3,4,5,6,7,8 in the crossword
+---+---+---+---+---+ puzzle correspond to the words
that will start at those locations.
'''
The model was inspired by Sebastian Brand's Array Constraint cross word
example
http://www.cs.mu.oz.au/~sbrand/project/ac/
http://www.cs.mu.oz.au/~sbrand/project/ac/examples.pl
Also, see the following models:
* MiniZinc: http://www.hakank.org/minizinc/crossword.mzn
* Comet: http://www.hakank.org/comet/crossword.co
* ECLiPSe: http://hakank.org/eclipse/crossword2.ecl
* Gecode: http://hakank.org/gecode/crossword2.cpp
* SICStus: http://hakank.org/sicstus/crossword2.pl
* Zinc: http://hakank.org/minizinc/crossword2.zinc
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from google.apputils import app
from ortools.constraint_solver import pywrapcp
def main(_):
# Create the solver.
solver = pywrapcp.Solver("Problem")
#
# data
#
alpha = "_abcdefghijklmnopqrstuvwxyz"
a = 1
b = 2
c = 3
d = 4
e = 5
f = 6
g = 7
h = 8
i = 9
j = 10
k = 11
l = 12
m = 13
n = 14
o = 15
p = 16
q = 17
r = 18
s = 19
t = 20
u = 21
v = 22
w = 23
x = 24
y = 25
z = 26
num_words = 15
word_len = 5
AA = [
[h, o, s, e, s], # HOSES
[l, a, s, e, r], # LASER
[s, a, i, l, s], # SAILS
[s, h, e, e, t], # SHEET
[s, t, e, e, r], # STEER
[h, e, e, l, 0], # HEEL
[h, i, k, e, 0], # HIKE
[k, e, e, l, 0], # KEEL
[k, n, o, t, 0], # KNOT
[l, i, n, e, 0], # LINE
[a, f, t, 0, 0], # AFT
[a, l, e, 0, 0], # ALE
[e, e, l, 0, 0], # EEL
[l, e, e, 0, 0], # LEE
[t, i, e, 0, 0] # TIE
]
num_overlapping = 12
overlapping = [
[0, 2, 1, 0], # s
[0, 4, 2, 0], # s
[3, 1, 1, 2], # i
[3, 2, 4, 0], # k
[3, 3, 2, 2], # e
[6, 0, 1, 3], # l
[6, 1, 4, 1], # e
[6, 2, 2, 3], # e
[7, 0, 5, 1], # l
[7, 2, 1, 4], # s
[7, 3, 4, 2], # e
[7, 4, 2, 4] # r
]
n = 8
# declare variables
A = {}
for I in range(num_words):
for J in range(word_len):
A[(I, J)] = solver.IntVar(0, 26, "A(%i,%i)" % (I, J))
A_flat = [A[(I, J)] for I in range(num_words) for J in range(word_len)]
E = [solver.IntVar(0, num_words, "E%i" % I) for I in range(n)]
#
# constraints
#
solver.Add(solver.AllDifferent(E))
for I in range(num_words):
for J in range(word_len):
solver.Add(A[(I, J)] == AA[I][J])
for I in range(num_overlapping):
# This is what I would do:
# solver.Add(A[(E[overlapping[I][0]], overlapping[I][1])] == A[(E[overlapping[I][2]], overlapping[I][3])])
# But we must use Element explicitly
solver.Add(
solver.Element(
A_flat, E[overlapping[I][0]] * word_len + overlapping[I][1]) ==
solver.Element(
A_flat, E[overlapping[I][2]] * word_len + overlapping[I][3]))
#
# solution and search
#
solution = solver.Assignment()
solution.Add(E)
# db: DecisionBuilder
db = solver.Phase(E + A_flat,
solver.INT_VAR_SIMPLE,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
print E
print_solution(A, E, alpha, n, word_len)
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
def print_solution(A, E, alpha, n, word_len):
for ee in range(n):
print "%i: (%2i)" % (ee, E[ee].Value()),
print "".join(["%s" % (alpha[A[ee, ii].Value()]) for ii in range(word_len)])
if __name__ == "__main__":
app.run()
|
google-code-export/evennia | refs/heads/master | src/server/caches.py | 2 |
"""
Central caching module.
"""
from sys import getsizeof
import os
import threading
from collections import defaultdict
from src.server.models import ServerConfig
from src.utils.utils import uses_database, to_str, get_evennia_pids
_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
_IS_SUBPROCESS = os.getpid() in get_evennia_pids()
_IS_MAIN_THREAD = threading.currentThread().getName() == "MainThread"
#
# Set up the cache stores
#
_ATTR_CACHE = {}
_PROP_CACHE = defaultdict(dict)
#------------------------------------------------------------
# Cache key hash generation
#------------------------------------------------------------
if uses_database("mysql") and ServerConfig.objects.get_mysql_db_version() < '5.6.4':
    # mysql < 5.6.4 doesn't support millisecond precision
_DATESTRING = "%Y:%m:%d-%H:%M:%S:000000"
else:
_DATESTRING = "%Y:%m:%d-%H:%M:%S:%f"
def hashid(obj, suffix=""):
"""
    Returns a per-class unique hash that combines the object's
    class name with its idnum and creation time. This makes the id unique
    also across different typeclassed entities such as scripts and
    objects (which may share the same database id).
"""
if not obj:
return obj
try:
hid = _GA(obj, "_hashid")
except AttributeError:
try:
date, idnum = _GA(obj, "db_date_created").strftime(_DATESTRING), _GA(obj, "id")
except AttributeError:
try:
# maybe a typeclass, try to go to dbobj
obj = _GA(obj, "dbobj")
date, idnum = _GA(obj, "db_date_created").strftime(_DATESTRING), _GA(obj, "id")
except AttributeError:
# this happens if hashing something like ndb. We have to
                # rely on memory addressing in this case.
date, idnum = "InMemory", id(obj)
if not idnum or not date:
# this will happen if setting properties on an object which
# is not yet saved
return None
# we have to remove the class-name's space, for eventual use
# of memcached
hid = "%s-%s-#%s" % (_GA(obj, "__class__"), date, idnum)
hid = hid.replace(" ", "")
# we cache the object part of the hashid to avoid too many
# object lookups
_SA(obj, "_hashid", hid)
# build the complete hashid
hid = "%s%s" % (hid, suffix)
return to_str(hid)
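# Illustration only (values are made up): for a saved database object the hash
# built above comes out roughly as
#   "<class'src.objects.models.ObjectDB'>-2013:01:01-12:00:00:000000-#42"
# i.e. the class repr with spaces stripped, the creation timestamp and the
# primary key, with any suffix argument appended at the very end.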
#------------------------------------------------------------
# Cache callback handlers
#------------------------------------------------------------
# callback to field pre_save signal (connected in src.server.server)
def field_pre_save(sender, instance=None, update_fields=None, raw=False, **kwargs):
"""
Called at the beginning of the field save operation. The save method
must be called with the update_fields keyword in order to be most efficient.
This method should NOT save; rather it is the save() that triggers this
    function. Its main purpose is to allow plugging in save handlers and oob
    handlers.
"""
if raw:
return
if update_fields:
# this is a list of strings at this point. We want field objects
update_fields = (_GA(_GA(instance, "_meta"), "get_field_by_name")(field)[0] for field in update_fields)
else:
# meta.fields are already field objects; get them all
update_fields = _GA(_GA(instance, "_meta"), "fields")
for field in update_fields:
fieldname = field.name
handlername = "_at_%s_presave" % fieldname
handler = _GA(instance, handlername) if handlername in _GA(sender, '__dict__') else None
if callable(handler):
handler()
def field_post_save(sender, instance=None, update_fields=None, raw=False, **kwargs):
"""
    Called at the end of the field save operation. The save method
    must be called with the update_fields keyword in order to be most efficient.
    This method should NOT save; rather it is the save() that triggers this
    function. Its main purpose is to allow plugging in save handlers and oob
    handlers.
"""
if raw:
return
if update_fields:
# this is a list of strings at this point. We want field objects
update_fields = (_GA(_GA(instance, "_meta"), "get_field_by_name")(field)[0] for field in update_fields)
else:
# meta.fields are already field objects; get them all
update_fields = _GA(_GA(instance, "_meta"), "fields")
for field in update_fields:
fieldname = field.name
handlername = "_at_%s_postsave" % fieldname
handler = _GA(instance, handlername) if handlername in _GA(sender, '__dict__') else None
if callable(handler):
handler()
trackerhandler = _GA(instance, "_trackerhandler") if "_trackerhandler" in _GA(instance, '__dict__') else None
if trackerhandler:
trackerhandler.update(fieldname, _GA(instance, fieldname))
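# Usage sketch (hypothetical field name): if the model class defines methods
# called _at_db_key_presave and _at_db_key_postsave, then
# instance.save(update_fields=["db_key"]) fires the first handler just before
# the write and the second just after it; calling save() without update_fields
# runs the matching handlers for every field on the model.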
#------------------------------------------------------------
# Attribute lookup cache
#------------------------------------------------------------
def get_attr_cache(obj):
"Retrieve lookup cache"
hid = hashid(obj)
return _ATTR_CACHE.get(hid, None)
def set_attr_cache(obj, store):
"Set lookup cache"
global _ATTR_CACHE
hid = hashid(obj)
_ATTR_CACHE[hid] = store
#------------------------------------------------------------
# Property cache - this is a generic cache for properties stored on models.
#------------------------------------------------------------
# access methods
def get_prop_cache(obj, propname):
"retrieve data from cache"
hid = hashid(obj, "-%s" % propname)
return _PROP_CACHE[hid].get(propname, None) if hid else None
def set_prop_cache(obj, propname, propvalue):
"Set property cache"
hid = hashid(obj, "-%s" % propname)
if hid:
_PROP_CACHE[hid][propname] = propvalue
def del_prop_cache(obj, propname):
"Delete element from property cache"
hid = hashid(obj, "-%s" % propname)
if hid:
if propname in _PROP_CACHE[hid]:
del _PROP_CACHE[hid][propname]
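# Minimal usage sketch (the object and property name are illustrative only):
#   set_prop_cache(obj, "desc", "a rusty sword")
#   get_prop_cache(obj, "desc")    # -> "a rusty sword" (None on a cache miss)
#   del_prop_cache(obj, "desc")    # silently ignores entries that are not cached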
def flush_prop_cache():
"Clear property cache"
global _PROP_CACHE
_PROP_CACHE = defaultdict(dict)
def get_cache_sizes():
"""
Get cache sizes, expressed in number of objects and memory size in MB
"""
global _ATTR_CACHE, _PROP_CACHE
attr_n = len(_ATTR_CACHE)
    attr_mb = sum(getsizeof(store) for store in _ATTR_CACHE.values()) / (1024.0 * 1024.0)
prop_n = sum(len(dic) for dic in _PROP_CACHE.values())
    prop_mb = sum(sum([getsizeof(obj) for obj in dic.values()]) for dic in _PROP_CACHE.values()) / (1024.0 * 1024.0)
return (attr_n, attr_mb), (prop_n, prop_mb)
|
hvaibhav/linux-media
|
refs/heads/master
|
tools/perf/util/setup.py
|
97
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
perf = Extension('perf',
sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
'util/util.c', 'util/xyarray.c', 'util/cgroup.c',
'util/debugfs.c'],
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
haiyangd/python-show-me-the-code-
|
refs/heads/master
|
renzongxian/0001/0001.py
|
40
|
# Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-11-30
# Python 3.4
"""
Problem 0001: As an independent Apple Store App developer, you want to run a
limited-time promotion and generate activation codes (or coupons) for your app.
How would you use Python to generate 200 activation codes (or coupons)?
"""
import uuid
def generate_key():
key_list = []
for i in range(200):
uuid_key = uuid.uuid3(uuid.NAMESPACE_DNS, str(uuid.uuid1()))
key_list.append(str(uuid_key).replace('-', ''))
return key_list
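# Note (not part of the original solution): uuid3 hashes the uuid1 string with
# MD5, so each key is a 32-character hex string once the dashes are stripped;
# for 200 keys the collision risk is negligible, but uniqueness is not checked.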
if __name__ == '__main__':
print(generate_key())
|
codebhendi/scrapy
|
refs/heads/master
|
scrapy/utils/response.py
|
20
|
"""
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import re
import weakref
import webbrowser
import tempfile
from twisted.web import http
from twisted.web.http import RESPONSES
from w3lib import html
from scrapy.utils.decorators import deprecated
@deprecated
def body_or_str(*a, **kw):
from scrapy.utils.iterators import _body_or_str
return _body_or_str(*a, **kw)
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
text = response.body_as_unicode()[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url, \
response.encoding)
return _baseurl_cache[response]
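# Usage sketch (hypothetical response; as above, only the first 4096 bytes of
# the body are inspected):
#   from scrapy.http import HtmlResponse
#   resp = HtmlResponse(url="http://www.example.com/page/",
#                       body="<head><base href='http://www.example.com/'></head>")
#   get_base_url(resp)   # -> "http://www.example.com/"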
_noscript_re = re.compile(u'<noscript>.*?</noscript>', re.IGNORECASE | re.DOTALL)
_script_re = re.compile(u'<script.*?>.*?</script>', re.IGNORECASE | re.DOTALL)
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
    """Parse the http-equiv refresh parameter from the given response"""
if response not in _metaref_cache:
text = response.body_as_unicode()[0:4096]
text = _noscript_re.sub(u'', text)
text = _script_re.sub(u'', text)
_metaref_cache[response] = html.get_meta_refresh(text, response.url, \
response.encoding)
return _metaref_cache[response]
def response_status_message(status):
"""Return status code plus status text descriptive message
>>> response_status_message(200)
'200 OK'
>>> response_status_message(404)
'404 Not Found'
"""
    return '%s %s' % (status, RESPONSES.get(int(status)))
def response_httprepr(response):
"""Return raw HTTP representation (as string) of the given response. This
is provided only for reference, since it's not the exact stream of bytes
that was received (that's not exposed by Twisted).
"""
s = "HTTP/1.1 %d %s\r\n" % (response.status, RESPONSES.get(response.status, ''))
if response.headers:
s += response.headers.to_string() + "\r\n"
s += "\r\n"
s += response.body
return s
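# Rough shape of the returned string for a 200 text/html response (values are
# illustrative):
#   "HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n<html>..."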
def open_in_browser(response, _openfunc=webbrowser.open):
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if '<base' not in body:
body = body.replace('<head>', '<head><base href="%s">' % response.url)
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: %s" % \
response.__class__.__name__)
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc("file://%s" % fname)
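# Usage sketch (hypothetical response object): open_in_browser(response) writes
# the body to a temporary file (injecting a <base href=...> tag for HtmlResponse
# bodies that lack one) and opens it with webbrowser.open(), which is handy for
# eyeballing a page from the scrapy shell.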
|
ElDeveloper/qiime
|
refs/heads/master
|
tests/test_plot_rank_abundance_graph.py
|
15
|
#!/usr/bin/env python
# File created on 17 Aug 2010
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from os import close
from os.path import exists, abspath
from shutil import rmtree
from numpy import array
from matplotlib.axes import Subplot
from tempfile import mkstemp, mkdtemp
from unittest import TestCase, main
from skbio.util import remove_files
from qiime.plot_rank_abundance_graph import make_sorted_frequencies,\
plot_rank_abundance_graph, plot_rank_abundance_graphs
from qiime.util import create_dir
from biom.parse import parse_biom_table
class PlotRankAbundance(TestCase):
def setUp(self):
self.tmp_dir = '/tmp/'
self.files_to_remove = []
self._dirs_to_remove = []
def tearDown(self):
remove_files(self.files_to_remove)
if self._dirs_to_remove:
for i in self._dirs_to_remove:
rmtree(i)
def test_make_sorted_frequencies(self):
"""make_sorted_frequencies transforms and sorts correctly"""
# works on empty
counts = array([])
self.assertItemsEqual(make_sorted_frequencies(counts), [])
# works on zeros
counts = array([0, 0, 0, 0, 0, 0])
self.assertItemsEqual(make_sorted_frequencies(counts), [])
# works on flat data
counts = array([3, 3, 3, 3, 3])
expected_freqs = [0.2, 0.2, 0.2, 0.2, 0.2]
observed_freqs = make_sorted_frequencies(counts)
self.assertItemsEqual(observed_freqs, expected_freqs)
# works on real data
counts = array([1, 2, 0, 1, 0, 2, 4])
expected_freqs = [0.4, 0.2, 0.2, 0.1, 0.1]
observed_freqs = make_sorted_frequencies(counts)
self.assertItemsEqual(observed_freqs, expected_freqs)
    def test_make_sorted_frequencies_absolute(self):
"""make_sorted_frequencies returns correct absolute values"""
# works on empty
counts = array([])
self.assertItemsEqual(make_sorted_frequencies(counts, True), [])
# works on zeros
counts = array([0, 0, 0, 0, 0, 0])
self.assertItemsEqual(make_sorted_frequencies(counts, True), [])
# works on flat data
counts = array([3, 3, 3, 3, 3])
expected_freqs = [3, 3, 3, 3, 3]
observed_freqs = make_sorted_frequencies(counts, True)
self.assertItemsEqual(observed_freqs, expected_freqs)
        # works on real data
counts = array([1, 2, 0, 1, 0, 2, 4])
expected_freqs = [4, 2, 2, 1, 1]
observed_freqs = make_sorted_frequencies(counts, True)
self.assertItemsEqual(observed_freqs, expected_freqs)
    def test_plot_rank_abundance_graph(self):
        """plot_rank_abundance_graph plots something"""
counts = array([20, 15, 12, 8, 4, 2, 1, 3, 1, 2])
observed = plot_rank_abundance_graph(counts)
# can we test something more clever here?
# basically just tests that something is drawn, but not what
self.assertEqual(type(observed), Subplot)
def test_plot_rank_abundance_graphs_filetype(self):
"""plot_rank_abundance_graphs works with all filetypes"""
self.otu_table = parse_biom_table(otu_table_sparse)
self.dir = mkdtemp(dir=self.tmp_dir,
prefix="test_plot_rank_abundance",
suffix="/")
self._dirs_to_remove.append(self.dir)
# test all supported filetypes
for file_type in ['pdf', 'svg', 'png', 'eps']:
tmp_file = abspath(self.dir + "rank_abundance_cols_0." + file_type)
plot_rank_abundance_graphs(
tmp_file,
'S3',
self.otu_table,
file_type=file_type)
self.files_to_remove.append(tmp_file)
self.assertTrue(exists(tmp_file))
def test_plot_rank_abundance_graphs(self):
"""plot_rank_abundance_graphs works with any number of samples (Table)"""
self.otu_table = parse_biom_table(otu_table_sparse)
self.dir = mkdtemp(dir=self.tmp_dir,
prefix="test_plot_rank_abundance",
suffix="/")
self._dirs_to_remove.append(self.dir)
fd, tmp_fname = mkstemp(dir=self.dir)
close(fd)
# test empty sample name
self.assertRaises(
ValueError, plot_rank_abundance_graphs, tmp_fname, '',
self.otu_table)
# test invalid sample name
self.assertRaises(ValueError, plot_rank_abundance_graphs, tmp_fname,
'Invalid_sample_name',
self.otu_table)
# test with two samples
file_type = "pdf"
tmp_file = abspath(self.dir + "rank_abundance_cols_0_2." + file_type)
plot_rank_abundance_graphs(tmp_file, 'S3,S5', self.otu_table,
file_type=file_type)
self.assertTrue(exists(tmp_file))
self.files_to_remove.append(tmp_file)
# test with all samples
tmp_file = abspath(self.dir + "rank_abundance_cols_0_1_2." + file_type)
plot_rank_abundance_graphs(tmp_file, '*', self.otu_table,
file_type=file_type)
self.files_to_remove.append(tmp_file)
self.assertTrue(exists(tmp_file))
otu_table_sparse = ('{"rows": [{"id": "0", "metadata": '
'{"taxonomy": ["Root", "Bacteria"]}}, {"id": "3", '
'"metadata": {"taxonomy": ["Root", "Bacteria", "Acidobacteria"]}}, '
'{"id": "4", "metadata": '
'{"taxonomy": ["Root", "Bacteria", "Bacteroidetes"]}}, '
'{"id": "2", "metadata": {"taxonomy": '
'["Root", "Bacteria", "Acidobacteria", "Acidobacteria", "Gp5"]}}, '
'{"id": "6", "metadata": {"taxonomy": ["Root", "Archaea"]}}], '
'"format": "Biological Observation Matrix v0.9", "data": '
'[[0, 0, 1.0], [0, 2, 1.0], [1, 0, 2.0], [1, 2, 1.0], [2, 0, 1.0], '
'[2, 2, 9.0], [3, 0, 1.0], [3, 2, 1.0], [4, 0, 1.0], [4, 1, 25.0], '
'[4, 2, 42.0]], "columns": [{"id": "S3", "metadata": null}, '
'{"id": "S4", "metadata": null}, {"id": "S5", "metadata": null}], '
'"generated_by": "QIIME 1.4.0-dev, svn revision 2571", "matrix_type": '
'"sparse", "shape": [5, 3], '
'"format_url": "http://www.qiime.org/svn_documentation/documentation/'
'biom_format.html", "date": "2011-12-21T19:33:37.780300", '
'"type": "OTU table", "id": null, "matrix_element_type": "float"}')
if __name__ == "__main__":
main()
|
beiko-lab/gengis
|
refs/heads/master
|
bin/Lib/site-packages/win32/Demos/security/list_rights.py
|
4
|
import win32security,win32file,win32api,ntsecuritycon,win32con
from security_enums import TRUSTEE_TYPE,TRUSTEE_FORM,ACE_FLAGS,ACCESS_MODE
new_privs = ((win32security.LookupPrivilegeValue('',ntsecuritycon.SE_SECURITY_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('',ntsecuritycon.SE_CREATE_PERMANENT_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('','SeEnableDelegationPrivilege'),win32con.SE_PRIVILEGE_ENABLED) ##doesn't seem to be in ntsecuritycon.py ?
)
ph = win32api.GetCurrentProcess()
th = win32security.OpenProcessToken(ph,win32security.TOKEN_ALL_ACCESS) ##win32con.TOKEN_ADJUST_PRIVILEGES)
win32security.AdjustTokenPrivileges(th,0,new_privs)
policy_handle = win32security.GetPolicyHandle('',win32security.POLICY_ALL_ACCESS)
sidlist=win32security.LsaEnumerateAccountsWithUserRight(policy_handle,ntsecuritycon.SE_RESTORE_NAME)
for sid in sidlist:
print win32security.LookupAccountSid('',sid)
win32security.LsaClose(policy_handle)
|
Argon-Zhou/django
|
refs/heads/master
|
tests/multiple_database/tests.py
|
47
|
from __future__ import unicode_literals
import datetime
import pickle
import warnings
from operator import attrgetter
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.models import signals
from django.db.utils import ConnectionRouter
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.encoding import force_text
from django.utils.six import StringIO
from .models import Book, Person, Pet, Review, UserProfile
from .routers import AuthRouter, TestRouter, WriteRouter
class QueryTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets will use the default database by default"
self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
self.assertEqual(Book.objects.using('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').db, 'other')
self.assertEqual(Book.objects.db_manager('other').all().db, 'other')
def test_default_creation(self):
"Objects created on the default database don't leak onto other databases"
# Create a book on the default database using create()
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
# Create a book on the default database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save()
# Check that book exists on the default database, but not on other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Pro Django"
)
try:
Book.objects.get(title="Dive into Python")
Book.objects.using('default').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Dive into Python"
)
def test_other_creation(self):
"Objects created on another database don't leak onto the default database"
# Create a book on the second database
Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
        # Create a book on the other database using a save
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
        # Check that book exists on the other database, but not on the default database
try:
Book.objects.using('other').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Pro Django"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Pro Django"
)
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Dive into Python"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Dive into Python"
)
def test_refresh(self):
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
dive.save(using='other')
dive2 = Book.objects.using('other').get()
dive2.title = "Dive into Python (on default)"
dive2.save(using='default')
dive.refresh_from_db()
self.assertEqual(dive.title, "Dive into Python")
dive.refresh_from_db(using='default')
self.assertEqual(dive.title, "Dive into Python (on default)")
self.assertEqual(dive._state.db, "default")
def test_basic_queries(self):
"Queries are constrained to a single database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published=datetime.date(2009, 5, 4))
dive = Book.objects.using('other').get(title__icontains="dive")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__icontains="dive")
dive = Book.objects.using('other').get(title__iexact="dive INTO python")
self.assertEqual(dive.title, "Dive into Python")
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__iexact="dive INTO python")
dive = Book.objects.using('other').get(published__year=2009)
self.assertEqual(dive.title, "Dive into Python")
self.assertEqual(dive.published, datetime.date(2009, 5, 4))
self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published__year=2009)
years = Book.objects.using('other').dates('published', 'year')
self.assertEqual([o.year for o in years], [2009])
years = Book.objects.using('default').dates('published', 'year')
self.assertEqual([o.year for o in years], [])
months = Book.objects.using('other').dates('published', 'month')
self.assertEqual([o.month for o in months], [5])
months = Book.objects.using('default').dates('published', 'month')
self.assertEqual([o.month for o in months], [])
def test_m2m_separation(self):
"M2M fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
pro.authors = [marty]
dive.authors = [mark]
# Inspect the m2m tables directly.
# There should be 1 entry in each database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Check that queries work across m2m joins
self.assertEqual(list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
['Pro Django'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python'])
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
mark = Person.objects.using('other').get(name="Mark Pilgrim")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.authors.all().values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)),
['Dive into Python'])
def test_m2m_forward_operations(self):
"M2M forward manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors = [mark]
# Add a second author
john = Person.objects.using('other').create(name="John Smith")
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
dive.authors.add(john)
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
['Dive into Python'])
# Remove the second author
dive.authors.remove(john)
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
['Dive into Python'])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
# Clear all authors
dive.authors.clear()
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
[])
# Create an author through the m2m interface
dive.authors.create(name='Jane Brown')
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
[])
self.assertEqual(list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
['Dive into Python'])
def test_m2m_reverse_operations(self):
"M2M reverse manipulations are all constrained to a single DB"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Save the author relations
dive.authors = [mark]
# Create a second book on the other database
grease = Book.objects.using('other').create(title="Greasemonkey Hacks",
published=datetime.date(2005, 11, 1))
        # Add a book to the m2m
mark.book_set.add(grease)
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
['Mark Pilgrim'])
# Remove a book from the m2m
mark.book_set.remove(grease)
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
['Mark Pilgrim'])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
[])
# Clear the books associated with mark
mark.book_set.clear()
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
[])
# Create a book through the m2m interface
mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
['Mark Pilgrim'])
def test_m2m_cross_database_protection(self):
"Operations that involve sharing M2M objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited = [pro, dive]
# Add to an m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set.add(dive)
# Set a m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.book_set = [pro, dive]
# Add to a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors.add(marty)
# Set a reverse m2m with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.authors = [mark, marty]
def test_m2m_deletion(self):
"Cascaded deletions of m2m relations issue queries on the right database"
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
dive.authors = [mark]
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
dive.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person still exists ...
self.assertEqual(Person.objects.using('other').count(), 1)
# ... but the book has been deleted
self.assertEqual(Book.objects.using('other').count(), 0)
# ... and the relationship object has also been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Now try deletion in the reverse direction. Set up the relation again
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
dive.authors = [mark]
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
# Delete the object on the other database
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
# The person has been deleted ...
self.assertEqual(Person.objects.using('other').count(), 0)
# ... but the book still exists
self.assertEqual(Book.objects.using('other').count(), 1)
# ... and the relationship object has been deleted.
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
def test_foreign_key_separation(self):
"FK fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
george = Person.objects.create(name="George Vilches")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
# Save the author's favorite books
pro.editor = george
pro.save()
dive.editor = chris
dive.save()
pro = Book.objects.using('default').get(title="Pro Django")
self.assertEqual(pro.editor.name, "George Vilches")
dive = Book.objects.using('other').get(title="Dive into Python")
self.assertEqual(dive.editor.name, "Chris Mills")
# Check that queries work across foreign key joins
self.assertEqual(list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
['George Vilches'])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
['Chris Mills'])
# Reget the objects to clear caches
chris = Person.objects.using('other').get(name="Chris Mills")
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(chris.edited.values_list('title', flat=True)),
['Dive into Python'])
def test_foreign_key_reverse_operations(self):
"FK reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
chris = Person.objects.using('other').create(name="Chris Mills")
# Save the author relations
dive.editor = chris
dive.save()
# Add a second book edited by chris
html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
chris.edited.add(html5)
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
['Chris Mills'])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
['Chris Mills'])
# Remove the second editor
chris.edited.remove(html5)
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
['Chris Mills'])
# Clear all edited books
chris.edited.clear()
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
[])
# Create an author through the m2m interface
chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
[])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
['Chris Mills'])
self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
[])
def test_foreign_key_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
dive.editor = marty
# Set a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited = [pro, dive]
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='default'):
marty.edited.add(dive)
def test_foreign_key_deletion(self):
"Cascaded deletions of Foreign Key relations issue queries on the right database"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Pet.objects.using('other').create(name="Fido", owner=mark)
# Check the initial state
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
self.assertEqual(Person.objects.using('other').count(), 1)
self.assertEqual(Pet.objects.using('other').count(), 1)
# Delete the person object, which will cascade onto the pet
mark.delete(using='other')
self.assertEqual(Person.objects.using('default').count(), 0)
self.assertEqual(Pet.objects.using('default').count(), 0)
# Both the pet and the person have been deleted from the right database
self.assertEqual(Person.objects.using('other').count(), 0)
self.assertEqual(Pet.objects.using('other').count(), 0)
def test_foreign_key_validation(self):
"ForeignKey.validate() uses the correct database"
mickey = Person.objects.using('other').create(name="Mickey")
pluto = Pet.objects.using('other').create(name="Pluto", owner=mickey)
self.assertIsNone(pluto.full_clean())
def test_o2o_separation(self):
"OneToOne fields are constrained to a single database"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
# Retrieve related objects; queries should be database constrained
alice = User.objects.using('default').get(username="alice")
self.assertEqual(alice.userprofile.flavor, "chocolate")
bob = User.objects.using('other').get(username="bob")
self.assertEqual(bob.userprofile.flavor, "crunchy frog")
# Check that queries work across joins
self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
['alice'])
self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
[])
self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
[])
self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
['bob'])
# Reget the objects to clear caches
alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(alice_profile.user.username, 'alice')
self.assertEqual(bob_profile.user.username, 'bob')
    def test_o2o_cross_database_protection(self):
        "Operations that involve sharing one-to-one objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
with self.assertRaises(ValueError):
bob.userprofile = alice_profile
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
new_bob_profile = UserProfile(flavor="spring surprise")
# assigning a profile requires an explicit pk as the object isn't saved
charlie = User(pk=51, username='charlie', email='charlie@example.com')
charlie.set_unusable_password()
# initially, no db assigned
self.assertEqual(new_bob_profile._state.db, None)
self.assertEqual(charlie._state.db, None)
# old object comes from 'other', so the new object is set to use 'other'...
new_bob_profile.user = bob
charlie.userprofile = bob_profile
self.assertEqual(new_bob_profile._state.db, 'other')
self.assertEqual(charlie._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog'])
# When saved (no using required), new objects goes to 'other'
charlie.save()
bob_profile.save()
new_bob_profile.save()
self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)),
['alice'])
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob', 'charlie'])
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# This also works if you assign the O2O relation in the constructor
denise = User.objects.db_manager('other').create_user('denise', 'denise@example.com')
denise_profile = UserProfile(flavor="tofu", user=denise)
self.assertEqual(denise_profile._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# When saved, the new profile goes to 'other'
denise_profile.save()
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise', 'tofu'])
def test_generic_key_separation(self):
"Generic fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review1 = Review.objects.using('default').get(source="Python Monthly")
self.assertEqual(review1.content_object.title, "Pro Django")
review2 = Review.objects.using('other').get(source="Python Weekly")
self.assertEqual(review2.content_object.title, "Dive into Python")
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)),
['Python Weekly'])
def test_generic_key_reverse_operations(self):
"Generic reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
temp = Book.objects.using('other').create(title="Temp",
published=datetime.date(2009, 5, 4))
review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly'])
# Add a second review
dive.reviews.add(review2)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly', 'Python Weekly'])
# Remove the second author
dive.reviews.remove(review1)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly'])
# Clear all reviews
dive.reviews.clear()
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
# Create an author through the generic interface
dive.reviews.create(source='Python Daily')
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily'])
def test_generic_key_cross_database_protection(self):
"Operations that involve sharing generic key objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
review1.content_object = dive
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.reviews.add(review1)
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly'])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly'])
        # When saved, review3 goes to 'other'
review3.save()
self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly'])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily', 'Python Weekly'])
def test_generic_key_deletion(self):
"Cascaded deletions of Generic Key relations issue queries on the right database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Check the initial state
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Review.objects.using('other').count(), 1)
        # Delete the Book object, which will cascade onto the review
dive.delete(using='other')
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
        # Both the book and the review have been deleted from the right database
self.assertEqual(Book.objects.using('other').count(), 0)
self.assertEqual(Review.objects.using('other').count(), 0)
def test_ordering(self):
"get_next_by_XXX commands stick to a single database"
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
learn = Book.objects.using('other').create(title="Learning Python",
published=datetime.date(2008, 7, 16))
self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
self.assertEqual(dive.get_previous_by_published().title, "Learning Python")
def test_raw(self):
"test the raw() method across databases"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
def test_select_related(self):
"Database assignment is retained if an object is retrieved with select_related()"
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
# Retrieve the Person using select_related()
book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")
# The editor instance should have a db state
self.assertEqual(book.editor._state.db, 'other')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
sub = Person.objects.using('other').filter(name='fff')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. If the subquery explicitly uses a
# different database, an error should be raised.
self.assertRaises(ValueError, str, qs.query)
# Evaluating the query shouldn't work, either
with self.assertRaises(ValueError):
for obj in qs:
pass
def test_related_manager(self):
"Related managers return managers, not querysets"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# extra_arg is removed by the BookManager's implementation of
# create(); but the BookManager's implementation won't get called
# unless edited returns a Manager, not a queryset
mark.book_set.create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.book_set.get_or_create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.get_or_create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
class ConnectionRouterTestCase(SimpleTestCase):
@override_settings(DATABASE_ROUTERS=[
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'])
def test_router_init_default(self):
connection_router = ConnectionRouter()
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
def test_router_init_arg(self):
connection_router = ConnectionRouter([
'multiple_database.tests.TestRouter',
'multiple_database.tests.WriteRouter'
])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
# Init with instances instead of strings
connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
self.assertListEqual([r.__class__.__name__ for r in connection_router.routers],
['TestRouter', 'WriteRouter'])
# Make the 'other' database appear to be a replica of the 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class RouterTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets obey the router for db suggestions"
self.assertEqual(Book.objects.db, 'other')
self.assertEqual(Book.objects.all().db, 'other')
self.assertEqual(Book.objects.using('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').all().db, 'default')
def test_migrate_selection(self):
"Synchronization behavior is predictable"
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]):
# Add the auth router to the chain. TestRouter is a universal
# synchronizer, so it should have no effect.
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]):
# Now check what happens if the router order is reversed.
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
def test_migrate_legacy_router(self):
class LegacyRouter(object):
def allow_migrate(self, db, model):
"""
Deprecated allow_migrate signature should trigger
RemovedInDjango110Warning.
"""
assert db == 'default'
assert model is User
return True
with override_settings(DATABASE_ROUTERS=[LegacyRouter()]):
with warnings.catch_warnings(record=True) as recorded:
warnings.filterwarnings('always')
msg = (
"The signature of allow_migrate has changed from "
"allow_migrate(self, db, model) to "
"allow_migrate(self, db, app_label, model_name=None, **hints). "
"Support for the old signature will be removed in Django 1.10."
)
self.assertTrue(router.allow_migrate_model('default', User))
self.assertEqual(force_text(recorded.pop().message), msg)
self.assertEqual(recorded, [])
self.assertTrue(router.allow_migrate('default', 'app_label'))
self.assertEqual(force_text(recorded.pop().message), msg)
def test_partial_router(self):
"A router can choose to implement a subset of methods"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# First check the baseline behavior.
self.assertEqual(router.db_for_read(User), 'other')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'default')
self.assertEqual(router.db_for_write(Book), 'default')
self.assertTrue(router.allow_relation(dive, dive))
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]):
self.assertEqual(router.db_for_read(User), 'default')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'writer')
self.assertEqual(router.db_for_write(Book), 'writer')
self.assertTrue(router.allow_relation(dive, dive))
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
def test_database_routing(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
pro.authors = [marty]
# Create a book and author on the other database
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# An update query will be routed to the default database
Book.objects.filter(title='Pro Django').update(pages=200)
with self.assertRaises(Book.DoesNotExist):
# By default, the get query will be directed to 'other'
Book.objects.get(title='Pro Django')
# But the same query issued explicitly at a database will work.
pro = Book.objects.using('default').get(title='Pro Django')
# Check that the update worked.
self.assertEqual(pro.pages, 200)
# An update query with an explicit using clause will be routed
# to the requested database.
Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)
# Related object queries stick to the same database
# as the original object, regardless of the router
self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin'])
self.assertEqual(pro.editor.name, 'Marty Alchin')
# get_or_create is a special case. The get needs to be targeted at
# the write database in order to avoid potential transaction
# consistency problems
book, created = Book.objects.get_or_create(title="Pro Django")
self.assertFalse(created)
book, created = Book.objects.get_or_create(title="Dive Into Python",
defaults={'published': datetime.date(2009, 5, 4)})
self.assertTrue(created)
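# (Illustrative aside, not part of the original test: because the get inside
# get_or_create is routed to the write database, both books above end up with
# book._state.db == 'default' under the router configured for this test case.)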
# Check the head count of objects
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 1)
# If a database isn't specified, the read database is used
self.assertEqual(Book.objects.count(), 1)
# A delete query will also be routed to the default database
Book.objects.filter(pages__gt=150).delete()
# The default database has lost the book.
self.assertEqual(Book.objects.using('default').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
def test_foreign_key_cross_database_protection(self):
"Foreign keys can cross databases if they two databases have a common source"
# Create a book and author on the default database
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('default').create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key with an object from a different database
try:
dive.editor = marty
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Set a foreign key set with an object from a different database
try:
marty.edited = [pro, dive]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Assignment implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a foreign key set with an object from a different database
try:
marty.edited.add(dive)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Add implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
# If you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
chris = Person(name="Chris Mills")
html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
# initially, no db assigned
self.assertIsNone(chris._state.db)
self.assertIsNone(html5._state.db)
# old object comes from 'other', so the new object is set to use the
# source of 'other'...
self.assertEqual(dive._state.db, 'other')
chris.save()
dive.editor = chris
html5.editor = mark
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
self.assertEqual(chris._state.db, 'default')
self.assertEqual(html5._state.db, 'default')
# This also works if you assign the FK in the constructor
water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
self.assertEqual(water._state.db, 'default')
# For the remainder of this test, create a copy of 'mark' in the
# 'default' database to prevent integrity errors on backends that
# don't defer constraints checks until the end of the transaction
mark.save(using='default')
# This moved 'mark' to the 'default' database; move it back to 'other'
mark.save(using='other')
self.assertEqual(mark._state.db, 'other')
# If you create an object through a FK relation, it will be
# written to the write database, even if the original object
# was on the read database
cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
cheesecake, created = mark.edited.get_or_create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15))
self.assertEqual(puddles._state.db, 'default')
def test_m2m_cross_database_protection(self):
"M2M relations can cross databases if the database share a source"
# Create books and authors on the inverse to the usual database
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")
# Now save back onto the usual database.
# This simulates primary/replica - the objects exist on both databases,
# but the _state.db is as it is for all other tests.
pro.save(using='default')
marty.save(using='default')
dive.save(using='other')
mark.save(using='other')
# Check that we have 2 of both types of object on both databases
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 2)
self.assertEqual(Person.objects.using('default').count(), 2)
self.assertEqual(Person.objects.using('other').count(), 2)
# Set a m2m set with an object from a different database
try:
marty.book_set = [pro, dive]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Add to an m2m with an object from a different database
try:
marty.book_set.add(dive)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Set a reverse m2m with an object from a different database
try:
dive.authors = [mark, marty]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Add to a reverse m2m with an object from a different database
try:
dive.authors.add(marty)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# If you create an object through a M2M relation, it will be
# written to the write database, even if the original object
# was on the read database
alice = dive.authors.create(name='Alice')
self.assertEqual(alice._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
alice, created = dive.authors.get_or_create(name='Alice')
self.assertEqual(alice._state.db, 'default')
bob, created = dive.authors.get_or_create(name='Bob')
self.assertEqual(bob._state.db, 'default')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
try:
bob.userprofile = alice_profile
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(alice._state.db, 'default')
self.assertEqual(alice_profile._state.db, 'default')
self.assertEqual(bob._state.db, 'other')
# ... but they will when the affected object is saved.
bob.save()
self.assertEqual(bob._state.db, 'default')
def test_generic_key_cross_database_protection(self):
"Generic Key operations can span databases if they share a source"
# Create a book and author on the default database
pro = Book.objects.using(
'default').create(title="Pro Django", published=datetime.date(2008, 12, 16))
review1 = Review.objects.using(
'default').create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using(
'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
review2 = Review.objects.using(
'other').create(source="Python Weekly", content_object=dive)
# Set a generic foreign key with an object from a different database
try:
review1.content_object = dive
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a generic foreign key set with an object from a different database
try:
dive.reviews.add(review1)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertIsNone(review3._state.db)
# Dive comes from 'other', so review3 is set to use the source of 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'default')
# If you create an object through a M2M relation, it will be
# written to the write database, even if the original object
# was on the read database
dive = Book.objects.using('other').get(title='Dive into Python')
nyt = dive.reviews.create(source="New York Times", content_object=dive)
self.assertEqual(nyt._state.db, 'default')
def test_m2m_managers(self):
"M2M relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
self.assertEqual(pro.authors.db, 'other')
self.assertEqual(pro.authors.db_manager('default').db, 'default')
self.assertEqual(pro.authors.db_manager('default').all().db, 'default')
self.assertEqual(marty.book_set.db, 'other')
self.assertEqual(marty.book_set.db_manager('default').db, 'default')
self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')
def test_foreign_key_managers(self):
"FK reverse relations are represented by managers, and can be controlled like managers"
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
self.assertEqual(marty.edited.db, 'other')
self.assertEqual(marty.edited.db_manager('default').db, 'default')
self.assertEqual(marty.edited.db_manager('default').all().db, 'default')
def test_generic_key_managers(self):
"Generic key relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
Review.objects.using('other').create(source="Python Monthly",
content_object=pro)
self.assertEqual(pro.reviews.db, 'other')
self.assertEqual(pro.reviews.db_manager('default').db, 'default')
self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
sub = Person.objects.filter(name='Mark Pilgrim')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. Don't let routing instructions
# force the subquery to an incompatible database.
str(qs.query)
# If you evaluate the query, it should work, running on 'other'
self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])
def test_deferred_models(self):
mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
orig_b = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark_other)
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
self.assertEqual(b.published, datetime.date(2009, 5, 4))
b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
b.editor = mark_def
b.save(using='default')
self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
datetime.date(2009, 5, 4))
@override_settings(DATABASE_ROUTERS=[AuthRouter()])
class AuthTestCase(TestCase):
multi_db = True
def test_auth_manager(self):
"The methods on the auth manager obey database hints"
# Create one user using default allocation policy
User.objects.create_user('alice', 'alice@example.com')
# Create another user, explicitly specifying the database
User.objects.db_manager('default').create_user('bob', 'bob@example.com')
# The first user only exists on the other database
alice = User.objects.using('other').get(username='alice')
self.assertEqual(alice.username, 'alice')
self.assertEqual(alice._state.db, 'other')
self.assertRaises(User.DoesNotExist, User.objects.using('default').get, username='alice')
# The second user only exists on the default database
bob = User.objects.using('default').get(username='bob')
self.assertEqual(bob.username, 'bob')
self.assertEqual(bob._state.db, 'default')
self.assertRaises(User.DoesNotExist, User.objects.using('other').get, username='bob')
# That is... there is one user on each database
self.assertEqual(User.objects.using('default').count(), 1)
self.assertEqual(User.objects.using('other').count(), 1)
def test_dumpdata(self):
"Check that dumpdata honors allow_migrate restrictions on the router"
User.objects.create_user('alice', 'alice@example.com')
User.objects.db_manager('default').create_user('bob', 'bob@example.com')
# Check that dumping the default database doesn't try to include auth
# because allow_migrate prohibits auth on default
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertEqual(command_output, '[]')
# Check that dumping the other database does include auth
new_io = StringIO()
management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
command_output = new_io.getvalue().strip()
self.assertIn('"email": "alice@example.com"', command_output)
class AntiPetRouter(object):
# A router that only expresses an opinion on migrate,
# passing pets to the 'other' database
def allow_migrate(self, db, app_label, model_name=None, **hints):
if db == 'other':
return model_name == 'pet'
else:
return model_name != 'pet'
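# Illustrative note (not part of the original tests): with this router,
# AntiPetRouter().allow_migrate('other', 'some_app', model_name='pet') returns
# True, while the same call with db='default' returns False, so pet fixtures
# only ever load into the 'other' database.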
class FixtureTestCase(TestCase):
multi_db = True
fixtures = ['multidb-common', 'multidb']
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_fixture_loading(self):
"Multi-db fixtures are loaded correctly"
# Check that "Pro Django" exists on the default database, but not on other database
try:
Book.objects.get(title="Pro Django")
Book.objects.using('default').get(title="Pro Django")
except Book.DoesNotExist:
self.fail('"Pro Django" should exist on default database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('other').get,
title="Pro Django"
)
# Check that "Dive into Python" exists on the default database, but not on other database
try:
Book.objects.using('other').get(title="Dive into Python")
except Book.DoesNotExist:
self.fail('"Dive into Python" should exist on other database')
self.assertRaises(
Book.DoesNotExist,
Book.objects.get,
title="Dive into Python"
)
self.assertRaises(
Book.DoesNotExist,
Book.objects.using('default').get,
title="Dive into Python"
)
# Check that "Definitive Guide" exists on the both databases
try:
Book.objects.get(title="The Definitive Guide to Django")
Book.objects.using('default').get(title="The Definitive Guide to Django")
Book.objects.using('other').get(title="The Definitive Guide to Django")
except Book.DoesNotExist:
self.fail('"The Definitive Guide to Django" should exist on both databases')
@override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
def test_pseudo_empty_fixtures(self):
"A fixture can contain entries, but lead to nothing in the database; this shouldn't raise an error (ref #14068)"
new_io = StringIO()
management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
command_output = new_io.getvalue().strip()
# No objects will actually be loaded
self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")
class PickleQuerySetTestCase(TestCase):
multi_db = True
def test_pickling(self):
for db in connections:
Book.objects.using(db).create(title='Dive into Python', published=datetime.date(2009, 5, 4))
qs = Book.objects.all()
self.assertEqual(qs.db, pickle.loads(pickle.dumps(qs)).db)
class DatabaseReceiver(object):
"""
Used in the tests for the database argument in signals (#13552)
"""
def __call__(self, signal, sender, **kwargs):
self._database = kwargs['using']
class WriteToOtherRouter(object):
"""
A router that sends all writes to the other database.
"""
def db_for_write(self, model, **hints):
return "other"
class SignalTests(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])
def test_database_arg_save_and_delete(self):
"""
Tests that the pre/post_save signal contains the correct database.
(#13552)
"""
# Make some signal receivers
pre_save_receiver = DatabaseReceiver()
post_save_receiver = DatabaseReceiver()
pre_delete_receiver = DatabaseReceiver()
post_delete_receiver = DatabaseReceiver()
# Make model and connect receivers
signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
signals.post_save.connect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
p = Person.objects.create(name='Darth Vader')
# Save and test receivers got calls
p.save()
self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
# Delete, and test
p.delete()
self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
# Save again to a different database
p.save(using="other")
self.assertEqual(pre_save_receiver._database, "other")
self.assertEqual(post_save_receiver._database, "other")
# Delete, and test
p.delete(using="other")
self.assertEqual(pre_delete_receiver._database, "other")
self.assertEqual(post_delete_receiver._database, "other")
signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)
def test_database_arg_m2m(self):
"""
Test that the m2m_changed signal has a correct database arg (#13552)
"""
# Make a receiver
receiver = DatabaseReceiver()
# Connect it
signals.m2m_changed.connect(receiver=receiver)
# Create the models that will be used for the tests
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# Create a copy of the models on the 'other' database to prevent
# integrity errors on backends that don't defer constraints checks
Book.objects.using('other').create(pk=b.pk, title=b.title,
published=b.published)
Person.objects.using('other').create(pk=p.pk, name=p.name)
# Test addition
b.authors.add(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.add(p)
self.assertEqual(receiver._database, "other")
# Test removal
b.authors.remove(p)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.remove(p)
self.assertEqual(receiver._database, "other")
# Test addition in reverse
p.book_set.add(b)
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
p.book_set.add(b)
self.assertEqual(receiver._database, "other")
# Test clearing
b.authors.clear()
self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
with self.override_router():
b.authors.clear()
self.assertEqual(receiver._database, "other")
class AttributeErrorRouter(object):
"A router to test the exception handling of ConnectionRouter"
def db_for_read(self, model, **hints):
raise AttributeError
def db_for_write(self, model, **hints):
raise AttributeError
class RouterAttributeErrorTestCase(TestCase):
multi_db = True
def override_router(self):
return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])
def test_attribute_error_read(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
with self.override_router():
self.assertRaises(AttributeError, Book.objects.get, pk=b.pk)
def test_attribute_error_save(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
dive = Book()
dive.title = "Dive into Python"
dive.published = datetime.date(2009, 5, 4)
with self.override_router():
self.assertRaises(AttributeError, dive.save)
def test_attribute_error_delete(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
b.authors = [p]
b.editor = p
with self.override_router():
self.assertRaises(AttributeError, b.delete)
def test_attribute_error_m2m(self):
"Check that the AttributeError from AttributeErrorRouter bubbles up"
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
with self.override_router():
self.assertRaises(AttributeError, setattr, b, 'authors', [p])
class ModelMetaRouter(object):
"A router to ensure model arguments are real model classes"
def db_for_write(self, model, **hints):
if not hasattr(model, '_meta'):
raise ValueError
@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
class RouterModelArgumentTestCase(TestCase):
multi_db = True
def test_m2m_collection(self):
b = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
p = Person.objects.create(name="Marty Alchin")
# test add
b.authors.add(p)
# test remove
b.authors.remove(p)
# test clear
b.authors.clear()
# test setattr
b.authors = [p]
# test delete (the cascade also routes the M2M through model)
b.delete()
def test_foreignkey_collection(self):
person = Person.objects.create(name='Bob')
Pet.objects.create(owner=person, name='Wart')
# test related FK collection
person.delete()
class SyncOnlyDefaultDatabaseRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == DEFAULT_DB_ALIAS
class MigrateTestCase(TestCase):
available_apps = [
'multiple_database',
'django.contrib.auth',
'django.contrib.contenttypes'
]
multi_db = True
def test_migrate_to_other_database(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
count = cts.count()
self.assertGreater(count, 0)
cts.delete()
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), count)
def test_migrate_to_other_database_with_router(self):
"""Regression test for #16039: migrate with --database option."""
cts = ContentType.objects.using('other').filter(app_label='multiple_database')
cts.delete()
with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
management.call_command('migrate', verbosity=0, interactive=False, database='other')
self.assertEqual(cts.count(), 0)
class RouterUsed(Exception):
WRITE = 'write'
def __init__(self, mode, model, hints):
self.mode = mode
self.model = model
self.hints = hints
class RouteForWriteTestCase(TestCase):
multi_db = True
class WriteCheckRouter(object):
def db_for_write(self, model, **hints):
raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)
def override_router(self):
return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])
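# Each test below forces a write through a relation while WriteCheckRouter is
# installed; the router raises RouterUsed from db_for_write(), and the test then
# inspects that exception to see which model and hints reached the router.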
def test_fk_delete(self):
owner = Person.objects.create(name='Someone')
pet = Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
pet.owner.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_delete(self):
owner = Person.objects.create(name='Someone')
to_del_qs = owner.pet_set.all()
try:
with self.override_router():
to_del_qs.delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_get_or_create(self):
owner = Person.objects.create(name='Someone')
try:
with self.override_router():
owner.pet_set.get_or_create(name='fido')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_reverse_fk_update(self):
owner = Person.objects.create(name='Someone')
Pet.objects.create(name='fido', owner=owner)
try:
with self.override_router():
owner.pet_set.update(name='max')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Pet)
self.assertEqual(e.hints, {'instance': owner})
def test_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.add(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_get_or_create(self):
Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
book.authors.get_or_create(name='Someone else')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.remove(auth)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': book})
def test_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
book.authors.all().update(name='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': book})
def test_reverse_m2m_add(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.add(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_clear(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.clear()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_delete(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().delete()
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_get_or_create(self):
auth = Person.objects.create(name='Someone')
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
try:
with self.override_router():
auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Person)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_remove(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.remove(book)
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book.authors.through)
self.assertEqual(e.hints, {'instance': auth})
def test_reverse_m2m_update(self):
auth = Person.objects.create(name='Someone')
book = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
book.authors.add(auth)
try:
with self.override_router():
auth.book_set.all().update(title='Different')
self.fail('db_for_write() not invoked on router')
except RouterUsed as e:
self.assertEqual(e.mode, RouterUsed.WRITE)
self.assertEqual(e.model, Book)
self.assertEqual(e.hints, {'instance': auth})
|
sbauza/sandbox
|
refs/heads/master
|
sandbox/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo(
'sandbox').version_string()
|
akiellor/selenium
|
refs/heads/master
|
py/test/selenium/webdriver/firefox/profile_tests.py
|
4
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import datetime
import logging
import os
import tempfile
import time
import unittest
from selenium import webdriver
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
from selenium.test.selenium.webdriver.common import utils
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
class ProfileTests(unittest.TestCase):
DUMMY_FILE_NAME = "dummy.js"
DUMMY_FILE_CONTENT = "# test"
def testAnonymousProfileExample(self):
driver = webdriver.connect('firefox')
driver.get("http://localhost:8000/simpleTest.html")
self.assertEquals("Hello WebDriver", driver.get_title())
driver.quit()
def testNamedProfile(self):
profile = FirefoxProfile("example")
driver = webdriver.connect('firefox',profile)
driver.get("http://localhost:8000/simpleTest.html")
self.assertEquals("Hello WebDriver", driver.get_title())
driver.quit()
def testAnonymousProfileIsFresh(self):
driver = webdriver.connect('firefox')
driver.get("http://localhost:8000/simpleTest.html")
timestamp = time.mktime(datetime.datetime.now().timetuple()) + 100
cookie = {"name": "foo",
"value": "bar",
"expires": str(int(timestamp)) + "000",
"domain": "localhost",
"path": "/"}
driver.add_cookie(utils.convert_cookie_to_json(cookie))
self.assertEquals(cookie, driver.get_cookies()[0])
driver.quit()
driver = webdriver.connect('firefox')
self.assertEquals([], driver.get_cookies())
driver.quit()
def testCopyFromSource(self):
dir_name = tempfile.mkdtemp()
self._create_dummy_file(dir_name)
profile = FirefoxProfile()
profile.copy_profile_source(dir_name)
profile_dir = profile.path
dst_pref_file = open(os.path.join(profile_dir, self.DUMMY_FILE_NAME))
content = dst_pref_file.read()
self.assertEquals(self.DUMMY_FILE_CONTENT, content)
def _create_dummy_file(self, dir_name):
pref_file = open(os.path.join(dir_name, self.DUMMY_FILE_NAME), "w")
pref_file.write(self.DUMMY_FILE_CONTENT)
pref_file.close()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
webserver = SimpleWebServer(8000)
webserver.start()
try:
unittest.main()
finally:
webserver.stop()
|
fangxingli/hue
|
refs/heads/master
|
desktop/core/ext-py/django-nose-1.3/testapp/runtests.py
|
45
|
#!/usr/bin/env python
import sys
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3'}},
INSTALLED_APPS=[
'django_nose',
],
)
from django_nose import NoseTestSuiteRunner
def runtests(*test_labels):
runner = NoseTestSuiteRunner(verbosity=1, interactive=True)
failures = runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
wesolutki/voter
|
refs/heads/master
|
common/models.py
|
1
|
# -*- coding: utf-8 -*-
#
from django.db import models
from django.core.exceptions import ValidationError
class ShortFloatField(models.FloatField):
pass
class ShortIntegerField(models.IntegerField):
pass
class AdditionalNotesField(models.TextField):
pass
class DescriptionField(models.TextField):
pass
def validate_pesel_length(value):
if len(value) != 11:
raise ValidationError('PESEL musi zawierać 11 cyfr')  # "PESEL must contain 11 digits"
def validate_pesel_chars(value):
if not value.isdigit():
raise ValidationError('PESEL musi składać się z samych cyfr')  # "PESEL must consist of digits only"
def validate_pesel_checksum(value):
if not value.isdigit():
return
multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
result = 0
for i in range(len(value)):
result += int(value[i]) * multiple_table[i]
if result % 10 != 0:
raise ValidationError('Suma kontrolna się nie zgadza, zły PESEL')  # "The checksum does not match, invalid PESEL"
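# Worked example of the checksum rule (illustrative only, using the commonly
# cited sample number 44051401359): the weighted sum is
# 4*1 + 4*3 + 0*7 + 5*9 + 1*1 + 4*3 + 0*7 + 1*9 + 3*1 + 5*3 + 9*1 = 110,
# which is divisible by 10, so that value passes validate_pesel_checksum.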
class PESELField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 11
kwargs['null'] = True
kwargs['blank'] = True
kwargs['validators'] = [validate_pesel_length, validate_pesel_chars, validate_pesel_checksum]
super(PESELField, self).__init__(*args, **kwargs)
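# Hypothetical usage sketch (this model is not part of this file):
#
#     class Voter(models.Model):
#         pesel = PESELField()
#
# Calling full_clean() on such a model runs the three PESEL validators above.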
|
bvilhjal/ldpred
|
refs/heads/master
|
setup.py
|
1
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
import ldpred
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='LDpred',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=ldpred.__version__,
description='A Python package that adjusts GWAS summary statistics for the effects of linkage disequilibrium (LD)',
long_description=long_description,
# The project's main homepage.
url='https://github.com/ldpred',
# Author details
author='Bjarni J Vilhjalmsson',
author_email='bjarni.vilhjalmsson@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3',
#'Programming Language :: Python :: 3.3',
#'Programming Language :: Python :: 3.4',
#'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# What does your project relate to?
keywords='Polygenic Risk Scores, GWAS, Linkage Disequilibrium, Risk Prediction',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['h5py','scipy','plinkio'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
#'dev': ['check-manifest'],
#'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={'tests':
['./test_data/sim*_0_test.fam',
'./test_data/sim*_0_test.bim',
'./test_data/sim*_0_test.bed',
'./test_data/sim*_0_ss.txt',
'./test_data/sim*_parameters.json',
'./test_data/goldens/golden_inf*',
'./test_data/goldens/golden_LD*',
'./test_data/goldens/golden_ld*',
'./test_data/goldens/golden_P*',
'./test_data/goldens/golden.coord*',
'./test_data/goldens/goldenprs*',
'./test_data/goldens/golden_mix*',
'./test_data/goldens/ld_data*',
],
'ldpred':['./reference/hm3_sids.txt.gz',
'./reference/long-range-ld-price-2008hg38.txt']
},
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
scripts=['ldpred/run.py'],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'ldpred=ldpred.run:main',
'ldpred-unittest=tests.test:run_unit_tests',
'ldpred-inttest=tests.test:run_integration_tests',
],
},
)
|
js0701/chromium-crosswalk
|
refs/heads/master
|
build/android/gyp/jar_toc.py
|
19
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a TOC file from a Java jar.
The TOC file contains the non-package API of the jar. This includes all
public/protected/package classes/functions/members and the values of static
final variables (members with package access are kept because in some cases we
have multiple libraries with the same package, particularly test+non-test). Some
other information (major/minor javac version) is also included.
This TOC file then can be used to determine if a dependent library should be
rebuilt when this jar changes. I.e. any change to the jar that would require a
rebuild will have a corresponding change in the TOC file.
"""
import optparse
import os
import re
import sys
import zipfile
from util import build_utils
from util import md5_check
def GetClassesInZipFile(zip_file):
classes = []
files = zip_file.namelist()
for f in files:
if f.endswith('.class'):
# f is of the form org/chromium/base/Class$Inner.class
classes.append(f.replace('/', '.')[:-6])
return classes
def CallJavap(classpath, classes):
javap_cmd = [
'javap',
'-package', # Show public/protected/package.
# -verbose is required to get constant values (which can be inlined in
# dependents).
'-verbose',
'-J-XX:NewSize=4m',
'-classpath', classpath
] + classes
return build_utils.CheckOutput(javap_cmd)
def ExtractToc(disassembled_classes):
# javap output is structured by indent (2-space) levels.
good_patterns = [
'^[^ ]', # This includes all class signatures.
'^ SourceFile:',
'^ minor version:',
'^ major version:',
'^ Constant value:',
'^ public ',
'^ protected ',
]
bad_patterns = [
'^const #', # Matches the constant pool (i.e. literals used in the class).
]
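# Illustrative examples (not from the original file) of how the filter below
# treats typical javap -verbose output lines:
#   'public class org.chromium.Foo'            -> kept    (class signatures start at column 0)
#   an indented 'Constant value: int 42' line  -> kept    (constant values can be inlined into dependents)
#   'const #2 = Asciz  foo;'                   -> dropped (constant-pool entry, '^const #')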
def JavapFilter(line):
return (re.match('|'.join(good_patterns), line) and
not re.match('|'.join(bad_patterns), line))
toc = filter(JavapFilter, disassembled_classes.split('\n'))
return '\n'.join(toc)
def UpdateToc(jar_path, toc_path):
classes = GetClassesInZipFile(zipfile.ZipFile(jar_path))
toc = ''
if len(classes) != 0:
javap_output = CallJavap(classpath=jar_path, classes=classes)
toc = ExtractToc(javap_output)
with open(toc_path, 'w') as tocfile:
tocfile.write(toc)
def DoJarToc(options):
jar_path = options.jar_path
toc_path = options.toc_path
record_path = '%s.md5.stamp' % toc_path
md5_check.CallAndRecordIfStale(
lambda: UpdateToc(jar_path, toc_path),
record_path=record_path,
input_paths=[jar_path],
force=not os.path.exists(toc_path),
)
build_utils.Touch(toc_path, fail_if_missing=True)
def main():
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--jar-path', help='Input .jar path.')
parser.add_option('--toc-path', help='Output .jar.TOC path.')
parser.add_option('--stamp', help='Path to touch on success.')
options, _ = parser.parse_args()
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
build_utils.GetPythonDependencies())
DoJarToc(options)
if options.depfile:
build_utils.WriteDepfile(
options.depfile,
build_utils.GetPythonDependencies())
if options.stamp:
build_utils.Touch(options.stamp)
if __name__ == '__main__':
sys.exit(main())
|
havt/odoo
|
refs/heads/8.0
|
addons/mass_mailing/models/mail_thread.py
|
220
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
import re
from openerp.addons.mail.mail_message import decode
from openerp.addons.mail.mail_thread import decode_header
from openerp.osv import osv
_logger = logging.getLogger(__name__)
class MailThread(osv.AbstractModel):
""" Update MailThread to add the feature of bounced emails and replied emails
in message_process. """
_name = 'mail.thread'
_inherit = ['mail.thread']
def message_route_check_bounce(self, cr, uid, message, context=None):
""" Override to verify that the email_to is the bounce alias. If it is the
case, log the bounce, set the parent and related document as bounced and
return False to end the routing process. """
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
# 0. Verify whether this is a bounced email (wrong destination,...) -> use it to collect data, such as dead leads
if bounce_alias in email_to:
# Bounce regex
# Typical form of bounce is bounce_alias-128-crm.lead-34@domain
# group(1) = the mail ID; group(2) = the model (if any); group(3) = the record ID
bounce_re = re.compile(r"%s-(\d+)-?([\w.]+)?-?(\d+)?" % re.escape(bounce_alias), re.UNICODE)
bounce_match = bounce_re.search(email_to)
if bounce_match:
bounced_model, bounced_thread_id = None, False
bounced_mail_id = bounce_match.group(1)
stat_ids = self.pool['mail.mail.statistics'].set_bounced(cr, uid, mail_mail_ids=[bounced_mail_id], context=context)
for stat in self.pool['mail.mail.statistics'].browse(cr, uid, stat_ids, context=context):
bounced_model = stat.model
bounced_thread_id = stat.res_id
_logger.info('Routing mail from %s to %s with Message-Id %s: bounced mail from mail %s, model: %s, thread_id: %s',
email_from, email_to, message_id, bounced_mail_id, bounced_model, bounced_thread_id)
if bounced_model and bounced_model in self.pool and hasattr(self.pool[bounced_model], 'message_receive_bounce') and bounced_thread_id:
self.pool[bounced_model].message_receive_bounce(cr, uid, [bounced_thread_id], mail_id=bounced_mail_id, context=context)
return False
return True
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
if not self.message_route_check_bounce(cr, uid, message, context=context):
return []
return super(MailThread, self).message_route(cr, uid, message, message_dict, model, thread_id, custom_values, context)
def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
"""Called by ``message_process`` when a bounce email (such as Undelivered
Mail Returned to Sender) is received for an existing thread. The default
behavior is to check if an integer ``message_bounce`` column exists.
If it is the case, its content is incremented. """
if 'message_bounce' in self._fields:
for obj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
""" Override to update the parent mail statistics. The parent is found
by using the References header of the incoming message and looking for
matching message_id in mail.mail.statistics. """
if message.get('References'):
message_ids = [x.strip() for x in decode(message['References']).split()]
self.pool['mail.mail.statistics'].set_replied(cr, uid, mail_message_ids=message_ids, context=context)
return super(MailThread, self).message_route_process(cr, uid, message, message_dict, routes, context=context)
|
be-cloud-be/horizon-addons
|
refs/heads/9.0
|
server/addons/l10n_cn_small_business/__init__.py
|
256
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2007-2014 Jeff Wang(<http://jeff@osbzr.com>).
|
piffey/ansible
|
refs/heads/devel
|
lib/ansible/modules/utilities/logic/wait_for_connection.py
|
74
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: wait_for_connection
short_description: Waits until remote system is reachable/usable
description:
- Waits for a total of C(timeout) seconds.
- Retries the transport connection after a timeout of C(connect_timeout).
- Tests the transport connection every C(sleep) seconds.
- This module makes use of internal ansible transport (and configuration) and the ping/win_ping module to guarantee correct end-to-end functioning.
- This module is also supported for Windows targets.
version_added: "2.3"
options:
connect_timeout:
description:
- Maximum number of seconds to wait for a connection to happen before closing and retrying.
default: 5
delay:
description:
- Number of seconds to wait before starting to poll.
default: 0
sleep:
default: 1
description:
- Number of seconds to sleep between checks.
timeout:
description:
- Maximum number of seconds to wait for.
default: 600
notes:
- This module is also supported for Windows targets.
author: "Dag Wieers (@dagwieers)"
'''
EXAMPLES = r'''
- name: Wait 600 seconds for target connection to become reachable/usable
wait_for_connection:
- name: Wait 300 seconds, but only start checking after 60 seconds
wait_for_connection:
delay: 60
timeout: 300
# Wake desktops, wait for them to become ready and continue playbook
- hosts: all
gather_facts: no
tasks:
- name: Send magic Wake-On-Lan packet to turn on individual systems
wakeonlan:
mac: '{{ mac }}'
broadcast: 192.168.0.255
delegate_to: localhost
- name: Wait for system to become reachable
wait_for_connection:
- name: Gather facts for first time
setup:
# Build a new VM, wait for it to become ready and continue playbook
- hosts: all
gather_facts: no
tasks:
- name: Clone new VM, if missing
vmware_guest:
hostname: '{{ vcenter_ipaddress }}'
name: '{{ inventory_hostname_short }}'
template: Windows 2012R2
customization:
hostname: '{{ vm_shortname }}'
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Wait for system to become reachable over WinRM
wait_for_connection:
timeout: 900
- name: Gather facts for first time
setup:
'''
RETURN = r'''
elapsed:
description: The number of seconds that elapsed waiting for the connection to appear.
returned: always
type: int
sample: 23
'''
|
rtroxell/brkt-cli
|
refs/heads/master
|
brkt_cli/service.py
|
1
|
# Copyright 2015 Bracket Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# https://github.com/brkt/brkt-sdk-java/blob/master/LICENSE
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
import abc
import boto
import logging
import json
import urllib2
from boto.exception import EC2ResponseError
ENCRYPT_SUCCESSFUL = 'finished'
ENCRYPT_FAILED = 'failed'
ENCRYPTOR_STATUS_PORT = 8000
PLATFORM_WINDOWS = 'windows'
log = logging.getLogger(__name__)
class BaseAWSService(object):
__metaclass__ = abc.ABCMeta
def __init__(self, session_id):
self.session_id = session_id
@abc.abstractmethod
def run_instance(self,
image_id,
security_group_ids=None,
instance_type='m3.medium',
block_device_map=None):
pass
@abc.abstractmethod
def get_instance(self, instance_id):
pass
@abc.abstractmethod
def create_tags(self, resource_id, name=None, description=None):
pass
@abc.abstractmethod
def stop_instance(self, instance_id):
pass
@abc.abstractmethod
def terminate_instance(self, instance_id):
pass
@abc.abstractmethod
def get_volume(self, volume_id):
pass
@abc.abstractmethod
def get_volumes(self, tag_key=None, tag_value=None):
pass
@abc.abstractmethod
def get_snapshots(self, *snapshot_ids):
pass
@abc.abstractmethod
def get_snapshot(self, snapshot_id):
pass
@abc.abstractmethod
def create_snapshot(self, volume_id, name=None, description=None):
pass
@abc.abstractmethod
def delete_volume(self, volume_id):
pass
@abc.abstractmethod
def validate_guest_ami(self, ami_id):
pass
@abc.abstractmethod
def validate_encryptor_ami(self, ami_id):
pass
@abc.abstractmethod
def register_image(self,
kernel_id,
block_device_map,
name=None,
description=None):
pass
@abc.abstractmethod
def get_image(self, image_id):
pass
@abc.abstractmethod
def delete_snapshot(self, snapshot_id):
pass
@abc.abstractmethod
def create_security_group(self, name, description):
pass
@abc.abstractmethod
def get_security_group(self, sg_id):
pass
@abc.abstractmethod
def add_security_group_rule(self, sg_id, **kwargs):
pass
@abc.abstractmethod
def delete_security_group(self, sg_id):
pass
@abc.abstractmethod
def get_key_pair(self, keyname):
pass
@abc.abstractmethod
def get_console_output(self, instance_id):
pass
class AWSService(BaseAWSService):
def __init__(
self,
encryptor_session_id,
encryptor_ami,
default_tags=None):
super(AWSService, self).__init__(encryptor_session_id)
self.encryptor_ami = encryptor_ami
self.default_tags = default_tags or {}
# These will be initialized by connect().
self.key_name = None
self.region = None
self.conn = None
def connect(self, key_name, region):
self.key_name = key_name
self.region = region
self.conn = boto.vpc.connect_to_region(region)
def run_instance(self,
image_id,
security_group_ids=None,
instance_type='m3.medium',
block_device_map=None):
if security_group_ids is None:
security_group_ids = []
log.debug('Starting a new instance based on %s', image_id)
try:
reservation = self.conn.run_instances(
image_id=image_id,
key_name=self.key_name,
instance_type=instance_type,
block_device_map=block_device_map,
security_group_ids=security_group_ids
)
return reservation.instances[0]
except EC2ResponseError:
# Log the failed operation, so that the user has context.
log.error('Unable to launch instance for %s', image_id)
raise
def get_instance(self, instance_id):
return self.conn.get_only_instances([instance_id])[0]
def create_tags(self, resource_id, name=None, description=None):
tags = dict(self.default_tags)
if name:
tags['Name'] = name
if description:
tags['Description'] = description
log.debug('Tagging %s with %s', resource_id, tags)
self.conn.create_tags([resource_id], tags)
def stop_instance(self, instance_id):
log.debug('Stopping instance %s', instance_id)
instances = self.conn.stop_instances([instance_id])
return instances[0]
def terminate_instance(self, instance_id):
log.debug('Terminating instance %s', instance_id)
self.conn.terminate_instances([instance_id])
def get_volume(self, volume_id):
return self.conn.get_all_volumes(volume_ids=[volume_id])[0]
def get_volumes(self, tag_key=None, tag_value=None):
filters = {}
if tag_key and tag_value:
filters['tag:%s' % tag_key] = tag_value
return self.conn.get_all_volumes(filters=filters)
def get_snapshots(self, *snapshot_ids):
return self.conn.get_all_snapshots(snapshot_ids)
def get_snapshot(self, snapshot_id):
return self.conn.get_all_snapshots([snapshot_id])[0]
def create_snapshot(self, volume_id, name=None, description=None):
log.debug('Creating snapshot of %s', volume_id)
snapshot = self.conn.create_snapshot(volume_id, description)
self.create_tags(snapshot.id, name=name)
return snapshot
def delete_volume(self, volume_id):
log.debug('Deleting volume %s', volume_id)
return self.conn.delete_volume(volume_id)
def validate_guest_ami(self, ami_id):
try:
images = self.conn.get_all_images([ami_id])
except EC2ResponseError, e:
if e.error_code == 'InvalidAMIID.NotFound':
return e.error_message
else:
raise
if len(images) == 0:
return '%s is no longer available' % ami_id
image = images[0]
# Amazon's API only returns 'windows' or nothing. We're not currently
# able to detect individual Linux distros.
if image.platform == PLATFORM_WINDOWS:
return '%s is not a supported platform for %s' % (
PLATFORM_WINDOWS, ami_id)
if image.root_device_type != 'ebs':
return '%s does not use EBS storage.' % ami_id
if image.hypervisor != 'xen':
return '%s uses hypervisor %s. Only xen is supported' % (
ami_id, image.hypervisor)
return None
def validate_encryptor_ami(self, ami_id):
try:
images = self.conn.get_all_images([ami_id])
except EC2ResponseError, e:
return e.error_message
if len(images) == 0:
return 'Bracket encryptor image %s is no longer available' % ami_id
image = images[0]
if 'brkt-avatar' not in image.name:
return '%s (%s) is not a Bracket Encryptor image' % (
ami_id, image.name)
return None
def register_image(self,
kernel_id,
block_device_map,
name=None,
description=None):
log.debug('Registering image.')
return self.conn.register_image(
name=name,
description=description,
architecture='x86_64',
kernel_id=kernel_id,
root_device_name='/dev/sda1',
block_device_map=block_device_map,
virtualization_type='paravirtual'
)
def get_image(self, image_id):
return self.conn.get_image(image_id)
def delete_snapshot(self, snapshot_id):
return self.conn.delete_snapshot(snapshot_id)
def create_security_group(self, name, description):
sg = self.conn.create_security_group(name, description)
return sg.id
def get_security_group(self, sg_id):
return self.conn.get_all_security_groups(group_ids=[sg_id])[0]
def add_security_group_rule(self, sg_id, **kwargs):
kwargs['group_id'] = sg_id
ok = self.conn.authorize_security_group(**kwargs)
if not ok:
raise Exception('Unknown error while adding security group rule')
def delete_security_group(self, sg_id):
ok = self.conn.delete_security_group(group_id=sg_id)
if not ok:
raise Exception('Unknown error while deleting security group')
def get_key_pair(self, keyname):
return self.conn.get_all_key_pairs(keynames=[keyname])[0]
def get_console_output(self, instance_id):
return self.conn.get_console_output(instance_id)
class BaseEncryptorService(object):
__metaclass__ = abc.ABCMeta
def __init__(self, hostname, port=ENCRYPTOR_STATUS_PORT):
self.hostname = hostname
self.port = port
@abc.abstractmethod
def is_encryptor_up(self):
pass
@abc.abstractmethod
def get_status(self):
pass
class EncryptorService(BaseEncryptorService):
def is_encryptor_up(self):
try:
self.get_status()
return True
except Exception as e:
log.debug("Couldn't get encryptor status: %s", e)
return False
def get_status(self, timeout_secs=2):
url = 'http://%s:%d/encryption_status' % (self.hostname, self.port)
r = urllib2.urlopen(url, timeout=timeout_secs)
data = r.read()
info = json.loads(data)
ratio = 0
info['percent_complete'] = 0
if info['state'] == ENCRYPT_SUCCESSFUL:
info['percent_complete'] = 100
elif info['bytes_total'] > 0:
ratio = float(info['bytes_written']) / info['bytes_total']
info['percent_complete'] = int(100 * ratio)
return info
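# --- Illustrative sketch, not part of the original module ---
# A minimal, hedged example of how BaseEncryptorService might be stubbed out
# to exercise callers without a running encryptor instance; the class name
# and the canned payload below are hypothetical.
if __name__ == '__main__':
    class StubEncryptorService(BaseEncryptorService):
        def is_encryptor_up(self):
            return True

        def get_status(self):
            # Canned response shaped like the /encryption_status payload.
            return {
                'state': ENCRYPT_SUCCESSFUL,
                'bytes_written': 1024,
                'bytes_total': 1024,
                'percent_complete': 100,
            }

    stub = StubEncryptorService('localhost')
    print stub.get_status()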
|
nirvn/QGIS
|
refs/heads/master
|
python/pyplugin_installer/__init__.py
|
32
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Borys Jurgiel
Email : info at borysjurgiel dot pl
This module is based on former plugin_installer plugin:
Copyright (C) 2007-2008 Matthew Perry
Copyright (C) 2008-2013 Borys Jurgiel
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Borys Jurgiel'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Borys Jurgiel'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
# import functions for easier access
from . import installer
from .installer import initPluginInstaller # NOQA
def instance():
if not installer.pluginInstaller:
installer.initPluginInstaller()
return installer.pluginInstaller
|
myerpengine/odoo
|
refs/heads/master
|
addons/mail/mail_alias.py
|
32
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import unicodedata
from openerp.osv import fields, osv
from openerp.tools import ustr
from openerp.modules.registry import RegistryManager
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
# Inspired by http://stackoverflow.com/questions/517923
def remove_accents(input_str):
"""Suboptimal-but-better-than-nothing way to replace accented
latin letters by an ASCII equivalent. Will obviously change the
meaning of input_str and work only for some cases"""
input_str = ustr(input_str)
nkfd_form = unicodedata.normalize('NFKD', input_str)
return u''.join([c for c in nkfd_form if not unicodedata.combining(c)])
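# Illustrative, hedged example of the helper above (not part of the original
# module); shown only to document the expected behaviour:
#
#     >>> remove_accents(u'Jérôme Café')
#     u'Jerome Cafe'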
class mail_alias(osv.Model):
"""A Mail Alias is a mapping of an email address with a given OpenERP Document
model. It is used by OpenERP's mail gateway when processing incoming emails
sent to the system. If the recipient address (To) of the message matches
    a Mail Alias, the message will be processed following the rules
of that alias. If the message is a reply it will be attached to the
existing discussion on the corresponding record, otherwise a new
record of the corresponding model will be created.
This is meant to be used in combination with a catch-all email configuration
on the company's mail server, so that as soon as a new mail.alias is
created, it becomes immediately usable and OpenERP will accept email for it.
"""
_name = 'mail.alias'
_description = "Email Aliases"
_rec_name = 'alias_name'
_order = 'alias_model_id, alias_name'
def _get_alias_domain(self, cr, uid, ids, name, args, context=None):
ir_config_parameter = self.pool.get("ir.config_parameter")
domain = ir_config_parameter.get_param(cr, uid, "mail.catchall.domain", context=context)
return dict.fromkeys(ids, domain or "")
_columns = {
'alias_name': fields.char('Alias Name',
help="The name of the email alias, e.g. 'jobs' if you want to catch emails for <jobs@example.my.openerp.com>",),
'alias_model_id': fields.many2one('ir.model', 'Aliased Model', required=True, ondelete="cascade",
help="The model (OpenERP Document Kind) to which this alias "
"corresponds. Any incoming email that does not reply to an "
"existing record will cause the creation of a new record "
"of this model (e.g. a Project Task)",
# hack to only allow selecting mail_thread models (we might
                                          # have a few false positives, though)
domain="[('field_id.name', '=', 'message_ids')]"),
'alias_user_id': fields.many2one('res.users', 'Owner',
help="The owner of records created upon receiving emails on this alias. "
"If this field is not set the system will attempt to find the right owner "
"based on the sender (From) address, or will use the Administrator account "
"if no system user is found for that address."),
'alias_defaults': fields.text('Default Values', required=True,
help="A Python dictionary that will be evaluated to provide "
"default values when creating new records for this alias."),
'alias_force_thread_id': fields.integer('Record Thread ID',
help="Optional ID of a thread (record) to which all incoming "
"messages will be attached, even if they did not reply to it. "
"If set, this will disable the creation of new records completely."),
'alias_domain': fields.function(_get_alias_domain, string="Alias domain", type='char', size=None),
'alias_parent_model_id': fields.many2one('ir.model', 'Parent Model',
help="Parent model holding the alias. The model holding the alias reference\n"
"is not necessarily the model given by alias_model_id\n"
"(example: project (parent_model) and task (model))"),
'alias_parent_thread_id': fields.integer('Parent Record Thread ID',
help="ID of the parent record holding the alias (example: project holding the task creation alias)"),
'alias_contact': fields.selection([
('everyone', 'Everyone'),
('partners', 'Authenticated Partners'),
('followers', 'Followers only'),
], string='Alias Contact Security', required=True,
help="Policy to post a message on the document using the mailgateway.\n"
"- everyone: everyone can post\n"
"- partners: only authenticated partners\n"
"- followers: only followers of the related document\n"),
}
_defaults = {
'alias_defaults': '{}',
'alias_user_id': lambda self, cr, uid, context: uid,
# looks better when creating new aliases - even if the field is informative only
'alias_domain': lambda self, cr, uid, context: self._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
'alias_contact': 'everyone',
}
_sql_constraints = [
('alias_unique', 'UNIQUE(alias_name)', 'Unfortunately this email alias is already used, please choose a unique one')
]
def _check_alias_defaults(self, cr, uid, ids, context=None):
try:
for record in self.browse(cr, uid, ids, context=context):
dict(eval(record.alias_defaults))
except Exception:
return False
return True
_constraints = [
(_check_alias_defaults, '''Invalid expression, it must be a literal python dictionary definition e.g. "{'field': 'value'}"''', ['alias_defaults']),
]
def name_get(self, cr, uid, ids, context=None):
"""Return the mail alias display alias_name, including the implicit
mail catchall domain if exists from config otherwise "New Alias".
e.g. `jobs@openerp.my.openerp.com` or `jobs` or 'New Alias'
"""
res = []
for record in self.browse(cr, uid, ids, context=context):
if record.alias_name and record.alias_domain:
res.append((record['id'], "%s@%s" % (record.alias_name, record.alias_domain)))
elif record.alias_name:
res.append((record['id'], "%s" % (record.alias_name)))
else:
res.append((record['id'], _("Inactive Alias")))
return res
def _find_unique(self, cr, uid, name, context=None):
"""Find a unique alias name similar to ``name``. If ``name`` is
already taken, make a variant by adding an integer suffix until
an unused alias is found.
"""
sequence = None
while True:
new_name = "%s%s" % (name, sequence) if sequence is not None else name
if not self.search(cr, uid, [('alias_name', '=', new_name)]):
break
sequence = (sequence + 1) if sequence else 2
return new_name
def _clean_and_make_unique(self, cr, uid, name, context=None):
# when an alias name appears to already be an email, we keep the local part only
name = remove_accents(name).lower().split('@')[0]
name = re.sub(r'[^\w+.]+', '-', name)
return self._find_unique(cr, uid, name, context=context)
def migrate_to_alias(self, cr, child_model_name, child_table_name, child_model_auto_init_fct,
alias_model_name, alias_id_column, alias_key, alias_prefix='', alias_force_key='', alias_defaults={},
alias_generate_name=False, context=None):
""" Installation hook to create aliases for all users and avoid constraint errors.
:param child_model_name: model name of the child class (i.e. res.users)
:param child_table_name: table name of the child class (i.e. res_users)
:param child_model_auto_init_fct: pointer to the _auto_init function
(i.e. super(res_users,self)._auto_init(cr, context=context))
:param alias_model_name: name of the aliased model
:param alias_id_column: alias_id column (i.e. self._columns['alias_id'])
:param alias_key: name of the column used for the unique name (i.e. 'login')
:param alias_prefix: prefix for the unique name (i.e. 'jobs' + ...)
        :param alias_force_key: name of the column for force_thread_id;
if empty string, not taken into account
:param alias_defaults: dict, keys = mail.alias columns, values = child
model column name used for default values (i.e. {'job_id': 'id'})
:param alias_generate_name: automatically generate alias name using prefix / alias key;
default alias_name value is False because since 8.0 it is not required anymore
"""
if context is None:
context = {}
# disable the unique alias_id not null constraint, to avoid spurious warning during
# super.auto_init. We'll reinstall it afterwards.
alias_id_column.required = False
# call _auto_init
res = child_model_auto_init_fct(cr, context=context)
registry = RegistryManager.get(cr.dbname)
mail_alias = registry.get('mail.alias')
child_class_model = registry[child_model_name]
no_alias_ids = child_class_model.search(cr, SUPERUSER_ID, [('alias_id', '=', False)], context={'active_test': False})
# Use read() not browse(), to avoid prefetching uninitialized inherited fields
for obj_data in child_class_model.read(cr, SUPERUSER_ID, no_alias_ids, [alias_key]):
alias_vals = {'alias_name': False}
if alias_generate_name:
alias_vals['alias_name'] = '%s%s' % (alias_prefix, obj_data[alias_key])
if alias_force_key:
alias_vals['alias_force_thread_id'] = obj_data[alias_force_key]
alias_vals['alias_defaults'] = dict((k, obj_data[v]) for k, v in alias_defaults.iteritems())
alias_vals['alias_parent_thread_id'] = obj_data['id']
alias_create_ctx = dict(context, alias_model_name=alias_model_name, alias_parent_model_name=child_model_name)
alias_id = mail_alias.create(cr, SUPERUSER_ID, alias_vals, context=alias_create_ctx)
child_class_model.write(cr, SUPERUSER_ID, obj_data['id'], {'alias_id': alias_id}, context={'mail_notrack': True})
_logger.info('Mail alias created for %s %s (id %s)', child_model_name, obj_data[alias_key], obj_data['id'])
# Finally attempt to reinstate the missing constraint
try:
cr.execute('ALTER TABLE %s ALTER COLUMN alias_id SET NOT NULL' % (child_table_name))
except Exception:
_logger.warning("Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL",
child_table_name, 'alias_id', child_table_name, 'alias_id')
# set back the unique alias_id constraint
alias_id_column.required = True
return res
def create(self, cr, uid, vals, context=None):
""" Creates an email.alias record according to the values provided in ``vals``,
with 2 alterations: the ``alias_name`` value may be suffixed in order to
make it unique (and certain unsafe characters replaced), and
        the ``alias_model_id`` value will be set to the model ID of the ``model_name``
context value, if provided.
"""
if context is None:
context = {}
model_name = context.get('alias_model_name')
parent_model_name = context.get('alias_parent_model_name')
if vals.get('alias_name'):
vals['alias_name'] = self._clean_and_make_unique(cr, uid, vals.get('alias_name'), context=context)
if model_name:
model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', model_name)], context=context)[0]
vals['alias_model_id'] = model_id
if parent_model_name:
model_id = self.pool.get('ir.model').search(cr, uid, [('model', '=', parent_model_name)], context=context)[0]
vals['alias_parent_model_id'] = model_id
return super(mail_alias, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
""""give uniqe alias name if given alias name is allready assigned"""
if vals.get('alias_name'):
vals['alias_name'] = self._clean_and_make_unique(cr, uid, vals.get('alias_name'), context=context)
return super(mail_alias, self).write(cr, uid, ids, vals, context=context)
def open_document(self, cr, uid, ids, context=None):
alias = self.browse(cr, uid, ids, context=context)[0]
if not alias.alias_model_id or not alias.alias_force_thread_id:
return False
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': alias.alias_model_id.model,
'res_id': alias.alias_force_thread_id,
'type': 'ir.actions.act_window',
}
def open_parent_document(self, cr, uid, ids, context=None):
alias = self.browse(cr, uid, ids, context=context)[0]
if not alias.alias_parent_model_id or not alias.alias_parent_thread_id:
return False
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': alias.alias_parent_model_id.model,
'res_id': alias.alias_parent_thread_id,
'type': 'ir.actions.act_window',
}
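# Illustrative, hedged sketch (not part of the original module): assuming no
# existing alias collides, _clean_and_make_unique() strips accents, keeps only
# the local part of an email-like name and replaces unsafe characters, e.g.
#
#     'Jöbs & Career@example.com'  ->  'jobs-career'
#
# If 'jobs-career' were already taken, _find_unique() would fall back to
# 'jobs-career2', then 'jobs-career3', and so on.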
|
ghickman/django
|
refs/heads/master
|
tests/fixtures_model_package/tests.py
|
312
|
from __future__ import unicode_literals
import warnings
from django.core import management
from django.test import TestCase
from .models import Article
class SampleTestCase(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Test cases can load fixture objects into models defined in packages"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline
)
class FixtureTestCase(TestCase):
def test_loaddata(self):
"Fixtures can load data into models defined in packages"
# Load fixture 1. Single JSON file, with two objects
management.call_command("loaddata", "fixture1.json", verbosity=0)
self.assertQuerysetEqual(
Article.objects.all(), [
"Time to reform copyright",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load fixture 2. JSON file imported by default. Overwrites some
# existing objects
management.call_command("loaddata", "fixture2.json", verbosity=0)
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load a fixture that doesn't exist
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
management.call_command("loaddata", "unknown.json", verbosity=0)
self.assertEqual(len(w), 1)
self.assertTrue(w[0].message, "No fixture named 'unknown' found.")
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
|
endlessm/chromium-browser
|
refs/heads/master
|
third_party/catapult/third_party/google-endpoints/future/backports/xmlrpc/__init__.py
|
1383
|
# This directory is a Python package.
|
nuncjo/odoo
|
refs/heads/8.0
|
openerp/report/render/simple.py
|
324
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
def _render(self):
self.result = StringIO()
parser = xml.dom.minidom.parseString(self.xml)
title = parser.documentElement.tagName
doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
author='Odoo, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
styles = reportlab.lib.styles.getSampleStyleSheet()
title_style = copy.deepcopy(styles["Heading1"])
title_style.alignment = reportlab.lib.enums.TA_CENTER
story = [ Paragraph(title, title_style) ]
style_level = {}
nodes = [ (parser.documentElement,0) ]
while len(nodes):
node = nodes.pop(0)
value = ''
n=len(node[0].childNodes)-1
while n>=0:
if node[0].childNodes[n].nodeType==3:
value += node[0].childNodes[n].nodeValue
else:
nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
n-=1
if not node[1] in style_level:
style = copy.deepcopy(styles["Normal"])
style.leftIndent=node[1]*6*mm
style.firstLineIndent=-3*mm
style_level[node[1]] = style
story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
doc.build(story)
return self.result.getvalue()
if __name__=='__main__':
s = simple()
s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
if s.render():
print s.get()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kevalds51/sympy
|
refs/heads/master
|
sympy/physics/gaussopt.py
|
113
|
from sympy.physics.optics.gaussopt import RayTransferMatrix, FreeSpace,\
FlatRefraction, CurvedRefraction, FlatMirror, CurvedMirror, ThinLens,\
GeometricRay, BeamParameter, waist2rayleigh, rayleigh2waist, geometric_conj_ab,\
geometric_conj_af, geometric_conj_bf, gaussian_conj, conjugate_gauss_beams
from sympy.utilities.exceptions import SymPyDeprecationWarning
SymPyDeprecationWarning(feature="Module sympy.physics.gaussopt",
useinstead="sympy.physics.optics.gaussopt",
deprecated_since_version="0.7.6", issue=7659).warn()
|
liggitt/openshift-ansible
|
refs/heads/master
|
roles/lib_utils/action_plugins/set_version_facts.py
|
10
|
"""
Ansible action plugin to set version facts
"""
# pylint: disable=no-name-in-module, import-error, wrong-import-order
from distutils.version import LooseVersion
from ansible.plugins.action import ActionBase
# pylint: disable=too-many-statements
def set_version_facts_if_unset(version):
""" Set version facts. This currently includes common.version and
common.version_gte_3_x
Args:
version (string): version of openshift installed/to install
Returns:
dict: the facts dict updated with version facts.
"""
facts = {}
if version and version != "latest":
version = LooseVersion(version)
version_gte_3_10 = version >= LooseVersion('3.10')
version_gte_3_11 = version >= LooseVersion('3.11')
else:
# 'Latest' version is set to True, 'Next' versions set to False
version_gte_3_10 = True
version_gte_3_11 = False
facts['openshift_version_gte_3_10'] = version_gte_3_10
facts['openshift_version_gte_3_11'] = version_gte_3_11
if version_gte_3_11:
examples_content_version = 'v3.11'
else:
examples_content_version = 'v3.10'
facts['openshift_examples_content_version'] = examples_content_version
return facts
# pylint: disable=too-few-public-methods
class ActionModule(ActionBase):
"""Action plugin to set version facts"""
def run(self, tmp=None, task_vars=None):
"""Run set_version_facts"""
result = super(ActionModule, self).run(tmp, task_vars)
        # Ignore setting self.task_vars outside of init.
# pylint: disable=W0201
self.task_vars = task_vars or {}
result["changed"] = False
result["failed"] = False
result["msg"] = "Version facts set"
version = self._task.args.get('version')
result["ansible_facts"] = set_version_facts_if_unset(version)
return result
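# Illustrative, hedged sketch (not part of the original plugin): running this
# file directly prints the facts computed for a few sample version strings.
if __name__ == '__main__':
    for sample in ('3.10.0', '3.11.2', 'latest'):
        print("%s -> %s" % (sample, set_version_facts_if_unset(sample)))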
|
navigator8972/vae_hwmotion
|
refs/heads/master
|
dataset.py
|
3
|
import numpy as np
class DataSets(object):
pass
class DataSet(object):
def __init__(self, data, labels=None):
if labels is not None:
#check consistency
assert data.shape[0]==labels.shape[0], (
'data.shape: %s labels.shape: %s' % (data.shape,
labels.shape))
else:
#goahead
self._num_examples = data.shape[0]
self._data = data
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
return
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._data = self._data[perm]
if self._labels is not None:
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
if self._labels is not None:
return self._data[start:end], self._labels[start:end]
else:
return self._data[start:end], None
def construct_datasets(data, labels=None, shuffle=True, validation_ratio=.1, test_ratio=.1):
data_sets = DataSets()
if shuffle:
perm = np.arange(data.shape[0])
np.random.shuffle(perm)
data_shuffled = data[perm]
if labels is not None:
labels_shuffled = labels[perm]
else:
data_shuffled = data
labels_shuffled = labels
test_start_idx = int((1-test_ratio)*data_shuffled.shape[0])
validation_start_idx = int((1-validation_ratio-test_ratio)*data_shuffled.shape[0])
if labels is not None:
assert data_shuffled.shape[0] == labels_shuffled.shape[0], (
'data.shape: %s labels.shape: %s' % (data.shape,
labels.shape))
data_sets.train = DataSet(data_shuffled[:validation_start_idx, :], labels_shuffled[:validation_start_idx, :])
        data_sets.validation = DataSet(data_shuffled[validation_start_idx:test_start_idx, :], labels_shuffled[validation_start_idx:test_start_idx, :])
data_sets.test = DataSet(data_shuffled[test_start_idx:, :], labels_shuffled[test_start_idx:, :])
else:
data_sets.train = DataSet(data_shuffled[:validation_start_idx, :])
data_sets.validation = DataSet(data_shuffled[validation_start_idx:test_start_idx, :])
data_sets.test = DataSet(data_shuffled[test_start_idx:, :])
return data_sets
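# Illustrative, hedged usage sketch (not part of the original module); the
# shapes, split ratios and batch size below are arbitrary.
if __name__ == '__main__':
    data = np.random.rand(100, 8)
    sets = construct_datasets(data, shuffle=True, validation_ratio=.1, test_ratio=.1)
    batch, _ = sets.train.next_batch(16)
    print(batch.shape)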
|
oschulz/pkg-inst-tools
|
refs/heads/master
|
bin/genPkgRules.py
|
1
|
#!/usr/bin/env python
# Copyright (C) 2014 Oliver Schulz <oliver.schulz@tu-dortmund.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import sys
if sys.version_info < (2, 7):
raise SystemExit("ERROR: Python version >= 2.7 required.")
import codecs
import json
from collections import namedtuple
import glob
import re
import os.path
python_version = '{}.{}'.format(sys.version_info.major, sys.version_info.minor)
def ntFromJSONFile(ntName, fileName):
with codecs.open(fileName, encoding="utf-8") as file:
data = json.load(file, object_hook=lambda d: namedtuple(ntName, d.keys())(*d.values()))
return data
def toVarName(s):
r = s.replace("+", "x")
r = re.sub(r"[.-]", "_", r)
return re.sub(r"^[^A-Za-z]|\W", "", r)
def instVar(pkgName):
return toVarName(pkgName) + "_INST"
packages = [p for f in sys.argv[1:] for p in ntFromJSONFile("Struct", f).packages]
def pkg_inst_mark(pkg):
if (os.path.isfile('{pkg}/setup.py'.format(pkg = pkg.name))):
return "$(PREFIX)/lib/python{py_v}/site-packages/{pkg}.inst".format(
pkg = pkg.name,
py_v = python_version
)
else:
return "$(PREFIX)/bin/{}-config".format(pkg.name)
print("# Variables")
print("PYTHON_VERSION = {}".format(python_version))
print("")
print("# Package installation detection")
print("")
for pkg in packages:
print("{} = {}".format(instVar(pkg.name), pkg_inst_mark(pkg)))
print("")
print("# Package rules and dependencies")
pkgNames = [pkg.name for pkg in packages]
for pkg in packages:
print("")
print(".PHONY: install-{}".format(pkg.name, pkg.name))
print("INSTALL_ALL += install-{}".format(pkg.name))
print("install-{}: $({})".format(pkg.name, instVar(pkg.name)))
if hasattr(pkg, "options"):
print("$({}): PKGFLAGS = {}".format(instVar(pkg.name), pkg.options))
if hasattr(pkg, "requires") and set(pkg.requires).intersection(pkgNames):
depString = " ".join(["$({})".format(instVar(dep)) for dep in pkg.requires if dep in pkgNames])
print("$({}): {}".format(instVar(pkg.name), depString))
|
msiebuhr/v8.go
|
refs/heads/master
|
v8/tools/stats-viewer.py
|
143
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A cross-platform execution counter viewer.
The stats viewer reads counters from a binary file and displays them
in a window, re-reading and re-displaying with regular intervals.
"""
import mmap
import optparse
import os
import re
import struct
import sys
import time
import Tkinter
# The interval, in milliseconds, between ui updates
UPDATE_INTERVAL_MS = 100
# Mapping from counter prefix to the formatting to be used for the counter
COUNTER_LABELS = {"t": "%i ms.", "c": "%i"}
# The magic numbers used to check if a file is not a counters file
COUNTERS_FILE_MAGIC_NUMBER = 0xDEADFACE
CHROME_COUNTERS_FILE_MAGIC_NUMBER = 0x13131313
class StatsViewer(object):
"""The main class that keeps the data used by the stats viewer."""
def __init__(self, data_name, name_filter):
"""Creates a new instance.
Args:
data_name: the name of the file containing the counters.
name_filter: The regexp filter to apply to counter names.
"""
self.data_name = data_name
self.name_filter = name_filter
# The handle created by mmap.mmap to the counters file. We need
# this to clean it up on exit.
self.shared_mmap = None
# A mapping from counter names to the ui element that displays
# them
self.ui_counters = {}
# The counter collection used to access the counters file
self.data = None
# The Tkinter root window object
self.root = None
def Run(self):
"""The main entry-point to running the stats viewer."""
try:
self.data = self.MountSharedData()
# OpenWindow blocks until the main window is closed
self.OpenWindow()
finally:
self.CleanUp()
def MountSharedData(self):
"""Mount the binary counters file as a memory-mapped file. If
something goes wrong print an informative message and exit the
program."""
if not os.path.exists(self.data_name):
maps_name = "/proc/%s/maps" % self.data_name
if not os.path.exists(maps_name):
print "\"%s\" is neither a counter file nor a PID." % self.data_name
sys.exit(1)
maps_file = open(maps_name, "r")
try:
self.data_name = None
for m in re.finditer(r"/dev/shm/\S*", maps_file.read()):
if os.path.exists(m.group(0)):
self.data_name = m.group(0)
break
if self.data_name is None:
print "Can't find counter file in maps for PID %s." % self.data_name
sys.exit(1)
finally:
maps_file.close()
data_file = open(self.data_name, "r")
size = os.fstat(data_file.fileno()).st_size
fileno = data_file.fileno()
self.shared_mmap = mmap.mmap(fileno, size, access=mmap.ACCESS_READ)
data_access = SharedDataAccess(self.shared_mmap)
if data_access.IntAt(0) == COUNTERS_FILE_MAGIC_NUMBER:
return CounterCollection(data_access)
elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
return ChromeCounterCollection(data_access)
print "File %s is not stats data." % self.data_name
sys.exit(1)
def CleanUp(self):
"""Cleans up the memory mapped file if necessary."""
if self.shared_mmap:
self.shared_mmap.close()
def UpdateCounters(self):
"""Read the contents of the memory-mapped file and update the ui if
necessary. If the same counters are present in the file as before
we just update the existing labels. If any counters have been added
or removed we scrap the existing ui and draw a new one.
"""
changed = False
counters_in_use = self.data.CountersInUse()
if counters_in_use != len(self.ui_counters):
self.RefreshCounters()
changed = True
else:
for i in xrange(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
if name in self.ui_counters:
value = counter.Value()
ui_counter = self.ui_counters[name]
counter_changed = ui_counter.Set(value)
changed = (changed or counter_changed)
else:
self.RefreshCounters()
changed = True
break
if changed:
# The title of the window shows the last time the file was
# changed.
self.UpdateTime()
self.ScheduleUpdate()
def UpdateTime(self):
"""Update the title of the window with the current time."""
self.root.title("Stats Viewer [updated %s]" % time.strftime("%H:%M:%S"))
def ScheduleUpdate(self):
"""Schedules the next ui update."""
self.root.after(UPDATE_INTERVAL_MS, lambda: self.UpdateCounters())
def RefreshCounters(self):
"""Tear down and rebuild the controls in the main window."""
counters = self.ComputeCounters()
self.RebuildMainWindow(counters)
def ComputeCounters(self):
"""Group the counters by the suffix of their name.
Since the same code-level counter (for instance "X") can result in
several variables in the binary counters file that differ only by a
two-character prefix (for instance "c:X" and "t:X") counters are
grouped by suffix and then displayed with custom formatting
depending on their prefix.
Returns:
A mapping from suffixes to a list of counters with that suffix,
sorted by prefix.
"""
names = {}
for i in xrange(self.data.CountersInUse()):
counter = self.data.Counter(i)
name = counter.Name()
names[name] = counter
# By sorting the keys we ensure that the prefixes always come in the
# same order ("c:" before "t:") which looks more consistent in the
# ui.
sorted_keys = names.keys()
sorted_keys.sort()
# Group together the names whose suffix after a ':' are the same.
groups = {}
for name in sorted_keys:
counter = names[name]
if ":" in name:
name = name[name.find(":")+1:]
if not name in groups:
groups[name] = []
groups[name].append(counter)
return groups
def RebuildMainWindow(self, groups):
"""Tear down and rebuild the main window.
Args:
groups: the groups of counters to display
"""
# Remove elements in the current ui
self.ui_counters.clear()
for child in self.root.children.values():
child.destroy()
# Build new ui
index = 0
sorted_groups = groups.keys()
sorted_groups.sort()
for counter_name in sorted_groups:
counter_objs = groups[counter_name]
if self.name_filter.match(counter_name):
name = Tkinter.Label(self.root, width=50, anchor=Tkinter.W,
text=counter_name)
name.grid(row=index, column=0, padx=1, pady=1)
count = len(counter_objs)
for i in xrange(count):
counter = counter_objs[i]
name = counter.Name()
var = Tkinter.StringVar()
if self.name_filter.match(name):
value = Tkinter.Label(self.root, width=15, anchor=Tkinter.W,
textvariable=var)
value.grid(row=index, column=(1 + i), padx=1, pady=1)
# If we know how to interpret the prefix of this counter then
# add an appropriate formatting to the variable
if (":" in name) and (name[0] in COUNTER_LABELS):
format = COUNTER_LABELS[name[0]]
else:
format = "%i"
ui_counter = UiCounter(var, format)
self.ui_counters[name] = ui_counter
ui_counter.Set(counter.Value())
index += 1
self.root.update()
def OpenWindow(self):
"""Create and display the root window."""
self.root = Tkinter.Tk()
# Tkinter is no good at resizing so we disable it
self.root.resizable(width=False, height=False)
self.RefreshCounters()
self.ScheduleUpdate()
self.root.mainloop()
class UiCounter(object):
"""A counter in the ui."""
def __init__(self, var, format):
"""Creates a new ui counter.
Args:
var: the Tkinter string variable for updating the ui
format: the format string used to format this counter
"""
self.var = var
self.format = format
self.last_value = None
def Set(self, value):
"""Updates the ui for this counter.
Args:
value: The value to display
Returns:
True if the value had changed, otherwise False. The first call
always returns True.
"""
if value == self.last_value:
return False
else:
self.last_value = value
self.var.set(self.format % value)
return True
class SharedDataAccess(object):
"""A utility class for reading data from the memory-mapped binary
counters file."""
def __init__(self, data):
"""Create a new instance.
Args:
data: A handle to the memory-mapped file, as returned by mmap.mmap.
"""
self.data = data
def ByteAt(self, index):
"""Return the (unsigned) byte at the specified byte index."""
return ord(self.CharAt(index))
def IntAt(self, index):
"""Return the little-endian 32-byte int at the specified byte index."""
word_str = self.data[index:index+4]
result, = struct.unpack("I", word_str)
return result
def CharAt(self, index):
"""Return the ascii character at the specified byte index."""
return self.data[index]
class Counter(object):
"""A pointer to a single counter withing a binary counters file."""
def __init__(self, data, offset):
"""Create a new instance.
Args:
data: the shared data access object containing the counter
offset: the byte offset of the start of this counter
"""
self.data = data
self.offset = offset
def Value(self):
"""Return the integer value of this counter."""
return self.data.IntAt(self.offset)
def Name(self):
"""Return the ascii name of this counter."""
result = ""
index = self.offset + 4
current = self.data.ByteAt(index)
while current:
result += chr(current)
index += 1
current = self.data.ByteAt(index)
return result
class CounterCollection(object):
"""An overlay over a counters file that provides access to the
individual counters contained in the file."""
def __init__(self, data):
"""Create a new instance.
Args:
data: the shared data access object
"""
self.data = data
self.max_counters = data.IntAt(4)
self.max_name_size = data.IntAt(8)
def CountersInUse(self):
"""Return the number of counters in active use."""
return self.data.IntAt(12)
def Counter(self, index):
"""Return the index'th counter."""
return Counter(self.data, 16 + index * self.CounterSize())
def CounterSize(self):
"""Return the size of a single counter."""
return 4 + self.max_name_size
class ChromeCounter(object):
"""A pointer to a single counter withing a binary counters file."""
def __init__(self, data, name_offset, value_offset):
"""Create a new instance.
Args:
data: the shared data access object containing the counter
name_offset: the byte offset of the start of this counter's name
value_offset: the byte offset of the start of this counter's value
"""
self.data = data
self.name_offset = name_offset
self.value_offset = value_offset
def Value(self):
"""Return the integer value of this counter."""
return self.data.IntAt(self.value_offset)
def Name(self):
"""Return the ascii name of this counter."""
result = ""
index = self.name_offset
current = self.data.ByteAt(index)
while current:
result += chr(current)
index += 1
current = self.data.ByteAt(index)
return result
class ChromeCounterCollection(object):
"""An overlay over a counters file that provides access to the
individual counters contained in the file."""
_HEADER_SIZE = 4 * 4
_COUNTER_NAME_SIZE = 64
_THREAD_NAME_SIZE = 32
def __init__(self, data):
"""Create a new instance.
Args:
data: the shared data access object
"""
self.data = data
self.max_counters = data.IntAt(8)
self.max_threads = data.IntAt(12)
self.counter_names_offset = \
self._HEADER_SIZE + self.max_threads * (self._THREAD_NAME_SIZE + 2 * 4)
self.counter_values_offset = \
self.counter_names_offset + self.max_counters * self._COUNTER_NAME_SIZE
def CountersInUse(self):
"""Return the number of counters in active use."""
for i in xrange(self.max_counters):
name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
if self.data.ByteAt(name_offset) == 0:
return i
return self.max_counters
def Counter(self, i):
"""Return the i'th counter."""
name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
value_offset = self.counter_values_offset + i * self.max_threads * 4
return ChromeCounter(self.data, name_offset, value_offset)
def Main(data_file, name_filter):
"""Run the stats counter.
Args:
data_file: The counters file to monitor.
name_filter: The regexp filter to apply to counter names.
"""
StatsViewer(data_file, name_filter).Run()
if __name__ == "__main__":
parser = optparse.OptionParser("usage: %prog [--filter=re] "
"<stats data>|<test_shell pid>")
parser.add_option("--filter",
default=".*",
help=("regexp filter for counter names "
"[default: %default]"))
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(1)
Main(args[0], re.compile(options.filter))
|
rachel3834/event_observer
|
refs/heads/master
|
scripts/version.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 4 23:19:06 2016
@author: robouser
"""
# Version string stored separately from the package so that setup.py can
# read it without importing the entire package
__version__ = '1.0'
|
endlessm/chromium-browser
|
refs/heads/master
|
third_party/catapult/third_party/pyasn1/pyasn1/type/__init__.py
|
3653
|
# This file is necessary to make this directory a package.
|
joeyjojo/django_offline
|
refs/heads/master
|
src/django/db/transaction.py
|
41
|
"""
This module implements a transaction manager that can be used to define
transaction handling in a request or view function. It is used by transaction
control middleware and decorators.
The transaction manager can be in managed or in auto state. Auto state means the
system is using a commit-on-save strategy (actually it's more like
commit-on-change). As soon as the .save() or .delete() (or related) methods are
called, a commit is made.
Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks.
"""
from __future__ import with_statement
from functools import wraps
from django.db import connections, DEFAULT_DB_ALIAS
class TransactionManagementError(Exception):
"""
This exception is thrown when something bad happens with transaction
management.
"""
pass
def enter_transaction_management(managed=True, using=None):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.enter_transaction_management(managed)
def leave_transaction_management(using=None):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.leave_transaction_management()
def is_dirty(using=None):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
return connection.is_dirty()
def set_dirty(using=None):
"""
Sets a dirty flag for the current thread and code streak. This can be used
    in a managed block of code to decide whether there are open
changes waiting for commit.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.set_dirty()
def set_clean(using=None):
"""
Resets a dirty flag for the current thread and code streak. This can be used
    in a managed block of code to decide whether a commit or rollback
should happen.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.set_clean()
def clean_savepoints(using=None):
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.clean_savepoints()
def is_managed(using=None):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
return connection.is_managed()
def managed(flag=True, using=None):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
    committed.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.managed(flag)
def commit_unless_managed(using=None):
"""
Commits changes if the system is not in managed transaction mode.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.commit_unless_managed()
def rollback_unless_managed(using=None):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.rollback_unless_managed()
def commit(using=None):
"""
Does the commit itself and resets the dirty flag.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.commit()
def rollback(using=None):
"""
This function does the rollback itself and resets the dirty flag.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
return connection.savepoint()
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if using is None:
using = DEFAULT_DB_ALIAS
connection = connections[using]
connection.savepoint_commit(sid)
##############
# DECORATORS #
##############
class Transaction(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
autocommit, commit_on_success, and commit_manually contain the
implementations of entering and exiting.
"""
def __init__(self, entering, exiting, using):
self.entering = entering
self.exiting = exiting
self.using = using
def __enter__(self):
self.entering(self.using)
def __exit__(self, exc_type, exc_value, traceback):
self.exiting(exc_value, self.using)
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def _transaction_func(entering, exiting, using):
"""
    Takes 3 things: an entering function (what to do to start this block of
    transaction management), an exiting function (what to do to end it, on
    both success and failure), and using, which can be None (meaning
    DEFAULT_DB_ALIAS), a callable (meaning DEFAULT_DB_ALIAS, with the callable
    returned already wrapped), or a database alias.
Returns either a Transaction objects, which is both a decorator and a
context manager, or a wrapped function, if using is a callable.
"""
# Note that although the first argument is *called* `using`, it
# may actually be a function; @autocommit and @autocommit('foo')
# are both allowed forms.
if using is None:
using = DEFAULT_DB_ALIAS
if callable(using):
return Transaction(entering, exiting, DEFAULT_DB_ALIAS)(using)
return Transaction(entering, exiting, using)
def autocommit(using=None):
"""
Decorator that activates commit on save. This is Django's default behavior;
this decorator is useful if you globally activated transaction management in
your settings file and want the default behavior in some view functions.
"""
def entering(using):
enter_transaction_management(managed=False, using=using)
managed(False, using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_on_success(using=None):
"""
This decorator activates commit on response. This way, if the view function
    runs successfully, a commit is made; if the view function raises an exception,
a rollback is made. This is one of the most common ways to do transaction
control in Web apps.
"""
def entering(using):
enter_transaction_management(using=using)
managed(True, using=using)
def exiting(exc_value, using):
try:
if exc_value is not None:
if is_dirty(using=using):
rollback(using=using)
else:
if is_dirty(using=using):
try:
commit(using=using)
except:
rollback(using=using)
raise
finally:
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
def commit_manually(using=None):
"""
Decorator that activates manual transaction control. It just disables
automatic transaction control and doesn't do any commit/rollback of its
own -- it's up to the user to call the commit and rollback functions
themselves.
"""
def entering(using):
enter_transaction_management(using=using)
managed(True, using=using)
def exiting(exc_value, using):
leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using)
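# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal, hedged example of how the decorators above are typically used;
# ``Comment`` and the functions below are hypothetical names for illustration.
#
#     from django.db import transaction
#
#     @transaction.commit_on_success
#     def add_comment(request):
#         ...  # all ORM writes in the view commit together, or roll back on error
#
#     def bulk_import(rows):
#         with transaction.commit_manually():
#             try:
#                 for row in rows:
#                     Comment.objects.create(**row)
#             except:
#                 transaction.rollback()
#                 raise
#             else:
#                 transaction.commit()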
|
LinkHS/incubator-mxnet
|
refs/heads/master
|
example/rnn/old/gru_bucketing.py
|
38
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from gru import gru_unroll
from bucket_io import BucketSentenceIter, DummyIter, default_build_vocab  # DummyIter is assumed to live in bucket_io; it is only needed when dummy_data is True
def Perplexity(label, pred):
label = label.T.reshape((-1,))
loss = 0.
for i in range(pred.shape[0]):
loss += -np.log(max(1e-10, pred[i][int(label[i])]))
return np.exp(loss / label.size)
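# Editor's note (illustrative, not in the original script): Perplexity is
# exp(mean negative log-likelihood) over all labels. For example, two targets
# each predicted with probability 0.5 give a perplexity of exactly 2.0:
#
#     label = np.array([[0], [1]])               # shape (seq_len, batch_size)
#     pred = np.array([[0.5, 0.5], [0.5, 0.5]])  # shape (num_preds, vocab_size)
#     assert abs(Perplexity(label, pred) - 2.0) < 1e-6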
if __name__ == '__main__':
batch_size = 32
#buckets = [10, 20, 30, 40, 50, 60]
#buckets = [32]
buckets = []
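# Editor's note: an empty ``buckets`` list is assumed to let BucketSentenceIter
# derive bucket sizes from the sentence-length statistics of the data itself
# (see bucket_io); uncomment one of the lists above to fix the buckets manually.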
num_hidden = 200
num_embed = 200
num_lstm_layer = 2
num_epoch = 25
learning_rate = 0.01
momentum = 0.0
# dummy data is used to test speed without IO
dummy_data = False
#contexts = [mx.context.gpu(i) for i in range(1)]
contexts = mx.context.cpu()
vocab = default_build_vocab("./data/ptb.train.txt")
def sym_gen(seq_len):
return gru_unroll(num_lstm_layer, seq_len, len(vocab),
num_hidden=num_hidden, num_embed=num_embed,
num_label=len(vocab))
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
data_train = BucketSentenceIter("./data/ptb.train.txt", vocab,
buckets, batch_size, init_h)
data_val = BucketSentenceIter("./data/ptb.valid.txt", vocab,
buckets, batch_size, init_h)
if dummy_data:
data_train = DummyIter(data_train)
data_val = DummyIter(data_val)
if len(buckets) == 1:
# only 1 bucket, disable bucketing
symbol = sym_gen(buckets[0])
else:
symbol = sym_gen
model = mx.model.FeedForward(ctx=contexts,
symbol=symbol,
num_epoch=num_epoch,
learning_rate=learning_rate,
momentum=momentum,
wd=0.00001,
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34))
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
model.fit(X=data_train, eval_data=data_val,
eval_metric = mx.metric.np(Perplexity),
batch_end_callback=mx.callback.Speedometer(batch_size, 50),)
|
Jumpscale/jumpscale6_core
|
refs/heads/master
|
apps/osis/logic/system/info/OSIS_info_impl.py
|
1
|
from JumpScale import j
from JumpScale.grid.osis.OSISStoreES import OSISStoreES
# from JumpScale.grid.osis.OSISStore import OSISStore
class mainclass(OSISStoreES):
"""
"""
|
seungjin/app5-seungjin-net.appspot.com
|
refs/heads/master
|
django/contrib/contenttypes/management.py
|
315
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import get_apps, get_models, signals
from django.utils.encoding import smart_unicode
def update_contenttypes(app, created_models, verbosity=2, **kwargs):
"""
Creates content types for models in the given app, removing any model
entries that no longer have a matching model class.
"""
ContentType.objects.clear_cache()
content_types = list(ContentType.objects.filter(app_label=app.__name__.split('.')[-2]))
app_models = get_models(app)
if not app_models:
return
for klass in app_models:
opts = klass._meta
try:
ct = ContentType.objects.get(app_label=opts.app_label,
model=opts.object_name.lower())
content_types.remove(ct)
except ContentType.DoesNotExist:
ct = ContentType(name=smart_unicode(opts.verbose_name_raw),
app_label=opts.app_label, model=opts.object_name.lower())
ct.save()
if verbosity >= 2:
print "Adding content type '%s | %s'" % (ct.app_label, ct.model)
# The presence of any remaining content types means the supplied app has an
# undefined model. Confirm that the content type is stale before deletion.
if content_types:
if kwargs.get('interactive', False):
content_type_display = '\n'.join([' %s | %s' % (ct.app_label, ct.model) for ct in content_types])
ok_to_delete = raw_input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in content_types:
if verbosity >= 2:
print "Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)
ct.delete()
else:
if verbosity >= 2:
print "Stale content types remain."
def update_all_contenttypes(verbosity=2, **kwargs):
for app in get_apps():
update_contenttypes(app, None, verbosity, **kwargs)
signals.post_syncdb.connect(update_contenttypes)
if __name__ == "__main__":
update_all_contenttypes()
|
zorojean/tushare
|
refs/heads/master
|
tushare/stock/fundamental.py
|
21
|
# -*- coding:utf-8 -*-
"""
Fundamentals (financial statement) data interface
Created on 2015/01/18
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
from tushare.stock import cons as ct
import lxml.html
from lxml import etree
import re
from pandas.compat import StringIO
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_stock_basics():
"""
Get basic information for companies listed in Shanghai and Shenzhen
Return
--------
DataFrame
code,stock code
name,company name
industry,industry (fine-grained)
area,region
pe,price/earnings ratio
outstanding,tradable (float) shares
totals,total shares (10k)
totalAssets,total assets (10k)
liquidAssets,current assets
fixedAssets,fixed assets
reserved,capital reserve
reservedPerShare,capital reserve per share
eps,earnings per share
bvps,book value per share
pb,price/book ratio
timeToMarket,listing date
"""
request = Request(ct.ALL_STOCK_BASICS_FILE)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
df = pd.read_csv(StringIO(text), dtype={'code':'object'})
df = df.set_index('code')
return df
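# --- Editor's illustrative sketch (not part of the original source) ---
# How the public helper above is typically called; column names follow the
# docstring.
#
#     import tushare as ts
#     basics = ts.get_stock_basics()                 # DataFrame indexed by code
#     cheap = basics[(basics.pe > 0) & (basics.pe < 15)][['name', 'pe', 'pb']]
#     print(cheap.head())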
def get_report_data(year, quarter):
"""
Get quarterly earnings report data
Parameters
--------
year:int  year, e.g. 2014
quarter:int  quarter, one of 1, 2, 3, 4 only
Note: the data is scraped from the website page by page, so speed depends on your current network connection
Return
--------
DataFrame
code,stock code
name,company name
eps,earnings per share
eps_yoy,EPS year-on-year growth (%)
bvps,book value per share
roe,return on equity (%)
epcf,cash flow per share (CNY)
net_profits,net profit (10k CNY)
profits_yoy,net profit year-on-year growth (%)
distrib,dividend/distribution plan
report_date,release date
"""
if ct._check_input(year,quarter) is True:
ct._write_head()
df = _get_report_data(year, quarter, 1, pd.DataFrame())
if df is not None:
df = df.drop_duplicates('code')
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
return df
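# --- Editor's illustrative sketch (not part of the original source) ---
#     import tushare as ts
#     report = ts.get_report_data(2014, 3)   # Q3 2014 earnings reports
#     print(report.head())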
def _get_report_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.REPORT_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['fd'],
year, quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df = df.drop(11, axis=1)
df.columns = ct.REPORT_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_report_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def get_profit_data(year, quarter):
"""
Get profitability data
Parameters
--------
year:int  year, e.g. 2014
quarter:int  quarter, one of 1, 2, 3, 4 only
Note: the data is scraped from the website page by page, so speed depends on your current network connection
Return
--------
DataFrame
code,stock code
name,company name
roe,return on equity (%)
net_profit_ratio,net profit margin (%)
gross_profit_rate,gross margin (%)
net_profits,net profit (10k CNY)
eps,earnings per share
business_income,operating revenue (million CNY)
bips,revenue per share from main business (CNY)
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_profit_data(year, quarter, 1, pd.DataFrame())
if data is not None:
data = data.drop_duplicates('code')
data['code'] = data['code'].map(lambda x:str(x).zfill(6))
return data
def _get_profit_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.PROFIT_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns=ct.PROFIT_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_profit_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def get_operation_data(year, quarter):
"""
Get operating-efficiency data
Parameters
--------
year:int  year, e.g. 2014
quarter:int  quarter, one of 1, 2, 3, 4 only
Note: the data is scraped from the website page by page, so speed depends on your current network connection
Return
--------
DataFrame
code,stock code
name,company name
arturnover,accounts receivable turnover (times)
arturndays,days receivables outstanding (days)
inventory_turnover,inventory turnover (times)
inventory_days,days inventory outstanding (days)
currentasset_turnover,current asset turnover (times)
currentasset_days,current asset turnover days (days)
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_operation_data(year, quarter, 1, pd.DataFrame())
if data is not None:
data = data.drop_duplicates('code')
data['code'] = data['code'].map(lambda x:str(x).zfill(6))
return data
def _get_operation_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.OPERATION_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns=ct.OPERATION_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_operation_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def get_growth_data(year, quarter):
"""
Get growth data
Parameters
--------
year:int  year, e.g. 2014
quarter:int  quarter, one of 1, 2, 3, 4 only
Note: the data is scraped from the website page by page, so speed depends on your current network connection
Return
--------
DataFrame
code,stock code
name,company name
mbrg,main business revenue growth rate (%)
nprg,net profit growth rate (%)
nav,net asset growth rate
targ,total asset growth rate
epsg,EPS growth rate
seg,shareholders' equity growth rate
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
data = _get_growth_data(year, quarter, 1, pd.DataFrame())
if data is not None:
data = data.drop_duplicates('code')
data['code'] = data['code'].map(lambda x:str(x).zfill(6))
return data
def _get_growth_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.GROWTH_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns=ct.GROWTH_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_growth_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def get_debtpaying_data(year, quarter):
"""
Get solvency (debt-paying ability) data
Parameters
--------
year:int  year, e.g. 2014
quarter:int  quarter, one of 1, 2, 3, 4 only
Note: the data is scraped from the website page by page, so speed depends on your current network connection
Return
--------
DataFrame
code,stock code
name,company name
currentratio,current ratio
quickratio,quick ratio
cashratio,cash ratio
icratio,interest coverage ratio
sheqratio,shareholders' equity ratio
adratio,shareholders' equity growth rate
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
df = _get_debtpaying_data(year, quarter, 1, pd.DataFrame())
if df is not None:
df = df.drop_duplicates('code')
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
return df
def _get_debtpaying_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.DEBTPAYING_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = ct.DEBTPAYING_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_debtpaying_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def get_cashflow_data(year, quarter):
"""
Get cash flow data
Parameters
--------
year:int  year, e.g. 2014
quarter:int  quarter, one of 1, 2, 3, 4 only
Note: the data is scraped from the website page by page, so speed depends on your current network connection
Return
--------
DataFrame
code,stock code
name,company name
cf_sales,net operating cash flow to sales revenue ratio
rateofreturn,operating cash flow return on assets
cf_nm,net operating cash flow to net profit ratio
cf_liabilities,net operating cash flow to liabilities ratio
cashflowratio,cash flow ratio
"""
if ct._check_input(year, quarter) is True:
ct._write_head()
df = _get_cashflow_data(year, quarter, 1, pd.DataFrame())
if df is not None:
df = df.drop_duplicates('code')
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
return df
def _get_cashflow_data(year, quarter, pageNo, dataArr):
ct._write_console()
try:
request = Request(ct.CASHFLOW_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['fd'], year,
quarter, pageNo, ct.PAGE_NUM[1]))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('--', '')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@class=\"list_table\"]/tr")
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = ct.CASHFLOW_COLS
dataArr = dataArr.append(df, ignore_index=True)
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return _get_cashflow_data(year, quarter, pageNo, dataArr)
else:
return dataArr
except Exception as e:
print(e)
def _data_path():
import os
import inspect
caller_file = inspect.stack()[1][1]
pardir = os.path.abspath(os.path.join(os.path.dirname(caller_file), os.path.pardir))
return os.path.abspath(os.path.join(pardir, os.path.pardir))
|
lisaglendenning/pynet
|
refs/heads/master
|
source/pynet/io/poll/py24/selects.py
|
1
|
# @copyright
# @license
from ipolls import IPoller, EVENTS, POLLIN, POLLOUT, POLLEX, POLLHUP
import select as pyselect
##############################################################################
##############################################################################
class Poller(IPoller):
readables = None
writables = None
exceptables = None
def __init__(self):
super(Poller, self).__init__()
self.readables = set()
self.writables = set()
self.exceptables = set()
def __setitem__(self, fd, events):
if fd in self:
old = self[fd]
added = events & ~old
removed = old & ~events
else:
added = events
removed = 0
for flag, group in ((POLLIN, self.readables), (POLLOUT, self.writables),):
if flag & added:
group.add(fd)
elif flag & removed:
group.remove(fd)
if fd in self.readables or fd in self.writables:
if fd not in self.exceptables:
self.exceptables.add(fd)
else:
if fd in self.exceptables:
self.exceptables.remove(fd)
IPoller.__setitem__(self, fd, events)
def __delitem__(self, fd):
if fd not in self:
raise KeyError(fd)
old = self[fd]
if old:
for flag, group in ((POLLIN, self.readables), (POLLOUT, self.writables),):
if flag & old:
group.remove(fd)
self.exceptables.remove(fd)
IPoller.__delitem__(self, fd)
def poll(self, timeout=0.0):
# must be sequences of integers or objects with fileno()
rs = self.readables
ws = self.writables
xs = self.exceptables
# acceptance of three empty sequences is platform-dependent
if not (rs or ws or xs):
return
r, w, x = pyselect.select(rs, ws, xs, timeout)
for event, fds in ((POLLIN, r), (POLLOUT, w), (POLLEX, x),):
for fd in fds:
yield (fd, event,)
#############################################################################
#############################################################################
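# --- Editor's illustrative sketch (not part of the original module) ---
# Register a socket for readability and drain ready events; ``poll()`` is a
# generator of (fd, event) pairs. ``server_sock`` and ``handle_read`` are
# hypothetical names used only for illustration.
#
#     poller = Poller()
#     poller[server_sock.fileno()] = POLLIN
#     for fd, event in poller.poll(timeout=1.0):
#         if event & POLLIN:
#             handle_read(fd)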
|
chauhanhardik/populo_2
|
refs/heads/master
|
lms/envs/sauce.py
|
116
|
"""
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests on SauceLabs.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import os
PORTS = [
2000, 2001, 2020, 2109, 2222, 2310, 3000, 3001,
3030, 3210, 3333, 4000, 4001, 4040, 4321, 4502, 4503,
5050, 5555, 5432, 6060, 6666, 6543, 7000, 7070, 7774,
7777, 8003, 8031, 8080, 8081, 8765, 8888,
9080, 9090, 9876, 9999, 49221, 55001
]
DESIRED_CAPABILITIES = {
'chrome': DesiredCapabilities.CHROME,
'internetexplorer': DesiredCapabilities.INTERNETEXPLORER,
'firefox': DesiredCapabilities.FIREFOX,
'opera': DesiredCapabilities.OPERA,
'iphone': DesiredCapabilities.IPHONE,
'ipad': DesiredCapabilities.IPAD,
'safari': DesiredCapabilities.SAFARI,
'android': DesiredCapabilities.ANDROID
}
# All keys must be URL and JSON encodable
# PLATFORM-BROWSER-VERSION_NUM-DEVICE
ALL_CONFIG = {
'Linux-chrome--': ['Linux', 'chrome', '', ''],
'Windows 8-chrome--': ['Windows 8', 'chrome', '', ''],
'Windows 7-chrome--': ['Windows 7', 'chrome', '', ''],
'Windows XP-chrome--': ['Windows XP', 'chrome', '', ''],
'OS X 10.8-chrome--': ['OS X 10.8', 'chrome', '', ''],
'OS X 10.6-chrome--': ['OS X 10.6', 'chrome', '', ''],
'Linux-firefox-23-': ['Linux', 'firefox', '23', ''],
'Windows 8-firefox-23-': ['Windows 8', 'firefox', '23', ''],
'Windows 7-firefox-23-': ['Windows 7', 'firefox', '23', ''],
'Windows XP-firefox-23-': ['Windows XP', 'firefox', '23', ''],
'OS X 10.8-safari-6-': ['OS X 10.8', 'safari', '6', ''],
'Windows 8-internetexplorer-10-': ['Windows 8', 'internetexplorer', '10', ''],
}
SAUCE_INFO = ALL_CONFIG.get(os.environ.get('SAUCE_INFO', 'Linux-chrome--'))
# Information needed to utilize Sauce Labs.
SAUCE = {
'USERNAME': os.environ.get('SAUCE_USER_NAME'),
'ACCESS_ID': os.environ.get('SAUCE_API_KEY'),
'PLATFORM': SAUCE_INFO[0],
'BROWSER': DESIRED_CAPABILITIES.get(SAUCE_INFO[1]),
'VERSION': SAUCE_INFO[2],
'DEVICE': SAUCE_INFO[3],
'SESSION': 'Jenkins Acceptance Tests',
'BUILD': os.environ.get('BUILD_DISPLAY_NAME', 'LETTUCE TESTS'),
}
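# --- Editor's illustrative sketch (not part of the original settings) ---
# The platform/browser combination is selected by setting SAUCE_INFO to one of
# the ALL_CONFIG keys in the environment before this module is imported, e.g.:
#
#     os.environ['SAUCE_INFO'] = 'Windows 7-firefox-23-'
#     os.environ['SAUCE_USER_NAME'] = '<sauce username>'
#     os.environ['SAUCE_API_KEY'] = '<sauce access key>'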
|
odyaka341/pyglet
|
refs/heads/master
|
pyglet/gl/agl.py
|
46
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for /System/Library/Frameworks/AGL.framework/Headers/agl.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: gengl.py 601 2007-02-04 05:36:59Z Alex.Holkner $'
from ctypes import *
from pyglet.gl.lib import link_AGL as _link_function
if not _link_function:
raise ImportError('AGL framework is not available.')
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for /System/Library/Frameworks/AGL.framework/Headers/agl.h
AGL_VERSION_2_0 = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:41
class struct_GDevice(Structure):
__slots__ = [
]
struct_GDevice._fields_ = [
('_opaque_struct', c_int)
]
GDevice = struct_GDevice # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1347
GDPtr = POINTER(GDevice) # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1348
GDHandle = POINTER(GDPtr) # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1349
AGLDevice = GDHandle # /System/Library/Frameworks/AGL.framework/Headers/agl.h:46
class struct_OpaqueGrafPtr(Structure):
__slots__ = [
]
struct_OpaqueGrafPtr._fields_ = [
('_opaque_struct', c_int)
]
GrafPtr = POINTER(struct_OpaqueGrafPtr) # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1009
CGrafPtr = GrafPtr # /System/Library/Frameworks/ApplicationServices.framework/Frameworks/QD.framework/Headers/Quickdraw.h:1392
AGLDrawable = CGrafPtr # /System/Library/Frameworks/AGL.framework/Headers/agl.h:51
class struct___AGLRendererInfoRec(Structure):
__slots__ = [
]
struct___AGLRendererInfoRec._fields_ = [
('_opaque_struct', c_int)
]
AGLRendererInfo = POINTER(struct___AGLRendererInfoRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:56
class struct___AGLPixelFormatRec(Structure):
__slots__ = [
]
struct___AGLPixelFormatRec._fields_ = [
('_opaque_struct', c_int)
]
AGLPixelFormat = POINTER(struct___AGLPixelFormatRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:57
class struct___AGLContextRec(Structure):
__slots__ = [
]
struct___AGLContextRec._fields_ = [
('_opaque_struct', c_int)
]
AGLContext = POINTER(struct___AGLContextRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:58
class struct___AGLPBufferRec(Structure):
__slots__ = [
]
struct___AGLPBufferRec._fields_ = [
('_opaque_struct', c_int)
]
AGLPbuffer = POINTER(struct___AGLPBufferRec) # /System/Library/Frameworks/AGL.framework/Headers/agl.h:59
AGL_NONE = 0 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:66
AGL_ALL_RENDERERS = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:67
AGL_BUFFER_SIZE = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:68
AGL_LEVEL = 3 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:69
AGL_RGBA = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:70
AGL_DOUBLEBUFFER = 5 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:71
AGL_STEREO = 6 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:72
AGL_AUX_BUFFERS = 7 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:73
AGL_RED_SIZE = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:74
AGL_GREEN_SIZE = 9 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:75
AGL_BLUE_SIZE = 10 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:76
AGL_ALPHA_SIZE = 11 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:77
AGL_DEPTH_SIZE = 12 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:78
AGL_STENCIL_SIZE = 13 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:79
AGL_ACCUM_RED_SIZE = 14 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:80
AGL_ACCUM_GREEN_SIZE = 15 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:81
AGL_ACCUM_BLUE_SIZE = 16 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:82
AGL_ACCUM_ALPHA_SIZE = 17 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:83
AGL_PIXEL_SIZE = 50 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:88
AGL_MINIMUM_POLICY = 51 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:89
AGL_MAXIMUM_POLICY = 52 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:90
AGL_OFFSCREEN = 53 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:91
AGL_FULLSCREEN = 54 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:92
AGL_SAMPLE_BUFFERS_ARB = 55 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:93
AGL_SAMPLES_ARB = 56 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:94
AGL_AUX_DEPTH_STENCIL = 57 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:95
AGL_COLOR_FLOAT = 58 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:96
AGL_MULTISAMPLE = 59 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:97
AGL_SUPERSAMPLE = 60 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:98
AGL_SAMPLE_ALPHA = 61 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:99
AGL_RENDERER_ID = 70 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:104
AGL_SINGLE_RENDERER = 71 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:105
AGL_NO_RECOVERY = 72 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:106
AGL_ACCELERATED = 73 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:107
AGL_CLOSEST_POLICY = 74 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:108
AGL_ROBUST = 75 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:109
AGL_BACKING_STORE = 76 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:110
AGL_MP_SAFE = 78 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:111
AGL_WINDOW = 80 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:113
AGL_MULTISCREEN = 81 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:114
AGL_VIRTUAL_SCREEN = 82 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:115
AGL_COMPLIANT = 83 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:116
AGL_PBUFFER = 90 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:118
AGL_BUFFER_MODES = 100 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:135
AGL_MIN_LEVEL = 101 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:136
AGL_MAX_LEVEL = 102 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:137
AGL_COLOR_MODES = 103 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:138
AGL_ACCUM_MODES = 104 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:139
AGL_DEPTH_MODES = 105 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:140
AGL_STENCIL_MODES = 106 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:141
AGL_MAX_AUX_BUFFERS = 107 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:142
AGL_VIDEO_MEMORY = 120 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:143
AGL_TEXTURE_MEMORY = 121 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:144
AGL_RENDERER_COUNT = 128 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:145
AGL_SWAP_RECT = 200 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:150
AGL_BUFFER_RECT = 202 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:151
AGL_SWAP_LIMIT = 203 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:152
AGL_COLORMAP_TRACKING = 210 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:153
AGL_COLORMAP_ENTRY = 212 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:154
AGL_RASTERIZATION = 220 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:155
AGL_SWAP_INTERVAL = 222 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:156
AGL_STATE_VALIDATION = 230 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:157
AGL_BUFFER_NAME = 231 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:158
AGL_ORDER_CONTEXT_TO_FRONT = 232 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:159
AGL_CONTEXT_SURFACE_ID = 233 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:160
AGL_CONTEXT_DISPLAY_ID = 234 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:161
AGL_SURFACE_ORDER = 235 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:162
AGL_SURFACE_OPACITY = 236 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:163
AGL_CLIP_REGION = 254 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:164
AGL_FS_CAPTURE_SINGLE = 255 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:165
AGL_SURFACE_BACKING_SIZE = 304 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:166
AGL_ENABLE_SURFACE_BACKING_SIZE = 305 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:167
AGL_SURFACE_VOLATILE = 306 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:168
AGL_FORMAT_CACHE_SIZE = 501 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:172
AGL_CLEAR_FORMAT_CACHE = 502 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:173
AGL_RETAIN_RENDERERS = 503 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:174
AGL_MONOSCOPIC_BIT = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:177
AGL_STEREOSCOPIC_BIT = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:178
AGL_SINGLEBUFFER_BIT = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:179
AGL_DOUBLEBUFFER_BIT = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:180
AGL_0_BIT = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:183
AGL_1_BIT = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:184
AGL_2_BIT = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:185
AGL_3_BIT = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:186
AGL_4_BIT = 16 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:187
AGL_5_BIT = 32 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:188
AGL_6_BIT = 64 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:189
AGL_8_BIT = 128 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:190
AGL_10_BIT = 256 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:191
AGL_12_BIT = 512 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:192
AGL_16_BIT = 1024 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:193
AGL_24_BIT = 2048 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:194
AGL_32_BIT = 4096 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:195
AGL_48_BIT = 8192 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:196
AGL_64_BIT = 16384 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:197
AGL_96_BIT = 32768 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:198
AGL_128_BIT = 65536 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:199
AGL_RGB8_BIT = 1 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:202
AGL_RGB8_A8_BIT = 2 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:203
AGL_BGR233_BIT = 4 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:204
AGL_BGR233_A8_BIT = 8 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:205
AGL_RGB332_BIT = 16 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:206
AGL_RGB332_A8_BIT = 32 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:207
AGL_RGB444_BIT = 64 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:208
AGL_ARGB4444_BIT = 128 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:209
AGL_RGB444_A8_BIT = 256 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:210
AGL_RGB555_BIT = 512 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:211
AGL_ARGB1555_BIT = 1024 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:212
AGL_RGB555_A8_BIT = 2048 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:213
AGL_RGB565_BIT = 4096 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:214
AGL_RGB565_A8_BIT = 8192 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:215
AGL_RGB888_BIT = 16384 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:216
AGL_ARGB8888_BIT = 32768 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:217
AGL_RGB888_A8_BIT = 65536 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:218
AGL_RGB101010_BIT = 131072 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:219
AGL_ARGB2101010_BIT = 262144 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:220
AGL_RGB101010_A8_BIT = 524288 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:221
AGL_RGB121212_BIT = 1048576 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:222
AGL_ARGB12121212_BIT = 2097152 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:223
AGL_RGB161616_BIT = 4194304 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:224
AGL_ARGB16161616_BIT = 8388608 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:225
AGL_INDEX8_BIT = 536870912 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:226
AGL_INDEX16_BIT = 1073741824 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:227
AGL_RGBFLOAT64_BIT = 16777216 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:228
AGL_RGBAFLOAT64_BIT = 33554432 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:229
AGL_RGBFLOAT128_BIT = 67108864 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:230
AGL_RGBAFLOAT128_BIT = 134217728 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:231
AGL_RGBFLOAT256_BIT = 268435456 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:232
AGL_RGBAFLOAT256_BIT = 536870912 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:233
AGL_NO_ERROR = 0 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:238
AGL_BAD_ATTRIBUTE = 10000 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:240
AGL_BAD_PROPERTY = 10001 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:241
AGL_BAD_PIXELFMT = 10002 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:242
AGL_BAD_RENDINFO = 10003 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:243
AGL_BAD_CONTEXT = 10004 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:244
AGL_BAD_DRAWABLE = 10005 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:245
AGL_BAD_GDEV = 10006 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:246
AGL_BAD_STATE = 10007 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:247
AGL_BAD_VALUE = 10008 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:248
AGL_BAD_MATCH = 10009 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:249
AGL_BAD_ENUM = 10010 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:250
AGL_BAD_OFFSCREEN = 10011 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:251
AGL_BAD_FULLSCREEN = 10012 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:252
AGL_BAD_WINDOW = 10013 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:253
AGL_BAD_POINTER = 10014 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:254
AGL_BAD_MODULE = 10015 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:255
AGL_BAD_ALLOC = 10016 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:256
AGL_BAD_CONNECTION = 10017 # /System/Library/Frameworks/AGL.framework/Headers/agl.h:257
GLint = c_long # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:47
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:264
aglChoosePixelFormat = _link_function('aglChoosePixelFormat', AGLPixelFormat, [POINTER(AGLDevice), GLint, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:265
aglDestroyPixelFormat = _link_function('aglDestroyPixelFormat', None, [AGLPixelFormat], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:266
aglNextPixelFormat = _link_function('aglNextPixelFormat', AGLPixelFormat, [AGLPixelFormat], None)
GLboolean = c_ubyte # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:43
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:267
aglDescribePixelFormat = _link_function('aglDescribePixelFormat', GLboolean, [AGLPixelFormat, GLint, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:268
aglDevicesOfPixelFormat = _link_function('aglDevicesOfPixelFormat', POINTER(AGLDevice), [AGLPixelFormat, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:273
aglQueryRendererInfo = _link_function('aglQueryRendererInfo', AGLRendererInfo, [POINTER(AGLDevice), GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:274
aglDestroyRendererInfo = _link_function('aglDestroyRendererInfo', None, [AGLRendererInfo], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:275
aglNextRendererInfo = _link_function('aglNextRendererInfo', AGLRendererInfo, [AGLRendererInfo], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:276
aglDescribeRenderer = _link_function('aglDescribeRenderer', GLboolean, [AGLRendererInfo, GLint, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:281
aglCreateContext = _link_function('aglCreateContext', AGLContext, [AGLPixelFormat, AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:282
aglDestroyContext = _link_function('aglDestroyContext', GLboolean, [AGLContext], None)
GLuint = c_ulong # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:51
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:283
aglCopyContext = _link_function('aglCopyContext', GLboolean, [AGLContext, AGLContext, GLuint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:284
aglUpdateContext = _link_function('aglUpdateContext', GLboolean, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:289
aglSetCurrentContext = _link_function('aglSetCurrentContext', GLboolean, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:290
aglGetCurrentContext = _link_function('aglGetCurrentContext', AGLContext, [], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:295
aglSetDrawable = _link_function('aglSetDrawable', GLboolean, [AGLContext, AGLDrawable], None)
GLsizei = c_long # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:48
GLvoid = None # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:56
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:296
aglSetOffScreen = _link_function('aglSetOffScreen', GLboolean, [AGLContext, GLsizei, GLsizei, GLsizei, POINTER(GLvoid)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:297
aglSetFullScreen = _link_function('aglSetFullScreen', GLboolean, [AGLContext, GLsizei, GLsizei, GLsizei, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:298
aglGetDrawable = _link_function('aglGetDrawable', AGLDrawable, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:303
aglSetVirtualScreen = _link_function('aglSetVirtualScreen', GLboolean, [AGLContext, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:304
aglGetVirtualScreen = _link_function('aglGetVirtualScreen', GLint, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:309
aglGetVersion = _link_function('aglGetVersion', None, [POINTER(GLint), POINTER(GLint)], None)
GLenum = c_ulong # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:42
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:314
aglConfigure = _link_function('aglConfigure', GLboolean, [GLenum, GLuint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:319
aglSwapBuffers = _link_function('aglSwapBuffers', None, [AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:324
aglEnable = _link_function('aglEnable', GLboolean, [AGLContext, GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:325
aglDisable = _link_function('aglDisable', GLboolean, [AGLContext, GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:326
aglIsEnabled = _link_function('aglIsEnabled', GLboolean, [AGLContext, GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:327
aglSetInteger = _link_function('aglSetInteger', GLboolean, [AGLContext, GLenum, POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:328
aglGetInteger = _link_function('aglGetInteger', GLboolean, [AGLContext, GLenum, POINTER(GLint)], None)
Style = c_ubyte # /System/Library/Frameworks/CoreServices.framework/Headers/../Frameworks/CarbonCore.framework/Headers/MacTypes.h:524
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:333
aglUseFont = _link_function('aglUseFont', GLboolean, [AGLContext, GLint, Style, GLint, GLint, GLint, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:338
aglGetError = _link_function('aglGetError', GLenum, [], None)
GLubyte = c_ubyte # /System/Library/Frameworks/OpenGL.framework/Headers/gl.h:49
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:339
aglErrorString = _link_function('aglErrorString', POINTER(GLubyte), [GLenum], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:344
aglResetLibrary = _link_function('aglResetLibrary', None, [], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:349
aglSurfaceTexture = _link_function('aglSurfaceTexture', None, [AGLContext, GLenum, GLenum, AGLContext], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:354
aglCreatePBuffer = _link_function('aglCreatePBuffer', GLboolean, [GLint, GLint, GLenum, GLenum, c_long, POINTER(AGLPbuffer)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:355
aglDestroyPBuffer = _link_function('aglDestroyPBuffer', GLboolean, [AGLPbuffer], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:356
aglDescribePBuffer = _link_function('aglDescribePBuffer', GLboolean, [AGLPbuffer, POINTER(GLint), POINTER(GLint), POINTER(GLenum), POINTER(GLenum), POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:357
aglTexImagePBuffer = _link_function('aglTexImagePBuffer', GLboolean, [AGLContext, AGLPbuffer, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:362
aglSetPBuffer = _link_function('aglSetPBuffer', GLboolean, [AGLContext, AGLPbuffer, GLint, GLint, GLint], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:363
aglGetPBuffer = _link_function('aglGetPBuffer', GLboolean, [AGLContext, POINTER(AGLPbuffer), POINTER(GLint), POINTER(GLint), POINTER(GLint)], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:368
aglGetCGLContext = _link_function('aglGetCGLContext', GLboolean, [AGLContext, POINTER(POINTER(None))], None)
# /System/Library/Frameworks/AGL.framework/Headers/agl.h:369
aglGetCGLPixelFormat = _link_function('aglGetCGLPixelFormat', GLboolean, [AGLPixelFormat, POINTER(POINTER(None))], None)
__all__ = ['AGL_VERSION_2_0', 'AGLDevice', 'AGLDrawable', 'AGLRendererInfo',
'AGLPixelFormat', 'AGLContext', 'AGLPbuffer', 'AGL_NONE', 'AGL_ALL_RENDERERS',
'AGL_BUFFER_SIZE', 'AGL_LEVEL', 'AGL_RGBA', 'AGL_DOUBLEBUFFER', 'AGL_STEREO',
'AGL_AUX_BUFFERS', 'AGL_RED_SIZE', 'AGL_GREEN_SIZE', 'AGL_BLUE_SIZE',
'AGL_ALPHA_SIZE', 'AGL_DEPTH_SIZE', 'AGL_STENCIL_SIZE', 'AGL_ACCUM_RED_SIZE',
'AGL_ACCUM_GREEN_SIZE', 'AGL_ACCUM_BLUE_SIZE', 'AGL_ACCUM_ALPHA_SIZE',
'AGL_PIXEL_SIZE', 'AGL_MINIMUM_POLICY', 'AGL_MAXIMUM_POLICY', 'AGL_OFFSCREEN',
'AGL_FULLSCREEN', 'AGL_SAMPLE_BUFFERS_ARB', 'AGL_SAMPLES_ARB',
'AGL_AUX_DEPTH_STENCIL', 'AGL_COLOR_FLOAT', 'AGL_MULTISAMPLE',
'AGL_SUPERSAMPLE', 'AGL_SAMPLE_ALPHA', 'AGL_RENDERER_ID',
'AGL_SINGLE_RENDERER', 'AGL_NO_RECOVERY', 'AGL_ACCELERATED',
'AGL_CLOSEST_POLICY', 'AGL_ROBUST', 'AGL_BACKING_STORE', 'AGL_MP_SAFE',
'AGL_WINDOW', 'AGL_MULTISCREEN', 'AGL_VIRTUAL_SCREEN', 'AGL_COMPLIANT',
'AGL_PBUFFER', 'AGL_BUFFER_MODES', 'AGL_MIN_LEVEL', 'AGL_MAX_LEVEL',
'AGL_COLOR_MODES', 'AGL_ACCUM_MODES', 'AGL_DEPTH_MODES', 'AGL_STENCIL_MODES',
'AGL_MAX_AUX_BUFFERS', 'AGL_VIDEO_MEMORY', 'AGL_TEXTURE_MEMORY',
'AGL_RENDERER_COUNT', 'AGL_SWAP_RECT', 'AGL_BUFFER_RECT', 'AGL_SWAP_LIMIT',
'AGL_COLORMAP_TRACKING', 'AGL_COLORMAP_ENTRY', 'AGL_RASTERIZATION',
'AGL_SWAP_INTERVAL', 'AGL_STATE_VALIDATION', 'AGL_BUFFER_NAME',
'AGL_ORDER_CONTEXT_TO_FRONT', 'AGL_CONTEXT_SURFACE_ID',
'AGL_CONTEXT_DISPLAY_ID', 'AGL_SURFACE_ORDER', 'AGL_SURFACE_OPACITY',
'AGL_CLIP_REGION', 'AGL_FS_CAPTURE_SINGLE', 'AGL_SURFACE_BACKING_SIZE',
'AGL_ENABLE_SURFACE_BACKING_SIZE', 'AGL_SURFACE_VOLATILE',
'AGL_FORMAT_CACHE_SIZE', 'AGL_CLEAR_FORMAT_CACHE', 'AGL_RETAIN_RENDERERS',
'AGL_MONOSCOPIC_BIT', 'AGL_STEREOSCOPIC_BIT', 'AGL_SINGLEBUFFER_BIT',
'AGL_DOUBLEBUFFER_BIT', 'AGL_0_BIT', 'AGL_1_BIT', 'AGL_2_BIT', 'AGL_3_BIT',
'AGL_4_BIT', 'AGL_5_BIT', 'AGL_6_BIT', 'AGL_8_BIT', 'AGL_10_BIT',
'AGL_12_BIT', 'AGL_16_BIT', 'AGL_24_BIT', 'AGL_32_BIT', 'AGL_48_BIT',
'AGL_64_BIT', 'AGL_96_BIT', 'AGL_128_BIT', 'AGL_RGB8_BIT', 'AGL_RGB8_A8_BIT',
'AGL_BGR233_BIT', 'AGL_BGR233_A8_BIT', 'AGL_RGB332_BIT', 'AGL_RGB332_A8_BIT',
'AGL_RGB444_BIT', 'AGL_ARGB4444_BIT', 'AGL_RGB444_A8_BIT', 'AGL_RGB555_BIT',
'AGL_ARGB1555_BIT', 'AGL_RGB555_A8_BIT', 'AGL_RGB565_BIT',
'AGL_RGB565_A8_BIT', 'AGL_RGB888_BIT', 'AGL_ARGB8888_BIT',
'AGL_RGB888_A8_BIT', 'AGL_RGB101010_BIT', 'AGL_ARGB2101010_BIT',
'AGL_RGB101010_A8_BIT', 'AGL_RGB121212_BIT', 'AGL_ARGB12121212_BIT',
'AGL_RGB161616_BIT', 'AGL_ARGB16161616_BIT', 'AGL_INDEX8_BIT',
'AGL_INDEX16_BIT', 'AGL_RGBFLOAT64_BIT', 'AGL_RGBAFLOAT64_BIT',
'AGL_RGBFLOAT128_BIT', 'AGL_RGBAFLOAT128_BIT', 'AGL_RGBFLOAT256_BIT',
'AGL_RGBAFLOAT256_BIT', 'AGL_NO_ERROR', 'AGL_BAD_ATTRIBUTE',
'AGL_BAD_PROPERTY', 'AGL_BAD_PIXELFMT', 'AGL_BAD_RENDINFO', 'AGL_BAD_CONTEXT',
'AGL_BAD_DRAWABLE', 'AGL_BAD_GDEV', 'AGL_BAD_STATE', 'AGL_BAD_VALUE',
'AGL_BAD_MATCH', 'AGL_BAD_ENUM', 'AGL_BAD_OFFSCREEN', 'AGL_BAD_FULLSCREEN',
'AGL_BAD_WINDOW', 'AGL_BAD_POINTER', 'AGL_BAD_MODULE', 'AGL_BAD_ALLOC',
'AGL_BAD_CONNECTION', 'aglChoosePixelFormat', 'aglDestroyPixelFormat',
'aglNextPixelFormat', 'aglDescribePixelFormat', 'aglDevicesOfPixelFormat',
'aglQueryRendererInfo', 'aglDestroyRendererInfo', 'aglNextRendererInfo',
'aglDescribeRenderer', 'aglCreateContext', 'aglDestroyContext',
'aglCopyContext', 'aglUpdateContext', 'aglSetCurrentContext',
'aglGetCurrentContext', 'aglSetDrawable', 'aglSetOffScreen',
'aglSetFullScreen', 'aglGetDrawable', 'aglSetVirtualScreen',
'aglGetVirtualScreen', 'aglGetVersion', 'aglConfigure', 'aglSwapBuffers',
'aglEnable', 'aglDisable', 'aglIsEnabled', 'aglSetInteger', 'aglGetInteger',
'aglUseFont', 'aglGetError', 'aglErrorString', 'aglResetLibrary',
'aglSurfaceTexture', 'aglCreatePBuffer', 'aglDestroyPBuffer',
'aglDescribePBuffer', 'aglTexImagePBuffer', 'aglSetPBuffer', 'aglGetPBuffer',
'aglGetCGLContext', 'aglGetCGLPixelFormat']
# END GENERATED CONTENT (do not edit above this line)
|
bottompawn/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/pyopenssl.py
|
304
|
'''SSL with SNI_-support for Python 2. Follow these instructions if you would
like to verify SSL certificates in Python 2. Note, the default libraries do
*not* do certificate checking; you need to do additional work to validate
certificates yourself.
This needs the following packages installed:
* pyOpenSSL (tested with 0.13)
* ndg-httpsclient (tested with 0.3.2)
* pyasn1 (tested with 0.1.6)
You can install them with the following command:
pip install pyopenssl ndg-httpsclient pyasn1
To activate certificate checking, call
:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
before you begin making HTTP requests. This can be done in a ``sitecustomize``
module, or at any other time before your application begins using ``urllib3``,
like this::
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
Now you can use :mod:`urllib3` as you normally would, and it will support SNI
when the required modules are installed.
Activating this module also has the positive side effect of disabling SSL/TLS
compression in Python 2 (see `CRIME attack`_).
If you want to configure the default list of supported cipher suites, you can
set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
Module Variables
----------------
:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
'''
from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
import OpenSSL.SSL
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type import univ, constraint
from socket import _fileobject, timeout
import ssl
import select
from cStringIO import StringIO
from .. import connection
from .. import util
__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
# SNI only *really* works if we can read the subjectAltName of certificates.
HAS_SNI = SUBJ_ALT_NAME_SUPPORT
# Map from urllib3 to PyOpenSSL compatible parameter-values.
_openssl_versions = {
ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD,
ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
}
_openssl_verify = {
ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
}
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM over any AES-CBC for better performance and security,
# - use 3DES as fallback which is secure but slow,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
"ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
"!aNULL:!MD5:!DSS"
orig_util_HAS_SNI = util.HAS_SNI
orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket
def inject_into_urllib3():
'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
connection.ssl_wrap_socket = ssl_wrap_socket
util.HAS_SNI = HAS_SNI
def extract_from_urllib3():
'Undo monkey-patching by :func:`inject_into_urllib3`.'
connection.ssl_wrap_socket = orig_connection_ssl_wrap_socket
util.HAS_SNI = orig_util_HAS_SNI
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
class SubjectAltName(BaseSubjectAltName):
'''ASN.1 implementation for subjectAltNames support'''
# There is no limit to how many subjectAltNames a certificate may have,
# however this needs to have some limit so we'll set an arbitrarily high
# limit.
sizeSpec = univ.SequenceOf.sizeSpec + \
constraint.ValueSizeConstraint(1, 1024)
### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
def get_subj_alt_name(peer_cert):
# Search through extensions
dns_name = []
if not SUBJ_ALT_NAME_SUPPORT:
return dns_name
general_names = SubjectAltName()
for i in range(peer_cert.get_extension_count()):
ext = peer_cert.get_extension(i)
ext_name = ext.get_short_name()
if ext_name != 'subjectAltName':
continue
# PyOpenSSL returns extension data in ASN.1 encoded form
ext_dat = ext.get_data()
decoded_dat = der_decoder.decode(ext_dat,
asn1Spec=general_names)
for name in decoded_dat:
if not isinstance(name, SubjectAltName):
continue
for entry in range(len(name)):
component = name.getComponentByPosition(entry)
if component.getName() != 'dNSName':
continue
dns_name.append(str(component.getComponent()))
return dns_name
class fileobject(_fileobject):
def _wait_for_sock(self):
rd, wd, ed = select.select([self._sock], [], [],
self._sock.gettimeout())
if not rd:
raise timeout()
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except OpenSSL.SSL.WantReadError:
self._wait_for_sock()
continue
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
class WrappedSocket(object):
'''API-compatibility wrapper for Python OpenSSL's Connection-class.'''
def __init__(self, connection, socket):
self.connection = connection
self.socket = socket
def fileno(self):
return self.socket.fileno()
def makefile(self, mode, bufsize=-1):
return fileobject(self.connection, mode, bufsize)
def settimeout(self, timeout):
return self.socket.settimeout(timeout)
def sendall(self, data):
return self.connection.sendall(data)
def close(self):
return self.connection.shutdown()
def getpeercert(self, binary_form=False):
x509 = self.connection.get_peer_certificate()
if not x509:
return x509
if binary_form:
return OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_ASN1,
x509)
return {
'subject': (
(('commonName', x509.get_subject().CN),),
),
'subjectAltName': [
('DNS', value)
for value in get_subj_alt_name(x509)
]
}
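# For reference, getpeercert() above mirrors the stdlib ssl module's dict
# shape; with placeholder values it looks like:
#   {'subject': ((('commonName', 'example.com'),),),
#    'subjectAltName': [('DNS', 'example.com'), ('DNS', 'www.example.com')]}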
def _verify_callback(cnx, x509, err_no, err_depth, return_code):
return err_no == 0
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
ctx = OpenSSL.SSL.Context(_openssl_versions[ssl_version])
if certfile:
ctx.use_certificate_file(certfile)
if keyfile:
ctx.use_privatekey_file(keyfile)
if cert_reqs != ssl.CERT_NONE:
ctx.set_verify(_openssl_verify[cert_reqs], _verify_callback)
if ca_certs:
try:
ctx.load_verify_locations(ca_certs, None)
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
else:
ctx.set_default_verify_paths()
    # Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
ctx.set_options(OP_NO_COMPRESSION)
# Set list of supported ciphersuites.
ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)
cnx = OpenSSL.SSL.Connection(ctx, sock)
cnx.set_tlsext_host_name(server_hostname)
cnx.set_connect_state()
while True:
try:
cnx.do_handshake()
except OpenSSL.SSL.WantReadError:
select.select([sock], [], [])
continue
except OpenSSL.SSL.Error as e:
raise ssl.SSLError('bad handshake', e)
break
return WrappedSocket(cnx, sock)
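# A minimal usage sketch (assumed, not part of the original module): wrap an
# already-connected TCP socket; the host name and CA bundle path are
# placeholders, and PROTOCOL_SSLv23 is assumed to be a key of _openssl_versions.
#
#   import socket
#   raw = socket.create_connection(('example.com', 443))
#   tls = ssl_wrap_socket(raw, cert_reqs=ssl.CERT_REQUIRED,
#                         ca_certs='/etc/ssl/certs/ca-certificates.crt',
#                         server_hostname='example.com',
#                         ssl_version=ssl.PROTOCOL_SSLv23)
#   tls.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
#   print(tls.makefile('rb').read())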
|
DomainGroupOSS/luigi
|
refs/heads/master
|
luigi/interface.py
|
2
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the bindings for command line integration and dynamic loading of tasks
"""
import argparse
import logging
import logging.config
import os
import sys
import tempfile
from luigi import configuration
from luigi import lock
from luigi import parameter
from luigi import rpc
from luigi import scheduler
from luigi import task
from luigi import worker
from luigi.task_register import Register
def setup_interface_logging(conf_file=None):
# use a variable in the function object to determine if it has run before
if getattr(setup_interface_logging, "has_run", False):
return
if conf_file is None:
logger = logging.getLogger('luigi-interface')
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s: %(message)s')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
else:
logging.config.fileConfig(conf_file, disable_existing_loggers=False)
setup_interface_logging.has_run = True
class core(task.Config):
''' Keeps track of a bunch of environment params.
Uses the internal luigi parameter mechanism.
The nice thing is that we can instantiate this class
and get an object with all the environment variables set.
This is arguably a bit of a hack.
'''
use_cmdline_section = False
local_scheduler = parameter.BoolParameter(
default=False,
description='Use local scheduling')
scheduler_host = parameter.Parameter(
default='localhost',
description='Hostname of machine running remote scheduler',
config_path=dict(section='core', name='default-scheduler-host'))
scheduler_port = parameter.IntParameter(
default=8082,
description='Port of remote scheduler api process',
config_path=dict(section='core', name='default-scheduler-port'))
lock_size = parameter.IntParameter(
default=1,
description="Maximum number of workers running the same command")
no_lock = parameter.BoolParameter(
default=False,
description='Ignore if similar process is already running')
lock_pid_dir = parameter.Parameter(
default=os.path.join(tempfile.gettempdir(), 'luigi'),
description='Directory to store the pid file')
workers = parameter.IntParameter(
default=1,
description='Maximum number of parallel tasks to run')
logging_conf_file = parameter.Parameter(
default=None,
description='Configuration file for logging')
module = parameter.Parameter(
default=None,
description='Used for dynamic loading of modules') # see DynamicArgParseInterface
parallel_scheduling = parameter.BoolParameter(
default=False,
description='Use multiprocessing to do scheduling in parallel.')
assistant = parameter.BoolParameter(
default=False,
description='Run any task from the scheduler.')
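# The parameters above double as command-line flags and config-file entries;
# an assumed mapping based on the config_path values given:
#   --local-scheduler        (core.local_scheduler)
#   --scheduler-host HOST    ([core] default-scheduler-host)
#   --workers N              (core.workers)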
class WorkerSchedulerFactory(object):
def create_local_scheduler(self):
return scheduler.CentralPlannerScheduler(prune_on_get_work=True)
def create_remote_scheduler(self, host, port):
return rpc.RemoteScheduler(host=host, port=port)
def create_worker(self, scheduler, worker_processes, assistant=False):
return worker.Worker(
scheduler=scheduler, worker_processes=worker_processes, assistant=assistant)
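# A hypothetical customization sketch: a caller can hand its own factory to
# Interface.run()/build() below to tweak how workers are constructed; the
# keep_alive flag here is illustrative only.
#
#   class KeepAliveSchedulerFactory(WorkerSchedulerFactory):
#       def create_worker(self, scheduler, worker_processes, assistant=False):
#           return worker.Worker(scheduler=scheduler,
#                                worker_processes=worker_processes,
#                                assistant=assistant, keep_alive=True)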
class Interface(object):
def parse(self):
raise NotImplementedError
@staticmethod
def run(tasks, worker_scheduler_factory=None, override_defaults=None):
"""
:param tasks:
:param worker_scheduler_factory:
:param override_defaults:
:return: True if all tasks and their dependencies were successfully run (or already completed);
False if any error occurred.
"""
if worker_scheduler_factory is None:
worker_scheduler_factory = WorkerSchedulerFactory()
if override_defaults is None:
override_defaults = {}
env_params = core(**override_defaults)
# search for logging configuration path first on the command line, then
# in the application config file
logging_conf = env_params.logging_conf_file
if logging_conf is not None and not os.path.exists(logging_conf):
raise Exception(
"Error: Unable to locate specified logging configuration file!"
)
if not configuration.get_config().getboolean(
'core', 'no_configure_logging', False):
setup_interface_logging(logging_conf)
if (not env_params.no_lock and
not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))):
sys.exit(1)
if env_params.local_scheduler:
sch = worker_scheduler_factory.create_local_scheduler()
else:
sch = worker_scheduler_factory.create_remote_scheduler(
host=env_params.scheduler_host,
port=env_params.scheduler_port)
w = worker_scheduler_factory.create_worker(
scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant)
success = True
for t in tasks:
success &= w.add(t, env_params.parallel_scheduling)
logger = logging.getLogger('luigi-interface')
logger.info('Done scheduling tasks')
if env_params.workers != 0:
success &= w.run()
w.stop()
return success
# Simple unweighted Levenshtein distance
def _editdistance(a, b):
r0 = range(0, len(b) + 1)
r1 = [0] * (len(b) + 1)
for i in range(0, len(a)):
r1[0] = i + 1
for j in range(0, len(b)):
            c = 0 if a[i] == b[j] else 1  # compare characters by value, not identity
r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c)
r0 = r1[:]
return r1[len(b)]
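# Worked example: _editdistance('kitten', 'sitting') == 3
# (substitute k->s, substitute e->i, append g).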
def error_task_names(task_name, task_names):
weighted_tasks = [(_editdistance(task_name, task_name_2), task_name_2) for task_name_2 in task_names]
ordered_tasks = sorted(weighted_tasks, key=lambda pair: pair[0])
candidates = [task for (dist, task) in ordered_tasks if dist <= 5 and dist < len(task)]
display_string = ""
if candidates:
display_string = "No task %s. Did you mean:\n%s" % (task_name, '\n'.join(candidates))
else:
display_string = "No task %s." % task_name
raise SystemExit(display_string)
def add_task_parameters(parser, task_cls):
for param_name, param in task_cls.get_params():
param.add_to_cmdline_parser(parser, param_name, task_cls.task_family, glob=False)
def get_global_parameters():
seen_params = set()
for task_name, is_without_section, param_name, param in Register.get_all_params():
if param in seen_params:
continue
seen_params.add(param)
yield task_name, is_without_section, param_name, param
def add_global_parameters(parser):
for task_name, is_without_section, param_name, param in get_global_parameters():
param.add_to_cmdline_parser(parser, param_name, task_name, glob=True, is_without_section=is_without_section)
def get_task_parameters(task_cls, args):
# Parse a str->str dict to the correct types
params = {}
for param_name, param in task_cls.get_params():
param.parse_from_args(param_name, task_cls.task_family, args, params)
return params
def set_global_parameters(args):
# Note that this is not side effect free
for task_name, is_without_section, param_name, param in get_global_parameters():
param.set_global_from_args(param_name, task_name, args, is_without_section=is_without_section)
class ArgParseInterface(Interface):
"""
Takes the task as the command, with parameters specific to it.
"""
def parse_task(self, cmdline_args=None):
if cmdline_args is None:
cmdline_args = sys.argv[1:]
parser = argparse.ArgumentParser()
add_global_parameters(parser)
task_names = Register.task_names()
# Parse global arguments and pull out the task name.
# We used to do this using subparsers+command, but some issues with
# argparse across different versions of Python (2.7.9) made it hard.
args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help'])
if len(unknown) == 0:
# In case it included a --help argument, run again
parser.parse_known_args(args=cmdline_args)
raise SystemExit('No task specified')
task_name = unknown[0]
if task_name not in task_names:
error_task_names(task_name, task_names)
task_cls = Register.get_task_cls(task_name)
# Add a subparser to parse task-specific arguments
subparsers = parser.add_subparsers(dest='command')
subparser = subparsers.add_parser(task_name)
# Add both task and global params here so that we can support both:
# test.py --global-param xyz Test --n 42
# test.py Test --n 42 --global-param xyz
add_global_parameters(subparser)
add_task_parameters(subparser, task_cls)
# Workaround for bug in argparse for Python 2.7.9
# See https://mail.python.org/pipermail/python-dev/2015-January/137699.html
subargs = parser.parse_args(args=cmdline_args)
for key, value in vars(subargs).items():
if value: # Either True (for boolean args) or non-None (everything else)
setattr(args, key, value)
# Notice that this is not side effect free because it might set global params
set_global_parameters(args)
task_params = get_task_parameters(task_cls, args)
return [task_cls(**task_params)]
def parse(self, cmdline_args=None):
return self.parse_task(cmdline_args)
class DynamicArgParseInterface(ArgParseInterface):
"""
Uses --module as a way to load modules dynamically
Usage:
.. code-block:: console
python whatever.py --module foo_module FooTask --blah xyz --x 123
This will dynamically import foo_module and then try to create FooTask from this.
"""
def parse(self, cmdline_args=None):
if cmdline_args is None:
cmdline_args = sys.argv[1:]
parser = argparse.ArgumentParser()
add_global_parameters(parser)
args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help'])
module = args.module
__import__(module)
return self.parse_task(cmdline_args)
def run(cmdline_args=None, main_task_cls=None,
worker_scheduler_factory=None, use_dynamic_argparse=False, local_scheduler=False):
"""
    Please don't use. Instead, use the `luigi` binary.
Run from cmdline using argparse.
:param cmdline_args:
:param main_task_cls:
:param worker_scheduler_factory:
:param use_dynamic_argparse:
:param local_scheduler:
"""
if cmdline_args is None:
cmdline_args = sys.argv[1:]
if use_dynamic_argparse:
interface = DynamicArgParseInterface()
else:
interface = ArgParseInterface()
if main_task_cls:
cmdline_args.insert(0, main_task_cls.task_family)
if local_scheduler:
cmdline_args.insert(0, '--local-scheduler')
tasks = interface.parse(cmdline_args)
return interface.run(tasks, worker_scheduler_factory)
def build(tasks, worker_scheduler_factory=None, **env_params):
"""
Run internally, bypassing the cmdline parsing.
Useful if you have some luigi code that you want to run internally.
Example:
.. code-block:: python
luigi.build([MyTask1(), MyTask2()], local_scheduler=True)
One notable difference is that `build` defaults to not using
the identical process lock. Otherwise, `build` would only be
callable once from each process.
:param tasks:
:param worker_scheduler_factory:
:param env_params:
:return:
"""
if "no_lock" not in env_params:
env_params["no_lock"] = True
Interface.run(tasks, worker_scheduler_factory, override_defaults=env_params)
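# A hypothetical script entry point built on the helpers above (whatever.py is
# a placeholder module defining the tasks):
#
#   if __name__ == '__main__':
#       run(use_dynamic_argparse=True)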
|
GitAngel/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/lookuperror_a/models.py
|
103
|
from django.db import models
class A1(models.Model):
pass
class A2(models.Model):
pass
class A3(models.Model):
b2 = models.ForeignKey('lookuperror_b.B2')
c2 = models.ForeignKey('lookuperror_c.C2')
class A4(models.Model):
pass
|
cetic/ansible
|
refs/heads/devel
|
lib/ansible/modules/database/mssql/mssql_db.py
|
8
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
# Outline and parts are reused from Mark Theunissen's mysql_db module
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mssql_db
short_description: Add or remove MSSQL databases from a remote host.
description:
- Add or remove MSSQL databases from a remote host.
version_added: "2.2"
options:
name:
description:
- name of the database to add or remove
required: true
default: null
aliases: [ db ]
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
login_port:
description:
      - Port of the MSSQL server. Requires login_host to be defined as something other than localhost if login_port is used
required: false
default: 1433
state:
description:
- The database state
required: false
default: present
choices: [ "present", "absent", "import" ]
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
        files (C(.sql)) are supported.
required: false
autocommit:
description:
      - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
within a transaction.
required: false
default: false
choices: [ "false", "true" ]
notes:
- Requires the pymssql Python package on the remote host. For Ubuntu, this
is as easy as pip install pymssql (See M(pip).)
requirements:
- python >= 2.7
- pymssql
author: Vedit Firat Arig
'''
EXAMPLES = '''
# Create a new database with name 'jackdata'
- mssql_db:
name: jackdata
state: present
# Copy database dump file to remote host and restore it to database 'my_db'
- copy:
src: dump.sql
dest: /tmp
- mssql_db:
name: my_db
state: import
target: /tmp/dump.sql
'''
RETURN = '''
#
'''
import os
try:
import pymssql
except ImportError:
mssql_found = False
else:
mssql_found = True
from ansible.module_utils.basic import AnsibleModule
def db_exists(conn, cursor, db):
cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
conn.commit()
return bool(cursor.rowcount)
def db_create(conn, cursor, db):
cursor.execute("CREATE DATABASE [%s]" % db)
return db_exists(conn, cursor, db)
def db_delete(conn, cursor, db):
try:
cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
    except Exception:  # best effort: ignore failures when forcing single-user mode
pass
cursor.execute("DROP DATABASE [%s]" % db)
return not db_exists(conn, cursor, db)
def db_import(conn, cursor, module, db, target):
if os.path.isfile(target):
backup = open(target, 'r')
try:
sqlQuery = "USE [%s]\n" % db
for line in backup:
if line is None:
break
elif line.startswith('GO'):
cursor.execute(sqlQuery)
sqlQuery = "USE [%s]\n" % db
else:
sqlQuery += line
cursor.execute(sqlQuery)
conn.commit()
finally:
backup.close()
return 0, "import successful", ""
else:
return 1, "cannot find target file", "cannot find target file"
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['db']),
login_user=dict(default=''),
login_password=dict(default='', no_log=True),
login_host=dict(required=True),
login_port=dict(default='1433'),
target=dict(default=None),
autocommit=dict(type='bool', default=False),
state=dict(
default='present', choices=['present', 'absent', 'import'])
)
)
if not mssql_found:
module.fail_json(msg="pymssql python module is required")
db = module.params['name']
state = module.params['state']
autocommit = module.params['autocommit']
target = module.params["target"]
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_querystring = login_host
if login_port != "1433":
login_querystring = "%s:%s" % (login_host, login_port)
if login_user != "" and login_password == "":
module.fail_json(msg="when supplying login_user arguments login_password must be provided")
try:
conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
cursor = conn.cursor()
except Exception as e:
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
"@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
conn.autocommit(True)
changed = False
if db_exists(conn, cursor, db):
if state == "absent":
try:
changed = db_delete(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error deleting database: " + str(e))
elif state == "import":
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
else:
if state == "present":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
elif state == "import":
try:
changed = db_create(conn, cursor, db)
except Exception as e:
module.fail_json(msg="error creating database: " + str(e))
conn.autocommit(autocommit)
rc, stdout, stderr = db_import(conn, cursor, module, db, target)
if rc != 0:
module.fail_json(msg="%s" % stderr)
else:
module.exit_json(changed=True, db=db, msg=stdout)
module.exit_json(changed=changed, db=db)
if __name__ == '__main__':
main()
|
Swordf1sh/Moderat
|
refs/heads/master
|
modules/mlogviewer/main.py
|
1
|
import main_ui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import os
from libs.wav_factory import spectrum_analyzer_image, audio_duration
class mainPopup(QWidget, main_ui.Ui_Form):
def __init__(self, args):
QWidget.__init__(self)
self.setupUi(self)
self.anim = QPropertyAnimation(self, 'windowOpacity')
self.anim.setDuration(500)
self.anim.setStartValue(0)
self.anim.setEndValue(1)
self.anim.start()
self.moderat = args['moderat']
self.client_id = args['client']
self.client_alias = args['alias']
self.client_ip_address = args['ip_address']
self.module_id = args['module_id']
self.p2p = args['p2p']
title_prefix = self.client_alias if len(self.client_alias) > 0 else self.client_ip_address
self.setWindowTitle(u'[{}] {}'.format(title_prefix, self.moderat.MString('VIEWER_WINDOW_TITLE')))
self.plots = {}
# resize audio.spectrum column
self.audioTable.setColumnWidth(1, 570)
self.date = str(self.timeCalendar.selectedDate().toPyDate())
# update gui
self.gui = QApplication.processEvents
self.screenshots_dict = {}
self.keylogs_dict = {}
self.audio_dict = {}
# Triggers
self.timeCalendar.clicked.connect(self.check_data_counts)
self.downloadButton.clicked.connect(self.download_logs)
self.screenshotsTable.doubleClicked.connect(self.open_screenshot)
self.keylogsTable.doubleClicked.connect(self.open_keylog)
self.audioTable.doubleClicked.connect(self.open_audio)
# Init
self.init_ui()
self.set_language()
self.check_data_counts()
def signal(self, data):
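        # Responses from the remote module land here and are forwarded to
        # whichever handler was last registered in self.callback (set by
        # check_data_counts, download_logs, etc. below).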
self.callback(data)
def init_ui(self):
self.clientIdLine.setText(self.client_id)
self.clientAliasLine.setText(self.client_alias)
self.clientIpLine.setText(self.client_ip_address)
# Hide Progress Bar
self.downloadProgress.setHidden(True)
self.downloadedLabel.setHidden(True)
# Hide Path Columns
self.screenshotsTable.setColumnHidden(2, True)
self.keylogsTable.setColumnHidden(2, True)
self.audioTable.setColumnHidden(3, True)
def set_language(self):
self.logsTab.setTabText(0, self.moderat.MString('VIEWER_SCREENSHOTS_TAB'))
self.logsTab.setTabText(1, self.moderat.MString('VIEWER_KEYLOGS_TAB'))
self.logsTab.setTabText(2, self.moderat.MString('VIEWER_AUDIO_TAB'))
self.screenshotsTable.horizontalHeaderItem(0).setText(self.moderat.MString('VIEWER_SCREENSHOT_PREVIEW'))
self.screenshotsTable.horizontalHeaderItem(1).setText(self.moderat.MString('VIEWER_SCREENSHOT_INFO'))
self.keylogsTable.horizontalHeaderItem(0).setText(self.moderat.MString('VIEWER_KEYLOGS_DATETIME'))
self.keylogsTable.horizontalHeaderItem(1).setText(self.moderat.MString('VIEWER_KEYLOGS_INFO'))
self.audioTable.horizontalHeaderItem(0).setText(self.moderat.MString('VIEWER_AUDIO_DURATION'))
self.audioTable.horizontalHeaderItem(1).setText(self.moderat.MString('VIEWER_AUDIO_SPECTRUM'))
self.audioTable.horizontalHeaderItem(2).setText(self.moderat.MString('VIEWER_AUDIO_DATETIME'))
self.clientIdLabel.setText(self.moderat.MString('VIEWER_CLIENT_ID'))
self.clientAliasLabel.setText(self.moderat.MString('VIEWER_CLIENT_ALIAS'))
self.clientIpLabel.setText(self.moderat.MString('VIEWER_CLIENT_IP'))
self.downloadGroup.setTitle(self.moderat.MString('VIEWER_DOWNLOAD_GROUP_TITLE'))
self.ignoreViewedCheck.setText(self.moderat.MString('VIEWER_IGNOR_VIEWED'))
self.downloadButton.setText(self.moderat.MString('VIEWER_DOWNLOAD'))
def check_data_counts(self):
'''
Send Count Logs Signal
:return:
'''
self.update_date()
self.moderat.send_message('%s %s' % (self.client_id, self.date),
'countData',
session_id=self.moderat.session_id,
module_id=self.module_id,
p2p=self.p2p)
self.callback = self.recv_data_counts
def recv_data_counts(self, data):
'''
Receive Count Logs
@:type data: dict
:param data: received data
:return: Set Count in Labels
'''
counted_logs = data['payload']
self.screenshotsCountNewLabel.setText(str(counted_logs['screenshots']['new']))
self.screenshotsCountOldLabel.setText(str(counted_logs['screenshots']['old']))
self.keylogsCountNewLabel.setText(str(counted_logs['keylogs']['new']))
self.keylogsCountOldLabel.setText(str(counted_logs['keylogs']['old']))
self.audioCountNewLabel.setText(str(counted_logs['audio']['new']))
self.audioCountOldLabel.setText(str(counted_logs['audio']['old']))
def update_date(self):
'''
:return: Update Global Date Variable
'''
self.date = str(self.timeCalendar.selectedDate().toPyDate())
def open_screenshot(self):
'''
:return: Open Screenshot In System Default Image Viewer
'''
current_screenshot_path = str(self.screenshotsTable.item(self.screenshotsTable.currentRow(), 2).text())
os.startfile(current_screenshot_path)
def open_keylog(self):
'''
:return: Open Keylogs In System Default Browser
'''
current_keylog_path = str(self.keylogsTable.item(self.keylogsTable.currentRow(), 2).text())
os.startfile(current_keylog_path)
def open_audio(self):
'''
:return: Open Audio In System Default Audio Player
'''
current_audio_path = str(self.audioTable.item(self.audioTable.currentRow(), 3).text())
os.startfile(current_audio_path)
def download_logs(self):
self.update_date()
download_info = {
'screenshot': self.screenshotsEnableButton.isChecked(),
'keylog': self.keylogsEnableButton.isChecked(),
'audio': self.audioEnableButton.isChecked(),
'filter': self.ignoreViewedCheck.isChecked(),
'client_id': self.client_id,
'date': self.date,
}
# Init Dirs
self.screenshots_dir = os.path.join(self.moderat.DATA, self.client_id, self.date, 'SCREENSHOTS')
self.keylogs_dir = os.path.join(self.moderat.DATA, self.client_id, self.date, 'KEYLOGS')
self.audios_dir = os.path.join(self.moderat.DATA, self.client_id, self.date, 'AUDIOS')
self.spectrums_dir = os.path.join(self.moderat.DATA, self.client_id, self.date, 'AUDIOS')
if not os.path.exists(self.screenshots_dir):
os.makedirs(self.screenshots_dir)
if not os.path.exists(self.keylogs_dir):
os.makedirs(self.keylogs_dir)
if not os.path.exists(self.audios_dir):
os.makedirs(self.audios_dir)
self.moderat.send_message(download_info,
'downloadLogs',
module_id=self.module_id,
session_id=self.moderat.session_id,
p2p=self.p2p)
self.callback = self.recv_download_logs
def recv_download_logs(self, data):
self.downloading_screenshots_count = data['payload']['screenshots']
self.downloaded_screenshots = 0
self.downloading_keylogs_count = data['payload']['keylogs']
self.downloaded_keylogs = 0
self.downloading_audios_count = data['payload']['audios']
self.downloaded_audios = 0
        # Prepare progress bar
self.downloadProgress.setHidden(False)
self.downloadedLabel.setHidden(False)
self.callback = self.recv_log
def recv_log(self, data):
type = data['payload']['type']
if type == 'screenshot':
self.downloaded_screenshots += 1
self.downloadProgress.setValue(self.downloaded_screenshots*100/self.downloading_screenshots_count)
self.downloadedLabel.setText('Downloaded {screenshot} Screenshots From {screenshots}'.format(
screenshot=self.downloaded_screenshots,
screenshots=self.downloading_screenshots_count
))
self.screenshotsTable.setRowCount(self.downloading_screenshots_count)
# Generate File
path = os.path.join(self.screenshots_dir, data['payload']['datetime']+'.png')
if not os.path.exists(path):
with open(path, 'wb') as screenshot_file:
screenshot_file.write(data['payload']['raw'])
# add screenshot preview
image = QImage(path)
pixmap = QPixmap.fromImage(image)
previews_dict = QLabel()
previews_dict.setPixmap(pixmap.scaled(200, 200, Qt.KeepAspectRatio))
previews_dict.setScaledContents(True)
self.screenshotsTable.setCellWidget(self.downloaded_screenshots-1, 0, previews_dict)
# add screenshot information
payload = '''
<p align="center"><font color="#e67e22">%s</font></p>
%s
''' % (data['payload']['datetime'], data['payload']['window_title'])
infoText = QTextEdit()
infoText.setReadOnly(True)
infoText.setStyleSheet('background: #2c3e50;\nborder: 1px ridge;\nborder-color: #2c3e50;\nborder-top: none;\npadding: 3px;')
infoText.insertHtml(payload)
self.screenshotsTable.setCellWidget(self.downloaded_screenshots-1, 1, infoText)
# add path
item = QTableWidgetItem(path)
self.screenshotsTable.setItem(self.downloaded_screenshots-1, 2, item)
elif type == 'keylog':
self.downloaded_keylogs += 1
self.downloadProgress.setValue(self.downloaded_keylogs*100/self.downloading_keylogs_count)
self.downloadedLabel.setText('Downloaded {keylog} Keylog From {keylogs}'.format(
keylog=self.downloaded_keylogs,
keylogs=self.downloading_keylogs_count
))
# Generate File
path = os.path.join(self.keylogs_dir, data['payload']['datetime']+'.html')
if not os.path.exists(path):
with open(path, 'wb') as screenshot_file:
screenshot_file.write(data['payload']['raw'])
self.keylogsTable.setRowCount(self.downloading_keylogs_count)
# Add Data
item = QTableWidgetItem(data['payload']['datetime'])
item.setTextColor(QColor('#f39c12'))
self.keylogsTable.setItem(self.downloaded_keylogs-1, 0, item)
# Add Preview
keylog_preview = open(path, 'r').readline()
item = QTableWidgetItem(keylog_preview)
self.keylogsTable.setItem(self.downloaded_keylogs-1, 1, item)
# Add Path
item = QTableWidgetItem(path)
self.keylogsTable.setItem(self.downloaded_keylogs-1, 2, item)
elif type == 'audio':
self.downloaded_audios += 1
self.downloadProgress.setValue(self.downloaded_audios*100/self.downloading_audios_count)
self.downloadedLabel.setText('Downloaded {audio} Audio From {audios}'.format(
audio=self.downloaded_audios,
audios=self.downloading_audios_count
))
# Generate File
path = os.path.join(self.audios_dir, data['payload']['datetime']+'.wav')
if not os.path.exists(path):
with open(path, 'wb') as audio_file:
audio_file.write(data['payload']['raw'])
self.audioTable.setRowCount(self.downloading_audios_count)
# Add Audio Duration
item = QTableWidgetItem(audio_duration(path))
item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
item.setTextColor(QColor('#16a085'))
self.audioTable.setItem(self.downloaded_audios-1, 0, item)
# Add Spectrum
generated_spectrum = spectrum_analyzer_image(path, data['payload']['datetime'], self.spectrums_dir)
image = QImage(generated_spectrum)
pixmap = QPixmap.fromImage(image)
spectrum_image = QLabel()
spectrum_image.setStyleSheet('background: none;')
spectrum_image.setPixmap(pixmap)
self.audioTable.setCellWidget(self.downloaded_audios-1, 1, spectrum_image)
# add date time
item = QTableWidgetItem(data['payload']['datetime'])
item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)
item.setTextColor(QColor('#f39c12'))
self.audioTable.setItem(self.downloaded_audios-1, 2, item)
# add path
item = QTableWidgetItem(path)
self.audioTable.setItem(self.downloaded_audios-1, 3, item)
else:
            # Prepare progress bar
self.downloadProgress.setHidden(True)
self.downloadedLabel.setHidden(True)
self.downloaded_screenshots = 0
self.downloaded_keylogs = 0
self.downloaded_audios = 0
self.check_data_counts()
|
flycn1985/shadowsocks
|
refs/heads/master
|
tests/test.py
|
1016
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import signal
import select
import time
import argparse
from subprocess import Popen, PIPE
python = ['python']
default_url = 'http://localhost/'
parser = argparse.ArgumentParser(description='test Shadowsocks')
parser.add_argument('-c', '--client-conf', type=str, default=None)
parser.add_argument('-s', '--server-conf', type=str, default=None)
parser.add_argument('-a', '--client-args', type=str, default=None)
parser.add_argument('-b', '--server-args', type=str, default=None)
parser.add_argument('--with-coverage', action='store_true', default=None)
parser.add_argument('--should-fail', action='store_true', default=None)
parser.add_argument('--tcp-only', action='store_true', default=None)
parser.add_argument('--url', type=str, default=default_url)
parser.add_argument('--dns', type=str, default='8.8.8.8')
config = parser.parse_args()
if config.with_coverage:
python = ['coverage', 'run', '-p', '-a']
client_args = python + ['shadowsocks/local.py', '-v']
server_args = python + ['shadowsocks/server.py', '-v']
if config.client_conf:
client_args.extend(['-c', config.client_conf])
if config.server_conf:
server_args.extend(['-c', config.server_conf])
else:
server_args.extend(['-c', config.client_conf])
if config.client_args:
client_args.extend(config.client_args.split())
if config.server_args:
server_args.extend(config.server_args.split())
else:
server_args.extend(config.client_args.split())
if config.url == default_url:
server_args.extend(['--forbidden-ip', ''])
p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p3 = None
p4 = None
p3_fin = False
p4_fin = False
# 1 shadowsocks started
# 2 curl started
# 3 curl finished
# 4 dig started
# 5 dig finished
stage = 1
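# curl is launched while in stage 1 (after a short grace period), and its
# stdout reaching EOF moves the test to stage 3; dig is launched once the curl
# check passes, and its EOF moves the test to stage 5.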
try:
local_ready = False
server_ready = False
fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
while True:
r, w, e = select.select(fdset, [], fdset)
if e:
break
for fd in r:
line = fd.readline()
if not line:
if stage == 2 and fd == p3.stdout:
stage = 3
if stage == 4 and fd == p4.stdout:
stage = 5
if bytes != str:
line = str(line, 'utf8')
sys.stderr.write(line)
if line.find('starting local') >= 0:
local_ready = True
if line.find('starting server') >= 0:
server_ready = True
if stage == 1:
time.sleep(2)
p3 = Popen(['curl', config.url, '-v', '-L',
'--socks5-hostname', '127.0.0.1:1081',
'-m', '15', '--connect-timeout', '10'],
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if p3 is not None:
fdset.append(p3.stdout)
fdset.append(p3.stderr)
stage = 2
else:
sys.exit(1)
if stage == 3 and p3 is not None:
fdset.remove(p3.stdout)
fdset.remove(p3.stderr)
r = p3.wait()
if config.should_fail:
if r == 0:
sys.exit(1)
else:
if r != 0:
sys.exit(1)
if config.tcp_only:
break
p4 = Popen(['socksify', 'dig', '@%s' % config.dns,
'www.google.com'],
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if p4 is not None:
fdset.append(p4.stdout)
fdset.append(p4.stderr)
stage = 4
else:
sys.exit(1)
if stage == 5:
r = p4.wait()
if config.should_fail:
if r == 0:
sys.exit(1)
print('test passed (expecting failure)')
else:
if r != 0:
sys.exit(1)
print('test passed')
break
finally:
for p in [p1, p2]:
try:
os.kill(p.pid, signal.SIGINT)
os.waitpid(p.pid, 0)
except OSError:
pass
|
emilyemorehouse/flask-restless
|
refs/heads/master
|
docs/conf.py
|
10
|
# -*- coding: utf-8 -*-
#
# Flask-Restless documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 2 00:35:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
extensions += ['sphinxcontrib.httpdomain']
extensions += ['sphinxcontrib.issuetracker']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-Restless'
copyright = u'2012, 2013, 2014, 2015 Jeffrey Finkelstein'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '0.3'
# The full version, including alpha/beta/rc tags.
#release = '0.3-dev'
import pkg_resources
try:
release = pkg_resources.get_distribution('Flask-Restless').version
except pkg_resources.DistributionNotFound:
print 'To build the documentation, the distribution information of'
print 'Flask-Restless has to be available. Either install the package'
print 'into your development environment or run "setup.py develop"'
print 'to setup the metadata. A virtualenv is recommended!'
sys.exit(1)
del pkg_resources
if 'dev' in release:
release = release.split('dev')[0] + 'dev'
version = '.'.join(release.split('.')[:2])
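# For example (hypothetical values): a distribution version of '1.0.0.dev1'
# yields release '1.0.0.dev' and version '1.0'.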
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
pygments_style = 'flask_theme_support.FlaskyStyle'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': 'flask-restless.png'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Restlessdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Flask-Restless.tex', u'Flask-Restless Documentation',
u'Jeffrey Finkelstein', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# If false, no module index is generated.
latex_use_modindex = False
latex_elements = {
'fontpkg': r'\usepackage{mathpazo}',
'papersize': 'a4paper',
'pointsize': '12pt',
'preamble': r'\usepackage{flaskstyle}'
}
latex_use_parts = True
latex_additional_files = ['flaskstyle.sty', 'logo.png']
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'flask-restless', u'Flask-Restless Documentation',
[u'Jeffrey Finkelstein'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'flask': ('http://flask.pocoo.org/docs', None),
'sqlalchemy': ('http://sqlalchemy.org/docs', None),
'flasksqlalchemy':
('http://packages.python.org/Flask-SQLAlchemy',
None),
'flasklogin':
('https://flask-login.readthedocs.org/en/latest',
None)}
# fall back if theme is not there
try:
__import__('flask_theme_support')
except ImportError, e:
print '-' * 74
print 'Warning: Flask themes unavailable. Building with default theme'
print 'If you want the Flask themes, run this command and build again:'
print
print ' git submodule update --init'
print '-' * 74
pygments_style = 'tango'
html_theme = 'default'
html_theme_options = {}
# Configuration for issuetracker extension.
issuetracker = 'github'
issuetracker_project = 'jfinkels/flask-restless'
|
kolsan/StarWarsCarnival
|
refs/heads/master
|
Score-Sounds/starwars08.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#the next line is only needed for python2.x and not necessary for python3.x
from __future__ import print_function, division
import serial
import time
import pygame
import os
import sys
import random
# if using python2, the get_input command needs to act like raw_input:
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
else:
get_input = input # python3
pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
pygame.init()
screen = pygame.display.set_mode((800, 600))
clock = pygame.time.Clock()
start_time = pygame.time.get_ticks()
# look for sound & music files in subfolder 'data'
#pygame.mixer.music.load(os.path.join('data', 'an-turr.ogg'))#load music
_songs = ['data\M_chewbacca.ogg','data\M_imperial_march.ogg','data\M_R2D2_Again.ogg','data\M_rebel-theme.ogg','data\M_star-wars-cantina-song.ogg','data\M_star-wars-theme-song.ogg','data\M_darth_vader_respirando.ogg']
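# Note: the literal backslash-separated paths above only resolve on Windows;
# the os.path.join(...) form used for the sound effects below is portable.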
_currently_playing_song = None
saberup = pygame.mixer.Sound(os.path.join('data','saberdown.wav')) #load sound
fail = pygame.mixer.Sound(os.path.join('data','fail.wav')) #load sound
jump = pygame.mixer.Sound(os.path.join('data','jump.wav')) #load sound
NOK_001_002_003 = pygame.mixer.Sound(os.path.join('data','NOK_001_002_003.ogg')) #load sound
NOK_004_005 = pygame.mixer.Sound(os.path.join('data','NOK_004_005.ogg')) #load sound
OK_001_002_003 = pygame.mixer.Sound(os.path.join('data','OK_001_002_003.ogg')) #load sound
OK_004_005 = pygame.mixer.Sound(os.path.join('data','OK_004_005.ogg')) #load sound
UP_001_002_003 = pygame.mixer.Sound(os.path.join('data','UP_001_002_003.ogg')) #load sound
UP_004_005 = pygame.mixer.Sound(os.path.join('data','UP_004_005.ogg')) #load sound
SONG_END = pygame.USEREVENT + 1
pygame.mixer.music.set_endevent(SONG_END)
#ser = serial.Serial('/dev/ttyACM0', 9600,timeout=0)
#Variable
playing = False
done = False
f = 0
i = 0
LIME = (0,255,0)
gold = (255,215,0)
black = (0,0,0)
texto = ""
font = pygame.font.SysFont("comicsansms", 256)
font2 = pygame.font.SysFont("comicsansms", 64)
text = font.render(texto + str(f), True, gold)
gametime = 0
tiempo = ""
def play_a_different_song():
global _currently_playing_song, _songs
next_song = random.choice(_songs)
while next_song == _currently_playing_song:
next_song = random.choice(_songs)
_currently_playing_song = next_song
pygame.mixer.music.load(next_song)
pygame.mixer.music.play()
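# The retry loop above picks a different track as long as _songs holds more
# than one entry; with a single entry it would never terminate.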
while not done:
#print (gametime)
#print (time.time())
if (time.time () > gametime) and playing :
playing = False
#gametime = 0
print ("End of the game!!")
print (gametime)
print (time.time())
pygame.mixer.music.stop()
#ser.write ('2\n')
if playing:
tiempo = str(gametime - time.time())
#print (gametime - time.time())
# message = ser.readline()
message = "hhhh"
if message !="":
#print(message + "\n")
#print(message[0:5] + "\n")
if message[0:5] == "go001":
UP_001_002_003.play()
if message[0:5] == "go002":
UP_001_002_003.play()
if message[0:5] == "go003":
UP_001_002_003.play()
if message[0:5] == "go004":
UP_004_005.play()
if message[0:5] == "go005":
UP_004_005.play()
if message[0:6] == "nok001":
NOK_001_002_003.play()
if message[0:6] == "nok002":
NOK_001_002_003.play()
if message[0:6] == "nok003":
NOK_001_002_003.play()
if message[0:6] == "nok004":
NOK_004_005.play()
if message[0:6] == "nok005":
NOK_004_005.play()
if message[0:5] == "ok001":
OK_001_002_003.play()
f = f + 10
if message[0:5] == "ok002":
OK_001_002_003.play()
f = f + 10
if message[0:5] == "ok003":
OK_001_002_003.play()
f = f + 10
if message[0:5] == "ok004":
OK_004_005.play()
f = f + 20
if message[0:5] == "ok005":
OK_004_005.play()
f = f + 20
for event in pygame.event.get():
if event.type == SONG_END:
play_a_different_song()
pygame.mixer.music.set_volume (0.2)
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
done = True
pressed = pygame.key.get_pressed()
if pressed[pygame.K_q]: done = True
if pressed[pygame.K_s]:
# ser.write ('1\n')
f= 0
playing = True
play_a_different_song()
pygame.mixer.music.set_volume (1.0)
pygame.time.delay(2000)
pygame.mixer.music.set_volume (0.9)
pygame.time.delay(2000)
pygame.mixer.music.set_volume (0.7)
pygame.time.delay(2000)
pygame.mixer.music.set_volume (0.5)
pygame.time.delay(2000)
pygame.mixer.music.set_volume (0.4)
pygame.time.delay(2000)
pygame.mixer.music.set_volume (0.2)
gametime = time.time() + 120
#print (gametime - time.time())
#print (gametime)
#print (time.time())
print ("Start!!!")
#-------------------------------------------
        # Koldo: pay attention to this line
#--------------------------------------
if pressed[pygame.K_e]:
playing = False
gametime = 0
tiempo = ""
pygame.mixer.music.stop()
print ("End of the game!!")
#ser.write ('2\n')
# if pressed[pygame.K_u]: ser.write ('3\n')
# if pressed[pygame.K_d]: ser.write ('4\n')
# if pressed[pygame.K_m]: ser.write ('5\n')
# if pressed[pygame.K_r]: ser.write ('6\n')
screen.fill(black)
if playing:
counting_text = font2.render(tiempo[:3], 1, gold)
if ( gametime - time.time () < 100):counting_text = font2.render(tiempo[:2], 1, gold)
counting_rect = counting_text.get_rect(center = screen.get_rect().center)
screen.blit(counting_text, (20,20))
text = font.render(texto + str(f), True, gold)
screen.blit(text,
(320 - text.get_width() // 2, 240 - text.get_height() // 2))
#if playing:
pygame.display.flip()
clock.tick(60)
#i = i + 1
#print (i)
#if counting_time >= 100000: done = True
pygame.quit ()
|
crepererum/invenio
|
refs/heads/master
|
invenio/legacy/bibrank/adminlib.py
|
13
|
# This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio BibRank Administrator Interface."""
__revision__ = "$Id$"
import os
import ConfigParser
from invenio.config import \
CFG_SITE_LANG, \
CFG_SITE_URL
from invenio.base.helpers import utf8ifier
import invenio.modules.access.engine as acce
from invenio.base.i18n import language_list_long
from invenio.legacy.dbquery import run_sql, wash_table_column_name
from invenio.modules.ranker.registry import configuration
def getnavtrail(previous=''):
navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (
CFG_SITE_URL,)
navtrail = navtrail + previous
return navtrail
def check_user(req, role, adminarea=2, authorized=0):
(auth_code, auth_message) = is_adminuser(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
def is_adminuser(req, role):
"""check if user is a registered administrator. """
return acce.acc_authorize_action(req, role)
def perform_index(ln=CFG_SITE_LANG):
"""create the bibrank main area menu page."""
header = ['Code', 'Translations', 'Collections', 'Rank method']
rnk_list = get_def_name('', "rnkMETHOD")
actions = []
for (rnkID, name) in rnk_list:
actions.append([name])
for col in [(('Modify', 'modifytranslations'),),
(('Modify', 'modifycollection'),),
(('Show Details', 'showrankdetails'),
('Modify', 'modifyrank'),
('Delete', 'deleterank'))]:
actions[-1].append('<a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&ln=%s">%s</a>' %
(CFG_SITE_URL, col[0][1], rnkID, ln, col[0][0]))
for (str, function) in col[1:]:
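                # note: this loop variable shadows the builtin str for the
                # rest of the loop body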
actions[-1][-1] += ' / <a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&ln=%s">%s</a>' % (
CFG_SITE_URL, function, rnkID, ln, str)
output = """
<a href="%s/admin/bibrank/bibrankadmin.py/addrankarea?ln=%s">Add new rank method</a><br /><br />
""" % (CFG_SITE_URL, ln)
output += tupletotable(header=header, tuple=actions)
return addadminbox("""Overview of rank methods <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mi">?</a>]</small>""" % CFG_SITE_URL, datalist=[output, ''])
def perform_modifycollection(rnkID='', ln=CFG_SITE_LANG, func='', colID='', confirm=0):
"""Modify which collections the rank method is visible to"""
output = ""
subtitle = ""
if rnkID:
rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1]
if func in ["0", 0] and confirm in ["1", 1]:
finresult = attach_col_rnk(rnkID, colID)
elif func in ["1", 1] and confirm in ["1", 1]:
finresult = detach_col_rnk(rnkID, colID)
if colID:
colNAME = get_def_name(colID, "collection")[0][1]
subtitle = """Step 1 - Select collection to enable/disable rank method '%s' for""" % rnkNAME
output = """
<dl>
<dt>The rank method is currently enabled for these collections:</dt>
<dd>
"""
col_list = get_rnk_col(rnkID, ln)
if not col_list:
output += """No collections"""
else:
for (id, name) in col_list:
output += """%s, """ % name
output += """</dd>
</dl>
"""
col_list = get_def_name('', "collection")
col_rnk = dict(get_rnk_col(rnkID))
col_list = filter(lambda x: x[0] not in col_rnk, col_list)
if col_list:
text = """
<span class="adminlabel">Enable for:</span>
<select name="colID" class="admin_w200">
<option value="">- select collection -</option>
"""
for (id, name) in col_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["0", 0] and confirm in [
"0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '', name)
text += """</select>"""
output += createhiddenform(action="modifycollection",
text=text,
button="Enable",
rnkID=rnkID,
ln=ln,
func=0,
confirm=1)
if confirm in ["0", 0] and func in ["0", 0] and colID:
subtitle = "Step 2 - Confirm to enable rank method for the chosen collection"
text = "<b><p>Please confirm to enable rank method '%s' for the collection '%s'</p></b>" % (
rnkNAME, colNAME)
output += createhiddenform(action="modifycollection",
text=text,
button="Confirm",
rnkID=rnkID,
ln=ln,
colID=colID,
func=0,
confirm=1)
elif confirm in ["1", 1] and func in ["0", 0] and colID:
subtitle = "Step 3 - Result"
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["0", 0]:
output += """<b><span class="info">Please select a collection.</span></b>"""
col_list = get_rnk_col(rnkID, ln)
if col_list:
text = """
<span class="adminlabel">Disable for:</span>
<select name="colID" class="admin_w200">
<option value="">- select collection -</option>
"""
for (id, name) in col_list:
text += """<option value="%s" %s>%s</option>""" % (id, (func in ["1", 1] and confirm in [
"0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '', name)
text += """</select>"""
output += createhiddenform(action="modifycollection",
text=text,
button="Disable",
rnkID=rnkID,
ln=ln,
func=1,
confirm=1)
if confirm in ["1", 1] and func in ["1", 1] and colID:
subtitle = "Step 3 - Result"
output += write_outcome(finresult)
elif confirm not in ["0", 0] and func in ["1", 1]:
output += """<b><span class="info">Please select a collection.</span></b>"""
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mc">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_modifytranslations(rnkID, ln, sel_type, trans, confirm, callback='yes'):
"""Modify the translations of a rank method"""
output = ''
subtitle = ''
langs = get_languages()
langs.sort()
if confirm in ["2", 2] and rnkID:
finresult = modify_translations(
rnkID, langs, sel_type, trans, "rnkMETHOD")
rnk_name = get_def_name(rnkID, "rnkMETHOD")[0][1]
rnk_dict = dict(
get_i8n_name('', ln, get_rnk_nametypes()[0][0], "rnkMETHOD"))
if rnkID and int(rnkID) in rnk_dict:
rnkID = int(rnkID)
subtitle = """<a name="3">3. Modify translations for rank method '%s'</a>""" % rnk_name
if type(trans) is str:
trans = [trans]
if sel_type == '':
sel_type = get_rnk_nametypes()[0][0]
header = ['Language', 'Translation']
actions = []
text = """
<span class="adminlabel">Name type</span>
<select name="sel_type" class="admin_w200">
"""
types = get_rnk_nametypes()
if len(types) > 1:
for (key, value) in types:
text += """<option value="%s" %s>%s""" % (
key, key == sel_type and 'selected="selected"' or '', value)
trans_names = get_name(rnkID, ln, key, "rnkMETHOD")
if trans_names and trans_names[0][0]:
text += ": %s" % trans_names[0][0]
text += "</option>"
text += """</select>"""
output += createhiddenform(action="modifytranslations",
text=text,
button="Select",
rnkID=rnkID,
ln=ln,
confirm=0)
if confirm in [-1, "-1", 0, "0"]:
trans = []
for key, value in langs:
try:
trans_names = get_name(rnkID, key, sel_type, "rnkMETHOD")
trans.append(trans_names[0][0])
except StandardError as e:
trans.append('')
for nr in range(0, len(langs)):
actions.append(["%s" % (langs[nr][1],)])
actions[-1].append(
'<input type="text" name="trans" size="30" value="%s"/>' % trans[nr])
text = tupletotable(header=header, tuple=actions)
output += createhiddenform(action="modifytranslations",
text=text,
button="Modify",
rnkID=rnkID,
sel_type=sel_type,
ln=ln,
confirm=2)
if sel_type and len(trans) and confirm in ["2", 2]:
output += write_outcome(finresult)
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mt">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_addrankarea(rnkcode='', ln=CFG_SITE_LANG, template='', confirm=-1):
"""form to add a new rank method with these values:"""
subtitle = 'Step 1 - Create new rank method'
output = """
<dl>
<dt>BibRank code:</dt>
<dd>A unique code that identifies a rank method; it is used when running the bibrank daemon and to name the configuration file for the method.
<br />The template files include the necessary parameters for the chosen rank method and only need to be edited with the correct tags and paths.
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about adding a rank method</dd>
</dl>
""" % CFG_SITE_URL
text = """
<span class="adminlabel">BibRank code</span>
<input class="admin_wvar" type="text" name="rnkcode" value="%s" />
""" % (rnkcode)
text += """<br />
<span class="adminlabel">Cfg template</span>
<select name="template" class="admin_w200">
<option value="">No template</option>
"""
templates = get_templates()
for templ in templates:
text += """<option value="%s" %s>%s</option>""" % (
templ, template == templ and 'selected="selected"' or '', templ[9:len(templ) - 4])
text += """</select>"""
output += createhiddenform(action="addrankarea",
text=text,
button="Add rank method",
ln=ln,
confirm=1)
if rnkcode:
if confirm in ["0", 0]:
subtitle = 'Step 2 - Confirm addition of rank method'
text = """<b>Add rank method with BibRank code: '%s'.</b>""" % (
rnkcode)
if template:
text += """<br /><b>Using configuration template: '%s'.</b>""" % (
template)
else:
text += """<br /><b>Create empty configuration file.</b>"""
output += createhiddenform(action="addrankarea",
text=text,
rnkcode=rnkcode,
button="Confirm",
template=template,
confirm=1)
elif confirm in ["1", 1]:
rnkID = add_rnk(rnkcode)
subtitle = "Step 3 - Result"
if rnkID[0] == 1:
rnkID = rnkID[1]
text = """<b><span class="info">Added new rank method with BibRank code '%s'</span></b>""" % rnkcode
try:
if template:
infile = open(configuration.get(template, ''), 'r')
indata = infile.readlines()
infile.close()
else:
indata = ()
file = open(
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w')
for line in indata:
file.write(line)
file.close()
if template:
text += """<b><span class="info"><br />Configuration file created using '%s' as template.</span></b>""" % template
else:
text += """<b><span class="info"><br />Empty configuration file created.</span></b>"""
except StandardError as e:
text += """<b><span class="info"><br />Sorry, could not create configuration file: '%s.cfg', either because it already exists, or not enough rights to create file. <br />Please create the file in the path given.</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
else:
text = """<b><span class="info">Sorry, could not add rank method, rank method with the same BibRank code probably exists.</span></b>"""
output += text
elif not rnkcode and confirm not in [-1, "-1"]:
output += """<b><span class="info">Sorry, could not add rank method, not enough data submitted.</span></b>"""
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#ar">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_modifyrank(rnkID, rnkcode='', ln=CFG_SITE_LANG, template='', cfgfile='', confirm=0):
"""form to modify a rank method
rnkID - id of the rank method
"""
if not rnkID:
return "No ranking method selected."
if not get_rnk_code(rnkID):
return "Ranking method %s does not seem to exist." % str(rnkID)
subtitle = 'Step 1 - Please modify the wanted values below'
if not rnkcode:
oldcode = get_rnk_code(rnkID)[0][0]
else:
oldcode = rnkcode
output = """
<dl>
<dd>When changing the BibRank code of a rank method, you must also change any scheduled tasks using the old value.
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about modifying a rank method's BibRank code.</dd>
</dl>
""" % CFG_SITE_URL
text = """
<span class="adminlabel">BibRank code</span>
<input class="admin_wvar" type="text" name="rnkcode" value="%s" />
<br />
""" % (oldcode)
try:
text += """<span class="adminlabel">Cfg file</span>"""
textarea = ""
if cfgfile:
textarea += cfgfile
else:
file = open(
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''))
for line in file.readlines():
textarea += line
text += """<textarea class="admin_wvar" name="cfgfile" rows="15" cols="70">""" + \
textarea + """</textarea>"""
except StandardError as e:
text += """<b><span class="info">Cannot load file, either it does not exist, or not enough rights to read it: '%s.cfg'<br />Please create the file in the path given.</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
output += createhiddenform(action="modifyrank",
text=text,
rnkID=rnkID,
button="Modify",
confirm=1)
if rnkcode and confirm in ["1", 1] and get_rnk_code(rnkID)[0][0] != rnkcode:
oldcode = get_rnk_code(rnkID)[0][0]
result = modify_rnk(rnkID, rnkcode)
subtitle = "Step 3 - Result"
if result:
text = """<b><span class="info">Rank method modified.</span></b>"""
try:
file = open(configuration.get(oldcode + '.cfg', ''), 'r')
file2 = open(configuration.get(rnkcode + '.cfg', ''), 'w')
lines = file.readlines()
for line in lines:
file2.write(line)
file.close()
file2.close()
os.remove(configuration.get(oldcode + '.cfg', ''))
except StandardError as e:
text = """<b><span class="info">Sorry, could not change name of cfg file, must be done manually: '%s.cfg'</span></b>""" % (
configuration.get(oldcode + '.cfg', ''), )
else:
text = """<b><span class="info">Sorry, could not modify rank method.</span></b>"""
output += text
if cfgfile and confirm in ["1", 1]:
try:
file = open(
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w')
file.write(cfgfile)
file.close()
text = """<b><span class="info"><br />Configuration file modified: '%s/bibrank/%s.cfg'</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
except StandardError as e:
text = """<b><span class="info"><br />Sorry, could not modify configuration file, please check for rights to do so: '%s.cfg'<br />Please modify the file manually.</span></b>""" % (
configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), )
output += text
finoutput = addadminbox(
subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mr">?</a>]</small>""" % CFG_SITE_URL, [output])
output = ""
text = """
<span class="adminlabel">Select</span>
<select name="template" class="admin_w200">
<option value="">- select template -</option>
"""
templates = get_templates()
for templ in templates:
text += """<option value="%s" %s>%s</option>""" % (
templ, template == templ and 'selected="selected"' or '', templ[9:len(templ) - 4])
text += """</select><br />"""
output += createhiddenform(action="modifyrank",
text=text,
rnkID=rnkID,
button="Show template",
confirm=0)
try:
if template:
textarea = ""
text = """<span class="adminlabel">Content:</span>"""
file = open(configuration.get(template, ''), 'r')
lines = file.readlines()
for line in lines:
textarea += line
file.close()
text += """<textarea class="admin_wvar" readonly="true" rows="15" cols="70">""" + \
textarea + """</textarea>"""
output += text
except StandardError as e:
output += """Cannot load file, either it does not exist, or not enough rights to read it: '%s'""" % (
configuration.get(template, ''), )
finoutput += addadminbox("View templates", [output])
return finoutput
def perform_deleterank(rnkID, ln=CFG_SITE_LANG, confirm=0):
"""form to delete a rank method
"""
subtitle = ''
output = """
<span class="warning">
<dl>
<dt><strong>WARNING:</strong></dt>
<dd><strong>When deleting a rank method, you also delete all data related to it, such as translations, the collections
it was attached to, and the data necessary to rank the search results. Any scheduled tasks using the deleted rank method will also stop working.
<br /><br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section regarding deleting a rank method.</strong></dd>
</dl>
</span>
""" % CFG_SITE_URL
if rnkID:
if confirm in ["0", 0]:
rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1]
subtitle = 'Step 1 - Confirm deletion'
text = """Delete rank method '%s'.""" % (rnkNAME)
output += createhiddenform(action="deleterank",
text=text,
button="Confirm",
rnkID=rnkID,
confirm=1)
elif confirm in ["1", 1]:
try:
rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1]
rnkcode = get_rnk_code(rnkID)[0][0]
table = ""
try:
config = ConfigParser.ConfigParser()
config.readfp(
open(configuration.get(rnkcode + ".cfg"), 'r'))
table = config.get(
config.get('rank_method', "function"), "table")
except Exception:
pass
result = delete_rnk(rnkID, table)
subtitle = "Step 2 - Result"
if result:
text = """<b><span class="info">Rank method deleted</span></b>"""
try:
os.remove(configuration.get(rnkcode + ".cfg"))
text += """<br /><b><span class="info">Configuration file deleted: '%s.cfg'.</span></b>""" % (
configuration.get(rnkcode + ".cfg"), )
except StandardError as e:
text += """<br /><b><span class="info">Sorry, could not delete configuration file: '%s/bibrank/%s.cfg'.</span><br />Please delete the file manually.</span></b>""" % (
configuration.get(rnkcode + ".cfg"), )
else:
text = """<b><span class="info">Sorry, could not delete rank method</span></b>"""
except StandardError as e:
text = """<b><span class="info">Sorry, could not delete rank method, most likely already deleted</span></b>"""
output = text
body = [output]
return addadminbox(subtitle + """ <small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#dr">?</a>]</small>""" % CFG_SITE_URL, body)
def perform_showrankdetails(rnkID, ln=CFG_SITE_LANG):
"""Returns details about the rank method given by rnkID"""
if not rnkID:
return "No ranking method selected."
if not get_rnk_code(rnkID):
return "Ranking method %s does not seem to exist." % str(rnkID)
subtitle = """Overview <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_SITE_URL, rnkID, ln)
text = """
BibRank code: %s<br />
Last updated by BibRank:
""" % (get_rnk_code(rnkID)[0][0])
if get_rnk(rnkID)[0][2]:
text += "%s<br />" % get_rnk(rnkID)[0][2]
else:
text += "Not yet run.<br />"
output = addadminbox(subtitle, [text])
subtitle = """Rank method statistics"""
text = ""
try:
text = "Not yet implemented"
except StandardError as e:
text = "BibRank not yet run, cannot show statistics for method"
output += addadminbox(subtitle, [text])
subtitle = """Attached to collections <a href="%s/admin/bibrank/bibrankadmin.py/modifycollection?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_SITE_URL, rnkID, ln)
text = ""
col = get_rnk_col(rnkID, ln)
for key, value in col:
text += "%s<br />" % value
if not col:
text += "No collections"
output += addadminbox(subtitle, [text])
subtitle = """Translations <a href="%s/admin/bibrank/bibrankadmin.py/modifytranslations?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_SITE_URL, rnkID, ln)
prev_lang = ''
trans = get_translations(rnkID)
types = get_rnk_nametypes()
types = dict(map(lambda x: (x[0], x[1]), types))
text = ""
languages = dict(get_languages())
if trans:
for lang, type, name in trans:
if lang and lang in languages and type and name:
if prev_lang != lang:
prev_lang = lang
text += """%s: <br />""" % (languages[lang])
if type in types:
text += """<span style="margin-left: 10px">'%s'</span><span class="note">(%s)</span><br />""" % (
name, types[type])
else:
text = """No translations exists"""
output += addadminbox(subtitle, [text])
subtitle = """Configuration file: '%s/bibrank/%s.cfg' <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&ln=%s">[Modify]</a>""" % (
CFG_ETCDIR, get_rnk_code(rnkID)[0][0], CFG_SITE_URL, rnkID, ln)
text = ""
try:
file = open(configuration.get(get_rnk_code(rnkID)[0][0] + ".cfg", ''))
text += """<pre>"""
for line in file.readlines():
text += line
text += """</pre>"""
except StandardError as e:
text = """Cannot load file, either it does not exist, or not enough rights to read it."""
output += addadminbox(subtitle, [text])
return output
def compare_on_val(second, first):
return cmp(second[1], first[1])
def get_rnk_code(rnkID):
"""Returns the name from rnkMETHOD based on argument
rnkID - id from rnkMETHOD"""
try:
res = run_sql("SELECT name FROM rnkMETHOD where id=%s" % (rnkID))
return res
except StandardError as e:
return ()
def get_rnk(rnkID=''):
"""Return one or all rank methods
rnkID - return the rank method given, or all if not given"""
try:
if rnkID:
res = run_sql(
"SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD WHERE id=%s" % rnkID)
else:
res = run_sql(
"SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD")
return res
except StandardError as e:
return ()
def get_translations(rnkID):
"""Returns the translations in rnkMETHODNAME for a rankmethod
rnkID - the id of the rankmethod from rnkMETHOD """
try:
res = run_sql(
"SELECT ln, type, value FROM rnkMETHODNAME where id_rnkMETHOD=%s ORDER BY ln,type" % (rnkID))
return res
except StandardError as e:
return ()
def get_rnk_nametypes():
"""Return a list of the various translationnames for the rank methods"""
type = []
type.append(('ln', 'Long name'))
#type.append(('sn', 'Short name'))
return type
def get_col_nametypes():
"""Return a list of the various translationnames for the rank methods"""
type = []
type.append(('ln', 'Long name'))
return type
def get_rnk_col(rnkID, ln=CFG_SITE_LANG):
""" Returns a list of the collections the given rank method is attached to
rnkID - id from rnkMETHOD"""
try:
res1 = dict(run_sql(
"SELECT id_collection, '' FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID))
res2 = get_def_name('', "collection")
result = filter(lambda x: x[0] in res1, res2)
return result
except StandardError as e:
return ()
def get_templates():
"""Read CFG_ETCDIR/bibrank and returns a list of all files with 'template' """
templates = []
files = configuration.itervalues()
for file in files:
if str.find(file, "template_") != -1:
templates.append(file)
return templates
def attach_col_rnk(rnkID, colID):
"""attach rank method to collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql(
"INSERT INTO collection_rnkMETHOD(id_collection, id_rnkMETHOD) values (%s,%s)" % (colID, rnkID))
return (1, "")
except StandardError as e:
return (0, e)
def detach_col_rnk(rnkID, colID):
"""detach rank method from collection
rnkID - id from rnkMETHOD table
colID - id of collection, as in collection table """
try:
res = run_sql(
"DELETE FROM collection_rnkMETHOD WHERE id_collection=%s AND id_rnkMETHOD=%s" % (colID, rnkID))
return (1, "")
except StandardError as e:
return (0, e)
def delete_rnk(rnkID, table=""):
"""Deletes all data for the given rank method
rnkID - delete all data in the tables associated with ranking and this id """
try:
res = run_sql("DELETE FROM rnkMETHOD WHERE id=%s" % rnkID)
res = run_sql(
"DELETE FROM rnkMETHODNAME WHERE id_rnkMETHOD=%s" % rnkID)
res = run_sql(
"DELETE FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID)
res = run_sql(
"DELETE FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s" % rnkID)
if table:
res = run_sql("truncate %s" % table)
res = run_sql("truncate %sR" % table[:-1])
return (1, "")
except StandardError as e:
return (0, e)
def modify_rnk(rnkID, rnkcode):
"""change the code for the rank method given
rnkID - change in rnkMETHOD where id is like this
rnkcode - new value for field 'name' in rnkMETHOD """
try:
res = run_sql(
"UPDATE rnkMETHOD set name=%s WHERE id=%s", (rnkcode, rnkID))
return (1, "")
except StandardError as e:
return (0, e)
def add_rnk(rnkcode):
"""Adds a new rank method to rnkMETHOD
rnkcode - the "code" for the rank method, to be used by bibrank daemon """
try:
res = run_sql("INSERT INTO rnkMETHOD (name) VALUES (%s)", (rnkcode,))
res = run_sql("SELECT id FROM rnkMETHOD WHERE name=%s", (rnkcode,))
if res:
return (1, res[0][0])
else:
raise StandardError
except StandardError as e:
return (0, e)
def addadminbox(header='', datalist=[], cls="admin_wvar"):
"""used to create table around main data on a page, row based.
header - header on top of the table
datalist - list of the data to be added row by row
cls - possible to select wich css-class to format the look of the table."""
if len(datalist) == 1:
per = '100'
else:
per = '75'
output = '<table class="%s" ' % (cls, ) + 'width="95%">\n'
output += """
<thead>
<tr>
<th class="adminheaderleft" colspan="%s">%s</th>
</tr>
</thead>
<tbody>
""" % (len(datalist), header)
output += ' <tr>\n'
output += """
<td style="vertical-align: top; margin-top: 5px; width: %s;">
%s
</td>
""" % (per + '%', datalist[0])
if len(datalist) > 1:
output += """
<td style="vertical-align: top; margin-top: 5px; width: %s;">
%s
</td>
""" % ('25%', datalist[1])
output += ' </tr>\n'
output += """
</tbody>
</table>
"""
return output
def tupletotable(header=[], tuple=[], start='', end='', extracolumn='', highlight_rows_p=False, alternate_row_colors_p=False):
"""create html table for a tuple.
header - optional header for the columns
tuple - create table of this
start - text to be added at the beginning, most likely the beginning of a form
end - text to be added at the end, most likely the end of a form.
extracolumn - mainly used to put in a button.
highlight_rows_p - if the cursor hovering a row should highlight the full row or not
alternate_row_colors_p - if alternate background colours should be used for the rows
"""
# study first row in tuple for alignment
align = []
try:
firstrow = tuple[0]
if type(firstrow) in [int, long]:
align = ['admintdright']
elif type(firstrow) in [str, dict]:
align = ['admintdleft']
else:
for item in firstrow:
if type(item) is int:
align.append('admintdright')
else:
align.append('admintdleft')
except IndexError:
firstrow = []
tblstr = ''
for h in header + ['']:
tblstr += ' <th class="adminheader">%s</th>\n' % (h, )
if tblstr:
tblstr = ' <tr>\n%s\n </tr>\n' % (tblstr, )
tblstr = start + '<table class="admin_wvar_nomargin">\n' + tblstr
# extra column
try:
extra = '<tr class="%s">' % (
highlight_rows_p and 'admin_row_highlight' or '')
if type(firstrow) not in [int, long, str, dict]:
# for data in firstrow: extra += '<td class="%s">%s</td>\n' % ('admintd', data)
for i in range(len(firstrow)):
extra += '<td class="{0}">{1}</td>\n'.format(
align[i], firstrow[i])
else:
extra += ' <td class="%s">%s</td>\n' % (align[0], firstrow)
extra += '<td class="extracolumn" rowspan="%s" style="vertical-align: top;">\n%s\n</td>\n</tr>\n' % (
len(tuple), extracolumn)
except IndexError:
extra = ''
tblstr += extra
# for i in range(1, len(tuple)):
j = 0
for row in tuple[1:]:
j += 1
tblstr += ' <tr class="%s %s">\n' % (highlight_rows_p and 'admin_row_highlight' or '',
(j % 2 and alternate_row_colors_p) and 'admin_row_color' or '')
# row = tuple[i]
if type(row) not in [int, long, str, dict]:
# for data in row: tblstr += '<td class="admintd">%s</td>\n' % (data,)
for i in range(len(row)):
tblstr += '<td class="{0}">{1}</td>\n'.format(align[i], utf8ifier(row[i]))
else:
tblstr += ' <td class="%s">%s</td>\n' % (align[0], row)
tblstr += ' </tr> \n'
tblstr += '</table> \n '
tblstr += end
return tblstr
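# Minimal usage sketch (illustrative only, never called in this module): how the
# admin pages above typically turn a header list and a list of rows into the HTML
# table markup produced by tupletotable(). The row values below are made up.
def _tupletotable_example():
    example_header = ['ID', 'Name']
    example_rows = [['1', 'word_similarity'], ['2', 'citation']]
    return tupletotable(header=example_header, tuple=example_rows)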
def tupletotable_onlyselected(header=[], tuple=[], selected=[], start='', end='', extracolumn=''):
"""create html table for a tuple.
header - optional header for the columns
tuple - create table of this
selected - indexes of selected rows in the tuple
start - put this at the beginning
end - put this at the end
extracolumn - mainly used to put in a button"""
tuple2 = []
for index in selected:
tuple2.append(tuple[int(index) - 1])
return tupletotable(header=header,
tuple=tuple2,
start=start,
end=end,
extracolumn=extracolumn)
def addcheckboxes(datalist=[], name='authids', startindex=1, checked=[]):
"""adds checkboxes in front of the listdata.
datalist - add checkboxes in front of this list
name - name of all the checkboxes, values will be associated with this name
startindex - usually 1 because of the header
checked - values of checkboxes to be pre-checked """
if not type(checked) is list:
checked = [checked]
for row in datalist:
# always box, check another place
if 1 or row[0] not in [-1, "-1", 0, "0"]:
chkstr = str(startindex) in checked and 'checked="checked"' or ''
row.insert(
0, '<input type="checkbox" name="%s" value="%s" %s />' % (name, startindex, chkstr))
else:
row.insert(0, '')
startindex += 1
return datalist
def createhiddenform(action="", text="", button="confirm", cnfrm='', **hidden):
"""create select with hidden values and submit button
action - name of the action to perform on submit
text - additional text, can also be used to add non hidden input
button - value/caption on the submit button
cnfrm - if given, must check checkbox to confirm
**hidden - dictionary with name=value pairs for hidden input """
output = '<form action="%s" method="post">\n' % (action, )
output += '<table>\n<tr><td style="vertical-align: top">'
# output += text.decode('utf-8')
output += text
if cnfrm:
output += ' <input type="checkbox" name="confirm" value="1"/>'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += ' <input type="hidden" name="%s" value="%s"/>\n' % (
key, value)
else:
output += ' <input type="hidden" name="%s" value="%s"/>\n' % (
key, hidden[key])
output += '</td><td style="vertical-align: bottom">'
output += ' <input class="btn btn-default" type="submit" value="%s"/>\n' % (
button, )
output += '</td></tr></table>'
output += '</form>\n'
return output
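# Minimal usage sketch (illustrative only, never called in this module): how the
# perform_* pages above build a small confirmation form. The action name, text
# and hidden values are placeholders, not values from a real request.
def _createhiddenform_example():
    return createhiddenform(action="modifyrank",
                            text="<b>Confirm the modification</b>",
                            button="Confirm",
                            rnkID=1,
                            confirm=1)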
def get_languages():
languages = []
for (lang, lang_namelong) in language_list_long():
languages.append((lang, lang_namelong))
languages.sort()
return languages
def get_def_name(ID, table):
"""Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table
ln - a language supported by Invenio
type - the type of value wanted, like 'ln', 'sn'"""
name = "name"
if table[-1:].isupper():
name = "NAME"
try:
if ID:
res = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID))
else:
res = run_sql("SELECT id,name FROM %s" % table)
res = list(res)
res.sort(compare_on_val)
return res
except StandardError as e:
return []
def get_i8n_name(ID, ln, rtype, table):
"""Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table
ln - a language supported by Invenio
type - the type of value wanted, like 'ln', 'sn'"""
name = "name"
if table[-1:].isupper():
name = "NAME"
try:
res = ""
if ID:
res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s' and id_%s=%s" % (
table, table, name, rtype, ln, table, ID))
else:
res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s'" % (
table, table, name, rtype, ln))
if ln != CFG_SITE_LANG:
if ID:
res1 = run_sql("SELECT id_%s,value FROM %s%s WHERE ln='%s' and type='%s' and id_%s=%s" % (
table, table, name, CFG_SITE_LANG, rtype, table, ID))
else:
res1 = run_sql("SELECT id_%s,value FROM %s%s WHERE ln='%s' and type='%s'" % (
table, table, name, CFG_SITE_LANG, rtype))
res2 = dict(res)
result = filter(lambda x: x[0] not in res2, res1)
res = res + result
if ID:
res1 = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID))
else:
res1 = run_sql("SELECT id,name FROM %s" % table)
res2 = dict(res)
result = filter(lambda x: x[0] not in res2, res1)
res = res + result
res = list(res)
res.sort(compare_on_val)
return res
except StandardError as e:
raise StandardError
def get_name(ID, ln, rtype, table, id_column=None):
"""Returns the value from the table name based on arguments
ID - id
ln - a language supported by Invenio
type - the type of value wanted, like 'ln', 'sn'
table - tablename
id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table
"""
name = "name"
if table[-1:].isupper():
name = "NAME"
if id_column:
id_column = wash_table_column_name(id_column)
try:
res = run_sql("SELECT value FROM %s%s WHERE type='%s' and ln='%s' and %s=%s" % (
table, name, rtype, ln, (id_column or 'id_%s' % wash_table_column_name(table)), ID))
return res
except StandardError as e:
return ()
def modify_translations(ID, langs, sel_type, trans, table, id_column=None):
"""add or modify translations in tables given by table
ID - the id of the row to modify in the given table
sel_type - the name type
langs - the languages
trans - the translations, in same order as in langs
table - the table
id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table
"""
name = "name"
if table[-1:].isupper():
name = "NAME"
id_column = id_column or 'id_%s' % table
if id_column:
id_column = wash_table_column_name(id_column)
try:
for nr in range(0, len(langs)):
res = run_sql("SELECT value FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column),
(ID, sel_type, langs[nr][0]))
if res:
if trans[nr]:
res = run_sql("UPDATE %s%s SET value=%%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column),
(trans[nr], ID, sel_type, langs[nr][0]))
else:
res = run_sql("DELETE FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column),
(ID, sel_type, langs[nr][0]))
else:
if trans[nr]:
res = run_sql("INSERT INTO %s%s (%s, type, ln, value) VALUES (%%s,%%s,%%s,%%s)" % (table, name, id_column),
(ID, sel_type, langs[nr][0], trans[nr]))
return (1, "")
except StandardError as e:
return (0, e)
def write_outcome(res):
"""
Write the outcome of an update of some settings.
Parameter 'res' is a tuple (int, str), where 'int' is 0 when there
is an error to display, and 1 when everything went fine. 'str' is
a message displayed when there is an error.
"""
if res and res[0] == 1:
return """<b><span class="info">Operation successfully completed.</span></b>"""
elif res:
return """<b><span class="info">Operation failed. Reason:</span></b><br />%s""" % res[1]
|
geopython/QGIS
|
refs/heads/master
|
scripts/parse_dash_results.py
|
14
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
parse_dash_results.py
---------------------
Date : October 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
__author__ = 'Nyall Dawson'
__date__ = 'October 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import argparse
import urllib.request
import urllib.parse
import urllib.error
import re
from bs4 import BeautifulSoup
from PyQt5.QtGui import (
QImage, QColor, qRed, qBlue, qGreen, qAlpha, qRgb, QPixmap)
from PyQt5.QtWidgets import (QDialog,
QApplication,
QLabel,
QVBoxLayout,
QHBoxLayout,
QGridLayout,
QPushButton,
QDoubleSpinBox,
QMessageBox,
QWidget,
QScrollArea)
import struct
import glob
dash_url = 'https://dash.orfeo-toolbox.org'
def error(msg):
print(msg)
sys.exit(1)
def colorDiff(c1, c2):
redDiff = abs(qRed(c1) - qRed(c2))
greenDiff = abs(qGreen(c1) - qGreen(c2))
blueDiff = abs(qBlue(c1) - qBlue(c2))
alphaDiff = abs(qAlpha(c1) - qAlpha(c2))
return max(redDiff, greenDiff, blueDiff, alphaDiff)
def imageFromPath(path):
print(path)
if (path[:8] == 'https://' or path[:7] == 'file://'):
# fetch remote image
print('fetching remote!')
data = urllib.request.urlopen(path).read()
image = QImage()
image.loadFromData(data)
else:
print('using local!')
image = QImage(path)
return image
class ResultHandler(QDialog):
def __init__(self, parent=None):
super(ResultHandler, self).__init__()
self.setWindowTitle('Dash results')
self.control_label = QLabel()
self.rendered_label = QLabel()
self.diff_label = QLabel()
self.mask_label = QLabel()
self.new_mask_label = QLabel()
self.scrollArea = QScrollArea()
self.widget = QWidget()
self.test_name_label = QLabel()
grid = QGridLayout()
grid.addWidget(self.test_name_label, 0, 0)
grid.addWidget(QLabel('Control'), 1, 0)
grid.addWidget(QLabel('Rendered'), 1, 1)
grid.addWidget(QLabel('Difference'), 1, 2)
grid.addWidget(self.control_label, 2, 0)
grid.addWidget(self.rendered_label, 2, 1)
grid.addWidget(self.diff_label, 2, 2)
grid.addWidget(QLabel('Current Mask'), 3, 0)
grid.addWidget(QLabel('New Mask'), 3, 1)
grid.addWidget(self.mask_label, 4, 0)
grid.addWidget(self.new_mask_label, 4, 1)
self.widget.setLayout(grid)
self.scrollArea.setWidget(self.widget)
v_layout = QVBoxLayout()
v_layout.addWidget(self.scrollArea, 1)
next_image_button = QPushButton()
next_image_button.setText('Skip')
next_image_button.pressed.connect(self.load_next)
self.overload_spin = QDoubleSpinBox()
self.overload_spin.setMinimum(1)
self.overload_spin.setMaximum(255)
self.overload_spin.setValue(1)
self.overload_spin.valueChanged.connect(lambda: save_mask_button.setEnabled(False))
preview_mask_button = QPushButton()
preview_mask_button.setText('Preview New Mask')
preview_mask_button.pressed.connect(self.preview_mask)
preview_mask_button.pressed.connect(lambda: save_mask_button.setEnabled(True))
save_mask_button = QPushButton()
save_mask_button.setText('Save New Mask')
save_mask_button.pressed.connect(self.save_mask)
button_layout = QHBoxLayout()
button_layout.addWidget(next_image_button)
button_layout.addWidget(QLabel('Mask diff multiplier:'))
button_layout.addWidget(self.overload_spin)
button_layout.addWidget(preview_mask_button)
button_layout.addWidget(save_mask_button)
button_layout.addStretch()
v_layout.addLayout(button_layout)
self.setLayout(v_layout)
def closeEvent(self, event):
self.reject()
def parse_url(self, url):
print('Fetching dash results from: {}'.format(url))
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page, "lxml")
# build up list of rendered images
measurement_img = [img for img in soup.find_all('img') if
img.get('alt') and img.get('alt').startswith('Rendered Image')]
images = {}
for img in measurement_img:
m = re.search(r'Rendered Image (.*?)(\s|$)', img.get('alt'))
test_name = m.group(1)
rendered_image = img.get('src')
images[test_name] = '{}/{}'.format(dash_url, rendered_image)
if images:
print('found images:\n{}'.format(images))
else:
print('no images found\n')
self.images = images
self.load_next()
def load_next(self):
if not self.images:
# all done
self.accept()
exit(0)
test_name, rendered_image = self.images.popitem()
self.test_name_label.setText(test_name)
control_image = self.get_control_image_path(test_name)
if not control_image:
self.load_next()
return
self.mask_image_path = control_image[:-4] + '_mask.png'
self.load_images(control_image, rendered_image, self.mask_image_path)
def load_images(self, control_image_path, rendered_image_path, mask_image_path):
self.control_image = imageFromPath(control_image_path)
if not self.control_image:
error('Could not read control image {}'.format(control_image_path))
self.rendered_image = imageFromPath(rendered_image_path)
if not self.rendered_image:
error(
'Could not read rendered image {}'.format(rendered_image_path))
if not self.rendered_image.width() == self.control_image.width() or not self.rendered_image.height() == self.control_image.height():
print(
'Size mismatch - control image is {}x{}, rendered image is {}x{}'.format(self.control_image.width(),
self.control_image.height(
),
self.rendered_image.width(
),
self.rendered_image.height()))
max_width = min(
self.rendered_image.width(), self.control_image.width())
max_height = min(
self.rendered_image.height(), self.control_image.height())
# read the current mask, if it exists
self.mask_image = imageFromPath(mask_image_path)
if self.mask_image.isNull():
print(
'Mask image does not exist, creating {}'.format(mask_image_path))
self.mask_image = QImage(
self.control_image.width(), self.control_image.height(), QImage.Format_ARGB32)
self.mask_image.fill(QColor(0, 0, 0))
self.diff_image = self.create_diff_image(
self.control_image, self.rendered_image, self.mask_image)
if not self.diff_image:
self.load_next()
return
self.control_label.setPixmap(QPixmap.fromImage(self.control_image))
self.rendered_label.setPixmap(QPixmap.fromImage(self.rendered_image))
self.mask_label.setPixmap(QPixmap.fromImage(self.mask_image))
self.diff_label.setPixmap(QPixmap.fromImage(self.diff_image))
self.preview_mask()
def preview_mask(self):
self.new_mask_image = self.create_mask(
self.control_image, self.rendered_image, self.mask_image, self.overload_spin.value())
self.new_mask_label.setPixmap(QPixmap.fromImage(self.new_mask_image))
def save_mask(self):
self.new_mask_image.save(self.mask_image_path, "png")
self.load_next()
def create_mask(self, control_image, rendered_image, mask_image, overload=1):
max_width = min(rendered_image.width(), control_image.width())
max_height = min(rendered_image.height(), control_image.height())
new_mask_image = QImage(
control_image.width(), control_image.height(), QImage.Format_ARGB32)
new_mask_image.fill(QColor(0, 0, 0))
# loop through pixels in rendered image and compare
mismatch_count = 0
linebytes = max_width * 4
for y in range(max_height):
control_scanline = control_image.constScanLine(
y).asstring(linebytes)
rendered_scanline = rendered_image.constScanLine(
y).asstring(linebytes)
mask_scanline = mask_image.scanLine(y).asstring(linebytes)
for x in range(max_width):
currentTolerance = qRed(
struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
if currentTolerance == 255:
# ignore pixel
new_mask_image.setPixel(
x, y, qRgb(currentTolerance, currentTolerance, currentTolerance))
continue
expected_rgb = struct.unpack(
'I', control_scanline[x * 4:x * 4 + 4])[0]
rendered_rgb = struct.unpack(
'I', rendered_scanline[x * 4:x * 4 + 4])[0]
difference = min(
255, colorDiff(expected_rgb, rendered_rgb) * overload)
if difference > currentTolerance:
# update mask image
new_mask_image.setPixel(
x, y, qRgb(difference, difference, difference))
mismatch_count += 1
else:
new_mask_image.setPixel(
x, y, qRgb(currentTolerance, currentTolerance, currentTolerance))
return new_mask_image
def get_control_image_path(self, test_name):
if os.path.isfile(test_name):
return test_name
# else try and find matching test image
script_folder = os.path.dirname(os.path.realpath(sys.argv[0]))
control_images_folder = os.path.join(
script_folder, '../tests/testdata/control_images')
matching_control_images = [x[0]
for x in os.walk(control_images_folder) if test_name in x[0]]
if len(matching_control_images) > 1:
QMessageBox.warning(
self, 'Result', 'Found multiple matching control images for {}'.format(test_name))
return None
elif len(matching_control_images) == 0:
QMessageBox.warning(
self, 'Result', 'No matching control images found for {}'.format(test_name))
return None
found_control_image_path = matching_control_images[0]
# check for a single matching expected image
images = glob.glob(os.path.join(found_control_image_path, '*.png'))
filtered_images = [i for i in images if not i[-9:] == '_mask.png']
if len(filtered_images) > 1:
error(
'Found multiple matching control images for {}'.format(test_name))
elif len(filtered_images) == 0:
error('No matching control images found for {}'.format(test_name))
found_image = filtered_images[0]
print('Found matching control image: {}'.format(found_image))
return found_image
def create_diff_image(self, control_image, rendered_image, mask_image):
# loop through pixels in rendered image and compare
mismatch_count = 0
max_width = min(rendered_image.width(), control_image.width())
max_height = min(rendered_image.height(), control_image.height())
linebytes = max_width * 4
diff_image = QImage(
control_image.width(), control_image.height(), QImage.Format_ARGB32)
diff_image.fill(QColor(152, 219, 249))
for y in range(max_height):
control_scanline = control_image.constScanLine(
y).asstring(linebytes)
rendered_scanline = rendered_image.constScanLine(
y).asstring(linebytes)
mask_scanline = mask_image.scanLine(y).asstring(linebytes)
for x in range(max_width):
currentTolerance = qRed(
struct.unpack('I', mask_scanline[x * 4:x * 4 + 4])[0])
if currentTolerance == 255:
# ignore pixel
continue
expected_rgb = struct.unpack(
'I', control_scanline[x * 4:x * 4 + 4])[0]
rendered_rgb = struct.unpack(
'I', rendered_scanline[x * 4:x * 4 + 4])[0]
difference = colorDiff(expected_rgb, rendered_rgb)
if difference > currentTolerance:
# update mask image
diff_image.setPixel(x, y, qRgb(255, 0, 0))
mismatch_count += 1
if mismatch_count:
return diff_image
else:
print('No mismatches')
return None
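# Minimal illustration (not part of the original script) of the per-pixel rule
# shared by create_mask() and create_diff_image() above: the red channel of the
# mask pixel is the allowed colour difference for that pixel, and a value of 255
# means the pixel is ignored entirely.
def _pixel_mismatches(expected_rgb, rendered_rgb, mask_tolerance):
    if mask_tolerance == 255:
        # masked out: never counts as a mismatch
        return False
    return colorDiff(expected_rgb, rendered_rgb) > mask_tolerance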
def main():
app = QApplication(sys.argv)
parser = argparse.ArgumentParser()
parser.add_argument('dash_url')
args = parser.parse_args()
w = ResultHandler()
w.parse_url(args.dash_url)
w.exec_()
if __name__ == '__main__':
main()
|
christianurich/VIBe2UrbanSim
|
refs/heads/master
|
3rdparty/opus/src/synthesizer/gui/results_menu/doRandPoints.py
|
2
|
# PopGen 1.1 is A Synthetic Population Generator for Advanced
# Microsimulation Models of Travel Demand
# Copyright (C) 2009, Arizona State University
# See PopGen/License
#-----------------------------------------------------------
#
# Generate Random Points
#
# A QGIS plugin for generating a simple random points
# shapefile.
#
# Copyright (C) 2008 Carson Farmer
#
# EMAIL: carson.farmer (at) gmail.com
# WEB : www.geog.uvic.ca/spar/carson
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from random import *
import math
class RandPoints():
def __init__(self, Dialog):
self.progressBar = QProgressBar()
self.progressBar.setProperty("value",QVariant(24))
self.progressBar.setTextVisible(False)
self.progressBar.setObjectName("progressBar")
#self.gridlayout.addWidget(self.progressBar,7,0,1,1)
self.progressBar.setValue(0)
# when 'OK' button is pressed, gather required inputs, and initiate random points generation
def accept(self, path, hhfreqvar):
self.progressBar.setValue(2.5)
inlayerpath = path + '_sel' + '.shp'
inLayer = QgsVectorLayer(inlayerpath, "SelCounties", "ogr")
outPath = path + '_hhpoints' + '.shp'
self.progressBar.setValue(5)
design = "field"
value = hhfreqvar
minimum = 0.00
self.progressBar.setValue(10)
self.randomize(inLayer, outPath, minimum, design, value, self.progressBar)
self.progressBar.setValue(100)
#addToTOC = QMessageBox.question(self, "Random Points", "Created output point Shapefile:\n" + outPath
# + "\n\nWould you like to add the new layer to the TOC?", QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton)
#if addToTOC == QMessageBox.Yes:
# self.vlayer = QgsVectorLayer(outPath, unicode(outName), "ogr")
# QgsMapLayerRegistry.instance().addMapLayer(self.vlayer)
self.progressBar.setValue(0)
return outPath
# Generate list of random points
def simpleRandom(self, n, bound, xmin, xmax, ymin, ymax):
seed()
points = []
i = 1
while i <= n:
pGeom = QgsGeometry().fromPoint(QgsPoint(xmin + (xmax-xmin) * random(), ymin + (ymax-ymin) * random()))
if pGeom.intersects(bound):
points.append(pGeom)
i = i + 1
return points
# Get vector layer by name from TOC
def getVectorLayerByName(self, myName):
mc = self.iface.getMapCanvas()
nLayers = mc.layerCount()
for l in range(nLayers):
layer = mc.getZpos(l)
if layer.name() == unicode(myName):
vlayer = QgsVectorLayer(unicode(layer.source()), unicode(myName), unicode(layer.getDataProvider().name()))
if vlayer.isValid():
return vlayer
else:
QMessageBox.information(self, "Generate Centroids", "Vector layer is not valid")
# Get map layer by name from TOC
def getMapLayerByName(self, myName):
mc = self.iface.getMapCanvas()
nLayers = mc.layerCount()
for l in range(nLayers):
layer = mc.getZpos(l)
if layer.name() == unicode(myName):
if layer.isValid():
return layer
# Retrieve the field map of a vector layer
def getFieldList(self, vlayer):
fProvider = vlayer.getDataProvider()
feat = QgsFeature()
allAttrs = fProvider.allAttributesList()
fProvider.select(allAttrs)
myFields = fProvider.fields()
return myFields
def randomize(self, inLayer, outPath, minimum, design, value, progressBar):
outFeat = QgsFeature()
points = self.loopThruPolygons(inLayer, value, design, progressBar)
fields = { 0 : QgsField("ID", QVariant.Int) }
check = QFile(outPath)
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile(outPath):
print "Problem"
return
writer = QgsVectorFileWriter(outPath, "CP1250", fields, QGis.WKBPoint, None)
#writer = QgsVectorFileWriter(unicode(outPath), "CP1250", fields, QGis.WKBPoint, None)
idVar = 0
count = 70.00
add = 30.00 / len(points)
for i in points:
outFeat.setGeometry(i)
outFeat.addAttribute(0, QVariant(idVar))
writer.addFeature(outFeat)
idVar = idVar + 1
count = count + add
progressBar.setValue(count)
del writer
#
def loopThruPolygons(self, inLayer, numRand, design, progressBar):
sProvider = inLayer.getDataProvider()
sAllAttrs = sProvider.allAttributesList()
sProvider.select(sAllAttrs)
sFeat = QgsFeature()
sGeom = QgsGeometry()
sPoints = []
if design == "field":
for (i, attr) in sProvider.fields().iteritems():
if (unicode(numRand) == attr.name()): index = i #get input field index
count = 10.00
add = 60.00 / sProvider.featureCount()
while sProvider.getNextFeature(sFeat):
sGeom = sFeat.geometry()
if design == "density":
sDistArea = QgsDistanceArea()
value = int(round(numRand * sDistArea.measure(sGeom)))
elif design == "field":
sAtMap = sFeat.attributeMap()
value = sAtMap[index].toInt()[0]
else:
value = numRand
sExt = sGeom.boundingBox()
sPoints.extend(self.simpleRandom(value, sGeom, sExt.xMin(), sExt.xMax(), sExt.yMin(), sExt.yMax()))
count = count + add
progressBar.setValue(count)
return sPoints
|
fbossy/SickRage
|
refs/heads/master
|
lib/hachoir_core/stream/output.py
|
74
|
from cStringIO import StringIO
from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN
from hachoir_core.bits import long2raw
from hachoir_core.stream import StreamError
from errno import EBADF
MAX_READ_NBYTES = 2 ** 16
class OutputStreamError(StreamError):
pass
class OutputStream(object):
def __init__(self, output, filename=None):
self._output = output
self._filename = filename
self._bit_pos = 0
self._byte = 0
def _getFilename(self):
return self._filename
filename = property(_getFilename)
def writeBit(self, state, endian):
assert endian in (BIG_ENDIAN, LITTLE_ENDIAN) # middle endian not yet supported
if self._bit_pos == 7:
self._bit_pos = 0
if state:
if endian is BIG_ENDIAN:
self._byte |= 1
else:
self._byte |= 128
self._output.write(chr(self._byte))
self._byte = 0
else:
if state:
if endian is BIG_ENDIAN:
self._byte |= (1 << self._bit_pos)
else:
self._byte |= (1 << (7-self._bit_pos))
self._bit_pos += 1
def writeBits(self, count, value, endian):
assert endian in (BIG_ENDIAN, LITTLE_ENDIAN) # middle endian not yet supported
assert 0 <= value < 2**count
# Feed bits to align to byte address
if self._bit_pos != 0:
n = 8 - self._bit_pos
if n <= count:
count -= n
if endian is BIG_ENDIAN:
self._byte |= (value >> count)
value &= ((1 << count) - 1)
else:
self._byte |= (value & ((1 << n)-1)) << self._bit_pos
value >>= n
self._output.write(chr(self._byte))
self._bit_pos = 0
self._byte = 0
else:
if endian is BIG_ENDIAN:
self._byte |= (value << (8-self._bit_pos-count))
else:
self._byte |= (value << self._bit_pos)
self._bit_pos += count
return
# Write byte per byte
while 8 <= count:
count -= 8
if endian is BIG_ENDIAN:
byte = (value >> count)
value &= ((1 << count) - 1)
else:
byte = (value & 0xFF)
value >>= 8
self._output.write(chr(byte))
# Keep last bits
assert 0 <= count < 8
self._bit_pos = count
if 0 < count:
assert 0 <= value < 2**count
if endian is BIG_ENDIAN:
self._byte = value << (8-count)
else:
self._byte = value
else:
assert value == 0
self._byte = 0
def writeInteger(self, value, signed, size_byte, endian):
if signed:
value += 1 << (size_byte*8 - 1)
raw = long2raw(value, endian, size_byte)
self.writeBytes(raw)
def copyBitsFrom(self, input, address, nb_bits, endian):
if (nb_bits % 8) == 0:
self.copyBytesFrom(input, address, nb_bits/8)
else:
# Arbitrary limit (we should use a buffer, like copyBytesFrom(),
# but that raises endianness problems)
assert nb_bits <= 128
data = input.readBits(address, nb_bits, endian)
self.writeBits(nb_bits, data, endian)
def copyBytesFrom(self, input, address, nb_bytes):
if (address % 8):
raise OutputStreamError("Unable to copy bytes with address with bit granularity")
buffer_size = 1 << 12 # 8192 (8 KB)
while 0 < nb_bytes:
# Compute buffer size
if nb_bytes < buffer_size:
buffer_size = nb_bytes
# Read
data = input.readBytes(address, buffer_size)
# Write
self.writeBytes(data)
# Move address
address += buffer_size*8
nb_bytes -= buffer_size
def writeBytes(self, bytes):
if self._bit_pos != 0:
raise NotImplementedError()
self._output.write(bytes)
def readBytes(self, address, nbytes):
"""
Read bytes from the stream at the specified address (in bits).
The address has to be a multiple of 8.
nbytes has to be in 1..MAX_READ_NBYTES (64 KB).
This method is only supported for StringOutputStream (not for
FileOutputStream).
Return the read bytes as a byte string.
"""
assert (address % 8) == 0
assert (1 <= nbytes <= MAX_READ_NBYTES)
self._output.flush()
oldpos = self._output.tell()
try:
self._output.seek(0)
try:
return self._output.read(nbytes)
except IOError, err:
if err[0] == EBADF:
raise OutputStreamError("Stream doesn't support read() operation")
finally:
self._output.seek(oldpos)
def StringOutputStream():
"""
Create an output stream into a string.
"""
data = StringIO()
return OutputStream(data)
def FileOutputStream(filename, real_filename=None):
"""
Create an output stream into a file with the given name.
Filename has to be unicode, whereas the (optional) real_filename can be str.
"""
assert isinstance(filename, unicode)
if not real_filename:
real_filename = filename
output = open(real_filename, 'wb')
return OutputStream(output, filename=filename)
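# Minimal usage sketch (illustrative only, never called by the library): write a
# couple of values into an in-memory stream and read the raw bytes back. The
# values are arbitrary.
def _string_stream_example():
    stream = StringOutputStream()
    stream.writeBits(16, 0x1234, BIG_ENDIAN) # two byte-aligned bytes
    stream.writeInteger(42, False, 4, LITTLE_ENDIAN) # 42 encoded on 4 little-endian bytes
    return stream.readBytes(0, 6)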
|
sagenschneider/FrameworkBenchmarks
|
refs/heads/master
|
frameworks/Python/responder/app.py
|
13
|
import asyncio
import asyncpg
import os
import responder
import jinja2
from random import randint
from operator import itemgetter
READ_ROW_SQL = 'SELECT "randomnumber" FROM "world" WHERE id = $1'
WRITE_ROW_SQL = 'UPDATE "world" SET "randomnumber"=$1 WHERE id=$2'
ADDITIONAL_ROW = [0, 'Additional fortune added at request time.']
async def setup_database():
global connection_pool
connection_pool = await asyncpg.create_pool(
user=os.getenv('PGUSER', 'benchmarkdbuser'),
password=os.getenv('PGPASS', 'benchmarkdbpass'),
database='hello_world',
host='tfb-database',
port=5432
)
def load_fortunes_template():
path = os.path.join('templates', 'fortune.html')
with open(path, 'r') as template_file:
template_text = template_file.read()
return jinja2.Template(template_text)
def get_num_queries(request):
try:
query_string = request.params['queries']
query_count = int(query_string)
except (KeyError, IndexError, ValueError):
return 1
if query_count < 1:
return 1
if query_count > 500:
return 500
return query_count
connection_pool = None
sort_fortunes_key = itemgetter(1)
template = load_fortunes_template()
loop = asyncio.get_event_loop()
loop.run_until_complete(setup_database())
app = responder.API()
@app.route('/json')
def json_serialization(req, resp):
resp.media = {'message': 'Hello, world!'}
@app.route('/db')
async def single_database_query(req, resp):
row_id = randint(1, 10000)
async with connection_pool.acquire() as connection:
number = await connection.fetchval(READ_ROW_SQL, row_id)
resp.media = {'id': row_id, 'randomNumber': number}
@app.route('/queries')
async def multiple_database_queries(req, resp):
num_queries = get_num_queries(req)
row_ids = [randint(1, 10000) for _ in range(num_queries)]
worlds = []
async with connection_pool.acquire() as connection:
statement = await connection.prepare(READ_ROW_SQL)
for row_id in row_ids:
number = await statement.fetchval(row_id)
worlds.append({'id': row_id, 'randomNumber': number})
resp.media = worlds
@app.route('/fortunes')
async def fortunes(req, resp):
fortune_list = []
async with connection_pool.acquire() as connection:
fortune_list = await connection.fetch('SELECT * FROM Fortune')
fortune_list.append(ADDITIONAL_ROW)
fortune_list.sort(key=sort_fortunes_key)
resp.headers['Content-Type'] = "text/html;charset=utf-8"
resp.content = template.render(fortunes=fortune_list)
@app.route('/updates')
async def database_updates(req, resp):
num_queries = get_num_queries(req)
updates = [(randint(1, 10000), randint(1, 10000)) for _ in range(num_queries)]
worlds = [{'id': row_id, 'randomNumber': number} for row_id, number in updates]
async with connection_pool.acquire() as connection:
statement = await connection.prepare(READ_ROW_SQL)
for row_id, number in updates:
await statement.fetchval(row_id)
await connection.executemany(WRITE_ROW_SQL, updates)
resp.media = worlds
@app.route('/plaintext')
def plaintext(req, resp):
resp.headers['Content-Type'] = "text/plain"
resp.text = "Hello, world!"
"""
if __name__ == '__main__':
app.run()
"""
|
dbarobin/google-mysql-tools
|
refs/heads/master
|
pylib/trickle_lib.py
|
4
|
# Copyright 2006 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library to allow for long running, slow operations.
Intended mainly for database operations, this library provides a
skeleton to safely execute a given set of operations such that the
runtime is a fraction of realtime (such as N% utilization). It is
designed to keep track of its position and be restartable, though that
is dependent upon the class that subclasses TrickledOperation.
"""
__author__ = 'bbiskebo@google.com (Brian Biskeborn)'
# Original author: chip@google.com (Chip Turner)
import logging
import time
class TrickledOperation(object):
"""The class representing both how a trickle operation is performed and
the state associated with a given operation.
Users of this class will subclass it and implement the protected
methods _SetupTrickle, _Finished, _PerformTrickle, and
_FinalizeTrickle. These methods will be called during a trickle
operation such that the size of the batch passed to _PerformTrickle
tries to be exactly utilization_percent of each cycle_time.
Specifying utilization_percent=100 turns off all sleeping, and
does the work at maximum speed.
"""
def __init__(self, utilization_percent, cycle_time):
"""Constructor. Subclass and store operation-specific state here.
Args:
utilization_percent: percent of time to spend in _PerformTrickle
cycle_time: interval over which utilization_percent is
calculated per run
"""
self._utilization_fraction = utilization_percent / 100.0
self._cycle_time = cycle_time
self._batch_size = 1 # Start maximally cautious (single statement)
# and increase batch size as we can
self._batch_size_limit = 1000000
self.verbose = True
def _SetupTrickle(self):
"""A setup method, invoked before a trickle loop is run.
It is valid to execute multiple loops, and this method will be
invoked for each. Useful for finding the next starting position.
"""
raise NotImplementedError('call to pure virtual function')
def _FinalizeTrickle(self):
"""A completion method copied after a series of trickle loops.
Intended to finish up state, such as the final 'slosh' rows of a
continual copy loop.
"""
raise NotImplementedError('call to pure virtual function')
def _Finished(self):
"""Called to determine if a trickle is complete."""
raise NotImplementedError('call to pure virtual function')
def _PerformTrickle(self, batch_size):
"""The method invoked to perform the actual trickle operation.
This method will be invoked multiple times and is passed the size
of a batch to execute, which will vary depending upon the runtime
of the previous batch.
Args:
batch_size: size of the current batch (subclass dependent)
Returns:
number of items processed (usually batch_size)
"""
raise NotImplementedError('call to pure virtual function')
def _GetProgress(self):
"""Called to fetch progress information
This method is intended to be overridden by inheriting classes
that can provide data about their progress through the trickle.
Args:
none
Returns:
String representation of current progress state or None if unknown
"""
return None
def SetBatchSizeLimit(self, n):
"""Set the maximum batch_size that will ever be submitted by Trickle().
Intended for operations that need to be artificially slowed down in
excess of what the throttler would have done. For instance, when
replicas are drastically slower than the primary for some reason, which
the throttler will be unable to detect.
Args:
n: How many queries may be run in a batch. Values above 1000000 are
probably ill-advised.
"""
self._batch_size_limit = n
def Trickle(self):
"""Perform the actual trickle operation.
This function will loop until self._Finished() returns true. It
calls the above methods in this order:
self._SetupTrickle()
while not self._Finished():
...
self._PerformTrickle(self, batch_size)
...
self._FinalizeTrickle()
Args:
None
Returns:
None
"""
self._SetupTrickle()
# track the last ten cycles worth of copy rates
copy_rates = [0] * 10
copy_rate_idx = 0
# also track average copies over this invocation's lifetime
rows_copied = 0
start_time = time.time()
while not self._Finished():
then = time.time()
batch_size = self._batch_size
rowcount = self._PerformTrickle(batch_size)
# Increase or decrease batch size to get the target utilization
# percentage at the given cycle time. At all times, we're cautious about
# floating point rounding effects and the like.
time_delta = time.time() - then
ideal_delta = self._cycle_time * self._utilization_fraction
if time_delta > 2.0 * ideal_delta:
# If our utilization fraction is way too high, we want to decrease our
# batch size fairly drastically, but we don't want to chop things to
# nothing in case what we saw was a transient blip. Note that if we get
# here, either (a) we've seen enough short statements to increase our
# batch size from its initial value of 1 before hitting this, or (b) a
# batch size of 1 is still way too big, in which case we modulate cycle
# time below instead.
self._batch_size = max(int(batch_size/2), 1)
elif time_delta * 1.5 < ideal_delta:
# We don't want to increase batch size quite as drastically
self._batch_size = max(int(batch_size * 1.5), batch_size + 1)
else:
# batch_size is between 2x and 0.67x target. Modulate it directly to
# the target value.
self._batch_size = max(int(batch_size * ideal_delta / time_delta), 1)
# Rev limiter in case an operation is a no-op by accident.
self._batch_size = min(self._batch_size, self._batch_size_limit)
# How long are we going to sleep this time? At least the rest of the
# cycle time, longer if utilization would otherwise be too high, and
# at least one second regardless but no more than 2x the cycle time.
if self._utilization_fraction < 1.0:
sleep_time = min(
max(self._cycle_time - time_delta,
(time_delta / self._utilization_fraction) - time_delta,
1),
2 * self._cycle_time)
else:
# But if running with 100% utilization, don't sleep at all.
sleep_time = 0.0
# update average copy rate
this_batch_rate = batch_size / (sleep_time + time_delta)
copy_rates[copy_rate_idx] = this_batch_rate
copy_rate_idx = (copy_rate_idx + 1) % len(copy_rates)
current_rate_avg = sum(copy_rates) / float(len(copy_rates))
rows_copied += rowcount
if self.verbose:
self._LogStatus(batch_size, start_time, time_delta, sleep_time,
current_rate_avg, rows_copied, copy_rate_idx)
time.sleep(sleep_time)
if self.verbose:
self._LogFinish(rows_copied, start_time)
self._FinalizeTrickle()
def _LogStatus(self, batch_size, start_time, time_delta, sleep_time,
current_rate_avg, rows_copied, copy_rate_idx):
progress = self._GetProgress()
if progress:
progress = ', ' + progress
else:
progress = ''
logging.info('batch of %d in %.2f s%s, sleeping %.2f s'
% (batch_size, time_delta, progress, sleep_time))
logging.info('util %.2f, new batch size %d '
'(%.2f current, %.2f avg rows/sec)'
% (time_delta / (time_delta + sleep_time),
self._batch_size,
current_rate_avg,
rows_copied / (time.time() - start_time)))
def _LogFinish(self, rows_copied, start_time):
logging.info('Done: %.2f avg rows/sec',
(rows_copied / (time.time() - start_time)))
class GeneratorOperation(TrickledOperation):
"""Adapts blocking functions so they can run within trickle_lib.
The adapter only requires that users insert a 'yield' statement after each
entry in the batch has been processed.
"""
def __init__(self, generator, utilization_percent, cycle_time):
"""Constructor.
Args:
generator: A generator object that does work each time it is iterated over.
utilization_percent: An int, percent of time to spend in _PerformTrickle.
cycle_time: interval over which utilization_percent is
calculated per run
"""
TrickledOperation.__init__(self, utilization_percent, cycle_time)
self._generator = generator
self._finished = False
def _SetupTrickle(self):
pass
def _FinalizeTrickle(self):
pass
def _Finished(self):
return self._finished
def _PerformTrickle(self, batch_size):
processed = 0
try:
for _ in xrange(batch_size):
self._generator.next()
processed += 1
except StopIteration:
self._finished = True
return processed
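# A minimal usage sketch (illustrative only; `rows` and the per-row work are
# hypothetical, not part of this module): wrap the work in a generator that
# yields once per processed entry, then let Trickle() pace the batches.
def _example_trickled_copy(rows, utilization_percent=20, cycle_time=5):
  def _copy_rows():
    for _ in rows:
      # hypothetical per-row work on the current element goes here
      yield
  GeneratorOperation(_copy_rows(), utilization_percent, cycle_time).Trickle()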
|
yglazko/socorro
|
refs/heads/master
|
socorro/unittest/external/postgresql/test_setupdb_app.py
|
9
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mock
from psycopg2 import ProgrammingError
import psycopg2
from .unittestbase import PostgreSQLTestCase
from nose.plugins.attrib import attr
from nose.plugins.skip import SkipTest
from nose.tools import ok_, eq_
from socorro.external.postgresql.setupdb_app import SocorroDBApp
from socorro.unittest.testbase import TestCase
from configman import ConfigurationManager
from configman.dotdict import DotDict
class NoInheritanceCheatSocorroDBApp(SocorroDBApp):
def __init__(self, config):
self.config = config
class TestConnectionContext(TestCase):
def test_create_connection_url_no_super(self):
"""from PG Docs:
postgresql://[user[:password]@][netloc][:port][/dbname]"""
test_cases_no_super = (
(
{
'database_hostname': 'host01',
'database_name': 'name',
'database_port': 'port',
'database_username': 'user',
'database_password': 'password',
},
"postgresql://user:password@host01:port/name"
),
(
{
'database_hostname': 'host02',
'database_name': 'name',
'database_port': 'port',
'database_username': 'user',
'database_password': '',
},
"postgresql://user@host02:port/name"
),
(
{
'database_hostname': 'host03',
'database_name': 'name',
'database_port': 'port',
'database_username': 'user',
},
"postgresql://user@host03:port/name"
),
(
{
'database_hostname': 'host04',
'database_name': '',
'database_port': 5432,
'database_username': 'user',
},
"postgresql://user@host04:5432"
),
(
{
'database_hostname': 'host04',
'database_name': '',
'database_port': 5432,
'database_username': 'user',
},
"postgresql://user@host04:5432"
),
)
for a_config, expected_result in test_cases_no_super:
setup_app = NoInheritanceCheatSocorroDBApp(a_config)
eq_(
setup_app.create_connection_url(
database_name=a_config.get('database_name', ''),
username=a_config.get('database_username', ''),
password=a_config.get('database_password', '')
),
expected_result
)
@attr(integration='postgres')
class IntegrationTestSetupDB(PostgreSQLTestCase):
def _get_connection(self, database_name, DSN):
if not database_name:
database_name = DSN['database_name']
dsn = (
'host=%(database_hostname)s '
'dbname=%(database_name)s '
'user=%(database_username)s '
'password=%(database_password)s' %
dict(DSN, database_name=database_name)
)
#print 'DEBUG', dsn
return psycopg2.connect(dsn)
def _drop_database(self):
conn = self._get_connection('template1', self.super_dsn)
cursor = conn.cursor()
# DROP DATABASE cannot run inside a transaction, so switch to autocommit
conn.set_isolation_level(0)
try:
cursor.execute('DROP DATABASE %s' % self.dsn['database_name'])
except ProgrammingError:
pass
conn.set_isolation_level(1)
conn.close()
def setUp(self):
super(IntegrationTestSetupDB, self).setUp()
config_manager = self._setup_config_manager({'dropdb': True})
with config_manager.context() as config:
self.dsn = {
"database_hostname": config.database_hostname,
"database_name": config.database_name,
"database_username": config.database_username,
"database_password": config.database_password
}
self.super_dsn = {
"database_hostname": config.database_hostname,
"database_name": config.database_name,
"database_username": config.database_superusername,
"database_password": config.database_superuserpassword
}
self._drop_database()
def _setup_config_manager(self, extra_value_source=None):
if not extra_value_source:
extra_value_source = {}
mock_logging = mock.Mock()
required_config = SocorroDBApp.required_config
required_config.add_option('logger', default=mock_logging)
# We manually set the database_name to something deliberately
# different from all other integration tests. This way we can have
# tight control over its creation and destruction without affecting
# the other tests.
required_config.database_name = 'soccoro_integration_test_setupdb_only'
required_config.database_hostname = self.config.database_hostname
config_manager = ConfigurationManager(
[required_config,
],
app_name='setupdb',
app_description=__doc__,
values_source_list=[{
'logger': mock_logging,
}, extra_value_source],
argv_source=[]
)
return config_manager
def test_run_setupdb_app(self):
# this really touches the DB and causes problems if you do not
# have a superuser name/pass that match the default. Disable
# this until there's a way to override. Not sure if this is
# worth testing here anyway (we have other setupdb_app tests)
raise SkipTest
config_manager = self._setup_config_manager({'dropdb': True})
with config_manager.context() as config:
db = SocorroDBApp(config)
db.main()
# we can't know exactly because it would be tedious to have to
# expect an exact amount of created tables and views so we just
# expect it to be a relatively large number
conn = self._get_connection(None, self.dsn)
cursor = conn.cursor()
cursor.execute("""
select count(relname) from pg_class
where relkind='r' and relname NOT ilike 'pg_%'
""")
count_tables, = cursor.fetchone()
ok_(count_tables > 50)
cursor.execute("""
select count(relname) from pg_class
where relkind='v' and relname NOT ilike 'pg_%'
""")
count_views, = cursor.fetchone()
ok_(count_views > 50)
|
aspaas/ion
|
refs/heads/master
|
contrib/wallettools/walletunlock.py
|
1
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:12700")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
anbangleo/NlsdeWeb
|
refs/heads/master
|
Python-3.6.0/Lib/sqlite3/test/transactions.py
|
2
|
#-*- coding: iso-8859-1 -*-
# pysqlite2/test/transactions.py: tests transactions
#
# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import os, unittest
import sqlite3 as sqlite
def get_db_path():
return "sqlite_testdb"
class TransactionTests(unittest.TestCase):
def setUp(self):
try:
os.remove(get_db_path())
except OSError:
pass
self.con1 = sqlite.connect(get_db_path(), timeout=0.1)
self.cur1 = self.con1.cursor()
self.con2 = sqlite.connect(get_db_path(), timeout=0.1)
self.cur2 = self.con2.cursor()
def tearDown(self):
self.cur1.close()
self.con1.close()
self.cur2.close()
self.con2.close()
try:
os.unlink(get_db_path())
except OSError:
pass
def CheckDMLDoesNotAutoCommitBefore(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.cur1.execute("create table test2(j)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 0)
def CheckInsertStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 0)
def CheckUpdateStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("update test set i=6")
self.cur2.execute("select i from test")
res = self.cur2.fetchone()[0]
self.assertEqual(res, 5)
def CheckDeleteStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("delete from test")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
def CheckReplaceStartsTransaction(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.commit()
self.cur1.execute("replace into test(i) values (6)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
self.assertEqual(res[0][0], 5)
def CheckToggleAutoCommit(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
self.con1.isolation_level = None
self.assertEqual(self.con1.isolation_level, None)
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
self.con1.isolation_level = "DEFERRED"
self.assertEqual(self.con1.isolation_level , "DEFERRED")
self.cur1.execute("insert into test(i) values (5)")
self.cur2.execute("select i from test")
res = self.cur2.fetchall()
self.assertEqual(len(res), 1)
@unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 2),
'test hangs on sqlite versions older than 3.2.2')
def CheckRaiseTimeout(self):
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
with self.assertRaises(sqlite.OperationalError):
self.cur2.execute("insert into test(i) values (5)")
@unittest.skipIf(sqlite.sqlite_version_info < (3, 2, 2),
'test hangs on sqlite versions older than 3.2.2')
def CheckLocking(self):
"""
This tests the improved concurrency with pysqlite 2.3.4. You needed
to roll back con2 before you could commit con1.
"""
self.cur1.execute("create table test(i)")
self.cur1.execute("insert into test(i) values (5)")
with self.assertRaises(sqlite.OperationalError):
self.cur2.execute("insert into test(i) values (5)")
# NO self.con2.rollback() HERE!!!
self.con1.commit()
def CheckRollbackCursorConsistency(self):
"""
Checks if cursors on the connection are set into a "reset" state
when a rollback is done on the connection.
"""
con = sqlite.connect(":memory:")
cur = con.cursor()
cur.execute("create table test(x)")
cur.execute("insert into test(x) values (5)")
cur.execute("select 1 union select 2 union select 3")
con.rollback()
with self.assertRaises(sqlite.InterfaceError):
cur.fetchall()
class SpecialCommandTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.cur = self.con.cursor()
def CheckDropTable(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("drop table test")
def CheckPragma(self):
self.cur.execute("create table test(i)")
self.cur.execute("insert into test(i) values (5)")
self.cur.execute("pragma count_changes=1")
def tearDown(self):
self.cur.close()
self.con.close()
class TransactionalDDL(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
def CheckDdlDoesNotAutostartTransaction(self):
# For backwards compatibility reasons, DDL statements should not
# implicitly start a transaction.
self.con.execute("create table test(i)")
self.con.rollback()
result = self.con.execute("select * from test").fetchall()
self.assertEqual(result, [])
def CheckTransactionalDDL(self):
# You can achieve transactional DDL by issuing a BEGIN
# statement manually.
self.con.execute("begin")
self.con.execute("create table test(i)")
self.con.rollback()
with self.assertRaises(sqlite.OperationalError):
self.con.execute("select * from test")
def tearDown(self):
self.con.close()
def suite():
default_suite = unittest.makeSuite(TransactionTests, "Check")
special_command_suite = unittest.makeSuite(SpecialCommandTests, "Check")
ddl_suite = unittest.makeSuite(TransactionalDDL, "Check")
return unittest.TestSuite((default_suite, special_command_suite, ddl_suite))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
kifcaliph/odoo
|
refs/heads/8.0
|
addons/mass_mailing/models/mail_thread.py
|
220
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import logging
import re
from openerp.addons.mail.mail_message import decode
from openerp.addons.mail.mail_thread import decode_header
from openerp.osv import osv
_logger = logging.getLogger(__name__)
class MailThread(osv.AbstractModel):
""" Update MailThread to add the feature of bounced emails and replied emails
in message_process. """
_name = 'mail.thread'
_inherit = ['mail.thread']
def message_route_check_bounce(self, cr, uid, message, context=None):
""" Override to verify that the email_to is the bounce alias. If it is the
case, log the bounce, set the parent and related document as bounced and
return False to end the routing process. """
bounce_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.bounce.alias", context=context)
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
# 0. Verify whether this is a bounced email (wrong destination,...) -> use it to collect data, such as dead leads
if bounce_alias in email_to:
# Bounce regex
# Typical form of bounce is bounce_alias-128-crm.lead-34@domain
# group(1) = the mail ID; group(2) = the model (if any); group(3) = the record ID
bounce_re = re.compile("%s-(\d+)-?([\w.]+)?-?(\d+)?" % re.escape(bounce_alias), re.UNICODE)
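# e.g. (illustrative values only): with bounce_alias 'bounce', an email_to of
# 'bounce-128-crm.lead-34@example.com' yields groups ('128', 'crm.lead', '34')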
bounce_match = bounce_re.search(email_to)
if bounce_match:
bounced_model, bounced_thread_id = None, False
bounced_mail_id = bounce_match.group(1)
stat_ids = self.pool['mail.mail.statistics'].set_bounced(cr, uid, mail_mail_ids=[bounced_mail_id], context=context)
for stat in self.pool['mail.mail.statistics'].browse(cr, uid, stat_ids, context=context):
bounced_model = stat.model
bounced_thread_id = stat.res_id
_logger.info('Routing mail from %s to %s with Message-Id %s: bounced mail from mail %s, model: %s, thread_id: %s',
email_from, email_to, message_id, bounced_mail_id, bounced_model, bounced_thread_id)
if bounced_model and bounced_model in self.pool and hasattr(self.pool[bounced_model], 'message_receive_bounce') and bounced_thread_id:
self.pool[bounced_model].message_receive_bounce(cr, uid, [bounced_thread_id], mail_id=bounced_mail_id, context=context)
return False
return True
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
if not self.message_route_check_bounce(cr, uid, message, context=context):
return []
return super(MailThread, self).message_route(cr, uid, message, message_dict, model, thread_id, custom_values, context)
def message_receive_bounce(self, cr, uid, ids, mail_id=None, context=None):
"""Called by ``message_process`` when a bounce email (such as Undelivered
Mail Returned to Sender) is received for an existing thread. The default
behavior is to check if an integer ``message_bounce`` column exists.
If it is the case, its content is incremented. """
if 'message_bounce' in self._fields:
for obj in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [obj.id], {'message_bounce': obj.message_bounce + 1}, context=context)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
""" Override to update the parent mail statistics. The parent is found
by using the References header of the incoming message and looking for
matching message_id in mail.mail.statistics. """
if message.get('References'):
message_ids = [x.strip() for x in decode(message['References']).split()]
self.pool['mail.mail.statistics'].set_replied(cr, uid, mail_message_ids=message_ids, context=context)
return super(MailThread, self).message_route_process(cr, uid, message, message_dict, routes, context=context)
|
marcossilvadecastro/googletest
|
refs/heads/master
|
test/gtest_list_tests_unittest.py
|
1898
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
ibinti/intellij-community
|
refs/heads/master
|
python/lib/Lib/modjy/modjy_impl.py
|
109
|
###
#
# Copyright Alan Kennedy.
#
# You may contact the copyright holder at this uri:
#
# http://www.xhaus.com/contact/modjy
#
# The licence under which this code is released is the Apache License v2.0.
#
# The terms and conditions of this license are listed in a file contained
# in the distribution that also contained this file, under the name
# LICENSE.txt.
#
# You may also read a copy of the license at the following web address.
#
# http://modjy.xhaus.com/LICENSE.txt
#
###
import types
import sys
from modjy_exceptions import *
class modjy_impl:
def deal_with_app_return(self, environ, start_response_callable, app_return):
self.log.debug("Processing app return type: %s" % str(type(app_return)))
if isinstance(app_return, types.StringTypes):
raise ReturnNotIterable("Application returned object that was not an iterable: %s" % str(type(app_return)))
if type(app_return) is types.FileType:
pass # TBD: What to do here? can't call fileno()
if hasattr(app_return, '__len__') and callable(app_return.__len__):
expected_pieces = app_return.__len__()
else:
expected_pieces = -1
try:
try:
ix = 0
for next_piece in app_return:
if not isinstance(next_piece, types.StringTypes):
raise NonStringOutput("Application returned iterable containing non-strings: %s" % str(type(next_piece)))
if ix == 0:
# The application may have called start_response in the first iteration
if not start_response_callable.called:
raise StartResponseNotCalled("Start_response callable was never called.")
if not start_response_callable.content_length \
and expected_pieces == 1 \
and start_response_callable.write_callable.num_writes == 0:
# Take the length of the first piece
start_response_callable.set_content_length(len(next_piece))
start_response_callable.write_callable(next_piece)
ix += 1
if ix == expected_pieces:
break
if expected_pieces != -1 and ix != expected_pieces:
raise WrongLength("Iterator len() was wrong. Expected %d pieces: got %d" % (expected_pieces, ix) )
except AttributeError, ax:
if str(ax) == "__getitem__":
raise ReturnNotIterable("Application returned object that was not an iterable: %s" % str(type(app_return)))
else:
raise ax
except TypeError, tx:
raise ReturnNotIterable("Application returned object that was not an iterable: %s" % str(type(app_return)))
except ModjyException, mx:
raise mx
except Exception, x:
raise ApplicationException(x)
finally:
if hasattr(app_return, 'close') and callable(app_return.close):
app_return.close()
def init_impl(self):
self.do_j_env_params()
def add_packages(self, package_list):
packages = [p.strip() for p in package_list.split(';')]
for p in packages:
self.log.info("Adding java package %s to jython" % p)
sys.add_package(p)
def add_classdirs(self, classdir_list):
classdirs = [cd.strip() for cd in classdir_list.split(';')]
for cd in classdirs:
self.log.info("Adding directory %s to jython class file search path" % cd)
sys.add_classdir(cd)
def add_extdirs(self, extdir_list):
extdirs = [ed.strip() for ed in extdir_list.split(';')]
for ed in extdirs:
self.log.info("Adding directory %s for .jars and .zips search path" % ed)
sys.add_extdir(self.expand_relative_path(ed))
def do_j_env_params(self):
if self.params['packages']:
self.add_packages(self.params['packages'])
if self.params['classdirs']:
self.add_classdirs(self.params['classdirs'])
if self.params['extdirs']:
self.add_extdirs(self.params['extdirs'])
|
ychen820/microblog
|
refs/heads/master
|
y/google-cloud-sdk/platform/google_appengine/lib/webapp2-2.5.1/webapp2_extras/appengine/auth/models.py
|
49
|
# -*- coding: utf-8 -*-
"""
webapp2_extras.appengine.auth.models
====================================
Auth related models.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
import time
try:
from ndb import model
except ImportError: # pragma: no cover
from google.appengine.ext.ndb import model
from webapp2_extras import auth
from webapp2_extras import security
class Unique(model.Model):
"""A model to store unique values.
The only purpose of this model is to "reserve" values that must be unique
within a given scope, as a workaround because datastore doesn't support
the concept of uniqueness for entity properties.
For example, suppose we have a model `User` with three properties that
must be unique across a given group: `username`, `auth_id` and `email`::
class User(model.Model):
username = model.StringProperty(required=True)
auth_id = model.StringProperty(required=True)
email = model.StringProperty(required=True)
To ensure property uniqueness when creating a new `User`, we first create
`Unique` records for those properties, and if everything goes well we can
save the new `User` record::
@classmethod
def create_user(cls, username, auth_id, email):
# Assemble the unique values for a given class and attribute scope.
uniques = [
'User.username.%s' % username,
'User.auth_id.%s' % auth_id,
'User.email.%s' % email,
]
# Create the unique username, auth_id and email.
success, existing = Unique.create_multi(uniques)
if success:
# The unique values were created, so we can save the user.
user = User(username=username, auth_id=auth_id, email=email)
user.put()
return user
else:
# At least one of the values is not unique.
# Make a list of the property names that failed.
props = [name.split('.', 2)[1] for name in uniques]
raise ValueError('Properties %r are not unique.' % props)
Based on the idea from http://goo.gl/pBQhB
"""
@classmethod
def create(cls, value):
"""Creates a new unique value.
:param value:
The value to be unique, as a string.
The value should include the scope in which the value must be
unique (ancestor, namespace, kind and/or property name).
For example, for a unique property `email` from kind `User`, the
value can be `User.email:me@myself.com`. In this case `User.email`
is the scope, and `me@myself.com` is the value to be unique.
:returns:
True if the unique value was created, False otherwise.
"""
entity = cls(key=model.Key(cls, value))
txn = lambda: entity.put() if not entity.key.get() else None
return model.transaction(txn) is not None
@classmethod
def create_multi(cls, values):
"""Creates multiple unique values at once.
:param values:
A sequence of values to be unique. See :meth:`create`.
:returns:
A tuple (bool, list_of_keys). If all values were created, bool is
True and list_of_keys is empty. If one or more values weren't
created, bool is False and the list contains all the values that
already existed in datastore during the creation attempt.
"""
# Maybe do a preliminary check, before going for transactions?
# entities = model.get_multi(keys)
# existing = [entity.key.id() for entity in entities if entity]
# if existing:
# return False, existing
# Create all records transactionally.
keys = [model.Key(cls, value) for value in values]
entities = [cls(key=key) for key in keys]
func = lambda e: e.put() if not e.key.get() else None
created = [model.transaction(lambda: func(e)) for e in entities]
if created != keys:
# A poor man's "rollback": delete all recently created records.
model.delete_multi(k for k in created if k)
return False, [k.id() for k in keys if k not in created]
return True, []
@classmethod
def delete_multi(cls, values):
"""Deletes multiple unique values at once.
:param values:
A sequence of values to be deleted.
"""
return model.delete_multi(model.Key(cls, v) for v in values)
class UserToken(model.Model):
"""Stores validation tokens for users."""
created = model.DateTimeProperty(auto_now_add=True)
updated = model.DateTimeProperty(auto_now=True)
user = model.StringProperty(required=True, indexed=False)
subject = model.StringProperty(required=True)
token = model.StringProperty(required=True)
@classmethod
def get_key(cls, user, subject, token):
"""Returns a token key.
:param user:
User unique ID.
:param subject:
The subject of the key. Examples:
- 'auth'
- 'signup'
:param token:
Randomly generated token.
:returns:
``model.Key`` containing a string id in the following format:
``{user_id}.{subject}.{token}.``
"""
return model.Key(cls, '%s.%s.%s' % (str(user), subject, token))
@classmethod
def create(cls, user, subject, token=None):
"""Creates a new token for the given user.
:param user:
User unique ID.
:param subject:
The subject of the key. Examples:
- 'auth'
- 'signup'
:param token:
Optionally an existing token may be provided.
If None, a random token will be generated.
:returns:
The newly created :class:`UserToken`.
"""
user = str(user)
token = token or security.generate_random_string(entropy=128)
key = cls.get_key(user, subject, token)
entity = cls(key=key, user=user, subject=subject, token=token)
entity.put()
return entity
@classmethod
def get(cls, user=None, subject=None, token=None):
"""Fetches a user token.
:param user:
User unique ID.
:param subject:
The subject of the key. Examples:
- 'auth'
- 'signup'
:param token:
The existing token to be verified.
:returns:
A :class:`UserToken` or None if the token does not exist.
"""
if user and subject and token:
return cls.get_key(user, subject, token).get()
assert subject and token, \
'subject and token must be provided to UserToken.get().'
return cls.query(cls.subject == subject, cls.token == token).get()
class User(model.Expando):
"""Stores user authentication credentials or authorization ids."""
#: The model used to ensure uniqueness.
unique_model = Unique
#: The model used to store tokens.
token_model = UserToken
created = model.DateTimeProperty(auto_now_add=True)
updated = model.DateTimeProperty(auto_now=True)
# ID for third party authentication, e.g. 'google:username'. UNIQUE.
auth_ids = model.StringProperty(repeated=True)
# Hashed password. Not required because third party authentication
# doesn't use password.
password = model.StringProperty()
def get_id(self):
"""Returns this user's unique ID, which can be an integer or string."""
return self._key.id()
def add_auth_id(self, auth_id):
"""A helper method to add additional auth ids to a User
:param auth_id:
String representing a unique id for the user. Examples:
- own:username
- google:username
:returns:
A tuple (boolean, info). The boolean indicates if the user
was saved. If creation succeeds, ``info`` is the user entity;
otherwise it is a list of duplicated unique properties that
caused creation to fail.
"""
self.auth_ids.append(auth_id)
unique = '%s.auth_id:%s' % (self.__class__.__name__, auth_id)
ok = self.unique_model.create(unique)
if ok:
self.put()
return True, self
else:
return False, ['auth_id']
@classmethod
def get_by_auth_id(cls, auth_id):
"""Returns a user object based on a auth_id.
:param auth_id:
String representing a unique id for the user. Examples:
- own:username
- google:username
:returns:
A user object.
"""
return cls.query(cls.auth_ids == auth_id).get()
@classmethod
def get_by_auth_token(cls, user_id, token):
"""Returns a user object based on a user ID and token.
:param user_id:
The user_id of the requesting user.
:param token:
The token string to be verified.
:returns:
A tuple ``(User, timestamp)``, with a user object and
the token timestamp, or ``(None, None)`` if either was not found.
"""
token_key = cls.token_model.get_key(user_id, 'auth', token)
user_key = model.Key(cls, user_id)
# Use get_multi() to save a RPC call.
valid_token, user = model.get_multi([token_key, user_key])
if valid_token and user:
timestamp = int(time.mktime(valid_token.created.timetuple()))
return user, timestamp
return None, None
@classmethod
def get_by_auth_password(cls, auth_id, password):
"""Returns a user object, validating password.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:returns:
A user object, if found and password matches.
:raises:
``auth.InvalidAuthIdError`` or ``auth.InvalidPasswordError``.
"""
user = cls.get_by_auth_id(auth_id)
if not user:
raise auth.InvalidAuthIdError()
if not security.check_password_hash(password, user.password):
raise auth.InvalidPasswordError()
return user
@classmethod
def validate_token(cls, user_id, subject, token):
"""Checks for existence of a token, given user_id, subject and token.
:param user_id:
User unique ID.
:param subject:
The subject of the key. Examples:
- 'auth'
- 'signup'
:param token:
The token string to be validated.
:returns:
A :class:`UserToken` or None if the token does not exist.
"""
return cls.token_model.get(user=user_id, subject=subject,
token=token) is not None
@classmethod
def create_auth_token(cls, user_id):
"""Creates a new authorization token for a given user ID.
:param user_id:
User unique ID.
:returns:
A string with the authorization token.
"""
return cls.token_model.create(user_id, 'auth').token
@classmethod
def validate_auth_token(cls, user_id, token):
return cls.validate_token(user_id, 'auth', token)
@classmethod
def delete_auth_token(cls, user_id, token):
"""Deletes a given authorization token.
:param user_id:
User unique ID.
:param token:
A string with the authorization token.
"""
cls.token_model.get_key(user_id, 'auth', token).delete()
@classmethod
def create_signup_token(cls, user_id):
entity = cls.token_model.create(user_id, 'signup')
return entity.token
@classmethod
def validate_signup_token(cls, user_id, token):
return cls.validate_token(user_id, 'signup', token)
@classmethod
def delete_signup_token(cls, user_id, token):
cls.token_model.get_key(user_id, 'signup', token).delete()
@classmethod
def create_user(cls, auth_id, unique_properties=None, **user_values):
"""Creates a new user.
:param auth_id:
A string that is unique to the user. Users may have multiple
auth ids. Example auth ids:
- own:username
- own:email@example.com
- google:username
- yahoo:username
The value of `auth_id` must be unique.
:param unique_properties:
Sequence of extra property names that must be unique.
:param user_values:
Keyword arguments to create a new user entity. Since the model is
an ``Expando``, any provided custom properties will be saved.
To hash a plain password, pass a keyword ``password_raw``.
:returns:
A tuple (boolean, info). The boolean indicates if the user
was created. If creation succeeds, ``info`` is the user entity;
otherwise it is a list of duplicated unique properties that
caused creation to fail.
"""
assert user_values.get('password') is None, \
'Use password_raw instead of password to create new users.'
assert not isinstance(auth_id, list), \
'Creating a user with multiple auth_ids is not allowed, ' \
'please provide a single auth_id.'
if 'password_raw' in user_values:
user_values['password'] = security.generate_password_hash(
user_values.pop('password_raw'), length=12)
user_values['auth_ids'] = [auth_id]
user = cls(**user_values)
# Set up unique properties.
uniques = [('%s.auth_id:%s' % (cls.__name__, auth_id), 'auth_id')]
if unique_properties:
for name in unique_properties:
key = '%s.%s:%s' % (cls.__name__, name, user_values[name])
uniques.append((key, name))
ok, existing = cls.unique_model.create_multi(k for k, v in uniques)
if ok:
user.put()
return True, user
else:
properties = [v for k, v in uniques if k in existing]
return False, properties
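# A minimal usage sketch (illustrative only; the auth id, property names and
# values below are hypothetical, not part of this module):
def _example_signup():
    ok, info = User.create_user('own:alice',
                                unique_properties=['email'],
                                email='alice@example.com',
                                password_raw='s3cret')
    if ok:
        # `info` is the newly created User entity; issue an auth token for it.
        return User.create_auth_token(info.get_id())
    # `info` lists the duplicated unique properties, e.g. ['email'].
    return None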
|
askulkarni2/ansible
|
refs/heads/devel
|
docsite/conf.py
|
147
|
# -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
#import sphinx_rtd_theme
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath('_themes'))
VERSION='0.01'
AUTHOR='Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013-2015 Ansible, Inc"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
#exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML'
# Options for HTML output
# -----------------------
html_theme_path = ['_themes']
html_theme = 'srtd'
html_short_title = 'Ansible Documentation'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 1.2 Documentation',
AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
autoclass_content = 'both'
|
CARocha/plasystem
|
refs/heads/master
|
productores/migrations/0002_auto_20170327_0841.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-27 14:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productores', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='ingresosfamilia',
name='respuesta',
field=models.CharField(choices=[('Si', 'Si'), ('No', 'No')], max_length=5),
),
]
|
rxcomm/pyaxo
|
refs/heads/master
|
examples/ratchet_watcher.py
|
2
|
#!/usr/bin/env python
import copy
import os
from pyaxo import Axolotl
name1 = 'Angie'
name2 = 'Barb'
a = Axolotl(name1, dbname='name1.db', dbpassphrase=None)
b = Axolotl(name2, dbname='name2.db', dbpassphrase=None)
a.loadState(name1, name2)
b.loadState(name2, name1)
topic = [' My Name',
'Other Name',
' RK',
' HKs',
' HKr',
' NHKs',
' NHKr',
' CKs',
' CKr',
' DHIs_priv',
' DHIs',
' DHIr',
' DHRs_priv',
' DHRs',
' DHRr',
' CONVid',
' Ns',
' Nr',
' PNs',
' ratchet',
' mode']
def hilite(text, c=False):
attr = []
if c:
attr.append('41')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), text)
data_old = False
os.system('clear')
while True:
print '\x1b[;32m Name: 1 2\x1b[0m'
print '--------------'
a.loadState(name1, name2)
b.loadState(name2, name1)
databases = (a.db, b.db)
data = []
a_chg = False
b_chg = False
for number, database in enumerate(databases):
cur = database.cursor()
cur.execute('SELECT * from conversations')
data += [cur.fetchall()]
if not data_old:
data_old = data
for i in range(len(data[0][0])):
if data[0][0][i] != data_old[0][0][i]: a_chg=True
if data[1][0][i] != data_old[1][0][i]: b_chg=True
if topic[i] == ' mode':
if data[0][0][i] == 1:
var = 'A'
var2 = 'B'
else:
var = 'B'
var2 = 'A'
elif topic[i]==' Ns' or topic[i]==' Nr' or topic[i]==' PNs':
var = data[0][0][i]
var2 = data[1][0][i]
elif topic[i] == ' ratchet':
var = 'F'
var2 = 'F'
if data[0][0][i] == 1:
var = 'T'
elif data[1][0][i] == 1:
var2 = 'T'
else:
var = '*'
var2 = '*'
print topic[i], hilite(var, a_chg), hilite(var2, b_chg)
a_chg = False
b_chg = False
print '--------------'
ans = raw_input('Load new state? ')
if ans=='q' or ans=='n': exit()
os.system('clear')
data_old = data
|
williamHuang5468/StockServer
|
refs/heads/master
|
venv/lib/python3.5/site-packages/wheel/test/test_install.py
|
455
|
# Test wheel.
# The file has the following contents:
# hello.pyd
# hello/hello.py
# hello/__init__.py
# test-1.0.data/data/hello.dat
# test-1.0.data/headers/hello.dat
# test-1.0.data/scripts/hello.sh
# test-1.0.dist-info/WHEEL
# test-1.0.dist-info/METADATA
# test-1.0.dist-info/RECORD
# The root is PLATLIB
# So, some in PLATLIB, and one in each of DATA, HEADERS and SCRIPTS.
import wheel.tool
import wheel.pep425tags
from wheel.install import WheelFile
from tempfile import mkdtemp
import shutil
import os
THISDIR = os.path.dirname(__file__)
TESTWHEEL = os.path.join(THISDIR, 'test-1.0-py2.py3-none-win32.whl')
def check(*path):
return os.path.exists(os.path.join(*path))
def test_install():
tempdir = mkdtemp()
def get_supported():
return list(wheel.pep425tags.get_supported()) + [('py3', 'none', 'win32')]
whl = WheelFile(TESTWHEEL, context=get_supported)
assert whl.supports_current_python(get_supported)
try:
locs = {}
for key in ('purelib', 'platlib', 'scripts', 'headers', 'data'):
locs[key] = os.path.join(tempdir, key)
os.mkdir(locs[key])
whl.install(overrides=locs)
assert len(os.listdir(locs['purelib'])) == 0
assert check(locs['platlib'], 'hello.pyd')
assert check(locs['platlib'], 'hello', 'hello.py')
assert check(locs['platlib'], 'hello', '__init__.py')
assert check(locs['data'], 'hello.dat')
assert check(locs['headers'], 'hello.dat')
assert check(locs['scripts'], 'hello.sh')
assert check(locs['platlib'], 'test-1.0.dist-info', 'RECORD')
finally:
shutil.rmtree(tempdir)
def test_install_tool():
"""Slightly improve coverage of wheel.install"""
wheel.tool.install([TESTWHEEL], force=True, dry_run=True)
|
jotyGill/openpyn-nordvpn
|
refs/heads/master
|
openpyn/api.py
|
1
|
import logging
import sys
from typing import Dict, List
import requests
import verboselogs
from openpyn import filters
logger = logging.getLogger(__package__)
verboselogs.install()
# Uses requests to GET a URL and return the parsed JSON.
def get_json(url) -> Dict:
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36'}
try:
json_response = requests.get(url, headers=headers).json()
except requests.exceptions.HTTPError:
logger.error("Cannot GET the JSON from nordvpn.com, Manually Specify a Server \
using '-s' for example '-s au10'")
sys.exit(1)
except requests.exceptions.RequestException:
logger.error("There was an ambiguous exception, Check Your Network Connection. \
forgot to flush iptables? (openpyn -x)")
sys.exit(1)
return json_response
# Gets JSON data from api.nordvpn.com and filters servers by type, country and area.
def get_data_from_api(
country_code: str, area: str, p2p: bool, dedicated: bool, double_vpn: bool,
tor_over_vpn: bool, anti_ddos: bool, netflix: bool, location: float) -> List:
url = "https://api.nordvpn.com/server"
json_response = get_json(url)
type_filtered_servers = filters.filter_by_type(
json_response, p2p, dedicated, double_vpn, tor_over_vpn, anti_ddos, netflix)
if location:
type_location_filtered = filters.filter_by_location(location, type_filtered_servers)
return type_location_filtered
if country_code != "all": # if "-l" had country code with it. e.g "-l au"
type_country_filtered = filters.filter_by_country(country_code, type_filtered_servers)
if area is None:
return type_country_filtered
type_country_area_filtered = filters.filter_by_area(area, type_country_filtered)
return type_country_area_filtered
return type_filtered_servers
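# A minimal usage sketch (illustrative; all argument values are hypothetical,
# and calling this performs a real network request):
def _example_fetch_au_p2p_servers():
    return get_data_from_api(
        country_code="au", area=None, p2p=True, dedicated=False,
        double_vpn=False, tor_over_vpn=False, anti_ddos=False,
        netflix=False, location=None)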
def list_all_countries() -> None:
countries_mapping = {}
url = "https://api.nordvpn.com/server"
json_response = get_json(url)
for res in json_response:
if res["domain"][:2] not in countries_mapping:
countries_mapping.update({res["domain"][:2]: res["country"]})
for key, val in countries_mapping.items():
print("Full Name : " + val + " Country Code : " + key)
def get_country_code(full_name: str) -> str:
url = "https://api.nordvpn.com/server"
json_response = get_json(url)
for res in json_response:
if res["country"].lower() == full_name.lower():
code = res["domain"][:2].lower()
return code
logger.error("Country Name Not Correct")
sys.exit(1)
|
40223114/w16b_test
|
refs/heads/master
|
static/Brython3.1.3-20150514-095342/Lib/errno.py
|
624
|
"""
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
errorcode= {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}
EPERM=1
ENOENT=2
ESRCH=3
EINTR=4
EIO=5
ENXIO=6
E2BIG=7
ENOEXEC=8
EBADF=9
ECHILD=10
EAGAIN=11
ENOMEM=12
EACCES=13
EFAULT=14
ENOTBLK=15
EBUSY=16
EEXIST=17
EXDEV=18
ENODEV=19
ENOTDIR=20
EISDIR=21
EINVAL=22
ENFILE=23
EMFILE=24
ENOTTY=25
ETXTBSY=26
EFBIG=27
ENOSPC=28
ESPIPE=29
EROFS=30
EMLINK=31
EPIPE=32
EDOM=33
ERANGE=34
EDEADLOCK=35
ENAMETOOLONG=36
ENOLCK=37
ENOSYS=38
ENOTEMPTY=39
ELOOP=40
ENOMSG=42
EIDRM=43
ECHRNG=44
EL2NSYNC=45
EL3HLT=46
EL3RST=47
ELNRNG=48
EUNATCH=49
ENOCSI=50
EL2HLT=51
EBADE=52
EBADR=53
EXFULL=54
ENOANO=55
EBADRQC=56
EBADSLT=57
EBFONT=59
ENOSTR=60
ENODATA=61
ETIME=62
ENOSR=63
ENONET=64
ENOPKG=65
EREMOTE=66
ENOLINK=67
EADV=68
ESRMNT=69
ECOMM=70
EPROTO=71
EMULTIHOP=72
EDOTDOT=73
EBADMSG=74
EOVERFLOW=75
ENOTUNIQ=76
EBADFD=77
EREMCHG=78
ELIBACC=79
ELIBBAD=80
ELIBSCN=81
ELIBMAX=82
ELIBEXEC=83
EILSEQ=84
ERESTART=85
ESTRPIPE=86
EUSERS=87
ENOTSOCK=88
EDESTADDRREQ=89
EMSGSIZE=90
EPROTOTYPE=91
ENOPROTOOPT=92
EPROTONOSUPPORT=93
ESOCKTNOSUPPORT=94
ENOTSUP=95
EPFNOSUPPORT=96
EAFNOSUPPORT=97
EADDRINUSE=98
EADDRNOTAVAIL=99
ENETDOWN=100
ENETUNREACH=101
ENETRESET=102
ECONNABORTED=103
ECONNRESET=104
ENOBUFS=105
EISCONN=106
ENOTCONN=107
ESHUTDOWN=108
ETOOMANYREFS=109
ETIMEDOUT=110
ECONNREFUSED=111
EHOSTDOWN=112
EHOSTUNREACH=113
EALREADY=114
EINPROGRESS=115
ESTALE=116
EUCLEAN=117
ENOTNAM=118
ENAVAIL=119
EISNAM=120
EREMOTEIO=121
EDQUOT=122
ENOMEDIUM=123
EMEDIUMTYPE=124
ECANCELED=125
ENOKEY=126
EKEYEXPIRED=127
EKEYREVOKED=128
EKEYREJECTED=129
EOWNERDEAD=130
ENOTRECOVERABLE=131
ERFKILL=132
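# --- Editor's sketch (not part of the original module): quick demo of the
# errorcode mapping above, translating a numeric errno into its symbolic name.
if __name__ == '__main__':
    import os
    # Typically prints: ENOENT -> No such file or directory
    print(errorcode[ENOENT], '->', os.strerror(ENOENT))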
|
mfherbst/bohrium
|
refs/heads/master
|
core/codegen/argparse_utils.py
|
6
|
import os
import argparse
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
def is_dir(dirname):
"""Checks if a path is an actual directory"""
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
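# --- Editor's sketch (not part of the original file): a minimal example of
# wiring FullPaths and is_dir into an argparse parser; the '--output-dir'
# option name is hypothetical.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="FullPaths / is_dir demo")
    parser.add_argument('--output-dir', action=FullPaths, type=is_dir,
                        help="an existing directory, stored as an absolute path")
    print(parser.parse_args())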
|
Kozea/pygal
|
refs/heads/master
|
demo/moulinrouge/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2016 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
from flask import Flask, render_template, Response, request
import pygal
from pygal.config import Config
from pygal.util import cut
from pygal.etree import etree
from pygal.style import styles, parametric_styles
from base64 import (
urlsafe_b64encode as b64encode, urlsafe_b64decode as b64decode
)
import string
import random
import pickle
def get(type):
from importlib import import_module
module = '.'.join(type.split('.')[:-1])
name = type.split('.')[-1]
return getattr(import_module(module), name)
def random_label():
chars = string.ascii_letters + string.digits + u' àéèçêâäëï'
return ''.join([
random.choice(chars) for i in range(random.randrange(4, 30))
])
def random_value(min=0, max=15):
return random.randrange(min, max, 1)
def create_app():
"""Creates the pygal test web app"""
app = Flask(__name__)
@app.before_request
def before_request():
if request.args.get('etree'):
etree.to_etree()
elif request.args.get('lxml'):
etree.to_lxml()
def _random(data, order):
max = 10**order
min = 10**random.randrange(0, order)
series = []
for i in range(random.randrange(1, 10)):
values = [(
random_value((-max, min)[random.randrange(0, 2)], max),
random_value((-max, min)[random.randrange(0, 2)], max)
) for i in range(data)]
series.append((random_label(), values, {}))
return series
def _random_series(type, data, order):
max = 10**order
min = 10**random.randrange(0, order)
with_secondary = bool(random.randint(0, 1))
series = []
for i in range(random.randrange(1, 10)):
if type == 'Pie':
values = random_value(min, max)
elif type == 'XY':
values = [(
random_value((-max, min)[random.randrange(0, 2)], max),
random_value((-max, min)[random.randrange(0, 2)], max)
) for i in range(data)]
else:
values = [
random_value((-max, min)[random.randrange(1, 2)], max)
for i in range(data)
]
config = {
'secondary': with_secondary and bool(random.randint(0, 1))
}
series.append((random_label(), values, config))
return series
from .tests import get_test_routes
links = get_test_routes(app)
@app.route("/")
def index():
return render_template(
'index.jinja2',
styles=styles,
parametric_styles=parametric_styles,
parametric_colors=(
'#ff5995', '#b6e354', '#feed6c', '#8cedff', '#9e6ffe'
),
links=links,
charts_name=pygal.CHARTS_NAMES
)
@app.route("/svg/<type>/<series>/<config>")
def svg(type, series, config):
graph = get(type)(pickle.loads(b64decode(str(config))))
for title, values, serie_config in pickle.loads(b64decode(
str(series))):
graph.add(title, values, **serie_config)
return graph.render_response()
@app.route("/table/<type>/<series>/<config>")
def table(type, series, config):
graph = get(type)(pickle.loads(b64decode(str(config))))
for title, values, serie_config in pickle.loads(b64decode(
str(series))):
graph.add(title, values, **serie_config)
return graph.render_table()
@app.route("/sparkline/<style>")
@app.route("/sparkline/parameric/<style>/<color>")
def sparkline(style, color=None):
if color is None:
style = styles[style]
else:
style = parametric_styles[style](color)
line = pygal.Line(style=style, pretty_print=True)
line.add('_', [random.randrange(0, 10) for _ in range(25)])
return Response(
line.render_sparkline(height=40), mimetype='image/svg+xml'
)
@app.route("/with/table/<type>")
def with_table(type):
chart = pygal.StackedBar(
disable_xml_declaration=True, x_label_rotation=35
)
chart.title = (
'What Linux distro do you primarily use'
' on your server computers? (Desktop'
' users vs Server Users)'
)
if type == 'series':
chart.add('Debian', [1775, 82])
chart.add('Ubuntu', [1515, 80])
chart.add('CentOS', [807, 60])
chart.add('Arch Linux', [549, 12])
chart.add('Red Hat Enterprise Linux', [247, 10])
chart.add('Gentoo', [129, 7])
chart.add('Fedora', [91, 6])
chart.add('Amazon Linux', [60, 0])
chart.add('OpenSUSE', [58, 0])
chart.add('Slackware', [50, 3])
chart.add('Xubuntu', [38, 1])
chart.add('Rasbian', [33, 4])
chart.add('SUSE Linux Enterprise Server', [33, 1])
chart.add('Linux Mint', [30, 4])
chart.add('Scientific Linux', [32, 0])
chart.add('Other', [187, 5])
elif type == 'labels':
chart.x_labels = [
'Debian', 'Ubuntu', 'CentOS', 'Arch Linux',
'Red Hat Enterprise Linux', 'Gentoo', 'Fedora', 'Amazon Linux',
'OpenSUSE', 'Slackware', 'Xubuntu', 'Rasbian',
'SUSE Linux Enterprise Server', 'Linux Mint',
'Scientific Linux', 'Other'
]
chart.add(
'Desktop Users', [
1775, 1515, 807, 549, 247, 129, 91, 60, 58, 50, 38, 33, 33,
30, 32, 187
]
)
chart.add(
'Server Users',
[82, 80, 60, 12, 10, 7, 6, 0, 0, 3, 1, 4, 1, 4, 0, 5]
)
return render_template('table.jinja2', chart=chart)
@app.route("/all")
@app.route("/all/<style>")
@app.route("/all/<style>/<color>")
@app.route("/all/<style>/<color>/<base_style>")
@app.route("/all/interpolate=<interpolate>")
def all(style='default', color=None, interpolate=None, base_style=None):
width, height = 600, 400
data = random.randrange(1, 10)
order = random.randrange(1, 10)
if color is None:
style = styles[style]
else:
style = parametric_styles[style](
color, base_style=styles[base_style or 'default']
)
xy_series = _random(data, order)
other_series = []
for title, values, config in xy_series:
other_series.append((title, cut(values, 1), config))
xy_series = b64encode(pickle.dumps(xy_series))
other_series = b64encode(pickle.dumps(other_series))
config = Config()
config.width = width
config.height = height
config.fill = bool(random.randrange(0, 2))
config.interpolate = interpolate
config.style = style
svgs = []
for chart in pygal.CHARTS:
type = '.'.join((chart.__module__, chart.__name__))
if chart._dual:
config.x_labels = None
else:
config.x_labels = [random_label() for i in range(data)]
svgs.append({
'type': type,
'series': xy_series if chart._dual else other_series,
'config': b64encode(pickle.dumps(config))
})
return render_template(
'svgs.jinja2', svgs=svgs, width=width, height=height
)
@app.route("/rotation")
def rotation():
width, height = 375, 245
config = Config()
config.width = width
config.height = height
config.fill = True
config.style = styles['neon']
data = random.randrange(1, 10)
order = random.randrange(1, 10)
series = b64encode(pickle.dumps(_random_series(type, data, order)))
labels = [random_label() for i in range(data)]
svgs = []
config.show_legend = bool(random.randrange(0, 2))
for angle in range(0, 370, 10):
config.title = "%d rotation" % angle
config.x_labels = labels
config.x_label_rotation = angle
config.y_label_rotation = angle
svgs.append({
'type': 'pygal.Bar',
'series': series,
'config': b64encode(pickle.dumps(config))
})
return render_template(
'svgs.jinja2', svgs=svgs, width=width, height=height
)
@app.route("/interpolation")
def interpolation():
width, height = 600, 400
config = Config()
config.width = width
config.height = height
config.fill = True
config.style = styles['neon']
data = random.randrange(1, 10)
order = random.randrange(1, 10)
series = b64encode(pickle.dumps(_random_series(type, data, order)))
svgs = []
for interpolation in 'quadratic', 'cubic', 'lagrange', 'trigonometric':
config.title = "%s interpolation" % interpolation
config.interpolate = interpolation
svgs.append({
'type': 'pygal.StackedLine',
'series': series,
'config': b64encode(pickle.dumps(config))
})
for params in [{'type': 'catmull_rom'}, {'type': 'finite_difference'},
{'type': 'cardinal',
'c': .25}, {'type': 'cardinal',
'c': .5}, {'type': 'cardinal', 'c': .75},
{'type': 'cardinal',
'c': 1.5}, {'type': 'cardinal',
'c': 2}, {'type': 'cardinal', 'c': 5},
{'type': 'kochanek_bartels', 'b': 1, 'c': 1,
't': 1}, {'type': 'kochanek_bartels', 'b': -1, 'c': 1,
't': 1}, {'type': 'kochanek_bartels', 'b': 1,
'c': -1, 't': 1},
{'type': 'kochanek_bartels', 'b': 1, 'c': 1, 't': -1}, {
'type': 'kochanek_bartels', 'b': -1, 'c': 1, 't': -1
}, {'type': 'kochanek_bartels', 'b': -1, 'c': -1,
't': 1}, {'type': 'kochanek_bartels', 'b': -1,
'c': -1, 't': -1}]:
config.title = "Hermite interpolation with params %r" % params
config.interpolate = 'hermite'
config.interpolation_parameters = params
svgs.append({
'type': 'pygal.StackedLine',
'series': series,
'config': b64encode(pickle.dumps(config))
})
return render_template(
'svgs.jinja2', svgs=svgs, width=width, height=height
)
@app.route("/raw_svgs/")
def raw_svgs():
svgs = []
for color in styles['neon'].colors:
chart = pygal.Pie(
style=parametric_styles['rotate'](color),
width=400,
height=300
)
chart.title = color
chart.disable_xml_declaration = True
chart.explicit_size = True
chart.js = ['http://l:2343/2.0.x/pygal-tooltips.js']
for i in range(6):
chart.add(str(i), 2**i)
svgs.append(chart.render())
return render_template('raw_svgs.jinja2', svgs=svgs)
return app
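# --- Editor's sketch (not part of the original file): the factory above is
# normally imported by the demo launcher; a minimal standalone run could look
# like this (host and port are arbitrary).
if __name__ == '__main__':
    create_app().run(host='127.0.0.1', port=5000, debug=True)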
|
davidharrigan/django
|
refs/heads/master
|
django/db/models/fields/related_lookups.py
|
287
|
from django.db.models.lookups import (
Exact, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual,
)
class MultiColSource(object):
contains_aggregate = False
def __init__(self, alias, targets, sources, field):
self.targets, self.sources, self.field, self.alias = targets, sources, field, alias
self.output_field = self.field
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.field)
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias),
self.targets, self.sources, self.field)
def get_normalized_value(value, lhs):
from django.db.models import Model
if isinstance(value, Model):
value_list = []
# A case like Restaurant.objects.filter(place=restaurant_instance),
# where place is a OneToOneField and the primary key of Restaurant.
if getattr(lhs.output_field, 'primary_key', False):
return (value.pk,)
sources = lhs.output_field.get_path_info()[-1].target_fields
for source in sources:
while not isinstance(value, source.model) and source.remote_field:
source = source.remote_field.model._meta.get_field(source.remote_field.field_name)
value_list.append(getattr(value, source.attname))
return tuple(value_list)
if not isinstance(value, tuple):
return (value,)
return value
class RelatedIn(In):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs]
# We need to run the related field's get_prep_lookup(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if hasattr(self.lhs.output_field, 'get_path_info'):
# Run the target field's get_prep_lookup. We can safely assume there is
# only one as we don't get to the direct value branch otherwise.
self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
self.lookup_name, self.rhs)
return super(RelatedIn, self).get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
# For multicolumn lookups we need to build a multicolumn where clause.
# This clause is either a SubqueryConstraint (for values that need to be compiled to
# SQL) or a OR-combined list of (col1 = val1 AND col2 = val2 AND ...) clauses.
from django.db.models.sql.where import WhereNode, SubqueryConstraint, AND, OR
root_constraint = WhereNode(connector=OR)
if self.rhs_is_direct_value():
values = [get_normalized_value(value, self.lhs) for value in self.rhs]
for value in values:
value_constraint = WhereNode()
for source, target, val in zip(self.lhs.sources, self.lhs.targets, value):
lookup_class = target.get_lookup('exact')
lookup = lookup_class(target.get_col(self.lhs.alias, source), val)
value_constraint.add(lookup, AND)
root_constraint.add(value_constraint, OR)
else:
root_constraint.add(
SubqueryConstraint(
self.lhs.alias, [target.column for target in self.lhs.targets],
[source.name for source in self.lhs.sources], self.rhs),
AND)
return root_constraint.as_sql(compiler, connection)
else:
return super(RelatedIn, self).as_sql(compiler, connection)
class RelatedLookupMixin(object):
def get_prep_lookup(self):
if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value():
# If we get here, we are dealing with single-column relations.
self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
# We need to run the related field's get_prep_lookup(). Consider case
# ForeignKey to IntegerField given value 'abc'. The ForeignKey itself
# doesn't have validation for non-integers, so we must run validation
# using the target field.
if hasattr(self.lhs.output_field, 'get_path_info'):
# Get the target field. We can safely assume there is only one
# as we don't get to the direct value branch otherwise.
self.rhs = self.lhs.output_field.get_path_info()[-1].target_fields[-1].get_prep_lookup(
self.lookup_name, self.rhs)
return super(RelatedLookupMixin, self).get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, MultiColSource):
assert self.rhs_is_direct_value()
self.rhs = get_normalized_value(self.rhs, self.lhs)
from django.db.models.sql.where import WhereNode, AND
root_constraint = WhereNode()
for target, source, val in zip(self.lhs.targets, self.lhs.sources, self.rhs):
lookup_class = target.get_lookup(self.lookup_name)
root_constraint.add(
lookup_class(target.get_col(self.lhs.alias, source), val), AND)
return root_constraint.as_sql(compiler, connection)
return super(RelatedLookupMixin, self).as_sql(compiler, connection)
class RelatedExact(RelatedLookupMixin, Exact):
pass
class RelatedLessThan(RelatedLookupMixin, LessThan):
pass
class RelatedGreaterThan(RelatedLookupMixin, GreaterThan):
pass
class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual):
pass
class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual):
pass
|
NeCTAR-RC/ceilometer
|
refs/heads/nectar/icehouse
|
ceilometer/compute/util.py
|
3
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2014 Red Hat, Inc
#
# Author: Eoghan Glynn <eglynn@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
OPTS = [
cfg.ListOpt('reserved_metadata_namespace',
default=['metering.'],
help='List of metadata prefixes reserved for metering use.'),
cfg.IntOpt('reserved_metadata_length',
default=256,
help='Limit on length of reserved metadata values.'),
]
cfg.CONF.register_opts(OPTS)
def add_reserved_user_metadata(src_metadata, dest_metadata):
limit = cfg.CONF.reserved_metadata_length
user_metadata = {}
for prefix in cfg.CONF.reserved_metadata_namespace:
md = dict(
(k[len(prefix):].replace('.', '_'),
v[:limit] if isinstance(v, basestring) else v)
for k, v in src_metadata.items()
if (k.startswith(prefix) and
k[len(prefix):].replace('.', '_') not in dest_metadata)
)
user_metadata.update(md)
if user_metadata:
dest_metadata['user_metadata'] = user_metadata
return dest_metadata
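# --- Editor's sketch (not part of the original module): with the default
# options above ('metering.' prefix, 256-character limit), reserved keys are
# copied into dest_metadata['user_metadata'] with the prefix stripped and dots
# replaced by underscores, e.g.:
#
#     add_reserved_user_metadata({'metering.stack.id': 'abc', 'other': 'x'}, {})
#     # -> {'user_metadata': {'stack_id': 'abc'}}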
|
pombredanne/bokeh
|
refs/heads/master
|
sphinx/source/docs/user_guide/source_examples/styling_glyph_hover.py
|
6
|
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool
from bokeh.sampledata.glucose import data
output_file("styling_hover.html")
subset = data.ix['2010-10-06']
x, y = subset.index.to_series(), subset['glucose']
# Basic plot setup
plot = figure(width=600, height=300, x_axis_type="datetime", tools="",
toolbar_location=None, title='Hover over points')
plot.line(x, y, line_dash="4 4", line_width=1, color='gray')
cr = plot.circle(x, y, size=20,
fill_color="grey", hover_fill_color="firebrick",
fill_alpha=0.05, hover_alpha=0.3,
line_color=None, hover_line_color="white")
plot.add_tools(HoverTool(tooltips=None, renderers=[cr], mode='hline'))
show(plot)
|
martinbuc/missionplanner
|
refs/heads/master
|
Lib/site-packages/scipy/linalg/setup_atlas_version.py
|
51
|
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
import os
from distutils.core import Extension
from numpy.distutils.misc_util import get_path, default_config_dict
from numpy.distutils.system_info import get_info,AtlasNotFoundError
def configuration (parent_package=''):
package = 'linalg'
config = default_config_dict(package,parent_package)
del config['fortran_libraries']
local_path = get_path(__name__)
atlas_info = get_info('atlas_threads')
if not atlas_info:
atlas_info = get_info('atlas')
if not atlas_info:
raise AtlasNotFoundError(AtlasNotFoundError.__doc__)
ext = Extension('atlas_version',
sources=[os.path.join(local_path,'atlas_version.c')],
libraries=[atlas_info['libraries'][-1]],
library_dirs=atlas_info['library_dirs'])
config['ext_modules'].append(ext)
return config
if __name__ == '__main__':
from distutils.core import setup
setup(**configuration())
|
mark-adams/python-social-auth
|
refs/heads/master
|
social/backends/exacttarget.py
|
5
|
"""
ExactTarget OAuth support.
Supports authentication from IMH using a JWT token and a pre-shared key.
Requires the pyjwt package.
"""
from datetime import timedelta, datetime
import jwt
from social.exceptions import AuthFailed, AuthCanceled
from social.backends.oauth import BaseOAuth2
class ExactTargetOAuth2(BaseOAuth2):
name = 'exacttarget'
def get_user_details(self, response):
"""Use the email address of the user, suffixed by _et"""
user = response.get('token', {})\
.get('request', {})\
.get('user', {})
if 'email' in user:
user['username'] = user['email']
return user
def get_user_id(self, details, response):
"""
Create a user ID from the ET user ID. Uses details rather than the
default response, as only the token is available in response. details
is much richer:
{
'expiresIn': 1200,
'username': 'example@example.com',
'refreshToken': '1234567890abcdef',
'internalOauthToken': 'jwttoken.......',
'oauthToken': 'yetanothertoken',
'id': 123456,
'culture': 'en-US',
'timezone': {
'shortName': 'CST',
'offset': -6.0,
'dst': False,
'longName': '(GMT-06:00) Central Time (No Daylight Saving)'
},
'email': 'example@example.com'
}
"""
return '{0}'.format(details.get('id'))
def uses_redirect(self):
return False
def auth_url(self):
return None
def process_error(self, data):
if data.get('error'):
error = self.data.get('error_description') or self.data['error']
raise AuthFailed(self, error)
def do_auth(self, token, *args, **kwargs):
dummy, secret = self.get_key_and_secret()
try: # Decode the token, using the Application Signature from settings
decoded = jwt.decode(token, secret)
except jwt.DecodeError: # Wrong signature, fail authentication
raise AuthCanceled(self)
kwargs.update({'response': {'token': decoded}, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
token = self.data.get('jwt', {})
if not token:
raise AuthFailed(self, 'Authentication Failed')
return self.do_auth(token, *args, **kwargs)
def extra_data(self, user, uid, response, details):
"""Load extra details from the JWT token"""
data = {
'id': details.get('id'),
'email': details.get('email'),
# OAuth token, for use with legacy SOAP API calls:
# http://bit.ly/13pRHfo
'internalOauthToken': details.get('internalOauthToken'),
# Token for use with the Application ClientID for the FUEL API
'oauthToken': details.get('oauthToken'),
# If the token has expired, use the FUEL API to get a new token see
# http://bit.ly/10v1K5l and http://bit.ly/11IbI6F - set legacy=1
'refreshToken': details.get('refreshToken'),
}
# The expiresIn value determines how long the tokens are valid for.
# Take a bit off, then convert to an int timestamp
expiresSeconds = details.get('expiresIn', 0) - 30
expires = datetime.utcnow() + timedelta(seconds=expiresSeconds)
data['expires'] = (expires - datetime(1970, 1, 1)).total_seconds()
if response.get('token'):
token = response['token']
org = token.get('request', {}).get('organization')
if org:
data['stack'] = org.get('stackKey')
data['enterpriseId'] = org.get('enterpriseId')
return data
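# --- Editor's sketch (not part of the original backend): a minimal PyJWT round
# trip of the kind do_auth() performs; the payload and the shared secret
# 'app-signature' are hypothetical placeholders.
if __name__ == '__main__':
    payload = {'request': {'user': {'email': 'user@example.com', 'id': 123456}}}
    token = jwt.encode(payload, 'app-signature', algorithm='HS256')
    # do_auth() passes the decoded dict along as {'token': decoded}
    print(jwt.decode(token, 'app-signature', algorithms=['HS256']))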
|
mnahm5/django-estore
|
refs/heads/master
|
Lib/site-packages/setuptools/command/py36compat.py
|
286
|
import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from setuptools.extern.six.moves import filter
class sdist_add_defaults:
"""
Mix-in providing forward-compatibility for functionality as found in
distutils on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils. Instead, override in the subclass.
"""
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist_add_defaults._cs_path_exists(__file__)
True
>>> sdist_add_defaults._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
def _add_defaults_optional(self):
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
def _add_defaults_c_libs(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
def _add_defaults_scripts(self):
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
if hasattr(sdist.sdist, '_add_defaults_standards'):
# disable the functionality already available upstream
class sdist_add_defaults:
pass
|
MarcelloLins/ServerlessCrawler-VancouverRealState
|
refs/heads/master
|
Bootstrapper/urllib3/contrib/_securetransport/low_level.py
|
136
|
"""
Low-level helpers for the SecureTransport bindings.
These are Python functions that are not directly related to the high-level APIs
but are necessary to get them to work. They include a whole bunch of low-level
CoreFoundation messing about and memory management. The concerns in this module
are almost entirely about trying to avoid memory leaks and providing
appropriate and useful assistance to the higher-level code.
"""
import base64
import ctypes
import itertools
import re
import os
import ssl
import tempfile
from .bindings import Security, CoreFoundation, CFConst
# This regular expression is used to grab PEM data out of a PEM bundle.
_PEM_CERTS_RE = re.compile(
b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)
def _cf_data_from_bytes(bytestring):
"""
Given a bytestring, create a CFData object from it. This CFData object must
be CFReleased by the caller.
"""
return CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
)
def _cf_dictionary_from_tuples(tuples):
"""
Given a list of Python tuples, create an associated CFDictionary.
"""
dictionary_size = len(tuples)
# We need to get the dictionary keys and values out in the same order.
keys = (t[0] for t in tuples)
values = (t[1] for t in tuples)
cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
return CoreFoundation.CFDictionaryCreate(
CoreFoundation.kCFAllocatorDefault,
cf_keys,
cf_values,
dictionary_size,
CoreFoundation.kCFTypeDictionaryKeyCallBacks,
CoreFoundation.kCFTypeDictionaryValueCallBacks,
)
def _cf_string_to_unicode(value):
"""
Creates a Unicode string from a CFString object. Used entirely for error
reporting.
Yes, it annoys me quite a lot that this function is this complex.
"""
value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
string = CoreFoundation.CFStringGetCStringPtr(
value_as_void_p,
CFConst.kCFStringEncodingUTF8
)
if string is None:
buffer = ctypes.create_string_buffer(1024)
result = CoreFoundation.CFStringGetCString(
value_as_void_p,
buffer,
1024,
CFConst.kCFStringEncodingUTF8
)
if not result:
raise OSError('Error copying C string from CFStringRef')
string = buffer.value
if string is not None:
string = string.decode('utf-8')
return string
def _assert_no_error(error, exception_class=None):
"""
Checks the return code and throws an exception if there is an error to
report
"""
if error == 0:
return
cf_error_string = Security.SecCopyErrorMessageString(error, None)
output = _cf_string_to_unicode(cf_error_string)
CoreFoundation.CFRelease(cf_error_string)
if output is None or output == u'':
output = u'OSStatus %s' % error
if exception_class is None:
exception_class = ssl.SSLError
raise exception_class(output)
def _cert_array_from_pem(pem_bundle):
"""
Given a bundle of certs in PEM format, turns them into a CFArray of certs
that can be used to validate a cert chain.
"""
der_certs = [
base64.b64decode(match.group(1))
for match in _PEM_CERTS_RE.finditer(pem_bundle)
]
if not der_certs:
raise ssl.SSLError("No root certificates specified")
cert_array = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
)
if not cert_array:
raise ssl.SSLError("Unable to allocate memory!")
try:
for der_bytes in der_certs:
certdata = _cf_data_from_bytes(der_bytes)
if not certdata:
raise ssl.SSLError("Unable to allocate memory!")
cert = Security.SecCertificateCreateWithData(
CoreFoundation.kCFAllocatorDefault, certdata
)
CoreFoundation.CFRelease(certdata)
if not cert:
raise ssl.SSLError("Unable to build cert object!")
CoreFoundation.CFArrayAppendValue(cert_array, cert)
CoreFoundation.CFRelease(cert)
except Exception:
# We need to free the array before the exception bubbles further.
# We only want to do that if an error occurs: otherwise, the caller
# should free.
CoreFoundation.CFRelease(cert_array)
return cert_array
def _is_cert(item):
"""
Returns True if a given CFTypeRef is a certificate.
"""
expected = Security.SecCertificateGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _is_identity(item):
"""
Returns True if a given CFTypeRef is an identity.
"""
expected = Security.SecIdentityGetTypeID()
return CoreFoundation.CFGetTypeID(item) == expected
def _temporary_keychain():
"""
This function creates a temporary Mac keychain that we can use to work with
credentials. This keychain uses a one-time password and a temporary file to
store the data. We expect to have one keychain per socket. The returned
SecKeychainRef must be freed by the caller, including calling
SecKeychainDelete.
Returns a tuple of the SecKeychainRef and the path to the temporary
directory that contains it.
"""
# Unfortunately, SecKeychainCreate requires a path to a keychain. This
# means we cannot use mkstemp to use a generic temporary file. Instead,
# we're going to create a temporary directory and a filename to use there.
# This filename will be 8 random bytes expanded into base64. We also need
# some random bytes to password-protect the keychain we're creating, so we
# ask for 40 random bytes.
random_bytes = os.urandom(40)
filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
tempdirectory = tempfile.mkdtemp()
keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
# We now want to create the keychain itself.
keychain = Security.SecKeychainRef()
status = Security.SecKeychainCreate(
keychain_path,
len(password),
password,
False,
None,
ctypes.byref(keychain)
)
_assert_no_error(status)
# Having created the keychain, we want to pass it off to the caller.
return keychain, tempdirectory
def _load_items_from_file(keychain, path):
"""
Given a single file, loads all the trust objects from it into arrays and
the keychain.
Returns a tuple of lists: the first list is a list of identities, the
second a list of certs.
"""
certificates = []
identities = []
result_array = None
with open(path, 'rb') as f:
raw_filedata = f.read()
try:
filedata = CoreFoundation.CFDataCreate(
CoreFoundation.kCFAllocatorDefault,
raw_filedata,
len(raw_filedata)
)
result_array = CoreFoundation.CFArrayRef()
result = Security.SecItemImport(
filedata, # cert data
None, # Filename, leaving it out for now
None, # What the type of the file is, we don't care
None, # what's in the file, we don't care
0, # import flags
None, # key params, can include passphrase in the future
keychain, # The keychain to insert into
ctypes.byref(result_array) # Results
)
_assert_no_error(result)
# A CFArray is not very useful to us as an intermediary
# representation, so we are going to extract the objects we want
# and then free the array. We don't need to keep hold of keys: the
# keychain already has them!
result_count = CoreFoundation.CFArrayGetCount(result_array)
for index in range(result_count):
item = CoreFoundation.CFArrayGetValueAtIndex(
result_array, index
)
item = ctypes.cast(item, CoreFoundation.CFTypeRef)
if _is_cert(item):
CoreFoundation.CFRetain(item)
certificates.append(item)
elif _is_identity(item):
CoreFoundation.CFRetain(item)
identities.append(item)
finally:
if result_array:
CoreFoundation.CFRelease(result_array)
CoreFoundation.CFRelease(filedata)
return (identities, certificates)
def _load_client_cert_chain(keychain, *paths):
"""
Load certificates and maybe keys from a number of files. Has the end goal
of returning a CFArray containing one SecIdentityRef, and then zero or more
SecCertificateRef objects, suitable for use as a client certificate trust
chain.
"""
# Ok, the strategy.
#
# This relies on knowing that macOS will not give you a SecIdentityRef
# unless you have imported a key into a keychain. This is a somewhat
# artificial limitation of macOS (for example, it doesn't necessarily
# affect iOS), but there is nothing inside Security.framework that lets you
# get a SecIdentityRef without having a key in a keychain.
#
# So the policy here is we take all the files and iterate them in order.
# Each one will use SecItemImport to have one or more objects loaded from
# it. We will also point at a keychain that macOS can use to work with the
# private key.
#
# Once we have all the objects, we'll check what we actually have. If we
# already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
# we'll take the first certificate (which we assume to be our leaf) and
# ask the keychain to give us a SecIdentityRef with that cert's associated
# key.
#
# We'll then return a CFArray containing the trust chain: one
# SecIdentityRef and then zero-or-more SecCertificateRef objects. The
# responsibility for freeing this CFArray will be with the caller. This
# CFArray must remain alive for the entire connection, so in practice it
# will be stored with a single SSLSocket, along with the reference to the
# keychain.
certificates = []
identities = []
# Filter out bad paths.
paths = (path for path in paths if path)
try:
for file_path in paths:
new_identities, new_certs = _load_items_from_file(
keychain, file_path
)
identities.extend(new_identities)
certificates.extend(new_certs)
# Ok, we have everything. The question is: do we have an identity? If
# not, we want to grab one from the first cert we have.
if not identities:
new_identity = Security.SecIdentityRef()
status = Security.SecIdentityCreateWithCertificate(
keychain,
certificates[0],
ctypes.byref(new_identity)
)
_assert_no_error(status)
identities.append(new_identity)
# We now want to release the original certificate, as we no longer
# need it.
CoreFoundation.CFRelease(certificates.pop(0))
# We now need to build a new CFArray that holds the trust chain.
trust_chain = CoreFoundation.CFArrayCreateMutable(
CoreFoundation.kCFAllocatorDefault,
0,
ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
)
for item in itertools.chain(identities, certificates):
# ArrayAppendValue does a CFRetain on the item. That's fine,
# because the finally block will release our other refs to them.
CoreFoundation.CFArrayAppendValue(trust_chain, item)
return trust_chain
finally:
for obj in itertools.chain(identities, certificates):
CoreFoundation.CFRelease(obj)
|
crakensio/django_training
|
refs/heads/master
|
lib/python2.7/site-packages/django/core/mail/backends/locmem.py
|
227
|
"""
Backend for test environment.
"""
from django.core import mail
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
"""A email backend for use during test sessions.
The test connection stores email messages in a dummy outbox,
rather than sending them out on the wire.
The dummy outbox is accessible through the outbox instance attribute.
"""
def __init__(self, *args, **kwargs):
super(EmailBackend, self).__init__(*args, **kwargs)
if not hasattr(mail, 'outbox'):
mail.outbox = []
def send_messages(self, messages):
"""Redirect messages to the dummy outbox"""
for message in messages: # .message() triggers header validation
message.message()
mail.outbox.extend(messages)
return len(messages)
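# --- Editor's sketch (not part of the original module): in a test run with
# EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend', sent mail
# lands in the in-memory outbox instead of going out on the wire:
#
#     from django.core import mail
#     mail.send_mail('subject', 'body', 'from@example.com', ['to@example.com'])
#     assert len(mail.outbox) == 1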
|
oudalab/fajita
|
refs/heads/master
|
pythonAPI/flask/lib/python3.5/site-packages/chardet/mbcharsetprober.py
|
289
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
class MultiByteCharSetProber(CharSetProber):
"""
MultiByteCharSetProber
"""
def __init__(self, lang_filter=None):
super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
self.distribution_analyzer = None
self.coding_sm = None
self._last_char = [0, 0]
def reset(self):
super(MultiByteCharSetProber, self).reset()
if self.coding_sm:
self.coding_sm.reset()
if self.distribution_analyzer:
self.distribution_analyzer.reset()
self._last_char = [0, 0]
@property
def charset_name(self):
raise NotImplementedError
@property
def language(self):
raise NotImplementedError
def feed(self, byte_str):
for i in range(len(byte_str)):
coding_state = self.coding_sm.next_state(byte_str[i])
if coding_state == MachineState.ERROR:
self.logger.debug('%s %s prober hit error at byte %s',
self.charset_name, self.language, i)
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
char_len = self.coding_sm.get_current_charlen()
if i == 0:
self._last_char[1] = byte_str[0]
self.distribution_analyzer.feed(self._last_char, char_len)
else:
self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
char_len)
self._last_char[0] = byte_str[-1]
if self.state == ProbingState.DETECTING:
if (self.distribution_analyzer.got_enough_data() and
(self.get_confidence() > self.SHORTCUT_THRESHOLD)):
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
return self.distribution_analyzer.get_confidence()
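# --- Editor's sketch (not part of the original module): concrete subclasses of
# this prober are normally driven indirectly through the package-level API;
# some_bytes below is a placeholder for the raw input being probed.
#
#     import chardet
#     result = chardet.detect(some_bytes)
#     # -> dict with 'encoding', 'confidence' and 'language' keys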
|
naitoh/py2rb
|
refs/heads/master
|
tests/functions/bitand.py
|
1
|
x = 32424
y = 1437
z = x & y
print(z)
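# Editor's note (not part of the original test): 32424 & 1437 evaluates to 1160,
# since only bits 3, 7 and 10 (8 + 128 + 1024) are set in both operands.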
|
shssoichiro/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/abort_wsh.py
|
465
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
raise handshake.AbortedByUserException(
"Aborted in web_socket_transfer_data")
# vi:sts=4 sw=4 et
|
grupozeety/CDerpnext
|
refs/heads/bk_master
|
erpnext/accounts/doctype/period_closing_voucher/period_closing_voucher.py
|
12
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
from erpnext.accounts.utils import get_account_currency
from erpnext.controllers.accounts_controller import AccountsController
class PeriodClosingVoucher(AccountsController):
def validate(self):
self.validate_account_head()
self.validate_posting_date()
def on_submit(self):
self.make_gl_entries()
def on_cancel(self):
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type = 'Period Closing Voucher' and voucher_no=%s""", self.name)
def validate_account_head(self):
closing_account_type = frappe.db.get_value("Account", self.closing_account_head, "root_type")
if closing_account_type not in ["Liability", "Equity"]:
frappe.throw(_("Closing Account {0} must be of type Liability / Equity")
.format(self.closing_account_head))
account_currency = get_account_currency(self.closing_account_head)
company_currency = frappe.db.get_value("Company", self.company, "default_currency")
if account_currency != company_currency:
frappe.throw(_("Currency of the Closing Account must be {0}").format(company_currency))
def validate_posting_date(self):
from erpnext.accounts.utils import get_fiscal_year, validate_fiscal_year
validate_fiscal_year(self.posting_date, self.fiscal_year, label=_("Posting Date"), doc=self)
self.year_start_date = get_fiscal_year(self.posting_date, self.fiscal_year)[1]
pce = frappe.db.sql("""select name from `tabPeriod Closing Voucher`
where posting_date > %s and fiscal_year = %s and docstatus = 1""",
(self.posting_date, self.fiscal_year))
if pce and pce[0][0]:
frappe.throw(_("Another Period Closing Entry {0} has been made after {1}")
.format(pce[0][0], self.posting_date))
def make_gl_entries(self):
gl_entries = []
net_pl_balance = 0
pl_accounts = self.get_pl_balances()
for acc in pl_accounts:
if flt(acc.balance_in_company_currency):
gl_entries.append(self.get_gl_dict({
"account": acc.account,
"account_currency": acc.account_currency,
"debit_in_account_currency": abs(flt(acc.balance_in_account_currency)) \
if flt(acc.balance_in_account_currency) < 0 else 0,
"debit": abs(flt(acc.balance_in_company_currency)) \
if flt(acc.balance_in_company_currency) < 0 else 0,
"credit_in_account_currency": abs(flt(acc.balance_in_account_currency)) \
if flt(acc.balance_in_account_currency) > 0 else 0,
"credit": abs(flt(acc.balance_in_company_currency)) \
if flt(acc.balance_in_company_currency) > 0 else 0
}))
net_pl_balance += flt(acc.balance_in_company_currency)
if net_pl_balance:
gl_entries.append(self.get_gl_dict({
"account": self.closing_account_head,
"debit_in_account_currency": abs(net_pl_balance) if net_pl_balance > 0 else 0,
"debit": abs(net_pl_balance) if net_pl_balance > 0 else 0,
"credit_in_account_currency": abs(net_pl_balance) if net_pl_balance < 0 else 0,
"credit": abs(net_pl_balance) if net_pl_balance < 0 else 0
}))
from erpnext.accounts.general_ledger import make_gl_entries
make_gl_entries(gl_entries)
def get_pl_balances(self):
"""Get balance for pl accounts"""
return frappe.db.sql("""
select
t1.account, t2.account_currency,
sum(t1.debit_in_account_currency) - sum(t1.credit_in_account_currency) as balance_in_account_currency,
sum(t1.debit) - sum(t1.credit) as balance_in_company_currency
from `tabGL Entry` t1, `tabAccount` t2
where t1.account = t2.name and t2.report_type = 'Profit and Loss'
and t2.docstatus < 2 and t2.company = %s
and t1.posting_date between %s and %s
group by t1.account
""", (self.company, self.get("year_start_date"), self.posting_date), as_dict=1)
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/tangible/furniture/city/shared_streetlamp_large_01.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/city/shared_streetlamp_large_01.iff"
result.attribute_template_id = 6
result.stfName("frn_n","streetlamp")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
mauriciofierrom/testing
|
refs/heads/master
|
iaen_curriculum_ws.py
|
1
|
# -*- coding: utf-8 -*-
from suds.client import Client
from suds.wsse import *
from suds.sax.element import Element
from suds.sax.attribute import Attribute
from suds.xsd.sxbasic import Import
class IaenCurriculumWs:
"""
:module: IaenCurriculum
"""
def __init__(self, usuario=None):
        # User used to query the permission service
self.ws_user = '1103635445'
        # URL of the permission service
self.auth_client = Client(url='https://www.bsg.gob.ec/sw/STI/BSGSW08_Acceder_BSG?wsdl')
        # The validarPermisoPeticion data type
self.request = self.auth_client.factory.create('validarPermisoPeticion')
def find_identification_info(self, identification):
"""
        Retrieves a person's personal data by consuming the Registro Civil
        (Civil Registry) web service.
        :param identification: The cédula (national ID) of the person whose data will be queried.
        :type identification: str.
        :returns: **dictionary** -- Dictionary with the information useful to the Refactory system
>>> data[] = {"address_1", "house_number", "state_residency", "city_residency", "parish_residency",
"state_birth", "city_birth", "parish_birth", "civil_status", "birth_date", "gender", "nationality", "name"}
:raises: AttributeError, KeyError
"""
response = self.get_authorization('https://www.bsg.gob.ec/sw/RC/BSGSW01_Consultar_Cedula?wsdl')
wss_header = self.generate_header_authentication(response)
client = Client(url='https://www.bsg.gob.ec/sw/RC/BSGSW01_Consultar_Cedula?wsdl')
client.set_options(soapheaders=wss_header)
client_response = client.service.BusquedaPorCedula(identification, 'testroot', 'Sti1DigS21')
try: # and not client_response["Mensaje"]
data = {
"address_1": client_response['CalleDomicilio'],
"house_number": client_response['NumeroDomicilio'],
#"state_residency": client_response['Domicilio'].split('/')[0].capitalize(),
#"city_residency": client_response['Domicilio'].split('/')[1].capitalize(),
#"parish_residency": client_response['Domicilio'].split('/')[2].capitalize(),
#"state_birth": str(client_response['LugarNacimiento'].split('/')[0]).capitalize(),
#"city_birth": str(client_response['LugarNacimiento'].split('/')[1]).capitalize(),
#"parish_birth": str(client_response['LugarNacimiento'].split('/')[2]).capitalize(),
"civil_status": str(client_response['EstadoCivil']).capitalize(),
"birth_date": str(client_response['FechaNacimiento']),
"gender": str(client_response['Genero']).capitalize(),
"nationality": client_response['Nacionalidad'],
"name": client_response['Nombre']
}
if client_response['LugarNacimiento'].count('/')<2:
data['parish_birth'] = ""
data['state_birth'] = ""
data['city_birth'] = ""
else:
                data['parish_birth'] = client_response['LugarNacimiento'].split('/')[2].capitalize()
data['state_birth'] = client_response['LugarNacimiento'].split('/')[0].capitalize()
data['city_birth'] = client_response['LugarNacimiento'].split('/')[1].capitalize()
if client_response['Domicilio'].count('/')<2:
data["state_residency"] = ""
data["city_residency"] = ""
data["parish_residency"] = ""
else:
data["state_residency"] = client_response['Domicilio'].split('/')[0].capitalize(),
data["city_residency"] = client_response['Domicilio'].split('/')[1].capitalize(),
data["parish_residency"] = client_response['Domicilio'].split('/')[2].capitalize(),
except AttributeError:
data = {}
except KeyError:
data = {}
return data
def find_instruction_info(self, identification):
"""
        Retrieves a person's academic information by consuming the Senescyt web service.
>>>data[] = {"level", "institution_name", "title_name", "register_number", "register_date"}
"""
response = self.get_authorization('https://www.bsg.gob.ec/sw/SENESCYT/BSGSW01_Consultar_Titulos?wsdl')
wss_header = self.generate_header_authentication(response)
client = Client(url='https://www.bsg.gob.ec/sw/SENESCYT/BSGSW01_Consultar_Titulos?wsdl')
client.set_options(soapheaders=wss_header)
client_response = client.service.consultaTitulo(identification)
data = {}
if client_response:
index = 0
for title_level in client_response['niveltitulos']:
for title in title_level.titulo:
title_data = {index:{
"level": title_level["nivel"].encode('UTF-8'),
"institution_name": title["ies"].encode('UTF-8'),
"title_name": title["nombreTitulo"].encode('UTF-8'),
"register_number": title["numeroRegistro"].encode('UTF-8'),
"register_date": title["fechaRegistro"].encode('UTF-8')
}}
data.update(title_data)
index+=1
return data
def find_disability_info(self, identification):
"""
        Retrieves a person's disability information by consuming the web service of the
        Ministerio de Salud Pública (Ministry of Public Health).
        Keyword Arguments:
        identification -- The cédula (national ID) of the person whose data will be queried.
        Returns:
        data -- Dictionary with the data relevant to the system.
data[] = {"conadis_id", "type", "degree"}
"""
response = self.get_authorization('https://www.bsg.gob.ec/sw/MSP/BSGSW01_Consultar_Discapacidad?wsdl')
wss_header = self.generate_header_authentication(response)
client = Client(url='https://www.bsg.gob.ec/sw/MSP/BSGSW01_Consultar_Discapacidad?wsdl')
client.set_options(soapheaders=wss_header)
client_response = client.service.BuscarPersonaConDiscapacidad(identification,'WS-SNAP','StiDig02')
data = {}
try: # and not client_response["Mensaje"]
data = {
"conadis_id": str(client_response['CodigoConadis']),
"type": str(client_response['DeficienciaPredomina']),
"degree": str(client_response['GradoDiscapacidad'])
}
except AttributeError:
data = {}
except KeyError:
data = {}
#print data
return data
def get_authorization(self, url):
"""
        Retrieves the data needed to authorize consumption of the web services.
        Keyword Arguments:
        url -- The URL of the web service for which authorization is required.
        Returns:
        The response of the ValidarPermiso service call.
"""
self.request.Cedula = self.ws_user
self.request.Urlsw = url
return self.auth_client.service.ValidarPermiso(self.request)
def generate_header_authentication(self, response):
"""
        Builds the WS-Security XML structure that will later be inserted into
        the header of the SOAP request.
        Keyword Arguments:
        response -- The authorization response carrying the Nonce, Digest and timestamps.
"""
wss = ('wss', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd')
wsu = ('wsu', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd')
#print response
username_token = Element('UsernameToken', ns=wss).setText('')
username = Element('Username', ns=wss).setText(self.ws_user)
nonce = Element('Nonce', ns=wss).setText(response['Nonce'])
nonce.append(Attribute('EncodingType', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary'))
password = Element('Password', ns = wss).setText(response['Digest'])
password.append(Attribute('Type', 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordDigest'))
create = Element('Created', ns = wsu).setText(response['Fecha'])
created = Element('Created', ns = wsu).setText(response['Fecha'])
expires = Element('Expires', ns = wsu).setText(response['FechaF'])
timestamp = Element('Timestamp', ns=wsu).setText('')
timestamp.append(Attribute('wsu:Id', 'Timestamp-2'))
username_token.insert(username)
username_token.insert(password)
username_token.insert(nonce)
username_token.insert(create)
timestamp.insert(expires)
timestamp.insert(created)
wss_element = Element('Security', ns=wss).insert(timestamp)
wss_element.insert(username_token)
return wss_element
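# --- Editor's sketch (not part of the original module): a minimal, hypothetical
# driver for the class above; '9999999999' is a placeholder cédula and the call
# requires network access to the BSG endpoints.
if __name__ == '__main__':
    ws = IaenCurriculumWs()
    print(ws.find_identification_info('9999999999'))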
|
marqueedev/django
|
refs/heads/master
|
django/core/files/__init__.py
|
839
|
from django.core.files.base import File
__all__ = ['File']
|
rob356/SickRage
|
refs/heads/master
|
lib/futures/__init__.py
|
124
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
import warnings
from concurrent.futures import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed,
ProcessPoolExecutor,
ThreadPoolExecutor)
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
warnings.warn('The futures package has been deprecated. '
'Use the concurrent.futures package instead.',
DeprecationWarning)
|
robertmattmueller/sdac-compiler
|
refs/heads/master
|
sympy/polys/tests/test_densetools.py
|
24
|
"""Tests for dense recursive polynomials' tools. """
from sympy.polys.densebasic import (
dup_LC, dmp_LC, dup_normal, dmp_normal,
dup_from_raw_dict, dmp_from_dict,
dmp_convert, dmp_swap, dmp_one_p,
)
from sympy.polys.densearith import (
dup_add, dup_mul, dup_exquo,
dmp_neg, dmp_sub, dmp_mul_ground, dmp_mul, dmp_sqr,
)
from sympy.polys.densetools import (
dup_clear_denoms, dmp_clear_denoms,
dup_integrate, dmp_integrate, dmp_integrate_in,
dup_diff, dmp_diff, dmp_diff_in,
dup_eval, dmp_eval, dmp_eval_in,
dmp_eval_tail, dmp_diff_eval_in,
dup_trunc, dmp_trunc, dmp_ground_trunc,
dup_monic, dmp_ground_monic,
dup_content, dmp_ground_content,
dup_primitive, dmp_ground_primitive,
dup_extract, dmp_ground_extract,
dup_real_imag,
dup_mirror, dup_scale, dup_shift,
dup_transform,
dup_compose, dmp_compose,
dup_decompose,
dmp_lift,
dup_sign_variations,
dup_revert, dmp_revert,
)
from sympy.polys.polyclasses import DMP, ANP
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
ExactQuotientFailed,
NotReversible,
DomainError,
)
from sympy.polys.specialpolys import (
f_polys,
dmp_fateman_poly_F_1,
dmp_fateman_poly_F_2,
dmp_fateman_poly_F_3,
)
from sympy.polys.domains import FF, ZZ, QQ, EX
from sympy.polys.rings import ring
from sympy import S, I, sin
from sympy.core.compatibility import long
from sympy.abc import x
from sympy.utilities.pytest import raises
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = [ f.to_dense() for f in f_polys() ]
def test_dup_integrate():
assert dup_integrate([], 1, QQ) == []
assert dup_integrate([], 2, QQ) == []
assert dup_integrate([QQ(1)], 1, QQ) == [QQ(1), QQ(0)]
assert dup_integrate([QQ(1)], 2, QQ) == [QQ(1, 2), QQ(0), QQ(0)]
assert dup_integrate([QQ(1), QQ(2), QQ(3)], 0, QQ) == \
[QQ(1), QQ(2), QQ(3)]
assert dup_integrate([QQ(1), QQ(2), QQ(3)], 1, QQ) == \
[QQ(1, 3), QQ(1), QQ(3), QQ(0)]
assert dup_integrate([QQ(1), QQ(2), QQ(3)], 2, QQ) == \
[QQ(1, 12), QQ(1, 3), QQ(3, 2), QQ(0), QQ(0)]
assert dup_integrate([QQ(1), QQ(2), QQ(3)], 3, QQ) == \
[QQ(1, 60), QQ(1, 12), QQ(1, 2), QQ(0), QQ(0), QQ(0)]
assert dup_integrate(dup_from_raw_dict({29: QQ(17)}, QQ), 3, QQ) == \
dup_from_raw_dict({32: QQ(17, 29760)}, QQ)
assert dup_integrate(dup_from_raw_dict({29: QQ(17), 5: QQ(1, 2)}, QQ), 3, QQ) == \
dup_from_raw_dict({32: QQ(17, 29760), 8: QQ(1, 672)}, QQ)
def test_dmp_integrate():
assert dmp_integrate([[[]]], 1, 2, QQ) == [[[]]]
assert dmp_integrate([[[]]], 2, 2, QQ) == [[[]]]
assert dmp_integrate([[[QQ(1)]]], 1, 2, QQ) == [[[QQ(1)]], [[]]]
assert dmp_integrate([[[QQ(1)]]], 2, 2, QQ) == [[[QQ(1, 2)]], [[]], [[]]]
assert dmp_integrate([[QQ(1)], [QQ(2)], [QQ(3)]], 0, 1, QQ) == \
[[QQ(1)], [QQ(2)], [QQ(3)]]
assert dmp_integrate([[QQ(1)], [QQ(2)], [QQ(3)]], 1, 1, QQ) == \
[[QQ(1, 3)], [QQ(1)], [QQ(3)], []]
assert dmp_integrate([[QQ(1)], [QQ(2)], [QQ(3)]], 2, 1, QQ) == \
[[QQ(1, 12)], [QQ(1, 3)], [QQ(3, 2)], [], []]
assert dmp_integrate([[QQ(1)], [QQ(2)], [QQ(3)]], 3, 1, QQ) == \
[[QQ(1, 60)], [QQ(1, 12)], [QQ(1, 2)], [], [], []]
def test_dmp_integrate_in():
f = dmp_convert(f_6, 3, ZZ, QQ)
assert dmp_integrate_in(f, 2, 1, 3, QQ) == \
dmp_swap(
dmp_integrate(dmp_swap(f, 0, 1, 3, QQ), 2, 3, QQ), 0, 1, 3, QQ)
assert dmp_integrate_in(f, 3, 1, 3, QQ) == \
dmp_swap(
dmp_integrate(dmp_swap(f, 0, 1, 3, QQ), 3, 3, QQ), 0, 1, 3, QQ)
assert dmp_integrate_in(f, 2, 2, 3, QQ) == \
dmp_swap(
dmp_integrate(dmp_swap(f, 0, 2, 3, QQ), 2, 3, QQ), 0, 2, 3, QQ)
assert dmp_integrate_in(f, 3, 2, 3, QQ) == \
dmp_swap(
dmp_integrate(dmp_swap(f, 0, 2, 3, QQ), 3, 3, QQ), 0, 2, 3, QQ)
def test_dup_diff():
assert dup_diff([], 1, ZZ) == []
assert dup_diff([7], 1, ZZ) == []
assert dup_diff([2, 7], 1, ZZ) == [2]
assert dup_diff([1, 2, 1], 1, ZZ) == [2, 2]
assert dup_diff([1, 2, 3, 4], 1, ZZ) == [3, 4, 3]
assert dup_diff([1, -1, 0, 0, 2], 1, ZZ) == [4, -3, 0, 0]
f = dup_normal([17, 34, 56, -345, 23, 76, 0, 0, 12, 3, 7], ZZ)
assert dup_diff(f, 0, ZZ) == f
assert dup_diff(f, 1, ZZ) == dup_diff(f, 1, ZZ)
assert dup_diff(f, 2, ZZ) == dup_diff(dup_diff(f, 1, ZZ), 1, ZZ)
assert dup_diff(
f, 3, ZZ) == dup_diff(dup_diff(dup_diff(f, 1, ZZ), 1, ZZ), 1, ZZ)
K = FF(3)
f = dup_normal([17, 34, 56, -345, 23, 76, 0, 0, 12, 3, 7], K)
assert dup_diff(f, 1, K) == dup_normal([2, 0, 1, 0, 0, 2, 0, 0, 0, 0], K)
assert dup_diff(f, 2, K) == dup_normal([1, 0, 0, 2, 0, 0, 0], K)
assert dup_diff(f, 3, K) == dup_normal([], K)
assert dup_diff(f, 0, K) == f
assert dup_diff(f, 1, K) == dup_diff(f, 1, K)
assert dup_diff(f, 2, K) == dup_diff(dup_diff(f, 1, K), 1, K)
assert dup_diff(
f, 3, K) == dup_diff(dup_diff(dup_diff(f, 1, K), 1, K), 1, K)
def test_dmp_diff():
assert dmp_diff([], 1, 0, ZZ) == []
assert dmp_diff([[]], 1, 1, ZZ) == [[]]
assert dmp_diff([[[]]], 1, 2, ZZ) == [[[]]]
assert dmp_diff([[[1], [2]]], 1, 2, ZZ) == [[[]]]
assert dmp_diff([[[1]], [[]]], 1, 2, ZZ) == [[[1]]]
assert dmp_diff([[[3]], [[1]], [[]]], 1, 2, ZZ) == [[[6]], [[1]]]
assert dmp_diff([1, -1, 0, 0, 2], 1, 0, ZZ) == \
dup_diff([1, -1, 0, 0, 2], 1, ZZ)
assert dmp_diff(f_6, 0, 3, ZZ) == f_6
assert dmp_diff(f_6, 1, 3, ZZ) == dmp_diff(f_6, 1, 3, ZZ)
assert dmp_diff(
f_6, 2, 3, ZZ) == dmp_diff(dmp_diff(f_6, 1, 3, ZZ), 1, 3, ZZ)
assert dmp_diff(f_6, 3, 3, ZZ) == dmp_diff(
dmp_diff(dmp_diff(f_6, 1, 3, ZZ), 1, 3, ZZ), 1, 3, ZZ)
K = FF(23)
F_6 = dmp_normal(f_6, 3, K)
assert dmp_diff(F_6, 0, 3, K) == F_6
assert dmp_diff(F_6, 1, 3, K) == dmp_diff(F_6, 1, 3, K)
assert dmp_diff(F_6, 2, 3, K) == dmp_diff(dmp_diff(F_6, 1, 3, K), 1, 3, K)
assert dmp_diff(F_6, 3, 3, K) == dmp_diff(
dmp_diff(dmp_diff(F_6, 1, 3, K), 1, 3, K), 1, 3, K)
def test_dmp_diff_in():
assert dmp_diff_in(f_6, 2, 1, 3, ZZ) == \
dmp_swap(dmp_diff(dmp_swap(f_6, 0, 1, 3, ZZ), 2, 3, ZZ), 0, 1, 3, ZZ)
assert dmp_diff_in(f_6, 3, 1, 3, ZZ) == \
dmp_swap(dmp_diff(dmp_swap(f_6, 0, 1, 3, ZZ), 3, 3, ZZ), 0, 1, 3, ZZ)
assert dmp_diff_in(f_6, 2, 2, 3, ZZ) == \
dmp_swap(dmp_diff(dmp_swap(f_6, 0, 2, 3, ZZ), 2, 3, ZZ), 0, 2, 3, ZZ)
assert dmp_diff_in(f_6, 3, 2, 3, ZZ) == \
dmp_swap(dmp_diff(dmp_swap(f_6, 0, 2, 3, ZZ), 3, 3, ZZ), 0, 2, 3, ZZ)
def test_dup_eval():
assert dup_eval([], 7, ZZ) == 0
assert dup_eval([1, 2], 0, ZZ) == 2
assert dup_eval([1, 2, 3], 7, ZZ) == 66
def test_dmp_eval():
assert dmp_eval([], 3, 0, ZZ) == 0
assert dmp_eval([[]], 3, 1, ZZ) == []
assert dmp_eval([[[]]], 3, 2, ZZ) == [[]]
assert dmp_eval([[1, 2]], 0, 1, ZZ) == [1, 2]
assert dmp_eval([[[1]]], 3, 2, ZZ) == [[1]]
assert dmp_eval([[[1, 2]]], 3, 2, ZZ) == [[1, 2]]
assert dmp_eval([[3, 2], [1, 2]], 3, 1, ZZ) == [10, 8]
assert dmp_eval([[[3, 2]], [[1, 2]]], 3, 2, ZZ) == [[10, 8]]
def test_dmp_eval_in():
assert dmp_eval_in(
f_6, -2, 1, 3, ZZ) == dmp_eval(dmp_swap(f_6, 0, 1, 3, ZZ), -2, 3, ZZ)
assert dmp_eval_in(
f_6, 7, 1, 3, ZZ) == dmp_eval(dmp_swap(f_6, 0, 1, 3, ZZ), 7, 3, ZZ)
assert dmp_eval_in(f_6, -2, 2, 3, ZZ) == dmp_swap(
dmp_eval(dmp_swap(f_6, 0, 2, 3, ZZ), -2, 3, ZZ), 0, 1, 2, ZZ)
assert dmp_eval_in(f_6, 7, 2, 3, ZZ) == dmp_swap(
dmp_eval(dmp_swap(f_6, 0, 2, 3, ZZ), 7, 3, ZZ), 0, 1, 2, ZZ)
f = [[[long(45)]], [[]], [[]], [[long(-9)], [-1], [], [long(3), long(0), long(10), long(0)]]]
assert dmp_eval_in(f, -2, 2, 2, ZZ) == \
[[45], [], [], [-9, -1, 0, -44]]
def test_dmp_eval_tail():
assert dmp_eval_tail([[]], [1], 1, ZZ) == []
assert dmp_eval_tail([[[]]], [1], 2, ZZ) == [[]]
assert dmp_eval_tail([[[]]], [1, 2], 2, ZZ) == []
assert dmp_eval_tail(f_0, [], 2, ZZ) == f_0
assert dmp_eval_tail(f_0, [1, -17, 8], 2, ZZ) == 84496
assert dmp_eval_tail(f_0, [-17, 8], 2, ZZ) == [-1409, 3, 85902]
assert dmp_eval_tail(f_0, [8], 2, ZZ) == [[83, 2], [3], [302, 81, 1]]
assert dmp_eval_tail(f_1, [-17, 8], 2, ZZ) == [-136, 15699, 9166, -27144]
assert dmp_eval_tail(
f_2, [-12, 3], 2, ZZ) == [-1377, 0, -702, -1224, 0, -624]
assert dmp_eval_tail(
f_3, [-12, 3], 2, ZZ) == [144, 82, -5181, -28872, -14868, -540]
assert dmp_eval_tail(
f_4, [25, -1], 2, ZZ) == [152587890625, 9765625, -59605407714843750,
-3839159765625, -1562475, 9536712644531250, 610349546750, -4, 24414375000, 1562520]
assert dmp_eval_tail(f_5, [25, -1], 2, ZZ) == [-1, -78, -2028, -17576]
assert dmp_eval_tail(f_6, [0, 2, 4], 3, ZZ) == [5040, 0, 0, 4480]
def test_dmp_diff_eval_in():
assert dmp_diff_eval_in(f_6, 2, 7, 1, 3, ZZ) == \
dmp_eval(dmp_diff(dmp_swap(f_6, 0, 1, 3, ZZ), 2, 3, ZZ), 7, 3, ZZ)
def test_dup_revert():
f = [-QQ(1, 720), QQ(0), QQ(1, 24), QQ(0), -QQ(1, 2), QQ(0), QQ(1)]
g = [QQ(61, 720), QQ(0), QQ(5, 24), QQ(0), QQ(1, 2), QQ(0), QQ(1)]
assert dup_revert(f, 8, QQ) == g
raises(NotReversible, lambda: dup_revert([QQ(1), QQ(0)], 3, QQ))
def test_dmp_revert():
f = [-QQ(1, 720), QQ(0), QQ(1, 24), QQ(0), -QQ(1, 2), QQ(0), QQ(1)]
g = [QQ(61, 720), QQ(0), QQ(5, 24), QQ(0), QQ(1, 2), QQ(0), QQ(1)]
assert dmp_revert(f, 8, 0, QQ) == g
raises(MultivariatePolynomialError, lambda: dmp_revert([[1]], 2, 1, QQ))
def test_dup_trunc():
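    # dup_trunc reduces coefficients to the symmetric residue system mod p,
    # e.g. mod 3: 2 -> -1, 4 -> 1, 5 -> -1 (and a leading zero is stripped),
    # as the assertions below illustrate.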
assert dup_trunc([1, 2, 3, 4, 5, 6], ZZ(3), ZZ) == [1, -1, 0, 1, -1, 0]
assert dup_trunc([6, 5, 4, 3, 2, 1], ZZ(3), ZZ) == [-1, 1, 0, -1, 1]
def test_dmp_trunc():
assert dmp_trunc([[]], [1, 2], 2, ZZ) == [[]]
assert dmp_trunc([[1, 2], [1, 4, 1], [1]], [1, 2], 1, ZZ) == [[-3], [1]]
def test_dmp_ground_trunc():
assert dmp_ground_trunc(f_0, ZZ(3), 2, ZZ) == \
dmp_normal(
[[[1, -1, 0], [-1]], [[]], [[1, -1, 0], [1, -1, 1], [1]]], 2, ZZ)
def test_dup_monic():
assert dup_monic([3, 6, 9], ZZ) == [1, 2, 3]
raises(ExactQuotientFailed, lambda: dup_monic([3, 4, 5], ZZ))
assert dup_monic([], QQ) == []
assert dup_monic([QQ(1)], QQ) == [QQ(1)]
assert dup_monic([QQ(7), QQ(1), QQ(21)], QQ) == [QQ(1), QQ(1, 7), QQ(3)]
def test_dmp_ground_monic():
assert dmp_ground_monic([[3], [6], [9]], 1, ZZ) == [[1], [2], [3]]
raises(
ExactQuotientFailed, lambda: dmp_ground_monic([[3], [4], [5]], 1, ZZ))
assert dmp_ground_monic([[]], 1, QQ) == [[]]
assert dmp_ground_monic([[QQ(1)]], 1, QQ) == [[QQ(1)]]
assert dmp_ground_monic(
[[QQ(7)], [QQ(1)], [QQ(21)]], 1, QQ) == [[QQ(1)], [QQ(1, 7)], [QQ(3)]]
def test_dup_content():
assert dup_content([], ZZ) == ZZ(0)
assert dup_content([1], ZZ) == ZZ(1)
assert dup_content([-1], ZZ) == ZZ(1)
assert dup_content([1, 1], ZZ) == ZZ(1)
assert dup_content([2, 2], ZZ) == ZZ(2)
assert dup_content([1, 2, 1], ZZ) == ZZ(1)
assert dup_content([2, 4, 2], ZZ) == ZZ(2)
assert dup_content([QQ(2, 3), QQ(4, 9)], QQ) == QQ(2, 9)
assert dup_content([QQ(2, 3), QQ(4, 5)], QQ) == QQ(2, 15)
def test_dmp_ground_content():
assert dmp_ground_content([[]], 1, ZZ) == ZZ(0)
assert dmp_ground_content([[]], 1, QQ) == QQ(0)
assert dmp_ground_content([[1]], 1, ZZ) == ZZ(1)
assert dmp_ground_content([[-1]], 1, ZZ) == ZZ(1)
assert dmp_ground_content([[1], [1]], 1, ZZ) == ZZ(1)
assert dmp_ground_content([[2], [2]], 1, ZZ) == ZZ(2)
assert dmp_ground_content([[1], [2], [1]], 1, ZZ) == ZZ(1)
assert dmp_ground_content([[2], [4], [2]], 1, ZZ) == ZZ(2)
assert dmp_ground_content([[QQ(2, 3)], [QQ(4, 9)]], 1, QQ) == QQ(2, 9)
assert dmp_ground_content([[QQ(2, 3)], [QQ(4, 5)]], 1, QQ) == QQ(2, 15)
assert dmp_ground_content(f_0, 2, ZZ) == ZZ(1)
assert dmp_ground_content(
dmp_mul_ground(f_0, ZZ(2), 2, ZZ), 2, ZZ) == ZZ(2)
assert dmp_ground_content(f_1, 2, ZZ) == ZZ(1)
assert dmp_ground_content(
dmp_mul_ground(f_1, ZZ(3), 2, ZZ), 2, ZZ) == ZZ(3)
assert dmp_ground_content(f_2, 2, ZZ) == ZZ(1)
assert dmp_ground_content(
dmp_mul_ground(f_2, ZZ(4), 2, ZZ), 2, ZZ) == ZZ(4)
assert dmp_ground_content(f_3, 2, ZZ) == ZZ(1)
assert dmp_ground_content(
dmp_mul_ground(f_3, ZZ(5), 2, ZZ), 2, ZZ) == ZZ(5)
assert dmp_ground_content(f_4, 2, ZZ) == ZZ(1)
assert dmp_ground_content(
dmp_mul_ground(f_4, ZZ(6), 2, ZZ), 2, ZZ) == ZZ(6)
assert dmp_ground_content(f_5, 2, ZZ) == ZZ(1)
assert dmp_ground_content(
dmp_mul_ground(f_5, ZZ(7), 2, ZZ), 2, ZZ) == ZZ(7)
assert dmp_ground_content(f_6, 3, ZZ) == ZZ(1)
assert dmp_ground_content(
dmp_mul_ground(f_6, ZZ(8), 3, ZZ), 3, ZZ) == ZZ(8)
def test_dup_primitive():
assert dup_primitive([], ZZ) == (ZZ(0), [])
assert dup_primitive([ZZ(1)], ZZ) == (ZZ(1), [ZZ(1)])
assert dup_primitive([ZZ(1), ZZ(1)], ZZ) == (ZZ(1), [ZZ(1), ZZ(1)])
assert dup_primitive([ZZ(2), ZZ(2)], ZZ) == (ZZ(2), [ZZ(1), ZZ(1)])
assert dup_primitive(
[ZZ(1), ZZ(2), ZZ(1)], ZZ) == (ZZ(1), [ZZ(1), ZZ(2), ZZ(1)])
assert dup_primitive(
[ZZ(2), ZZ(4), ZZ(2)], ZZ) == (ZZ(2), [ZZ(1), ZZ(2), ZZ(1)])
assert dup_primitive([], QQ) == (QQ(0), [])
assert dup_primitive([QQ(1)], QQ) == (QQ(1), [QQ(1)])
assert dup_primitive([QQ(1), QQ(1)], QQ) == (QQ(1), [QQ(1), QQ(1)])
assert dup_primitive([QQ(2), QQ(2)], QQ) == (QQ(2), [QQ(1), QQ(1)])
assert dup_primitive(
[QQ(1), QQ(2), QQ(1)], QQ) == (QQ(1), [QQ(1), QQ(2), QQ(1)])
assert dup_primitive(
[QQ(2), QQ(4), QQ(2)], QQ) == (QQ(2), [QQ(1), QQ(2), QQ(1)])
assert dup_primitive(
[QQ(2, 3), QQ(4, 9)], QQ) == (QQ(2, 9), [QQ(3), QQ(2)])
assert dup_primitive(
[QQ(2, 3), QQ(4, 5)], QQ) == (QQ(2, 15), [QQ(5), QQ(6)])
def test_dmp_ground_primitive():
assert dmp_ground_primitive([[]], 1, ZZ) == (ZZ(0), [[]])
assert dmp_ground_primitive(f_0, 2, ZZ) == (ZZ(1), f_0)
assert dmp_ground_primitive(
dmp_mul_ground(f_0, ZZ(2), 2, ZZ), 2, ZZ) == (ZZ(2), f_0)
assert dmp_ground_primitive(f_1, 2, ZZ) == (ZZ(1), f_1)
assert dmp_ground_primitive(
dmp_mul_ground(f_1, ZZ(3), 2, ZZ), 2, ZZ) == (ZZ(3), f_1)
assert dmp_ground_primitive(f_2, 2, ZZ) == (ZZ(1), f_2)
assert dmp_ground_primitive(
dmp_mul_ground(f_2, ZZ(4), 2, ZZ), 2, ZZ) == (ZZ(4), f_2)
assert dmp_ground_primitive(f_3, 2, ZZ) == (ZZ(1), f_3)
assert dmp_ground_primitive(
dmp_mul_ground(f_3, ZZ(5), 2, ZZ), 2, ZZ) == (ZZ(5), f_3)
assert dmp_ground_primitive(f_4, 2, ZZ) == (ZZ(1), f_4)
assert dmp_ground_primitive(
dmp_mul_ground(f_4, ZZ(6), 2, ZZ), 2, ZZ) == (ZZ(6), f_4)
assert dmp_ground_primitive(f_5, 2, ZZ) == (ZZ(1), f_5)
assert dmp_ground_primitive(
dmp_mul_ground(f_5, ZZ(7), 2, ZZ), 2, ZZ) == (ZZ(7), f_5)
assert dmp_ground_primitive(f_6, 3, ZZ) == (ZZ(1), f_6)
assert dmp_ground_primitive(
dmp_mul_ground(f_6, ZZ(8), 3, ZZ), 3, ZZ) == (ZZ(8), f_6)
assert dmp_ground_primitive([[ZZ(2)]], 1, ZZ) == (ZZ(2), [[ZZ(1)]])
assert dmp_ground_primitive([[QQ(2)]], 1, QQ) == (QQ(2), [[QQ(1)]])
assert dmp_ground_primitive(
[[QQ(2, 3)], [QQ(4, 9)]], 1, QQ) == (QQ(2, 9), [[QQ(3)], [QQ(2)]])
assert dmp_ground_primitive(
[[QQ(2, 3)], [QQ(4, 5)]], 1, QQ) == (QQ(2, 15), [[QQ(5)], [QQ(6)]])
def test_dup_extract():
f = dup_normal([2930944, 0, 2198208, 0, 549552, 0, 45796], ZZ)
g = dup_normal([17585664, 0, 8792832, 0, 1099104, 0], ZZ)
F = dup_normal([64, 0, 48, 0, 12, 0, 1], ZZ)
G = dup_normal([384, 0, 192, 0, 24, 0], ZZ)
assert dup_extract(f, g, ZZ) == (45796, F, G)
def test_dmp_ground_extract():
f = dmp_normal(
[[2930944], [], [2198208], [], [549552], [], [45796]], 1, ZZ)
g = dmp_normal([[17585664], [], [8792832], [], [1099104], []], 1, ZZ)
F = dmp_normal([[64], [], [48], [], [12], [], [1]], 1, ZZ)
G = dmp_normal([[384], [], [192], [], [24], []], 1, ZZ)
assert dmp_ground_extract(f, g, 1, ZZ) == (45796, F, G)
def test_dup_real_imag():
assert dup_real_imag([], ZZ) == ([[]], [[]])
assert dup_real_imag([1], ZZ) == ([[1]], [[]])
assert dup_real_imag([1, 1], ZZ) == ([[1], [1]], [[1, 0]])
assert dup_real_imag([1, 2], ZZ) == ([[1], [2]], [[1, 0]])
assert dup_real_imag(
[1, 2, 3], ZZ) == ([[1], [2], [-1, 0, 3]], [[2, 0], [2, 0]])
raises(DomainError, lambda: dup_real_imag([EX(1), EX(2)], EX))
def test_dup_mirror():
assert dup_mirror([], ZZ) == []
assert dup_mirror([1], ZZ) == [1]
assert dup_mirror([1, 2, 3, 4, 5], ZZ) == [1, -2, 3, -4, 5]
assert dup_mirror([1, 2, 3, 4, 5, 6], ZZ) == [-1, 2, -3, 4, -5, 6]
def test_dup_scale():
assert dup_scale([], -1, ZZ) == []
assert dup_scale([1], -1, ZZ) == [1]
assert dup_scale([1, 2, 3, 4, 5], -1, ZZ) == [1, -2, 3, -4, 5]
assert dup_scale([1, 2, 3, 4, 5], -7, ZZ) == [2401, -686, 147, -28, 5]
def test_dup_shift():
assert dup_shift([], 1, ZZ) == []
assert dup_shift([1], 1, ZZ) == [1]
assert dup_shift([1, 2, 3, 4, 5], 1, ZZ) == [1, 6, 15, 20, 15]
assert dup_shift([1, 2, 3, 4, 5], 7, ZZ) == [1, 30, 339, 1712, 3267]
def test_dup_transform():
assert dup_transform([], [], [1, 1], ZZ) == []
assert dup_transform([], [1], [1, 1], ZZ) == []
assert dup_transform([], [1, 2], [1, 1], ZZ) == []
assert dup_transform([6, -5, 4, -3, 17], [1, -3, 4], [2, -3], ZZ) == \
[6, -82, 541, -2205, 6277, -12723, 17191, -13603, 4773]
def test_dup_compose():
assert dup_compose([], [], ZZ) == []
assert dup_compose([], [1], ZZ) == []
assert dup_compose([], [1, 2], ZZ) == []
assert dup_compose([1], [], ZZ) == [1]
assert dup_compose([1, 2, 0], [], ZZ) == []
assert dup_compose([1, 2, 1], [], ZZ) == [1]
assert dup_compose([1, 2, 1], [1], ZZ) == [4]
assert dup_compose([1, 2, 1], [7], ZZ) == [64]
assert dup_compose([1, 2, 1], [1, -1], ZZ) == [1, 0, 0]
assert dup_compose([1, 2, 1], [1, 1], ZZ) == [1, 4, 4]
assert dup_compose([1, 2, 1], [1, 2, 1], ZZ) == [1, 4, 8, 8, 4]
def test_dmp_compose():
assert dmp_compose([1, 2, 1], [1, 2, 1], 0, ZZ) == [1, 4, 8, 8, 4]
assert dmp_compose([[[]]], [[[]]], 2, ZZ) == [[[]]]
assert dmp_compose([[[]]], [[[1]]], 2, ZZ) == [[[]]]
assert dmp_compose([[[]]], [[[1]], [[2]]], 2, ZZ) == [[[]]]
assert dmp_compose([[[1]]], [], 2, ZZ) == [[[1]]]
assert dmp_compose([[1], [2], [ ]], [[]], 1, ZZ) == [[]]
assert dmp_compose([[1], [2], [1]], [[]], 1, ZZ) == [[1]]
assert dmp_compose([[1], [2], [1]], [[1]], 1, ZZ) == [[4]]
assert dmp_compose([[1], [2], [1]], [[7]], 1, ZZ) == [[64]]
assert dmp_compose([[1], [2], [1]], [[1], [-1]], 1, ZZ) == [[1], [ ], [ ]]
assert dmp_compose([[1], [2], [1]], [[1], [ 1]], 1, ZZ) == [[1], [4], [4]]
assert dmp_compose(
[[1], [2], [1]], [[1], [2], [1]], 1, ZZ) == [[1], [4], [8], [8], [4]]
def test_dup_decompose():
assert dup_decompose([1], ZZ) == [[1]]
assert dup_decompose([1, 0], ZZ) == [[1, 0]]
assert dup_decompose([1, 0, 0, 0], ZZ) == [[1, 0, 0, 0]]
assert dup_decompose([1, 0, 0, 0, 0], ZZ) == [[1, 0, 0], [1, 0, 0]]
assert dup_decompose(
[1, 0, 0, 0, 0, 0, 0], ZZ) == [[1, 0, 0, 0], [1, 0, 0]]
assert dup_decompose([7, 0, 0, 0, 1], ZZ) == [[7, 0, 1], [1, 0, 0]]
assert dup_decompose([4, 0, 3, 0, 2], ZZ) == [[4, 3, 2], [1, 0, 0]]
f = [1, 0, 20, 0, 150, 0, 500, 0, 625, -2, 0, -10, 9]
assert dup_decompose(f, ZZ) == [[1, 0, 0, -2, 9], [1, 0, 5, 0]]
f = [2, 0, 40, 0, 300, 0, 1000, 0, 1250, -4, 0, -20, 18]
assert dup_decompose(f, ZZ) == [[2, 0, 0, -4, 18], [1, 0, 5, 0]]
f = [1, 0, 20, -8, 150, -120, 524, -600, 865, -1034, 600, -170, 29]
assert dup_decompose(f, ZZ) == [[1, -8, 24, -34, 29], [1, 0, 5, 0]]
R, t = ring("t", ZZ)
f = [6*t**2 - 42,
48*t**2 + 96,
144*t**2 + 648*t + 288,
624*t**2 + 864*t + 384,
108*t**3 + 312*t**2 + 432*t + 192]
assert dup_decompose(f, R.to_domain()) == [f]
def test_dmp_lift():
q = [QQ(1, 1), QQ(0, 1), QQ(1, 1)]
f = [ANP([QQ(1, 1)], q, QQ), ANP([], q, QQ), ANP([], q, QQ),
ANP([QQ(1, 1), QQ(0, 1)], q, QQ), ANP([QQ(17, 1), QQ(0, 1)], q, QQ)]
assert dmp_lift(f, 0, QQ.algebraic_field(I)) == \
[QQ(1), QQ(0), QQ(0), QQ(0), QQ(0), QQ(0), QQ(2), QQ(0), QQ(578),
QQ(0), QQ(0), QQ(0), QQ(1), QQ(0), QQ(-578), QQ(0), QQ(83521)]
raises(DomainError, lambda: dmp_lift([EX(1), EX(2)], 0, EX))
def test_dup_sign_variations():
assert dup_sign_variations([], ZZ) == 0
assert dup_sign_variations([1, 0], ZZ) == 0
assert dup_sign_variations([1, 0, 2], ZZ) == 0
assert dup_sign_variations([1, 0, 3, 0], ZZ) == 0
assert dup_sign_variations([1, 0, 4, 0, 5], ZZ) == 0
assert dup_sign_variations([-1, 0, 2], ZZ) == 1
assert dup_sign_variations([-1, 0, 3, 0], ZZ) == 1
assert dup_sign_variations([-1, 0, 4, 0, 5], ZZ) == 1
assert dup_sign_variations([-1, -4, -5], ZZ) == 0
assert dup_sign_variations([ 1, -4, -5], ZZ) == 1
assert dup_sign_variations([ 1, 4, -5], ZZ) == 1
assert dup_sign_variations([ 1, -4, 5], ZZ) == 2
assert dup_sign_variations([-1, 4, -5], ZZ) == 2
assert dup_sign_variations([-1, 4, 5], ZZ) == 1
assert dup_sign_variations([-1, -4, 5], ZZ) == 1
assert dup_sign_variations([ 1, 4, 5], ZZ) == 0
assert dup_sign_variations([-1, 0, -4, 0, -5], ZZ) == 0
assert dup_sign_variations([ 1, 0, -4, 0, -5], ZZ) == 1
assert dup_sign_variations([ 1, 0, 4, 0, -5], ZZ) == 1
assert dup_sign_variations([ 1, 0, -4, 0, 5], ZZ) == 2
assert dup_sign_variations([-1, 0, 4, 0, -5], ZZ) == 2
assert dup_sign_variations([-1, 0, 4, 0, 5], ZZ) == 1
assert dup_sign_variations([-1, 0, -4, 0, 5], ZZ) == 1
assert dup_sign_variations([ 1, 0, 4, 0, 5], ZZ) == 0
def test_dup_clear_denoms():
assert dup_clear_denoms([], QQ, ZZ) == (ZZ(1), [])
assert dup_clear_denoms([QQ(1)], QQ, ZZ) == (ZZ(1), [QQ(1)])
assert dup_clear_denoms([QQ(7)], QQ, ZZ) == (ZZ(1), [QQ(7)])
assert dup_clear_denoms([QQ(7, 3)], QQ) == (ZZ(3), [QQ(7)])
assert dup_clear_denoms([QQ(7, 3)], QQ, ZZ) == (ZZ(3), [QQ(7)])
assert dup_clear_denoms(
[QQ(3), QQ(1), QQ(0)], QQ, ZZ) == (ZZ(1), [QQ(3), QQ(1), QQ(0)])
assert dup_clear_denoms(
[QQ(1), QQ(1, 2), QQ(0)], QQ, ZZ) == (ZZ(2), [QQ(2), QQ(1), QQ(0)])
assert dup_clear_denoms([QQ(3), QQ(
1), QQ(0)], QQ, ZZ, convert=True) == (ZZ(1), [ZZ(3), ZZ(1), ZZ(0)])
assert dup_clear_denoms([QQ(1), QQ(
1, 2), QQ(0)], QQ, ZZ, convert=True) == (ZZ(2), [ZZ(2), ZZ(1), ZZ(0)])
assert dup_clear_denoms(
[EX(S(3)/2), EX(S(9)/4)], EX) == (EX(4), [EX(6), EX(9)])
assert dup_clear_denoms([EX(7)], EX) == (EX(1), [EX(7)])
assert dup_clear_denoms([EX(sin(x)/x), EX(0)], EX) == (EX(x), [EX(sin(x)), EX(0)])
def test_dmp_clear_denoms():
assert dmp_clear_denoms([[]], 1, QQ, ZZ) == (ZZ(1), [[]])
assert dmp_clear_denoms([[QQ(1)]], 1, QQ, ZZ) == (ZZ(1), [[QQ(1)]])
assert dmp_clear_denoms([[QQ(7)]], 1, QQ, ZZ) == (ZZ(1), [[QQ(7)]])
assert dmp_clear_denoms([[QQ(7, 3)]], 1, QQ) == (ZZ(3), [[QQ(7)]])
assert dmp_clear_denoms([[QQ(7, 3)]], 1, QQ, ZZ) == (ZZ(3), [[QQ(7)]])
assert dmp_clear_denoms(
[[QQ(3)], [QQ(1)], []], 1, QQ, ZZ) == (ZZ(1), [[QQ(3)], [QQ(1)], []])
assert dmp_clear_denoms([[QQ(
1)], [QQ(1, 2)], []], 1, QQ, ZZ) == (ZZ(2), [[QQ(2)], [QQ(1)], []])
assert dmp_clear_denoms([QQ(3), QQ(
1), QQ(0)], 0, QQ, ZZ, convert=True) == (ZZ(1), [ZZ(3), ZZ(1), ZZ(0)])
assert dmp_clear_denoms([QQ(1), QQ(1, 2), QQ(
0)], 0, QQ, ZZ, convert=True) == (ZZ(2), [ZZ(2), ZZ(1), ZZ(0)])
assert dmp_clear_denoms([[QQ(3)], [QQ(
1)], []], 1, QQ, ZZ, convert=True) == (ZZ(1), [[QQ(3)], [QQ(1)], []])
assert dmp_clear_denoms([[QQ(1)], [QQ(1, 2)], []], 1, QQ, ZZ,
convert=True) == (ZZ(2), [[QQ(2)], [QQ(1)], []])
assert dmp_clear_denoms(
[[EX(S(3)/2)], [EX(S(9)/4)]], 1, EX) == (EX(4), [[EX(6)], [EX(9)]])
assert dmp_clear_denoms([[EX(7)]], 1, EX) == (EX(1), [[EX(7)]])
assert dmp_clear_denoms([[EX(sin(x)/x), EX(0)]], 1, EX) == (EX(x), [[EX(sin(x)), EX(0)]])
|
simonsfoundation/CaImAn
|
refs/heads/master
|
use_cases/CaImAnpaper/create_movie_zebra.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Complete pipeline for online processing using OnACID.
@author: Andrea Giovannucci @agiovann and Eftychios Pnevmatikakis @epnev
Special thanks to Andreas Tolias and his lab at Baylor College of Medicine
for sharing their data used in this demo.
KERAS_BACKEND=tensorflow; CUDA_VISIBLE_DEVICES=-1; spyder
"""
import os
import sys
import numpy as np
try:
if __IPYTHON__:
print('Detected iPython')
        # this is used for debugging purposes only; it allows reloading classes when they change

get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
pass
from time import time
import caiman as cm
from caiman.utils.visualization import view_patches_bar
from caiman.utils.utils import download_demo, load_object, save_object
import pylab as pl
import scipy
from caiman.motion_correction import motion_correct_iteration_fast
import cv2
from caiman.utils.visualization import plot_contours
import glob
from caiman.source_extraction.cnmf.online_cnmf import bare_initialization
from copy import deepcopy
from caiman.paths import caiman_datadir
#%%
try:
import sys
if 'pydevconsole' in sys.argv[0]:
raise Exception()
ID = sys.argv[1]
ID = str(np.int(ID)+1)
print('Processing ID:'+ str(ID))
ploton = False
save_results = True
save_init = True # flag for saving initialization object. Useful if you want to check OnACID with different parameters but same initialization
except:
ID = 11 # process plane 12
print('ID NOT PASSED')
ploton = False
save_results = False
save_init = False # flag for saving initialization object. Useful if you want to check OnACID with different parameters but same initialization
reload = True
plot_figures = True
base_folder = '/mnt/ceph/neuro/DataForPublications/DATA_PAPER_ELIFE/WEBSITE/'
#%%
decay_time = 1.5
gSig = (6,6)
rval_thr = 1
epochs = 1
fls = [os.path.join(base_folder,'Zebrafish/Plane' + str(ID) + '.stack.hdf5')];
K = 100
min_num_trial = 50
mmm = cm.load(fls,subindices = 0)
dims = mmm.shape
K = np.maximum(K,np.round(600/1602720*np.prod(mmm.shape)).astype(np.int))
min_num_trial = np.maximum(min_num_trial,np.round(200/1602720*np.prod(mmm.shape)).astype(np.int))
# your list of files should look something like this
print(fls)
print([K,min_num_trial])
# number of passes over the data
#%% Set up some parameters
# frame rate (Hz)
fr = 2
#fr = 15
# approximate length of transient event in seconds
#decay_time = 0.5
# expected half size of neurons
#gSig = (2.5, 2.5)
# order of AR indicator dynamics
p = 1
# minimum SNR for accepting new components
min_SNR = 2.5
# correlation threshold for new component inclusion
#rval_thr = 0.85
# spatial downsampling factor (increases speed but may lose some fine structure)
ds_factor = 2
# number of background components
gnb = 3
# recompute gSig if downsampling is involved
gSig = tuple((np.array(gSig) / ds_factor))#.astype('int'))
# flag for online motion correction
mot_corr = True
# maximum allowed shift during motion correction
max_shift = np.ceil(10. / ds_factor).astype('int')
# set up some additional supporting parameters needed for the algorithm (these are default values but change according to dataset characteristics)
# number of shapes to be updated each time (put this to a finite small value to increase speed)
max_comp_update_shape = np.inf
# number of files used for initialization
init_files = 1
# number of files used for online
online_files = len(fls) - 1
# number of frames for initialization (presumably from the first file)
initbatch = 200
# maximum number of expected components used for memory pre-allocation (exaggerate here)
expected_comps = 600
# initial number of components
# number of timesteps to consider when testing new neuron candidates
N_samples = np.ceil(fr * decay_time)
# exceptionality threshold
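# (i.e. N_samples * log P(z <= -min_SNR) for a standard normal z: the log-probability
#  that noise alone produces min_SNR-level activity over N_samples consecutive frames)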
thresh_fitness_raw = scipy.special.log_ndtr(-min_SNR) * N_samples
# upper bound for number of frames in each file (used right below)
len_file = 1885#1885 1815
# total length of all files (if not known use a large number, then truncate at the end)
T1 = len(fls) * len_file * epochs
#%%
compute_corr = False
if compute_corr:
m = cm.load(fls)
mc = m.motion_correct(10,10)[0]
mp = (mc.computeDFF(3))
Cn = cv2.resize(mp[0].local_correlations(eight_neighbours=True, swap_dim=False),dims[::-1][:-1])
np.save(os.path.join(base_folder,'Zebrafish/results_analysis_online_Plane_CN_' + str(ID) + '.npy'), Cn)
else:
Cn = np.load(os.path.join(base_folder,'Zebrafish/results_analysis_online_Plane_CN_' + str(ID) + '.npy'))
#%%
big_mov = cm.load(fls[0])
mean_mov = big_mov.mean(0)
median_mov = np.median(big_mov,0)
#%%
pl.imshow(median_mov[::-1,::-1].T)
pl.colorbar()
#%% Initialize movie
# load only the first initbatch frames and possibly downsample them
if ds_factor > 1:
Y = cm.load(fls[0], subindices=slice(0, initbatch, None)).astype(
np.float32).resize(1. / ds_factor, 1. / ds_factor)
else:
Y = cm.load(fls[0], subindices=slice(
0, initbatch, None)).astype(np.float32)
if mot_corr: # perform motion correction on the first initbatch frames
mc = Y.motion_correct(max_shift, max_shift)
Y = mc[0].astype(np.float32)
borders = np.max(mc[1])
else:
Y = Y.astype(np.float32)
# minimum value of movie. Subtract it to make the data non-negative
img_min = Y.min()
Y -= img_min
img_norm = np.std(Y, axis=0)
# normalizing factor to equalize the FOV
img_norm += np.median(img_norm)
Y = Y / img_norm[None, :, :] # normalize data
_, d1, d2 = Y.shape
dims = (d1, d2) # dimensions of FOV
Yr = Y.to_2D().T # convert data into 2D array
Cn_init = Y.local_correlations(swap_dim=False) # compute correlation image
if ploton:
pl.imshow(Cn_init)
pl.title('Correlation Image on initial batch')
pl.colorbar()
#%% initialize OnACID with bare initialization
with np.load(os.path.join(base_folder,'Zebrafish/results_analysis_online_1EPOCH_gSig6_equalized_Plane_' + str(ID) + '.npz')) as ld:
locals().update(ld)
print(ld.keys())
Ab = Ab[()]
A, b = Ab[:, gnb:], Ab[:, :gnb].toarray()
C, f = Cf[gnb:Ab.shape[-1],:T1], Cf[:gnb, :T1]
noisyC = noisyC[:, :T1]
# m = cm.movie((A.dot(C)+b.dot(f))[:].reshape(list(dims)+[-1],order='F')).transpose([2,0,1])*img_norm[None,:,:]
m = cm.movie((A.dot(C))[:].reshape(list(dims)+[-1],order='F')).transpose([2,0,1])*img_norm[None,:,:]
mean_img = np.median(m,0)
#%%
recompute_CN = False
if recompute_CN:
m = cm.load(fls)
m = m.motion_correct(10,10)[0]
mp = (m.computeDFF(3))
Cn_ = mp[0].local_correlations(eight_neighbours=True, swap_dim=False)
Cn_ = cv2.resize(Cn_,dims[::-1])
else:
Cn_ = Cn
#%% FIGURE 7a TOP
if ploton:
crd = cm.utils.visualization.plot_contours(A, Cn_, thr=0.9, vmax = 0.75)
view_patches_bar(Yr, scipy.sparse.coo_matrix(A.tocsc()[:, :]), C[:, :], b, f,
dims[0], dims[1], YrA=noisyC[gnb:A.shape[-1]+gnb] - C, img=Cn_)
#%% START CLUSTER
if plot_figures:
try:
# cm.stop_server()
dview.terminate()
except:
print('No clusters to stop')
c, dview, n_processes = cm.cluster.setup_cluster(
backend='local', n_processes=24)
#%% FIGURE 6 Supplement 1 (print_masks = True or False to print the right or left sides), including Figure 6d (plane 11)
print('THIS MIGHT TAKE A LONG TIME IF YOU WANT BOTH MASKS AND TRACES!')
print_masks = False
IDDS = range(1,46)
for plot_traces, nm in zip([True, False],['Traces','Masks']):
from sklearn.preprocessing import normalize
num_neur = []
#tott = np.zeros_like(tottime)
update_comps_time = []
tott = []
totneursum = 0
time_per_neuron = []
pl.figure("Figure 11 " + nm)
for ID in IDDS:
# try:
with np.load(os.path.join(base_folder,'Zebrafish/results_analysis_online_1EPOCH_gSig6_equalized_Plane_' + str(ID) + '.npz')) as ld:
locals().update(ld)
print(np.sum(ld['tottime'])+ld['time_init'])
tottime = ld['tottime']
print(ld.keys())
totneursum += ld['Cf'].shape[0]-3
pl.subplot(5,9,ID)
# img = normalize(Ab[()][:,3:],'l1',axis=0).mean(-1).reshape(dims,order = 'F').T
Cn_ = np.load(os.path.join(base_folder,'Zebrafish/results_analysis_online_Plane_CN_'+str(ID)+ '.npy'))
if plot_traces :
pl.imshow(ld['Cf'][3:],aspect = 'auto', vmax = 10)
pl.ylim([0,1950])
pl.axis('off')
pl.pause(0.1)
else:
# pl.figure();crd = cm.utils.visualization.plot_contours(
# Ab[()][:,3:].toarray().reshape(tuple(dims)+(-1,), order = 'F').transpose([1,0,2]).\
# reshape((dims[1]*dims[0],-1),order = 'F'), cv2.resize(Cn_,tuple(dims[::-1])).T, thr=0.9, vmax = 0.75,
# display_numbers=False)
A_thr = cm.source_extraction.cnmf.spatial.threshold_components(ld['Ab'][()].tocsc()[:,gnb:].toarray(), dims, medw=None, thr_method='nrg',
maxthr=0.3, nrgthr=0.95, extract_cc=True,
se=None, ss=None, dview=dview)
# np.save('/mnt/ceph/neuro/zebra/05292014Fish1-4/thresholded_components' + str(ID) + '.npy',A_thr)
# A_thr = np.load('Zebrafish/thresholded_components' + str(ID) + '.npy')
# img = normalize(Ab[()][:,gnb:].multiply(A_thr),'l1',axis=0).mean(-1).reshape(dims,order = 'F').T
# img = Ab[()][:,gnb:].multiply(A_thr).mean(-1).reshape(dims,order = 'F').T
Ab_thr = ld['Ab'][()][:,gnb:].multiply(A_thr)
img = (Ab_thr.dot(scipy.sparse.spdiags(np.minimum(1.0/np.max(Ab_thr,0).toarray(),100),0,Ab_thr.shape[-1],Ab_thr.shape[-1]))).mean(-1).reshape(dims,order = 'F').T
xx,yy = np.subtract((560,860),img.shape)//2+1
pl.imshow(cv2.copyMakeBorder(img,xx,xx,yy,yy, cv2.BORDER_CONSTANT,0),vmin=np.percentile(img,5),vmax=np.percentile(img,99.99),cmap = 'gray')
# A_thr = A_thr > 0
# pl.imshow(((A_thr*np.random.randint(1,10,A_thr.shape[-1])[None,:]).sum(-1).reshape(dims,order='F')).T, cmap = 'hot', vmin = 0.9, vmax=20)
pl.axis('off')
pl.pause(0.05)
num_neur.append(num_comps[1884-201])
tottime = tottime[:1885-201]
num_comps = num_comps[:1885-201]
update_comps_time.append((np.array(num_comps)[99::100],tottime[99::100].copy()))
tottime[99::100] = np.nan
tottime[0] = np.nan
[(np.where(np.diff([0]+list(num_comps))==cc)[0], tottime[np.where(np.diff([0]+list(num_comps))==cc)[0]]) for cc in range(6)]
tott.append(tottime)
# except:
print(ID)
if not print_masks:
break
pl.tight_layout()
#%% FIGURE 8 g
pl.rcParams['pdf.fonttype'] = 42
font = {'family' : 'Arial',
'weight' : 'regular',
'size' : 20}
pl.rc('font', **font)
pl.figure("Figure 8c")
pl.plot(np.arange(1885-201)*1,np.max(tott,0))
pl.plot([0,(1885-201)*1],[1,1],'k--')
pl.ylabel('time (s)')
pl.xlabel('time (s)')
pl.title('neural activity tracking')
#%% FIGURE 6 e
pl.figure("Figure 6e")
pl.plot(num_neur,'r.')
#%% FIGURE 6a,b,c PREPARATION
print('THIS WILL TAKE SOME TIME!!')
from sklearn.preprocessing import normalize
num_neur = []
#tott = np.zeros_like(tottime)
update_comps_time = []
tott = []
time_per_neuron = []
pl.figure()
for ID in range(11,12):
# try:
with np.load(os.path.join(base_folder,'Zebrafish/results_analysis_online_1EPOCH_gSig6_equalized_Plane_' + str(ID) + '.npz')) as ld:
locals().update(ld)
print(ld.keys())
pl.subplot(5,9,ID)
# img = normalize(Ab[()][:,3:],'l1',axis=0).mean(-1).reshape(dims,order = 'F').T
Cn_ = np.load(os.path.join(base_folder,'Zebrafish/results_analysis_online_Plane_CN_'+str(ID)+ '.npy'))
#
# pl.imshow(Cf[3:],aspect = 'auto', vmax = 10)
pl.figure();crd = cm.utils.visualization.plot_contours(
Ab[()][:,3:].toarray().reshape(tuple(dims)+(-1,), order = 'F').transpose([1,0,2]).\
reshape((dims[1]*dims[0],-1),order = 'F'), cv2.resize(Cn_,tuple(dims[::-1])).T, thr=0.9, vmax = 0.75,
display_numbers=False)
A_thr = cm.source_extraction.cnmf.spatial.threshold_components(Ab[()].tocsc()[:,gnb:].toarray(), dims, medw=None, thr_method='nrg',
maxthr=0.3, nrgthr=0.95, extract_cc=True,
se=None, ss=None, dview=dview)
Ab_thr = Ab[()][:,gnb:].multiply(A_thr)
img = (Ab_thr.dot(scipy.sparse.spdiags(np.minimum(1.0/np.max(Ab_thr,0).toarray(),100),0,Ab_thr.shape[-1],Ab_thr.shape[-1]))).mean(-1).reshape(dims,order = 'F').T
pl.imshow(img,vmin=np.percentile(img,5),vmax=np.percentile(img,99.99),cmap = 'hot')
# A_thr = A_thr > 0
# pl.imshow(((A_thr*np.random.randint(1,10,A_thr.shape[-1])[None,:]).sum(-1).reshape(dims,order='F')).T, cmap = 'hot', vmin = 0.9, vmax=20)
pl.axis('off')
pl.pause(0.05)
print(ID)
pl.tight_layout()
#% close the unused figures
pl.close()
pl.close()
#%% predictions for Plane 11 to choose nicely looking neurons
from skimage.util._montage import montage2d
predictions, final_crops = cm.components_evaluation.evaluate_components_CNN(Ab[()][:,gnb:], dims, np.array(gSig).astype(np.int), model_name=os.path.join(caiman_datadir(), 'model', 'cnn_model'), patch_size=50, loaded_model=None, isGPU=False)
#%% FIGURE 6 a,b
idx = np.argsort(predictions[:,0])[:10]#[[0,1,2,3,5,9]]
Ab_part = Ab[()][:,gnb:][:,idx]
pl.imshow(montage2d(final_crops[idx]))
pl.close()
pl.figure("Figure 6a base");crd = cm.utils.visualization.plot_contours(
Ab_part.toarray().reshape(tuple(dims)+(-1,), order = 'F').transpose([1,0,2]).\
reshape((dims[1]*dims[0],-1),order = 'F'), cv2.resize(Cn_,tuple(dims[::-1])).T, thr=0.9, vmax = 0.95,
display_numbers=True)
pl.colorbar()
pl.figure("Figure 6a overlay");crd = cm.utils.visualization.plot_contours(
Ab_part.toarray().reshape(tuple(dims)+(-1,), order = 'F').transpose([1,0,2]).\
reshape((dims[1]*dims[0],-1),order = 'F'), img, thr=0.9, display_numbers=True, vmax = .001)
pl.colorbar()
#%% FIGURE 6c
pl.figure("Figure 6c")
count = 0
for cf,sp_c in zip(Cf[idx+gnb], final_crops[idx]):
pl.subplot(10,2,2*count+1)
pl.imshow(sp_c[10:-10,10:-10][::-1][::-1].T)
pl.axis('off')
pl.subplot(10,2,2*count+2)
pl.plot(cf/cf.max())
pl.ylim([0,1])
count+=1
pl.axis('off')
|