| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, may be null) |
|---|---|---|---|---|
dandanvidi/capacity-usage | refs/heads/master | scripts/MFA_constrained_pFBA.py | 3 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 25 13:20:57 2016

@author: dan
"""
import pandas as pd
import numpy as np
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
from cobra.manipulation.modify import convert_to_irreversible

conditions = pd.DataFrame.from_csv("../data/conditions.csv")
conditions = conditions[conditions.media_key > 0]
conditions.sort_values('growth rate Gerosa [h-1]', inplace=True)
cs = conditions.index

projections = pd.DataFrame.from_csv('../data/flux projections[mmol_gCDW_h].csv')

model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
convert_to_irreversible(model)
mmol_gCDW = pd.DataFrame(index=map(str, model.reactions), columns=cs)

for c in projections.columns:
    cobra_c = conditions.loc[c, 'media_key']
    gr = conditions.loc[c, 'growth rate Gerosa [h-1]']
    model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
    convert_to_irreversible(model)

    # redefine sole carbon source uptake reaction in mmol/gr/h
    model.reactions.get_by_id('EX_glc_e').lower_bound = 0
    model.reactions.get_by_id('EX_' + cobra_c + '_e').lower_bound = -1000

    # set growth rate according to measurements
    biomass = "Ec_biomass_iJO1366_WT_53p95M"
    model.change_objective(biomass)
    growth_rate = model.reactions.get_by_id(biomass)
    growth_rate.upper_bound = gr
    # growth_rate.lower_bound = gr

    # pin each measured flux: positive projections constrain the forward
    # reaction, negative ones constrain the matching '_reverse' reaction
    revs = []
    for rid in projections.index:
        v = projections.loc[rid, c]
        r_f = model.reactions.get_by_id(rid)
        if v < 0:
            r_b = model.reactions.get_by_id(rid + '_reverse')
            revs.append(r_b)
            r_f.upper_bound = 0
            r_f.lower_bound = 0
            r_b.upper_bound = -v
            r_b.lower_bound = -v
        else:
            r_f.upper_bound = v
            r_f.lower_bound = v

    f = optimize_minimal_flux(model, already_irreversible=True)
    mmol_gCDW[c].update(pd.Series(f.x_dict))
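# unit conversion below: 1 mmol/gCDW/h = 1000 umol per 60 min, hence the
# factor of 1000/60 to obtain umol/gCDW/min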
umol_gCDW_min = mmol_gCDW * 1000 / 60
mmol_gCDW.to_csv('../data/mmol_gCDW_hr.csv')
umol_gCDW_min.to_csv('../data/umol_gCDW_min.csv')
|
ivancrneto/django-magic-album | refs/heads/master | magicalbum/utils.py | 1 |
""" Utilitary functions module """
from django.core.mail import EmailMessage
def send_album_email(sender, instance, **kwargs):
""" sends email showing the album number of pictures when it has multiple
of 100 pictures and less then 501 pictures """
num_pics = len(instance.pictures)
if num_pics and not num_pics % 100 and num_pics < 501:
email_from = 'Eversnap Hashtag <Hashtag@EversnapApp.com>'
email_to = ['ivan.cr.neto@gmail.com']
bcc = ['davide@geteversnap.com']
subject = '#carnival has {} photos'.format(num_pics)
message = 'I\'m awesome!'
mail = EmailMessage(subject, message, email_from, email_to, bcc=bcc)
mail.send(fail_silently=False)
|
MapStory/geonode | refs/heads/master | geonode/upload/signals.py | 32 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.dispatch import Signal
upload_complete = Signal(providing_args=['layer'])
|
mariansoban/ardupilot | refs/heads/Copter-4.0.x-sobi-phl-8m | Tools/autotest/pysim/util.py | 5 |
from __future__ import print_function

import atexit
import math
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
from math import acos, atan2, cos, pi, sqrt

import pexpect

from . rotmat import Matrix3, Vector3

if (sys.version_info[0] >= 3):
    ENCODING = 'ascii'
else:
    ENCODING = None

RADIUS_OF_EARTH = 6378100.0  # in meters


def m2ft(x):
    """Meters to feet."""
    return float(x) / 0.3048


def ft2m(x):
    """Feet to meters."""
    return float(x) * 0.3048


def kt2mps(x):
    return x * 0.514444444


def mps2kt(x):
    return x / 0.514444444


def topdir():
    """Return top of git tree where autotest is running from."""
    d = os.path.dirname(os.path.realpath(__file__))
    assert(os.path.basename(d) == 'pysim')
    d = os.path.dirname(d)
    assert(os.path.basename(d) == 'autotest')
    d = os.path.dirname(d)
    assert(os.path.basename(d) == 'Tools')
    d = os.path.dirname(d)
    return d


def reltopdir(path):
    """Return a path relative to topdir()."""
    return os.path.normpath(os.path.join(topdir(), path))


def run_cmd(cmd, directory=".", show=True, output=False, checkfail=True):
    """Run a shell command."""
    shell = False
    if not isinstance(cmd, list):
        cmd = [cmd]
        shell = True
    if show:
        print("Running: (%s) in (%s)" % (cmd_as_shell(cmd), directory,))
    if output:
        return subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE, cwd=directory).communicate()[0]
    elif checkfail:
        return subprocess.check_call(cmd, shell=shell, cwd=directory)
    else:
        return subprocess.call(cmd, shell=shell, cwd=directory)


def rmfile(path):
    """Remove a file if it exists."""
    try:
        os.unlink(path)
    except Exception:
        pass


def deltree(path):
    """Delete a tree of files."""
    run_cmd('rm -rf %s' % path)


def relwaf():
    return "./modules/waf/waf-light"


def waf_configure(board, j=None, debug=False, extra_args=[]):
    cmd_configure = [relwaf(), "configure", "--board", board]
    if debug:
        cmd_configure.append('--debug')
    if j is not None:
        cmd_configure.extend(['-j', str(j)])
    pieces = [shlex.split(x) for x in extra_args]
    for piece in pieces:
        cmd_configure.extend(piece)
    run_cmd(cmd_configure, directory=topdir(), checkfail=True)


def waf_clean():
    run_cmd([relwaf(), "clean"], directory=topdir(), checkfail=True)


def build_SITL(build_target, j=None, debug=False, board='sitl', clean=True, configure=True, extra_configure_args=[]):
    """Build desktop SITL."""
    # first configure
    if configure:
        waf_configure(board, j=j, debug=debug, extra_args=extra_configure_args)
    # then clean
    if clean:
        waf_clean()
    # then build
    cmd_make = [relwaf(), "build", "--target", build_target]
    if j is not None:
        cmd_make.extend(['-j', str(j)])
    run_cmd(cmd_make, directory=topdir(), checkfail=True, show=True)
    return True


def build_examples(board, j=None, debug=False, clean=False):
    # first configure
    waf_configure(board, j=j, debug=debug)
    # then clean
    if clean:
        waf_clean()
    # then build
    cmd_make = [relwaf(), "examples"]
    run_cmd(cmd_make, directory=topdir(), checkfail=True, show=True)
    return True


def build_tests(board, j=None, debug=False, clean=False):
    # first configure
    waf_configure(board, j=j, debug=debug)
    # then clean
    if clean:
        waf_clean()
    # then build
    run_cmd([relwaf(), "tests"], directory=topdir(), checkfail=True, show=True)
    return True


# list of pexpect children to close on exit
close_list = []


def pexpect_autoclose(p):
    """Mark for autoclosing."""
    global close_list
    close_list.append(p)


def pexpect_close(p):
    """Close a pexpect child."""
    global close_list

    ex = None
    try:
        p.kill(signal.SIGTERM)
    except IOError as e:
        print("Caught exception: %s" % str(e))
        ex = e
        pass
    if ex is None:
        # give the process some time to go away
        for i in range(20):
            if not p.isalive():
                break
            time.sleep(0.05)
    try:
        p.close()
    except Exception:
        pass
    try:
        p.close(force=True)
    except Exception:
        pass
    if p in close_list:
        close_list.remove(p)


def pexpect_close_all():
    """Close all pexpect children."""
    global close_list
    for p in close_list[:]:
        pexpect_close(p)


def pexpect_drain(p):
    """Drain any pending input."""
    import pexpect
    try:
        p.read_nonblocking(1000, timeout=0)
    except Exception:
        pass


def cmd_as_shell(cmd):
    return (" ".join(['"%s"' % x for x in cmd]))
def make_safe_filename(text):
    """Return a version of text safe for use as a filename."""
    r = re.compile("([^a-zA-Z0-9_.+-])")
    text = text.replace('/', '-')  # str.replace returns a new string; assign the result
    filename = r.sub(lambda m: "%" + str(hex(ord(str(m.group(1))))).upper(), text)
    return filename
def valgrind_log_filepath(binary, model):
    return make_safe_filename('%s-%s-valgrind.log' % (os.path.basename(binary), model,))


def kill_screen_gdb():
    cmd = ["screen", "-X", "-S", "ardupilot-gdb", "quit"]
    subprocess.Popen(cmd)


def start_SITL(binary,
               valgrind=False,
               gdb=False,
               wipe=False,
               synthetic_clock=True,
               home=None,
               model=None,
               speedup=1,
               defaults_file=None,
               unhide_parameters=False,
               gdbserver=False,
               breakpoints=[],
               disable_breakpoints=False,
               vicon=False,
               lldb=False):
    """Launch a SITL instance."""
    cmd = []
    if valgrind and os.path.exists('/usr/bin/valgrind'):
        # we specify a prefix for vgdb-pipe because on Vagrant virtual
        # machines the pipes are created on the mountpoint for the
        # shared directory with the host machine. mmap's,
        # unsurprisingly, fail on files created on that mountpoint.
        vgdb_prefix = os.path.join(tempfile.gettempdir(), "vgdb-pipe")
        log_file = valgrind_log_filepath(binary=binary, model=model)
        cmd.extend([
            'valgrind',
            # adding this option allows valgrind to cope with the overload
            # of operator new
            "--soname-synonyms=somalloc=nouserintercepts",
            '--vgdb-prefix=%s' % vgdb_prefix,
            '-q',
            '--log-file=%s' % log_file])
    if gdbserver:
        cmd.extend(['gdbserver', 'localhost:3333'])
        if gdb:
            # attach gdb to the gdbserver:
            f = open("/tmp/x.gdb", "w")
            f.write("target extended-remote localhost:3333\nc\n")
            for breakpoint in breakpoints:
                f.write("b %s\n" % (breakpoint,))
            if disable_breakpoints:
                f.write("disable\n")
            f.close()
            run_cmd('screen -d -m -S ardupilot-gdbserver '
                    'bash -c "gdb -x /tmp/x.gdb"')
    elif gdb:
        f = open("/tmp/x.gdb", "w")
        for breakpoint in breakpoints:
            f.write("b %s\n" % (breakpoint,))
        if disable_breakpoints:
            f.write("disable\n")
        f.write("r\n")
        f.close()
        if os.environ.get('DISPLAY'):
            cmd.extend(['xterm', '-e', 'gdb', '-x', '/tmp/x.gdb', '--args'])
        else:
            cmd.extend(['screen',
                        '-L', '-Logfile', 'gdb.log',
                        '-d',
                        '-m',
                        '-S', 'ardupilot-gdb',
                        'gdb', '-x', '/tmp/x.gdb', binary, '--args'])
    elif lldb:
        f = open("/tmp/x.lldb", "w")
        for breakpoint in breakpoints:
            f.write("b %s\n" % (breakpoint,))
        if disable_breakpoints:
            f.write("disable\n")
        f.write("settings set target.process.stop-on-exec false\n")
        f.write("process launch\n")
        f.close()
        if os.environ.get('DISPLAY'):
            cmd.extend(['xterm', '-e', 'lldb', '-s', '/tmp/x.lldb', '--'])
        else:
            raise RuntimeError("DISPLAY was not set")

    cmd.append(binary)
    if wipe:
        cmd.append('-w')
    if synthetic_clock:
        cmd.append('-S')
    if home is not None:
        cmd.extend(['--home', home])
    if model is not None:
        cmd.extend(['--model', model])
    if speedup != 1:
        cmd.extend(['--speedup', str(speedup)])
    if defaults_file is not None:
        cmd.extend(['--defaults', defaults_file])
    if unhide_parameters:
        cmd.extend(['--unhide-groups'])
    if vicon:
        cmd.extend(["--uartF=sim:vicon:"])

    if gdb and not os.getenv('DISPLAY'):
        subprocess.Popen(cmd)
        atexit.register(kill_screen_gdb)
        # we are expected to return a pexpect wrapped around the
        # stdout of the ArduPilot binary. Not going to happen until
        # AP gets a redirect-stdout-to-filehandle option. So, in the
        # meantime, return a dummy:
        return pexpect.spawn("true", ["true"],
                             logfile=sys.stdout,
                             encoding=ENCODING,
                             timeout=5)

    print("Running: %s" % cmd_as_shell(cmd))
    first = cmd[0]
    rest = cmd[1:]
    child = pexpect.spawn(first, rest, logfile=sys.stdout, encoding=ENCODING, timeout=5)
    pexpect_autoclose(child)
    # give time for parameters to properly setup
    time.sleep(3)
    if gdb or lldb:
        # if we run GDB we do so in an xterm. "Waiting for
        # connection" is never going to appear on xterm's output.
        # ... so let's give it another magic second.
        time.sleep(1)
        # TODO: have a SITL-compiled ardupilot able to have its
        # console on an output fd.
    else:
        child.expect('Waiting for connection', timeout=300)
    return child
def mavproxy_cmd():
    '''return path to which mavproxy to use'''
    return os.getenv('MAVPROXY_CMD', 'mavproxy.py')


def MAVProxy_version():
    '''return the current version of mavproxy as a tuple e.g. (1,8,8)'''
    command = "%s --version" % mavproxy_cmd()
    output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).communicate()[0]
    output = output.decode('ascii')
    match = re.search("MAVProxy Version: ([0-9]+)[.]([0-9]+)[.]([0-9]+)", output)
    if match is None:
        raise ValueError("Unable to determine MAVProxy version from (%s)" % output)
    return (int(match.group(1)), int(match.group(2)), int(match.group(3)))


def start_MAVProxy_SITL(atype, aircraft=None, setup=False, master='tcp:127.0.0.1:5760',
                        options=[], logfile=sys.stdout):
    """Launch mavproxy connected to a SITL instance."""
    import pexpect
    global close_list
    MAVPROXY = mavproxy_cmd()
    cmd = MAVPROXY + ' --master=%s --out=127.0.0.1:14550' % master
    if setup:
        cmd += ' --setup'
    if aircraft is None:
        aircraft = 'test.%s' % atype
    cmd += ' --aircraft=%s' % aircraft
    cmd += ' ' + ' '.join(options)
    cmd += ' --default-modules misc,terrain,wp,rally,fence,param,arm,mode,rc,cmdlong,output'
    ret = pexpect.spawn(cmd, logfile=logfile, encoding=ENCODING, timeout=60)
    ret.delaybeforesend = 0
    pexpect_autoclose(ret)
    return ret


def expect_setup_callback(e, callback):
    """Setup a callback that is called once a second while waiting for
    patterns."""
    import pexpect

    def _expect_callback(pattern, timeout=e.timeout):
        tstart = time.time()
        while time.time() < tstart + timeout:
            try:
                ret = e.expect_saved(pattern, timeout=1)
                return ret
            except pexpect.TIMEOUT:
                e.expect_user_callback(e)
        print("Timed out looking for %s" % pattern)
        raise pexpect.TIMEOUT(timeout)

    e.expect_user_callback = callback
    e.expect_saved = e.expect
    e.expect = _expect_callback


def mkdir_p(directory):
    """Like mkdir -p ."""
    if not directory:
        return
    if directory.endswith("/"):
        mkdir_p(directory[:-1])
        return
    if os.path.isdir(directory):
        return
    mkdir_p(os.path.dirname(directory))
    os.mkdir(directory)


def loadfile(fname):
    """Load a file as a string."""
    f = open(fname, mode='r')
    r = f.read()
    f.close()
    return r


def lock_file(fname):
    """Lock a file."""
    import fcntl
    f = open(fname, mode='w')
    try:
        fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
        return None
    return f


def check_parent(parent_pid=None):
    """Check our parent process is still alive."""
    if parent_pid is None:
        try:
            parent_pid = os.getppid()
        except Exception:
            pass
    if parent_pid is None:
        return
    try:
        os.kill(parent_pid, 0)
    except Exception:
        print("Parent had finished - exiting")
        sys.exit(1)
def EarthRatesToBodyRates(dcm, earth_rates):
    """Convert the angular velocities from earth frame to
    body frame. Thanks to James Goppert for the formula.

    All inputs and outputs are in radians.

    Returns a gyro vector in body frame, in rad/s.
    """
    from math import sin, cos

    (phi, theta, psi) = dcm.to_euler()
    phiDot = earth_rates.x
    thetaDot = earth_rates.y
    psiDot = earth_rates.z

    p = phiDot - psiDot * sin(theta)
    q = cos(phi) * thetaDot + sin(phi) * psiDot * cos(theta)
    r = cos(phi) * psiDot * cos(theta) - sin(phi) * thetaDot
    return Vector3(p, q, r)


def BodyRatesToEarthRates(dcm, gyro):
    """Convert the angular velocities from body frame to
    earth frame.

    All inputs and outputs are in radians/s.

    Returns an earth rate vector.
    """
    from math import sin, cos, tan, fabs

    p = gyro.x
    q = gyro.y
    r = gyro.z
    (phi, theta, psi) = dcm.to_euler()
    phiDot = p + tan(theta) * (q * sin(phi) + r * cos(phi))
    thetaDot = q * cos(phi) - r * sin(phi)
    if fabs(cos(theta)) < 1.0e-20:
        theta += 1.0e-10
    psiDot = (q * sin(phi) + r * cos(phi)) / cos(theta)
    return Vector3(phiDot, thetaDot, psiDot)
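# the small theta nudge above guards the division by cos(theta) near
# theta = +/-90 degrees (gimbal lock), where Euler rates are ill-defined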
def gps_newpos(lat, lon, bearing, distance):
    """Extrapolate latitude/longitude given a heading and distance
    thanks to http://www.movable-type.co.uk/scripts/latlong.html .
    """
    from math import sin, asin, cos, atan2, radians, degrees

    lat1 = radians(lat)
    lon1 = radians(lon)
    brng = radians(bearing)
    dr = distance / RADIUS_OF_EARTH

    lat2 = asin(sin(lat1) * cos(dr) +
                cos(lat1) * sin(dr) * cos(brng))
    lon2 = lon1 + atan2(sin(brng) * sin(dr) * cos(lat1),
                        cos(dr) - sin(lat1) * sin(lat2))
    return (degrees(lat2), degrees(lon2))


def gps_distance(lat1, lon1, lat2, lon2):
    """Return distance between two points in meters,
    coordinates are in degrees
    thanks to http://www.movable-type.co.uk/scripts/latlong.html ."""
    lat1 = math.radians(lat1)
    lat2 = math.radians(lat2)
    lon1 = math.radians(lon1)
    lon2 = math.radians(lon2)
    dLat = lat2 - lat1
    dLon = lon2 - lon1

    a = math.sin(0.5 * dLat)**2 + math.sin(0.5 * dLon)**2 * math.cos(lat1) * math.cos(lat2)
    c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a))
    return RADIUS_OF_EARTH * c


def gps_bearing(lat1, lon1, lat2, lon2):
    """Return bearing between two points in degrees, in range 0-360
    thanks to http://www.movable-type.co.uk/scripts/latlong.html ."""
    lat1 = math.radians(lat1)
    lat2 = math.radians(lat2)
    lon1 = math.radians(lon1)
    lon2 = math.radians(lon2)
    dLon = lon2 - lon1
    y = math.sin(dLon) * math.cos(lat2)
    x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(dLon)
    bearing = math.degrees(math.atan2(y, x))
    if bearing < 0:
        bearing += 360.0
    return bearing
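# quick sanity check for the helpers above: one degree of arc is about
# RADIUS_OF_EARTH * pi / 180 ~= 111 km, so gps_distance(0, 0, 1, 0) should
# return roughly 111319 meters, and gps_bearing(0, 0, 1, 0) is 0 (due north)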
class Wind(object):
    """A wind generation object."""
    def __init__(self, windstring, cross_section=0.1):
        a = windstring.split(',')
        if len(a) != 3:
            raise RuntimeError("Expected wind in speed,direction,turbulance form, not %s" % windstring)
        self.speed = float(a[0])  # m/s
        self.direction = float(a[1])  # direction the wind is going in
        self.turbulance = float(a[2])  # turbulance factor (standard deviation)

        # the cross-section of the aircraft to wind. This is multiplied by the
        # difference in the wind and the velocity of the aircraft to give the acceleration
        self.cross_section = cross_section

        # the time constant for the turbulance - the average period of the
        # changes over time
        self.turbulance_time_constant = 5.0

        # wind time record
        self.tlast = time.time()

        # initial turbulance multiplier
        self.turbulance_mul = 1.0

    def current(self, deltat=None):
        """Return current wind speed and direction as a tuple
        speed is in m/s, direction in degrees."""
        if deltat is None:
            tnow = time.time()
            deltat = tnow - self.tlast
            self.tlast = tnow

        # update turbulance random walk
        w_delta = math.sqrt(deltat) * (1.0 - random.gauss(1.0, self.turbulance))
        w_delta -= (self.turbulance_mul - 1.0) * (deltat / self.turbulance_time_constant)
        self.turbulance_mul += w_delta
        speed = self.speed * math.fabs(self.turbulance_mul)
        return (speed, self.direction)

    # Calculate drag.
    def drag(self, velocity, deltat=None):
        """Return current wind force in Earth frame. The velocity parameter is
        a Vector3 of the current velocity of the aircraft in earth frame, m/s ."""
        from math import radians

        # (m/s, degrees) : wind vector as a magnitude and angle.
        (speed, direction) = self.current(deltat=deltat)
        # speed = self.speed
        # direction = self.direction

        # Get the wind vector.
        w = toVec(speed, radians(direction))

        obj_speed = velocity.length()

        # Compute the angle between the object vector and wind vector by taking
        # the dot product and dividing by the magnitudes.
        d = w.length() * obj_speed
        if d == 0:
            alpha = 0
        else:
            alpha = acos((w * velocity) / d)

        # Get the relative wind speed and angle from the object. Note that the
        # relative wind speed includes the velocity of the object; i.e., there
        # is a headwind equivalent to the object's speed even if there is no
        # absolute wind.
        (rel_speed, beta) = apparent_wind(speed, obj_speed, alpha)

        # Return the vector of the relative wind, relative to the coordinate
        # system.
        relWindVec = toVec(rel_speed, beta + atan2(velocity.y, velocity.x))

        # Combine them to get the acceleration vector.
        return Vector3(acc(relWindVec.x, drag_force(self, relWindVec.x)),
                       acc(relWindVec.y, drag_force(self, relWindVec.y)),
                       0)


def apparent_wind(wind_sp, obj_speed, alpha):
    """http://en.wikipedia.org/wiki/Apparent_wind

    Returns apparent wind speed and angle of apparent wind. Alpha is the angle
    between the object and the true wind. alpha of 0 rads is a headwind; pi a
    tailwind. Speeds should always be positive."""
    delta = wind_sp * cos(alpha)
    x = wind_sp**2 + obj_speed**2 + 2 * obj_speed * delta
    rel_speed = sqrt(x)
    if rel_speed == 0:
        beta = pi
    else:
        beta = acos((delta + obj_speed) / rel_speed)
    return (rel_speed, beta)


def drag_force(wind, sp):
    """See http://en.wikipedia.org/wiki/Drag_equation

    Drag equation is F(a) = cl * p/2 * v^2 * a, where cl : drag coefficient
    (let's assume it's low, e.g., 0.2), p : density of air (assume about 1
    kg/m^3, the density just over 1500m elevation), v : relative speed of wind
    (to the body), a : area acted on (this is captured by the cross_section
    parameter).

    So then we have
    F(a) = 0.2 * 1/2 * v^2 * cross_section = 0.1 * v^2 * cross_section."""
    return (sp**2.0) * 0.1 * wind.cross_section


def acc(val, mag):
    """Function to make the force vector. relWindVec is the direction the apparent
    wind comes *from*. We want to compute the acceleration vector in the direction
    the wind blows to."""
    if val == 0:
        return mag
    else:
        return (val / abs(val)) * (0 - mag)


def toVec(magnitude, angle):
    """Converts a magnitude and angle (radians) to a vector in the xy plane."""
    v = Vector3(magnitude, 0, 0)
    m = Matrix3()
    m.from_euler(0, 0, angle)
    return m.transposed() * v


def constrain(value, minv, maxv):
    """Constrain a value to a range."""
    if value < minv:
        value = minv
    if value > maxv:
        value = maxv
    return value


if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
jeromewu/bitcoin-opennet | refs/heads/master | share/qt/make_spinner.py | 563 |
#!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

SRC = 'img/reload.png'
TMPDIR = '../../src/qt/res/movies/'
TMPNAME = 'spinner-%03i.png'
NUMFRAMES = 35
FRAMERATE = 10.0
CONVERT = 'convert'
CLOCKWISE = True
DSIZE = (16, 16)

im_src = Image.open(SRC)

if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)


def frame_to_filename(frame):
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
for frame in xrange(NUMFRAMES):
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)
|
dougluce/ansible-modules-core | refs/heads/devel | utilities/logic/async_wrapper.py | 189 |
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

try:
    import json
except ImportError:
    import simplejson as json
import shlex
import os
import subprocess
import sys
import datetime
import traceback
import signal
import time
import syslog


def daemonize_self():
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    # logger.info("cobblerd started")
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)

    # decouple from parent environment
    os.chdir("/")
    os.setsid()
    os.umask(022)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)

    dev_null = file('/dev/null', 'rw')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())

if len(sys.argv) < 3:
    print json.dumps({
        "failed": True,
        "msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>.  Humans, do not call directly!"
    })
    sys.exit(1)

jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)

syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))

# setup logging directory
logdir = os.path.expanduser("~/.ansible_async")
log_path = os.path.join(logdir, jid)

if not os.path.exists(logdir):
    try:
        os.makedirs(logdir)
    except:
        print json.dumps({
            "failed": 1,
            "msg": "could not create: %s" % logdir
        })


def _run_command(wrapped_cmd, jid, log_path):
    logfile = open(log_path, "w")
    logfile.write(json.dumps({"started": 1, "ansible_job_id": jid}))
    logfile.close()
    logfile = open(log_path, "w")
    result = {}

    outdata = ''
    try:
        cmd = shlex.split(wrapped_cmd)
        script = subprocess.Popen(cmd, shell=False,
                                  stdin=None, stdout=logfile, stderr=logfile)
        script.communicate()
        outdata = file(log_path).read()
        result = json.loads(outdata)
    except (OSError, IOError), e:
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "msg": str(e),
        }
        result['ansible_job_id'] = jid
        logfile.write(json.dumps(result))
    except:
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "data": outdata,  # temporary debug only
            "msg": traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        logfile.write(json.dumps(result))
    logfile.close()

# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process

#import logging
#import logging.handlers
#logger = logging.getLogger("ansible_async")
#logger.setLevel(logging.WARNING)
#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") )


def debug(msg):
    #logger.warning(msg)
    pass

try:
    pid = os.fork()
    if pid:
        # Notify the overlord that the async process started.
        # We need to not return immediately such that the launched command has an attempt
        # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile).
        # This probably could be done with some IPC later.  Modules should always read
        # the argsfile at the very first start of their execution anyway.
        time.sleep(1)
        debug("Return async_wrapper task started.")
        print json.dumps({"started": 1, "ansible_job_id": jid, "results_file": log_path})
        sys.stdout.flush()
        sys.exit(0)
    else:
        # The actual wrapper process.
        # Daemonize, so we keep on running.
        daemonize_self()

        # we are now daemonized, create a supervisory process
        debug("Starting module and watcher")

        sub_pid = os.fork()
        if sub_pid:
            # the parent stops the process after the time limit
            remaining = int(time_limit)

            # set the child process group id to kill all children
            os.setpgid(sub_pid, sub_pid)

            debug("Start watching %s (%s)" % (sub_pid, remaining))
            time.sleep(5)
            while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
                debug("%s still running (%s)" % (sub_pid, remaining))
                time.sleep(5)
                remaining = remaining - 5
                if remaining <= 0:
                    debug("Now killing %s" % (sub_pid))
                    os.killpg(sub_pid, signal.SIGKILL)
                    debug("Sent kill to group %s" % sub_pid)
                    time.sleep(1)
                    sys.exit(0)
            debug("Done in kid B.")
            os._exit(0)
        else:
            # the child process runs the actual module
            debug("Start module (%s)" % os.getpid())
            _run_command(cmd, jid, log_path)
            debug("Module complete (%s)" % os.getpid())
            sys.exit(0)
except Exception, err:
    debug("error: %s" % (err))
    raise err
|
mrquim/repository.mrquim | refs/heads/master | plugin.video.castaway/resources/lib/modules/cache.py | 7 |
# -*- coding: utf-8 -*-

'''
    Genesis Add-on
    Copyright (C) 2015 lambda

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

import re, hashlib, time

try:
    from sqlite3 import dbapi2 as database
except:
    from pysqlite2 import dbapi2 as database

import control


def get(function, timeout, *args, **table):
    try:
        response = None

        # cache key: cleaned repr of the function plus an md5 of its arguments
        f = repr(function)
        f = re.sub('.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', f)

        a = hashlib.md5()
        for i in args:
            a.update(str(i))
        a = str(a.hexdigest())
    except:
        pass

    try:
        table = table['table']
    except:
        table = 'rel_list'

    try:
        control.makeFile(control.dataPath)
        dbcon = database.connect(control.cacheFile)
        dbcur = dbcon.cursor()
        dbcur.execute("SELECT * FROM %s WHERE func = '%s' AND args = '%s'" % (table, f, a))
        match = dbcur.fetchone()

        response = eval(match[2].encode('utf-8'))

        t1 = int(match[3])
        t2 = int(time.time())
        update = (abs(t2 - t1) / 3600) >= int(timeout)
        if update == False:
            return response
    except:
        pass

    try:
        r = function(*args)
        if (r == None or r == []) and not response == None:
            return response
        elif (r == None or r == []):
            return r
    except:
        return

    try:
        r = repr(r)
        t = int(time.time())
        dbcur.execute("CREATE TABLE IF NOT EXISTS %s ("
                      "func TEXT, "
                      "args TEXT, "
                      "response TEXT, "
                      "added TEXT, "
                      "UNIQUE(func, args)"
                      ");" % table)
        dbcur.execute("DELETE FROM %s WHERE func = '%s' AND args = '%s'" % (table, f, a))
        dbcur.execute("INSERT INTO %s Values (?, ?, ?, ?)" % table, (f, a, r, t))
        dbcon.commit()
    except:
        pass

    try:
        return eval(r.encode('utf-8'))
    except:
        pass


def clear(table=None):
    try:
        control.idle()

        if table == None:
            table = ['rel_list', 'rel_lib']
        elif not type(table) == list:
            table = [table]

        yes = control.yesnoDialog(control.lang(30401).encode('utf-8'), '', '')
        if not yes:
            return

        dbcon = database.connect(control.cacheFile)
        dbcur = dbcon.cursor()

        for t in table:
            try:
                dbcur.execute("DROP TABLE IF EXISTS %s" % t)
                dbcur.execute("VACUUM")
                dbcon.commit()
            except:
                pass

        control.infoDialog(control.lang(30402).encode('utf-8'))
    except:
        pass
|
karim-omran/openerp-addons | refs/heads/master | hr_simplify/__init__.py | 4 |
#-*- coding:utf-8 -*-
#
#
# Copyright (C) 2011,2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import hr_simplify
|
MoamerEncsConcordiaCa/tensorflow | refs/heads/master | tensorflow/contrib/layers/python/layers/encoders_test.py | 111 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.layers.python.layers.encoders."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import encoders
from tensorflow.contrib.layers.python.ops import sparse_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _get_const_var(name, shape, value):
  return variable_scope.get_variable(
      name, shape, initializer=init_ops.constant_initializer(value))


class EncodersTest(test.TestCase):

  def testBowEncoderSparse(self):
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      enc = encoders.bow_encoder(docs, 4, 3)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([2, 3], enc.eval().shape)

  def testBowEncoderSparseTensor(self):
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
      enc = encoders.bow_encoder(sparse_docs, 4, 3)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([2, 3], enc.eval().shape)

  def testBowEncoderSparseEmptyRow(self):
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3], [0, 0]]
      enc = encoders.bow_encoder(docs, 4, 5)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([3, 5], enc.eval().shape)

  def testBowEncoderDense(self):
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3], [0, 0], [0, 0]]
      enc = encoders.bow_encoder(docs, 4, 3, sparse_lookup=False)
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([4, 3], enc.eval().shape)

  def testBowEncoderSparseTensorDenseLookup(self):
    with self.test_session():
      docs = [[0, 1]]
      sparse_docs = sparse_ops.dense_to_sparse_tensor(docs)
      with self.assertRaises(TypeError):
        encoders.bow_encoder(sparse_docs, 4, 3, sparse_lookup=False)

  def testBowEncodersSharingEmbeddings(self):
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      enc_1 = encoders.bow_encoder(docs, 4, 3, scope='test')
      enc_2 = encoders.bow_encoder(docs, 4, 3, scope='test', reuse=True)
      sess.run(variables.global_variables_initializer())
      avg_1, avg_2 = sess.run([enc_1, enc_2])
      self.assertAllEqual(avg_1, avg_2)

  def testBowEncodersSharingEmbeddingsInheritedScopes(self):
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      with variable_scope.variable_scope('test'):
        enc_1 = encoders.bow_encoder(docs, 4, 3)
      with variable_scope.variable_scope('test', reuse=True):
        enc_2 = encoders.bow_encoder(docs, 4, 3)
      sess.run(variables.global_variables_initializer())
      avg_1, avg_2 = sess.run([enc_1, enc_2])
      self.assertAllEqual(avg_1, avg_2)

  def testBowEncodersSharingEmbeddingsSharedScope(self):
    with self.test_session() as sess:
      docs = [[0, 1], [2, 3]]
      enc_1 = encoders.bow_encoder(docs, 4, 3, scope='bow')
      variable_scope.get_variable_scope().reuse_variables()
      enc_2 = encoders.bow_encoder(docs, 4, 3, scope='bow')
      sess.run(variables.global_variables_initializer())
      avg_1, avg_2 = sess.run([enc_1, enc_2])
      self.assertAllEqual(avg_1, avg_2)

  def testBowEncoderReuseEmbeddingsVariable(self):
    with self.test_session() as sess:
      docs = [[1, 1], [2, 3]]
      with variable_scope.variable_scope('test'):
        v = _get_const_var('embeddings', (4, 3),
                           [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
        self.assertEqual(v.name, 'test/embeddings:0')
      enc = encoders.bow_encoder(docs, 4, 3, scope='test', reuse=True)
      sess.run(variables.global_variables_initializer())
      self.assertAllClose([[3., 4., 5.], [7.5, 8.5, 9.5]], enc.eval())

  def testEmbedSequence(self):
    with self.test_session() as sess:
      docs = [[1, 1], [2, 3]]
      with variable_scope.variable_scope('test'):
        v = _get_const_var('embeddings', (4, 3),
                           [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
        self.assertEqual(v.name, 'test/embeddings:0')
      emb = encoders.embed_sequence(docs, 4, 3, scope='test', reuse=True)
      sess.run(variables.global_variables_initializer())
      self.assertAllClose(
          [[[3., 4., 5.], [3., 4., 5.]], [[6., 7., 8.], [9., 10., 11.]]],
          emb.eval())


if __name__ == '__main__':
  test.main()
|
tlatzko/spmcluster | refs/heads/master | .tox/2.6-nocov/lib/python2.6/site-packages/wheel/pkginfo.py | 565 |
"""Tools for reading and writing PKG-INFO / METADATA without caring
about the encoding."""
from email.parser import Parser
try:
unicode
_PY3 = False
except NameError:
_PY3 = True
if not _PY3:
from email.generator import Generator
def read_pkg_info_bytes(bytestr):
return Parser().parsestr(bytestr)
def read_pkg_info(path):
with open(path, "r") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, 'w') as metadata:
Generator(metadata, maxheaderlen=0).flatten(message)
else:
from email.generator import BytesGenerator
def read_pkg_info_bytes(bytestr):
headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
message = Parser().parsestr(headers)
return message
def read_pkg_info(path):
with open(path, "r",
encoding="ascii",
errors="surrogateescape") as headers:
message = Parser().parse(headers)
return message
def write_pkg_info(path, message):
with open(path, "wb") as out:
BytesGenerator(out, maxheaderlen=0).flatten(message)
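# note: ascii + surrogateescape lets arbitrary metadata bytes round-trip:
# undecodable bytes become lone surrogates on read and are restored on write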
|
meteora9479/deep-visualization-toolbox | refs/heads/master | __init__.py | 12133432 | |
dompuiu/puzzles | refs/heads/master | mafia-problem/src/__init__.py | 12133432 | |
manikishan/fosswebsite | refs/heads/master | clubManagement/templatetags/__init__.py | 12133432 | |
taoger/titanium_mobile | refs/heads/master | support/common/markdown/inlinepatterns.py | 107 |
"""
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline must
capture the whole block. For this reason, they all start with
'^(.*)' and end with '(.*)!'. In case with built-in expression
Pattern takes care of adding the "^(.*)" and "(.*)!".
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]+)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.+?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.+?)\2' # ***strong***
if markdown.SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\w)(_)(\S.+?)\2(?!\w)' # _emphasis_
else:
EMPHASIS_2_RE = r'(_)(.+?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*?)\12)?\)'''
# [text](url) or [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
#  or 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'((^| )(\*|_)( |$))' # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2).replace('\n', ' '))
return ATTR_RE.sub(attributeCallback, text)
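# note on the regexes above: Pattern (below) compiles each one wrapped as
# "^(.*?)%s(.*?)$", so a pattern's own groups start at group(2); that is why
# the handlers read matched text from group(2) or group(3)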
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
"""Base class that inline patterns subclass. """
def __init__ (self, pattern, markdown_instance=None):
"""
Create an instant of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp (self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == markdown.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class SimpleTagPattern (Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = m.group(3)
return el
class SubstituteTagPattern (SimpleTagPattern):
""" Return a eLement of type `tag` with no children. """
def handleMatch (self, m):
return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = markdown.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern (SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = markdown.etree.Element(tag1)
el2 = markdown.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern (Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = m.group(2)
inline = True
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
class LinkPattern (Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.text = m.group(2)
title = m.group(11)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(href.strip()))
else:
el.set("href", "")
if title:
title = dequote(title) #.replace('"', """)
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
locless_schemes = ['', 'mailto', 'news']
scheme, netloc, path, params, query, fragment = url = urlparse(url)
safe_url = False
if netloc != '' or scheme in locless_schemes:
safe_url = True
for part in url[2:]:
if ":" in part:
safe_url = False
if self.markdown.safeMode and not safe_url:
return ''
else:
return urlunparse(url)
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(src))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(" ".join(src_parts[1:])))
if markdown.ENABLE_ATTRIBUTES:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', truealt)
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
def handleMatch(self, m):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
if not id in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = markdown.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern (ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = markdown.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
el.set("alt", text)
return el
class AutolinkPattern (Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.set('href', m.group(2))
el.text = markdown.AtomicString(m.group(2))
return el
class AutomailPattern (Pattern):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = markdown.etree.Element('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = htmlentitydefs.codepoint2name.get(code)
if entity:
return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = markdown.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
|
mavenlin/tensorflow | refs/heads/master | tensorflow/contrib/keras/python/keras/utils/layer_utils.py | 9 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.conv_utils import convert_kernel
def print_summary(model, line_length=None, positions=None, print_fn=None):
  """Prints a summary of a model.

  Arguments:
      model: Keras model instance.
      line_length: Total length of printed lines
          (e.g. set this to adapt the display to different
          terminal window sizes).
      positions: Relative or absolute positions of log elements in each line.
          If not provided, defaults to `[.33, .55, .67, 1.]`.
      print_fn: Print function to use (defaults to `print`).
          It will be called on each line of the summary.
          You can set it to a custom function
          in order to capture the string summary.
  """
  if print_fn is None:
    print_fn = print

  if model.__class__.__name__ == 'Sequential':
    sequential_like = True
  else:
    sequential_like = True
    for v in model._nodes_by_depth.values():  # pylint: disable=protected-access
      if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
        # If the model has multiple nodes or if the nodes have
        # multiple inbound_layers, the model is no longer sequential.
        sequential_like = False
        break

  if sequential_like:
    line_length = line_length or 65
    positions = positions or [.45, .85, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #']
  else:
    line_length = line_length or 100
    positions = positions or [.33, .55, .67, 1.]
    if positions[-1] <= 1:
      positions = [int(line_length * p) for p in positions]
    # header names for the different log elements
    to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
    relevant_nodes = []
    for v in model._nodes_by_depth.values():  # pylint: disable=protected-access
      relevant_nodes += v

  def print_row(fields, positions):
    line = ''
    for i in range(len(fields)):
      if i > 0:
        line = line[:-1] + ' '
      line += str(fields[i])
      line = line[:positions[i]]
      line += ' ' * (positions[i] - len(line))
    print_fn(line)

  print_fn('_' * line_length)
  print_row(to_display, positions)
  print_fn('=' * line_length)

  def print_layer_summary(layer):
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    name = layer.name
    cls_name = layer.__class__.__name__
    fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
    print_row(fields, positions)

  def print_layer_summary_with_connections(layer):
    """Prints a summary for a single layer.

    Arguments:
        layer: target layer.
    """
    try:
      output_shape = layer.output_shape
    except AttributeError:
      output_shape = 'multiple'
    connections = []
    for node in layer.inbound_nodes:
      if relevant_nodes and node not in relevant_nodes:
        # node is not part of the current network
        continue
      for i in range(len(node.inbound_layers)):
        inbound_layer = node.inbound_layers[i].name
        inbound_node_index = node.node_indices[i]
        inbound_tensor_index = node.tensor_indices[i]
        connections.append(inbound_layer + '[' + str(inbound_node_index) +
                           '][' + str(inbound_tensor_index) + ']')

    name = layer.name
    cls_name = layer.__class__.__name__
    if not connections:
      first_connection = ''
    else:
      first_connection = connections[0]
    fields = [
        name + ' (' + cls_name + ')', output_shape,
        layer.count_params(), first_connection
    ]
    print_row(fields, positions)
    if len(connections) > 1:
      for i in range(1, len(connections)):
        fields = ['', '', '', connections[i]]
        print_row(fields, positions)

  layers = model.layers
  for i in range(len(layers)):
    if sequential_like:
      print_layer_summary(layers[i])
    else:
      print_layer_summary_with_connections(layers[i])
    if i == len(layers) - 1:
      print_fn('=' * line_length)
    else:
      print_fn('_' * line_length)

  trainable_count = int(
      np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
  non_trainable_count = int(
      np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))

  print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
  print_fn('Trainable params: {:,}'.format(trainable_count))
  print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
  print_fn('_' * line_length)


def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  Arguments:
      model: target model for the conversion.
  """
  # Note: SeparableConvolution not included
  # since only supported by TF.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  to_assign = []
  for layer in model.layers:
    if layer.__class__.__name__ in conv_classes:
      original_kernel = K.get_value(layer.kernel)
      converted_kernel = convert_kernel(original_kernel)
      to_assign.append((layer.kernel, converted_kernel))
  K.batch_set_value(to_assign)


def convert_dense_weights_data_format(dense,
                                      previous_feature_map_shape,
                                      target_data_format='channels_first'):
  """Utility useful when changing a convnet's `data_format`.

  When porting the weights of a convnet from one data format to the other,
  if the convnet includes a `Flatten` layer
  (applied to the last convolutional feature map)
  followed by a `Dense` layer, the weights of that `Dense` layer
  should be updated to reflect the new dimension ordering.

  Arguments:
      dense: The target `Dense` layer.
      previous_feature_map_shape: A shape tuple of 3 integers,
          e.g. `(512, 7, 7)`. The shape of the convolutional
          feature map right before the `Flatten` layer that
          came before the target `Dense` layer.
      target_data_format: One of "channels_last", "channels_first".
          Set it "channels_last"
          if converting a "channels_first" model to "channels_last",
          or reciprocally.
  """
  assert target_data_format in {'channels_last', 'channels_first'}
  kernel, bias = dense.get_weights()
  for i in range(kernel.shape[1]):
    if target_data_format == 'channels_first':
      c, h, w = previous_feature_map_shape
      original_fm_shape = (h, w, c)
      ki = kernel[:, i].reshape(original_fm_shape)
      ki = np.transpose(ki, (2, 0, 1))  # last -> first
    else:
      h, w, c = previous_feature_map_shape
      original_fm_shape = (c, h, w)
      ki = kernel[:, i].reshape(original_fm_shape)
      ki = np.transpose(ki, (1, 2, 0))  # first -> last
    kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
  dense.set_weights([kernel, bias])
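# example for convert_dense_weights_data_format: with
# previous_feature_map_shape=(512, 7, 7) and target_data_format='channels_first',
# each kernel column is reshaped to (7, 7, 512), transposed to (512, 7, 7),
# then flattened back to a length-25088 vector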
|
wlamond/scikit-learn | refs/heads/master | sklearn/metrics/pairwise.py | 28 |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
from functools import partial
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
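# A minimal illustrative sketch (shapes are made up): with precomputed=True the
# first array is validated as an (n_queries, n_indexed) distance matrix whose
# second dimension must match the number of rows in Y.
def _check_pairwise_arrays_demo():
    D = np.zeros((2, 3))  # 2 queries, 3 indexed samples
    Y = np.zeros((3, 5))
    D_checked, _ = check_pairwise_arrays(D, Y, precomputed=True)
    assert D_checked.shape == (2, 3)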
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
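# A minimal illustrative sketch (data is made up; wrapped in a function so
# nothing runs at import time): the expanded formulation above agrees with a
# direct pairwise norm computation up to floating point rounding.
def _euclidean_distances_demo():
    X = np.array([[0., 1.], [1., 1.]])
    Y = np.array([[2., 0.]])
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    assert np.allclose(euclidean_distances(X, Y), direct)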
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
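# A minimal illustrative sketch (data is made up; kept inside a function since
# PAIRWISE_DISTANCE_FUNCTIONS is only defined further down this module): each
# row of A is matched to the index of its nearest row in B, plus that distance.
def _pairwise_distances_argmin_min_demo():
    A = np.array([[0., 0.], [10., 10.]])
    B = np.array([[1., 1.], [9., 9.], [5., 5.]])
    idx, dist = pairwise_distances_argmin_min(A, B)
    assert list(idx) == [0, 1]
    assert np.allclose(dist, np.sqrt(2.))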
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
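# A minimal illustrative sketch (function-wrapped since cosine_similarity is
# defined later in this module): orthogonal rows sit at cosine distance 1, and
# the diagonal is forced to exactly 0.
def _cosine_distances_demo():
    X = np.array([[1., 0.], [0., 1.]])
    assert np.allclose(cosine_distances(X), [[0., 1.], [1., 0.]])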
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
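# A minimal illustrative sketch verifying the note above (random data is made
# up): on unit-normalized rows, the paired cosine distance equals half the
# squared paired euclidean distance.
def _paired_cosine_distances_demo():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(5, 3))
    Y = normalize(rng.rand(5, 3))
    assert np.allclose(paired_cosine_distances(X, Y),
                       .5 * paired_euclidean_distances(X, Y) ** 2)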
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
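# A minimal illustrative sketch (data and parameters are made up): the closed
# form above, (gamma <X, Y> + coef0)^degree, checked entrywise.
def _polynomial_kernel_demo():
    X = np.array([[1., 2.], [3., 4.]])
    K = polynomial_kernel(X, degree=2, gamma=1., coef0=1.)
    assert np.allclose(K, (X.dot(X.T) + 1.) ** 2)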
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
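# A minimal illustrative sketch (gamma and data are made up):
# K(x, y) = exp(-gamma ||x - y||^2), checked against the squared euclidean
# distances computed by the function above.
def _rbf_kernel_demo():
    X = np.array([[0., 0.], [1., 0.]])
    K = rbf_kernel(X, gamma=0.5)
    assert np.allclose(K, np.exp(-0.5 * euclidean_distances(X, squared=True)))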
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
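# A minimal illustrative sketch of the equivalence stated above (random data is
# made up): on L2-normalized rows, cosine similarity coincides with the linear
# kernel.
def _cosine_similarity_demo():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(4, 3))
    assert np.allclose(cosine_similarity(X), linear_kernel(X))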
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
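# A minimal illustrative sketch (data and gamma are made up): the exponentiated
# kernel equals exp(gamma * additive kernel), the additive form above already
# carrying the minus sign.
def _chi2_kernel_demo():
    X = np.array([[0.3, 0.7], [0.6, 0.4]])
    assert np.allclose(chi2_kernel(X, gamma=1.5),
                       np.exp(1.5 * additive_chi2_kernel(X)))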
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
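# A minimal illustrative sketch (the lambda metric is made up): a user-defined
# callable is routed through _pairwise_callable above; here it implements a
# Chebyshev-style distance.
def _pairwise_distances_demo():
    X = np.array([[0., 0.], [3., 4.]])
    D = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).max())
    assert np.allclose(D, [[0., 4.], [4., 0.]])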
# These distances require boolean arrays, when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
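# A minimal illustrative sketch (gamma/degree values are made up): with
# filter_params=True, keyword arguments the chosen kernel does not accept
# (``degree`` for 'rbf' here) are dropped instead of raising.
def _pairwise_kernels_demo():
    X = np.array([[0., 1.], [1., 0.]])
    K = pairwise_kernels(X, metric='rbf', filter_params=True,
                         gamma=1., degree=3)
    assert np.allclose(K, rbf_kernel(X, gamma=1.))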
|
simplegeo/clusto-sgext
|
refs/heads/pending
|
setup.py
|
1
|
from setuptools import setup, find_packages
__version__ = open('VERSION', 'r').read().strip('\r\n\t ')
setup(name='clusto-sgext',
version=__version__,
packages=find_packages(),
install_requires=[
'IPy',
'PyYAML',
'boto',
'clusto',
'decorator',
'eventlet',
'kombu',
'ostrich',
],
entry_points={
'console_scripts': [
'clusto-puppet-node2 = sgext.commands.puppet_node2:main',
'clusto-barker-consumer = sgext.commands.barker_consumer:main',
'clusto-ec2-report = sgext.commands.ec2_report:main',
'clusto-aws-cleanup = sgext.commands.aws_cleanup:main',
'clusto-elb = sgext.commands.elb:main',
'clusto-apt = sgext.commands.apt:main',
'clusto-chef = sgext.commands.chef:main',
]
})
|
menren/openshift-ansible
|
refs/heads/master
|
bin/openshift_ansible/utils.py
|
45
|
#!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
''' The purpose of this module is to contain small utility functions.
'''
import re
def normalize_dnsname(name, padding=10):
''' The purpose of this function is to return a dns name with zero padding,
so that it sorts properly (as a human would expect).
Example: name=ex-lrg-node10.prod.rhcloud.com
Returns: ex-lrg-node0000000010.prod.rhcloud.com
Example Usage:
sorted(['a3.example.com', 'a10.example.com', 'a1.example.com'],
key=normalize_dnsname)
Returns: ['a1.example.com', 'a3.example.com', 'a10.example.com']
'''
parts = re.split(r'(\d+)', name)
retval = []
for part in parts:
if re.match(r'^\d+$', part):
retval.append(part.zfill(padding))
else:
retval.append(part)
return ''.join(retval)
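if __name__ == '__main__':
    # A minimal demo of the docstring example above; hostnames are made up.
    hosts = ['a3.example.com', 'a10.example.com', 'a1.example.com']
    assert sorted(hosts, key=normalize_dnsname) == \
        ['a1.example.com', 'a3.example.com', 'a10.example.com']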
|
drawks/ansible
|
refs/heads/devel
|
test/integration/targets/inventory_cloudscale/filter_plugins/group_name.py
|
24
|
from ansible.inventory.group import to_safe_group_name
def safe_group_name(name):
return to_safe_group_name(name)
class FilterModule(object):
filter_map = {
'safe_group_name': safe_group_name
}
def filters(self):
return self.filter_map
|
pridemusvaire/yowsup
|
refs/heads/master
|
yowsup/env/test_env_s40.py
|
64
|
import unittest
from yowsup.env import S40YowsupEnv
class S40YowsupEnvTest(unittest.TestCase):
def test_tokengen(self):
phone = "1234567"
S40YowsupEnv._TOKEN_STRING = "PdA2DJyKoUrwLw1Bg6EIhzh502dF9noR9uFCllGk1425519315543{phone}"
env = S40YowsupEnv()
token = env.getToken(phone)
self.assertEqual(token, 'e84e1f1477704159efd46f6f0781dbde')
|
Diegojnb/JdeRobot
|
refs/heads/master
|
src/tools/scratch2jderobot/src/tests/test_kurt_write.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import kurt
import os
# get current working directory
path = os.getcwd()
path = path[:path.rfind('src')] + 'data/'
# load the scratch project
p = kurt.Project.load(path + 'test_scratch2.sb2')
# show the blocks included
for scriptable in p.sprites + [p.stage]:
for script in scriptable.scripts:
s = script
# change number of steps for the move block
s.blocks[1].args = [20]
# save modifications
p.save(path + 'test_scratch2_modified.sb2')
|
martinlunde/RealBack
|
refs/heads/dev
|
realback_api/migrations/__init__.py
|
12133432
| |
automl/HPOlib2
|
refs/heads/master
|
hpolib/container/benchmarks/__init__.py
|
12133432
| |
ccn-2m/django
|
refs/heads/master
|
tests/modeltests/delete/__init__.py
|
12133432
| |
xrmx/django
|
refs/heads/master
|
tests/view_tests/app0/__init__.py
|
12133432
| |
fredkingham/blog-of-fred
|
refs/heads/master
|
registration/tests/default_backend.py
|
1
|
import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from registration import signals
from registration.admin import RegistrationAdmin
from registration.forms import RegistrationForm
from registration.backends.default.views import RegistrationView
from registration.models import RegistrationProfile
class DefaultBackendViewTests(TestCase):
"""
Test the default registration backend.
Running these tests successfully will require two templates to be
created for the sending of activation emails; details on these
templates and their contexts may be found in the documentation for
the default backend.
"""
# urls = 'registration.backends.default.urls'
def setUp(self):
"""
Create an instance of the default backend for use in testing,
and set ``ACCOUNT_ACTIVATION_DAYS`` if it's not set already.
"""
self.old_activation = getattr(settings, 'ACCOUNT_ACTIVATION_DAYS', None)
if self.old_activation is None:
settings.ACCOUNT_ACTIVATION_DAYS = 7 # pragma: no cover
def tearDown(self):
"""
Yank ``ACCOUNT_ACTIVATION_DAYS`` back out if it wasn't
originally set.
"""
if self.old_activation is None:
settings.ACCOUNT_ACTIVATION_DAYS = self.old_activation # pragma: no cover
def test_allow(self):
"""
The setting ``REGISTRATION_OPEN`` appropriately controls
whether registration is permitted.
"""
old_allowed = getattr(settings, 'REGISTRATION_OPEN', True)
settings.REGISTRATION_OPEN = True
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
settings.REGISTRATION_OPEN = False
# Now all attempts to hit the register view should redirect to
# the 'registration is closed' message.
resp = self.client.get(reverse('registration_register'))
self.assertRedirects(resp, reverse('registration_disallowed'))
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
self.assertRedirects(resp, reverse('registration_disallowed'))
settings.REGISTRATION_OPEN = old_allowed
def test_registration_get(self):
"""
HTTP ``GET`` to the registration view uses the appropriate
template and populates a registration form into the context.
"""
resp = self.client.get(reverse('registration_register'))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp,
'registration/registration_form.html')
self.failUnless(isinstance(resp.context['form'],
RegistrationForm))
def test_registration(self):
"""
Registration creates a new inactive account and a new profile
with activation key, populates the correct account data and
sends an activation email.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
self.assertRedirects(resp, reverse('registration_complete'))
new_user = User.objects.get(username='bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
# New user must not be active.
self.failIf(new_user.is_active)
# A registration profile was created, and an activation email
# was sent.
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
def test_registration_no_sites(self):
"""
Registration still functions properly when
``django.contrib.sites`` is not installed; the fallback will
be a ``RequestSite`` instance.
"""
Site._meta.installed = False
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
self.assertEqual(302, resp.status_code)
new_user = User.objects.get(username='bob')
self.failUnless(new_user.check_password('secret'))
self.assertEqual(new_user.email, 'bob@example.com')
self.failIf(new_user.is_active)
self.assertEqual(RegistrationProfile.objects.count(), 1)
self.assertEqual(len(mail.outbox), 1)
Site._meta.installed = True
def test_registration_failure(self):
"""
Registering with invalid data fails.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'notsecret'})
self.assertEqual(200, resp.status_code)
self.failIf(resp.context['form'].is_valid())
self.assertEqual(0, len(mail.outbox))
def test_activation(self):
"""
Activation of an account functions properly.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
profile = RegistrationProfile.objects.get(user__username='bob')
resp = self.client.get(reverse('registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key}))
self.assertRedirects(resp, reverse('registration_activation_complete'))
def test_activation_expired(self):
"""
An expired account can't be activated.
"""
resp = self.client.post(reverse('registration_register'),
data={'username': 'bob',
'email': 'bob@example.com',
'password1': 'secret',
'password2': 'secret'})
profile = RegistrationProfile.objects.get(user__username='bob')
user = profile.user
user.date_joined -= datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)
user.save()
resp = self.client.get(reverse('registration_activate',
args=(),
kwargs={'activation_key': profile.activation_key}))
self.assertEqual(200, resp.status_code)
self.assertTemplateUsed(resp, 'registration/activate.html')
self.failIf('activation_key' in resp.context)
|
JFriel/honours_project
|
refs/heads/master
|
networkx/networkx/algorithms/assortativity/tests/test_neighbor_degree.py
|
99
|
#!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestAverageNeighbor(object):
def test_degree_p4(self):
G=nx.path_graph(4)
answer={0:2,1:1.5,2:1.5,3:2}
nd = nx.average_neighbor_degree(G)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D, source='in', target='in')
assert_equal(nd,answer)
def test_degree_p4_weighted(self):
G=nx.path_graph(4)
G[1][2]['weight']=4
answer={0:2,1:1.8,2:1.8,3:2}
nd = nx.average_neighbor_degree(G,weight='weight')
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,weight='weight')
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,weight='weight')
assert_equal(nd,answer)
nd = nx.average_neighbor_degree(D,source='out',target='out',
weight='weight')
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,source='in',target='in',
weight='weight')
assert_equal(nd,answer)
def test_degree_k4(self):
G=nx.complete_graph(4)
answer={0:3,1:3,2:3,3:3}
nd = nx.average_neighbor_degree(G)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D)
assert_equal(nd,answer)
D=G.to_directed()
nd = nx.average_neighbor_degree(D,source='in',target='in')
assert_equal(nd,answer)
def test_degree_k4_nodes(self):
G=nx.complete_graph(4)
answer={1:3.0,2:3.0}
nd = nx.average_neighbor_degree(G,nodes=[1,2])
assert_equal(nd,answer)
def test_degree_barrat(self):
G=nx.star_graph(5)
G.add_edges_from([(5,6),(5,7),(5,8),(5,9)])
G[0][5]['weight']=5
nd = nx.average_neighbor_degree(G)[5]
assert_equal(nd,1.8)
nd = nx.average_neighbor_degree(G,weight='weight')[5]
assert_almost_equal(nd,3.222222,places=5)
|
GaetanCambier/CouchPotatoServer
|
refs/heads/develop
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/izlesene.py
|
26
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
float_or_none,
get_element_by_id,
int_or_none,
parse_iso8601,
str_to_int,
)
class IzleseneIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:(?:www|m)\.)?izlesene\.com/
(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
'''
_TESTS = [
{
'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
'md5': '4384f9f0ea65086734b881085ee05ac2',
'info_dict': {
'id': '7599694',
'ext': 'mp4',
'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
'description': 'md5:253753e2655dde93f59f74b572454f6d',
'thumbnail': 're:^http://.*\.jpg',
'uploader_id': 'pelikzzle',
'timestamp': 1404302298,
'upload_date': '20140702',
'duration': 95.395,
'age_limit': 0,
}
},
{
'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
'md5': '97f09b6872bffa284cb7fa4f6910cb72',
'info_dict': {
'id': '17997',
'ext': 'mp4',
'title': 'Tarkan Dortmund 2006 Konseri',
'description': 'Tarkan Dortmund 2006 Konseri',
'thumbnail': 're:^http://.*\.jpg',
'uploader_id': 'parlayankiz',
'timestamp': 1163322193,
'upload_date': '20061112',
'duration': 253.666,
'age_limit': 0,
}
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'http://www.izlesene.com/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._proto_relative_url(
self._og_search_thumbnail(webpage), scheme='http:')
uploader = self._html_search_regex(
r"adduserUsername\s*=\s*'([^']+)';",
webpage, 'uploader', fatal=False, default='')
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date', fatal=False))
duration = float_or_none(self._html_search_regex(
r'"videoduration"\s*:\s*"([^"]+)"',
webpage, 'duration', fatal=False), scale=1000)
view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
comment_count = self._html_search_regex(
r'comment_count\s*=\s*\'([^\']+)\';',
webpage, 'comment_count', fatal=False)
family_friendly = self._html_search_meta(
'isFamilyFriendly', webpage, 'age limit', fatal=False)
content_url = self._html_search_meta(
'contentURL', webpage, 'content URL', fatal=False)
ext = determine_ext(content_url, 'mp4')
# Might be empty for some videos.
streams = self._html_search_regex(
r'"qualitylevel"\s*:\s*"([^"]+)"',
webpage, 'streams', fatal=False, default='')
formats = []
if streams:
for stream in streams.split('|'):
quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
formats.append({
'format_id': '%sp' % quality if quality else 'sd',
'url': url,
'ext': ext,
})
else:
stream_url = self._search_regex(
r'"streamurl"\s?:\s?"([^"]+)"', webpage, 'stream URL')
formats.append({
'format_id': 'sd',
'url': stream_url,
'ext': ext,
})
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader_id': uploader,
'timestamp': timestamp,
'duration': duration,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'age_limit': 18 if family_friendly == 'False' else 0,
'formats': formats,
}
|
jhol/libsigrokdecode
|
refs/heads/public
|
decoders/can/__init__.py
|
2
|
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
CAN protocol decoder.
TODO: Description.
Details:
TODO
Protocol output format:
TODO.
'''
from .pd import *
|
antljones/physics
|
refs/heads/master
|
velocity_and_distance_from_rest.py
|
1
|
import sys
import argparse
import math
heavenly_body = {'mercury' : 3.7,
'venus' : 8.87,
'earth' : 9.8,
'mars' : 3.71,
'jupiter' : 24.79,
'saturn' : 10.44,
'uranus' : 8.87,
'neptune' : 11.15,
'moon' : 1.624
}
#Set the argument parser and command line arguments
parser = argparse.ArgumentParser(description="Produce velocity and distance from rest over time on a chosen heavenly body")
parser.add_argument('-t', dest='max_time', action='store', type=int, help='the amount of time given in seconds')
parser.add_argument('-s', dest='sample_time', action='store', type=float, help='the amount of time to sample in seconds')
parser.add_argument('-b', dest='body', action='store', help='heavenly body')
args = parser.parse_args()
#Check and act on the arguments
if args.max_time is None or args.max_time <= 0 or not args.body or args.sample_time is None or args.sample_time <= 0:
    print("Please give values for max and sample time above zero and the name of the heavenly body")
    sys.exit()
else:
    current_time = 0
    print(args.max_time)
    while current_time <= args.max_time:
        print("Time:", current_time, "sec")
        # Surface gravity is in m/s^2, so velocity is in m/s and distance in m.
        print("Velocity:", heavenly_body[args.body] * current_time, "m/s",
              " Distance:", 0.5 * (heavenly_body[args.body] * math.pow(current_time, 2)), "m")
        current_time = current_time + args.sample_time
    sys.exit("Complete")
|
wagtail/wagtail
|
refs/heads/stable/2.13.x
|
wagtail/contrib/simple_translation/tests/test_forms.py
|
4
|
from django.forms import CheckboxInput, HiddenInput
from django.test import TestCase, override_settings
from wagtail.contrib.simple_translation.forms import SubmitTranslationForm
from wagtail.core.models import Locale, Page
from wagtail.tests.i18n.models import TestPage
from wagtail.tests.utils import WagtailTestUtils
@override_settings(
LANGUAGES=[
("en", "English"),
("fr", "French"),
("de", "German"),
],
WAGTAIL_CONTENT_LANGUAGES=[
("en", "English"),
("fr", "French"),
("de", "German"),
],
)
class TestSubmitPageTranslation(WagtailTestUtils, TestCase):
def setUp(self):
self.en_locale = Locale.objects.first()
self.fr_locale = Locale.objects.create(language_code="fr")
self.de_locale = Locale.objects.create(language_code="de")
self.en_homepage = Page.objects.get(depth=2)
self.fr_homepage = self.en_homepage.copy_for_translation(self.fr_locale)
self.de_homepage = self.en_homepage.copy_for_translation(self.de_locale)
self.en_blog_index = TestPage(title="Blog", slug="blog")
self.en_homepage.add_child(instance=self.en_blog_index)
self.en_blog_post = TestPage(title="Blog post", slug="blog-post")
self.en_blog_index.add_child(instance=self.en_blog_post)
def test_include_subtree(self):
form = SubmitTranslationForm(instance=self.en_blog_post)
self.assertIsInstance(form.fields["include_subtree"].widget, HiddenInput)
form = SubmitTranslationForm(instance=self.en_blog_index)
self.assertIsInstance(form.fields["include_subtree"].widget, CheckboxInput)
self.assertEqual(
form.fields["include_subtree"].label, "Include subtree (1 page)"
)
form = SubmitTranslationForm(instance=self.en_homepage)
self.assertEqual(
form.fields["include_subtree"].label, "Include subtree (2 pages)"
)
def test_locales_queryset(self):
# Homepage is translated to all locales.
form = SubmitTranslationForm(instance=self.en_homepage)
self.assertEqual(
list(
form.fields["locales"].queryset.values_list("language_code", flat=True)
),
[],
)
# Blog index can be translated to `de` and `fr`.
form = SubmitTranslationForm(instance=self.en_blog_index)
self.assertEqual(
list(
form.fields["locales"].queryset.values_list("language_code", flat=True)
),
["de", "fr"],
)
# Blog post can be translated to `de` and `fr`.
form = SubmitTranslationForm(instance=self.en_blog_post)
self.assertEqual(
list(
form.fields["locales"].queryset.values_list("language_code", flat=True)
),
["de", "fr"],
)
def test_select_all(self):
form = SubmitTranslationForm(instance=self.en_homepage)
# Homepage is translated to all locales.
self.assertIsInstance(form.fields["select_all"].widget, HiddenInput)
form = SubmitTranslationForm(instance=self.en_blog_index)
# Blog post can be translated to `de` and `fr`.
self.assertIsInstance(form.fields["select_all"].widget, CheckboxInput)
def test_locale_disabled(self):
form = SubmitTranslationForm(instance=self.en_blog_post)
# The parent (blog_index) is translated to English.
# German and French are disabled.
self.assertEqual(
list(form.fields["locales"].widget.disabled_values),
[self.de_locale.id, self.fr_locale.id],
)
label = f"""
<label class="disabled">
<input type="checkbox" name="None" value="{self.de_locale.id}" disabled>
German
</label>
"""
self.assertInHTML(label, form.fields["locales"].widget.render(None, None))
def test_locale_help_text(self):
# German and French are disabled.
# The help_text is plural
form = SubmitTranslationForm(instance=self.en_blog_post)
help_text = f"""
Some locales are disabled because some parent pages are not translated.
<br>
<a href="/admin/translation/submit/page/{self.en_blog_index.id}/">
Translate the parent pages.
</a>
"""
self.assertHTMLEqual(form.fields["locales"].help_text, help_text)
# Add German translation
self.en_blog_index.copy_for_translation(self.de_locale)
# French is disabled.
# The help_text is singular.
form = SubmitTranslationForm(instance=self.en_blog_post)
help_text = f"""
A locale is disabled because a parent page is not translated.
<br>
<a href="/admin/translation/submit/page/{self.en_blog_index.id}/">
Translate the parent page.
</a>
"""
self.assertHTMLEqual(form.fields["locales"].help_text, help_text)
def test_hide_submit(self):
# German and French are disabled.
# There are no other pages to be translated.
# Submit is hidden.
form = SubmitTranslationForm(instance=self.en_blog_post)
self.assertFalse(form.show_submit)
# A parent is translated
self.en_blog_index.copy_for_translation(self.de_locale)
form = SubmitTranslationForm(instance=self.en_blog_post)
self.assertTrue(form.show_submit)
|
Vamshi99/coala-bears
|
refs/heads/master
|
bears/general/FilenameBear.py
|
8
|
import os.path
from coalib.bears.LocalBear import LocalBear
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.bearlib.naming_conventions import (
to_camelcase, to_kebabcase, to_pascalcase, to_snakecase, to_spacecase)
class FilenameBear(LocalBear):
LANGUAGES = {'All'}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
_naming_convention = {'camel': to_camelcase,
'kebab': to_kebabcase,
'pascal': to_pascalcase,
'snake': to_snakecase,
'space': to_spacecase}
_language_naming_convention = {
'.java': 'pascal',
'.js': 'kebab',
'.py': 'snake',
}
def run(self, filename, file,
file_naming_convention: str=None,
ignore_uppercase_filenames: bool=True,
filename_prefix: str='',
filename_suffix: str=''):
"""
Checks whether the filename follows a certain naming-convention.
:param file_naming_convention:
The naming-convention. Supported values are:
- ``auto`` to guess the correct convention. Defaults to ``snake``
if the correct convention cannot be guessed.
- ``camel`` (``thisIsCamelCase``)
- ``kebab`` (``this-is-kebab-case``)
- ``pascal`` (``ThisIsPascalCase``)
- ``snake`` (``this_is_snake_case``)
- ``space`` (``This Is Space Case``)
:param ignore_uppercase_filenames:
Whether or not to ignore fully uppercase filenames completely,
e.g. COPYING, LICENSE etc.
:param filename_prefix:
Check whether the filename uses a certain prefix.
The file's extension is ignored.
:param filename_suffix:
Check whether the filename uses a certain suffix.
The file's extension is ignored.
"""
head, tail = os.path.split(filename)
filename_without_extension, extension = os.path.splitext(tail)
if file_naming_convention is None:
self.warn('Please specify a file naming convention explicitly'
' or use "auto".')
file_naming_convention = 'auto'
else:
file_naming_convention = file_naming_convention.lower()
if file_naming_convention == 'auto':
if extension in self._language_naming_convention:
file_naming_convention = self._language_naming_convention[
extension]
else:
self.warn('The file naming convention could not be guessed. '
'Using the default "snake" naming convention.')
file_naming_convention = 'snake'
messages = []
try:
new_name = self._naming_convention[file_naming_convention](
filename_without_extension)
except KeyError:
self.err('Invalid file-naming-convention provided: ' +
file_naming_convention)
return
if new_name != filename_without_extension:
messages.append(
'Filename does not follow {} naming-convention.'.format(
file_naming_convention))
if not filename_without_extension.startswith(filename_prefix):
new_name = filename_prefix + new_name
messages.append(
'Filename does not use the prefix {!r}.'.format(
filename_prefix))
if not filename_without_extension.endswith(filename_suffix):
new_name = new_name + filename_suffix
messages.append(
'Filename does not use the suffix {!r}.'.format(
filename_suffix))
if ignore_uppercase_filenames and filename_without_extension.isupper():
return
if messages:
diff = Diff(file, rename=os.path.join(head, new_name + extension))
message = ('\n'.join('- ' + mes for mes in messages)
if len(messages) > 1 else messages[0])
yield Result(
self,
message,
diff.affected_code(filename),
diffs={filename: diff})
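# Configuration sketch (assumption: a typical INI-style .coafile section;
# the section name and glob are illustrative, not taken from this repository):
#   [filename-style]
#   bears = FilenameBear
#   files = **/*.py
#   file_naming_convention = snake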
|
Vixionar/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_timeuntil.py
|
207
|
from __future__ import unicode_literals
from datetime import datetime, timedelta
from django.template.defaultfilters import timeuntil_filter
from django.test import SimpleTestCase
from django.test.utils import requires_tz_support
from ..utils import setup
from .timezone_utils import TimezoneTestCase
class TimeuntilTests(TimezoneTestCase):
# Default compare with datetime.now()
@setup({'timeuntil01': '{{ a|timeuntil }}'})
def test_timeuntil01(self):
output = self.engine.render_to_string('timeuntil01', {'a': datetime.now() + timedelta(minutes=2, seconds=10)})
self.assertEqual(output, '2\xa0minutes')
@setup({'timeuntil02': '{{ a|timeuntil }}'})
def test_timeuntil02(self):
output = self.engine.render_to_string('timeuntil02', {'a': (datetime.now() + timedelta(days=1, seconds=10))})
self.assertEqual(output, '1\xa0day')
@setup({'timeuntil03': '{{ a|timeuntil }}'})
def test_timeuntil03(self):
output = self.engine.render_to_string('timeuntil03', {'a': (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))})
self.assertEqual(output, '8\xa0hours, 10\xa0minutes')
# Compare to a given parameter
@setup({'timeuntil04': '{{ a|timeuntil:b }}'})
def test_timeuntil04(self):
output = self.engine.render_to_string(
'timeuntil04',
{'a': self.now - timedelta(days=1), 'b': self.now - timedelta(days=2)},
)
self.assertEqual(output, '1\xa0day')
@setup({'timeuntil05': '{{ a|timeuntil:b }}'})
def test_timeuntil05(self):
output = self.engine.render_to_string(
'timeuntil05',
{'a': self.now - timedelta(days=2), 'b': self.now - timedelta(days=2, minutes=1)},
)
self.assertEqual(output, '1\xa0minute')
# Regression for #7443
@setup({'timeuntil06': '{{ earlier|timeuntil }}'})
def test_timeuntil06(self):
output = self.engine.render_to_string('timeuntil06', {'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil07': '{{ earlier|timeuntil:now }}'})
def test_timeuntil07(self):
output = self.engine.render_to_string('timeuntil07', {'now': self.now, 'earlier': self.now - timedelta(days=7)})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil08': '{{ later|timeuntil }}'})
def test_timeuntil08(self):
output = self.engine.render_to_string('timeuntil08', {'later': self.now + timedelta(days=7, hours=1)})
self.assertEqual(output, '1\xa0week')
@setup({'timeuntil09': '{{ later|timeuntil:now }}'})
def test_timeuntil09(self):
output = self.engine.render_to_string('timeuntil09', {'now': self.now, 'later': self.now + timedelta(days=7)})
self.assertEqual(output, '1\xa0week')
# Ensures that differing timezones are calculated correctly.
@requires_tz_support
@setup({'timeuntil10': '{{ a|timeuntil }}'})
def test_timeuntil10(self):
output = self.engine.render_to_string('timeuntil10', {'a': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
@requires_tz_support
@setup({'timeuntil11': '{{ a|timeuntil }}'})
def test_timeuntil11(self):
output = self.engine.render_to_string('timeuntil11', {'a': self.now_tz_i})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil12': '{{ a|timeuntil:b }}'})
def test_timeuntil12(self):
output = self.engine.render_to_string('timeuntil12', {'a': self.now_tz_i, 'b': self.now_tz})
self.assertEqual(output, '0\xa0minutes')
# Regression for #9065 (two date objects).
@setup({'timeuntil13': '{{ a|timeuntil:b }}'})
def test_timeuntil13(self):
output = self.engine.render_to_string('timeuntil13', {'a': self.today, 'b': self.today})
self.assertEqual(output, '0\xa0minutes')
@setup({'timeuntil14': '{{ a|timeuntil:b }}'})
def test_timeuntil14(self):
output = self.engine.render_to_string('timeuntil14', {'a': self.today, 'b': self.today - timedelta(hours=24)})
self.assertEqual(output, '1\xa0day')
class FunctionTests(SimpleTestCase):
def test_until_now(self):
self.assertEqual(timeuntil_filter(datetime.now() + timedelta(1, 1)), '1\xa0day')
def test_explicit_date(self):
self.assertEqual(timeuntil_filter(datetime(2005, 12, 30), datetime(2005, 12, 29)), '1\xa0day')
|
UnderGreen/ansible-modules-extras
|
refs/heads/devel
|
windows/win_timezone.py
|
71
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_timezone
version_added: "2.1"
short_description: Sets Windows machine timezone
description:
    - Sets machine time to the specified timezone; the module will check whether the provided timezone is supported on the machine.
options:
timezone:
description:
      - Timezone to set to. Example: Central Standard Time
required: true
default: null
aliases: []
author: Phil Schwartz
'''
EXAMPLES = '''
# Set machine's timezone to Central Standard Time
win_timezone:
timezone: "Central Standard Time"
'''
RETURN = '''# '''
|
Krossom/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/unittest/test/test_break.py
|
785
|
import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform == "win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
gc.collect();gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
|
gh0std4ncer/doit
|
refs/heads/master
|
doc/tutorial/doit_config.py
|
10
|
DOIT_CONFIG = {'default_tasks': ['my_task_1', 'my_task_2'],
'continue': True,
'reporter': 'json'}
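# Usage sketch (assumption): placed in a project's dodo.py, this config makes
# a plain `doit` invocation run my_task_1 and my_task_2 by default, keep going
# past failing tasks ('continue': True), and report results as JSON
# ('reporter': 'json').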
|
lsinfo/odoo
|
refs/heads/8.0
|
addons/l10n_be_intrastat/__openerp__.py
|
257
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgian Intrastat Declaration',
'version': '1.0',
'category': 'Reporting',
'description': """
Generates Intrastat XML report for declaration
Based on invoices.
""",
'author': 'Odoo SA',
'depends': ['report_intrastat', 'sale_stock', 'account_accountant', 'l10n_be'],
'data': [
'data/regions.xml',
'data/report.intrastat.code.csv',
'data/transaction.codes.xml',
'data/transport.modes.xml',
'security/groups.xml',
'security/ir.model.access.csv',
'l10n_be_intrastat.xml',
'wizard/l10n_be_intrastat_xml_view.xml',
],
'installable': True,
}
|
defance/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/mongo/__init__.py
|
268
|
"""
Provide names as exported by older mongo.py module
"""
from xmodule.modulestore.mongo.base import MongoModuleStore, MongoKeyValueStore
# Backwards compatibility for prod systems that reference
# xmodule.modulestore.mongo.DraftMongoModuleStore
from xmodule.modulestore.mongo.draft import DraftModuleStore as DraftMongoModuleStore
|
pjdelport/pip
|
refs/heads/develop
|
pip/commands/unzip.py
|
122
|
from __future__ import absolute_import
from pip.commands.zip import ZipCommand
class UnzipCommand(ZipCommand):
"""Unzip individual packages."""
name = 'unzip'
summary = 'DEPRECATED. Unzip individual packages.'
|
jhh/puka
|
refs/heads/main
|
bookmarks/admin.py
|
1
|
from django.contrib import admin
from .models import Bookmark
@admin.register(Bookmark)
class BookmarkAdmin(admin.ModelAdmin):
list_display = (
"id",
"title",
"url",
"tags",
"created_at",
)
list_filter = ("created_at",)
date_hierarchy = "created_at"
exclude = ("title_description_search",)
|
oVirt/ovirt-node-ng
|
refs/heads/master
|
src/nodectl/banner.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# nodectl
#
# Copyright (C) 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Ryan Barry <rbarry@redhat.com>
#
import json
import re
import subprocess
issue_tmpl = r'''\S
Kernel \r on an \m
'''
class Banner(object):
"""Fetches IP addresses and adds them to motd (on SSH logins)
Also called on a timer to update /etc/issue
"""
def __init__(self, machine_readable, update_issue=False):
self.machine_readable = machine_readable
self._get_ips()
self._generate_message()
if update_issue:
self.update_issue()
else:
self.gen_motd()
def _get_ips(self):
output = subprocess.check_output(["ip", "addr"]).decode("utf-8")
relevant = [l for l in output.splitlines() if "global" in l and
"virbr" not in l]
addresses = []
for r in relevant:
addresses.append(re.match(r'inet6?(.*?)/.*',
r.lstrip()).groups()[0].strip())
self.addresses = addresses
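    # Illustrative matched line (not captured from a real host):
    #   "inet 192.0.2.10/24 brd 192.0.2.255 scope global dynamic eth0"
    # from which the regex above extracts the bare address "192.0.2.10".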
def _generate_message(self):
pre = "Admin Console: "
urls = ' or '.join(["https://%s:9090/" % a for a in self.addresses])
self._msg = pre + urls
def gen_motd(self):
if self.machine_readable:
msg = dict()
msg["admin_console"] = self.addresses
print(json.dumps(msg))
else:
print("%s\n" % self._msg)
def update_issue(self):
with open('/etc/issue', 'w') as f:
f.write(issue_tmpl)
f.write(self._msg)
f.write('\n\n')
|
dfdx2/django
|
refs/heads/master
|
django/views/generic/dates.py
|
16
|
import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from django.views.generic.base import View
from django.views.generic.detail import (
BaseDetailView, SingleObjectTemplateResponseMixin,
)
from django.views.generic.list import (
MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)
class YearMixin:
"""Mixin for views manipulating year-based data."""
year_format = '%Y'
year = None
def get_year_format(self):
"""
Get a year format string in strptime syntax to be used to parse the
year from url variables.
"""
return self.year_format
def get_year(self):
"""Return the year for which this view should display data."""
year = self.year
if year is None:
try:
year = self.kwargs['year']
except KeyError:
try:
year = self.request.GET['year']
except KeyError:
raise Http404(_("No year specified"))
return year
def get_next_year(self, date):
"""Get the next valid year."""
return _get_next_prev(self, date, is_previous=False, period='year')
def get_previous_year(self, date):
"""Get the previous valid year."""
return _get_next_prev(self, date, is_previous=True, period='year')
def _get_next_year(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date.replace(year=date.year + 1, month=1, day=1)
def _get_current_year(self, date):
"""Return the start date of the current interval."""
return date.replace(month=1, day=1)
class MonthMixin:
"""Mixin for views manipulating month-based data."""
month_format = '%b'
month = None
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_month(self):
"""Return the month for which this view should display data."""
month = self.month
if month is None:
try:
month = self.kwargs['month']
except KeyError:
try:
month = self.request.GET['month']
except KeyError:
raise Http404(_("No month specified"))
return month
def get_next_month(self, date):
"""Get the next valid month."""
return _get_next_prev(self, date, is_previous=False, period='month')
def get_previous_month(self, date):
"""Get the previous valid month."""
return _get_next_prev(self, date, is_previous=True, period='month')
def _get_next_month(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
if date.month == 12:
return date.replace(year=date.year + 1, month=1, day=1)
else:
return date.replace(month=date.month + 1, day=1)
def _get_current_month(self, date):
"""Return the start date of the previous interval."""
return date.replace(day=1)
class DayMixin:
"""Mixin for views manipulating day-based data."""
day_format = '%d'
day = None
def get_day_format(self):
"""
Get a day format string in strptime syntax to be used to parse the day
from url variables.
"""
return self.day_format
def get_day(self):
"""Return the day for which this view should display data."""
day = self.day
if day is None:
try:
day = self.kwargs['day']
except KeyError:
try:
day = self.request.GET['day']
except KeyError:
raise Http404(_("No day specified"))
return day
def get_next_day(self, date):
"""Get the next valid day."""
return _get_next_prev(self, date, is_previous=False, period='day')
def get_previous_day(self, date):
"""Get the previous valid day."""
return _get_next_prev(self, date, is_previous=True, period='day')
def _get_next_day(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=1)
def _get_current_day(self, date):
"""Return the start date of the current interval."""
return date
class WeekMixin:
"""Mixin for views manipulating week-based data."""
week_format = '%U'
week = None
def get_week_format(self):
"""
Get a week format string in strptime syntax to be used to parse the
week from url variables.
"""
return self.week_format
def get_week(self):
"""Return the week for which this view should display data."""
week = self.week
if week is None:
try:
week = self.kwargs['week']
except KeyError:
try:
week = self.request.GET['week']
except KeyError:
raise Http404(_("No week specified"))
return week
def get_next_week(self, date):
"""Get the next valid week."""
return _get_next_prev(self, date, is_previous=False, period='week')
def get_previous_week(self, date):
"""Get the previous valid week."""
return _get_next_prev(self, date, is_previous=True, period='week')
def _get_next_week(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
return date + datetime.timedelta(days=7 - self._get_weekday(date))
def _get_current_week(self, date):
"""Return the start date of the current interval."""
return date - datetime.timedelta(self._get_weekday(date))
def _get_weekday(self, date):
"""
Return the weekday for a given date.
The first day according to the week format is 0 and the last day is 6.
"""
week_format = self.get_week_format()
if week_format == '%W': # week starts on Monday
return date.weekday()
elif week_format == '%U': # week starts on Sunday
return (date.weekday() + 1) % 7
else:
raise ValueError("unknown week format: %s" % week_format)
class DateMixin:
"""Mixin class for views manipulating date-based data."""
date_field = None
allow_future = False
def get_date_field(self):
"""Get the name of the date field to be used to filter by."""
if self.date_field is None:
raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__)
return self.date_field
def get_allow_future(self):
"""
Return `True` if the view should be allowed to display objects from
the future.
"""
return self.allow_future
# Note: the following three methods only work in subclasses that also
# inherit SingleObjectMixin or MultipleObjectMixin.
@cached_property
def uses_datetime_field(self):
"""
Return `True` if the date field is a `DateTimeField` and `False`
if it's a `DateField`.
"""
model = self.get_queryset().model if self.model is None else self.model
field = model._meta.get_field(self.get_date_field())
return isinstance(field, models.DateTimeField)
def _make_date_lookup_arg(self, value):
"""
Convert a date into a datetime when the date field is a DateTimeField.
When time zone support is enabled, `date` is assumed to be in the
current time zone, so that displayed items are consistent with the URL.
"""
if self.uses_datetime_field:
value = datetime.datetime.combine(value, datetime.time.min)
if settings.USE_TZ:
value = timezone.make_aware(value, timezone.get_current_timezone())
return value
def _make_single_date_lookup(self, date):
"""
Get the lookup kwargs for filtering on a single date.
If the date field is a DateTimeField, we can't just filter on
date_field=date because that doesn't take the time into account.
"""
date_field = self.get_date_field()
if self.uses_datetime_field:
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
return {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
else:
# Skip self._make_date_lookup_arg, it's a no-op in this branch.
return {date_field: date}
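    # Worked example (illustrative field and date): for a DateTimeField named
    # 'pub_date' and datetime.date(2020, 5, 1), this returns
    # {'pub_date__gte': 2020-05-01 00:00, 'pub_date__lt': 2020-05-02 00:00},
    # with both bounds made timezone-aware when USE_TZ is enabled.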
class BaseDateListView(MultipleObjectMixin, DateMixin, View):
"""Abstract base class for date-based views displaying a list of objects."""
allow_empty = False
date_list_period = 'year'
def get(self, request, *args, **kwargs):
self.date_list, self.object_list, extra_context = self.get_dated_items()
context = self.get_context_data(object_list=self.object_list,
date_list=self.date_list)
context.update(extra_context)
return self.render_to_response(context)
def get_dated_items(self):
"""Obtain the list of dates and items."""
raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')
def get_ordering(self):
"""
Return the field or fields to use for ordering the queryset; use the
date field by default.
"""
return '-%s' % self.get_date_field() if self.ordering is None else self.ordering
def get_dated_queryset(self, **lookup):
"""
Get a queryset properly filtered according to `allow_future` and any
extra lookup kwargs.
"""
qs = self.get_queryset().filter(**lookup)
date_field = self.get_date_field()
allow_future = self.get_allow_future()
allow_empty = self.get_allow_empty()
paginate_by = self.get_paginate_by(qs)
if not allow_future:
now = timezone.now() if self.uses_datetime_field else timezone_today()
qs = qs.filter(**{'%s__lte' % date_field: now})
if not allow_empty:
# When pagination is enabled, it's better to do a cheap query
# than to load the unpaginated queryset in memory.
is_empty = len(qs) == 0 if paginate_by is None else not qs.exists()
if is_empty:
raise Http404(_("No %(verbose_name_plural)s available") % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
})
return qs
def get_date_list_period(self):
"""
Get the aggregation period for the list of dates: 'year', 'month', or
'day'.
"""
return self.date_list_period
def get_date_list(self, queryset, date_type=None, ordering='ASC'):
"""
Get a date list by calling `queryset.dates/datetimes()`, checking
along the way for empty lists that aren't allowed.
"""
date_field = self.get_date_field()
allow_empty = self.get_allow_empty()
if date_type is None:
date_type = self.get_date_list_period()
if self.uses_datetime_field:
date_list = queryset.datetimes(date_field, date_type, ordering)
else:
date_list = queryset.dates(date_field, date_type, ordering)
if date_list is not None and not date_list and not allow_empty:
raise Http404(
_("No %(verbose_name_plural)s available") % {
'verbose_name_plural': queryset.model._meta.verbose_name_plural,
}
)
return date_list
class BaseArchiveIndexView(BaseDateListView):
"""
Base class for archives of date-based items. Requires a response mixin.
"""
context_object_name = 'latest'
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
qs = self.get_dated_queryset()
date_list = self.get_date_list(qs, ordering='DESC')
if not date_list:
qs = qs.none()
return (date_list, qs, {})
class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
"""Top-level archive of date-based items."""
template_name_suffix = '_archive'
class BaseYearArchiveView(YearMixin, BaseDateListView):
"""List of objects published in a given year."""
date_list_period = 'month'
make_object_list = False
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_year(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
if not self.get_make_object_list():
# We need this to be a queryset since parent classes introspect it
# to find information about the model.
qs = qs.none()
return (date_list, qs, {
'year': date,
'next_year': self.get_next_year(date),
'previous_year': self.get_previous_year(date),
})
def get_make_object_list(self):
"""
Return `True` if this view should contain the full list of objects in
the given year.
"""
return self.make_object_list
class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
"""List of objects published in a given year."""
template_name_suffix = '_archive_year'
class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
"""List of objects published in a given month."""
date_list_period = 'day'
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
month = self.get_month()
date_field = self.get_date_field()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format())
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_month(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
date_list = self.get_date_list(qs)
return (date_list, qs, {
'month': date,
'next_month': self.get_next_month(date),
'previous_month': self.get_previous_month(date),
})
class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
"""List of objects published in a given month."""
template_name_suffix = '_archive_month'
class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
"""List of objects published in a given week."""
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
week = self.get_week()
date_field = self.get_date_field()
week_format = self.get_week_format()
week_start = {
'%W': '1',
'%U': '0',
}[week_format]
date = _date_from_string(year, self.get_year_format(),
week_start, '%w',
week, week_format)
since = self._make_date_lookup_arg(date)
until = self._make_date_lookup_arg(self._get_next_week(date))
lookup_kwargs = {
'%s__gte' % date_field: since,
'%s__lt' % date_field: until,
}
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'week': date,
'next_week': self.get_next_week(date),
'previous_week': self.get_previous_week(date),
})
class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
"""List of objects published in a given week."""
template_name_suffix = '_archive_week'
class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
"""List of objects published on a given day."""
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
return self._get_dated_items(date)
def _get_dated_items(self, date):
"""
Do the actual heavy lifting of getting the dated items; this accepts a
date object so that TodayArchiveView can be trivial.
"""
lookup_kwargs = self._make_single_date_lookup(date)
qs = self.get_dated_queryset(**lookup_kwargs)
return (None, qs, {
'day': date,
'previous_day': self.get_previous_day(date),
'next_day': self.get_next_day(date),
'previous_month': self.get_previous_month(date),
'next_month': self.get_next_month(date)
})
class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
"""List of objects published on a given day."""
template_name_suffix = "_archive_day"
class BaseTodayArchiveView(BaseDayArchiveView):
"""List of objects published today."""
def get_dated_items(self):
"""Return (date_list, items, extra_context) for this request."""
return self._get_dated_items(datetime.date.today())
class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
"""List of objects published today."""
template_name_suffix = "_archive_day"
class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
def get_object(self, queryset=None):
"""Get the object this request displays."""
year = self.get_year()
month = self.get_month()
day = self.get_day()
date = _date_from_string(year, self.get_year_format(),
month, self.get_month_format(),
day, self.get_day_format())
# Use a custom queryset if provided
qs = self.get_queryset() if queryset is None else queryset
if not self.get_allow_future() and date > datetime.date.today():
raise Http404(_(
"Future %(verbose_name_plural)s not available because "
"%(class_name)s.allow_future is False."
) % {
'verbose_name_plural': qs.model._meta.verbose_name_plural,
'class_name': self.__class__.__name__,
})
# Filter down a queryset from self.queryset using the date from the
# URL. This'll get passed as the queryset to DetailView.get_object,
# which'll handle the 404
lookup_kwargs = self._make_single_date_lookup(date)
qs = qs.filter(**lookup_kwargs)
return super().get_object(queryset=qs)
class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
"""
Detail view of a single object on a single date; this differs from the
standard DetailView by accepting a year/month/day in the URL.
"""
template_name_suffix = '_detail'
def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'):
"""
Get a datetime.date object given a format string and a year, month, and day
(only year is mandatory). Raise a 404 for an invalid date.
"""
format = delim.join((year_format, month_format, day_format))
datestr = delim.join((year, month, day))
try:
return datetime.datetime.strptime(datestr, format).date()
except ValueError:
raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
'datestr': datestr,
'format': format,
})
def _get_next_prev(generic_view, date, is_previous, period):
"""
Get the next or the previous valid date. The idea is to allow links on
month/day views to never be 404s by never providing a date that'll be
invalid for the given view.
This is a bit complicated since it handles different intervals of time,
hence the coupling to generic_view.
However in essence the logic comes down to:
* If allow_empty and allow_future are both true, this is easy: just
return the naive result (just the next/previous day/week/month,
regardless of object existence.)
* If allow_empty is true, allow_future is false, and the naive result
isn't in the future, then return it; otherwise return None.
* If allow_empty is false and allow_future is true, return the next
date *that contains a valid object*, even if it's in the future. If
there are no next objects, return None.
* If allow_empty is false and allow_future is false, return the next
date that contains a valid object. If that date is in the future, or
if there are no next objects, return None.
"""
date_field = generic_view.get_date_field()
allow_empty = generic_view.get_allow_empty()
allow_future = generic_view.get_allow_future()
get_current = getattr(generic_view, '_get_current_%s' % period)
get_next = getattr(generic_view, '_get_next_%s' % period)
# Bounds of the current interval
start, end = get_current(date), get_next(date)
# If allow_empty is True, the naive result will be valid
if allow_empty:
if is_previous:
result = get_current(start - datetime.timedelta(days=1))
else:
result = end
if allow_future or result <= timezone_today():
return result
else:
return None
# Otherwise, we'll need to go to the database to look for an object
# whose date_field is at least (greater than/less than) the given
# naive result
else:
# Construct a lookup and an ordering depending on whether we're doing
# a previous date or a next date lookup.
if is_previous:
lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
ordering = '-%s' % date_field
else:
lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
ordering = date_field
# Filter out objects in the future if appropriate.
if not allow_future:
# Fortunately, to match the implementation of allow_future,
# we need __lte, which doesn't conflict with __lt above.
if generic_view.uses_datetime_field:
now = timezone.now()
else:
now = timezone_today()
lookup['%s__lte' % date_field] = now
qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)
# Snag the first object from the queryset; if it doesn't exist that
# means there's no next/previous link available.
try:
result = getattr(qs[0], date_field)
except IndexError:
return None
# Convert datetimes to dates in the current time zone.
if generic_view.uses_datetime_field:
if settings.USE_TZ:
result = timezone.localtime(result)
result = result.date()
# Return the first day of the period.
return get_current(result)
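# Worked example (illustrative): on a month archive with allow_empty=True,
# date(2020, 5, 10) gives start=2020-05-01 and end=2020-06-01; the previous
# link resolves to get_current(2020-04-30) == 2020-04-01 and the next link to
# 2020-06-01, each returned only if permitted by allow_future/timezone_today().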
def timezone_today():
"""Return the current date in the current time zone."""
if settings.USE_TZ:
return timezone.localdate()
else:
return datetime.date.today()
|
kvnvelasco/aposi
|
refs/heads/master
|
src/app_/forms.py
|
2
|
from flask.ext.wtf import Form
from wtforms import StringField, TextAreaField, SelectField, SubmitField
from wtforms.validators import DataRequired
from flask_wtf.file import FileField, FileAllowed, FileRequired
class sendForm(Form):
fullName = StringField('name', validators=[DataRequired()])
email = StringField('email', validators=[DataRequired()])
company = StringField('company', validators=[DataRequired()])
phone = StringField('phone', validators=[DataRequired()])
message = TextAreaField('message', validators=[DataRequired()])
submit = SubmitField('Send Message')
class appForm(Form):
    selection = SelectField('name', choices=[
        ('Customer Service Rep.', 'Customer Service Rep.'),
        ('Online Tutors (statistics)', 'Online Tutors (Statistics)'),
        ('Company Nurse', 'Company Nurse'),
        ('Trainer', 'Trainer'),
        ('Receptionist', 'Receptionist'),
        ('Workforce Coordinator', 'Workforce Coordinator'),
        ('QA Assurance Analyst', 'QA Assurance Analyst'),
        ('Personal Injury Case Manager', 'Personal Injury Case Manager'),
        ('Lead Generation Specialist', 'Lead Generation Specialist'),
        ('Client Care Rep.', 'Client Care Rep.'),
        ('Team Supervisor', 'Team Supervisor'),
        ('Technical Support Specialist', 'Technical Support Specialist'),
    ], validators=[DataRequired()])
name = StringField('name', validators=[DataRequired()])
contact = StringField('email', validators=[DataRequired()])
resume = FileField('resume', validators=[FileRequired(), FileAllowed(['pdf', 'doc'], 'Only pdf or doc files are accepted')] )
submit = SubmitField('sendApplication')
|
harris-helios/helios-sdk-python
|
refs/heads/master
|
helios/utilities/json_utils.py
|
1
|
"""Helper functions for JSON objects."""
import json
def read_json_file(json_file, **kwargs):
"""
Read a json file.
Args:
json_file (str): Full path to JSON file.
**kwargs: Any keyword argument from the json.load method.
Returns:
dict: JSON formatted dictionary.
"""
with open(json_file, 'r') as f:
return json.load(f, **kwargs)
def read_json_string(json_string, **kwargs):
"""
Convert JSON formatted string to JSON.
Args:
json_string (str): JSON formatted string.
**kwargs: Any keyword argument from the json.loads method.
Returns:
dict: JSON formatted dictionary.
"""
return json.loads(json_string, **kwargs)
def write_json(json_dict, file_name, **kwargs):
"""
Write JSON dictionary to file.
Args:
json_dict (dict): JSON formatted dictionary.
file_name (str): Output file name.
**kwargs: Any keyword argument from the json.dump method.
Returns:
None
"""
with open(file_name, 'w+') as output_file:
json.dump(json_dict, output_file, **kwargs)
def merge_json(data, keys):
"""
Merge JSON fields into a single list.
Keys can either be a single string or a list of strings signifying a chain
of "keys" into the dictionary.
Args:
data (list): Dictionary to merge data from.
keys (str or sequence of strs): A chain of keys into the dictionary
to get to the field that will be merged.
Returns:
list: Merged values.
"""
if not isinstance(keys, list):
keys = [keys]
for k in keys:
data = _merge_digger(data, k)
return data
def _merge_digger(data, key):
merged_list = []
if not isinstance(data, list):
data = [data]
for json_slice in data:
temp = json_slice[key]
if not isinstance(temp, list):
temp = [temp]
merged_list.extend(temp)
return merged_list
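# Usage sketch (hypothetical data): extract every "id" nested under "results":
#   data = [{'results': [{'id': 1}, {'id': 2}]}, {'results': [{'id': 3}]}]
#   merge_json(data, ['results', 'id'])  # -> [1, 2, 3]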
|
tima/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cloudengine/__init__.py
|
12133432
| |
lmallin/coverage_test
|
refs/heads/master
|
python_venv/lib/python2.7/site-packages/pandas/tests/indexes/timedeltas/__init__.py
|
12133432
| |
aruizramon/alec_erpnext
|
refs/heads/master
|
erpnext/patches/v4_2/__init__.py
|
12133432
| |
swdream/neutron
|
refs/heads/master
|
neutron/agent/l3/config.py
|
12
|
# Copyright (c) 2015 OpenStack Foundation.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.common import constants
OPTS = [
cfg.StrOpt('agent_mode', default=constants.L3_AGENT_MODE_LEGACY,
choices=(constants.L3_AGENT_MODE_DVR,
constants.L3_AGENT_MODE_DVR_SNAT,
constants.L3_AGENT_MODE_LEGACY),
help=_("The working mode for the agent. Allowed modes are: "
"'legacy' - this preserves the existing behavior "
"where the L3 agent is deployed on a centralized "
"networking node to provide L3 services like DNAT, "
"and SNAT. Use this mode if you do not want to "
"adopt DVR. 'dvr' - this mode enables DVR "
"functionality and must be used for an L3 agent "
"that runs on a compute host. 'dvr_snat' - this "
"enables centralized SNAT support in conjunction "
"with DVR. This mode must be used for an L3 agent "
"running on a centralized node (or in single-host "
"deployments, e.g. devstack)")),
cfg.StrOpt('external_network_bridge', default='br-ex',
deprecated_for_removal=True,
help=_("Name of bridge used for external network "
"traffic.")),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port used by Neutron metadata namespace "
"proxy.")),
cfg.IntOpt('send_arp_for_ha',
default=3,
help=_("Send this many gratuitous ARPs for HA setup, if "
"less than or equal to 0, the feature is disabled")),
cfg.StrOpt('router_id', default='',
help=_("If namespaces is disabled, the l3 agent can only"
" configure a router that has the matching router "
"ID.")),
cfg.BoolOpt('handle_internal_only_routers',
default=True,
help=_("Agent should implement routers with no gateway")),
cfg.StrOpt('gateway_external_network_id', default='',
help=_("UUID of external network for routers implemented "
"by the agents.")),
cfg.StrOpt('ipv6_gateway', default='',
help=_("With IPv6, the network used for the external gateway "
"does not need to have an associated subnet, since the "
"automatically assigned link-local address (LLA) can "
"be used. However, an IPv6 gateway address is needed "
"for use as the next-hop for the default route. "
"If no IPv6 gateway address is configured here, "
"(and only then) the neutron router will be configured "
"to get its default route from router advertisements "
"(RAs) from the upstream router; in which case the "
"upstream router must also be configured to send "
"these RAs. "
"The ipv6_gateway, when configured, should be the LLA "
"of the interface on the upstream router. If a "
"next-hop using a global unique address (GUA) is "
"desired, it needs to be done via a subnet allocated "
"to the network and not through this parameter. ")),
cfg.StrOpt('prefix_delegation_driver',
default='dibbler',
help=_('Driver used for ipv6 prefix delegation. This needs to '
'be an entry point defined in the '
'neutron.agent.linux.pd_drivers namespace. See '
'setup.cfg for entry points included with the neutron '
'source.')),
cfg.BoolOpt('enable_metadata_proxy', default=True,
help=_("Allow running metadata proxy.")),
cfg.BoolOpt('router_delete_namespaces', default=True,
help=_("Delete namespace after removing a router."
"This option is deprecated and "
"will be removed in a future release."),
deprecated_for_removal=True),
cfg.StrOpt('metadata_access_mark',
default='0x1',
help=_('Iptables mangle mark used to mark metadata valid '
'requests. This mark will be masked with 0xffff so '
'that only the lower 16 bits will be used.')),
cfg.StrOpt('external_ingress_mark',
default='0x2',
help=_('Iptables mangle mark used to mark ingress from '
'external network. This mark will be masked with '
'0xffff so that only the lower 16 bits will be used.')),
]
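# Usage sketch (assumption, not part of this module): the L3 agent typically
# registers these options on the global config object before parsing, e.g.:
#   from oslo_config import cfg
#   cfg.CONF.register_opts(OPTS)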
|
electrumalt/electrum-doge
|
refs/heads/master
|
lib/transaction.py
|
1
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Note: The deserialization code originally comes from ABE.
import bitcoin
from bitcoin import *
from util import print_error
import time
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import StringIO
import mmap
import random
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, bytes): # Initialize with string of bytes
if self.input is None:
self.input = bytes
else:
self.input += bytes
def map_file(self, file, start): # Initialize with bytes from file
self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
self.read_cursor = start
def seek_file(self, position):
self.read_cursor = position
def close_file(self):
self.input.close()
def read_string(self):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
try:
length = self.read_compact_size()
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return self.read_bytes(length)
def write_string(self, string):
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
size = ord(self.input[self.read_cursor])
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(chr(size))
elif size < 2**16:
self.write('\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write('\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write('\xff')
self._write_num('<Q', size)
def _read_num(self, format):
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
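# Worked examples of the compact-size encoding implemented above (the byte
# strings shown are illustrative):
#   write_compact_size(252)   -> '\xfc'                  (1 byte)
#   write_compact_size(253)   -> '\xfd\xfd\x00'          (1 + 2 bytes, little-endian)
#   write_compact_size(70000) -> '\xfe\x70\x11\x01\x00'  (1 + 4 bytes, little-endian)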
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
pass
class Enumeration:
def __init__(self, name, enumList):
self.__doc__ = name
lookup = { }
reverseLookup = { }
i = 0
uniqueNames = [ ]
uniqueValues = [ ]
for x in enumList:
if type(x) == types.TupleType:
x, i = x
if type(x) != types.StringType:
raise EnumException, "enum name is not a string: " + x
if type(i) != types.IntType:
raise EnumException, "enum value is not an integer: " + i
if x in uniqueNames:
raise EnumException, "enum name is not unique: " + x
if i in uniqueValues:
raise EnumException, "enum value is not unique for " + x
uniqueNames.append(x)
uniqueValues.append(i)
lookup[x] = i
reverseLookup[i] = x
i = i + 1
self.lookup = lookup
self.reverseLookup = reverseLookup
def __getattr__(self, attr):
if not self.lookup.has_key(attr):
raise AttributeError
return self.lookup[attr]
def whatis(self, value):
return self.reverseLookup[value]
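# Usage sketch (illustrative names): entries may be bare names or
# (name, value) pairs that reset the running counter:
#   sigs = Enumeration("Sigs", ["none", ("partial", 5), "full"])
#   sigs.partial      # -> 5
#   sigs.full         # -> 6
#   sigs.whatis(0)    # -> 'none'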
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.encode('hex_codec')
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def parse_redeemScript(bytes):
dec = [ x for x in script_GetOp(bytes.decode('hex')) ]
# 2 of 2
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex') ]
return 2, pubkeys
# 2 of 3
match = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec, match):
pubkeys = [ dec[1][1].encode('hex'), dec[2][1].encode('hex'), dec[3][1].encode('hex') ]
return 2, pubkeys
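# Note (descriptive, added for clarity): only the 2-of-2 and 2-of-3 multisig
# templates above are recognised; any other redeem script falls through and
# the function implicitly returns None.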
opcodes = Enumeration("Opcodes", [
("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
"OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
"OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
"OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
"OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
"OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
"OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
"OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
"OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
"OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
"OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
"OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
"OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
"OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
"OP_CHECKMULTISIGVERIFY",
("OP_SINGLEBYTE_END", 0xF0),
("OP_DOUBLEBYTE_BEGIN", 0xF000),
"OP_PUBKEY", "OP_PUBKEYHASH",
("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(bytes):
i = 0
while i < len(bytes):
vch = None
opcode = ord(bytes[i])
i += 1
if opcode >= opcodes.OP_SINGLEBYTE_END:
opcode <<= 8
opcode |= ord(bytes[i])
i += 1
if opcode <= opcodes.OP_PUSHDATA4:
nSize = opcode
if opcode == opcodes.OP_PUSHDATA1:
nSize = ord(bytes[i])
i += 1
elif opcode == opcodes.OP_PUSHDATA2:
(nSize,) = struct.unpack_from('<H', bytes, i)
i += 2
elif opcode == opcodes.OP_PUSHDATA4:
(nSize,) = struct.unpack_from('<I', bytes, i)
i += 4
vch = bytes[i:i+nSize]
i += nSize
yield (opcode, vch, i)
def script_GetOpName(opcode):
return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
result = ''
for (opcode, vch, i) in script_GetOp(bytes):
if len(result) > 0: result += " "
if opcode <= opcodes.OP_PUSHDATA4:
result += "%d:"%(opcode,)
result += short_hex(vch)
else:
result += script_GetOpName(opcode)
return result
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
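# Hedged example of the template matching above: any opcode at or below
# OP_PUSHDATA4 in the decoded list stands for "some pushed data", so one
# template covers every push encoding. For a standard pay-to-pubkey-hash
# scriptPubKey (20 zero bytes as a stand-in hash):
#
#   script = '76a914' + '00' * 20 + '88ac'
#   decoded = [x for x in script_GetOp(script.decode('hex'))]
#   match_decoded(decoded, [opcodes.OP_DUP, opcodes.OP_HASH160,
#                           opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY,
#                           opcodes.OP_CHECKSIG])    # -> True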
def parse_sig(x_sig):
s = []
for sig in x_sig:
if sig[-2:] == '01':
s.append(sig[:-2])
else:
assert sig == NO_SIGNATURE
s.append(None)
return s
def is_extended_pubkey(x_pubkey):
return x_pubkey[0:2] in ['fe', 'ff']
def x_to_xpub(x_pubkey):
if x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
return xpub
def parse_xpub(x_pubkey):
if x_pubkey[0:2] in ['02','03','04']:
pubkey = x_pubkey
elif x_pubkey[0:2] == 'ff':
from account import BIP32_Account
xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
pubkey = BIP32_Account.derive_pubkey_from_xpub(xpub, s[0], s[1])
elif x_pubkey[0:2] == 'fe':
from account import OldAccount
mpk, s = OldAccount.parse_xpubkey(x_pubkey)
pubkey = OldAccount.get_pubkey_from_mpk(mpk.decode('hex'), s[0], s[1])
elif x_pubkey[0:2] == 'fd':
addrtype = ord(x_pubkey[2:4].decode('hex'))
hash160 = x_pubkey[4:].decode('hex')
pubkey = None
address = hash_160_to_bc_address(hash160, addrtype)
else:
raise BaseException("Cannot parse pubkey")
if pubkey:
address = public_key_to_bc_address(pubkey.decode('hex'))
return pubkey, address
def parse_scriptSig(d, bytes):
try:
decoded = [ x for x in script_GetOp(bytes) ]
except Exception:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bytes.encode('hex'))
return
# payto_pubkey
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
d['address'] = "(pubkey)"
d['signatures'] = [sig]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = decoded[0][1].encode('hex')
x_pubkey = decoded[1][1].encode('hex')
try:
signatures = parse_sig([sig])
pubkey, address = parse_xpub(x_pubkey)
except:
import traceback
traceback.print_exc(file=sys.stdout)
print_error("cannot find address in input script", bytes.encode('hex'))
return
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, 2 of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bytes.encode('hex'))
return
x_sig = [x[1].encode('hex') for x in decoded[1:-1]]
d['signatures'] = parse_sig(x_sig)
d['num_sig'] = 2
dec2 = [ x for x in script_GetOp(decoded[-1][1]) ]
match_2of2 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_2, opcodes.OP_CHECKMULTISIG ]
match_2of3 = [ opcodes.OP_2, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4, opcodes.OP_3, opcodes.OP_CHECKMULTISIG ]
if match_decoded(dec2, match_2of2):
x_pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex') ]
elif match_decoded(dec2, match_2of3):
x_pubkeys = [ dec2[1][1].encode('hex'), dec2[2][1].encode('hex'), dec2[3][1].encode('hex') ]
else:
print_error("cannot find address in input script", bytes.encode('hex'))
return
d['x_pubkeys'] = x_pubkeys
pubkeys = [parse_xpub(x)[0] for x in x_pubkeys] # parse_xpub() returns (pubkey, address); keep the pubkey
d['pubkeys'] = pubkeys
redeemScript = Transaction.multisig_script(pubkeys,2)
d['redeemScript'] = redeemScript
d['address'] = hash_160_to_bc_address(hash_160(redeemScript.decode('hex')), 5)
def get_address_from_output_script(bytes):
decoded = [ x for x in script_GetOp(bytes) ]
# The Genesis Block, self-payments, and pay-by-IP-address payments look like:
# 65 BYTES:... CHECKSIG
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return 'pubkey', decoded[0][1].encode('hex')
# Pay-by-Bitcoin-address TxOuts look like:
# DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
if match_decoded(decoded, match):
return 'address', hash_160_to_bc_address(decoded[2][1])
# p2sh
match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
if match_decoded(decoded, match):
return 'address', hash_160_to_bc_address(decoded[1][1],5)
# OP_RETURN
match = [ opcodes.OP_RETURN, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
return 'op_return', decoded[1][1]
return "(None)", "(None)"
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
d['scriptSig'] = scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
if prevout_hash == '00'*32:
d['is_coinbase'] = True
else:
d['is_coinbase'] = False
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
if scriptSig:
parse_scriptSig(d, scriptSig)
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = scriptPubKey.encode('hex')
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(raw.decode('hex'))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = list(parse_input(vds) for i in xrange(n_vin))
n_vout = vds.read_compact_size()
d['outputs'] = list(parse_output(vds,i) for i in xrange(n_vout))
d['lockTime'] = vds.read_uint32()
return d
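# Hedged usage sketch: given a raw transaction as a hex string (call it
# raw_hex), the decoder above produces a plain dict:
#
#   d = deserialize(raw_hex)
#   d['version'], d['lockTime']    # ints
#   d['inputs'], d['outputs']      # lists of dicts from parse_input/parse_output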
def push_script(x):
return op_push(len(x)/2) + x
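# Sketch (op_push is defined elsewhere in this package; its behavior is
# assumed from the use here): push_script() prefixes a hex payload with the
# opcode that pushes that many bytes, e.g.
#
#   push_script('00' * 20)    # -> '14' + '00' * 20, i.e. "push 20 bytes"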
class Transaction:
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, inputs, outputs, locktime=0):
self.inputs = inputs
self.outputs = outputs
self.locktime = locktime
self.raw = None
@classmethod
def deserialize(klass, raw):
self = klass([],[])
self.update(raw)
return self
def update(self, raw):
d = deserialize(raw)
self.raw = raw
self.inputs = d['inputs']
self.outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
self.locktime = d['lockTime']
@classmethod
def sweep(klass, privkeys, network, to_address, fee):
inputs = []
for privkey in privkeys:
pubkey = public_key_from_private_key(privkey)
address = address_from_private_key(privkey)
u = network.synchronous_get([ ('blockchain.address.listunspent',[address])])[0]
pay_script = klass.pay_script('address', address)
for item in u:
item['scriptPubKey'] = pay_script
item['redeemPubkey'] = pubkey
item['address'] = address
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [None]
item['signatures'] = [None]
item['num_sig'] = 1
inputs += u
if not inputs:
return
total = sum(i.get('value') for i in inputs) - fee
outputs = [('address', to_address, total)]
self = klass(inputs, outputs)
self.sign({ pubkey:privkey })
return self
@classmethod
def multisig_script(klass, public_keys, num=None):
n = len(public_keys)
if num is None: num = n
assert num <= n and n in [2, 3], 'Only "2 of 2" and "2 of 3" transactions are supported'
if num==2:
s = '52'
elif num == 3:
s = '53'
else:
raise Exception('number of required signatures must be 2 or 3')
for k in public_keys:
s += op_push(len(k)/2) + k
if n==2:
s += '52'
elif n==3:
s += '53'
else:
raise Exception('number of public keys must be 2 or 3')
s += 'ae'
return s
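# Hedged illustration of the script layout produced above: for two
# hex-encoded public keys pub1 and pub2,
#
#   Transaction.multisig_script([pub1, pub2], 2)
#
# yields OP_2 <pub1> <pub2> OP_2 OP_CHECKMULTISIG, i.e. the hex string
# '52' + push_script(pub1) + push_script(pub2) + '52' + 'ae'.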
@classmethod
def pay_script(self, type, addr):
if type == 'op_return':
h = addr.encode('hex')
return '6a' + push_script(h)
else:
assert type == 'address'
addrtype, hash_160 = bc_address_to_hash_160(addr)
if addrtype == 30:
script = '76a9' # op_dup, op_hash_160
script += push_script(hash_160.encode('hex'))
script += '88ac' # op_equalverify, op_checksig
elif addrtype == 22:
script = 'a9' # op_hash_160
script += push_script(hash_160.encode('hex'))
script += '87' # op_equal
else:
raise Exception('unknown address type: %d' % addrtype)
return script
def serialize(self, for_sig=None):
# for_sig:
# -1 : do not sign, estimate length
# i>=0 : serialized tx for signing input i
# None : add all known signatures
inputs = self.inputs
outputs = self.outputs
s = int_to_hex(1,4) # version
s += var_int( len(inputs) ) # number of inputs
for i, txin in enumerate(inputs):
s += txin['prevout_hash'].decode('hex')[::-1].encode('hex') # prev hash
s += int_to_hex(txin['prevout_n'],4) # prev index
p2sh = txin.get('redeemScript') is not None
num_sig = txin['num_sig']
address = txin['address']
x_signatures = txin['signatures']
signatures = filter(None, x_signatures)
is_complete = len(signatures) == num_sig
if for_sig in [-1, None]:
# if we have enough signatures, we use the actual pubkeys;
# otherwise, use extended pubkeys (with bip32 derivation)
if for_sig == -1:
# we assume that signature will be 0x48 bytes long
pubkeys = txin['pubkeys']
sig_list = [ "00" * 0x48 ] * num_sig
elif is_complete:
pubkeys = txin['pubkeys']
sig_list = ((sig + '01') for sig in signatures)
else:
pubkeys = txin['x_pubkeys']
sig_list = ((sig + '01') if sig else NO_SIGNATURE for sig in x_signatures)
script = ''.join(push_script(x) for x in sig_list)
if not p2sh:
x_pubkey = pubkeys[0]
if x_pubkey is None:
addrtype, h160 = bc_address_to_hash_160(txin['address'])
x_pubkey = 'fd' + (chr(addrtype) + h160).encode('hex')
script += push_script(x_pubkey)
else:
script = '00' + script # put op_0 in front of script
redeem_script = self.multisig_script(pubkeys,2)
script += push_script(redeem_script)
elif for_sig==i:
script = txin['redeemScript'] if p2sh else self.pay_script('address', address)
else:
script = ''
s += var_int( len(script)/2 ) # script length
s += script
s += "ffffffff" # sequence
s += var_int( len(outputs) ) # number of outputs
for output in outputs:
type, addr, amount = output
s += int_to_hex( amount, 8) # amount
script = self.pay_script(type, addr)
s += var_int( len(script)/2 ) # script length
s += script # script
s += int_to_hex(0,4) # lock time
if for_sig is not None and for_sig != -1:
s += int_to_hex(1, 4) # hash type
return s
def tx_for_sig(self,i):
return self.serialize(for_sig = i)
def hash(self):
return Hash(self.raw.decode('hex'))[::-1].encode('hex')
def add_input(self, input):
self.inputs.append(input)
self.raw = None
def input_value(self):
return sum(x['value'] for x in self.inputs)
def output_value(self):
return sum( val for tp,addr,val in self.outputs)
def get_fee(self):
return self.input_value() - self.output_value()
def signature_count(self):
r = 0
s = 0
for txin in self.inputs:
if txin.get('is_coinbase'):
continue
signatures = filter(None, txin['signatures'])
s += len(signatures)
r += txin['num_sig']
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
def inputs_to_sign(self):
out = set()
for txin in self.inputs:
x_signatures = txin['signatures']
signatures = filter(None, x_signatures)
if len(signatures) == txin['num_sig']:
# input is complete
continue
for k, x_pubkey in enumerate(txin['x_pubkeys']):
if x_signatures[k] is not None:
# this pubkey already signed
continue
out.add(x_pubkey)
return out
def sign(self, keypairs):
print_error("tx.sign(), keypairs:", keypairs)
for i, txin in enumerate(self.inputs):
signatures = filter(None, txin['signatures'])
num = txin['num_sig']
if len(signatures) == num:
# continue if this txin is complete
continue
for x_pubkey in txin['x_pubkeys']:
if x_pubkey in keypairs.keys():
print_error("adding signature for", x_pubkey)
# add pubkey to txin
txin = self.inputs[i]
x_pubkeys = txin['x_pubkeys']
ii = x_pubkeys.index(x_pubkey)
sec = keypairs[x_pubkey]
pubkey = public_key_from_private_key(sec)
txin['x_pubkeys'][ii] = pubkey
txin['pubkeys'][ii] = pubkey
self.inputs[i] = txin
# add signature
for_sig = Hash(self.tx_for_sig(i).decode('hex'))
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic( for_sig, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der )
assert public_key.verify_digest( sig, for_sig, sigdecode = ecdsa.util.sigdecode_der)
txin['signatures'][ii] = sig.encode('hex')
self.inputs[i] = txin
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def add_pubkey_addresses(self, txdict):
for txin in self.inputs:
if txin.get('address') == "(pubkey)":
prev_tx = txdict.get(txin.get('prevout_hash'))
if prev_tx:
address, value = prev_tx.get_outputs()[txin.get('prevout_n')]
print_error("found pay-to-pubkey address:", address)
txin["address"] = address
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, x, v in self.outputs:
if type == 'address':
addr = x
elif type == 'pubkey':
addr = public_key_to_bc_address(x.decode('hex'))
elif type == 'op_return':
try:
addr = 'OP_RETURN: "' + x.decode('utf8') + '"'
except:
addr = 'OP_RETURN: "' + x.encode('hex') + '"'
else:
addr = "(None)"
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (txin.get("address") for txin in self.inputs))
def get_value(self, addresses, prevout_values):
# return the balance for that tx
is_relevant = False
is_send = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
for item in self.inputs:
addr = item.get('address')
if addr in addresses:
is_send = True
is_relevant = True
key = item['prevout_hash'] + ':%d'%item['prevout_n']
value = prevout_values.get( key )
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_send: is_partial = False
for addr, value in self.get_outputs():
v_out += value
if addr in addresses:
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_send:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
is_send = v < 0
else:
# all inputs are mine
fee = v_out - v_in
return is_relevant, is_send, v, fee
def as_dict(self):
import json
out = {
"hex":str(self),
"complete":self.is_complete()
}
return out
def requires_fee(self, verifier):
# see https://en.bitcoin.it/wiki/Transaction_fees
#
# size must be smaller than 1 kbyte for free tx
size = len(self.serialize(-1))/2
if size >= 10000:
return True
# all outputs must be 0.01 BTC or larger for free tx
for addr, value in self.get_outputs():
if value < 1000000:
return True
# priority must be large enough for free tx
threshold = 57600000
weight = 0
for txin in self.inputs:
age = verifier.get_confirmations(txin["prevout_hash"])[0]
weight += txin["value"] * age
priority = weight / size
print_error(priority, threshold)
return priority < threshold
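# Worked example of the priority rule above (illustrative numbers): a
# 500-byte transaction spending a single 1 BTC input (100,000,000 units)
# with 144 confirmations has priority 100000000 * 144 / 500 = 28,800,000,
# which is below the 57,600,000 threshold, so it would still require a fee.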
|
evangeline97/localwiki-backend-server
|
refs/heads/master
|
localwiki/tags/admin.py
|
4
|
from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from models import Tag
class TagAdmin(GuardedModelAdmin):
pass
admin.site.register(Tag, TagAdmin)
|
teoliphant/scipy
|
refs/heads/master
|
scipy/weave/examples/wx_speed.py
|
12
|
""" Implements a fast replacement for calling DrawLines with an array as an
argument. It uses weave, so you'll need that installed.
Copyright: Space Telescope Science Institute
License: BSD Style
Designed by: Enthought, Inc.
Author: Eric Jones eric@enthought.com
I wrote this because I was seeing very bad performance for DrawLines when
called with a large number of points -- 5000-30000. Since then, I have found
the performance is sometimes OK and sometimes very poor; drawing to a
MemoryDC seems to be worse than drawing to the screen. My first cut of the
routine just called PolyLine directly, but I got lousy performance from that
also. After noticing that the slowdown grew much worse than linearly with
array length, I tried the following "chunking" algorithm. It is much more
efficient (sometimes by 2 orders of magnitude, but usually only a factor
of 3). There is a slight drawback in that it will draw end caps for each
chunk of the array, which is not strictly correct. I don't imagine this is
a major problem, but it remains an open issue.
"""
import sys
import scipy.weave as weave
from numpy.random import *
from numpy import *
from wxPython.wx import *
"""
const int n_pts = _Nline[0];
const int bunch_size = 100;
const int bunches = n_pts / bunch_size;
const int left_over = n_pts % bunch_size;
for (int i = 0; i < bunches; i++)
{
Polyline(hdc,(POINT*)p_data,bunch_size);
p_data += bunch_size*2; //*2 for two longs per point
}
Polyline(hdc,(POINT*)p_data,left_over);
"""
def polyline(dc,line,xoffset=0,yoffset=0):
#------------------------------------------------------------------------
# Make sure the array is the correct size/shape
#------------------------------------------------------------------------
shp = line.shape
assert(len(shp)==2 and shp[1] == 2)
#------------------------------------------------------------------------
# Offset data if necessary
#------------------------------------------------------------------------
if xoffset or yoffset:
line = line + array((xoffset,yoffset),line.typecode())
#------------------------------------------------------------------------
# Define the win32 version of the function
#------------------------------------------------------------------------
if sys.platform == 'win32':
# win32 requires int type for lines.
if not issubclass(line.dtype.type, int) or not line.iscontiguous():
line = line.astype(int)
code = """
HDC hdc = (HDC) dc->GetHDC();
Polyline(hdc,(POINT*)line,Nline[0]);
"""
else:
if (line.typecode() != uint16 or
not line.iscontiguous()):
line = line.astype(uint16)
code = """
GdkWindow* win = dc->m_window;
GdkGC* pen = dc->m_penGC;
gdk_draw_lines(win,pen,(GdkPoint*)line,Nline[0]);
"""
weave.inline(code,['dc','line'])
#------------------------------------------------------------------------
# Find the maximum and minimum points in the drawing list and add
# them to the bounding box.
#------------------------------------------------------------------------
max_pt = maximum.reduce(line,0)
min_pt = minimum.reduce(line,0)
dc.CalcBoundingBox(max_pt[0],max_pt[1])
dc.CalcBoundingBox(min_pt[0],min_pt[1])
#-----------------------------------------------------------------------------
# Define a new version of DrawLines that calls the optimized
# version for numpy arrays when appropriate.
#-----------------------------------------------------------------------------
def NewDrawLines(dc,line):
"""
"""
if (type(line) is ndarray):
polyline(dc,line)
else:
dc.DrawLines(line)
#-----------------------------------------------------------------------------
# And attach our new method to the wxPaintDC class
# !! We have disabled it and called polyline directly in this example
# !! to get timing comparison between the old and new way.
#-----------------------------------------------------------------------------
#wxPaintDC.DrawLines = NewDrawLines
if __name__ == '__main__':
from wxPython.wx import *
import time
class Canvas(wxWindow):
def __init__(self, parent, id = -1, size = wxDefaultSize):
wxWindow.__init__(self, parent, id, wxPoint(0, 0), size,
wxSUNKEN_BORDER | wxWANTS_CHARS)
self.calc_points()
EVT_PAINT(self, self.OnPaint)
EVT_SIZE(self, self.OnSize)
def calc_points(self):
w,h = self.GetSizeTuple()
#x = randint(0+50, w-50, self.point_count)
#y = randint(0+50, h-50, len(x))
x = arange(0,w,typecode=int32)
y = h/2.*sin(x*2*pi/w)+h/2.
y = y.astype(int32)
self.points = concatenate((x[:,newaxis],y[:,newaxis]),-1)
def OnSize(self,event):
self.calc_points()
self.Refresh()
def OnPaint(self,event):
w,h = self.GetSizeTuple()
print len(self.points)
dc = wxPaintDC(self)
dc.BeginDrawing()
# This first call is slow because your either compiling (very slow)
# or loading a DLL (kinda slow)
# Resize the window to get a more realistic timing.
pt_copy = self.points.copy()
t1 = time.clock()
offset = array((1,0))
mod = array((w,0))
x = pt_copy[:,0]
ang = 2*pi/w
size = 1
red_pen = wxPen('red',size)
white_pen = wxPen('white',size)
blue_pen = wxPen('blue',size)
pens = iter([red_pen,white_pen,blue_pen])
phase = 10
for i in range(1500):
if phase > 2*pi:
phase = 0
try:
pen = pens.next()
except:
pens = iter([red_pen,white_pen,blue_pen])
pen = pens.next()
dc.SetPen(pen)
polyline(dc,pt_copy)
next_y = (h/2.*sin(x*ang-phase)+h/2.).astype(int32)
pt_copy[:,1] = next_y
phase += ang
t2 = time.clock()
print 'Weave Polyline:', t2-t1
t1 = time.clock()
pt_copy = self.points.copy()
pens = iter([red_pen,white_pen,blue_pen])
phase = 10
for i in range(1500):
if phase > 2*pi:
phase = 0
try:
pen = pens.next()
except:
pens = iter([red_pen,white_pen,blue_pen])
pen = pens.next()
dc.SetPen(pen)
dc.DrawLines(pt_copy)
next_y = (h/2.*sin(x*ang-phase)+h/2.).astype(int32)
pt_copy[:,1] = next_y
phase += ang
t2 = time.clock()
dc.SetPen(red_pen)
print 'wxPython DrawLines:', t2-t1
dc.EndDrawing()
class CanvasWindow(wxFrame):
def __init__(self, id=-1, title='Canvas',size=(500,500)):
parent = NULL
wxFrame.__init__(self, parent,id,title, size=size)
self.canvas = Canvas(self)
self.Show(1)
class MyApp(wxApp):
def OnInit(self):
frame = CanvasWindow(title="Speed Examples",size=(500,500))
frame.Show(true)
return true
app = MyApp(0)
app.MainLoop()
|
carlos-ferras/Sequence-ToolKit
|
refs/heads/master
|
pyqtgraph/widgets/GradientWidget.py
|
50
|
# -*- coding: utf-8 -*-
from ..Qt import QtGui, QtCore
from .GraphicsView import GraphicsView
from ..graphicsItems.GradientEditorItem import GradientEditorItem
import weakref
import numpy as np
__all__ = ['GradientWidget']
class GradientWidget(GraphicsView):
"""
Widget displaying an editable color gradient. The user may add, move, recolor,
or remove colors from the gradient. Additionally, a context menu allows the
user to select from pre-defined gradients.
"""
sigGradientChanged = QtCore.Signal(object)
sigGradientChangeFinished = QtCore.Signal(object)
def __init__(self, parent=None, orientation='bottom', *args, **kargs):
"""
The *orientation* argument may be 'bottom', 'top', 'left', or 'right'
indicating whether the gradient is displayed horizontally (top, bottom)
or vertically (left, right) and on what side of the gradient the editable
ticks will appear.
All other arguments are passed to
:func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`.
Note: For convenience, this class wraps methods from
:class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
"""
GraphicsView.__init__(self, parent, useOpenGL=False, background=None)
self.maxDim = 31
kargs['tickPen'] = 'k'
self.item = GradientEditorItem(*args, **kargs)
self.item.sigGradientChanged.connect(self.sigGradientChanged)
self.item.sigGradientChangeFinished.connect(self.sigGradientChangeFinished)
self.setCentralItem(self.item)
self.setOrientation(orientation)
self.setCacheMode(self.CacheNone)
self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing)
self.setFrameStyle(QtGui.QFrame.NoFrame | QtGui.QFrame.Plain)
#self.setBackgroundRole(QtGui.QPalette.NoRole)
#self.setBackgroundBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
#self.setAutoFillBackground(False)
#self.setAttribute(QtCore.Qt.WA_PaintOnScreen, False)
#self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
def setOrientation(self, ort):
"""Set the orientation of the widget. May be one of 'bottom', 'top',
'left', or 'right'."""
self.item.setOrientation(ort)
self.orientation = ort
self.setMaxDim()
def setMaxDim(self, mx=None):
if mx is None:
mx = self.maxDim
else:
self.maxDim = mx
if self.orientation in ['bottom', 'top']:
self.setFixedHeight(mx)
self.setMaximumWidth(16777215)
else:
self.setFixedWidth(mx)
self.setMaximumHeight(16777215)
def __getattr__(self, attr):
### wrap methods from GradientEditorItem
return getattr(self.item, attr)
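# Hedged usage sketch (inside a running Qt application; method names past
# setOrientation are forwarded to GradientEditorItem via __getattr__ above,
# so they assume that class's API):
#
#   w = GradientWidget(orientation='bottom')
#   w.sigGradientChanged.connect(lambda item: None)   # react to edits
#   lut = w.getLookupTable(256)                       # forwarded call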
|
jomyhuang/sdwle
|
refs/heads/master
|
SDWLE/cards_copy/weapons/rogue.py
|
2
|
from SDWLE.cards.base import WeaponCard
from SDWLE.game_objects import Weapon
from SDWLE.tags.action import Damage
from SDWLE.tags.base import Battlecry, Buff
from SDWLE.tags.condition import GreaterThan, IsType
from SDWLE.tags.selector import CharacterSelector, UserPicker, Count, MinionSelector
from SDWLE.tags.status import ChangeAttack
from SDWLE.constants import CHARACTER_CLASS, CARD_RARITY, MINION_TYPE
class WickedKnife(WeaponCard):
def __init__(self):
super().__init__("Wicked Knife", 1, CHARACTER_CLASS.ROGUE, CARD_RARITY.FREE, False)
def create_weapon(self, player):
return Weapon(1, 2)
class AssassinsBlade(WeaponCard):
def __init__(self):
super().__init__("Assassin's Blade", 5, CHARACTER_CLASS.ROGUE, CARD_RARITY.COMMON)
def create_weapon(self, player):
return Weapon(3, 4)
class PerditionsBlade(WeaponCard):
def __init__(self):
super().__init__("Perdition's Blade", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.RARE,
battlecry=Battlecry(Damage(1), CharacterSelector(None, picker=UserPicker())),
combo=Battlecry(Damage(2), CharacterSelector(None, picker=UserPicker())))
def create_weapon(self, player):
return Weapon(2, 2)
class CogmastersWrench(WeaponCard):
def __init__(self):
super().__init__("Cogmaster's Wrench", 3, CHARACTER_CLASS.ROGUE, CARD_RARITY.EPIC)
def create_weapon(self, player):
return Weapon(1, 3, buffs=[Buff(ChangeAttack(2), GreaterThan(Count(MinionSelector(IsType(MINION_TYPE.MECH))),
value=0))])
|
yawnosnorous/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_future.py
|
57
|
# Test various flavors of legal and illegal future statements
import unittest
from test import support
import re
rx = re.compile(r'\((\S+)\.py, line (\d+)')
def get_error_location(msg):
mo = rx.search(str(msg))
return mo.group(1, 2)
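# e.g. a SyntaxError whose message ends with "(badsyntax_future3.py, line 3)"
# yields ('badsyntax_future3', '3').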
class FutureTest(unittest.TestCase):
def test_future1(self):
support.unload('test_future1')
from test import test_future1
self.assertEqual(test_future1.result, 6)
def test_future2(self):
support.unload('test_future2')
from test import test_future2
self.assertEqual(test_future2.result, 6)
def test_future3(self):
support.unload('test_future3')
from test import test_future3
def test_badfuture3(self):
try:
from test import badsyntax_future3
except SyntaxError as msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future3", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture4(self):
try:
from test import badsyntax_future4
except SyntaxError as msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future4", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture5(self):
try:
from test import badsyntax_future5
except SyntaxError as msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future5", '4'))
else:
self.fail("expected exception didn't occur")
def test_badfuture6(self):
try:
from test import badsyntax_future6
except SyntaxError as msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future6", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture7(self):
try:
from test import badsyntax_future7
except SyntaxError as msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future7", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture8(self):
try:
from test import badsyntax_future8
except SyntaxError as msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future8", '3'))
else:
self.fail("expected exception didn't occur")
def test_badfuture9(self):
try:
from test import badsyntax_future9
except SyntaxError as msg:
self.assertEqual(get_error_location(msg), ("badsyntax_future9", '3'))
else:
self.fail("expected exception didn't occur")
def test_parserhack(self):
# test that the parser.c::future_hack function works as expected
# Note: although this test must pass, it's not testing the original
# bug as of 2.6, since the with statement is no longer optional and
# the parser hack is disabled. If a new keyword is introduced in
# 2.6, change this to refer to the new future import.
try:
exec("from __future__ import print_function; print 0")
except SyntaxError:
pass
else:
self.fail("syntax error didn't occur")
try:
exec("from __future__ import (print_function); print 0")
except SyntaxError:
pass
else:
self.fail("syntax error didn't occur")
def test_multiple_features(self):
support.unload("test.test_future5")
from test import test_future5
def test_unicode_literals_exec(self):
scope = {}
exec("from __future__ import unicode_literals; x = ''", {}, scope)
self.assertIsInstance(scope["x"], str)
def test_main():
support.run_unittest(FutureTest)
if __name__ == "__main__":
test_main()
|
daodaoliang/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/data/region_GW.py
|
7
|
"""Auto-generated file, do not edit by hand. GW metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_GW = PhoneMetadata(id='GW', country_code=245, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[3-79]\\d{6}', possible_number_pattern='\\d{7}'),
fixed_line=PhoneNumberDesc(national_number_pattern='3(?:2[0125]|3[1245]|4[12]|5[1-4]|70|9[1-467])\\d{4}', possible_number_pattern='\\d{7}', example_number='3201234'),
mobile=PhoneNumberDesc(national_number_pattern='(?:[5-7]\\d|9[012])\\d{5}', possible_number_pattern='\\d{7}', example_number='5012345'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='40\\d{5}', possible_number_pattern='\\d{7}', example_number='4012345'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{4})', format='\\1 \\2')])
|
dhruvagarwal/django
|
refs/heads/master
|
django/contrib/gis/db/backends/postgis/const.py
|
528
|
"""
PostGIS to GDAL conversion constant definitions
"""
# Lookup to convert pixel type values from GDAL to PostGIS
GDAL_TO_POSTGIS = [None, 4, 6, 5, 8, 7, 10, 11, None, None, None, None]
# Lookup to convert pixel type values from PostGIS to GDAL
POSTGIS_TO_GDAL = [1, 1, 1, 3, 1, 3, 2, 5, 4, None, 6, 7, None, None]
# Struct pack structure for raster header, the raster header has the
# following structure:
#
# Endianness, PostGIS raster version, number of bands, scale, origin,
# skew, srid, width, and height.
#
# Scale, origin, and skew have x and y values. PostGIS currently uses
# a fixed endianness (1) and there is only one version (0).
POSTGIS_HEADER_STRUCTURE = 'B H H d d d d d d i H H'
# Lookup values to convert GDAL pixel types to struct characters. This is
# used to pack and unpack the pixel values of PostGIS raster bands.
GDAL_TO_STRUCT = [
None, 'B', 'H', 'h', 'L', 'l', 'f', 'd',
None, None, None, None,
]
# Size of the packed value in bytes for different numerical types.
# This is needed to cut chunks of band data out of PostGIS raster strings
# when decomposing them into GDALRasters.
# See https://docs.python.org/3/library/struct.html#format-characters
STRUCT_SIZE = {
'b': 1, # Signed char
'B': 1, # Unsigned char
'?': 1, # _Bool
'h': 2, # Short
'H': 2, # Unsigned short
'i': 4, # Integer
'I': 4, # Unsigned Integer
'l': 4, # Long
'L': 4, # Unsigned Long
'f': 4, # Float
'd': 8, # Double
}
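# Hedged usage sketch: the header format above can be applied with the
# standard struct module, prefixing '<' or '>' once the endianness flag in
# the first byte is known (raster_bytes is a hypothetical buffer):
#
#   import struct
#   fmt = '<' + POSTGIS_HEADER_STRUCTURE
#   header_size = struct.calcsize(fmt)    # 61 bytes
#   (endian, version, n_bands, scale_x, scale_y, origin_x, origin_y,
#    skew_x, skew_y, srid, width, height) = struct.unpack_from(fmt, raster_bytes)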
|
kingvuplus/gui_test2
|
refs/heads/master
|
lib/python/Plugins/SystemPlugins/SatelliteEquipmentControl/__init__.py
|
12133432
| |
makemytrip/dataShark
|
refs/heads/master
|
plugins/output/__init__.py
|
12133432
| |
evildmp/django-cms
|
refs/heads/master
|
cms/test_utils/project/pluginapp/plugins/__init__.py
|
12133432
| |
russelmahmud/mess-account
|
refs/heads/master
|
django/db/backends/dummy/__init__.py
|
12133432
| |
mdibaiee/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/modal/__init__.py
|
12133432
| |
dims/oslo.messaging
|
refs/heads/master
|
oslo_messaging/_drivers/zmq_driver/client/__init__.py
|
12133432
| |
landryb/QGIS
|
refs/heads/master
|
python/plugins/processing/ProcessingPlugin.py
|
4
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingPlugin.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import shutil
import inspect
import os
import sys
from PyQt4.QtCore import Qt, QCoreApplication, QDir
from PyQt4.QtGui import QMenu, QAction, QIcon
from processing.core.Processing import Processing
from processing.gui.ProcessingToolbox import ProcessingToolbox
from processing.gui.HistoryDialog import HistoryDialog
from processing.gui.ConfigDialog import ConfigDialog
from processing.gui.ResultsDialog import ResultsDialog
from processing.gui.CommanderWindow import CommanderWindow
from processing.modeler.ModelerDialog import ModelerDialog
from processing.tools.system import tempFolder
cmd_folder = os.path.split(inspect.getfile(inspect.currentframe()))[0]
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
class ProcessingPlugin:
def __init__(self, iface):
self.iface = iface
def initGui(self):
Processing.initialize()
self.commander = None
self.toolbox = ProcessingToolbox()
self.iface.addDockWidget(Qt.RightDockWidgetArea, self.toolbox)
self.toolbox.hide()
Processing.addAlgListListener(self.toolbox)
self.menu = QMenu(self.iface.mainWindow().menuBar())
self.menu.setObjectName('processing')
self.menu.setTitle(self.tr('Pro&cessing'))
self.toolboxAction = self.toolbox.toggleViewAction()
self.toolboxAction.setObjectName('toolboxAction')
self.toolboxAction.setIcon(
QIcon(os.path.join(cmd_folder, 'images', 'alg.png')))
self.toolboxAction.setText(self.tr('&Toolbox'))
self.menu.addAction(self.toolboxAction)
self.modelerAction = QAction(
QIcon(os.path.join(cmd_folder, 'images', 'model.png')),
self.tr('Graphical &Modeler...'), self.iface.mainWindow())
self.modelerAction.setObjectName('modelerAction')
self.modelerAction.triggered.connect(self.openModeler)
self.menu.addAction(self.modelerAction)
self.historyAction = QAction(
QIcon(os.path.join(cmd_folder, 'images', 'history.gif')),
self.tr('&History...'), self.iface.mainWindow())
self.historyAction.setObjectName('historyAction')
self.historyAction.triggered.connect(self.openHistory)
self.menu.addAction(self.historyAction)
self.configAction = QAction(
QIcon(os.path.join(cmd_folder, 'images', 'config.png')),
self.tr('&Options...'), self.iface.mainWindow())
self.configAction.setObjectName('configAction')
self.configAction.triggered.connect(self.openConfig)
self.menu.addAction(self.configAction)
self.resultsAction = QAction(
QIcon(os.path.join(cmd_folder, 'images', 'results.png')),
self.tr('&Results Viewer...'), self.iface.mainWindow())
self.resultsAction.setObjectName('resultsAction')
self.resultsAction.triggered.connect(self.openResults)
self.menu.addAction(self.resultsAction)
menuBar = self.iface.mainWindow().menuBar()
menuBar.insertMenu(
self.iface.firstRightStandardMenu().menuAction(), self.menu)
self.commanderAction = QAction(
QIcon(os.path.join(cmd_folder, 'images', 'commander.png')),
self.tr('&Commander'), self.iface.mainWindow())
self.commanderAction.setObjectName('commanderAction')
self.commanderAction.triggered.connect(self.openCommander)
self.menu.addAction(self.commanderAction)
self.iface.registerMainWindowAction(self.commanderAction,
self.tr('Ctrl+Alt+M'))
def unload(self):
self.toolbox.setVisible(False)
self.menu.deleteLater()
# delete temporary output files
folder = tempFolder()
if QDir(folder).exists():
shutil.rmtree(folder, True)
self.iface.unregisterMainWindowAction(self.commanderAction)
def openCommander(self):
if self.commander is None:
self.commander = CommanderWindow(
self.iface.mainWindow(),
self.iface.mapCanvas())
Processing.addAlgListListener(self.commander)
self.commander.prepareGui()
self.commander.show()
def openToolbox(self):
if self.toolbox.isVisible():
self.toolbox.hide()
else:
self.toolbox.show()
def openModeler(self):
dlg = ModelerDialog()
dlg.exec_()
if dlg.update:
self.toolbox.updateProvider('model')
def openResults(self):
dlg = ResultsDialog()
dlg.show()
dlg.exec_()
def openHistory(self):
dlg = HistoryDialog()
dlg.exec_()
def openConfig(self):
dlg = ConfigDialog(self.toolbox)
dlg.exec_()
def tr(self, message):
return QCoreApplication.translate('ProcessingPlugin', message)
|
dgoedkoop/QGIS
|
refs/heads/master
|
scripts/qgis_fixes/fix_methodattrs.py
|
77
|
from lib2to3.fixes.fix_methodattrs import FixMethodattrs
|
TakeshiTseng/ryu
|
refs/heads/master
|
ryu/lib/packet/bfd.py
|
12
|
# Copyright (C) 2014 Xinguard, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
BFD Control packet parser/serializer
RFC 5880
BFD Control packet format
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| My Discriminator |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Your Discriminator |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Desired Min TX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Required Min RX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Required Min Echo RX Interval |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
An optional Authentication Section MAY be present, in one of the
following formats:
1. Format of Simple Password Authentication Section
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Type | Auth Len | Auth Key ID | Password... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
2. Format of Keyed MD5 and Meticulous Keyed MD5 Authentication Section
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Type | Auth Len | Auth Key ID | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sequence Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Key/Digest... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
3. Format of Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Type | Auth Len | Auth Key ID | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Sequence Number |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Auth Key/Hash... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ... |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
import binascii
import hashlib
import random
import six
import struct
from . import packet_base
from ryu.lib import addrconv
from ryu.lib import stringify
BFD_STATE_ADMIN_DOWN = 0
BFD_STATE_DOWN = 1
BFD_STATE_INIT = 2
BFD_STATE_UP = 3
BFD_STATE_NAME = {0: "AdminDown",
1: "Down",
2: "Init",
3: "Up"}
BFD_FLAG_POLL = 1 << 5
BFD_FLAG_FINAL = 1 << 4
BFD_FLAG_CTRL_PLANE_INDEP = 1 << 3
BFD_FLAG_AUTH_PRESENT = 1 << 2
BFD_FLAG_DEMAND = 1 << 1
BFD_FLAG_MULTIPOINT = 1
BFD_DIAG_NO_DIAG = 0
BFD_DIAG_CTRL_DETECT_TIME_EXPIRED = 1
BFD_DIAG_ECHO_FUNC_FAILED = 2
BFD_DIAG_NEIG_SIG_SESS_DOWN = 3
BFD_DIAG_FWD_PLANE_RESET = 4
BFD_DIAG_PATH_DOWN = 5
BFD_DIAG_CONCAT_PATH_DOWN = 6
BFD_DIAG_ADMIN_DOWN = 7
BFD_DIAG_REV_CONCAT_PATH_DOWN = 8
BFD_DIAG_CODE_NAME = {0: "No Diagnostic",
1: "Control Detection Time Expired",
2: "Echo Function Failed",
3: "Neighbor Signaled Session Down",
4: "Forwarding Plane Reset",
5: "Path Down",
6: "Concatenated Path Down",
7: "Administratively Down",
8: "Reverse Concatenated Path Down"}
BFD_AUTH_RESERVED = 0
BFD_AUTH_SIMPLE_PASS = 1
BFD_AUTH_KEYED_MD5 = 2
BFD_AUTH_METICULOUS_KEYED_MD5 = 3
BFD_AUTH_KEYED_SHA1 = 4
BFD_AUTH_METICULOUS_KEYED_SHA1 = 5
BFD_AUTH_TYPE_NAME = {0: "Reserved",
1: "Simple Password",
2: "Keyed MD5",
3: "Meticulous Keyed MD5",
4: "Keyed SHA1",
5: "Meticulous Keyed SHA1"}
class bfd(packet_base.PacketBase):
"""BFD (RFC 5880) Control packet encoder/decoder class.
The serialized packet looks like the one described
in the following section.
* RFC 5880 Generic BFD Control Packet Format
An instance has at least the following attributes.
Most of them are the same as the on-wire counterparts but in host byte order.
__init__ takes the corresponding args in this order.
.. tabularcolumns:: |l|L|
============================== ============================================
Attribute Description
============================== ============================================
ver The version number of the protocol.
This class implements protocol version 1.
diag A diagnostic code specifying the local
system's reason for the last change in
session state.
state The current BFD session state as seen by
the transmitting system.
flags Bitmap of the following flags.
| BFD_FLAG_POLL
| BFD_FLAG_FINAL
| BFD_FLAG_CTRL_PLANE_INDEP
| BFD_FLAG_AUTH_PRESENT
| BFD_FLAG_DEMAND
| BFD_FLAG_MULTIPOINT
detect_mult Detection time multiplier.
my_discr My Discriminator.
your_discr Your Discriminator.
desired_min_tx_interval Desired Min TX Interval. (in microseconds)
required_min_rx_interval Required Min RX Interval. (in microseconds)
required_min_echo_rx_interval Required Min Echo RX Interval.
(in microseconds)
auth_cls (Optional) Authentication Section instance.
It's defined only when the Authentication
Present (A) bit is set in flags.
Assign an instance of the following classes:
``SimplePassword``, ``KeyedMD5``,
``MeticulousKeyedMD5``, ``KeyedSHA1``, and
``MeticulousKeyedSHA1``.
length (Optional) Length of the BFD Control packet,
in bytes.
============================== ============================================
"""
_PACK_STR = '!BBBBIIIII'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
_TYPE = {
'ascii': []
}
_auth_parsers = {}
def __init__(self, ver=1, diag=0, state=0, flags=0, detect_mult=0,
my_discr=0, your_discr=0, desired_min_tx_interval=0,
required_min_rx_interval=0, required_min_echo_rx_interval=0,
auth_cls=None, length=None):
super(bfd, self).__init__()
self.ver = ver
self.diag = diag
self.state = state
self.flags = flags
self.detect_mult = detect_mult
self.my_discr = my_discr
self.your_discr = your_discr
self.desired_min_tx_interval = desired_min_tx_interval
self.required_min_rx_interval = required_min_rx_interval
self.required_min_echo_rx_interval = required_min_echo_rx_interval
self.auth_cls = auth_cls
if isinstance(length, int):
self.length = length
else:
self.length = len(self)
def __len__(self):
if self.flags & BFD_FLAG_AUTH_PRESENT and self.auth_cls is not None:
return self._PACK_STR_LEN + len(self.auth_cls)
else:
return self._PACK_STR_LEN
@classmethod
def parser(cls, buf):
(diag, flags, detect_mult, length, my_discr, your_discr,
desired_min_tx_interval, required_min_rx_interval,
required_min_echo_rx_interval) = \
struct.unpack_from(cls._PACK_STR, buf[:cls._PACK_STR_LEN])
ver = diag >> 5
diag = diag & 0x1f
state = flags >> 6
flags = flags & 0x3f
if flags & BFD_FLAG_AUTH_PRESENT:
auth_type = six.indexbytes(buf, cls._PACK_STR_LEN)
auth_cls = cls._auth_parsers[auth_type].\
parser(buf[cls._PACK_STR_LEN:])[0]
else:
auth_cls = None
msg = cls(ver, diag, state, flags, detect_mult,
my_discr, your_discr, desired_min_tx_interval,
required_min_rx_interval, required_min_echo_rx_interval,
auth_cls)
return msg, None, None
def serialize(self, payload, prev):
if self.flags & BFD_FLAG_AUTH_PRESENT and self.auth_cls is not None:
return self.pack() + \
self.auth_cls.serialize(payload=None, prev=self)
else:
return self.pack()
def pack(self):
"""
Encode a BFD Control packet without authentication section.
"""
diag = (self.ver << 5) + self.diag
flags = (self.state << 6) + self.flags
length = len(self)
return struct.pack(self._PACK_STR, diag, flags, self.detect_mult,
length, self.my_discr, self.your_discr,
self.desired_min_tx_interval,
self.required_min_rx_interval,
self.required_min_echo_rx_interval)
def authenticate(self, *args, **kwargs):
"""Authenticate this packet.
Returns a boolean indicating whether the packet can be authenticated
or not.
Returns ``False`` if the Authentication Present (A) bit is not set in
the flags of this packet.
Returns ``False`` if the Authentication Section for this packet is not
present.
For a description of the arguments of this method, refer to the
``authenticate`` methods of the Authentication Section classes.
"""
if not self.flags & BFD_FLAG_AUTH_PRESENT or \
not issubclass(self.auth_cls.__class__, BFDAuth):
return False
return self.auth_cls.authenticate(self, *args, **kwargs)
@classmethod
def set_auth_parser(cls, auth_cls):
cls._auth_parsers[auth_cls.auth_type] = auth_cls
@classmethod
def register_auth_type(cls, auth_type):
def _set_type(auth_cls):
auth_cls.set_type(auth_cls, auth_type)
cls.set_auth_parser(auth_cls)
return auth_cls
return _set_type
class BFDAuth(stringify.StringifyMixin):
"""Base class of BFD (RFC 5880) Authentication Section
An instance has at least the following attributes.
Most of them are the same as the on-wire counterparts but in host byte order.
.. tabularcolumns:: |l|L|
=========== ============================================
Attribute Description
=========== ============================================
auth_type The authentication type in use.
auth_len The length, in bytes, of the authentication
section, including the ``auth_type`` and
``auth_len`` fields.
=========== ============================================
"""
_PACK_HDR_STR = '!BB'
_PACK_HDR_STR_LEN = struct.calcsize(_PACK_HDR_STR)
auth_type = None
def __init__(self, auth_len=None):
super(BFDAuth, self).__init__()
if isinstance(auth_len, int):
self.auth_len = auth_len
else:
self.auth_len = len(self)
@staticmethod
def set_type(subcls, auth_type):
assert issubclass(subcls, BFDAuth)
subcls.auth_type = auth_type
@classmethod
def parser_hdr(cls, buf):
"""
Parser for common part of authentication section.
"""
return struct.unpack_from(cls._PACK_HDR_STR,
buf[:cls._PACK_HDR_STR_LEN])
def serialize_hdr(self):
"""
Serialization function for common part of authentication section.
"""
return struct.pack(self._PACK_HDR_STR, self.auth_type, self.auth_len)
@bfd.register_auth_type(BFD_AUTH_SIMPLE_PASS)
class SimplePassword(BFDAuth):
""" BFD (RFC 5880) Simple Password Authentication Section class
An instance has the following attributes.
Most of them are the same as the on-wire counterparts but in host byte order.
.. tabularcolumns:: |l|L|
=========== ============================================
Attribute Description
=========== ============================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
password The simple password in use on this session.
The password is a binary string, and MUST be
from 1 to 16 bytes in length.
auth_len The length, in bytes, of the authentication
section, including the ``auth_type`` and
``auth_len`` fields.
=========== ============================================
"""
_PACK_STR = '!B'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
def __init__(self, auth_key_id, password, auth_len=None):
assert len(password) >= 1 and len(password) <= 16
self.auth_key_id = auth_key_id
self.password = password
super(SimplePassword, self).__init__(auth_len)
def __len__(self):
return self._PACK_HDR_STR_LEN + self._PACK_STR_LEN + len(self.password)
@classmethod
def parser(cls, buf):
(auth_type, auth_len) = cls.parser_hdr(buf)
assert auth_type == cls.auth_type
auth_key_id = six.indexbytes(buf, cls._PACK_HDR_STR_LEN)
password = buf[cls._PACK_HDR_STR_LEN + cls._PACK_STR_LEN:auth_len]
msg = cls(auth_key_id, password, auth_len)
return msg, None, None
def serialize(self, payload, prev):
"""Encode a Simple Password Authentication Section.
``payload`` is the rest of the packet which will immediately follow
this section.
``prev`` is a ``bfd`` instance for the BFD Control header. It's not
necessary for encoding only the Simple Password section.
"""
return self.serialize_hdr() + \
struct.pack(self._PACK_STR, self.auth_key_id) + self.password
def authenticate(self, prev=None, auth_keys=None):
"""Authenticate the password for this packet.
This method can be invoked only when ``self.password`` is defined.
Returns a boolean indicating whether the password can be authenticated
or not.
``prev`` is a ``bfd`` instance for the BFD Control header. It's not
necessary for authenticating the Simple Password.
``auth_keys`` is a dictionary of authentication key chain which
key is an integer of *Auth Key ID* and value is a string of *Password*.
"""
auth_keys = auth_keys if auth_keys else {}
assert isinstance(prev, bfd)
if self.auth_key_id in auth_keys and \
self.password == auth_keys[self.auth_key_id]:
return True
else:
return False
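# Hedged example (values illustrative): attaching a SimplePassword section
# to a packet and checking it against a key chain via bfd.authenticate():
#
#   auth = SimplePassword(auth_key_id=1, password=b'secret')
#   pkt = bfd(flags=BFD_FLAG_AUTH_PRESENT, detect_mult=3, my_discr=1,
#             auth_cls=auth)
#   pkt.authenticate(auth_keys={1: b'secret'})    # -> True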
@bfd.register_auth_type(BFD_AUTH_KEYED_MD5)
class KeyedMD5(BFDAuth):
""" BFD (RFC 5880) Keyed MD5 Authentication Section class
An instance has the following attributes.
Most of them are the same as the on-wire counterparts but in host byte order.
.. tabularcolumns:: |l|L|
=========== =================================================
Attribute Description
=========== =================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented occasionally.
auth_key The shared MD5 key for this packet.
digest (Optional) The 16-byte MD5 digest for the packet.
auth_len (Fixed) The length of the authentication section
is 24 bytes.
=========== =================================================
"""
_PACK_STR = '!BBL16s'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
def __init__(self, auth_key_id, seq, auth_key=None, digest=None,
auth_len=None):
self.auth_key_id = auth_key_id
self.seq = seq
self.auth_key = auth_key
self.digest = digest
super(KeyedMD5, self).__init__(auth_len)
def __len__(self):
# Defined in RFC5880 Section 4.3.
return 24
@classmethod
def parser(cls, buf):
(auth_type, auth_len) = cls.parser_hdr(buf)
assert auth_type == cls.auth_type
assert auth_len == 24
(auth_key_id, reserved, seq, digest) = \
struct.unpack_from(cls._PACK_STR, buf[cls._PACK_HDR_STR_LEN:])
assert reserved == 0
msg = cls(auth_key_id=auth_key_id, seq=seq, auth_key=None,
digest=digest)
return msg, None, None
def serialize(self, payload, prev):
"""Encode a Keyed MD5 Authentication Section.
This method is used only when encoding a BFD Control packet.
``payload`` is the rest of the packet which will immediately follow
this section.
``prev`` is a ``bfd`` instance for the BFD Control header to which this
authentication section belongs. It must be assigned because the MD5
digest is calculated over the entire BFD Control packet.
"""
assert self.auth_key is not None and len(self.auth_key) <= 16
assert isinstance(prev, bfd)
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, self.auth_key +
(b'\x00' * (16 - len(self.auth_key))))  # pad key to 16 bytes
h = hashlib.md5()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
self.digest = h.digest()
return auth_hdr_bin + struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, self.digest)
def authenticate(self, prev, auth_keys=None):
"""Authenticate the MD5 digest for this packet.
This method can be invoked only when ``self.digest`` is defined.
Returns a boolean indicating whether the digest can be authenticated
with the corresponding Auth Key or not.
``prev`` is a ``bfd`` instance for the BFD Control header to which this
authentication section belongs. It must be assigned because the MD5
digest is calculated over the entire BFD Control packet.
``auth_keys`` is a dictionary of authentication key chain which
key is an integer of *Auth Key ID* and value is a string of *Auth Key*.
"""
auth_keys = auth_keys if auth_keys else {}
assert isinstance(prev, bfd)
if self.digest is None:
return False
if self.auth_key_id not in auth_keys:
return False
auth_key = auth_keys[self.auth_key_id]
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, auth_key +
(b'\x00' * (16 - len(auth_key))))  # pad key to 16 bytes
h = hashlib.md5()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
if self.digest == h.digest():
return True
else:
return False
@bfd.register_auth_type(BFD_AUTH_METICULOUS_KEYED_MD5)
class MeticulousKeyedMD5(KeyedMD5):
""" BFD (RFC 5880) Meticulous Keyed MD5 Authentication Section class
All methods of this class are inherited from ``KeyedMD5``.
An instance has the following attributes.
Most of them are the same as the on-wire counterparts but in host byte order.
.. tabularcolumns:: |l|L|
=========== =================================================
Attribute Description
=========== =================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented for each
successive packet transmitted for a session.
auth_key The shared MD5 key for this packet.
digest (Optional) The 16-byte MD5 digest for the packet.
auth_len (Fixed) The length of the authentication section
is 24 bytes.
=========== =================================================
"""
pass
@bfd.register_auth_type(BFD_AUTH_KEYED_SHA1)
class KeyedSHA1(BFDAuth):
""" BFD (RFC 5880) Keyed SHA1 Authentication Section class
An instance has the following attributes.
    Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== ================================================
Attribute Description
=========== ================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented occasionally.
auth_key The shared SHA1 key for this packet.
auth_hash (Optional) The 20-byte SHA1 hash for the packet.
auth_len (Fixed) The length of the authentication section
is 28 bytes.
=========== ================================================
"""
_PACK_STR = '!BBL20s'
_PACK_STR_LEN = struct.calcsize(_PACK_STR)
def __init__(self, auth_key_id, seq, auth_key=None, auth_hash=None,
auth_len=None):
self.auth_key_id = auth_key_id
self.seq = seq
self.auth_key = auth_key
self.auth_hash = auth_hash
super(KeyedSHA1, self).__init__(auth_len)
def __len__(self):
# Defined in RFC5880 Section 4.4.
return 28
@classmethod
def parser(cls, buf):
(auth_type, auth_len) = cls.parser_hdr(buf)
assert auth_type == cls.auth_type
assert auth_len == 28
(auth_key_id, reserved, seq, auth_hash) = \
struct.unpack_from(cls._PACK_STR, buf[cls._PACK_HDR_STR_LEN:])
assert reserved == 0
msg = cls(auth_key_id=auth_key_id, seq=seq, auth_key=None,
auth_hash=auth_hash)
return msg, None, None
def serialize(self, payload, prev):
"""Encode a Keyed SHA1 Authentication Section.
        This method is used only when encoding a BFD Control packet.
``payload`` is the rest of the packet which will immediately follow
this section.
``prev`` is a ``bfd`` instance for the BFD Control header which this
authentication section belongs to. It's necessary to be assigned
because an SHA1 hash must be calculated over the entire BFD Control
packet.
"""
assert self.auth_key is not None and len(self.auth_key) <= 20
assert isinstance(prev, bfd)
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
        # Pad the Auth Key out to 20 bytes (struct's '20s' would also
        # zero-fill, but compute the padding explicitly and correctly).
        auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
                                    self.seq, self.auth_key +
                                    (b'\x00' * (20 - len(self.auth_key))))
h = hashlib.sha1()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
self.auth_hash = h.digest()
return auth_hdr_bin + struct.pack(self._PACK_STR, self.auth_key_id, 0,
self.seq, self.auth_hash)
def authenticate(self, prev, auth_keys=None):
"""Authenticate the SHA1 hash for this packet.
This method can be invoked only when ``self.auth_hash`` is defined.
        Returns a boolean indicating whether the hash can be
        authenticated with the corresponding Auth Key or not.
``prev`` is a ``bfd`` instance for the BFD Control header which this
authentication section belongs to. It's necessary to be assigned
because an SHA1 hash must be calculated over the entire BFD Control
packet.
        ``auth_keys`` is a dictionary representing the authentication key
        chain: keys are integers of *Auth Key ID* and values are strings
        of *Auth Key*.
"""
auth_keys = auth_keys if auth_keys else {}
assert isinstance(prev, bfd)
if self.auth_hash is None:
return False
if self.auth_key_id not in auth_keys:
return False
auth_key = auth_keys[self.auth_key_id]
bfd_bin = prev.pack()
auth_hdr_bin = self.serialize_hdr()
        auth_data_bin = struct.pack(self._PACK_STR, self.auth_key_id, 0,
                                    self.seq, auth_key +
                                    (b'\x00' * (20 - len(auth_key))))
h = hashlib.sha1()
h.update(bfd_bin + auth_hdr_bin + auth_data_bin)
        return self.auth_hash == h.digest()
@bfd.register_auth_type(BFD_AUTH_METICULOUS_KEYED_SHA1)
class MeticulousKeyedSHA1(KeyedSHA1):
""" BFD (RFC 5880) Meticulous Keyed SHA1 Authentication Section class
All methods of this class are inherited from ``KeyedSHA1``.
An instance has the following attributes.
    Most of them are the same as the on-wire counterparts, but in host byte order.
.. tabularcolumns:: |l|L|
=========== ================================================
Attribute Description
=========== ================================================
auth_type (Fixed) The authentication type in use.
auth_key_id The authentication Key ID in use.
seq The sequence number for this packet.
This value is incremented for each
successive packet transmitted for a session.
auth_key The shared SHA1 key for this packet.
auth_hash (Optional) The 20-byte SHA1 hash for the packet.
auth_len (Fixed) The length of the authentication section
is 28 bytes.
=========== ================================================
"""
pass
bfd.set_classes(bfd._auth_parsers)
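# --- Hedged usage sketch (illustrative, not part of the original module) ---
# authenticate() expects an auth key chain mapping integer Auth Key IDs to
# shared secrets. Assuming ``pkt`` is an already-built ``bfd`` control-packet
# instance, round-tripping a Keyed SHA1 section might look like:
#
#     auth_keys = {1: b'secret'}
#     section = KeyedSHA1(auth_key_id=1, seq=16, auth_key=b'secret')
#     wire = section.serialize(payload=b'', prev=pkt)
#     parsed, _, _ = KeyedSHA1.parser(wire)
#     assert parsed.authenticate(pkt, auth_keys)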
|
MattDevo/edk2
|
refs/heads/master
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_dummy_thread.py
|
12
|
"""Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
"""Test lock objects."""
def setUp(self):
# Create a lock
self.lock = _thread.allocate_lock()
def test_initlock(self):
        #Make sure locks start unlocked
self.assertTrue(not self.lock.locked(),
"Lock object is not initialized unlocked.")
def test_release(self):
# Test self.lock.release()
self.lock.acquire()
self.lock.release()
self.assertTrue(not self.lock.locked(),
"Lock object did not release properly.")
def test_improper_release(self):
        #Make sure release of an unlocked lock raises _thread.error
self.assertRaises(_thread.error, self.lock.release)
def test_cond_acquire_success(self):
#Make sure the conditional acquiring of the lock works.
self.assertTrue(self.lock.acquire(0),
"Conditional acquiring of the lock failed.")
def test_cond_acquire_fail(self):
#Test acquiring locked lock returns False
self.lock.acquire(0)
self.assertTrue(not self.lock.acquire(0),
"Conditional acquiring of a locked lock incorrectly "
"succeeded.")
def test_uncond_acquire_success(self):
#Make sure unconditional acquiring of a lock works.
self.lock.acquire()
self.assertTrue(self.lock.locked(),
"Uncondional locking failed.")
def test_uncond_acquire_return_val(self):
#Make sure that an unconditional locking returns True.
self.assertTrue(self.lock.acquire(1) is True,
"Unconditional locking did not return True.")
self.assertTrue(self.lock.acquire() is True)
def test_uncond_acquire_blocking(self):
#Make sure that unconditional acquiring of a locked lock blocks.
def delay_unlock(to_unlock, delay):
"""Hold on to lock for a set amount of time before unlocking."""
time.sleep(delay)
to_unlock.release()
self.lock.acquire()
start_time = int(time.time())
_thread.start_new_thread(delay_unlock,(self.lock, DELAY))
if test_support.verbose:
print
print "*** Waiting for thread to release the lock "\
"(approx. %s sec.) ***" % DELAY
self.lock.acquire()
end_time = int(time.time())
if test_support.verbose:
print "done"
self.assertTrue((end_time - start_time) >= DELAY,
"Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
"""Miscellaneous tests."""
def test_exit(self):
#Make sure _thread.exit() raises SystemExit
self.assertRaises(SystemExit, _thread.exit)
def test_ident(self):
#Test sanity of _thread.get_ident()
self.assertIsInstance(_thread.get_ident(), int,
"_thread.get_ident() returned a non-integer")
self.assertTrue(_thread.get_ident() != 0,
"_thread.get_ident() returned 0")
def test_LockType(self):
        #Make sure _thread.LockType is the same type as _thread.allocate_lock()
self.assertIsInstance(_thread.allocate_lock(), _thread.LockType,
"_thread.LockType is not an instance of what "
"is returned by _thread.allocate_lock()")
def test_interrupt_main(self):
#Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
self.assertRaises(KeyboardInterrupt, _thread.start_new_thread,
call_interrupt, tuple())
def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread that
# KeyboardInterrupt is raised instantly.
self.assertRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
"""Test thread creation."""
def test_arg_passing(self):
#Make sure that parameter passing works.
def arg_tester(queue, arg1=False, arg2=False):
"""Use to test _thread.start_new_thread() passes args properly."""
queue.put((arg1, arg2))
testing_queue = Queue.Queue(1)
_thread.start_new_thread(arg_tester, (testing_queue, True, True))
result = testing_queue.get()
self.assertTrue(result[0] and result[1],
"Argument passing for thread creation using tuple failed")
_thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
'arg1':True, 'arg2':True})
result = testing_queue.get()
self.assertTrue(result[0] and result[1],
"Argument passing for thread creation using kwargs failed")
_thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
result = testing_queue.get()
self.assertTrue(result[0] and result[1],
"Argument passing for thread creation using both tuple"
" and kwargs failed")
def test_multi_creation(self):
#Make sure multiple threads can be created.
def queue_mark(queue, delay):
"""Wait for ``delay`` seconds and then put something into ``queue``"""
time.sleep(delay)
queue.put(_thread.get_ident())
thread_count = 5
testing_queue = Queue.Queue(thread_count)
if test_support.verbose:
print
print "*** Testing multiple thread creation "\
"(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
for count in xrange(thread_count):
if DELAY:
local_delay = round(random.random(), 1)
else:
local_delay = 0
_thread.start_new_thread(queue_mark,
(testing_queue, local_delay))
time.sleep(DELAY)
if test_support.verbose:
print 'done'
self.assertTrue(testing_queue.qsize() == thread_count,
"Not all %s threads executed properly after %s sec." %
(thread_count, DELAY))
def test_main(imported_module=None):
global _thread, DELAY
if imported_module:
_thread = imported_module
DELAY = 2
if test_support.verbose:
print
print "*** Using %s as _thread module ***" % _thread
test_support.run_unittest(LockTests, MiscTests, ThreadTests)
if __name__ == '__main__':
test_main()
|
marmyshev/item_title
|
refs/heads/master
|
openlp/plugins/media/lib/mediaitem.py
|
1
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
import os
from PyQt4 import QtCore, QtGui
from openlp.core.lib import ItemCapabilities, MediaManagerItem, MediaType, Registry, ServiceItem, ServiceItemContext, \
    Settings, UiStrings, build_icon, check_item_selected, check_directory_exists, translate
from openlp.core.lib.ui import critical_error_message_box, create_horizontal_adjusting_combo_box
from openlp.core.ui import DisplayController, Display, DisplayControllerType
from openlp.core.ui.media import get_media_players, set_media_players
from openlp.core.utils import AppLocation, get_locale_key
log = logging.getLogger(__name__)
CLAPPERBOARD = ':/media/slidecontroller_multimedia.png'
VIDEO_ICON = build_icon(':/media/media_video.png')
AUDIO_ICON = build_icon(':/media/media_audio.png')
DVD_ICON = build_icon(':/media/media_video.png')
ERROR_ICON = build_icon(':/general/general_delete.png')
class MediaMediaItem(MediaManagerItem):
"""
This is the custom media manager item for Media Slides.
"""
log.info('%s MediaMediaItem loaded', __name__)
def __init__(self, parent, plugin):
self.icon_path = 'images/image'
self.background = False
self.automatic = ''
super(MediaMediaItem, self).__init__(parent, plugin)
self.single_service_item = False
self.has_search = True
self.media_object = None
self.display_controller = DisplayController(parent)
self.display_controller.controller_layout = QtGui.QVBoxLayout()
self.media_controller.register_controller(self.display_controller)
self.media_controller.set_controls_visible(self.display_controller, False)
self.display_controller.preview_display = Display(self.display_controller, False, self.display_controller)
self.display_controller.preview_display.hide()
self.display_controller.preview_display.setGeometry(QtCore.QRect(0, 0, 300, 300))
self.display_controller.preview_display.screen = {'size': self.display_controller.preview_display.geometry()}
self.display_controller.preview_display.setup()
self.media_controller.setup_display(self.display_controller.preview_display, False)
Registry().register_function('video_background_replaced', self.video_background_replaced)
Registry().register_function('mediaitem_media_rebuild', self.rebuild_players)
Registry().register_function('config_screen_changed', self.display_setup)
# Allow DnD from the desktop
self.list_view.activateDnD()
def retranslateUi(self):
self.on_new_prompt = translate('MediaPlugin.MediaItem', 'Select Media')
self.replace_action.setText(UiStrings().ReplaceBG)
self.replace_action.setToolTip(UiStrings().ReplaceLiveBG)
self.reset_action.setText(UiStrings().ResetBG)
self.reset_action.setToolTip(UiStrings().ResetLiveBG)
self.automatic = UiStrings().Automatic
self.display_type_label.setText(translate('MediaPlugin.MediaItem', 'Use Player:'))
self.rebuild_players()
def required_icons(self):
"""
Set which icons the media manager tab should show
"""
MediaManagerItem.required_icons(self)
self.has_file_icon = True
self.has_new_icon = False
self.has_edit_icon = False
def add_list_view_to_toolbar(self):
MediaManagerItem.add_list_view_to_toolbar(self)
self.list_view.addAction(self.replace_action)
def add_end_header_bar(self):
        # Replacing backgrounds does not work at present, so that functionality is removed.
self.replace_action = self.toolbar.add_toolbar_action('replace_action', icon=':/slides/slide_blank.png',
triggers=self.onReplaceClick)
self.reset_action = self.toolbar.add_toolbar_action('reset_action', icon=':/system/system_close.png',
visible=False, triggers=self.onResetClick)
self.media_widget = QtGui.QWidget(self)
self.media_widget.setObjectName('media_widget')
self.display_layout = QtGui.QFormLayout(self.media_widget)
self.display_layout.setMargin(self.display_layout.spacing())
self.display_layout.setObjectName('display_layout')
self.display_type_label = QtGui.QLabel(self.media_widget)
self.display_type_label.setObjectName('display_type_label')
self.display_type_combo_box = create_horizontal_adjusting_combo_box(
self.media_widget, 'display_type_combo_box')
self.display_type_label.setBuddy(self.display_type_combo_box)
self.display_layout.addRow(self.display_type_label, self.display_type_combo_box)
# Add the Media widget to the page layout.
self.page_layout.addWidget(self.media_widget)
self.display_type_combo_box.currentIndexChanged.connect(self.overridePlayerChanged)
def overridePlayerChanged(self, index):
player = get_media_players()[0]
if index == 0:
set_media_players(player)
else:
set_media_players(player, player[index-1])
def onResetClick(self):
"""
        Called to reset the Live background with the media selected.
"""
self.media_controller.media_reset(self.live_controller)
self.reset_action.setVisible(False)
def video_background_replaced(self):
"""
Triggered by main display on change of serviceitem.
"""
self.reset_action.setVisible(False)
def onReplaceClick(self):
"""
Called to replace Live background with the media selected.
"""
if check_item_selected(self.list_view,
translate('MediaPlugin.MediaItem', 'You must select a media file to replace the background with.')):
item = self.list_view.currentItem()
filename = item.data(QtCore.Qt.UserRole)
if os.path.exists(filename):
service_item = ServiceItem()
service_item.title = 'webkit'
service_item.processor = 'webkit'
(path, name) = os.path.split(filename)
                service_item.add_from_command(path, name, CLAPPERBOARD)
if self.media_controller.video(DisplayControllerType.Live, service_item, video_behind_text=True):
self.reset_action.setVisible(True)
else:
critical_error_message_box(UiStrings().LiveBGError,
translate('MediaPlugin.MediaItem', 'There was no display item to amend.'))
else:
critical_error_message_box(UiStrings().LiveBGError,
translate('MediaPlugin.MediaItem',
'There was a problem replacing your background, the media file "%s" no longer exists.') % filename)
def generate_slide_data(self, service_item, item=None, xml_version=False, remote=False,
context=ServiceItemContext.Live):
"""
Generate the slide data. Needs to be implemented by the plugin.
"""
if item is None:
item = self.list_view.currentItem()
if item is None:
return False
filename = item.data(QtCore.Qt.UserRole)
if not os.path.exists(filename):
if not remote:
# File is no longer present
critical_error_message_box(
translate('MediaPlugin.MediaItem', 'Missing Media File'),
translate('MediaPlugin.MediaItem', 'The file %s no longer exists.') % filename)
return False
(path, name) = os.path.split(filename)
service_item.title = name
service_item.processor = self.display_type_combo_box.currentText()
service_item.add_from_command(path, name, CLAPPERBOARD)
# Only get start and end times if going to a service
if context == ServiceItemContext.Service:
# Start media and obtain the length
if not self.media_controller.media_length(service_item):
return False
service_item.add_capability(ItemCapabilities.CanAutoStartForLive)
service_item.add_capability(ItemCapabilities.CanEditTitle)
service_item.add_capability(ItemCapabilities.RequiresMedia)
if Settings().value(self.settings_section + '/media auto start') == QtCore.Qt.Checked:
service_item.will_auto_start = True
# force a non-existent theme
service_item.theme = -1
return True
def initialise(self):
self.list_view.clear()
self.list_view.setIconSize(QtCore.QSize(88, 50))
self.servicePath = os.path.join(AppLocation.get_section_data_path(self.settings_section), 'thumbnails')
check_directory_exists(self.servicePath)
self.load_list(Settings().value(self.settings_section + '/media files'))
self.populateDisplayTypes()
def rebuild_players(self):
"""
Rebuild the tab in the media manager when changes are made in the settings.
"""
self.populateDisplayTypes()
self.on_new_file_masks = translate('MediaPlugin.MediaItem', 'Videos (%s);;Audio (%s);;%s (*)') % (
' '.join(self.media_controller.video_extensions_list),
' '.join(self.media_controller.audio_extensions_list), UiStrings().AllFiles)
def display_setup(self):
self.media_controller.setup_display(self.display_controller.preview_display, False)
def populateDisplayTypes(self):
"""
Load the combobox with the enabled media players, allowing user to select a specific player if settings allow.
"""
        # Block signals to avoid unnecessary overridePlayerChanged signals during combo box creation.
self.display_type_combo_box.blockSignals(True)
self.display_type_combo_box.clear()
usedPlayers, overridePlayer = get_media_players()
media_players = self.media_controller.media_players
currentIndex = 0
for player in usedPlayers:
# load the drop down selection
self.display_type_combo_box.addItem(media_players[player].original_name)
if overridePlayer == player:
                currentIndex = self.display_type_combo_box.count()
if self.display_type_combo_box.count() > 1:
self.display_type_combo_box.insertItem(0, self.automatic)
self.display_type_combo_box.setCurrentIndex(currentIndex)
if overridePlayer:
self.media_widget.show()
else:
self.media_widget.hide()
self.display_type_combo_box.blockSignals(False)
def on_delete_click(self):
"""
Remove a media item from the list.
"""
if check_item_selected(self.list_view,
translate('MediaPlugin.MediaItem', 'You must select a media file to delete.')):
row_list = [item.row() for item in self.list_view.selectedIndexes()]
row_list.sort(reverse=True)
for row in row_list:
self.list_view.takeItem(row)
Settings().setValue(self.settings_section + '/media files', self.get_file_list())
def load_list(self, media, target_group=None):
# Sort the media by its filename considering language specific characters.
media.sort(key=lambda filename: get_locale_key(os.path.split(str(filename))[1]))
for track in media:
track_info = QtCore.QFileInfo(track)
if not os.path.exists(track):
filename = os.path.split(str(track))[1]
item_name = QtGui.QListWidgetItem(filename)
item_name.setIcon(ERROR_ICON)
item_name.setData(QtCore.Qt.UserRole, track)
elif track_info.isFile():
filename = os.path.split(str(track))[1]
item_name = QtGui.QListWidgetItem(filename)
if '*.%s' % (filename.split('.')[-1].lower()) in self.media_controller.audio_extensions_list:
item_name.setIcon(AUDIO_ICON)
else:
item_name.setIcon(VIDEO_ICON)
item_name.setData(QtCore.Qt.UserRole, track)
else:
filename = os.path.split(str(track))[1]
item_name = QtGui.QListWidgetItem(filename)
item_name.setIcon(build_icon(DVD_ICON))
item_name.setData(QtCore.Qt.UserRole, track)
item_name.setToolTip(track)
self.list_view.addItem(item_name)
def get_list(self, type=MediaType.Audio):
media = Settings().value(self.settings_section + '/media files')
media.sort(key=lambda filename: get_locale_key(os.path.split(str(filename))[1]))
extension = []
if type == MediaType.Audio:
extension = self.media_controller.audio_extensions_list
else:
extension = self.media_controller.video_extensions_list
extension = [x[1:] for x in extension]
media = [x for x in media if os.path.splitext(x)[1] in extension]
return media
def search(self, string, showError):
files = Settings().value(self.settings_section + '/media files')
results = []
string = string.lower()
for file in files:
filename = os.path.split(str(file))[1]
if filename.lower().find(string) > -1:
results.append([file, filename])
return results
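    # Hedged usage sketch (illustrative only): given a constructed
    # MediaMediaItem ``media_item``, the helpers above filter the stored
    # media list, e.g.:
    #     audio_files = media_item.get_list(MediaType.Audio)
    #     matches = media_item.search('intro', showError=False)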
|
d3banjan/polyamide
|
refs/heads/master
|
webdev/lib/python2.7/site-packages/django/conf/locale/ga/__init__.py
|
12133432
| |
iclosure/jcoolkits
|
refs/heads/master
|
src/jplot3d-py/com/__init__.py
|
12133432
| |
jazkarta/edx-platform
|
refs/heads/master
|
common/djangoapps/track/contexts.py
|
126
|
"""Generates common contexts"""
import logging
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from util.request import COURSE_REGEX
log = logging.getLogger(__name__)
def course_context_from_url(url):
"""
Extracts the course_context from the given `url` and passes it on to
`course_context_from_course_id()`.
"""
url = url or ''
match = COURSE_REGEX.match(url)
course_id = None
if match:
course_id_string = match.group('course_id')
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
except InvalidKeyError:
log.warning(
'unable to parse course_id "{course_id}"'.format(
course_id=course_id_string
),
exc_info=True
)
return course_context_from_course_id(course_id)
def course_context_from_course_id(course_id):
"""
Creates a course context from a `course_id`.
Example Returned Context::
{
'course_id': 'org/course/run',
'org_id': 'org'
}
"""
if course_id is None:
return {'course_id': '', 'org_id': ''}
# TODO: Make this accept any CourseKey, and serialize it using .to_string
assert isinstance(course_id, CourseKey)
return {
'course_id': course_id.to_deprecated_string(),
'org_id': course_id.org,
}
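# Hedged examples of the mapping implemented above (the URL shape is an
# assumption about what COURSE_REGEX accepts; ids use the deprecated
# slash-separated form):
#
#     course_context_from_course_id(None)
#     # -> {'course_id': '', 'org_id': ''}
#     course_context_from_url('/courses/org/course/run/courseware')
#     # -> {'course_id': 'org/course/run', 'org_id': 'org'}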
|
nzavagli/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/boto-2.38.0/tests/unit/ec2/elb/test_loadbalancer.py
|
114
|
#!/usr/bin/env python
from tests.unit import unittest
from tests.compat import mock
from boto.ec2.elb import ELBConnection
from boto.ec2.elb import LoadBalancer
DISABLE_RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?>
<DisableAvailabilityZonesForLoadBalancerResult xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE</requestId>
<AvailabilityZones>
<member>sample-zone</member>
</AvailabilityZones>
</DisableAvailabilityZonesForLoadBalancerResult>
"""
class TestDisableAvailabilityZones(unittest.TestCase):
    def test_disable_availability_zones(self):
elb = ELBConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key')
mock_response = mock.Mock()
mock_response.read.return_value = DISABLE_RESPONSE
mock_response.status = 200
elb.make_request = mock.Mock(return_value=mock_response)
disabled = elb.disable_availability_zones('mine', ['sample-zone'])
self.assertEqual(disabled, ['sample-zone'])
DESCRIBE_RESPONSE = b"""<?xml version="1.0" encoding="UTF-8"?>
<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<DescribeLoadBalancersResult>
<LoadBalancerDescriptions>
<member>
<SecurityGroups/>
<CreatedTime>2013-07-09T19:18:00.520Z</CreatedTime>
<LoadBalancerName>elb-boto-unit-test</LoadBalancerName>
<HealthCheck/>
<ListenerDescriptions>
<member>
<PolicyNames/>
<Listener/>
</member>
</ListenerDescriptions>
<Instances/>
<Policies>
<AppCookieStickinessPolicies/>
<OtherPolicies>
<member>AWSConsole-SSLNegotiationPolicy-my-test-loadbalancer</member>
<member>EnableProxyProtocol</member>
</OtherPolicies>
<LBCookieStickinessPolicies/>
</Policies>
<AvailabilityZones>
<member>us-east-1a</member>
</AvailabilityZones>
<CanonicalHostedZoneName>elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com</CanonicalHostedZoneName>
<CanonicalHostedZoneNameID>Z3DZXE0Q79N41H</CanonicalHostedZoneNameID>
<Scheme>internet-facing</Scheme>
<SourceSecurityGroup>
<OwnerAlias>amazon-elb</OwnerAlias>
<GroupName>amazon-elb-sg</GroupName>
</SourceSecurityGroup>
<DNSName>elb-boto-unit-test-408121642.us-east-1.elb.amazonaws.com</DNSName>
<BackendServerDescriptions>
<member>
<PolicyNames>
<member>EnableProxyProtocol</member>
</PolicyNames>
<InstancePort>80</InstancePort>
</member>
</BackendServerDescriptions>
<Subnets/>
</member>
</LoadBalancerDescriptions>
<Marker>1234</Marker>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>5763d932-e8cc-11e2-a940-11136cceffb8</RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>
"""
class TestDescribeLoadBalancers(unittest.TestCase):
def test_other_policy(self):
elb = ELBConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key')
mock_response = mock.Mock()
mock_response.read.return_value = DESCRIBE_RESPONSE
mock_response.status = 200
elb.make_request = mock.Mock(return_value=mock_response)
load_balancers = elb.get_all_load_balancers()
self.assertEqual(len(load_balancers), 1)
lb = load_balancers[0]
self.assertEqual(len(lb.policies.other_policies), 2)
self.assertEqual(lb.policies.other_policies[0].policy_name,
'AWSConsole-SSLNegotiationPolicy-my-test-loadbalancer')
self.assertEqual(lb.policies.other_policies[1].policy_name,
'EnableProxyProtocol')
self.assertEqual(len(lb.backends), 1)
self.assertEqual(len(lb.backends[0].policies), 1)
self.assertEqual(lb.backends[0].policies[0].policy_name,
'EnableProxyProtocol')
self.assertEqual(lb.backends[0].instance_port, 80)
def test_request_with_marker(self):
elb = ELBConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key')
mock_response = mock.Mock()
mock_response.read.return_value = DESCRIBE_RESPONSE
mock_response.status = 200
elb.make_request = mock.Mock(return_value=mock_response)
load_balancers1 = elb.get_all_load_balancers()
self.assertEqual('1234', load_balancers1.marker)
load_balancers2 = elb.get_all_load_balancers(marker=load_balancers1.marker)
self.assertEqual(len(load_balancers2), 1)
DETACH_RESPONSE = r"""<?xml version="1.0" encoding="UTF-8"?>
<DetachLoadBalancerFromSubnets xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>3be1508e-c444-4fef-89cc-0b1223c4f02fEXAMPLE</requestId>
</DetachLoadBalancerFromSubnets>
"""
class TestDetachSubnets(unittest.TestCase):
def test_detach_subnets(self):
elb = ELBConnection(aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key')
lb = LoadBalancer(elb, "mylb")
mock_response = mock.Mock()
mock_response.read.return_value = DETACH_RESPONSE
mock_response.status = 200
elb.make_request = mock.Mock(return_value=mock_response)
lb.detach_subnets("s-xxx")
if __name__ == '__main__':
unittest.main()
|
fniephaus/alfred-gmail
|
refs/heads/master
|
src/gmail_launcher.py
|
1
|
import os
import subprocess
import sys
import json
import httplib2
import base64
from email.mime.text import MIMEText
from googleapiclient.discovery import build
from googleapiclient import errors
from oauth2client.client import flow_from_clientsecrets, OAuth2Credentials
from oauth2client.tools import run_flow
from workflow import Workflow, PasswordNotFound
from gmail_refresh import refresh_cache, PseudoStorage
import config
OPEN_MESSAGE_BASE_URL = 'https://mail.google.com/mail/u/0/?ui=2&pli=1#inbox/%s'
OPEN_ALFRED_OSA_TEMPLATE = """\
osascript -e 'tell application "Alfred 3" to run trigger "open" in workflow \
"com.fniephaus.gmail" with argument "%s"'"""
def execute(wf):
if len(wf.args):
if 'reopen' in wf.args[0]:
open_alfred()
return 0
query = json.loads(wf.args[0])
# Start the OAuth flow to retrieve credentials
flow = flow_from_clientsecrets(
config.CLIENT_SECRET_FILE, scope=config.OAUTH_SCOPE)
http = httplib2.Http()
try:
credentials = OAuth2Credentials.from_json(
wf.get_password('gmail_credentials'))
if credentials is None or credentials.invalid:
credentials = run_flow(flow, PseudoStorage(), http=http)
wf.save_password('gmail_credentials', credentials.to_json())
# Authorize the httplib2.Http object with our credentials
http = credentials.authorize(http)
# Build the Gmail service from discovery
service = build('gmail', 'v1', http=http)
except PasswordNotFound:
wf.logger.error('Credentials not found')
return 0
try:
thread_id = query['thread_id']
except KeyError:
return 0
message_id = query.get('message_id')
target = None
if 'action' in query:
if query['action'] == 'deauthorize':
wf.delete_password('gmail_credentials')
wf.clear_cache()
print "Workflow deauthorized."
return 0
elif query['action'] == 'mark_as_read':
mark_conversation_as_read(service, thread_id)
target = query.get('query')
elif query['action'] == 'mark_as_unread':
mark_conversation_as_unread(service, thread_id)
target = query.get('query')
elif query['action'] == 'archive_conversation':
refresh_cache(archive_conversation(service, thread_id))
elif query['action'] == 'trash_message':
refresh_cache(trash_message(service, message_id))
target = query.get('label')
elif query['action'] == 'move_to_inbox':
refresh_cache(move_to_inbox(service, message_id))
target = query.get('label')
elif query['action'] == 'trash_conversation':
refresh_cache(trash_conversation(service, thread_id))
target = query.get('label')
elif query['action'] == 'reply':
if 'message' in query:
send_reply(wf, service, thread_id, query['message'])
else:
print 'No message found.'
target = query.get('query')
elif query['action'] == 'label':
if 'label' in query:
add_label(service, thread_id, query['label'])
else:
print 'No label found.'
target = query.get('query')
elif query['action'] == 'open':
open_message(wf, thread_id)
if 'label_id' in query:
refresh_cache([query['label_id']])
return 0
else:
wf.logger.debug('No action defined')
return 0
open_alfred(target)
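# Hedged sketch of the JSON payload shapes dispatched by execute() above
# (field names taken from the handler branches; id and query values are
# illustrative placeholders):
#
#     {"action": "mark_as_read", "thread_id": "<thread id>", "query": "in:inbox"}
#     {"action": "reply", "thread_id": "<thread id>", "message": "Thanks!"}
#     {"action": "label", "thread_id": "<thread id>",
#      "label": {"id": "<label id>", "name": "Receipts"}}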
def open_message(wf, message_id):
if message_id:
url = OPEN_MESSAGE_BASE_URL % message_id
subprocess.call(['open', url])
def mark_conversation_as_read(service, thread_id):
try:
# Mark conversation as read
thread = service.users().threads().modify(
userId='me', id=thread_id,
body={'removeLabelIds': ['UNREAD']}).execute()
if all((u'labelIds' in message and
u'UNREAD' not in message['labelIds'])
for message in thread['messages']):
print 'Conversation marked as read.'
return thread['messages'][-1]['labelIds']
else:
print 'An error occurred.'
except KeyError:
print 'Connection error'
return []
def mark_conversation_as_unread(service, thread_id):
try:
# Mark conversation as unread
thread = service.users().threads().modify(
userId='me', id=thread_id,
body={'addLabelIds': ['UNREAD']}).execute()
if all((u'labelIds' in message and
u'UNREAD' in message['labelIds'])
for message in thread['messages']):
print 'Conversation marked as unread.'
return thread['messages'][-1]['labelIds']
else:
print 'An error occurred.'
except KeyError:
print 'Connection error'
return []
def move_to_inbox(service, thread_id):
try:
thread = service.users().threads().modify(
userId='me', id=thread_id,
body={'addLabelIds': ['INBOX']}).execute()
if all((u'labelIds' in message and
u'INBOX' in message['labelIds'])
for message in thread['messages']):
print 'Conversation moved to inbox.'
return thread['messages'][-1]['labelIds']
else:
print 'An error occurred.'
except KeyError:
print 'Connection error'
return []
def archive_conversation(service, thread_id):
try:
# Archive conversation
thread = service.users().threads().modify(
userId='me', id=thread_id,
body={'removeLabelIds': ['INBOX']}).execute()
if all((u'labelIds' in message and
u'INBOX' not in message['labelIds'])
for message in thread['messages']):
print 'Conversation archived.'
return thread['messages'][-1]['labelIds']
else:
print 'An error occurred.'
except Exception:
print 'Connection error'
return []
def trash_message(service, message_id):
if message_id:
try:
# Trash message
message = service.users().messages().trash(
userId='me', id=message_id).execute()
if u'labelIds' in message and u'TRASH' in message['labelIds']:
print 'Mail moved to trash.'
return message['labelIds']
else:
print 'An error occurred.'
except Exception:
print 'Connection error'
return []
def trash_conversation(service, thread_id):
try:
# Trash conversation
thread = service.users().threads().trash(
userId='me', id=thread_id).execute()
if all((u'labelIds' in message and
u'TRASH' in message['labelIds'])
for message in thread['messages']):
print 'Conversation moved to trash.'
return thread['messages'][-1]['labelIds']
else:
print 'An error occurred.'
except Exception:
print 'Connection error'
return []
def send_reply(wf, service, thread_id, message):
try:
thread = service.users().threads().get(
userId='me', id=thread_id,
fields='messages/payload/headers,messages/labelIds').execute()
header_from = None
header_delivered_to = None
header_subject = None
for header in thread['messages'][-1]['payload']['headers']:
if header['name'] == 'From':
header_from = header['value']
if header['name'] == 'Delivered-To':
header_delivered_to = header['value']
if header['name'] == 'Subject':
header_subject = header['value']
if any(not x for x in
[header_from, header_delivered_to, header_subject]):
print 'An error occurred.'
return []
message_body = create_message(
header_delivered_to, header_from, header_subject, message)
service.users().messages().send(
userId='me', body=message_body).execute()
print 'Reply sent.'
return thread['messages'][-1]['labelIds']
except errors.HttpError, error:
print 'An error occurred: %s' % error
return []
def create_message(sender, to, subject, message_text):
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
return {'raw': base64.urlsafe_b64encode(message.as_string())}
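# Hedged example: create_message() builds the base64url-encoded body that
# service.users().messages().send() consumes above, e.g.:
#
#     body = create_message('me@example.com', 'you@example.com',
#                           'Re: hello', 'Thanks!')
#     # body == {'raw': '<urlsafe-base64 of the MIME message>'}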
def add_label(service, thread_id, label):
try:
thread = service.users().threads().modify(
userId='me', id=thread_id,
body={'addLabelIds': [label['id']]}).execute()
if all((u'labelIds' in message and label['id'] in message['labelIds'])
for message in thread['messages']):
print 'Labeled with %s.' % label['name']
return thread['messages'][-1]['labelIds']
else:
print 'An error occurred.'
except KeyError:
print 'Connection error'
return []
def open_alfred(query=None):
os.system(OPEN_ALFRED_OSA_TEMPLATE % (query or ''))
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(execute))
|
hellhovnd/django
|
refs/heads/master
|
tests/db_typecasts/__init__.py
|
12133432
| |
dnozay/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/regressiontests/defer_regress/__init__.py
|
12133432
| |
polojacky/ehfpi
|
refs/heads/master
|
ehf/magic/__init__.py
|
12133432
| |
abhishekgahlot/compbio
|
refs/heads/master
|
test/compbio/test_birthdeath.py
|
3
|
from math import exp
import unittest
from compbio import birthdeath
from rasmus.testing import eq_sample_pmf
#=============================================================================
# test birth-death count sampling and probability functions
def rannala1996_prob_birth_death1(n, t, birth, death):
ert = exp(-(birth - death)*t)
p0 = (death - death * ert) / (birth - death * ert)
p1 = (birth - death)**2 * ert / (birth - death * ert)**2
if n == 0:
return p0
elif n == 1:
return p1
else:
return (birth/death)**n * p1 * p0**(n-1)
def rannala1996_prob_birth_death1_fix(n, t, birth, death):
ert = exp(-(birth - death)*t)
p0 = (death - death * ert) / (birth - death * ert)
p1 = (birth - death)**2 * ert / (birth - death * ert)**2
if n == 0:
return p0
elif n == 1:
return p1
else:
return p1 * (birth/death * p0)**(n-1)
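# Note (added for clarity): for n > 1 the corrected pmf above is
#     P(n) = p1 * ((birth/death) * p0)**(n - 1)
# which differs from the printed Rannala (1996) form
#     (birth/death)**n * p1 * p0**(n - 1)
# by a factor of birth/death; the tests below check that the printed form
# fails against sampled counts while the corrected form matches.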
class BD (unittest.TestCase):
def test_prob_birth_death1(self):
"""Sampling and PDF for birth-death from single lineage."""
t = 1.0
birth = 0.5
death = 0.2
counts = [birthdeath.sample_birth_death_count(1, t, birth, death)
for i in xrange(10000)]
eq_sample_pmf(
counts,
lambda i: birthdeath.prob_birth_death1(i, t, birth, death),
pval=0.01)
def test_rannala1996_prob_birth_death1(self):
t = 1.0
birth = 0.5
death = 0.2
counts = [birthdeath.sample_birth_death_count(1, t, birth, death)
for i in xrange(10000)]
# original equation should fail
try:
eq_sample_pmf(
counts,
lambda i: rannala1996_prob_birth_death1(i, t, birth, death))
except:
pass
else:
raise AssertionError
eq_sample_pmf(
counts,
lambda i: rannala1996_prob_birth_death1_fix(i, t, birth, death))
def test_prob_birth_death1_eq(self):
"""
Sampling and PDF for birth-death from single lineage birth=death rate.
"""
t = 1.0
birth = 0.5
death = 0.5
counts = [birthdeath.sample_birth_death_count(1, t, birth, death)
for i in xrange(10000)]
eq_sample_pmf(
counts,
lambda i: birthdeath.prob_birth_death1(i, t, birth, death))
def test_birth_wait_time_eq(self):
t = 0.5
n = 2
T = 1.0
birth = 2.0
death = 2.0
self.assertAlmostEqual(
birthdeath.birth_wait_time(t, n, T, birth, death*.9999),
birthdeath.birth_wait_time(t, n, T, birth, death),
places=4)
def test_prob_no_birth_eq(self):
n = 2
T = 1.2
birth = 2.0
death = 2.0
self.assertAlmostEqual(
birthdeath.prob_no_birth(n, T, birth, death*.9999),
birthdeath.prob_no_birth(n, T, birth, death),
places=4)
|
johnseekins/graphite-web
|
refs/heads/master
|
webapp/graphite/views.py
|
39
|
import traceback
from django.http import HttpResponseServerError
from django.template import Context, loader
def server_error(request, template_name='500.html'):
template = loader.get_template(template_name)
context = Context({
'stacktrace' : traceback.format_exc()
})
return HttpResponseServerError( template.render(context) )
|
kdagley/midas_pr
|
refs/heads/master
|
config/settings/production.py
|
1
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['pr.midasgoldinc.com'])
# ALLOWED_HOSTS = ['*']
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
STATIC_URL = MEDIA_URL
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='Midas Gold Public Relations <noreply@pr.midasgoldinc.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[Midas Gold Public Relations] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Your production stuff: Below this line define 3rd party library settings
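# Hedged sketch of the environment this module expects (variable names taken
# from the env() calls above; values are illustrative placeholders):
#     DJANGO_SECRET_KEY=change-me
#     DJANGO_AWS_ACCESS_KEY_ID=... DJANGO_AWS_SECRET_ACCESS_KEY=...
#     DJANGO_AWS_STORAGE_BUCKET_NAME=my-bucket
#     DJANGO_MAILGUN_API_KEY=... DJANGO_MAILGUN_SERVER_NAME=mg.example.com
#     DATABASE_URL=postgres://user:pass@host:5432/dbname
#     REDIS_URL=redis://127.0.0.1:6379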
|
odicraig/kodi2odi
|
refs/heads/master
|
addons/plugin.video.roggerstream-3.0.7/mechanize/_beautifulsoup.py
|
5
|
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
v2.1.1
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses arbitrarily invalid XML- or HTML-like substance
into a tree representation. It provides methods and Pythonic idioms
that make it easy to search and modify the tree.
A well-formed XML/HTML document will yield a well-formed data
structure. An ill-formed XML/HTML document will yield a
correspondingly ill-formed data structure. If your document is only
locally well-formed, you can use this library to find and process the
well-formed part of it. The BeautifulSoup class has heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup has no external dependencies. It works with Python 2.2
and up.
Beautiful Soup defines classes for four different parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid.
* ICantBelieveItsBeautifulSoup, for parsing valid but bizarre HTML
that trips up BeautifulSoup.
* BeautifulSOAP, for making it easier to parse XML documents that use
lots of subelements containing a single string, where you'd prefer
they put that string into an attribute (such as SOAP messages).
You can subclass BeautifulStoneSoup or BeautifulSoup to create a
parsing strategy specific to an XML schema or a particular bizarre
HTML document. Typically your subclass would just override
SELF_CLOSING_TAGS and/or NESTABLE_TAGS.
""" #"
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "2.1.1"
__date__ = "$Date: 2004/10/18 00:14:20 $"
__copyright__ = "Copyright (c) 2004-2005 Leonard Richardson"
__license__ = "PSF"
from _sgmllib_copy import SGMLParser, SGMLParseError
import types
import re
import _sgmllib_copy as sgmllib
class NullType(object):
"""Similar to NoneType with a corresponding singleton instance
'Null' that, unlike None, accepts any message and returns itself.
Examples:
>>> Null("send", "a", "message")("and one more",
... "and what you get still") is Null
True
"""
def __new__(cls): return Null
def __call__(self, *args, **kwargs): return Null
## def __getstate__(self, *args): return Null
def __getattr__(self, attr): return Null
def __getitem__(self, item): return Null
def __setattr__(self, attr, value): pass
def __setitem__(self, item, value): pass
def __len__(self): return 0
# FIXME: is this a python bug? otherwise ``for x in Null: pass``
# never terminates...
def __iter__(self): return iter([])
def __contains__(self, item): return False
def __repr__(self): return "Null"
Null = object.__new__(NullType)
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=Null, previous=Null):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = Null
self.previousSibling = Null
self.nextSibling = Null
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def findNext(self, name=None, attrs={}, text=None):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._first(self.fetchNext, name, attrs, text)
firstNext = findNext
def fetchNext(self, name=None, attrs={}, text=None, limit=None):
"""Returns all items that match the given criteria and appear
before after Tag in the document."""
return self._fetch(name, attrs, text, limit, self.nextGenerator)
def findNextSibling(self, name=None, attrs={}, text=None):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._first(self.fetchNextSiblings, name, attrs, text)
firstNextSibling = findNextSibling
def fetchNextSiblings(self, name=None, attrs={}, text=None, limit=None):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._fetch(name, attrs, text, limit, self.nextSiblingGenerator)
def findPrevious(self, name=None, attrs={}, text=None):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._first(self.fetchPrevious, name, attrs, text)
def fetchPrevious(self, name=None, attrs={}, text=None, limit=None):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._fetch(name, attrs, text, limit, self.previousGenerator)
firstPrevious = findPrevious
def findPreviousSibling(self, name=None, attrs={}, text=None):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._first(self.fetchPreviousSiblings, name, attrs, text)
firstPreviousSibling = findPreviousSibling
def fetchPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._fetch(name, attrs, text, limit,
self.previousSiblingGenerator)
def findParent(self, name=None, attrs={}):
"""Returns the closest parent of this Tag that matches the given
criteria."""
r = Null
l = self.fetchParents(name, attrs, 1)
if l:
r = l[0]
return r
firstParent = findParent
def fetchParents(self, name=None, attrs={}, limit=None):
"""Returns the parents of this Tag that match the given
criteria."""
return self._fetch(name, attrs, None, limit, self.parentGenerator)
#These methods do the real heavy lifting.
def _first(self, method, name, attrs, text):
r = Null
l = method(name, attrs, text, 1)
if l:
r = l[0]
return r
def _fetch(self, name, attrs, text, limit, generator):
"Iterates over a generator looking for things that match."
if not hasattr(attrs, 'items'):
attrs = {'class' : attrs}
results = []
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
found = None
if isinstance(i, Tag):
if not text:
if not name or self._matches(i, name):
match = True
for attr, matchAgainst in attrs.items():
check = i.get(attr)
if not self._matches(check, matchAgainst):
match = False
break
if match:
found = i
elif text:
if self._matches(i, text):
found = i
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#Generators that can be used to navigate starting from both
#NavigableTexts and Tags.
def nextGenerator(self):
i = self
while i:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i:
i = i.parent
yield i
def _matches(self, chunk, howToMatch):
#print 'looking for %s in %s' % (howToMatch, chunk)
#
# If given a list of items, return true if the list contains a
# text element that matches.
if isList(chunk) and not isinstance(chunk, Tag):
for tag in chunk:
if isinstance(tag, NavigableText) and self._matches(tag, howToMatch):
return True
return False
if callable(howToMatch):
return howToMatch(chunk)
if isinstance(chunk, Tag):
#Custom match methods take the tag as an argument, but all other
#ways of matching match the tag name as a string
chunk = chunk.name
#Now we know that chunk is a string
if not isinstance(chunk, basestring):
chunk = str(chunk)
if hasattr(howToMatch, 'match'):
# It's a regexp object.
return howToMatch.search(chunk)
if isList(howToMatch):
return chunk in howToMatch
if hasattr(howToMatch, 'items'):
return howToMatch.has_key(chunk)
#It's just a string
return str(howToMatch) == chunk
class NavigableText(PageElement):
def __getattr__(self, attr):
"For backwards compatibility, text.string gives you text"
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
class NavigableString(str, NavigableText):
pass
class NavigableUnicodeString(unicode, NavigableText):
pass
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, name, attrs=None, parent=Null, previous=Null):
"Basic constructor."
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
fetch() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.fetch, args, kwargs)
def __getattr__(self, tag):
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.first(tag[:-3])
elif tag.find('__') != 0:
return self.first(tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or \
           not hasattr(other, 'contents') or self.name != other.name or \
           self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self):
"""Renders this tag as a string."""
return str(self)
def __unicode__(self):
return self.__str__(1)
def __str__(self, needUnicode=None, showStructureIndent=None):
"""Returns a string or Unicode representation of this tag and
its contents.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
attrs = []
if self.attrs:
for key, val in self.attrs:
attrs.append('%s="%s"' % (key, val))
close = ''
closeTag = ''
if self.isSelfClosing():
close = ' /'
else:
closeTag = '</%s>' % self.name
indentIncrement = None
if showStructureIndent != None:
indentIncrement = showStructureIndent
if not self.hidden:
indentIncrement += 1
contents = self.renderContents(indentIncrement, needUnicode=needUnicode)
if showStructureIndent:
space = '\n%s' % (' ' * showStructureIndent)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if showStructureIndent:
s.append(space)
s.append('<%s%s%s>' % (self.name, attributeString, close))
s.append(contents)
if closeTag and showStructureIndent != None:
s.append(space)
s.append(closeTag)
s = ''.join(s)
isUnicode = type(s) == types.UnicodeType
if needUnicode and not isUnicode:
s = unicode(s)
elif isUnicode and needUnicode==False:
s = str(s)
return s
def prettify(self, needUnicode=None):
return self.__str__(needUnicode, showStructureIndent=True)
def renderContents(self, showStructureIndent=None, needUnicode=None):
"""Renders the contents of this tag as a (possibly Unicode)
string."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableUnicodeString) or type(c) == types.UnicodeType:
text = unicode(c)
elif isinstance(c, Tag):
s.append(c.__str__(needUnicode, showStructureIndent))
elif needUnicode:
text = unicode(c)
else:
text = str(c)
if text:
if showStructureIndent != None:
if text[-1] == '\n':
text = text[:-1]
s.append(text)
return ''.join(s)
#Soup methods
def firstText(self, text, recursive=True):
"""Convenience method to retrieve the first piece of text matching the
given criteria. 'text' can be a string, a regular expression object,
a callable that takes a string and returns whether or not the
string 'matches', etc."""
return self.first(recursive=recursive, text=text)
def fetchText(self, text, recursive=True, limit=None):
"""Convenience method to retrieve all pieces of text matching the
given criteria. 'text' can be a string, a regular expression object,
a callable that takes a string and returns whether or not the
string 'matches', etc."""
return self.fetch(recursive=recursive, text=text, limit=limit)
def first(self, name=None, attrs={}, recursive=True, text=None):
"""Return only the first child of this
Tag matching the given criteria."""
r = Null
l = self.fetch(name, attrs, recursive, text, 1)
if l:
r = l[0]
return r
findChild = first
def fetch(self, name=None, attrs={}, recursive=True, text=None,
limit=None):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._fetch(name, attrs, text, limit, generator)
fetchChildren = fetch
#Utility methods
def isSelfClosing(self):
"""Returns true iff this is a self-closing tag as defined in the HTML
standard.
TODO: This is specific to BeautifulSoup and its subclasses, but it's
used by __str__"""
return self.name in BeautifulSoup.SELF_CLOSING_TAGS
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.contents.append(tag)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not hasattr(self, 'attrMap'): #hasattr, not getattr: a fresh Tag has no attrMap yet
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
for i in range(0, len(self.contents)):
yield self.contents[i]
raise StopIteration
def recursiveChildGenerator(self):
stack = [(self, 0)]
while stack:
tag, start = stack.pop()
if isinstance(tag, Tag):
for i in range(start, len(tag.contents)):
a = tag.contents[i]
yield a
if isinstance(a, Tag) and tag.contents:
if i < len(tag.contents) - 1:
stack.append((tag, i+1))
stack.append((a, 0))
break
raise StopIteration
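#A minimal sketch of Tag's dict-style attribute API; the tag name and
#attribute values below are invented for illustration:
def _exampleTagAttributeAccess():
    tag = Tag('a', [('href', 'http://example.com/')])
    assert tag.get('href') == 'http://example.com/'  #get() with a default
    assert tag['href'] == 'http://example.com/'      #__getitem__
    tag['rel'] = 'nofollow'   #__setitem__ keeps attrs and attrMap in sync
    del tag['rel']            #__delitem__ removes every 'rel' attribute
    assert tag.get('rel', 'missing') == 'missing'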
def isList(l):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is listlike."""
return hasattr(l, '__iter__') \
or (type(l) in (types.ListType, types.TupleType))
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS and NESTABLE_TAGS maps out
of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
for k,v in portion.items():
built[k] = v
elif isList(portion):
#It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
#It's a scalar. Map it to the default.
built[portion] = default
return built
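#A minimal sketch of what buildTagMap produces; the tag names here are
#invented for illustration:
def _exampleBuildTagMap():
    built = buildTagMap([], {'li': ['ul', 'ol']}, ['p', 'div'], 'span')
    #the map is merged as-is; list items and the scalar map to the default
    assert built == {'li': ['ul', 'ol'], 'p': [], 'div': [], 'span': []}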
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and fetch code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
#As a public service we will by default silently replace MS smart quotes
#and similar characters with their HTML or ASCII equivalents.
MS_CHARS = { '\x80' : '€',
'\x81' : ' ',
'\x82' : '‚',
'\x83' : 'ƒ',
'\x84' : '„',
'\x85' : '…',
'\x86' : '†',
'\x87' : '‡',
'\x88' : '⁁',
'\x89' : '%',
'\x8A' : 'Š',
'\x8B' : '<',
'\x8C' : 'Œ',
'\x8D' : '?',
'\x8E' : 'Z',
'\x8F' : '?',
'\x90' : '?',
'\x91' : '‘',
'\x92' : '’',
'\x93' : '“',
'\x94' : '”',
'\x95' : '•',
'\x96' : '–',
'\x97' : '—',
'\x98' : '˜',
'\x99' : '™',
'\x9a' : 'š',
'\x9b' : '>',
'\x9c' : 'œ',
'\x9d' : '?',
'\x9e' : 'z',
'\x9f' : 'Ÿ',}
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>'),
(re.compile("([\x80-\x9f])"),
lambda(x): BeautifulStoneSoup.MS_CHARS.get(x.group(1)))
]
ROOT_TAG_NAME = '[document]'
def __init__(self, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
"""Initialize this as the 'root tag' and feed in any text to
the parser.
NOTE about avoidParserProblems: sgmllib will process most bad
HTML, and BeautifulSoup has tricks for dealing with some HTML
that kills sgmllib, but Beautiful Soup can nonetheless choke
or lose data if your data uses self-closing tags or
declarations incorrectly. By default, Beautiful Soup sanitizes
its input to avoid the vast majority of these problems. The
problems are relatively rare, even in bad HTML, so feel free
to pass in False to avoidParserProblems if they don't apply to
you, and you'll get better performance. The only reason I have
this turned on by default is so I don't get so many tech
support questions.
The two most common instances of invalid HTML that will choke
sgmllib are fixed by the default parser massage techniques:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
Tag.__init__(self, self.ROOT_TAG_NAME)
if avoidParserProblems \
and not isList(avoidParserProblems):
avoidParserProblems = self.PARSER_MASSAGE
self.avoidParserProblems = avoidParserProblems
SGMLParser.__init__(self)
self.quoteStack = []
self.hidden = 1
self.reset()
if hasattr(text, 'read'):
#It's a file-type object.
text = text.read()
if text:
self.feed(text)
if initialTextIsEverything:
self.done()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
elif methodName.find('__') != 0:
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def feed(self, text):
if self.avoidParserProblems:
for fix, m in self.avoidParserProblems:
text = fix.sub(m, text)
SGMLParser.feed(self, text)
def done(self):
"""Called when you're done parsing, so that the unclosed tags can be
correctly processed."""
self.endData() #NEW
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableText):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self):
currentData = ''.join(self.currentData)
if currentData:
if not currentData.strip():
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
c = NavigableString
if type(currentData) == types.UnicodeType:
c = NavigableUnicodeString
o = c(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
self.currentData = []
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<p>Foo<table>Bar<p> should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar<p> should pop to 'tr', not 'p'.
<p>Foo<b>Bar<p> should pop to 'p', not 'b'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurrence.
popTo = name
break
if (nestingResetTriggers != None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers == None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s" % name
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not name in self.SELF_CLOSING_TAGS and not selfClosing:
self._smartPop(name)
tag = Tag(name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or name in self.SELF_CLOSING_TAGS:
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
def unknown_endtag(self, name):
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def handle_pi(self, text):
"Propagate processing instructions right through."
self.handle_data("<?%s>" % text)
def handle_comment(self, text):
"Propagate comments right through."
self.handle_data("<!--%s-->" % text)
def handle_charref(self, ref):
"Propagate char refs right through."
self.handle_data('&#%s;' % ref)
def handle_entityref(self, ref):
"Propagate entity refs right through."
self.handle_data('&%s;' % ref)
def handle_decl(self, data):
"Propagate DOCTYPEs and the like right through."
self.handle_data('<!%s>' % data)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as regular data."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
self.handle_data(self.rawdata[i+9:k])
j = k+3
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
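#A minimal sketch of the closing rule described in the class docstring:
#closing a tag closes every tag it encloses.
def _exampleStoneSoupNesting():
    soup = BeautifulStoneSoup('<foo><bar></foo>')
    #the unclosed <bar> is closed when </foo> is seen
    assert str(soup) == '<foo><bar></bar></foo>'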
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (ie. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurrence of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurrence
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup before writing your own
subclass."""
SELF_CLOSING_TAGS = buildTagMap(None, ['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
QUOTE_TAGS = {'script': None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center']
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
}
NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
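#A minimal sketch of the paragraph rule described in the class docstring:
#a second <p> implicitly closes the first, and done() closes whatever is
#still open at end of input.
def _exampleParagraphNesting():
    soup = BeautifulSoup('<p>Para1<p>Para2')
    assert str(soup) == '<p>Para1</p><p>Para2</p>'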
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""The BeautifulSoup class is oriented towards skipping over
common HTML errors like unclosed tags. However, sometimes it makes
errors of its own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close (eg.) a 'b'
tag than to actually use nested 'b' tags, and the BeautifulSoup
class handles the common case. This class handles the
not-so-common case: where you can't believe someone wrote what
they did, but it's valid HTML and BeautifulSoup screwed up by
assuming it wouldn't be.
If this doesn't do what you need, try subclassing this class or
BeautifulSoup, and providing your own list of NESTABLE_TAGS."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big']
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
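#A minimal sketch contrasting the two parsers on the docstring's example;
#the expected strings follow from the nesting maps defined above.
def _exampleNestedBoldTags():
    markup = '<b>Foo<b>Bar</b></b>'
    #BeautifulSoup assumes the author forgot to close the first <b>
    assert str(BeautifulSoup(markup)) == '<b>Foo</b><b>Bar</b>'
    #ICantBelieveItsBeautifulSoup takes the nesting at face value
    assert str(ICantBelieveItsBeautifulSoup(markup)) == '<b>Foo<b>Bar</b></b>'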
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableText) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
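#A minimal sketch of the promotion described in the class docstring:
def _exampleSoapPromotion():
    soup = BeautifulSOAP('<foo><bar>baz</bar></foo>')
    #the single string child of <bar> is pushed up as an attribute of <foo>
    assert soup.fooTag['bar'] == 'baz'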
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisitude,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
###
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin.read())
print soup.prettify()
|
SimonTheCoder/server_monitor
|
refs/heads/master
|
auto_swap.py
|
1
|
#!/usr/bin/python
import sys
import subprocess
import threading
import monitor
import json
import time
class Looper:
def __init__(self,exec_path, config_path, duration, host_list):
self.exec_path = exec_path
self.config_path = config_path
self.duration = duration
self.host_list = host_list
self.started = False
self.thread = threading.Thread(target=self.loop)
self.thread.setDaemon(True)
self.monitor = monitor.Monitor(host_list)
self.last_fast = None
self.popen = None
def loop(self):
while self.started is True:
#find fastest server
states = self.monitor.check_all()
if states[0].state > 0:
fast_ip = states[0].ip
if self.last_fast == fast_ip:
print "still the same one."
#print self.popen.stdout.readline()
else:
#load config file
config_dict = None
with open(self.config_path, "r") as config_fd:
config_dict = json.load(config_fd)
print "server: %s -> %s " % (config_dict["server"], fast_ip)
self.last_fast = fast_ip
config_dict["server"] = fast_ip
with open(self.config_path, "w") as config_fd:
json.dump(config_dict, config_fd)
if self.popen is not None:
print "restarting..."
self.popen.terminate()
else:
print "starting..."
#(re)spawn the process in either case so the new config takes effect
self.popen = subprocess.Popen([self.exec_path, "-c", self.config_path], stderr=subprocess.PIPE)
err_times = 0
for stderr_line in iter(self.popen.stderr.readline, ""):
print "ss:" + stderr_line,
if stderr_line.find("ERROR") != -1:
err_times = err_times + 1
print "%d Errors found. check servers!" % (err_times)
if err_times > 10:
print "error times > 10, recheck..."
err_times = 0
break
single_server = monitor.Server(fast_ip)
single_server_state = single_server.check_state()
if single_server_state.state >0:
print "server still working."
continue
else:
print "server down."
print single_server_state
break
else:
print "no available host found!!!!"
time.sleep(self.duration)
def start_looper(self, using_thread=False):
self.started = True
if using_thread is True:
#run the loop on the background daemon thread created in __init__
self.thread.start()
else:
self.loop()
def stop_looper(self):
self.started = False
pass
if __name__ == "__main__":
if len(sys.argv) == 5:
exec_path = sys.argv[1]
config_path = sys.argv[2]
duration = int(sys.argv[3],10)
host_list = sys.argv[4]
print "exec: %s\nconf: %s\nduration: %s\nhost_list: %s\n" % (exec_path, config_path, duration, host_list)
looper = Looper(exec_path, config_path, duration, host_list)
looper.start_looper()
|
github4ry/pysentiment
|
refs/heads/master
|
pysentiment/__init__.py
|
3
|
from pysentiment.hiv4 import HIV4
from pysentiment.lm import LM
|
thiagorcdl/AdventOfCode
|
refs/heads/master
|
2015/adv7.py
|
1
|
#!/usr/bin/python2
import sys
f = open('./input7.txt', 'r')
var = {}
def AND(a, b):
return val(a) & val(b)
def OR(a, b):
return val(a) | val(b)
def LSHIFT(a, n):
return val(a) * (2 ** int(n))
def RSHIFT(a, n):
return val(a) / (2 ** int(n))
def val(key):
try:
return int(key)
except ValueError: # not a literal number, so resolve it as a wire name
args = var[key]
if isinstance(args, type(1)):
return args
elif len(args) == 1: # 4000 -> a
var[key] = 0x0000ffff & val(args[0])
elif len(args) == 2: # NOT a -> b
var[key] = 0x0000ffff & ~ val(args[1])
else: # a OP n -> c
var[key] = 0x0000ffff & eval("%s(args[0],args[2])" % args[1])
return var[key]
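# A minimal sketch of the evaluator on made-up sample wires (not the real
# puzzle input); 123 AND 456 is 72:
def _example_eval():
    var.update({'x': ['123'], 'y': ['456'], 'd': ['x', 'AND', 'y']})
    assert val('d') == 72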
while True:
line = f.readline().rstrip()
if not line:
break
args = line.split(' ')
var[args[-1]] = args[:-2]
if len(sys.argv) > 1 and sys.argv[1] == '2':
# back up the variables dict, run part 1, restore the dict, and assign part 1's answer to wire b
var2 = var.copy()
val_a = val('a')
var = var2
var['b'] = val_a
print 0x0000ffff & val('a')
|
Matt-Deacalion/Rasa-Django
|
refs/heads/master
|
website/apps/core/views.py
|
1
|
from django.views.generic import TemplateView
class HomeView(TemplateView):
template_name = 'base.html'
|
untitaker/pytest
|
refs/heads/master
|
testing/test_unittest.py
|
9
|
import pytest
def test_simple_unittest(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def testpassing(self):
self.assertEquals('foo', 'foo')
def test_failing(self):
self.assertEquals('foo', 'bar')
""")
reprec = testdir.inline_run(testpath)
assert reprec.matchreport("testpassing").passed
assert reprec.matchreport("test_failing").failed
def test_runTest_method(testdir):
testdir.makepyfile("""
import unittest
class MyTestCaseWithRunTest(unittest.TestCase):
def runTest(self):
self.assertEquals('foo', 'foo')
class MyTestCaseWithoutRunTest(unittest.TestCase):
def runTest(self):
self.assertEquals('foo', 'foo')
def test_something(self):
pass
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines("""
*MyTestCaseWithRunTest::runTest*
*MyTestCaseWithoutRunTest::test_something*
*2 passed*
""")
def test_isclasscheck_issue53(testdir):
testpath = testdir.makepyfile("""
import unittest
class _E(object):
def __getattr__(self, tag):
pass
E = _E()
""")
result = testdir.runpytest(testpath)
assert result.ret == 0
def test_setup(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def setUp(self):
self.foo = 1
def setup_method(self, method):
self.foo2 = 1
def test_both(self):
self.assertEquals(1, self.foo)
assert self.foo2 == 1
def teardown_method(self, method):
assert 0, "42"
""")
reprec = testdir.inline_run("-s", testpath)
assert reprec.matchreport("test_both", when="call").passed
rep = reprec.matchreport("test_both", when="teardown")
assert rep.failed and '42' in str(rep.longrepr)
def test_setUpModule(testdir):
testpath = testdir.makepyfile("""
l = []
def setUpModule():
l.append(1)
def tearDownModule():
del l[0]
def test_hello():
assert l == [1]
def test_world():
assert l == [1]
""")
result = testdir.runpytest(testpath)
result.stdout.fnmatch_lines([
"*2 passed*",
])
def test_setUpModule_failing_no_teardown(testdir):
testpath = testdir.makepyfile("""
l = []
def setUpModule():
0/0
def tearDownModule():
l.append(1)
def test_hello():
pass
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=0, failed=1)
call = reprec.getcalls("pytest_runtest_setup")[0]
assert not call.item.module.l
def test_new_instances(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
def test_func1(self):
self.x = 2
def test_func2(self):
assert not hasattr(self, 'x')
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_teardown(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
l = []
def test_one(self):
pass
def tearDown(self):
self.l.append(None)
class Second(unittest.TestCase):
def test_check(self):
self.assertEquals(MyTestCase.l, [None])
""")
reprec = testdir.inline_run(testpath)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0, failed
assert passed == 2
assert passed + skipped + failed == 2
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_skip_issue148(testdir):
testpath = testdir.makepyfile("""
import unittest
@unittest.skip("hello")
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
xxx
def test_one(self):
pass
@classmethod
def tearDownClass(self):
xxx
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(skipped=1)
def test_method_and_teardown_failing_reporting(testdir):
testdir.makepyfile("""
import unittest, pytest
class TC(unittest.TestCase):
def tearDown(self):
assert 0, "down1"
def test_method(self):
assert False, "down2"
""")
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines([
"*tearDown*",
"*assert 0*",
"*test_method*",
"*assert False*",
"*1 failed*1 error*",
])
def test_setup_failure_is_shown(testdir):
testdir.makepyfile("""
import unittest
import pytest
class TC(unittest.TestCase):
def setUp(self):
assert 0, "down1"
def test_method(self):
print ("never42")
xyz
""")
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines([
"*setUp*",
"*assert 0*down1*",
"*1 failed*",
])
assert 'never42' not in result.stdout.str()
def test_setup_setUpClass(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
@classmethod
def tearDownClass(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
def test_setup_class(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
def setup_class(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
def teardown_class(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_adderrorandfailure_defers(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import pytest
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
try:
result.add%s(self, excinfo._excinfo)
except KeyboardInterrupt:
raise
except:
pytest.fail("add%s should not raise")
def test_hello(self):
pass
""" % (type, type))
result = testdir.runpytest()
assert 'should not raise' not in result.stdout.str()
@pytest.mark.parametrize("type", ['Error', 'Failure'])
def test_testcase_custom_exception_info(testdir, type):
testdir.makepyfile("""
from unittest import TestCase
import py, pytest
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
# we fake an incompatible exception info
from _pytest.monkeypatch import monkeypatch
mp = monkeypatch()
def t(*args):
mp.undo()
raise TypeError()
mp.setattr(py.code, 'ExceptionInfo', t)
try:
excinfo = excinfo._excinfo
result.add%(type)s(self, excinfo)
finally:
mp.undo()
def test_hello(self):
pass
""" % locals())
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"NOTE: Incompatible Exception Representation*",
"*ZeroDivisionError*",
"*1 failed*",
])
def test_testcase_totally_incompatible_exception_info(testdir):
item, = testdir.getitems("""
from unittest import TestCase
class MyTestCase(TestCase):
def test_hello(self):
pass
""")
item.addError(None, 42)
excinfo = item._excinfo.pop(0)
assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr())
def test_module_level_pytestmark(testdir):
testpath = testdir.makepyfile("""
import unittest
import pytest
pytestmark = pytest.mark.xfail
class MyTestCase(unittest.TestCase):
def test_func1(self):
assert 0
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testcase_skip_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
skip = 'dont run'
def test_func(self):
pass
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_skip_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
pass
test_func.skip = 'dont run'
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testcase_todo_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
todo = 'dont run'
def test_func(self):
assert 0
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_todo_property(testdir):
pytest.importorskip('twisted.trial.unittest')
testpath = testdir.makepyfile("""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
assert 0
test_func.todo = 'dont run'
""")
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
class TestTrialUnittest:
def setup_class(cls):
cls.ut = pytest.importorskip("twisted.trial.unittest")
def test_trial_testcase_runtest_not_collected(self, testdir):
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def test_hello(self):
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def runTest(self):
pass
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_trial_exceptions_with_skips(self, testdir):
testdir.makepyfile("""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
pytest.skip("skip_in_method")
@pytest.mark.skipif("sys.version_info != 1")
def test_hello2(self):
pass
@pytest.mark.xfail(reason="iwanto")
def test_hello3(self):
assert 0
def test_hello4(self):
pytest.xfail("i2wanto")
def test_trial_skip(self):
pass
test_trial_skip.skip = "trialselfskip"
def test_trial_todo(self):
assert 0
test_trial_todo.todo = "mytodo"
def test_trial_todo_success(self):
pass
test_trial_todo_success.todo = "mytodo"
class TC2(unittest.TestCase):
def setup_class(cls):
pytest.skip("skip_in_setup_class")
def test_method(self):
pass
""")
result = testdir.runpytest("-rxs")
assert result.ret == 0
result.stdout.fnmatch_lines_random([
"*XFAIL*test_trial_todo*",
"*trialselfskip*",
"*skip_in_setup_class*",
"*iwanto*",
"*i2wanto*",
"*sys.version_info*",
"*skip_in_method*",
"*4 skipped*3 xfail*1 xpass*",
])
def test_trial_error(self, testdir):
testdir.makepyfile("""
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet import reactor
class TC(TestCase):
def test_one(self):
crash
def test_two(self):
def f(_):
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
def test_three(self):
def f():
pass # will never get called
reactor.callLater(0.3, f)
# will crash at teardown
def test_four(self):
def f(_):
reactor.callLater(0.3, f)
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
# will crash both at test time and at teardown
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*ERRORS*",
"*DelayedCalls*",
"*test_four*",
"*NameError*crash*",
"*test_one*",
"*NameError*crash*",
"*test_three*",
"*DelayedCalls*",
"*test_two*",
"*crash*",
])
def test_trial_pdb(self, testdir):
p = testdir.makepyfile("""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
assert 0, "hellopdb"
""")
child = testdir.spawn_pytest(p)
child.expect("hellopdb")
child.sendeof()
def test_djangolike_testcase(testdir):
# contributed from Morten Breekevold
testdir.makepyfile("""
from unittest import TestCase, main
class DjangoLikeTestCase(TestCase):
def setUp(self):
print ("setUp()")
def test_presetup_has_been_run(self):
print ("test_thing()")
self.assertTrue(hasattr(self, 'was_presetup'))
def tearDown(self):
print ("tearDown()")
def __call__(self, result=None):
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
super(DjangoLikeTestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
print ("_pre_setup()")
self.was_presetup = True
def _post_teardown(self):
print ("_post_teardown()")
""")
result = testdir.runpytest("-s")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*_pre_setup()*",
"*setUp()*",
"*test_thing()*",
"*tearDown()*",
"*_post_teardown()*",
])
def test_unittest_not_shown_in_traceback(testdir):
testdir.makepyfile("""
import unittest
class t(unittest.TestCase):
def test_hello(self):
x = 3
self.assertEquals(x, 4)
""")
res = testdir.runpytest()
assert "failUnlessEqual" not in res.stdout.str()
def test_unorderable_types(testdir):
testdir.makepyfile("""
import unittest
class TestJoinEmpty(unittest.TestCase):
pass
def make_test():
class Test(unittest.TestCase):
pass
Test.__name__ = "TestFoo"
return Test
TestFoo = make_test()
""")
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
assert result.ret == 0
def test_unittest_typerror_traceback(testdir):
testdir.makepyfile("""
import unittest
class TestJoinEmpty(unittest.TestCase):
def test_hello(self, arg1):
pass
""")
result = testdir.runpytest()
assert "TypeError" in result.stdout.str()
assert result.ret == 1
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_unexpected_failure(testdir):
testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_func1(self):
assert 0
@unittest.expectedFailure
def test_func2(self):
assert 1
""")
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines([
"*XFAIL*MyTestCase*test_func1*",
"*XPASS*MyTestCase*test_func2*",
"*1 xfailed*1 xpass*",
])
@pytest.mark.parametrize('fix_type, stmt', [
('fixture', 'return'),
('yield_fixture', 'yield'),
])
def test_unittest_setup_interaction(testdir, fix_type, stmt):
testdir.makepyfile("""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
@pytest.{fix_type}(scope="class", autouse=True)
def perclass(self, request):
request.cls.hello = "world"
{stmt}
@pytest.{fix_type}(scope="function", autouse=True)
def perfunction(self, request):
request.instance.funcname = request.function.__name__
{stmt}
def test_method1(self):
assert self.funcname == "test_method1"
assert self.hello == "world"
def test_method2(self):
assert self.funcname == "test_method2"
def test_classattr(self):
assert self.__class__.hello == "world"
""".format(fix_type=fix_type, stmt=stmt))
result = testdir.runpytest()
result.stdout.fnmatch_lines("*3 passed*")
def test_non_unittest_no_setupclass_support(testdir):
testpath = testdir.makepyfile("""
class TestFoo:
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
def test_method1(self):
assert self.x == 0
@classmethod
def tearDownClass(cls):
cls.x = 1
def test_not_teareddown():
assert TestFoo.x == 0
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_no_teardown_if_setupclass_failed(testdir):
testpath = testdir.makepyfile("""
import unittest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
assert False
def test_func1(self):
cls.x = 10
@classmethod
def tearDownClass(cls):
cls.x = 100
def test_notTornDown():
assert MyTestCase.x == 1
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1, failed=1)
def test_issue333_result_clearing(testdir):
testdir.makeconftest("""
def pytest_runtest_call(__multicall__, item):
__multicall__.execute()
assert 0
""")
testdir.makepyfile("""
import unittest
class TestIt(unittest.TestCase):
def test_func(self):
0/0
""")
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_raise_skip_issue748(testdir):
testdir.makepyfile(test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
def test_one(self):
raise unittest.SkipTest('skipping due to reasons')
""")
result = testdir.runpytest("-v", '-rs')
result.stdout.fnmatch_lines("""
*SKIP*[1]*test_foo.py*skipping due to reasons*
*1 skipped*
""")
|
vabs22/zulip
|
refs/heads/master
|
zerver/views/messages.py
|
1
|
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.utils.timezone import now as timezone_now
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.http import HttpRequest, HttpResponse
from typing import Dict, List, Set, Text, Any, AnyStr, Callable, Iterable, \
Optional, Tuple, Union
from zerver.lib.str_utils import force_text
from zerver.lib.exceptions import JsonableError, ErrorCode
from zerver.lib.html_diff import highlight_html_differences
from zerver.decorator import authenticated_json_post_view, has_request_variables, \
REQ, to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients, truncate_body, render_incoming_message, do_delete_message
from zerver.lib.queue import queue_json_publish
from zerver.lib.cache import (
generic_bulk_cached_fetch,
to_dict_cache_key_id,
)
from zerver.lib.message import (
access_message,
MessageDict,
extract_message_dict,
render_markdown,
stringify_message_dict,
)
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Realm, RealmDomain, Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_stream, parse_usermessage_flags, email_to_domain, get_realm, get_active_streams, \
bulk_get_streams, get_user_including_cross_realm
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias, Selectable, Select, ColumnElement, table
import re
import ujson
import datetime
from six.moves import map
import six
LARGER_THAN_MAX_MESSAGE_ID = 10000000000000000
class BadNarrowOperator(JsonableError):
code = ErrorCode.BAD_NARROW
data_fields = ['desc']
def __init__(self, desc):
# type: (str) -> None
self.desc = desc # type: str
@staticmethod
def msg_format():
# type: () -> str
return _('Invalid narrow operator: {desc}')
Query = Any # TODO: Should be Select, but sqlalchemy stubs are busted
ConditionTransform = Any # TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
'''
Build up a SQLAlchemy query to find messages matching a narrow.
'''
# This class has an important security invariant:
#
# None of these methods ever *add* messages to a query's result.
#
# That is, the `add_term` method, and its helpers the `by_*` methods,
# are passed a Query object representing a query for messages; they may
# call some methods on it, and then they return a resulting Query
# object. Things these methods may do to the queries they handle
# include
# * add conditions to filter out rows (i.e., messages), with `query.where`
# * add columns for more information on the same message, with `query.column`
# * add a join for more information on the same message
#
# Things they may not do include
# * anything that would pull in additional rows, or information on
# other messages.
def __init__(self, user_profile, msg_id_column):
# type: (UserProfile, str) -> None
self.user_profile = user_profile
self.msg_id_column = msg_id_column
self.user_realm = user_profile.realm
def add_term(self, query, term):
# type: (Query, Dict[str, Any]) -> Query
"""
Extend the given query to one narrowed by the given term, and return the result.
This method satisfies an important security property: the returned
query never includes a message that the given query didn't. In
particular, if the given query will only find messages that a given
user can legitimately see, then so will the returned query.
"""
# To maintain the security property, we hold all the `by_*`
# methods to the same criterion. See the class's block comment
# for details.
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
def by_has(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'private':
# The `.select_from` method extends the query with a join.
query = query.select_from(join(query.froms[0], table("zerver_recipient"),
column("recipient_id") ==
literal_column("zerver_recipient.id")))
cond = or_(column("type") == Recipient.PERSONAL,
column("type") == Recipient.HUDDLE)
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'unread':
cond = column("flags").op("&")(UserMessage.flags.read.mask) == 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned' or operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern):
# type: (Text) -> Text
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
them for postgres, u'\u03bb' to u'\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
def by_stream(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
try:
stream = get_stream(operand, self.user_profile.realm)
except Stream.DoesNotExist:
raise BadNarrowOperator('unknown stream ' + operand)
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to "social" to also show messages to
# /^(un)*social(.d)*$/ (unsocial, ununsocial, social.d, ...).
# In `ok_to_include_history`, we assume that a non-negated
# `stream` term for a public stream will limit the query to
# that specific stream. So it would be a bug to hit this
# codepath after relying on this term there. But all streams in
# a Zephyr realm are private, so that doesn't happen.
assert(not stream.is_public())
m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
# Since the regex has a `.+` in it and "" is invalid as a
# stream name, this will always match
assert(m is not None)
base_stream_name = m.group(1)
matching_streams = get_active_streams(self.user_profile.realm).filter(
name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
return query.where(maybe_negate(cond))
recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
def by_topic(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
# (foo, foo.d, foo.d.d, etc)
m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
# Since the regex has a `.*` in it, this will always match
assert(m is not None)
base_topic = m.group(1)
# Additionally, MIT users expect the empty instance and
# instance "personal" to be the same.
if base_topic in ('', 'personal', '(instance "")'):
cond = or_(
func.upper(column("subject")) == func.upper(literal("")),
func.upper(column("subject")) == func.upper(literal(".d")),
func.upper(column("subject")) == func.upper(literal(".d.d")),
func.upper(column("subject")) == func.upper(literal(".d.d.d")),
func.upper(column("subject")) == func.upper(literal(".d.d.d.d")),
func.upper(column("subject")) == func.upper(literal("personal")),
func.upper(column("subject")) == func.upper(literal("personal.d")),
func.upper(column("subject")) == func.upper(literal("personal.d.d")),
func.upper(column("subject")) == func.upper(literal("personal.d.d.d")),
func.upper(column("subject")) == func.upper(literal("personal.d.d.d.d")),
func.upper(column("subject")) == func.upper(literal('(instance "")')),
func.upper(column("subject")) == func.upper(literal('(instance "").d')),
func.upper(column("subject")) == func.upper(literal('(instance "").d.d')),
func.upper(column("subject")) == func.upper(literal('(instance "").d.d.d')),
func.upper(column("subject")) == func.upper(literal('(instance "").d.d.d.d')),
)
else:
# We limit `.d` counts, since postgres has much better
# query planning for this than they do for a regular
# expression (which would sometimes table scan).
cond = or_(
func.upper(column("subject")) == func.upper(literal(base_topic)),
func.upper(column("subject")) == func.upper(literal(base_topic + ".d")),
func.upper(column("subject")) == func.upper(literal(base_topic + ".d.d")),
func.upper(column("subject")) == func.upper(literal(base_topic + ".d.d.d")),
func.upper(column("subject")) == func.upper(literal(base_topic + ".d.d.d.d")),
)
return query.where(maybe_negate(cond))
cond = func.upper(column("subject")) == func.upper(literal(operand))
return query.where(maybe_negate(cond))
def by_sender(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
try:
sender = get_user_including_cross_realm(operand, self.user_realm)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
cond = column("sender_id") == literal(sender.id)
return query.where(maybe_negate(cond))
def by_near(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
return query
def by_id(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
cond = self.msg_id_column == literal(operand)
return query.where(maybe_negate(cond))
def by_pm_with(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if ',' in operand:
# Huddle
try:
# Ignore our own email if it is in this list
emails = [e.strip() for e in operand.split(',') if e.strip() != self.user_profile.email]
recipient = recipient_for_emails(emails, False,
self.user_profile, self.user_profile)
except ValidationError:
raise BadNarrowOperator('unknown recipient ' + operand)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
else:
# Personal message
self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
if operand == self.user_profile.email:
# Personals with self
cond = and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == self_recipient.id)
return query.where(maybe_negate(cond))
# Personals with other user; include both directions.
try:
narrow_profile = get_user_including_cross_realm(operand, self.user_realm)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
cond = or_(and_(column("sender_id") == narrow_profile.id,
column("recipient_id") == self_recipient.id),
and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == narrow_recipient.id))
return query.where(maybe_negate(cond))
def by_group_pm_with(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
try:
narrow_profile = get_user_including_cross_realm(operand, self.user_realm)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
self_recipient_ids = [
recipient_tuple['recipient_id'] for recipient_tuple
in Subscription.objects.filter(
user_profile=self.user_profile,
recipient__type=Recipient.HUDDLE
).values("recipient_id")]
narrow_recipient_ids = [
recipient_tuple['recipient_id'] for recipient_tuple
in Subscription.objects.filter(
user_profile=narrow_profile,
recipient__type=Recipient.HUDDLE
).values("recipient_id")]
recipient_ids = set(self_recipient_ids) & set(narrow_recipient_ids)
cond = column("recipient_id").in_(recipient_ids)
return query.where(maybe_negate(cond))
def by_search(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if settings.USING_PGROONGA:
return self._by_search_pgroonga(query, operand, maybe_negate)
else:
return self._by_search_tsearch(query, operand, maybe_negate)
def _by_search_pgroonga(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
match_positions_character = func.pgroonga.match_positions_character
query_extract_keywords = func.pgroonga.query_extract_keywords
keywords = query_extract_keywords(operand)
query = query.column(match_positions_character(column("rendered_content"),
keywords).label("content_matches"))
query = query.column(match_positions_character(column("subject"),
keywords).label("subject_matches"))
condition = column("search_pgroonga").op("@@")(operand)
return query.where(maybe_negate(condition))
def _by_search_tsearch(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
ts_locs_array = func.ts_match_locs_array
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
column("rendered_content"),
tsquery).label("content_matches"))
# We HTML-escape the subject in Postgres to avoid doing a server round-trip
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
func.escape_html(column("subject")),
tsquery).label("subject_matches"))
# Do quoted string matching. We really want phrase
# search here so we can ignore punctuation and do
# stemming, but there isn't a standard phrase search
# mechanism in Postgres
for term in re.findall('"[^"]+"|\S+', operand):
if term[0] == '"' and term[-1] == '"':
term = term[1:-1]
term = '%' + connection.ops.prep_for_like_query(term) + '%'
cond = or_(column("content").ilike(term),
column("subject").ilike(term))
query = query.where(maybe_negate(cond))
cond = column("search_tsvector").op("@@")(tsquery)
return query.where(maybe_negate(cond))
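# A minimal sketch of how a narrow term is applied; user_profile and query
# here stand for the kinds of objects NarrowBuilder expects, and 'design'
# is an illustrative stream name:
#
#     builder = NarrowBuilder(user_profile, column("message_id"))
#     query = builder.add_term(query, {'operator': 'stream', 'operand': 'design'})
#
# add_term dispatches to by_stream, which can only narrow the query further.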
# Apparently, the offsets we get from tsearch_extras are counted in
# unicode characters, not in bytes, so we do our processing with text,
# not bytes.
def highlight_string(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> Text
string = force_text(text)
highlight_start = u'<span class="highlight">'
highlight_stop = u'</span>'
pos = 0
result = u''
in_tag = False
for loc in locs:
(offset, length) = loc
for character in string[pos:offset + length]:
if character == u'<':
in_tag = True
elif character == u'>':
in_tag = False
if in_tag:
result += string[pos:offset + length]
else:
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return result
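# For example, with one match location covering "bar":
#     highlight_string(u'foo bar', [(4, 3)])
# returns u'foo <span class="highlight">bar</span>'.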
def get_search_fields(rendered_content, subject, content_matches, subject_matches):
# type: (Text, Text, Iterable[Tuple[int, int]], Iterable[Tuple[int, int]]) -> Dict[str, Text]
return dict(match_content=highlight_string(rendered_content, content_matches),
match_subject=highlight_string(escape_html(subject), subject_matches))
def narrow_parameter(json):
# type: (str) -> Optional[List[Dict[str, Any]]]
data = ujson.loads(json)
if not isinstance(data, list):
raise ValueError("argument is not a list")
if len(data) == 0:
# The "empty narrow" should be None, and not []
return None
def convert_term(elem):
# type: (Union[Dict, List]) -> Dict[str, Any]
# We have to support a legacy tuple format.
if isinstance(elem, list):
if (len(elem) != 2 or
any(not isinstance(x, str) and not isinstance(x, Text)
for x in elem)):
raise ValueError("element is not a string pair")
return dict(operator=elem[0], operand=elem[1])
if isinstance(elem, dict):
validator = check_dict([
('operator', check_string),
('operand', check_string),
])
error = validator('elem', elem)
if error:
raise JsonableError(error)
# whitelist the fields we care about for now
return dict(
operator=elem['operator'],
operand=elem['operand'],
negated=elem.get('negated', False),
)
raise ValueError("element is not a dictionary")
return list(map(convert_term, data))
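# A minimal usage sketch (illustrative, not part of the original module):
# both the legacy [operator, operand] pair form and the dict form normalize
# to term dicts; 'negated' defaults to False in the dict form.
def _narrow_parameter_demo():
    legacy = narrow_parameter('[["stream", "devel"]]')
    modern = narrow_parameter(
        '[{"operator": "stream", "operand": "devel", "negated": false}]')
    assert legacy == [dict(operator='stream', operand='devel')]
    assert modern == [dict(operator='stream', operand='devel', negated=False)]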
def is_public_stream(stream_name, realm):
# type: (Text, Realm) -> bool
"""
Determine whether a stream is public, so that
our caller can decide whether we can get
historical messages for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
"""
try:
stream = get_stream(stream_name, realm)
except Stream.DoesNotExist:
return False
return stream.is_public()
def ok_to_include_history(narrow, realm):
# type: (Optional[Iterable[Dict[str, Any]]], Realm) -> bool
# There are occasions where we need to find Message rows that
# have no corresponding UserMessage row, because the user is
# reading a public stream that might include messages that
# were sent while the user was not subscribed, but which they are
# allowed to see. We have to be very careful about constructing
# queries in those situations, so this function should return True
# only if we are 100% sure that we're going to add a clause to the
# query that narrows to a particular public stream on the user's realm.
# If we screw this up, then we can get into a nasty situation of
# polluting our narrow results with messages from other realms.
include_history = False
if narrow is not None:
for term in narrow:
if term['operator'] == "stream" and not term.get('negated', False):
if is_public_stream(term['operand'], realm):
include_history = True
# Disable historical messages if the user is narrowing on anything
# that's a property on the UserMessage table. There cannot be
# historical messages in these cases anyway.
for term in narrow:
if term['operator'] == "is":
include_history = False
return include_history
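# Illustrative sketch (not from the original module), for a hypothetical
# public stream "devel":
#   [{'operator': 'stream', 'operand': 'devel'}]                  -> True
#   [{'operator': 'stream', 'operand': 'devel'},
#    {'operator': 'is', 'operand': 'starred'}]                    -> False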
def get_stream_name_from_narrow(narrow):
# type: (Optional[Iterable[Dict[str, Any]]]) -> Optional[Text]
if narrow is not None:
for term in narrow:
if term['operator'] == 'stream':
return term['operand'].lower()
return None
def exclude_muting_conditions(user_profile, narrow):
# type: (UserProfile, Optional[Iterable[Dict[str, Any]]]) -> List[Selectable]
conditions = []
stream_name = get_stream_name_from_narrow(narrow)
if stream_name is None:
rows = Subscription.objects.filter(
user_profile=user_profile,
active=True,
in_home_view=False,
recipient__type=Recipient.STREAM
).values('recipient_id')
muted_recipient_ids = [row['recipient_id'] for row in rows]
if len(muted_recipient_ids) > 0:
# Only add the condition if we have muted streams to simplify/avoid warnings.
condition = not_(column("recipient_id").in_(muted_recipient_ids))
conditions.append(condition)
muted_topics = ujson.loads(user_profile.muted_topics)
if muted_topics:
if stream_name is not None:
muted_topics = [m for m in muted_topics if m[0].lower() == stream_name]
if not muted_topics:
return conditions
muted_streams = bulk_get_streams(user_profile.realm,
[muted[0] for muted in muted_topics])
muted_recipients = bulk_get_recipients(Recipient.STREAM,
[stream.id for stream in six.itervalues(muted_streams)])
recipient_map = dict((s.name.lower(), muted_recipients[s.id].id)
for s in six.itervalues(muted_streams))
muted_topics = [m for m in muted_topics if m[0].lower() in recipient_map]
if muted_topics:
def mute_cond(muted):
# type: (Tuple[str, str]) -> Selectable
stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
topic_cond = func.upper(column("subject")) == func.upper(muted[1])
return and_(stream_cond, topic_cond)
condition = not_(or_(*list(map(mute_cond, muted_topics))))
return conditions + [condition]
return conditions
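# Illustrative sketch (not from the original module): for a hypothetical
# muted topic ('devel', 'jokes'), the returned list contains a clause
# equivalent to
#   NOT (recipient_id = <devel's recipient id>
#        AND upper(subject) = upper('jokes'))
# which the caller ANDs into the first-unread query.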
@has_request_variables
def get_messages_backend(request, user_profile,
anchor = REQ(converter=int),
num_before = REQ(converter=to_non_negative_int),
num_after = REQ(converter=to_non_negative_int),
narrow = REQ('narrow', converter=narrow_parameter, default=None),
use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
apply_markdown=REQ(default=True,
converter=ujson.loads)):
# type: (HttpRequest, UserProfile, int, int, int, Optional[List[Dict[str, Any]]], bool, bool) -> HttpResponse
include_history = ok_to_include_history(narrow, user_profile.realm)
if include_history and not use_first_unread_anchor:
# The initial query in this case doesn't use `zerver_usermessage`,
# and isn't yet limited to messages the user is entitled to see!
#
# This is OK only because we've made sure this is a narrow that
# will cause us to limit the query appropriately later.
# See `ok_to_include_history` for details.
query = select([column("id").label("message_id")], None, table("zerver_message"))
inner_msg_id_col = literal_column("zerver_message.id")
elif narrow is None and not use_first_unread_anchor:
# This is limited to messages the user received, as recorded in `zerver_usermessage`.
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
table("zerver_usermessage"))
inner_msg_id_col = column("message_id")
else:
# This is limited to messages the user received, as recorded in `zerver_usermessage`.
# TODO: Don't do this join if we're not doing a search
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
join(table("zerver_usermessage"), table("zerver_message"),
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
inner_msg_id_col = column("message_id")
num_extra_messages = 1
is_search = False
if narrow is not None:
# Add some metadata to our logging data for narrows
verbose_operators = []
for term in narrow:
if term['operator'] == "is":
verbose_operators.append("is:" + term['operand'])
else:
verbose_operators.append(term['operator'])
request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)
# Build the query for the narrow
num_extra_messages = 0
builder = NarrowBuilder(user_profile, inner_msg_id_col)
search_term = None # type: Optional[Dict[str, Any]]
for term in narrow:
if term['operator'] == 'search':
if not is_search:
search_term = term
query = query.column(column("subject")).column(column("rendered_content"))
is_search = True
else:
# Join the search operators if there are multiple of them
search_term['operand'] += ' ' + term['operand']
else:
query = builder.add_term(query, term)
if is_search:
query = builder.add_term(query, search_term)
# We add 1 to the number of messages requested if no narrow was
# specified to ensure that the resulting list always contains the
# anchor message. If a narrow was specified, the anchor message
# might not match the narrow anyway.
if num_after != 0:
num_after += num_extra_messages
else:
num_before += num_extra_messages
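    # Illustrative worked example (not from the original module): with
    # anchor=42, num_before=0, num_after=10 and no narrow, num_after becomes
    # 11, so the anchor row (id 42) itself fits in the result set.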
sa_conn = get_sqlalchemy_connection()
if use_first_unread_anchor:
condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0
# We exclude messages on muted topics when finding the first unread
# message in this narrow
muting_conditions = exclude_muting_conditions(user_profile, narrow)
if muting_conditions:
condition = and_(condition, *muting_conditions)
# The mobile app uses narrow=[] and use_first_unread_anchor=True to
# determine what messages to show when you first load the app.
# Unfortunately, this means that if you have a years-old unread
# message, the mobile app could get stuck in the past.
#
# To fix this, we enforce that the "first unread anchor" must be on or
# after the user's current pointer location. Since the pointer
# location refers to the latest the user has read in the home view,
# we'll only apply this logic in the home view (i.e., when narrow is
# empty).
if not narrow:
pointer_condition = inner_msg_id_col >= user_profile.pointer
condition = and_(condition, pointer_condition)
first_unread_query = query.where(condition)
first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
if len(first_unread_result) > 0:
anchor = first_unread_result[0][0]
else:
anchor = LARGER_THAN_MAX_MESSAGE_ID
before_query = None
after_query = None
if num_before != 0:
before_anchor = anchor
if num_after != 0:
# Don't include the anchor in both the before query and the after query
before_anchor = anchor - 1
before_query = query.where(inner_msg_id_col <= before_anchor) \
.order_by(inner_msg_id_col.desc()).limit(num_before)
if num_after != 0:
after_query = query.where(inner_msg_id_col >= anchor) \
.order_by(inner_msg_id_col.asc()).limit(num_after)
if anchor == LARGER_THAN_MAX_MESSAGE_ID:
# There's no need for an after_query if we're targeting just the target message.
after_query = None
if before_query is not None:
if after_query is not None:
query = union_all(before_query.self_group(), after_query.self_group())
else:
query = before_query
elif after_query is not None:
query = after_query
else:
# This can happen when a narrow is specified.
query = query.where(inner_msg_id_col == anchor)
main_query = alias(query)
query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
# This is a hack to tag the query we use for testing
query = query.prefix_with("/* get_messages */")
query_result = list(sa_conn.execute(query).fetchall())
# The following is a little messy, but ensures that the code paths
# are similar regardless of the value of include_history. The
# 'user_messages' dictionary maps each message to the user's
# UserMessage object for that message, which we will attach to the
# rendered message dict before returning it. We attempt to
# bulk-fetch rendered message dicts from remote cache using the
# 'messages' list.
search_fields = dict() # type: Dict[int, Dict[str, Text]]
message_ids = [] # type: List[int]
user_message_flags = {} # type: Dict[int, List[str]]
if include_history:
message_ids = [row[0] for row in query_result]
# TODO: This could be done with an outer join instead of two queries
user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids))
for row in query_result:
message_id = row[0]
if user_message_flags.get(message_id) is None:
user_message_flags[message_id] = ["read", "historical"]
if is_search:
(_, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
else:
for row in query_result:
message_id = row[0]
flags = row[1]
user_message_flags[message_id] = parse_usermessage_flags(flags)
message_ids.append(message_id)
if is_search:
(_, _, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
cache_transformer = lambda row: MessageDict.build_dict_from_raw_db_row(row, apply_markdown)
id_fetcher = lambda row: row['id']
message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
Message.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict)
message_list = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update({"flags": user_message_flags[message_id]})
msg_dict.update(search_fields.get(message_id, {}))
# Make sure that we never send message edit history to clients
# in realms with allow_edit_history disabled.
if "edit_history" in msg_dict and not user_profile.realm.allow_edit_history:
del msg_dict["edit_history"]
message_list.append(msg_dict)
statsd.incr('loaded_old_messages', len(message_list))
ret = {'messages': message_list,
"result": "success",
"msg": ""}
return json_success(ret)
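# Illustrative sketch (not from the original module; the exact route is an
# assumption): a request like
#   GET /json/messages?anchor=42&num_before=5&num_after=5
# yields {'messages': [...], 'result': 'success', 'msg': ''}; when no narrow
# is given, the extra message slot added above guarantees the anchor row
# (id 42) itself is included.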
@has_request_variables
def update_message_flags(request, user_profile,
messages=REQ(validator=check_list(check_int)),
operation=REQ('op'), flag=REQ(),
all=REQ(validator=check_bool, default=False),
stream_name=REQ(default=None),
topic_name=REQ(default=None)):
# type: (HttpRequest, UserProfile, List[int], Text, Text, bool, Optional[Text], Optional[Text]) -> HttpResponse
if all:
target_count_str = "all"
else:
target_count_str = str(len(messages))
log_data_str = "[%s %s/%s]" % (operation, flag, target_count_str)
request._log_data["extra"] = log_data_str
stream = None
if stream_name is not None:
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
raise JsonableError(_('No such stream \'%s\'') % (stream_name,))
if topic_name:
topic_exists = UserMessage.objects.filter(user_profile=user_profile,
message__recipient__type_id=stream.id,
message__recipient__type=Recipient.STREAM,
message__subject__iexact=topic_name).exists()
if not topic_exists:
raise JsonableError(_('No such topic \'%s\'') % (topic_name,))
count = do_update_message_flags(user_profile, operation, flag, messages,
all, stream, topic_name)
# If we succeed, update log data str with the actual count for how
# many messages were updated.
if count != len(messages):
log_data_str = "[%s %s/%s] actually %s" % (operation, flag, target_count_str, count)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'messages': messages,
'msg': ''})
def create_mirrored_message_users(request, user_profile, recipients):
# type: (HttpRequest, UserProfile, Iterable[Text]) -> Tuple[bool, Optional[UserProfile]]
if "sender" not in request.POST:
return (False, None)
sender_email = request.POST["sender"].strip().lower()
referenced_users = set([sender_email])
if request.POST['type'] == 'private':
for email in recipients:
referenced_users.add(email.lower())
if request.client.name == "zephyr_mirror":
user_check = same_realm_zephyr_user
fullname_function = compute_mit_user_fullname
elif request.client.name == "irc_mirror":
user_check = same_realm_irc_user
fullname_function = compute_irc_user_fullname
elif request.client.name in ("jabber_mirror", "JabberMirror"):
user_check = same_realm_jabber_user
fullname_function = compute_jabber_user_fullname
else:
# Unrecognized mirroring client
return (False, None)
for email in referenced_users:
# Check that all referenced users are in our realm:
if not user_check(user_profile, email):
return (False, None)
# Create users for the referenced users, if needed.
for email in referenced_users:
create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
sender = get_user_including_cross_realm(sender_email, user_profile.realm)
return (True, sender)
def same_realm_zephyr_user(user_profile, email):
# type: (UserProfile, Text) -> bool
#
# Are the sender and recipient both addresses in the same Zephyr
# mirroring realm? We have to handle this specially, inferring
# the domain from the e-mail address, because the recipient may
# not exist in Zulip and we may need to make a stub Zephyr
# mirroring user on the fly.
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email)
# Assumes allow_subdomains=False for all RealmDomains corresponding to
# these realms.
return user_profile.realm.is_zephyr_mirror_realm and \
RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_irc_user(user_profile, email):
# type: (UserProfile, Text) -> bool
# Check whether the target email address is an IRC user in the
# same realm as user_profile, i.e. if the domain were example.com,
# the IRC user would need to be username@irc.example.com
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email).replace("irc.", "")
# Assumes allow_subdomains=False for all RealmDomains corresponding to
# these realms.
return RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_jabber_user(user_profile, email):
# type: (UserProfile, Text) -> bool
try:
validators.validate_email(email)
except ValidationError:
return False
# If your Jabber users have a different email domain than the
# Zulip users, this is where you would do any translation.
domain = email_to_domain(email)
# Assumes allow_subdomains=False for all RealmDomains corresponding to
# these realms.
return RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
message_type_name = REQ('type'),
message_to = REQ('to', converter=extract_recipients, default=[]),
forged = REQ(default=False),
subject_name = REQ('subject', lambda x: x.strip(), None),
message_content = REQ('content'),
realm_str = REQ('realm_str', default=None),
local_id = REQ(default=None),
queue_id = REQ(default=None)):
# type: (HttpRequest, UserProfile, Text, List[Text], bool, Optional[Text], Text, Optional[Text], Optional[Text], Optional[Text]) -> HttpResponse
client = request.client
is_super_user = request.user.is_api_super_user
if forged and not is_super_user:
return json_error(_("User not authorized for this query"))
realm = None
if realm_str and realm_str != user_profile.realm.string_id:
if not is_super_user:
# The email gateway bot needs to be able to send messages in
# any realm.
return json_error(_("User not authorized for this query"))
realm = get_realm(realm_str)
if not realm:
return json_error(_("Unknown realm %s") % (realm_str,))
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream (any stream for the Zephyr and Jabber
# mirrors, but only streams with names starting with a "#" for
# IRC mirrors)
#
# The security checks are split between the below code
# (especially create_mirrored_message_users which checks the
# same-realm constraint) and recipient_for_emails (which
# checks that PMs are received by the forwarding user)
if "sender" not in request.POST:
return json_error(_("Missing sender"))
if message_type_name != "private" and not is_super_user:
return json_error(_("User not authorized for this query"))
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user_profile, message_to)
if not valid_input:
return json_error(_("Invalid mirrored message"))
if client.name == "zephyr_mirror" and not user_profile.realm.is_zephyr_mirror_realm:
return json_error(_("Invalid mirrored realm"))
if (client.name == "irc_mirror" and message_type_name != "private" and
not message_to[0].startswith("#")):
return json_error(_("IRC stream names must start with #"))
sender = mirror_sender
else:
sender = user_profile
ret = check_send_message(sender, client, message_type_name, message_to,
subject_name, message_content, forged=forged,
forged_timestamp = request.POST.get('time'),
forwarder_user_profile=user_profile, realm=realm,
local_id=local_id, sender_queue_id=queue_id)
return json_success({"id": ret})
def fill_edit_history_entries(message_history, message):
# type: (List[Dict[str, Any]], Message) -> None
"""This fills out the message edit history entries from the database,
which are designed to have the minimum data possible, to instead
have the current topic + content as of that time, plus data on
whatever changed. This makes it much simpler to do future
processing.
Note that this mutates what is passed to it, which is sort of a bad pattern.
"""
prev_content = message.content
prev_rendered_content = message.rendered_content
prev_topic = message.subject
assert(datetime_to_timestamp(message.last_edit_time) == message_history[0]['timestamp'])
for entry in message_history:
entry['topic'] = prev_topic
if 'prev_subject' in entry:
# We replace use of 'subject' with 'topic' for downstream simplicity
prev_topic = entry['prev_subject']
entry['prev_topic'] = prev_topic
del entry['prev_subject']
entry['content'] = prev_content
entry['rendered_content'] = prev_rendered_content
if 'prev_content' in entry:
del entry['prev_rendered_content_version']
prev_content = entry['prev_content']
prev_rendered_content = entry['prev_rendered_content']
entry['content_html_diff'] = highlight_html_differences(
prev_rendered_content,
entry['rendered_content'])
message_history.append(dict(
topic = prev_topic,
content = prev_content,
rendered_content = prev_rendered_content,
timestamp = datetime_to_timestamp(message.pub_date),
user_id = message.sender_id,
))
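# Illustrative sketch (not from the original module): a stored history entry
# such as {'timestamp': ..., 'prev_subject': 'old topic', 'prev_content': ...}
# comes back with 'topic'/'content' reconstructed as of that edit, plus
# 'prev_topic'/'prev_content' and a rendered HTML diff of the change.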
@has_request_variables
def get_message_edit_history(request, user_profile,
message_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
if not user_profile.realm.allow_edit_history:
return json_error(_("Message edit history is disabled in this organization"))
message, ignored_user_message = access_message(user_profile, message_id)
# Extract the message edit history from the message
message_edit_history = ujson.loads(message.edit_history)
# Fill in all the extra data that will make it usable
fill_edit_history_entries(message_edit_history, message)
return json_success({"message_history": reversed(message_edit_history)})
@has_request_variables
def update_message_backend(request, user_profile,
message_id=REQ(converter=to_non_negative_int),
subject=REQ(default=None),
propagate_mode=REQ(default="change_one"),
content=REQ(default=None)):
# type: (HttpRequest, UserProfile, int, Optional[Text], Optional[str], Optional[Text]) -> HttpResponse
if not user_profile.realm.allow_message_editing:
return json_error(_("Your organization has turned off message editing"))
message, ignored_user_message = access_message(user_profile, message_id)
# You only have permission to edit a message if:
# 1. You sent it, OR:
# 2. This is a topic-only edit for a (no topic) message, OR:
# 3. This is a topic-only edit and you are an admin.
if message.sender == user_profile:
pass
elif (content is None) and ((message.topic_name() == "(no topic)") or
user_profile.is_realm_admin):
pass
else:
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (timezone_now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has past"))
if subject is None and content is None:
return json_error(_("Nothing to change"))
if subject is not None:
subject = subject.strip()
if subject == "":
raise JsonableError(_("Topic can't be empty"))
rendered_content = None
links_for_embed = set() # type: Set[Text]
if content is not None:
content = content.strip()
if content == "":
content = "(deleted)"
content = truncate_body(content)
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
ums = UserMessage.objects.filter(
message=message.id,
flags=~UserMessage.flags.historical)
message_users = UserProfile.objects.select_related().filter(
id__in={um.user_profile_id for um in ums})
# We render the message using the current user's realm; since
# the cross-realm bots never edit messages, this should be
# always correct.
# Note: If rendering fails, the called code will raise a JsonableError.
rendered_content = render_incoming_message(message,
content,
message_users,
user_profile.realm)
links_for_embed |= message.links_for_preview
number_changed = do_update_message(user_profile, message, subject,
propagate_mode, content, rendered_content)
# Include the number of messages changed in the logs
request._log_data['extra'] = "[%s]" % (number_changed,)
if links_for_embed and bugdown.url_embed_preview_enabled_for_realm(message):
event_data = {
'message_id': message.id,
'message_content': message.content,
# The choice of `user_profile.realm_id` rather than
# `sender.realm_id` must match the decision made in the
# `render_incoming_message` call earlier in this function.
'message_realm_id': user_profile.realm_id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data, lambda x: None)
return json_success()
@has_request_variables
def delete_message_backend(request, user_profile, message_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
message, ignored_user_message = access_message(user_profile, message_id)
if not user_profile.is_realm_admin:
raise JsonableError(_("You don't have permission to edit this message"))
do_delete_message(user_profile, message)
return json_success()
@has_request_variables
def json_fetch_raw_message(request, user_profile,
message_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
(message, user_message) = access_message(user_profile, message_id)
return json_success({"raw_content": message.content})
@has_request_variables
def render_message_backend(request, user_profile, content=REQ()):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
message = Message()
message.sender = user_profile
message.content = content
message.sending_client = request.client
rendered_content = render_markdown(message, content, realm=user_profile.realm)
return json_success({"rendered": rendered_content})
@authenticated_json_post_view
def json_messages_in_narrow(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return messages_in_narrow_backend(request, user_profile)
@has_request_variables
def messages_in_narrow_backend(request, user_profile,
msg_ids = REQ(validator=check_list(check_int)),
narrow = REQ(converter=narrow_parameter)):
# type: (HttpRequest, UserProfile, List[int], Optional[List[Dict[str, Any]]]) -> HttpResponse
# This query is limited to messages the user has access to because they
# actually received them, as reflected in `zerver_usermessage`.
query = select([column("message_id"), column("subject"), column("rendered_content")],
and_(column("user_profile_id") == literal(user_profile.id),
column("message_id").in_(msg_ids)),
join(table("zerver_usermessage"), table("zerver_message"),
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
builder = NarrowBuilder(user_profile, column("message_id"))
if narrow is not None:
for term in narrow:
query = builder.add_term(query, term)
sa_conn = get_sqlalchemy_connection()
query_result = list(sa_conn.execute(query).fetchall())
search_fields = dict()
for row in query_result:
(message_id, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
return json_success({"messages": search_fields})
|
sodo13/openpli-gls
|
refs/heads/master
|
lib/python/Tools/BoundFunction.py
|
120
|
class boundFunction:
def __init__(self, fnc, *args, **kwargs):
self.fnc = fnc
self.args = args
self.kwargs = kwargs
def __call__(self, *args, **kwargs):
# Copy the bound kwargs so repeated calls don't mutate them in place.
newkwargs = self.kwargs.copy()
newkwargs.update(kwargs)
return self.fnc(*self.args + args, **newkwargs)
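# A minimal usage sketch (illustrative, not part of the original module):
# boundFunction behaves like functools.partial -- bound positional args are
# prepended, and call-time kwargs override bound ones.
if __name__ == '__main__':
    def greet(greeting, name, punctuation='!'):
        return '%s, %s%s' % (greeting, name, punctuation)
    hello = boundFunction(greet, 'Hello', punctuation='.')
    assert hello('world') == 'Hello, world.'
    assert hello('world', punctuation='?') == 'Hello, world?'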
|
jamtot/HackerEarth
|
refs/heads/master
|
Problems/My Fibonacci/myfib.py
|
1
|
def myfib(A,B,N):
for i in xrange(N-1):
A,B=B,A+B
return A
if __name__ == "__main__":
ABN = map(int,raw_input().split())
print myfib(ABN[0],ABN[1],ABN[2])
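# Spot checks (illustrative, not part of the original solution): N is
# 1-indexed, so N=1 returns the seed A.
def _myfib_demo():
    assert myfib(1, 1, 1) == 1
    assert myfib(1, 1, 6) == 8  # 1, 1, 2, 3, 5, 8
    assert myfib(2, 3, 4) == 8  # 2, 3, 5, 8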
|
hybrideagle/django
|
refs/heads/master
|
django/contrib/contenttypes/migrations/0001_initial.py
|
585
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.contenttypes.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100, verbose_name='python model class name')),
],
options={
'ordering': ('name',),
'db_table': 'django_content_type',
'verbose_name': 'content type',
'verbose_name_plural': 'content types',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.contenttypes.models.ContentTypeManager()),
],
),
migrations.AlterUniqueTogether(
name='contenttype',
unique_together=set([('app_label', 'model')]),
),
]
|
Adai0808/scrapy-1
|
refs/heads/master
|
tests/test_utils_misc/test_walk_modules/mod/__init__.py
|
12133432
| |
lsaffre/lino_book
|
refs/heads/master
|
lino_book/projects/myroles/myroles.py
|
3
|
from lino_xl.lib.xl.user_types import *
from lino.api import dd, rt
from lino_xl.lib.polls.roles import PollsUser
AllPolls = rt.models.polls.AllPolls
AllPolls.required_roles = dd.login_required(PollsUser)
|
bjko/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer.py
|
122
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2012 Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import subprocess
_log = logging.getLogger(__name__)
# Shared by GTK and EFL for pulseaudio sanitizing before running tests.
class PulseAudioSanitizer:
def unload_pulseaudio_module(self):
# Unload pulseaudio's module-stream-restore, since it remembers
# volume settings from different runs, and could affect
# multimedia tests results
self._pa_module_index = -1
with open(os.devnull, 'w') as devnull:
try:
pactl_process = subprocess.Popen(["pactl", "list", "short", "modules"], stdout=subprocess.PIPE, stderr=devnull)
pactl_process.wait()
except OSError:
# pactl might not be available.
_log.debug('pactl not found. Please install pulseaudio-utils to avoid some potential media test failures.')
return
modules_list = pactl_process.communicate()[0]
for module in modules_list.splitlines():
if module.find("module-stream-restore") >= 0:
# Some pulseaudio-utils versions don't provide
# the index, just an empty string
self._pa_module_index = module.split('\t')[0] or -1
try:
# Since they could provide other stuff (not an index
# nor an empty string), let's make sure this is an int.
if int(self._pa_module_index) != -1:
pactl_process = subprocess.Popen(["pactl", "unload-module", self._pa_module_index])
pactl_process.wait()
if pactl_process.returncode == 0:
_log.debug('Unloaded module-stream-restore successfully')
else:
_log.debug('Unloading module-stream-restore failed')
except ValueError:
# pactl should have returned an index if the module is found
_log.debug('Unable to parse module index. Please check if your pulseaudio-utils version is too old.')
return
def restore_pulseaudio_module(self):
# If pulseaudio's module-stream-restore was previously unloaded,
# restore it. We shouldn't need extra checks here, since an
# index != -1 here means we successfully unloaded it previously.
if self._pa_module_index != -1:
with open(os.devnull, 'w') as devnull:
pactl_process = subprocess.Popen(["pactl", "load-module", "module-stream-restore"], stdout=devnull, stderr=devnull)
pactl_process.wait()
if pactl_process.returncode == 0:
_log.debug('Restored module-stream-restore successfully')
else:
_log.debug('Restoring module-stream-restore failed')
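# A minimal usage sketch (illustrative, not part of the original module):
# typical use brackets a test run so volume state cannot leak between runs.
def _sanitizer_demo():
    sanitizer = PulseAudioSanitizer()
    sanitizer.unload_pulseaudio_module()
    try:
        pass  # ... run the media layout tests here ...
    finally:
        sanitizer.restore_pulseaudio_module()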
|
rockfruit/bika.lims
|
refs/heads/master
|
bika/lims/content/contact.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by its authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
"""The contact person at an organisation.
"""
import types
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from AccessControl import ClassSecurityInfo
from Products.Archetypes import atapi
from Products.CMFPlone.utils import safe_unicode
from Products.Archetypes.utils import DisplayList
from plone import api
from zope.interface import implements
from bika.lims.utils import isActive
from bika.lims.interfaces import IContact
from bika.lims.content.person import Person
from bika.lims.config import PROJECTNAME
from bika.lims.config import ManageClients
from bika.lims import logger
from bika.lims import bikaMessageFactory as _
ACTIVE_STATES = ["active"]
schema = Person.schema.copy() + atapi.Schema((
atapi.LinesField('PublicationPreference',
vocabulary_factory='bika.lims.vocabularies.CustomPubPrefVocabularyFactory',
schemata='Publication preference',
widget=atapi.MultiSelectionWidget(
label=_("Publication preference"),
)),
atapi.BooleanField('AttachmentsPermitted',
default=False,
schemata='Publication preference',
widget=atapi.BooleanWidget(
label=_("Results attachments permitted"),
description=_(
"File attachments to results, e.g. microscope "
"photos, will be included in emails to recipients "
"if this option is enabled")
)),
atapi.ReferenceField('CCContact',
schemata='Publication preference',
vocabulary='getContacts',
multiValued=1,
allowed_types=('Contact',),
relationship='ContactContact',
widget=atapi.ReferenceWidget(
checkbox_bound=0,
label=_("Contacts to CC"),
)),
))
schema['JobTitle'].schemata = 'default'
schema['Department'].schemata = 'default'
# Don't make title required - it will be computed from the Person's Fullname
schema['title'].required = 0
schema['title'].widget.visible = False
schema.moveField('CCContact', before='AttachmentsPermitted')
class Contact(Person):
"""A Contact of a Client which can be linked to a System User
"""
implements(IContact)
schema = schema
displayContentsTab = False
security = ClassSecurityInfo()
_at_rename_after_creation = True
@classmethod
def getContactByUsername(cls, username):
"""Convenience Classmethod which returns a Contact by a Username
"""
# Check if the User is linked already
pc = api.portal.get_tool("portal_catalog")
contacts = pc(portal_type=cls.portal_type,
getUsername=username)
# No Contact assigned to this username
if len(contacts) == 0:
return None
# Multiple Users assigned, this should never happen
if len(contacts) > 1:
logger.error("User '{}' is bound to multiple Contacts '{}'".format(
username, ",".join(map(lambda c: c.Title, contacts))))
return map(lambda x: x.getObject(), contacts)
# Return the found Contact object
return contacts[0].getObject()
def Title(self):
"""Return the contact's Fullname as title
"""
return safe_unicode(self.getFullname()).encode('utf-8')
def isActive(self):
"""Checks if the Contact is active
"""
wftool = api.portal.get_tool("portal_workflow")
status = wftool.getStatusOf("bika_inactive_workflow", self)
if status and status.get("inactive_state") in ACTIVE_STATES:
logger.debug("Contact '{}' is active".format(self.Title()))
return True
logger.debug("Contact '{}' is deactivated".format(self.Title()))
return False
security.declareProtected(ManageClients, 'getUser')
def getUser(self):
"""Returns the linked Plone User or None
"""
username = self.getUsername()
if not username:
return None
user = api.user.get(userid=username)
return user
security.declareProtected(ManageClients, 'setUser')
def setUser(self, user_or_username):
"""Link the user to the Contact
:returns: True if OK, False if the User could not be linked
:rtype: bool
"""
user = None
userid = None
# Handle User IDs (strings)
if isinstance(user_or_username, types.StringTypes):
userid = user_or_username
user = api.user.get(userid)
# Handle User Objects (MemberData/PloneUser)
if hasattr(user_or_username, "getId"):
userid = user_or_username.getId()
user = user_or_username
# Not a valid user
if user is None:
return False
# Link the User
return self._linkUser(user)
security.declareProtected(ManageClients, 'unlinkUser')
def unlinkUser(self, delete=False):
"""Unlink the user to the Contact
:returns: True if OK, False if no User was unlinked
:rtype: bool
"""
userid = self.getUsername()
user = self.getUser()
if user:
logger.debug("Unlinking User '{}' from Contact '{}'".format(
userid, self.Title()))
# Unlink the User
if not self._unlinkUser():
return False
# Also remove the Plone User (caution)
if delete:
logger.debug("Removing Plone User '{}'".format(userid))
api.user.delete(username=userid)
return True
return False
security.declareProtected(ManageClients, 'hasUser')
def hasUser(self):
"""Check if Contact has a linked a System User
"""
user = self.getUser()
if user is None:
return False
return True
def getContacts(self, dl=True):
pairs = []
objects = []
for contact in self.aq_parent.objectValues('Contact'):
if isActive(contact) and contact.UID() != self.UID():
pairs.append((contact.UID(), contact.Title()))
if not dl:
objects.append(contact)
pairs.sort(lambda x, y: cmp(x[1].lower(), y[1].lower()))
return dl and DisplayList(pairs) or objects
def getParentUID(self):
return self.aq_parent.UID()
def getParent(self):
return aq_parent(aq_inner(self))
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
security.declarePrivate('_linkUser')
def _linkUser(self, user):
"""Set the UID of the current Contact in the User properties and update
all relevant own properties.
"""
KEY = "linked_contact_uid"
username = user.getId()
contact = self.getContactByUsername(username)
# User is linked to another contact (fix in UI)
if contact and contact.UID() != self.UID():
raise ValueError("User '{}' is already linked to Contact '{}'".format(
username, contact.Title()))
# User is linked to multiple other contacts (fix in Data)
if isinstance(contact, list):
raise ValueError("User '{}' is linked to multiple Contacts: '{}'".format(
username, ",".join(map(lambda x: x.Title(), contact))))
# XXX: Does it make sense to "remember" the UID as a User property?
tool = user.getTool()
try:
user.getProperty(KEY)
except ValueError:
logger.info("Adding User property {}".format(KEY))
tool.manage_addProperty(KEY, "", "string")
# Set the UID as a User Property
uid = self.UID()
user.setMemberProperties({KEY: uid})
logger.info("Linked Contact UID {} to User {}".format(
user.getProperty(KEY), username))
# Set the Username
self.setUsername(user.getId())
# Update the Email address from the user
self.setEmailAddress(user.getProperty("email"))
# Grant local Owner role
self._addLocalOwnerRole(username)
# Add user to "Clients" group
self._addUserToGroup(username, group="Clients")
# somehow the `getUsername` index gets out of sync
self.reindexObject()
return True
security.declarePrivate('_unlinkUser')
def _unlinkUser(self):
"""Remove the UID of the current Contact in the User properties and
update all relevant own properties.
"""
KEY = "linked_contact_uid"
# Nothing to do if no user is linked
if not self.hasUser():
return False
user = self.getUser()
username = user.getId()
# Unset the UID from the User Property
user.setMemberProperties({KEY: ""})
logger.info("Unlinked Contact UID from User {}".format(user.getProperty(KEY, "")))
# Unset the Username
self.setUsername(None)
# Unset the Email
self.setEmailAddress(None)
# Revoke local Owner role
self._delLocalOwnerRole(username)
# Remove user from "Clients" group
self._delUserFromGroup(username, group="Clients")
# somehow the `getUsername` index gets out of sync
self.reindexObject()
return True
security.declarePrivate('_addUserToGroup')
def _addUserToGroup(self, username, group="Clients"):
"""Add user to the goup
"""
portal_groups = api.portal.get_tool("portal_groups")
group = portal_groups.getGroupById(group)
group.addMember(username)
security.declarePrivate('_delUserFromGroup')
def _delUserFromGroup(self, username, group="Clients"):
"""Remove user from the group
"""
portal_groups = api.portal.get_tool("portal_groups")
group = portal_groups.getGroupById(group)
group.removeMember(username)
security.declarePrivate('_addLocalOwnerRole')
def _addLocalOwnerRole(self, username):
"""Add local owner role from parent object
"""
parent = self.getParent()
if parent.portal_type == 'Client':
parent.manage_setLocalRoles(username, ['Owner', ])
if hasattr(parent, 'reindexObjectSecurity'):
parent.reindexObjectSecurity()
security.declarePrivate('_delLocalOwnerRole')
def _delLocalOwnerRole(self, username):
"""Remove local owner role from parent object
"""
parent = self.getParent()
if parent.portal_type == 'Client':
parent.manage_delLocalRoles([ username ])
if hasattr(parent, 'reindexObjectSecurity'):
parent.reindexObjectSecurity()
atapi.registerType(Contact, PROJECTNAME)
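# Illustrative sketch (not part of the original module): the typical linking
# flow, assuming a Plone user "jsmith" exists and `contact` is a Contact.
#
#   contact.setUser("jsmith")   # link, grant local Owner, join "Clients"
#   contact.hasUser()           # -> True
#   contact.unlinkUser()        # clear username/email, revoke role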
|
geekboxzone/lollipop_external_chromium_org
|
refs/heads/geekbox
|
tools/python/google/path_utils.py
|
191
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Some utility methods for getting and manipulating paths."""
# TODO(pamg): Have the buildbot use these, too.
import errno
import os
import sys
class PathNotFound(Exception): pass
def ScriptDir():
"""Get the full path to the directory containing the current script."""
script_filename = os.path.abspath(sys.argv[0])
return os.path.dirname(script_filename)
def FindAncestor(start_dir, ancestor):
"""Finds an ancestor dir in a path.
For example, FindAncestor('c:\\foo\\bar\\baz', 'bar') would return
'c:\\foo\\bar'. Unlike FindUpward*, this only looks at direct path ancestors.
"""
start_dir = os.path.abspath(start_dir)
path = start_dir
while True:
(parent, tail) = os.path.split(path)
if tail == ancestor:
return path
if not tail:
break
path = parent
raise PathNotFound("Unable to find ancestor %s in %s" % (ancestor, start_dir))
def FindUpwardParent(start_dir, *desired_list):
"""Finds the desired object's parent, searching upward from the start_dir.
Searches start_dir and all its parents looking for the desired directory
or file, which may be given in one or more path components. Returns the
first directory in which the top desired path component was found, or raises
PathNotFound if it wasn't.
"""
desired_path = os.path.join(*desired_list)
last_dir = ''
cur_dir = start_dir
found_path = os.path.join(cur_dir, desired_path)
while not os.path.exists(found_path):
last_dir = cur_dir
cur_dir = os.path.dirname(cur_dir)
if last_dir == cur_dir:
raise PathNotFound('Unable to find %s above %s' %
(desired_path, start_dir))
found_path = os.path.join(cur_dir, desired_path)
# Strip the entire original desired path from the end of the one found
# and remove a trailing path separator, if present.
found_path = found_path[:len(found_path) - len(desired_path)]
if found_path.endswith(os.sep):
found_path = found_path[:len(found_path) - 1]
return found_path
def FindUpward(start_dir, *desired_list):
"""Returns a path to the desired directory or file, searching upward.
Searches start_dir and all its parents looking for the desired directory
or file, which may be given in one or more path components. Returns the full
path to the desired object, or raises PathNotFound if it wasn't found.
"""
parent = FindUpwardParent(start_dir, *desired_list)
return os.path.join(parent, *desired_list)
def MaybeMakeDirectory(*path):
"""Creates an entire path, if it doesn't already exist."""
file_path = os.path.join(*path)
try:
os.makedirs(file_path)
except OSError, e:
# errno.EEXIST is "File exists". If we see another error, re-raise.
if e.errno != errno.EEXIST:
raise
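# A minimal usage sketch (illustrative, not part of the original module),
# exercising the helpers against a throwaway directory tree.
def _path_utils_demo():
  import shutil
  import tempfile
  root = tempfile.mkdtemp()
  try:
    MaybeMakeDirectory(root, 'a', 'b', 'c')
    MaybeMakeDirectory(root, 'a', 'b', 'c')  # second call is a no-op
    start = os.path.join(root, 'a', 'b', 'c')
    assert FindAncestor(start, 'a') == os.path.join(root, 'a')
    assert FindUpward(start, 'a') == os.path.join(root, 'a')
  finally:
    shutil.rmtree(root)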
|
saaros/kafka-python
|
refs/heads/master
|
test/test_util.py
|
15
|
# -*- coding: utf-8 -*-
import struct
import six
from . import unittest
import kafka.common
import kafka.util
class UtilTest(unittest.TestCase):
@unittest.skip("Unwritten")
def test_relative_unpack(self):
pass
def test_write_int_string(self):
self.assertEqual(
kafka.util.write_int_string(b'some string'),
b'\x00\x00\x00\x0bsome string'
)
def test_write_int_string__unicode(self):
with self.assertRaises(TypeError) as cm:
kafka.util.write_int_string(u'unicode')
#: :type: TypeError
te = cm.exception
if six.PY2:
self.assertIn('unicode', str(te))
else:
self.assertIn('str', str(te))
self.assertIn('to be bytes', str(te))
def test_write_int_string__empty(self):
self.assertEqual(
kafka.util.write_int_string(b''),
b'\x00\x00\x00\x00'
)
def test_write_int_string__null(self):
self.assertEqual(
kafka.util.write_int_string(None),
b'\xff\xff\xff\xff'
)
def test_read_int_string(self):
self.assertEqual(kafka.util.read_int_string(b'\xff\xff\xff\xff', 0), (None, 4))
self.assertEqual(kafka.util.read_int_string(b'\x00\x00\x00\x00', 0), (b'', 4))
self.assertEqual(kafka.util.read_int_string(b'\x00\x00\x00\x0bsome string', 0), (b'some string', 15))
def test_read_int_string__insufficient_data(self):
with self.assertRaises(kafka.common.BufferUnderflowError):
kafka.util.read_int_string(b'\x00\x00\x00\x021', 0)
def test_write_short_string(self):
self.assertEqual(
kafka.util.write_short_string(b'some string'),
b'\x00\x0bsome string'
)
def test_write_short_string__unicode(self):
with self.assertRaises(TypeError) as cm:
kafka.util.write_short_string(u'hello')
#: :type: TypeError
te = cm.exception
if six.PY2:
self.assertIn('unicode', str(te))
else:
self.assertIn('str', str(te))
self.assertIn('to be bytes', str(te))
def test_write_short_string__empty(self):
self.assertEqual(
kafka.util.write_short_string(b''),
b'\x00\x00'
)
def test_write_short_string__null(self):
self.assertEqual(
kafka.util.write_short_string(None),
b'\xff\xff'
)
def test_write_short_string__too_long(self):
with self.assertRaises(struct.error):
kafka.util.write_short_string(b' ' * 33000)
def test_read_short_string(self):
self.assertEqual(kafka.util.read_short_string(b'\xff\xff', 0), (None, 2))
self.assertEqual(kafka.util.read_short_string(b'\x00\x00', 0), (b'', 2))
self.assertEqual(kafka.util.read_short_string(b'\x00\x0bsome string', 0), (b'some string', 13))
def test_read_int_string__insufficient_data2(self):
with self.assertRaises(kafka.common.BufferUnderflowError):
kafka.util.read_int_string('\x00\x021', 0)
def test_relative_unpack2(self):
self.assertEqual(
kafka.util.relative_unpack('>hh', b'\x00\x01\x00\x00\x02', 0),
((1, 0), 4)
)
def test_relative_unpack3(self):
with self.assertRaises(kafka.common.BufferUnderflowError):
kafka.util.relative_unpack('>hh', '\x00', 0)
def test_group_by_topic_and_partition(self):
t = kafka.common.TopicAndPartition
l = [
t("a", 1),
t("a", 2),
t("a", 3),
t("b", 3),
]
self.assertEqual(kafka.util.group_by_topic_and_partition(l), {
"a": {
1: t("a", 1),
2: t("a", 2),
3: t("a", 3),
},
"b": {
3: t("b", 3),
}
})
# should not be able to group duplicate topic-partitions
t1 = t("a", 1)
with self.assertRaises(AssertionError):
kafka.util.group_by_topic_and_partition([t1, t1])
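    def test_int_string_roundtrip_sketch(self):
        # Illustrative addition (not part of the original suite): write and
        # read are inverses under Kafka's length-prefixed framing.
        payload = b'kafka'
        encoded = kafka.util.write_int_string(payload)
        self.assertEqual(kafka.util.read_int_string(encoded, 0),
                         (payload, len(encoded)))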
|
Mj258/weiboapi
|
refs/heads/master
|
srapyDemo/tutorial/tutorial/settings.py
|
14
|
# -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'tutorial.pipelines.SomePipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
sn1k/app_mundial
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/comments/views/moderation.py
|
54
|
from django import template
from django.conf import settings
from django.contrib import comments
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.comments import signals
from django.contrib.comments.views.utils import next_redirect, confirmation_view
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.csrf import csrf_protect
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request, fallback=next or 'comments-flag-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/flag.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def delete(request, comment_id, next=None):
"""
Deletes a comment. Confirmation on GET, action on POST. Requires the "can
moderate comments" permission.
Templates: :template:`comments/delete.html`,
Context:
comment
the `comments.comment` object to be deleted
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Delete on POST
if request.method == 'POST':
# Flag the comment as deleted instead of actually deleting it.
perform_delete(request, comment)
return next_redirect(request, fallback=next or 'comments-delete-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/delete.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
@csrf_protect
@permission_required("comments.can_moderate")
def approve(request, comment_id, next=None):
"""
Approve a comment (that is, mark it as public and non-removed). Confirmation
on GET, action on POST. Requires the "can moderate comments" permission.
Templates: :template:`comments/approve.html`,
Context:
comment
the `comments.comment` object for approval
"""
comment = get_object_or_404(comments.get_model(), pk=comment_id, site__pk=settings.SITE_ID)
# Approve on POST
if request.method == 'POST':
# Flag the comment as approved.
perform_approve(request, comment)
return next_redirect(request, fallback=next or 'comments-approve-done',
c=comment.pk)
# Render a form on GET
else:
return render_to_response('comments/approve.html',
{'comment': comment, "next": next},
template.RequestContext(request)
)
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions.
def perform_flag(request, comment):
"""
Actually perform the flagging of a comment from a request.
"""
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=comments.models.CommentFlag.SUGGEST_REMOVAL
)
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_delete(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=comments.models.CommentFlag.MODERATOR_DELETION
)
comment.is_removed = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_approve(request, comment):
flag, created = comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=comments.models.CommentFlag.MODERATOR_APPROVAL,
)
comment.is_removed = False
comment.is_public = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
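# Illustrative sketch (not part of the original module): a hypothetical admin
# action reusing perform_approve, as the note above suggests.
def approve_comments(modeladmin, request, queryset):
    for comment in queryset:
        perform_approve(request, comment)
approve_comments.short_description = "Approve selected comments"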
# Confirmation views.
flag_done = confirmation_view(
template="comments/flagged.html",
doc='Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
template="comments/deleted.html",
doc='Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
template="comments/approved.html",
doc='Displays a "comment was approved" success page.'
)
|
stylianos-kampakis/scikit-learn
|
refs/heads/master
|
examples/applications/svm_gui.py
|
287
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm, mainly intended for didactic
purposes. You can create data points with point-and-click and visualize
the decision region induced by different kernels and parameter settings.

To create positive examples click the left mouse button; to create
negative examples click the right button.

If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50


class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
        self.clf = None  # fitted estimator; assigned by the Controller
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
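
    # dump_svmlight_file writes one line per point in svmlight format,
    # "<label> <index>:<x> <index>:<y>" (scikit-learn defaults to
    # zero-based indices). Example output, with illustrative values:
    #
    #     1.0 0:12.5 1:-3.25
    #     -1.0 0:-7.0 1:4.5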
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
        # With only one class present, fall back to a one-class SVM.
        if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
    def decision_surface(self, cls):
        """Evaluate the decision function of ``cls`` on a regular grid
        spanning the plot area and return the meshgrid plus values."""
        delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
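
# A headless sketch of the fit path the Controller drives (illustrative;
# plain values stand in for the Tk variables, so this is an assumption
# about usage, not how the GUI wires it):
#
#     import numpy as np
#     from sklearn import svm
#     X = np.array([[0.0, 0.0], [10.0, 10.0]])
#     y = np.array([1, -1])
#     clf = svm.SVC(kernel="linear", C=1.0, gamma=0.01, coef0=0.0, degree=3)
#     clf.fit(X, y)
#     print(clf.score(X, y))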
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
        self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
            raise ValueError("surface type unknown")


class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
                  command=controller.clear_data).pack(side=Tk.LEFT)


def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
    return op


def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
        model.dump_svmlight_file(opts.output)


if __name__ == "__main__":
main(sys.argv)
|
olivierverdier/pydflatex
|
refs/heads/master
|
test/__init__.py
|
12133432
| |
mdanielwork/intellij-community
|
refs/heads/master
|
python/testData/completion/relativeFromImportInNamespacePackage/nspkg1/nspkg2/foo.py
|
12133432
| |
mjirayu/sit_academy
|
refs/heads/master
|
common/djangoapps/heartbeat/__init__.py
|
12133432
|