repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/python/training/slot_creator.py | 51 | 7287 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
the primary object:
```python
# Optimizers can create a slot for each variable to track accumulators
accumulators = {var : create_zeros_slot(var, "momentum") for var in vs}
for var in vs:
apply_momentum(var, accumulators[var], lr, grad, momentum_tensor)
# Slots can also be used for moving averages
mavg = create_slot(var, var.initialized_value(), "exponential_moving_avg")
update_mavg = mavg.assign_sub((mavg - var) * (1 - decay))
```
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
def _is_resource(v):
"""Returns true if v is something you get from a resource variable."""
return isinstance(v, resource_variable_ops.ResourceVariable)
def _create_slot_var(primary, val, scope, validate_shape, shape, dtype):
"""Helper function for creating a slot variable."""
# TODO(lukaszkaiser): Consider allowing partitioners to be set in the current
# scope.
current_partitioner = variable_scope.get_variable_scope().partitioner
variable_scope.get_variable_scope().set_partitioner(None)
slot = variable_scope.get_variable(
scope, initializer=val, trainable=False,
use_resource=_is_resource(primary),
shape=shape, dtype=dtype,
validate_shape=validate_shape)
variable_scope.get_variable_scope().set_partitioner(current_partitioner)
# pylint: disable=protected-access
if isinstance(primary, variables.Variable) and primary._save_slice_info:
# Primary is a partitioned variable, so we need to also indicate that
# the slot is a partitioned variable. Slots have the same partitioning
# as their primaries.
# For example, when using AdamOptimizer in a linear model, slot.name
# here can be "linear//weights/Adam:0", while primary.op.name is
# "linear//weights". We want to get 'Adam' as real_slot_name, so we
# remove "'linear//weights' + '/'" and ':0'.
real_slot_name = slot.name[len(primary.op.name + "/"):-2]
slice_info = primary._save_slice_info
slot._set_save_slice_info(variables.Variable.SaveSliceInfo(
slice_info.full_name + "/" + real_slot_name,
slice_info.full_shape[:],
slice_info.var_offset[:],
slice_info.var_shape[:]))
# pylint: enable=protected-access
return slot
def create_slot(primary, val, name, colocate_with_primary=True):
"""Create a slot initialized to the given value.
The type of the slot is determined by the given value.
Args:
primary: The primary `Variable` or `Tensor`.
val: A `Tensor` specifying the initial value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = val.get_shape().is_fully_defined()
with variable_scope.variable_scope(None, primary.op.name + "/" + name):
if colocate_with_primary:
with ops.colocate_with(primary):
return _create_slot_var(primary, val, "", validate_shape, None, None)
else:
return _create_slot_var(primary, val, "", validate_shape, None, None)
def create_slot_with_initializer(primary, initializer, shape, dtype, name,
colocate_with_primary=True):
"""Creates a slot initialized using an `Initializer`.
The type of the slot is determined by the given initializer and dtype.
Args:
primary: The primary `Variable` or `Tensor`.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
# Scope the slot name in the namespace of the primary variable.
# Set "primary.op.name + '/' + name" as default name, so the scope name of
# optimizer can be shared when reuse is True. Meanwhile when reuse is False
# and the same name has been previously used, the scope name will add '_N'
# as suffix for unique identifications.
validate_shape = shape.is_fully_defined()
with variable_scope.variable_scope(None, primary.op.name + "/" + name):
if colocate_with_primary:
with ops.colocate_with(primary):
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
else:
return _create_slot_var(primary, initializer, "", validate_shape, shape,
dtype)
def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
primary: The primary `Variable` or `Tensor`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
Returns:
A `Variable` object.
"""
if dtype is None:
dtype = primary.dtype
slot_shape = primary.get_shape()
slot_shape = (slot_shape if slot_shape.is_fully_defined()
else array_ops.shape(primary.initialized_value()))
if slot_shape.is_fully_defined():
initializer = init_ops.zeros_initializer(dtype)
return create_slot_with_initializer(
primary, initializer, slot_shape, dtype, name,
colocate_with_primary=colocate_with_primary)
else:
val = array_ops.zeros(slot_shape, dtype=dtype)
return create_slot(primary, val, name,
colocate_with_primary=colocate_with_primary)
| mit |
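The slot-name extraction in `_create_slot_var` above is easy to get wrong; here is a standalone sketch of that string slicing, using made-up names in place of real TensorFlow variables:

```python
# Sketch of the real_slot_name slicing in _create_slot_var; the names below
# are hypothetical stand-ins for primary.op.name and slot.name.
primary_op_name = "linear/weights"
slot_name = "linear/weights/Adam:0"  # primary scope + slot suffix + ":0"
real_slot_name = slot_name[len(primary_op_name + "/"):-2]
assert real_slot_name == "Adam"
```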
dmlc/tvm | tests/python/unittest/test_target_codegen_device.py | 2 | 3264 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.contrib import utils
import numpy as np
import tvm.testing
@tvm.testing.requires_gpu
def test_large_uint_imm():
value = (1 << 63) + 123
other = tvm.tir.const(3, "uint64")
n = 12
num_thread = 2
A = te.compute((n,), lambda *i: tvm.tir.const(value, "uint64") + other, name="A")
s = te.create_schedule(A.op)
xo, xi = s[A].split(A.op.axis[0], factor=num_thread)
s[A].bind(xi, te.thread_axis("threadIdx.x"))
s[A].bind(xo, te.thread_axis("blockIdx.x"))
def check_target(device):
if not tvm.testing.device_enabled(device):
return
ctx = tvm.context(device, 0)
f = tvm.build(s, [A], device)
# launch the kernel.
a = tvm.nd.empty((n,), dtype=A.dtype, ctx=ctx)
f(a)
assert a.asnumpy()[0] == value + 3
check_target("cuda")
check_target("vulkan")
@tvm.testing.requires_gpu
def test_add_pipeline():
n = te.size_var("n")
A = te.placeholder((n,), name="A")
B = te.placeholder((), name="B")
C = te.compute(A.shape, lambda *i: A(*i) + B(), name="C")
D = te.compute(A.shape, lambda *i: C(*i) + 1, name="D")
s = te.create_schedule(D.op)
# A GPU schedule has to split the axis across blockIdx and threadIdx
num_thread = 256
xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
s[C].bind(xi, te.thread_axis("threadIdx.x"))
s[C].bind(xo, te.thread_axis("blockIdx.x"))
xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
s[D].bind(xi, te.thread_axis("threadIdx.x"))
s[D].bind(xo, te.thread_axis("blockIdx.x"))
def check_target(device, host="stackvm"):
if not tvm.testing.device_enabled(device) or not tvm.testing.device_enabled(host):
return
ctx = tvm.context(device, 0)
mhost = tvm.driver.build(s, [A, B, D], target=device, target_host=host)
f = mhost.entry_func
# launch the kernel.
n = 1027
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=()).astype(B.dtype), ctx)
d = tvm.nd.array(np.zeros(n, dtype=D.dtype), ctx)
f(a, b, d)
tvm.testing.assert_allclose(d.asnumpy(), a.asnumpy() + b.asnumpy() + 1)
check_target("cuda", host="llvm")
check_target("nvptx", host="llvm")
check_target("vulkan", host="llvm")
check_target("rocm", host="llvm")
if __name__ == "__main__":
test_large_uint_imm()
test_add_pipeline()
| apache-2.0 |
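The property `test_large_uint_imm` guards against can be reproduced with plain NumPy: a constant above the int64 range must survive uint64 arithmetic unchanged. A minimal sketch, independent of TVM:

```python
# Pure-NumPy check of what test_large_uint_imm verifies: a uint64 immediate
# larger than int64 max is preserved through addition.
import numpy as np

value = (1 << 63) + 123            # does not fit in int64, fits in uint64
a = np.full((12,), value, dtype=np.uint64) + np.uint64(3)
assert int(a[0]) == value + 3
```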
perlygatekeeper/glowing-robot | Project_Euler/15_lattice_paths/find_all_lattice_paths.py | 1 | 1517 | #!/opt/local/bin/python
# Python program to count the lattice paths through a grid (Project Euler 15)
import sys
import math
import timeit
import time
def find_lattice_paths(grid_size=2):
grid = []  # rows are appended below; note "[] * n" is just an empty list
for row in range(grid_size+1):
this_row = []
for col in range(grid_size+1):
this_row.append(0)
grid.append(this_row)
# print(type(grid))
# print(type(grid[0]))
# print(type(grid[0][0]))
i = 0
for row in range(len(grid)-1, -1, -1):
# print(row)
for col in range(len(grid[row])-1, -1, -1):
# print(f" {col}")
if (col+1) < grid_size+1:
grid[row][col] += grid[row][col+1]
if (row+1) < grid_size+1:
grid[row][col] += grid[row+1][col]
if grid[row][col] == 0:
grid[row][col] = 1
# print(f"Grid[{row}][{col}] is now {grid[row][col]}")
for row in range(len(grid)):
for col in range(len(grid)):
if grid[row][col] > 9999999:
print(" %7.1e" % (grid[row][col]), end="")
else:
print(" %7d" % (grid[row][col]), end="")
# print("row:%d col:%d -> %6d" % (row, col, grid[row][col]))
print("")
print("")
return (grid[0][0])
start_time = timeit.default_timer()
grid = 4
grid = 20
number_of_paths = find_lattice_paths(grid)
print ("Number of paths from top left, to bottom right is %d and took %f seconds."
% ( number_of_paths, ( timeit.default_timer() - start_time ) ) )
| artistic-2.0 |
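The dynamic-programming count above has a closed form: the number of monotone paths through an n x n grid is the central binomial coefficient C(2n, n). A quick cross-check in modern Python (`math.comb` needs Python 3.8+):

```python
# Closed-form cross-check for find_lattice_paths(n): there are C(2n, n) paths.
import math

n = 20
assert math.comb(2 * n, n) == 137846528820  # the known Project Euler 15 answer
```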
hellsgod/hells-Core-N6P | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display the processing of packets and the time each stage takes.
# It helps us investigate the network stack or a network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # all tracepoint events related to this script are inserted here
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which have been freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated
# with a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a, b: cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc, dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 |
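The tx bookkeeping in this script hinges on matching events by skb address as packets move from `tx_queue_list` to `tx_xmit_list` to `tx_free_list`. A toy, self-contained sketch of that hand-off (the address and timestamps are invented):

```python
# Toy model of the skbaddr matching in handle_net_dev_xmit / handle_kfree_skb.
tx_queue_list, tx_xmit_list, tx_free_list = [], [], []

# net_dev_queue: the packet enters the qdisc
tx_queue_list.insert(0, {'skbaddr': 0xdead, 'queue_t': 100})

# net_dev_xmit: move the matching skb from the queue list to the xmit list
for i, skb in enumerate(tx_queue_list):
    if skb['skbaddr'] == 0xdead:
        skb['xmit_t'] = 150
        tx_xmit_list.insert(0, skb)
        del tx_queue_list[i]
        break

# kfree_skb: the skb is freed, completing the tx timeline
for i, skb in enumerate(tx_xmit_list):
    if skb['skbaddr'] == 0xdead:
        skb['free_t'] = 180
        tx_free_list.append(skb)
        del tx_xmit_list[i]
        break

print(tx_free_list)  # one record carrying queue_t, xmit_t and free_t
```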
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/pip/_internal/commands/wheel.py | 6 | 6807 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
from pip._internal import cmdoptions
from pip._internal.basecommand import RequirementCommand
from pip._internal.cache import WheelCache
from pip._internal.exceptions import CommandError, PreviousBuildDirError
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req import RequirementSet
from pip._internal.resolve import Resolver
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.wheel import WheelBuilder
logger = logging.getLogger(__name__)
class WheelCommand(RequirementCommand):
"""
Build Wheel archives for your requirements and dependencies.
Wheel is a built-package format, and offers the advantage of not
recompiling your software during every install. For more details, see the
wheel docs: https://wheel.readthedocs.io/en/latest/
Requirements: setuptools>=0.8, and wheel.
'pip wheel' uses the bdist_wheel setuptools extension from the wheel
package to build individual wheels.
"""
name = 'wheel'
usage = """
%prog [options] <requirement specifier> ...
%prog [options] -r <requirements file> ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Build wheels from your requirements.'
def __init__(self, *args, **kw):
super(WheelCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-w', '--wheel-dir',
dest='wheel_dir',
metavar='dir',
default=os.curdir,
help=("Build wheels into <dir>, where the default is the "
"current working directory."),
)
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(
'--build-option',
dest='build_options',
metavar='options',
action='append',
help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
)
cmd_opts.add_option(cmdoptions.no_build_isolation())
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.editable())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(cmdoptions.ignore_requires_python())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(cmdoptions.progress_bar())
cmd_opts.add_option(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the 'bdist_wheel' command.")
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
cmdoptions.check_install_build_global(options)
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
build_delete = (not (options.no_clean or options.build_dir))
wheel_cache = WheelCache(options.cache_dir, options.format_control)
with TempDirectory(
options.build_dir, delete=build_delete, kind="wheel"
) as directory:
requirement_set = RequirementSet(
require_hashes=options.require_hashes,
)
try:
self.populate_requirement_set(
requirement_set, args, options, finder, session,
self.name, wheel_cache
)
preparer = RequirementPreparer(
build_dir=directory.path,
src_dir=options.src_dir,
download_dir=None,
wheel_download_dir=options.wheel_dir,
progress_bar=options.progress_bar,
build_isolation=options.build_isolation,
)
resolver = Resolver(
preparer=preparer,
finder=finder,
session=session,
wheel_cache=wheel_cache,
use_user_site=False,
upgrade_strategy="to-satisfy-only",
force_reinstall=False,
ignore_dependencies=options.ignore_dependencies,
ignore_requires_python=options.ignore_requires_python,
ignore_installed=True,
isolated=options.isolated_mode,
)
resolver.resolve(requirement_set)
# build wheels
wb = WheelBuilder(
finder, preparer, wheel_cache,
build_options=options.build_options or [],
global_options=options.global_options or [],
no_clean=options.no_clean,
)
wheels_built_successfully = wb.build(
requirement_set.requirements.values(), session=session,
)
if not wheels_built_successfully:
raise CommandError(
"Failed to build one or more wheels"
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
if not options.no_clean:
requirement_set.cleanup_files()
wheel_cache.cleanup()
| apache-2.0 |
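The command above is what backs `pip wheel` on the command line; scripting it usually goes through the CLI rather than these internal classes, which change between pip releases. A hedged example (the package name and output directory are arbitrary):

```python
# Drive `pip wheel` via the stable CLI instead of pip's internal API.
import subprocess
import sys

subprocess.check_call([
    sys.executable, "-m", "pip", "wheel",
    "--wheel-dir", "./wheelhouse",   # where the built .whl files land
    "requests",                      # any requirement specifier works here
])
```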
jhawkesworth/ansible | lib/ansible/modules/system/xfs_quota.py | 37 | 12876 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Emmanouil Kampitakis <info@kampitakis.de>
# Copyright: (c) 2018, William Leemans <willie@elaba.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: xfs_quota
short_description: Manage quotas on XFS filesystems
description:
- Configure quotas on XFS filesystems.
- Before using this module /etc/projects and /etc/projid need to be configured.
version_added: "2.8"
author:
- William Leemans (@bushvin)
options:
type:
description:
- The XFS quota type.
type: str
required: true
choices:
- user
- group
- project
name:
description:
- The name of the user, group or project to apply the quota to, if other than default.
type: str
mountpoint:
description:
- The mount point on which to apply the quotas.
type: str
required: true
bhard:
description:
- Hard blocks quota limit.
- This argument supports human readable sizes.
type: str
bsoft:
description:
- Soft blocks quota limit.
- This argument supports human readable sizes.
type: str
ihard:
description:
- Hard inodes quota limit.
type: int
isoft:
description:
- Soft inodes quota limit.
type: int
rtbhard:
description:
- Hard realtime blocks quota limit.
- This argument supports human readable sizes.
type: str
rtbsoft:
description:
- Soft realtime blocks quota limit.
- This argument supports human readable sizes.
type: str
state:
description:
- Whether to apply the limits or remove them.
- When removing limits, they are set to 0 rather than actually removed.
type: str
default: present
choices:
- present
- absent
requirements:
- xfsprogs
'''
EXAMPLES = r'''
- name: Set default project soft and hard limit on /opt of 1g
xfs_quota:
type: project
mountpoint: /opt
bsoft: 1g
bhard: 1g
state: present
- name: Remove the default limits on /opt
xfs_quota:
type: project
mountpoint: /opt
state: absent
- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
xfs_quota:
type: user
mountpoint: /home
isoft: 1024
ihard: 2048
'''
RETURN = r'''
bhard:
description: the current bhard setting in bytes
returned: always
type: int
sample: 1024
bsoft:
description: the current bsoft setting in bytes
returned: always
type: int
sample: 1024
ihard:
description: the current ihard setting in inodes
returned: always
type: int
sample: 100
isoft:
description: the current isoft setting in inodes
returned: always
type: int
sample: 100
rtbhard:
description: the current rtbhard setting in bytes
returned: always
type: int
sample: 1024
rtbsoft:
description: the current rtbsoft setting in bytes
returned: always
type: int
sample: 1024
'''
import grp
import os
import pwd
from ansible.module_utils.basic import AnsibleModule, human_to_bytes
def main():
module = AnsibleModule(
argument_spec=dict(
bhard=dict(type='str'),
bsoft=dict(type='str'),
ihard=dict(type='int'),
isoft=dict(type='int'),
mountpoint=dict(type='str', required=True),
name=dict(type='str'),
rtbhard=dict(type='str'),
rtbsoft=dict(type='str'),
state=dict(type='str', default='present', choices=['absent', 'present']),
type=dict(type='str', required=True, choices=['group', 'project', 'user'])
),
supports_check_mode=True,
)
quota_type = module.params['type']
name = module.params['name']
mountpoint = module.params['mountpoint']
bhard = module.params['bhard']
bsoft = module.params['bsoft']
ihard = module.params['ihard']
isoft = module.params['isoft']
rtbhard = module.params['rtbhard']
rtbsoft = module.params['rtbsoft']
state = module.params['state']
if bhard is not None:
bhard = human_to_bytes(bhard)
if bsoft is not None:
bsoft = human_to_bytes(bsoft)
if rtbhard is not None:
rtbhard = human_to_bytes(rtbhard)
if rtbsoft is not None:
rtbsoft = human_to_bytes(rtbsoft)
result = dict(
changed=False,
)
if not os.path.ismount(mountpoint):
module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
mp = get_fs_by_mountpoint(mountpoint)
if mp is None:
module.fail_json(msg="Path '%s' is not a mount point or not located on an xfs file system." % mountpoint, **result)
if quota_type == 'user':
type_arg = '-u'
quota_default = 'root'
if name is None:
name = quota_default
if 'uquota' not in mp['mntopts'] and 'usrquota' not in mp['mntopts'] and 'quota' not in mp['mntopts'] and 'uqnoenforce' not in mp['mntopts'] and \
'qnoenforce' not in mp['mntopts']:
module.fail_json(
msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option." % mountpoint, **result
)
try:
pwd.getpwnam(name)
except KeyError as e:
module.fail_json(msg="User '%s' does not exist." % name, **result)
elif quota_type == 'group':
type_arg = '-g'
quota_default = 'root'
if name is None:
name = quota_default
if 'gquota' not in mp['mntopts'] and 'grpquota' not in mp['mntopts'] and 'gqnoenforce' not in mp['mntopts']:
module.fail_json(
msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)" % (mountpoint, mp['mntopts']), **result
)
try:
grp.getgrnam(name)
except KeyError as e:
module.fail_json(msg="User '%s' does not exist." % name, **result)
elif quota_type == 'project':
type_arg = '-p'
quota_default = '#0'
if name is None:
name = quota_default
if 'pquota' not in mp['mntopts'] and 'prjquota' not in mp['mntopts'] and 'pqnoenforce' not in mp['mntopts']:
module.fail_json(msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option." % mountpoint, **result)
if name != quota_default and not os.path.isfile('/etc/projects'):
module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
if name != quota_default and not os.path.isfile('/etc/projid'):
module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
if name != quota_default and name is not None and get_project_id(name) is None:
module.fail_json(msg="Entry '%s' has not been defined in /etc/projid." % name, **result)
prj_set = True
if name != quota_default:
cmd = 'project %s' % name
rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
if rc != 0:
result['cmd'] = cmd
result['rc'] = rc
result['stdout'] = stdout
result['stderr'] = stderr
module.fail_json(msg='Could not get project state.', **result)
else:
for line in stdout.split('\n'):
if "Project Id '%s' - is not set." in line:
prj_set = False
break
if not prj_set and not module.check_mode:
cmd = 'project -s'
rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
if rc != 0:
result['cmd'] = cmd
result['rc'] = rc
result['stdout'] = stdout
result['stderr'] = stderr
module.fail_json(msg='Could not get quota realtime block report.', **result)
result['changed'] = True
elif not prj_set and module.check_mode:
result['changed'] = True
# Set limits
if state == 'absent':
bhard = 0
bsoft = 0
ihard = 0
isoft = 0
rtbhard = 0
rtbsoft = 0
current_bsoft, current_bhard = quota_report(module, mountpoint, name, quota_type, 'b')
current_isoft, current_ihard = quota_report(module, mountpoint, name, quota_type, 'i')
current_rtbsoft, current_rtbhard = quota_report(module, mountpoint, name, quota_type, 'rtb')
result['xfs_quota'] = dict(
bsoft=current_bsoft,
bhard=current_bhard,
isoft=current_isoft,
ihard=current_ihard,
rtbsoft=current_rtbsoft,
rtbhard=current_rtbhard
)
limit = []
if bsoft is not None and int(bsoft) != current_bsoft:
limit.append('bsoft=%s' % bsoft)
result['bsoft'] = int(bsoft)
if bhard is not None and int(bhard) != current_bhard:
limit.append('bhard=%s' % bhard)
result['bhard'] = int(bhard)
if isoft is not None and isoft != current_isoft:
limit.append('isoft=%s' % isoft)
result['isoft'] = isoft
if ihard is not None and ihard != current_ihard:
limit.append('ihard=%s' % ihard)
result['ihard'] = ihard
if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
limit.append('rtbsoft=%s' % rtbsoft)
result['rtbsoft'] = int(rtbsoft)
if rtbhard is not None and int(rtbhard) != current_rtbhard:
limit.append('rtbhard=%s' % rtbhard)
result['rtbhard'] = int(rtbhard)
if len(limit) > 0 and not module.check_mode:
if name == quota_default:
cmd = 'limit %s -d %s' % (type_arg, ' '.join(limit))
else:
cmd = 'limit %s %s %s' % (type_arg, ' '.join(limit), name)
rc, stdout, stderr = exec_quota(module, cmd, mountpoint)
if rc != 0:
result['cmd'] = cmd
result['rc'] = rc
result['stdout'] = stdout
result['stderr'] = stderr
module.fail_json(msg='Could not set limits.', **result)
result['changed'] = True
elif len(limit) > 0 and module.check_mode:
result['changed'] = True
module.exit_json(**result)
def quota_report(module, mountpoint, name, quota_type, used_type):
soft = None
hard = None
if quota_type == 'project':
type_arg = '-p'
elif quota_type == 'user':
type_arg = '-u'
elif quota_type == 'group':
type_arg = '-g'
if used_type == 'b':
used_arg = '-b'
used_name = 'blocks'
factor = 1024
elif used_type == 'i':
used_arg = '-i'
used_name = 'inodes'
factor = 1
elif used_type == 'rtb':
used_arg = '-r'
used_name = 'realtime blocks'
factor = 1024
rc, stdout, stderr = exec_quota(module, 'report %s %s' % (type_arg, used_arg), mountpoint)
if rc != 0:
result = dict(
changed=False,
rc=rc,
stdout=stdout,
stderr=stderr,
)
module.fail_json(msg='Could not get quota report for %s.' % used_name, **result)
for line in stdout.split('\n'):
line = line.strip().split()
if len(line) > 3 and line[0] == name:
soft = int(line[2]) * factor
hard = int(line[3]) * factor
break
return soft, hard
def exec_quota(module, cmd, mountpoint):
cmd = ['xfs_quota', '-x', '-c'] + [cmd, mountpoint]
(rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True)
if "XFS_GETQUOTA: Operation not permitted" in stderr.split('\n') or \
rc == 1 and 'xfs_quota: cannot set limits: Operation not permitted' in stderr.split('\n'):
module.fail_json(msg='You need to be root or have CAP_SYS_ADMIN capability to perform this operation')
return rc, stdout, stderr
def get_fs_by_mountpoint(mountpoint):
mpr = None
with open('/proc/mounts', 'r') as s:
for line in s.readlines():
mp = line.strip().split()
if len(mp) == 6 and mp[1] == mountpoint and mp[2] == 'xfs':
mpr = dict(zip(['spec', 'file', 'vfstype', 'mntopts', 'freq', 'passno'], mp))
mpr['mntopts'] = mpr['mntopts'].split(',')
break
return mpr
def get_project_id(name):
prjid = None
with open('/etc/projid', 'r') as s:
for line in s.readlines():
line = line.strip().partition(':')
if line[0] == name:
prjid = line[2]
break
return prjid
if __name__ == '__main__':
main()
| gpl-3.0 |
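The trickiest part of this module is parsing the output of `xfs_quota -x -c 'report ...'` in `quota_report`. A standalone sketch of that line handling, with a made-up report line (the real columns are name, used, soft, hard):

```python
# Sketch of the parsing in quota_report; the report line below is invented.
line = "myproj   4096   1024   2048   00 [--------]".strip().split()
name, factor = "myproj", 1024        # block counts are reported in KiB

soft = hard = None
if len(line) > 3 and line[0] == name:
    soft = int(line[2]) * factor
    hard = int(line[3]) * factor
print(soft, hard)  # 1048576 2097152
```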
johnkit/vtk-dev | Filters/Core/Testing/Python/backdrop.py | 78 | 1921 | """This is the Python equivalent of ./Wrapping/Tcl/vtktesting/backdrop.tcl.
This script is used while running Python tests translated from Tcl."""
import vtk
basePlane = None
baseMapper = None
base = None
backPlane = None
backMapper = None
back = None
leftPlane = None
leftMapper = None
left = None
def BuildBackdrop (minX, maxX, minY, maxY, minZ, maxZ, thickness):
global basePlane
global baseMapper
global base
global backPlane
global backMapper
global back
global left
global leftPlane
global leftMapper
if not basePlane:
basePlane = vtk.vtkCubeSource()
basePlane.SetCenter( (maxX + minX)/2.0, minY, (maxZ + minZ)/2.0)
basePlane.SetXLength(maxX-minX)
basePlane.SetYLength(thickness)
basePlane.SetZLength(maxZ - minZ)
if not baseMapper:
baseMapper = vtk.vtkPolyDataMapper()
baseMapper.SetInputConnection(basePlane.GetOutputPort())
if not base:
base = vtk.vtkActor()
base.SetMapper(baseMapper)
if not backPlane:
backPlane = vtk.vtkCubeSource()
backPlane.SetCenter( (maxX + minX)/2.0, (maxY + minY)/2.0, minZ)
backPlane.SetXLength(maxX-minX)
backPlane.SetYLength(maxY - minY)
backPlane.SetZLength(thickness)
if not backMapper:
backMapper = vtk.vtkPolyDataMapper()
backMapper.SetInputConnection(backPlane.GetOutputPort())
if not back:
back = vtk.vtkActor()
back.SetMapper(backMapper)
if not leftPlane:
leftPlane = vtk.vtkCubeSource()
leftPlane.SetCenter( minX, (maxY+minY)/2.0, (maxZ+minZ)/2.0)
leftPlane.SetXLength(thickness)
leftPlane.SetYLength(maxY-minY)
leftPlane.SetZLength(maxZ-minZ)
if not leftMapper:
leftMapper = vtk.vtkPolyDataMapper()
leftMapper.SetInputConnection(leftPlane.GetOutputPort())
if not left:
left = vtk.vtkActor()
left.SetMapper(leftMapper)
return [base, back, left]
| bsd-3-clause |
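A hypothetical usage of `BuildBackdrop` (requires the `vtk` package; the module name, bounds and thickness below are assumptions):

```python
# Add the three backdrop actors to a renderer; values are illustrative only.
import vtk
from backdrop import BuildBackdrop  # hypothetical import of the file above

renderer = vtk.vtkRenderer()
for actor in BuildBackdrop(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 0.01):
    renderer.AddActor(actor)
```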
abenzbiria/clients_odoo | addons/sale/wizard/__init__.py | 444 | 1129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_make_invoice
import sale_line_invoice
import sale_make_invoice_advance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DualSpark/ansible | v1/ansible/runner/poller.py | 132 | 4480 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
from ansible import errors
class AsyncPoller(object):
""" Manage asynchronous jobs. """
def __init__(self, results, runner):
self.runner = runner
self.results = { 'contacted': {}, 'dark': {}}
self.hosts_to_poll = []
self.completed = False
# flag to determine if at least one host was contacted
self.active = False
# True to work with the `and` below
skipped = True
jid = None
for (host, res) in results['contacted'].iteritems():
if res.get('started', False):
self.hosts_to_poll.append(host)
jid = res.get('ansible_job_id', None)
self.runner.vars_cache[host]['ansible_job_id'] = jid
self.active = True
else:
skipped = skipped and res.get('skipped', False)
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['contacted'][host] = res
for (host, res) in results['dark'].iteritems():
self.runner.vars_cache[host]['ansible_job_id'] = ''
self.results['dark'][host] = res
if not skipped:
if jid is None:
raise errors.AnsibleError("unexpected error: unable to determine jid")
if len(self.hosts_to_poll)==0:
raise errors.AnsibleError("unexpected error: no hosts to poll")
def poll(self):
""" Poll the job status.
Returns the changes in this iteration."""
self.runner.module_name = 'async_status'
self.runner.module_args = "jid={{ansible_job_id}}"
self.runner.pattern = "*"
self.runner.background = 0
self.runner.complex_args = None
self.runner.inventory.restrict_to(self.hosts_to_poll)
results = self.runner.run()
self.runner.inventory.lift_restriction()
hosts = []
poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
for (host, res) in results['contacted'].iteritems():
if res.get('started',False):
hosts.append(host)
poll_results['polled'][host] = res
else:
self.results['contacted'][host] = res
poll_results['contacted'][host] = res
if res.get('failed', False) or res.get('rc', 0) != 0:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
else:
self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
for (host, res) in results['dark'].iteritems():
self.results['dark'][host] = res
poll_results['dark'][host] = res
if host in self.hosts_to_poll:
self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
self.hosts_to_poll = hosts
if len(hosts)==0:
self.completed = True
return poll_results
def wait(self, seconds, poll_interval):
""" Wait a certain time for job completion, check status every poll_interval. """
# jid is None when all hosts were skipped
if not self.active:
return self.results
clock = seconds - poll_interval
while (clock >= 0 and not self.completed):
time.sleep(poll_interval)
poll_results = self.poll()
for (host, res) in poll_results['polled'].iteritems():
if res.get('started'):
self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
clock = clock - poll_interval
return self.results
| gpl-3.0 |
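The `wait` method above is a classic budgeted polling loop. A minimal standalone sketch of just the timing logic, decoupled from Ansible (the callback is a stand-in for `self.poll()`):

```python
# Budgeted polling: check every poll_interval seconds until done or timed out.
import time

def wait(seconds, poll_interval, poll):
    clock = seconds - poll_interval
    completed = False
    while clock >= 0 and not completed:
        time.sleep(poll_interval)
        completed = poll()       # stand-in for AsyncPoller.poll()
        clock -= poll_interval
    return completed

print(wait(3, 1, lambda: False))  # False: the time budget ran out
```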
GdZ/scriptfile | software/googleAppEngine/lib/django_1_3/django/contrib/gis/gdal/prototypes/errcheck.py | 404 | 4207 | """
This module houses the error-checking routines used by the GDAL
ctypes prototypes.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.gdal.error import check_err, OGRException, SRSException
from django.contrib.gis.gdal.libgdal import lgdal
# Helper routines for retrieving pointers and/or values from
# arguments passed in by reference.
def arg_byref(args, offset=-1):
"Returns the pointer argument's by-refernece value."
return args[offset]._obj.value
def ptr_byref(args, offset=-1):
"Returns the pointer argument passed in by-reference."
return args[offset]._obj
def check_bool(result, func, cargs):
"Returns the boolean evaluation of the value."
if bool(result): return True
else: return False
### String checking Routines ###
def check_const_string(result, func, cargs, offset=None):
"""
Similar functionality to `check_string`, but does not free the pointer.
"""
if offset:
check_err(result)
ptr = ptr_byref(cargs, offset)
return ptr.value
else:
return result
def check_string(result, func, cargs, offset=-1, str_result=False):
"""
Checks the string output returned from the given function, and frees
the string pointer allocated by OGR. The `str_result` keyword
may be used when the result is the string pointer, otherwise
the OGR error code is assumed. The `offset` keyword may be used
to extract the string pointer passed in by-reference at the given
slice offset in the function arguments.
"""
if str_result:
# For routines that return a string.
ptr = result
if not ptr: s = None
else: s = string_at(result)
else:
# Error-code return specified.
check_err(result)
ptr = ptr_byref(cargs, offset)
# Getting the string value
s = ptr.value
# Correctly freeing the allocated memory behind the GDAL pointer
# with the VSIFree routine.
if ptr: lgdal.VSIFree(ptr)
return s
### DataSource, Layer error-checking ###
### Envelope checking ###
def check_envelope(result, func, cargs, offset=-1):
"Checks a function that returns an OGR Envelope by reference."
env = ptr_byref(cargs, offset)
return env
### Geometry error-checking routines ###
def check_geom(result, func, cargs):
"Checks a function that returns a geometry."
# OGR_G_Clone may return an integer, even though the
# restype is set to c_void_p
if isinstance(result, (int, long)):
result = c_void_p(result)
if not result:
raise OGRException('Invalid geometry pointer returned from "%s".' % func.__name__)
return result
def check_geom_offset(result, func, cargs, offset=-1):
"Chcks the geometry at the given offset in the C parameter list."
check_err(result)
geom = ptr_byref(cargs, offset=offset)
return check_geom(geom, func, cargs)
### Spatial Reference error-checking routines ###
def check_srs(result, func, cargs):
if isinstance(result, (int, long)):
result = c_void_p(result)
if not result:
raise SRSException('Invalid spatial reference pointer returned from "%s".' % func.__name__)
return result
### Other error-checking routines ###
def check_arg_errcode(result, func, cargs):
"""
The error code is returned in the last argument, by reference.
Check its value with `check_err` before returning the result.
"""
check_err(arg_byref(cargs))
return result
def check_errcode(result, func, cargs):
"""
Check the error code returned (c_int).
"""
check_err(result)
return
def check_pointer(result, func, cargs):
"Makes sure the result pointer is valid."
if isinstance(result, (int, long)):
result = c_void_p(result)
if bool(result):
return result
else:
raise OGRException('Invalid pointer returned from "%s"' % func.__name__)
def check_str_arg(result, func, cargs):
"""
This is for the OSRGet[Angular|Linear]Units functions, which
require that the returned string pointer not be freed. This
returns both the double and tring values.
"""
dbl = result
ptr = cargs[-1]._obj
return dbl, ptr.value
| mit |
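All of these helpers plug into the same ctypes hook: assigning a callable to a foreign function's `errcheck` attribute makes ctypes call it with `(result, func, args)` after every invocation. A generic sketch of that pattern (the library and function names are hypothetical):

```python
# The ctypes errcheck protocol that the GDAL prototypes above rely on.
import ctypes

def check_nonzero(result, func, args):
    """Raise if the C function returned NULL/0, else pass the result through."""
    if not result:
        raise OSError('%s returned NULL' % func.__name__)
    return result

# lib = ctypes.CDLL("libexample.so")          # hypothetical library
# lib.make_thing.restype = ctypes.c_void_p
# lib.make_thing.errcheck = check_nonzero     # runs after every call
```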
MiltosD/CEF-ELRC | metashare/sync/management/__init__.py | 6 | 1699 | import logging
from django.db.models import get_models, signals
from metashare.sync import models as sync_models
from metashare.sync.management.commands.createsyncuser import \
grant_sync_permissions
from metashare import settings
from django.contrib.auth.models import User
# Setup logging support.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(settings.LOG_HANDLER)
def create_syncuser(app, created_models, verbosity, **kwargs):
from django.core.management import call_command
syncusers = getattr(settings, "SYNC_USERS", {})
for username, password in syncusers.iteritems():
try:
_user = User.objects.get(username=username)
# the sync permissions might have been removed for some reason; make
# sure to restore them
grant_sync_permissions(_user)
# the password may have been changed in the SYNC_USERS dict, so make
# sure to always update it in the database
_user.set_password(password)
LOGGER.info("Reset password for sync user account %s to the "
"SYNC_USERS settings and made sure that appropriate "
"permissions are granted.", username)
_user.save()
except User.DoesNotExist:
call_command("createsyncuser", username=username, password=password, verbosity=1)
# We must make sure that the post_syncdb hooks from auth.management are executed first,
# or else our code cannot find the permissions we want to use.
from django.contrib.auth import management
signals.post_syncdb.connect(create_syncuser,
sender=sync_models, dispatch_uid = "metashare.sync.management.create_syncuser")
| bsd-3-clause |
markmcclain/astara | akanda/rug/cli/message.py | 2 | 1757 | # Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for command that sends a message to the rug
"""
import abc
import logging
from cliff import command
from akanda.rug import notifications
class MessageSending(command.Command):
__metaclass__ = abc.ABCMeta
log = logging.getLogger(__name__)
@abc.abstractmethod
def make_message(self, parsed_args):
"""Return a dictionary containing the message contents
"""
return {}
def take_action(self, parsed_args):
self.log.info(
'using amqp at %r exchange %r',
self.app.rug_ini.amqp_url,
self.app.rug_ini.outgoing_notifications_exchange,
)
self.send_message(self.make_message(parsed_args))
def send_message(self, payload):
msg = {
'event_type': 'akanda.rug.command',
'payload': payload,
}
self.log.debug('sending %r', msg)
with notifications.Sender(
amqp_url=self.app.rug_ini.amqp_url,
exchange_name=self.app.rug_ini.outgoing_notifications_exchange,
topic='notifications.info',
) as sender:
sender.send(msg)
| apache-2.0 |
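`MessageSending` is abstract; a concrete command only supplies `make_message`, and the base class wraps the payload into an `akanda.rug.command` event and ships it over AMQP. A hedged sketch (the subclass name and payload are invented):

```python
# Hypothetical concrete subclass: everything but the payload is inherited.
class PollRouters(MessageSending):
    """Ask the rug to poll all routers."""

    def make_message(self, parsed_args):
        return {'command': 'poll'}   # wrapped by send_message() above
```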
tavendo/AutobahnPython | autobahn/wamp/gen/wamp/proto/PublisherFeatures.py | 3 | 3124 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
class PublisherFeatures(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsPublisherFeatures(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = PublisherFeatures()
x.Init(buf, n + offset)
return x
# PublisherFeatures
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# PublisherFeatures
def PublisherIdentification(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def PublisherExclusion(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def SubscriberBlackwhiteListing(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def AcknowledgeEventReceived(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def PayloadTransparency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# PublisherFeatures
def PayloadEncryptionCryptobox(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def PublisherFeaturesStart(builder): builder.StartObject(6)
def PublisherFeaturesAddPublisherIdentification(builder, publisherIdentification): builder.PrependBoolSlot(0, publisherIdentification, 0)
def PublisherFeaturesAddPublisherExclusion(builder, publisherExclusion): builder.PrependBoolSlot(1, publisherExclusion, 0)
def PublisherFeaturesAddSubscriberBlackwhiteListing(builder, subscriberBlackwhiteListing): builder.PrependBoolSlot(2, subscriberBlackwhiteListing, 0)
def PublisherFeaturesAddAcknowledgeEventReceived(builder, acknowledgeEventReceived): builder.PrependBoolSlot(3, acknowledgeEventReceived, 0)
def PublisherFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(4, payloadTransparency, 0)
def PublisherFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(5, payloadEncryptionCryptobox, 0)
def PublisherFeaturesEnd(builder): return builder.EndObject()
| mit |
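A round-trip through the generated code above is short: bracket the Add* calls with Start/End, finish the builder, then read the buffer back through GetRootAsPublisherFeatures (a sketch assuming the functions above are in scope):

```python
import flatbuffers

builder = flatbuffers.Builder(0)
PublisherFeaturesStart(builder)
PublisherFeaturesAddPublisherIdentification(builder, True)
PublisherFeaturesAddPayloadTransparency(builder, True)
builder.Finish(PublisherFeaturesEnd(builder))

feats = PublisherFeatures.GetRootAsPublisherFeatures(builder.Output(), 0)
assert feats.PublisherIdentification() is True
assert feats.PublisherExclusion() is False  # unset slots fall back to their default
```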
caslei/TfModels | im2txt/im2txt/ops/image_embedding_test.py | 29 | 5550 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_models.im2txt.ops.image_embedding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from im2txt.ops import image_embedding
class InceptionV3Test(tf.test.TestCase):
def setUp(self):
super(InceptionV3Test, self).setUp()
batch_size = 4
height = 299
width = 299
num_channels = 3
self._images = tf.placeholder(tf.float32,
[batch_size, height, width, num_channels])
self._batch_size = batch_size
def _countInceptionParameters(self):
"""Counts the number of parameters in the inception model at top scope."""
counter = {}
for v in tf.global_variables():
name_tokens = v.op.name.split("/")
if name_tokens[0] == "InceptionV3":
name = "InceptionV3/" + name_tokens[1]
num_params = v.get_shape().num_elements()
assert num_params
counter[name] = counter.get(name, 0) + num_params
return counter
def _verifyParameterCounts(self):
"""Verifies the number of parameters in the inception model."""
param_counts = self._countInceptionParameters()
expected_param_counts = {
"InceptionV3/Conv2d_1a_3x3": 960,
"InceptionV3/Conv2d_2a_3x3": 9312,
"InceptionV3/Conv2d_2b_3x3": 18624,
"InceptionV3/Conv2d_3b_1x1": 5360,
"InceptionV3/Conv2d_4a_3x3": 138816,
"InceptionV3/Mixed_5b": 256368,
"InceptionV3/Mixed_5c": 277968,
"InceptionV3/Mixed_5d": 285648,
"InceptionV3/Mixed_6a": 1153920,
"InceptionV3/Mixed_6b": 1298944,
"InceptionV3/Mixed_6c": 1692736,
"InceptionV3/Mixed_6d": 1692736,
"InceptionV3/Mixed_6e": 2143872,
"InceptionV3/Mixed_7a": 1699584,
"InceptionV3/Mixed_7b": 5047872,
"InceptionV3/Mixed_7c": 6080064,
}
self.assertDictEqual(expected_param_counts, param_counts)
def _assertCollectionSize(self, expected_size, collection):
actual_size = len(tf.get_collection(collection))
if expected_size != actual_size:
self.fail("Found %d items in collection %s (expected %d)." %
(actual_size, collection, expected_size))
def testTrainableTrueIsTrainingTrue(self):
embeddings = image_embedding.inception_v3(
self._images, trainable=True, is_training=True)
self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
self._verifyParameterCounts()
self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES)
self._assertCollectionSize(188, tf.GraphKeys.UPDATE_OPS)
self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES)
self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
def testTrainableTrueIsTrainingFalse(self):
embeddings = image_embedding.inception_v3(
self._images, trainable=True, is_training=False)
self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
self._verifyParameterCounts()
self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES)
self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES)
self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
def testTrainableFalseIsTrainingTrue(self):
embeddings = image_embedding.inception_v3(
self._images, trainable=False, is_training=True)
self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
self._verifyParameterCounts()
self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES)
self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES)
self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
def testTrainableFalseIsTrainingFalse(self):
embeddings = image_embedding.inception_v3(
self._images, trainable=False, is_training=False)
self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())
self._verifyParameterCounts()
self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES)
self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES)
self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
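The expected_param_counts values in the test above can be sanity-checked by hand for the early layers, assuming slim's inception_v3 defaults of bias-free convolutions followed by batch norm contributing beta, moving_mean, and moving_variance (three variables) per output channel:

```python
def conv_bn_params(k, c_in, c_out):
    # k*k*c_in*c_out convolution weights plus 3 batch-norm parameters per channel
    return k * k * c_in * c_out + 3 * c_out

assert conv_bn_params(3, 3, 32) == 960       # InceptionV3/Conv2d_1a_3x3
assert conv_bn_params(3, 32, 32) == 9312     # InceptionV3/Conv2d_2a_3x3
assert conv_bn_params(3, 32, 64) == 18624    # InceptionV3/Conv2d_2b_3x3
assert conv_bn_params(1, 64, 80) == 5360     # InceptionV3/Conv2d_3b_1x1
assert conv_bn_params(3, 80, 192) == 138816  # InceptionV3/Conv2d_4a_3x3
```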
datamade/yournextmp-popit | elections/illinois_state_primary_2016/management/commands/illinois_state_primary_2016_save_geojson.py | 1 | 1221 | import json
from os.path import join, dirname, abspath
from django.core.management.base import BaseCommand
import requests
from elections.models import Election
class Command(BaseCommand):
help = 'Save GeoJSON for the posts in the 2016 Illinois General Primary'
def handle(self, **options):
elections = Election.objects.prefetch_related('posts').prefetch_related('posts__base')
geojson = {}
for election in elections:
geojson[election.slug] = {
'type': 'FeatureCollection',
'features': [],
}
for post in election.posts.all():
feature = {
'type': 'Feature',
'geometry': json.loads(post.base.area.geom),
'properties': {'label': post.base.label, 'id': post.base.area.name}
}
geojson[election.slug]['features'].append(feature)
output_path = abspath(join(dirname(__file__), '..', '..', 'static'))
for election_slug, geom in geojson.items():
with open('{0}/{1}.geojson'.format(output_path, election_slug), 'w') as f:
f.write(json.dumps(geom))
| agpl-3.0 |
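For reference, each <election_slug>.geojson file the command above writes is a plain FeatureCollection; roughly this shape (values illustrative):

```python
# {
#     "type": "FeatureCollection",
#     "features": [
#         {
#             "type": "Feature",
#             "geometry": {"type": "MultiPolygon", "coordinates": []},
#             "properties": {"label": "<post label>", "id": "<area name>"}
#         }
#     ]
# }
```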
pfmoore/pip | noxfile.py | 1 | 12231 | """Automation using nox.
"""
import glob
import os
import shutil
import sys
from pathlib import Path
from typing import Iterator, List, Tuple
import nox
# fmt: off
sys.path.append(".")
from tools import release # isort:skip # noqa
sys.path.pop()
# fmt: on
nox.options.reuse_existing_virtualenvs = True
nox.options.sessions = ["lint"]
LOCATIONS = {
"common-wheels": "tests/data/common_wheels",
"protected-pip": "tools/tox_pip.py",
}
REQUIREMENTS = {
"docs": "tools/requirements/docs.txt",
"tests": "tools/requirements/tests.txt",
"common-wheels": "tools/requirements/tests-common_wheels.txt",
}
AUTHORS_FILE = "AUTHORS.txt"
VERSION_FILE = "src/pip/__init__.py"
def run_with_protected_pip(session, *arguments):
# type: (nox.Session, *str) -> None
"""Do a session.run("pip", *arguments), using a "protected" pip.
This invokes a wrapper script, that forwards calls to original virtualenv
(stable) version, and not the code being tested. This ensures pip being
used is not the code being tested.
"""
# https://github.com/theacodes/nox/pull/377
env = {"VIRTUAL_ENV": session.virtualenv.location} # type: ignore
command = ("python", LOCATIONS["protected-pip"]) + arguments
session.run(*command, env=env, silent=True)
def should_update_common_wheels():
# type: () -> bool
# If the cache hasn't been created, create it.
if not os.path.exists(LOCATIONS["common-wheels"]):
return True
# If the requirements file was updated after the cache, repopulate it.
cache_last_populated_at = os.path.getmtime(LOCATIONS["common-wheels"])
requirements_updated_at = os.path.getmtime(REQUIREMENTS["common-wheels"])
need_to_repopulate = requirements_updated_at > cache_last_populated_at
# Clear the stale cache.
if need_to_repopulate:
shutil.rmtree(LOCATIONS["common-wheels"], ignore_errors=True)
return need_to_repopulate
# -----------------------------------------------------------------------------
# Development Commands
# These are currently prototypes to evaluate whether we want to switch over
# completely to nox for all our automation. Contributors should prefer using
# `tox -e ...` until this note is removed.
# -----------------------------------------------------------------------------
@nox.session(python=["3.6", "3.7", "3.8", "3.9", "pypy3"])
def test(session):
# type: (nox.Session) -> None
# Get the common wheels.
if should_update_common_wheels():
# fmt: off
run_with_protected_pip(
session,
"wheel",
"-w", LOCATIONS["common-wheels"],
"-r", REQUIREMENTS["common-wheels"],
)
# fmt: on
else:
msg = f"Re-using existing common-wheels at {LOCATIONS['common-wheels']}."
session.log(msg)
# Build source distribution
# https://github.com/theacodes/nox/pull/377
sdist_dir = os.path.join(session.virtualenv.location, "sdist") # type: ignore
if os.path.exists(sdist_dir):
shutil.rmtree(sdist_dir, ignore_errors=True)
# fmt: off
session.run(
"python", "setup.py", "sdist", "--formats=zip", "--dist-dir", sdist_dir,
silent=True,
)
# fmt: on
generated_files = os.listdir(sdist_dir)
assert len(generated_files) == 1
generated_sdist = os.path.join(sdist_dir, generated_files[0])
# Install source distribution
run_with_protected_pip(session, "install", generated_sdist)
# Install test dependencies
run_with_protected_pip(session, "install", "-r", REQUIREMENTS["tests"])
# Parallelize tests as much as possible, by default.
arguments = session.posargs or ["-n", "auto"]
# Run the tests
# LC_CTYPE is set to get UTF-8 output inside of the subprocesses that our
# tests use.
session.run("pytest", *arguments, env={"LC_CTYPE": "en_US.UTF-8"})
@nox.session
def docs(session):
# type: (nox.Session) -> None
session.install("-e", ".")
session.install("-r", REQUIREMENTS["docs"])
def get_sphinx_build_command(kind):
# type: (str) -> List[str]
# Having the conf.py in the docs/html is weird but needed because we
# cannot use a different configuration directory vs. source directory
# on RTD currently. So, we'll pass "-c docs/html" here.
# See https://github.com/rtfd/readthedocs.org/issues/1543.
# fmt: off
return [
"sphinx-build",
"-W",
"-c", "docs/html", # see note above
"-d", "docs/build/doctrees/" + kind,
"-b", kind,
"docs/" + kind,
"docs/build/" + kind,
]
# fmt: on
session.run(*get_sphinx_build_command("html"))
session.run(*get_sphinx_build_command("man"))
@nox.session(name="docs-live")
def docs_live(session):
# type: (nox.Session) -> None
session.install("-e", ".")
session.install("-r", REQUIREMENTS["docs"], "sphinx-autobuild")
session.run(
"sphinx-autobuild",
"-d=docs/build/doctrees/livehtml",
"-b=dirhtml",
"docs/html",
"docs/build/livehtml",
*session.posargs,
)
@nox.session
def lint(session):
# type: (nox.Session) -> None
session.install("pre-commit")
if session.posargs:
args = session.posargs + ["--all-files"]
else:
args = ["--all-files", "--show-diff-on-failure"]
session.run("pre-commit", "run", *args)
@nox.session
def vendoring(session):
# type: (nox.Session) -> None
session.install("vendoring>=0.3.0")
if "--upgrade" not in session.posargs:
session.run("vendoring", "sync", ".", "-v")
return
def pinned_requirements(path):
# type: (Path) -> Iterator[Tuple[str, str]]
for line in path.read_text().splitlines(keepends=False):
one, sep, two = line.partition("==")
if not sep:
continue
name = one.strip()
version = two.split("#", 1)[0].strip()
if name and version:
yield name, version
vendor_txt = Path("src/pip/_vendor/vendor.txt")
for name, old_version in pinned_requirements(vendor_txt):
if name == "setuptools":
continue
# update requirements.txt
session.run("vendoring", "update", ".", name)
# get the updated version
new_version = old_version
for inner_name, inner_version in pinned_requirements(vendor_txt):
if inner_name == name:
# this is a dedicated assignment, to make flake8 happy
new_version = inner_version
break
else:
session.error(f"Could not find {name} in {vendor_txt}")
# check if the version changed.
if new_version == old_version:
continue # no change, nothing more to do here.
# synchronize the contents
session.run("vendoring", "sync", ".")
# Determine the correct message
message = f"Upgrade {name} to {new_version}"
# Write our news fragment
news_file = Path("news") / (name + ".vendor.rst")
news_file.write_text(message + "\n") # "\n" appeases end-of-line-fixer
# Commit the changes
release.commit_file(session, ".", message=message)
# -----------------------------------------------------------------------------
# Release Commands
# -----------------------------------------------------------------------------
@nox.session(name="prepare-release")
def prepare_release(session):
# type: (nox.Session) -> None
version = release.get_version_from_arguments(session)
if not version:
session.error("Usage: nox -s prepare-release -- <version>")
session.log("# Ensure nothing is staged")
if release.modified_files_in_git("--staged"):
session.error("There are files staged in git")
session.log(f"# Updating {AUTHORS_FILE}")
release.generate_authors(AUTHORS_FILE)
if release.modified_files_in_git():
release.commit_file(session, AUTHORS_FILE, message=f"Update {AUTHORS_FILE}")
else:
session.log(f"# No changes to {AUTHORS_FILE}")
session.log("# Generating NEWS")
release.generate_news(session, version)
session.log(f"# Bumping for release {version}")
release.update_version_file(version, VERSION_FILE)
release.commit_file(session, VERSION_FILE, message="Bump for release")
session.log("# Tagging release")
release.create_git_tag(session, version, message=f"Release {version}")
session.log("# Bumping for development")
next_dev_version = release.get_next_development_version(version)
release.update_version_file(next_dev_version, VERSION_FILE)
release.commit_file(session, VERSION_FILE, message="Bump for development")
@nox.session(name="build-release")
def build_release(session):
# type: (nox.Session) -> None
version = release.get_version_from_arguments(session)
if not version:
session.error("Usage: nox -s build-release -- YY.N[.P]")
session.log("# Ensure no files in dist/")
if release.have_files_in_folder("dist"):
session.error(
"There are files in dist/. Remove them and try again. "
"You can use `git clean -fxdi -- dist` command to do this"
)
session.log("# Install dependencies")
session.install("setuptools", "wheel", "twine")
with release.isolated_temporary_checkout(session, version) as build_dir:
session.log(
"# Start the build in an isolated, "
f"temporary Git checkout at {build_dir!s}",
)
with release.workdir(session, build_dir):
tmp_dists = build_dists(session)
tmp_dist_paths = (build_dir / p for p in tmp_dists)
session.log(f"# Copying dists from {build_dir}")
os.makedirs("dist", exist_ok=True)
for dist, final in zip(tmp_dist_paths, tmp_dists):
session.log(f"# Copying {dist} to {final}")
shutil.copy(dist, final)
def build_dists(session):
# type: (nox.Session) -> List[str]
"""Return dists with valid metadata."""
session.log(
"# Check if there's any Git-untracked files before building the wheel",
)
has_forbidden_git_untracked_files = any(
# Don't report the environment this session is running in
not untracked_file.startswith(".nox/build-release/")
for untracked_file in release.get_git_untracked_files()
)
if has_forbidden_git_untracked_files:
session.error(
"There are untracked files in the working directory. "
"Remove them and try again",
)
session.log("# Build distributions")
session.run("python", "setup.py", "sdist", "bdist_wheel", silent=True)
produced_dists = glob.glob("dist/*")
session.log(f"# Verify distributions: {', '.join(produced_dists)}")
session.run("twine", "check", *produced_dists, silent=True)
return produced_dists
@nox.session(name="upload-release")
def upload_release(session):
# type: (nox.Session) -> None
version = release.get_version_from_arguments(session)
if not version:
session.error("Usage: nox -s upload-release -- YY.N[.P]")
session.log("# Install dependencies")
session.install("twine")
distribution_files = glob.glob("dist/*")
session.log(f"# Distribution files: {distribution_files}")
# Sanity check: Make sure there's 2 distribution files.
count = len(distribution_files)
if count != 2:
session.error(
f"Expected 2 distribution files for upload, got {count}. "
f"Remove dist/ and run 'nox -s build-release -- {version}'"
)
# Sanity check: Make sure the files are correctly named.
distfile_names = (os.path.basename(fn) for fn in distribution_files)
expected_distribution_files = [
f"pip-{version}-py3-none-any.whl",
f"pip-{version}.tar.gz",
]
if sorted(distfile_names) != sorted(expected_distribution_files):
session.error(f"Distribution files do not seem to be for {version} release.")
session.log("# Upload distributions")
session.run("twine", "upload", *distribution_files)
| mit |
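pinned_requirements() in the vendoring session above recognises only exact name==version pins and strips trailing comments; the same parsing, lifted into a standalone, runnable sketch:

```python
from pathlib import Path
from tempfile import NamedTemporaryFile

def pinned(path):
    for line in Path(path).read_text().splitlines():
        name, sep, rest = line.partition("==")
        version = rest.split("#", 1)[0].strip()
        if sep and name.strip() and version:
            yield name.strip(), version

with NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("requests==2.25.1  # via pip\nsetuptools==51.3.3\nnot-a-pin\n")

assert list(pinned(f.name)) == [("requests", "2.25.1"), ("setuptools", "51.3.3")]
```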
atheiste/django-shop | shop/tests/payment.py | 2 | 4843 | # -*- coding: utf-8 -*-
from decimal import Decimal
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.testcases import TestCase
from shop.models import (
ExtraOrderItemPriceField,
ExtraOrderPriceField,
LazyOrder,
LazyOrderItem,
LazyAddress,
Country
)
# from shop.payment import pay_on_delivery
from shop.tests.utils.context_managers import SettingsOverride
EXPECTED = (
'A new order was placed!'
' Ref: fakeref| Name: Test item| Price: 100| Q: 1| SubTot: 100|'
' Fake extra field: 10|Tot: 110|'
'Subtotal: 100'
'Fake Taxes: 10'
'Total: 120')
def mock_payment(request, *args, **kwargs):
"""A simple, useless backend."""
return ""
class PayOnDeliveryTestCase(TestCase):
def setUp(self):
self.user = User.objects.create(username="test",
email="test@example.com",
first_name="Test",
last_name="Toto")
self.user.save()
Address = LazyAddress()
self.country = Country.objects.create(name='CH')
self.address = Address()
self.address.client = self.client
self.address.address = 'address'
self.address.address2 = 'address2'
self.address.zip_code = '1234'
self.address.state = 'ZH'
self.address.country = self.country
self.address.is_billing = False
self.address.is_shipping = True
self.address.save()
self.address2 = Address()
self.address2.client = self.client
self.address2.address = '2address'
self.address2.address2 = '2address2'
self.address2.zip_code = '21234'
self.address2.state = '2ZH'
self.address2.country = self.country
self.address2.is_billing = True
self.address2.is_shipping = False
self.address2.save()
# The order fixture
Order = LazyOrder()
self.order = Order()
self.order.user = self.user
self.order.order_subtotal = Decimal('100') # One item worth 100
self.order.order_total = Decimal('120') # plus a test field worth 10
self.order.status = Order.PROCESSING
ship_address = self.address
bill_address = self.address2
self.order.set_shipping_address(ship_address)
self.order.set_billing_address(bill_address)
self.order.save()
# Orderitems
OrderItem = LazyOrderItem()
self.orderitem = OrderItem()
self.orderitem.order = self.order
self.orderitem.product_name = 'Test item'
self.orderitem.unit_price = Decimal("100")
self.orderitem.quantity = 1
self.orderitem.line_subtotal = Decimal('100')
self.orderitem.line_total = Decimal('110')
self.orderitem.save()
eoif = ExtraOrderItemPriceField()
eoif.order_item = self.orderitem
eoif.label = 'Fake extra field'
eoif.value = Decimal("10")
eoif.save()
eof = ExtraOrderPriceField()
eof.order = self.order
eof.label = "Fake Taxes"
eof.value = Decimal("10")
eof.save()
class MockRequest:
user = self.user
self.mock_request = MockRequest()
def test02_must_be_logged_in_if_setting_is_true(self):
with SettingsOverride(SHOP_FORCE_LOGIN=True):
resp = self.client.get(reverse('pay-on-delivery'))
self.assertEqual(resp.status_code, 302)
self.assertTrue('accounts/login/' in resp._headers['location'][1])
def test_order_required_before_payment(self):
""" See issue #84 """
# Session only (no order)
response = self.client.get(reverse('pay-on-delivery'))
self.assertEqual(302, response.status_code)
self.assertEqual('http://testserver/shop/cart/', response._headers['location'][1])
# User logged in (no order)
username = 'user'
pw = 'pass'
User.objects.create_user(username=username, password=pw, email='test@example.com')
logged_in = self.client.login(username=username, password=pw)
self.assertTrue(logged_in)
response = self.client.get(reverse('pay-on-delivery'))
self.assertEqual(302, response.status_code)
self.assertEqual('http://testserver/shop/cart/', response._headers['location'][1])
self.client.logout()
# User logged in and has order
self.user.set_password('blah')
self.user.save()
logged_in = self.client.login(username=self.user.username, password='blah')
self.assertTrue(logged_in)
response = self.client.get(reverse('pay-on-delivery'))
self.assertTrue(reverse('thank_you_for_your_order') in response._headers['location'][1])
| bsd-3-clause |
bcoca/ansible | test/units/plugins/action/test_pause.py | 13 | 2603 | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import curses
import importlib
import io
import pytest
import sys
from ansible.plugins.action import pause # noqa: F401
from ansible.module_utils.six import PY2
builtin_import = 'builtins.__import__'
if PY2:
builtin_import = '__builtin__.__import__'
def test_pause_curses_tigetstr_none(mocker, monkeypatch):
monkeypatch.delitem(sys.modules, 'ansible.plugins.action.pause')
dunder_import = __import__
def _import(*args, **kwargs):
if args[0] == 'curses':
mock_curses = mocker.Mock()
mock_curses.setupterm = mocker.Mock(return_value=True)
mock_curses.tigetstr = mocker.Mock(return_value=None)
return mock_curses
else:
return dunder_import(*args, **kwargs)
mocker.patch(builtin_import, _import)
mod = importlib.import_module('ansible.plugins.action.pause')
assert mod.HAS_CURSES is True
assert mod.MOVE_TO_BOL == b'\r'
assert mod.CLEAR_TO_EOL == b'\x1b[K'
def test_pause_missing_curses(mocker, monkeypatch):
monkeypatch.delitem(sys.modules, 'ansible.plugins.action.pause')
dunder_import = __import__
def _import(*args, **kwargs):
if args[0] == 'curses':
raise ImportError
else:
return dunder_import(*args, **kwargs)
mocker.patch(builtin_import, _import)
mod = importlib.import_module('ansible.plugins.action.pause')
with pytest.raises(AttributeError):
mod.curses
assert mod.HAS_CURSES is False
assert mod.MOVE_TO_BOL == b'\r'
assert mod.CLEAR_TO_EOL == b'\x1b[K'
@pytest.mark.parametrize('exc', (curses.error, TypeError, io.UnsupportedOperation))
def test_pause_curses_setupterm_error(mocker, monkeypatch, exc):
monkeypatch.delitem(sys.modules, 'ansible.plugins.action.pause')
dunder_import = __import__
def _import(*args, **kwargs):
if args[0] == 'curses':
mock_curses = mocker.Mock()
mock_curses.setupterm = mocker.Mock(side_effect=exc)
mock_curses.error = curses.error
return mock_curses
else:
return dunder_import(*args, **kwargs)
mocker.patch(builtin_import, _import)
mod = importlib.import_module('ansible.plugins.action.pause')
assert mod.HAS_CURSES is False
assert mod.MOVE_TO_BOL == b'\r'
assert mod.CLEAR_TO_EOL == b'\x1b[K'
| gpl-3.0 |
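All three tests above lean on one trick: temporarily replace the import machinery so that `import curses` inside the pause plugin either returns a controlled mock or fails outright, then re-import the plugin to exercise the corresponding branch. The pattern in isolation (module names here are generic placeholders):

```python
import importlib
import sys

real_import = __import__

def fake_import(name, *args, **kwargs):
    if name == "some_optional_dep":  # hypothetical optional dependency
        raise ImportError
    return real_import(name, *args, **kwargs)

# With builtins.__import__ patched to fake_import (e.g. via mocker.patch),
# popping the module from sys.modules and re-importing it runs its
# ImportError fallback path:
#   sys.modules.pop("package.module", None)
#   mod = importlib.import_module("package.module")
```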
jnerin/ansible | lib/ansible/modules/cloud/centurylink/clc_group.py | 30 | 16779 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
- Create or delete Server Groups at CenturyLink Cloud
version_added: "2.0"
options:
name:
description:
- The name of the Server Group
required: True
description:
description:
- A description of the Server Group
required: False
parent:
description:
- The parent group of the server group. If parent is not provided, the group is created at the top level.
required: False
location:
description:
- Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
associated with the account
required: False
state:
description:
- Whether to create or delete the group
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, you must set the environment variables below, which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Create a Server Group
---
- name: Create Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: present
register: clc
- name: debug
debug:
var: clc
# Delete a Server Group
---
- name: Delete Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete / Verify Absent a Server Group at CenturyLink Cloud
clc_group:
name: My Cool Server Group
parent: Default Group
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
group:
description: The group information
returned: success
type: dict
sample:
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":"2015-07-29T18:52:47Z",
"modifiedBy":"service.wfad",
"modifiedDate":"2015-07-29T18:52:47Z"
},
"customFields":[
],
"description":"test group",
"groups":[
],
"id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
"links":[
{
"href":"/v2/groups/wfad",
"rel":"createGroup",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad",
"rel":"createServer",
"verbs":[
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"parentGroup"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
"rel":"defaults",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
"rel":"billing"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
"rel":"archiveGroupAction"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
"rel":"statistics"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
"rel":"horizontalAutoscalePolicyMapping",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
}
],
"locationId":"UC1",
"name":"test group",
"status":"active",
"type":"default"
}
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcGroup(object):
clc = None
root_group = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
location = self.module.params.get('location')
group_name = self.module.params.get('name')
parent_name = self.module.params.get('parent')
group_description = self.module.params.get('description')
state = self.module.params.get('state')
self._set_clc_credentials_from_env()
self.group_dict = self._get_group_tree_for_datacenter(
datacenter=location)
if state == "absent":
changed, group, requests = self._ensure_group_is_absent(
group_name=group_name, parent_name=parent_name)
if requests:
self._wait_for_requests_to_complete(requests)
else:
changed, group = self._ensure_group_is_present(
group_name=group_name, parent_name=parent_name, group_description=group_description)
try:
group = group.data
except AttributeError:
group = group_name
self.module.exit_json(changed=changed, group=group)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(required=True),
description=dict(default=None),
parent=dict(default=None),
location=dict(default=None),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=True))
return argument_spec
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_group_is_absent(self, group_name, parent_name):
"""
Ensure that group_name is absent by deleting it if necessary
:param group_name: string - the name of the clc server group to delete
:param parent_name: string - the name of the parent group for group_name
:return: changed, group
"""
changed = False
group = []
results = []
if self._group_exists(group_name=group_name, parent_name=parent_name):
if not self.module.check_mode:
group.append(group_name)
result = self._delete_group(group_name)
results.append(result)
changed = True
return changed, group, results
def _delete_group(self, group_name):
"""
Delete the provided server group
:param group_name: string - the server group to delete
:return: none
"""
response = None
group, parent = self.group_dict.get(group_name)
try:
response = group.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
group_name, ex.response_text
))
return response
def _ensure_group_is_present(
self,
group_name,
parent_name,
group_description):
"""
Check whether a server group exists, and create it if it doesn't.
:param group_name: the name of the group to validate/create
:param parent_name: the name of the parent group for group_name
:param group_description: a short description of the server group (used when creating)
:return: (changed, group) -
changed: Boolean- whether a change was made,
group: A clc group object for the group
"""
if not self.root_group:
raise AssertionError("Implementation Error: Root Group not set")
parent = parent_name if parent_name is not None else self.root_group.name
description = group_description
changed = False
group = group_name
parent_exists = self._group_exists(group_name=parent, parent_name=None)
child_exists = self._group_exists(
group_name=group_name,
parent_name=parent)
if parent_exists and child_exists:
group, parent = self.group_dict[group_name]
changed = False
elif parent_exists and not child_exists:
if not self.module.check_mode:
group = self._create_group(
group=group,
parent=parent,
description=description)
changed = True
else:
self.module.fail_json(
msg="parent group: " +
parent +
" does not exist")
return changed, group
def _create_group(self, group, parent, description):
"""
Create the provided server group
:param group: clc_sdk.Group - the group to create
:param parent: clc_sdk.Parent - the parent group for {group}
:param description: string - a text description of the group
:return: clc_sdk.Group - the created group
"""
response = None
(parent, grandparent) = self.group_dict[parent]
try:
response = parent.Create(name=group, description=description)
except CLCException as ex:
self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
group, ex.response_text))
return response
def _group_exists(self, group_name, parent_name):
"""
Check to see if a group exists
:param group_name: string - the group to check
:param parent_name: string - the parent of group_name
:return: boolean - whether the group exists
"""
result = False
if group_name in self.group_dict:
(group, parent) = self.group_dict[group_name]
if parent_name is None or parent_name == parent.name:
result = True
return result
def _get_group_tree_for_datacenter(self, datacenter=None):
"""
Walk the tree of groups for a datacenter
:param datacenter: string - the datacenter to walk (ex: 'UC1')
:return: a dictionary of groups and parents
"""
self.root_group = self.clc.v2.Datacenter(
location=datacenter).RootGroup()
return self._walk_groups_recursive(
parent_group=None,
child_group=self.root_group)
def _walk_groups_recursive(self, parent_group, child_group):
"""
Walk a parent-child tree of groups, starting with the provided child group
:param parent_group: clc_sdk.Group - the parent group to start the walk
:param child_group: clc_sdk.Group - the child group to start the walk
:return: a dictionary of groups and parents
"""
result = {str(child_group): (child_group, parent_group)}
groups = child_group.Subgroups().groups
if len(groups) > 0:
for group in groups:
if group.type != 'default':
continue
result.update(self._walk_groups_recursive(child_group, group))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process group request')
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcGroup._define_module_argument_spec(),
supports_check_mode=True)
clc_group = ClcGroup(module)
clc_group.process_request()
if __name__ == '__main__':
main()
| gpl-3.0 |
Juicechuan/AMRParsing | stanfordnlp/unidecode/x0b9.py | 253 | 4704 | data = (
'ruk', # 0x00
'rut', # 0x01
'rup', # 0x02
'ruh', # 0x03
'rweo', # 0x04
'rweog', # 0x05
'rweogg', # 0x06
'rweogs', # 0x07
'rweon', # 0x08
'rweonj', # 0x09
'rweonh', # 0x0a
'rweod', # 0x0b
'rweol', # 0x0c
'rweolg', # 0x0d
'rweolm', # 0x0e
'rweolb', # 0x0f
'rweols', # 0x10
'rweolt', # 0x11
'rweolp', # 0x12
'rweolh', # 0x13
'rweom', # 0x14
'rweob', # 0x15
'rweobs', # 0x16
'rweos', # 0x17
'rweoss', # 0x18
'rweong', # 0x19
'rweoj', # 0x1a
'rweoc', # 0x1b
'rweok', # 0x1c
'rweot', # 0x1d
'rweop', # 0x1e
'rweoh', # 0x1f
'rwe', # 0x20
'rweg', # 0x21
'rwegg', # 0x22
'rwegs', # 0x23
'rwen', # 0x24
'rwenj', # 0x25
'rwenh', # 0x26
'rwed', # 0x27
'rwel', # 0x28
'rwelg', # 0x29
'rwelm', # 0x2a
'rwelb', # 0x2b
'rwels', # 0x2c
'rwelt', # 0x2d
'rwelp', # 0x2e
'rwelh', # 0x2f
'rwem', # 0x30
'rweb', # 0x31
'rwebs', # 0x32
'rwes', # 0x33
'rwess', # 0x34
'rweng', # 0x35
'rwej', # 0x36
'rwec', # 0x37
'rwek', # 0x38
'rwet', # 0x39
'rwep', # 0x3a
'rweh', # 0x3b
'rwi', # 0x3c
'rwig', # 0x3d
'rwigg', # 0x3e
'rwigs', # 0x3f
'rwin', # 0x40
'rwinj', # 0x41
'rwinh', # 0x42
'rwid', # 0x43
'rwil', # 0x44
'rwilg', # 0x45
'rwilm', # 0x46
'rwilb', # 0x47
'rwils', # 0x48
'rwilt', # 0x49
'rwilp', # 0x4a
'rwilh', # 0x4b
'rwim', # 0x4c
'rwib', # 0x4d
'rwibs', # 0x4e
'rwis', # 0x4f
'rwiss', # 0x50
'rwing', # 0x51
'rwij', # 0x52
'rwic', # 0x53
'rwik', # 0x54
'rwit', # 0x55
'rwip', # 0x56
'rwih', # 0x57
'ryu', # 0x58
'ryug', # 0x59
'ryugg', # 0x5a
'ryugs', # 0x5b
'ryun', # 0x5c
'ryunj', # 0x5d
'ryunh', # 0x5e
'ryud', # 0x5f
'ryul', # 0x60
'ryulg', # 0x61
'ryulm', # 0x62
'ryulb', # 0x63
'ryuls', # 0x64
'ryult', # 0x65
'ryulp', # 0x66
'ryulh', # 0x67
'ryum', # 0x68
'ryub', # 0x69
'ryubs', # 0x6a
'ryus', # 0x6b
'ryuss', # 0x6c
'ryung', # 0x6d
'ryuj', # 0x6e
'ryuc', # 0x6f
'ryuk', # 0x70
'ryut', # 0x71
'ryup', # 0x72
'ryuh', # 0x73
'reu', # 0x74
'reug', # 0x75
'reugg', # 0x76
'reugs', # 0x77
'reun', # 0x78
'reunj', # 0x79
'reunh', # 0x7a
'reud', # 0x7b
'reul', # 0x7c
'reulg', # 0x7d
'reulm', # 0x7e
'reulb', # 0x7f
'reuls', # 0x80
'reult', # 0x81
'reulp', # 0x82
'reulh', # 0x83
'reum', # 0x84
'reub', # 0x85
'reubs', # 0x86
'reus', # 0x87
'reuss', # 0x88
'reung', # 0x89
'reuj', # 0x8a
'reuc', # 0x8b
'reuk', # 0x8c
'reut', # 0x8d
'reup', # 0x8e
'reuh', # 0x8f
'ryi', # 0x90
'ryig', # 0x91
'ryigg', # 0x92
'ryigs', # 0x93
'ryin', # 0x94
'ryinj', # 0x95
'ryinh', # 0x96
'ryid', # 0x97
'ryil', # 0x98
'ryilg', # 0x99
'ryilm', # 0x9a
'ryilb', # 0x9b
'ryils', # 0x9c
'ryilt', # 0x9d
'ryilp', # 0x9e
'ryilh', # 0x9f
'ryim', # 0xa0
'ryib', # 0xa1
'ryibs', # 0xa2
'ryis', # 0xa3
'ryiss', # 0xa4
'rying', # 0xa5
'ryij', # 0xa6
'ryic', # 0xa7
'ryik', # 0xa8
'ryit', # 0xa9
'ryip', # 0xaa
'ryih', # 0xab
'ri', # 0xac
'rig', # 0xad
'rigg', # 0xae
'rigs', # 0xaf
'rin', # 0xb0
'rinj', # 0xb1
'rinh', # 0xb2
'rid', # 0xb3
'ril', # 0xb4
'rilg', # 0xb5
'rilm', # 0xb6
'rilb', # 0xb7
'rils', # 0xb8
'rilt', # 0xb9
'rilp', # 0xba
'rilh', # 0xbb
'rim', # 0xbc
'rib', # 0xbd
'ribs', # 0xbe
'ris', # 0xbf
'riss', # 0xc0
'ring', # 0xc1
'rij', # 0xc2
'ric', # 0xc3
'rik', # 0xc4
'rit', # 0xc5
'rip', # 0xc6
'rih', # 0xc7
'ma', # 0xc8
'mag', # 0xc9
'magg', # 0xca
'mags', # 0xcb
'man', # 0xcc
'manj', # 0xcd
'manh', # 0xce
'mad', # 0xcf
'mal', # 0xd0
'malg', # 0xd1
'malm', # 0xd2
'malb', # 0xd3
'mals', # 0xd4
'malt', # 0xd5
'malp', # 0xd6
'malh', # 0xd7
'mam', # 0xd8
'mab', # 0xd9
'mabs', # 0xda
'mas', # 0xdb
'mass', # 0xdc
'mang', # 0xdd
'maj', # 0xde
'mac', # 0xdf
'mak', # 0xe0
'mat', # 0xe1
'map', # 0xe2
'mah', # 0xe3
'mae', # 0xe4
'maeg', # 0xe5
'maegg', # 0xe6
'maegs', # 0xe7
'maen', # 0xe8
'maenj', # 0xe9
'maenh', # 0xea
'maed', # 0xeb
'mael', # 0xec
'maelg', # 0xed
'maelm', # 0xee
'maelb', # 0xef
'maels', # 0xf0
'maelt', # 0xf1
'maelp', # 0xf2
'maelh', # 0xf3
'maem', # 0xf4
'maeb', # 0xf5
'maebs', # 0xf6
'maes', # 0xf7
'maess', # 0xf8
'maeng', # 0xf9
'maej', # 0xfa
'maec', # 0xfb
'maek', # 0xfc
'maet', # 0xfd
'maep', # 0xfe
'maeh', # 0xff
)
| gpl-2.0 |
jiangzhixiao/odoo | addons/l10n_fr/wizard/fr_report_compute_resultant.py | 374 | 2312 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
_name = 'account.cdr.report'
_description = 'Account CDR Report'
def _get_defaults(self, cr, uid, context=None):
fiscalyear_id = self.pool.get('account.fiscalyear').find(cr, uid)
return fiscalyear_id
_columns = {
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
}
_defaults = {
'fiscalyear_id': _get_defaults
}
def print_cdr_report(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', [])
data = {}
data['form'] = {}
data['ids'] = active_ids
data['form']['fiscalyear_id'] = self.browse(cr, uid, ids)[0].fiscalyear_id.id
return self.pool['report'].get_action(
cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=data, context=context
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
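For orientation, the dict that print_cdr_report() above hands to report.get_action is just (a sketch):

```python
# {'ids': [<wizard ids from context['active_ids']>],
#  'form': {'fiscalyear_id': <id of the selected fiscal year>}}
```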
popazerty/bnigma2 | lib/python/Plugins/Extensions/GraphMultiEPG/plugin.py | 3 | 4564 | from Plugins.Plugin import PluginDescriptor
from GraphMultiEpg import GraphMultiEPG
from Screens.ChannelSelection import BouquetSelector
from enigma import eServiceCenter, eServiceReference
from ServiceReference import ServiceReference
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
Session = None
Servicelist = None
bouquetSel = None
epg_bouquet = None
epg = None
class SelectBouquet(Screen):
skin = """<screen name="SelectBouquet" position="center,center" size="300,240" title="Choose bouquet">
<widget name="menu" position="10,10" size="290,225" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, bouquets, curbouquet, direction, enableWrapAround=True):
Screen.__init__(self, session)
self["actions"] = ActionMap(["OkCancelActions", "EPGSelectActions"],
{
"ok": self.okbuttonClick,
"cancel": self.cancelClick,
"nextBouquet": self.up,
"prevBouquet": self.down
})
entrys = [ (x[0], x[1]) for x in bouquets ]
self["menu"] = MenuList(entrys, enableWrapAround)
idx = 0
for x in bouquets:
if x[1] == curbouquet:
break
idx += 1
self.idx = idx
self.dir = direction
self.onShow.append(self.__onShow)
def __onShow(self):
self["menu"].moveToIndex(self.idx)
if self.dir == -1:
self.down()
else:
self.up()
def getCurrent(self):
cur = self["menu"].getCurrent()
return cur and cur[1]
def okbuttonClick(self):
self.close(self.getCurrent())
def up(self):
self["menu"].up()
def down(self):
self["menu"].down()
def cancelClick(self):
self.close(None)
def zapToService(service, preview = False, zapback = False):
if Servicelist.startServiceRef is None:
Servicelist.startServiceRef = Session.nav.getCurrentlyPlayingServiceReference()
if not service is None:
if not preview and not zapback:
if Servicelist.getRoot() != epg_bouquet:
Servicelist.clearPath()
if Servicelist.bouquet_root != epg_bouquet:
Servicelist.enterPath(Servicelist.bouquet_root)
Servicelist.enterPath(epg_bouquet)
Servicelist.setCurrentSelection(service)
if not zapback or preview:
Servicelist.zap(not preview, preview)
if (Servicelist.dopipzap or zapback) and not preview:
Servicelist.zapBack()
if not preview:
Servicelist.startServiceRef = None
Servicelist.startRoot = None
def getBouquetServices(bouquet):
	services = [ ]
	servicelist = eServiceCenter.getInstance().list(bouquet)  # local name; avoids shadowing the module-level Servicelist
	if servicelist is not None:
		while True:
			service = servicelist.getNext()
			if not service.valid():  # end of list
				break
			if service.flags & (eServiceReference.isDirectory | eServiceReference.isMarker):  # skip non-playable services
				continue
			services.append(ServiceReference(service))
	return services
def cleanup():
global Session
Session = None
global Servicelist
Servicelist = None
global bouquets
bouquets = None
global epg_bouquet
epg_bouquet = None
global epg
epg = None
def closed(ret=False):
cleanup()
def onSelectBouquetClose(bouquet):
if not bouquet is None:
services = getBouquetServices(bouquet)
if len(services):
global epg_bouquet
epg_bouquet = bouquet
epg.setServices(services)
epg.setTitle(ServiceReference(epg_bouquet).getServiceName())
def changeBouquetCB(direction, epgcall):
global epg
epg = epgcall
Session.openWithCallback(onSelectBouquetClose, SelectBouquet, bouquets, epg_bouquet, direction)
def main(session, servicelist = None, **kwargs):
global Session
Session = session
global Servicelist
Servicelist = servicelist
global bouquets
bouquets = Servicelist and Servicelist.getBouquetList()
global epg_bouquet
epg_bouquet = Servicelist and Servicelist.getRoot()
runGraphMultiEpg()
def runGraphMultiEpg():
global Servicelist
global bouquets
global epg_bouquet
if epg_bouquet is not None:
if len(bouquets) > 1:
cb = changeBouquetCB
else:
cb = None
services = getBouquetServices(epg_bouquet)
Session.openWithCallback(reopen, GraphMultiEPG, services, zapToService, cb, ServiceReference(epg_bouquet).getServiceName())
def reopen(answer):
if answer is None:
runGraphMultiEpg()
else:
closed(answer)
def Plugins(**kwargs):
name = _("Graphical Multi EPG")
descr = _("A graphical EPG for all services of an specific bouquet")
return [PluginDescriptor(name=name, description=descr, where = PluginDescriptor.WHERE_EVENTINFO, needsRestart = False, fnc=main),
PluginDescriptor(name=name, description=descr, where = PluginDescriptor.WHERE_EXTENSIONSMENU, needsRestart = False, fnc=main)]
| gpl-2.0 |
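For orientation: getBouquetList() returns (name, eServiceReference) pairs, which is why SelectBouquet shows x[0] and the zap path uses x[1] as the bouquet reference. Roughly (entries illustrative):

```python
# bouquets:
# [("Favourites (TV)", <eServiceReference ...userbouquet.favourites.tv...>),
#  ("Sports",          <eServiceReference ...userbouquet.sports.tv...>)]
```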
albertomurillo/ansible | lib/ansible/modules/source_control/github_webhook_facts.py | 22 | 5087 | #!/usr/bin/python
#
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: github_webhook_facts
short_description: Query information about GitHub webhooks
version_added: "2.8"
description:
- "Query information about GitHub webhooks"
requirements:
- "PyGithub >= 1.3.5"
options:
repository:
description:
- Full name of the repository to configure a hook for
required: true
aliases:
- repo
user:
description:
- User to authenticate to GitHub as
required: true
password:
description:
- Password to authenticate to GitHub with
required: false
token:
description:
- Token to authenticate to GitHub with
required: false
github_url:
description:
- Base URL of the github api
required: false
default: https://api.github.com
author:
- "Chris St. Pierre (@stpierre)"
'''
EXAMPLES = '''
- name: list hooks for a repository (password auth)
github_webhook_facts:
repository: ansible/ansible
user: "{{ github_user }}"
password: "{{ github_password }}"
register: ansible_webhooks
- name: list hooks for a repository on GitHub Enterprise (token auth)
github_webhook_facts:
repository: myorg/myrepo
user: "{{ github_user }}"
token: "{{ github_user_api_token }}"
github_url: https://github.example.com/api/v3/
register: myrepo_webhooks
'''
RETURN = '''
---
hooks:
description: A list of hooks that exist for the repo
returned: always
type: list
sample: >
[{"has_shared_secret": true,
"url": "https://jenkins.example.com/ghprbhook/",
"events": ["issue_comment", "pull_request"],
"insecure_ssl": "1",
"content_type": "json",
"active": true,
"id": 6206,
"last_response": {"status": "active", "message": "OK", "code": 200}}]
'''
import traceback
GITHUB_IMP_ERR = None
try:
import github
HAS_GITHUB = True
except ImportError:
GITHUB_IMP_ERR = traceback.format_exc()
HAS_GITHUB = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
def _munge_hook(hook_obj):
retval = {
"active": hook_obj.active,
"events": hook_obj.events,
"id": hook_obj.id,
"url": hook_obj.url,
}
retval.update(hook_obj.config)
retval["has_shared_secret"] = "secret" in retval
if "secret" in retval:
del retval["secret"]
retval["last_response"] = hook_obj.last_response.raw_data
return retval
def main():
module = AnsibleModule(
argument_spec=dict(
repository=dict(type='str', required=True, aliases=["repo"]),
user=dict(type='str', required=True),
password=dict(type='str', required=False, no_log=True),
token=dict(type='str', required=False, no_log=True),
github_url=dict(
type='str', required=False, default="https://api.github.com")),
mutually_exclusive=(('password', 'token'), ),
required_one_of=(("password", "token"), ),
supports_check_mode=True)
if not HAS_GITHUB:
module.fail_json(msg=missing_required_lib('PyGithub'),
exception=GITHUB_IMP_ERR)
try:
github_conn = github.Github(
module.params["user"],
module.params.get("password") or module.params.get("token"),
base_url=module.params["github_url"])
except github.GithubException as err:
module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
module.params["github_url"], to_native(err)))
try:
repo = github_conn.get_repo(module.params["repository"])
except github.BadCredentialsException as err:
module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
module.params["github_url"], to_native(err)))
except github.UnknownObjectException as err:
module.fail_json(
msg="Could not find repository %s in GitHub at %s: %s" % (
module.params["repository"], module.params["github_url"],
to_native(err)))
except Exception as err:
module.fail_json(
msg="Could not fetch repository %s from GitHub at %s: %s" %
(module.params["repository"], module.params["github_url"],
to_native(err)),
exception=traceback.format_exc())
try:
hooks = [_munge_hook(h) for h in repo.get_hooks()]
except github.GithubException as err:
module.fail_json(
msg="Unable to get hooks from repository %s: %s" %
(module.params["repository"], to_native(err)),
exception=traceback.format_exc())
module.exit_json(changed=False, hooks=hooks)
if __name__ == '__main__':
main()
| gpl-3.0 |
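The argument spec above makes password and token mutually exclusive while requiring one of them; AnsibleModule enforces this before main() ever touches GitHub (a sketch of the outcomes):

```python
# {'repository': 'o/r', 'user': 'u'}                   -> fails: one of password/token required
# {'repository': 'o/r', 'user': 'u', 'password': 'p'}  -> ok
# {'repository': 'o/r', 'user': 'u', 'password': 'p',
#  'token': 't'}                                       -> fails: mutually exclusive
```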
jkorell/PTVS | Python/Tests/TestData/DebuggerProject/UnhandledException1_v25.py | 18 | 1621 | try: raise Exception() # does not break
except: print('handled 1')
try: raise Exception() # does not break
except Exception: print('handled 2')
try: raise Exception() # does not break
except Exception, e: print('handled 4')
try: raise Exception() # does not break
except BaseException: print('handled 5')
try: raise Exception() # does not break
except BaseException, e: print('handled 7')
try: raise Exception() # does not break
except (Exception,): print('handled 8')
try: raise Exception() # does not break
except (Exception,), e: print('handled 10')
try: raise Exception() # does not break
except (Exception, ValueError): print('handled 11')
try: raise Exception() # does not break
except (Exception, ValueError), e: print('handled 13')
try: raise ValueError() # does not break
except: print('handled 14')
try: raise ValueError() # does not break
except Exception: print('handled 15')
try: raise ValueError() # does not break
except Exception, e: print('handled 17')
try: raise ValueError() # does not break
except BaseException: print('handled 18')
try: raise ValueError() # does not break
except BaseException, e: print('handled 20')
try: raise ValueError() # does not break
except (Exception,): print('handled 21')
try: raise ValueError() # does not break
except (Exception,), e: print('handled 23')
try: raise ValueError() # does not break
except (Exception, ValueError): print('handled 24')
try: raise ValueError() # does not break
except (Exception, ValueError), e: print('handled 26')
raise Exception() # breaks
| apache-2.0 |
swiftstack/swift | test/functional/__init__.py | 2 | 51999 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import mock
import os
import six
from six.moves.urllib.parse import urlparse, urlsplit, urlunsplit
import sys
import pickle
import socket
import locale
import eventlet
import eventlet.debug
import functools
import random
import base64
from time import time, sleep
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp
from six.moves.configparser import ConfigParser, NoSectionError
from six.moves import http_client
from six.moves.http_client import HTTPException
from swift.common.middleware.memcache import MemcacheMiddleware
from swift.common.storage_policy import parse_storage_policies, PolicyError
from swift.common.utils import set_swift_dir
from test import get_config, listen_zero
from test.unit import debug_logger, FakeMemcache
# importing skip_if_no_xattrs so that functional tests can grab it from the
# test.functional namespace. Importing SkipTest so this works under both
# nose and testr test runners.
from test.unit import skip_if_no_xattrs as real_skip_if_no_xattrs
from test.unit import SkipTest
from swift.common import constraints, utils, ring, storage_policy
from swift.common.ring import Ring
from swift.common.wsgi import loadapp, SwiftHttpProtocol
from swift.common.utils import config_true_value, split_path
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server, mem_server as mem_object_server
import swift.proxy.controllers.obj
http_client._MAXHEADERS = constraints.MAX_HEADER_COUNT
DEBUG = True
# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
# perform this setup at module import time, since all the socket module
# bindings in the swiftclient code will have been made by the time nose
# invokes the package or class setup methods.
eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)
# swift_test_client imports from swiftclient, so import it after the monkey-patching
from test.functional.swift_test_client import Account, Connection, Container, \
ResponseError
from swiftclient import get_auth, http_connection
has_insecure = False
try:
from swiftclient import __version__ as client_version
# Prevent a ValueError in StrictVersion with '2.0.3.68.ga99c2ff'
client_version = '.'.join(client_version.split('.')[:3])
except ImportError:
# Pre-PBR we had version, not __version__. Anyhow...
client_version = '1.2'
from distutils.version import StrictVersion
if StrictVersion(client_version) >= StrictVersion('2.0'):
has_insecure = True
config = {}
web_front_end = None
normalized_urls = None
# If no config was read, we will fall back to old school env vars
swift_test_auth_version = None
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None, '', '', '']
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None, '', '', '']
swift_test_tenant = ['', '', '', '', '', '']
swift_test_perm = ['', '', '', '', '', '']
swift_test_domain = ['', '', '', '', '', '']
swift_test_user_id = ['', '', '', '', '', '']
swift_test_tenant_id = ['', '', '', '', '', '']
skip, skip2, skip3, skip_if_not_v3, skip_service_tokens, \
skip_if_no_reseller_admin = False, False, False, False, False, False
orig_collate = ''
insecure = False
in_process = False
_testdir = _test_servers = _test_coros = _test_socks = None
policy_specified = None
skip_if_no_xattrs = None
class FakeMemcacheMiddleware(MemcacheMiddleware):
"""
Caching middleware that fakes out caching in swift if memcached
does not appear to be running.
"""
def __init__(self, app, conf):
super(FakeMemcacheMiddleware, self).__init__(app, conf)
self.memcache = FakeMemcache()
class InProcessException(BaseException):
pass
def _info(msg):
print(msg, file=sys.stderr)
def _debug(msg):
if DEBUG:
_info('DEBUG: ' + msg)
def _in_process_setup_swift_conf(swift_conf_src, testdir):
# override swift.conf contents for in-process functional test runs
conf = ConfigParser()
conf.read(swift_conf_src)
try:
section = 'swift-hash'
conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')
section = 'swift-constraints'
max_file_size = (8 * 1024 * 1024) + 2 # 8 MB + 2
conf.set(section, 'max_file_size', str(max_file_size))
except NoSectionError:
msg = 'Conf file %s is missing section %s' % (swift_conf_src, section)
raise InProcessException(msg)
test_conf_file = os.path.join(testdir, 'swift.conf')
with open(test_conf_file, 'w') as fp:
conf.write(fp)
return test_conf_file
def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
"""
Look for a file first in conf_src_dir, if it exists, otherwise optionally
look in the source tree sample 'etc' dir.
:param conf_src_dir: Directory in which to search first for conf file. May
be None
:param conf_file_name: Name of conf file
:param use_sample: If True and the conf_file_name is not found, then return
any sample conf file found in the source tree sample
'etc' dir by appending '-sample' to conf_file_name
:returns: Path to conf file
:raises InProcessException: If no conf file is found
"""
dflt_src_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
os.pardir, os.pardir, os.pardir,
'etc'))
conf_src_dir = dflt_src_dir if conf_src_dir is None else conf_src_dir
conf_file_path = os.path.join(conf_src_dir, conf_file_name)
if os.path.exists(conf_file_path):
return conf_file_path
if use_sample:
# fall back to using the corresponding sample conf file
conf_file_name += '-sample'
conf_file_path = os.path.join(dflt_src_dir, conf_file_name)
if os.path.exists(conf_file_path):
return conf_file_path
msg = 'Failed to find config file %s' % conf_file_name
raise InProcessException(msg)
def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
"""
If SWIFT_TEST_POLICY is set:
- look in swift.conf file for specified policy
- move this to be policy-0 but preserving its options
- copy its ring file to test dir, changing its devices to suit
in process testing, and renaming it to suit policy-0
Otherwise, create a default ring file.
"""
conf = ConfigParser()
conf.read(swift_conf)
sp_prefix = 'storage-policy:'
try:
# policy index 0 will be created if no policy exists in conf
policies = parse_storage_policies(conf)
except PolicyError as e:
raise InProcessException(e)
# clear all policies from test swift.conf before adding test policy back
for policy in policies:
conf.remove_section(sp_prefix + str(policy.idx))
if policy_specified:
policy_to_test = policies.get_by_name(policy_specified)
if policy_to_test is None:
raise InProcessException('Failed to find policy name "%s"'
% policy_specified)
_info('Using specified policy %s' % policy_to_test.name)
else:
policy_to_test = policies.default
_info('Defaulting to policy %s' % policy_to_test.name)
# make policy_to_test be policy index 0 and default for the test config
sp_zero_section = sp_prefix + '0'
conf.add_section(sp_zero_section)
for (k, v) in policy_to_test.get_info(config=True).items():
conf.set(sp_zero_section, k, str(v))
conf.set(sp_zero_section, 'default', 'True')
with open(swift_conf, 'w') as fp:
conf.write(fp)
# look for a source ring file
ring_file_src = ring_file_test = 'object.ring.gz'
if policy_to_test.idx:
ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
try:
ring_file_src = _in_process_find_conf_file(conf_src_dir, ring_file_src,
use_sample=False)
except InProcessException:
if policy_specified:
raise InProcessException('Failed to find ring file %s'
% ring_file_src)
ring_file_src = None
ring_file_test = os.path.join(testdir, ring_file_test)
if ring_file_src:
# copy source ring file to a policy-0 test ring file, re-homing servers
_info('Using source ring file %s' % ring_file_src)
ring_data = ring.RingData.load(ring_file_src)
obj_sockets = []
for dev in ring_data.devs:
device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
# ensure a data dir exists for each re-homed device
utils.mkdirs(os.path.join(_testdir, device))
utils.mkdirs(os.path.join(_testdir, device, 'tmp'))
obj_socket = listen_zero()
obj_sockets.append(obj_socket)
dev['port'] = obj_socket.getsockname()[1]
dev['ip'] = '127.0.0.1'
dev['device'] = device
dev['replication_port'] = dev['port']
dev['replication_ip'] = dev['ip']
ring_data.save(ring_file_test)
else:
# make default test ring, 3 replicas, 4 partitions, 3 devices
# which will work for a replication policy or a 2+1 EC policy
_info('No source object ring file, creating 3rep/4part/3dev ring')
obj_sockets = [listen_zero() for _ in (0, 1, 2)]
replica2part2dev_id = [[0, 1, 2, 0],
[1, 2, 0, 1],
[2, 0, 1, 2]]
devs = [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': obj_sockets[0].getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': obj_sockets[1].getsockname()[1]},
{'id': 2, 'zone': 2, 'device': 'sdc1', 'ip': '127.0.0.1',
'port': obj_sockets[2].getsockname()[1]}]
ring_data = ring.RingData(replica2part2dev_id, devs, 30)
with closing(GzipFile(ring_file_test, 'wb')) as f:
pickle.dump(ring_data, f)
for dev in ring_data.devs:
_debug('Ring file dev: %s' % dev)
return obj_sockets
def _load_encryption(proxy_conf_file, swift_conf_file, **kwargs):
"""
Load encryption configuration and override proxy-server.conf contents.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid
"""
_debug('Setting configuration for encryption')
# The global conf dict cannot be used to modify the pipeline.
# The pipeline loader requires the pipeline to be set in the local_conf.
# If pipeline is set in the global conf dict (which in turn populates the
# DEFAULTS options) then it prevents pipeline being loaded into the local
# conf during wsgi load_app.
# Therefore we must modify the [pipeline:main] section.
conf = ConfigParser()
conf.read(proxy_conf_file)
try:
section = 'pipeline:main'
pipeline = conf.get(section, 'pipeline')
pipeline = pipeline.replace(
"proxy-logging proxy-server",
"keymaster encryption proxy-logging proxy-server")
pipeline = pipeline.replace(
"cache listing_formats",
"cache etag-quoter listing_formats")
conf.set(section, 'pipeline', pipeline)
root_secret = base64.b64encode(os.urandom(32))
if not six.PY2:
root_secret = root_secret.decode('ascii')
conf.set('filter:keymaster', 'encryption_root_secret', root_secret)
conf.set('filter:versioned_writes', 'allow_object_versioning', 'true')
conf.set('filter:etag-quoter', 'enable_by_default', 'true')
except NoSectionError as err:
msg = 'Problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
raise InProcessException(msg)
test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
with open(test_conf_file, 'w') as fp:
conf.write(fp)
return test_conf_file, swift_conf_file
def _load_ec_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
"""
Override swift.conf [storage-policy:0] section to use a 2+1 EC policy.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
"""
_debug('Setting configuration for default EC policy')
conf = ConfigParser()
conf.read(swift_conf_file)
# remove existing policy sections that came with swift.conf-sample
for section in list(conf.sections()):
if section.startswith('storage-policy'):
conf.remove_section(section)
# add new policy 0 section for an EC policy
conf.add_section('storage-policy:0')
ec_policy_spec = {
'name': 'ec-test',
'policy_type': 'erasure_coding',
'ec_type': 'liberasurecode_rs_vand',
'ec_num_data_fragments': 2,
'ec_num_parity_fragments': 1,
'ec_object_segment_size': 1048576,
'default': True
}
for k, v in ec_policy_spec.items():
conf.set('storage-policy:0', k, str(v))
with open(swift_conf_file, 'w') as fp:
conf.write(fp)
return proxy_conf_file, swift_conf_file
def _load_domain_remap_staticweb(proxy_conf_file, swift_conf_file, **kwargs):
"""
Load domain_remap and staticweb into proxy server pipeline.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid
"""
_debug('Setting configuration for domain_remap')
# add a domain_remap storage_domain to the test configuration
storage_domain = 'example.net'
global config
config['storage_domain'] = storage_domain
# The global conf dict cannot be used to modify the pipeline.
# The pipeline loader requires the pipeline to be set in the local_conf.
# If pipeline is set in the global conf dict (which in turn populates the
# DEFAULTS options) then it prevents pipeline being loaded into the local
# conf during wsgi load_app.
# Therefore we must modify the [pipeline:main] section.
conf = ConfigParser()
conf.read(proxy_conf_file)
try:
section = 'pipeline:main'
old_pipeline = conf.get(section, 'pipeline')
pipeline = old_pipeline.replace(
" tempauth ",
" tempauth staticweb ")
pipeline = pipeline.replace(
" listing_formats ",
" domain_remap listing_formats ")
if pipeline == old_pipeline:
raise InProcessException(
"Failed to insert domain_remap and staticweb into pipeline: %s"
% old_pipeline)
conf.set(section, 'pipeline', pipeline)
# set storage_domain in domain_remap middleware to match test config
section = 'filter:domain_remap'
conf.set(section, 'storage_domain', storage_domain)
except NoSectionError as err:
msg = 'Problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
raise InProcessException(msg)
test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
with open(test_conf_file, 'w') as fp:
conf.write(fp)
return test_conf_file, swift_conf_file
def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
"""
Load s3api configuration and override proxy-server.conf contents.
:param proxy_conf_file: Source proxy conf filename
:param swift_conf_file: Source swift conf filename
:returns: Tuple of paths to the proxy conf file and swift conf file to use
:raises InProcessException: raised if proxy conf contents are invalid
"""
_debug('Setting configuration for s3api')
# The global conf dict cannot be used to modify the pipeline.
# The pipeline loader requires the pipeline to be set in the local_conf.
# If pipeline is set in the global conf dict (which in turn populates the
# DEFAULTS options) then it prevents pipeline being loaded into the local
# conf during wsgi load_app.
# Therefore we must modify the [pipeline:main] section.
conf = ConfigParser()
conf.read(proxy_conf_file)
try:
section = 'pipeline:main'
pipeline = conf.get(section, 'pipeline')
pipeline = pipeline.replace(
"tempauth",
"s3api tempauth")
conf.set(section, 'pipeline', pipeline)
conf.set('filter:s3api', 's3_acl', 'true')
conf.set('filter:versioned_writes', 'allow_object_versioning', 'true')
except NoSectionError as err:
msg = 'Problem with proxy conf file %s: %s' % \
(proxy_conf_file, err)
raise InProcessException(msg)
test_conf_file = os.path.join(_testdir, 'proxy-server.conf')
with open(test_conf_file, 'w') as fp:
conf.write(fp)
return test_conf_file, swift_conf_file
# Mapping from possible values of the variable
# SWIFT_TEST_IN_PROCESS_CONF_LOADER
# to the method to call for loading the associated configuration
# The expected signature for these methods is:
# conf_filename_to_use loader(input_conf_filename, **kwargs)
conf_loaders = {
'encryption': _load_encryption,
'ec': _load_ec_as_default_policy,
}
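# Illustrative sketch (hypothetical, not registered above): a custom conf
# loader must follow the documented signature and return the pair of conf
# file paths to use.
def _example_noop_conf_loader(proxy_conf_file, swift_conf_file, **kwargs):
# A real loader would modify the conf files and write copies under
# _testdir, as _load_encryption and _load_ec_as_default_policy do above.
return proxy_conf_file, swift_conf_file
# Registration would look like:
# conf_loaders['noop'] = _example_noop_conf_loader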
def in_process_setup(the_object_server=object_server):
_info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
_info('Using object_server class: %s' % the_object_server.__name__)
conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
show_debug_logs = os.environ.get('SWIFT_TEST_DEBUG_LOGS')
if conf_src_dir is not None:
if not os.path.isdir(conf_src_dir):
msg = 'Config source %s is not a dir' % conf_src_dir
raise InProcessException(msg)
_info('Using config source dir: %s' % conf_src_dir)
# If SWIFT_TEST_IN_PROCESS_CONF_DIR specifies a config source dir then
# prefer config files from there, otherwise read config from source tree
# sample files. A mixture of files from the two sources is allowed.
proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
_info('Using proxy config from %s' % proxy_conf)
swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
_info('Using swift config from %s' % swift_conf_src)
global _testdir
_testdir = os.path.join(mkdtemp(), 'tmp_functional')
utils.mkdirs(_testdir)
rmtree(_testdir)
utils.mkdirs(os.path.join(_testdir, 'sda1'))
utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
utils.mkdirs(os.path.join(_testdir, 'sdc1'))
utils.mkdirs(os.path.join(_testdir, 'sdc1', 'tmp'))
swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
_info('prepared swift.conf: %s' % swift_conf)
# load s3api and staticweb configs
proxy_conf, swift_conf = _load_s3api(proxy_conf, swift_conf)
proxy_conf, swift_conf = _load_domain_remap_staticweb(proxy_conf,
swift_conf)
# Call the associated method for the value of
# 'SWIFT_TEST_IN_PROCESS_CONF_LOADER', if one exists
conf_loader_label = os.environ.get(
'SWIFT_TEST_IN_PROCESS_CONF_LOADER')
if conf_loader_label is not None:
try:
conf_loader = conf_loaders[conf_loader_label]
_debug('Calling method %s mapped to conf loader %s' %
(conf_loader.__name__, conf_loader_label))
except KeyError as missing_key:
raise InProcessException('No function mapped for conf loader %s' %
missing_key)
try:
# Pass-in proxy_conf, swift_conf files
proxy_conf, swift_conf = conf_loader(proxy_conf, swift_conf)
_debug('Now using proxy conf %s' % proxy_conf)
_debug('Now using swift conf %s' % swift_conf)
except Exception as err: # noqa
raise InProcessException(err)
obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)
# load new swift.conf file
if set_swift_dir(os.path.dirname(swift_conf)):
constraints.reload_constraints()
storage_policy.reload_storage_policies()
global config
if constraints.SWIFT_CONSTRAINTS_LOADED:
# Use the swift constraints that are loaded for the test framework
# configuration
_c = dict((k, str(v))
for k, v in constraints.EFFECTIVE_CONSTRAINTS.items())
config.update(_c)
else:
# In-process swift constraints were not loaded, something's wrong
raise SkipTest
global _test_socks
_test_socks = []
# We create the proxy server listening socket to get its port number so
# that we can add it as the "auth_port" value for the functional test
# clients.
prolis = listen_zero()
_test_socks.append(prolis)
# The following set of configuration values is used both for the
# functional test frame work and for the various proxy, account, container
# and object servers.
config.update({
# Values needed by the various in-process swift servers
'devices': _testdir,
'swift_dir': _testdir,
'mount_check': 'false',
'client_timeout': '4',
'container_update_timeout': '3',
'allow_account_management': 'true',
'account_autocreate': 'true',
'allow_versions': 'True',
'allow_versioned_writes': 'True',
# Below are values used by the functional test framework, as well as
# by the various in-process swift servers
'auth_uri': 'http://127.0.0.1:%d/auth/v1.0/' % prolis.getsockname()[1],
's3_storage_url': 'http://%s:%d/' % prolis.getsockname(),
# Primary functional test account (needs admin access to the
# account)
'account': 'test',
'username': 'tester',
'password': 'testing',
's3_access_key': 'test:tester',
's3_secret_key': 'testing',
# Secondary user of the primary test account (needs admin access
# to the account) for s3api
's3_access_key2': 'test:tester2',
's3_secret_key2': 'testing2',
# User on a second account (needs admin access to the account)
'account2': 'test2',
'username2': 'tester2',
'password2': 'testing2',
# User on same account as first, but without admin access
'username3': 'tester3',
'password3': 'testing3',
's3_access_key3': 'test:tester3',
's3_secret_key3': 'testing3',
# Service user and prefix (emulates glance, cinder, etc. user)
'account5': 'test5',
'username5': 'tester5',
'password5': 'testing5',
'service_prefix': 'SERVICE',
# For tempauth middleware. Update reseller_prefix
'reseller_prefix': 'AUTH, SERVICE',
'SERVICE_require_group': 'service',
# Reseller admin user (needs reseller_admin_role)
'account6': 'test6',
'username6': 'tester6',
'password6': 'testing6'
})
acc1lis = listen_zero()
acc2lis = listen_zero()
con1lis = listen_zero()
con2lis = listen_zero()
_test_socks += [acc1lis, acc2lis, con1lis, con2lis] + obj_sockets
account_ring_path = os.path.join(_testdir, 'account.ring.gz')
with closing(GzipFile(account_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': acc1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': acc2lis.getsockname()[1]}], 30),
f)
container_ring_path = os.path.join(_testdir, 'container.ring.gz')
with closing(GzipFile(container_ring_path, 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': con1lis.getsockname()[1]},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': con2lis.getsockname()[1]}], 30),
f)
def get_logger_name(name):
if show_debug_logs:
return debug_logger(name)
else:
return None
acc1srv = account_server.AccountController(
config, logger=get_logger_name('acct1'))
acc2srv = account_server.AccountController(
config, logger=get_logger_name('acct2'))
con1srv = container_server.ContainerController(
config, logger=get_logger_name('cont1'))
con2srv = container_server.ContainerController(
config, logger=get_logger_name('cont2'))
objsrvs = [
(obj_sockets[index],
the_object_server.ObjectController(
config, logger=get_logger_name('obj%d' % (index + 1))))
for index in range(len(obj_sockets))
]
if show_debug_logs:
logger = get_logger_name('proxy')
else:
logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
def get_logger(name, *args, **kwargs):
return logger
with mock.patch('swift.common.utils.get_logger', get_logger):
with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
FakeMemcacheMiddleware):
try:
app = loadapp(proxy_conf, global_conf=config)
except Exception as e:
raise InProcessException(e)
nl = utils.NullLogger()
global proxy_srv
proxy_srv = prolis
prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl,
protocol=SwiftHttpProtocol)
acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl,
protocol=SwiftHttpProtocol)
acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl,
protocol=SwiftHttpProtocol)
con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl,
protocol=SwiftHttpProtocol)
con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl,
protocol=SwiftHttpProtocol)
objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl,
protocol=SwiftHttpProtocol)
for objsrv in objsrvs]
global _test_coros
_test_coros = \
(prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)
# Create accounts "test" and "test2"
def create_account(act):
ts = utils.normalize_timestamp(time())
account_ring = Ring(_testdir, ring_name='account')
partition, nodes = account_ring.get_nodes(act)
for node in nodes:
# Note: we are just using the http_connect method in the object
# controller here to talk to the account server nodes.
conn = swift.proxy.controllers.obj.http_connect(
node['ip'], node['port'], node['device'], partition, 'PUT',
'/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
resp = conn.getresponse()
assert resp.status == 201, 'Unable to create account: %s\n%s' % (
resp.status, resp.read())
create_account('AUTH_test')
create_account('AUTH_test2')
cluster_info = {}
def get_cluster_info():
# The fallback constraints used for testing will come from the current
# effective constraints.
eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS)
# We'll update those constraints based on what the /info API provides, if
# anything.
global cluster_info
global config
try:
conn = Connection(config)
conn.authenticate()
cluster_info.update(conn.cluster_info())
except (ResponseError, socket.error, SkipTest):
# Failed to get cluster_information via /info API, so fall back on
# test.conf data
pass
else:
try:
eff_constraints.update(cluster_info['swift'])
except KeyError:
# Most likely the swift cluster has "expose_info = false" set
# in its proxy-server.conf file, so we'll just do the best we
# can.
print("** Swift Cluster not exposing /info **", file=sys.stderr)
# Finally, we'll allow any constraint present in the swift-constraints
# section of test.conf to override everything. Note that only those
# constraints defined in the constraints module are converted to integers.
test_constraints = get_config('swift-constraints')
for k in constraints.DEFAULT_CONSTRAINTS:
try:
test_constraints[k] = int(test_constraints[k])
except KeyError:
pass
except ValueError:
print("Invalid constraint value: %s = %s" % (
k, test_constraints[k]), file=sys.stderr)
eff_constraints.update(test_constraints)
# Just make it look like these constraints were loaded from a /info call,
# even if the /info call failed, or when they are overridden by values
# from the swift-constraints section of test.conf
cluster_info['swift'] = eff_constraints
def setup_package():
global policy_specified
global skip_if_no_xattrs
policy_specified = os.environ.get('SWIFT_TEST_POLICY')
in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
if in_process_env is not None:
use_in_process = utils.config_true_value(in_process_env)
else:
use_in_process = None
global in_process
global config
if use_in_process:
# Explicitly set to True, so barrel on ahead with in-process
# functional test setup.
in_process = True
# NOTE: No attempt is made to read a local test.conf file.
else:
if use_in_process is None:
# Not explicitly set, default to using in-process functional tests
# if the test.conf file is not found, or does not provide a usable
# configuration.
config.update(get_config('func_test'))
if not config:
in_process = True
# else... leave in_process value unchanged. It may be that
# setup_package is called twice, in which case in_process_setup may
# have loaded config before we reach here a second time, so the
# existence of config is not reliable to determine that in_process
# should be False. Anyway, its default value is False.
else:
# Explicitly set to False, do not attempt to use in-process
# functional tests, be sure we attempt to read from local
# test.conf file.
in_process = False
config.update(get_config('func_test'))
if in_process:
in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
in_mem_obj = utils.config_true_value(in_mem_obj_env)
skip_if_no_xattrs = real_skip_if_no_xattrs
try:
in_process_setup(the_object_server=(
mem_object_server if in_mem_obj else object_server))
except InProcessException as exc:
print(('Exception during in-process setup: %s'
% str(exc)), file=sys.stderr)
raise
else:
skip_if_no_xattrs = lambda: None
global web_front_end
web_front_end = config.get('web_front_end', 'integral')
global normalized_urls
normalized_urls = config.get('normalized_urls', False)
global orig_collate
orig_collate = locale.setlocale(locale.LC_COLLATE)
locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
global insecure
insecure = config_true_value(config.get('insecure', False))
global swift_test_auth_version
global swift_test_auth
global swift_test_user
global swift_test_key
global swift_test_tenant
global swift_test_perm
global swift_test_domain
global swift_test_service_prefix
swift_test_service_prefix = None
if config:
swift_test_auth_version = str(config.get('auth_version', '1'))
if 'auth_uri' in config:
swift_test_auth = config['auth_uri']
# Back-fill the individual parts -- really, we should just need
# host and port for s3_test_client, and that's only until we
# improve it to take an s3_storage_url option
parsed = urlsplit(config['auth_uri'])
config.update({
'auth_ssl': str(parsed.scheme == 'https'),
'auth_host': parsed.hostname,
'auth_port': str(
parsed.port if parsed.port is not None else
443 if parsed.scheme == 'https' else 80),
'auth_prefix': parsed.path,
})
config.setdefault('s3_storage_url',
urlunsplit(parsed[:2] + ('', None, None)))
elif 'auth_host' in config:
scheme = 'http'
if config_true_value(config.get('auth_ssl', 'no')):
scheme = 'https'
netloc = config['auth_host']
if 'auth_port' in config:
netloc += ':' + config['auth_port']
auth_prefix = config.get('auth_prefix', '/')
if swift_test_auth_version == "1":
auth_prefix += 'v1.0'
config['auth_uri'] = swift_test_auth = urlunsplit(
(scheme, netloc, auth_prefix, None, None))
config.setdefault('s3_storage_url', urlunsplit(
(scheme, netloc, '', None, None)))
# else, neither auth_uri nor auth_host; swift_test_auth will be unset
# and we'll skip everything later
if 'service_prefix' in config:
swift_test_service_prefix = utils.append_underscore(
config['service_prefix'])
if swift_test_auth_version == "1":
try:
if 'account' in config:
swift_test_user[0] = '%(account)s:%(username)s' % config
else:
swift_test_user[0] = '%(username)s' % config
swift_test_key[0] = config['password']
except KeyError:
# bad config, no account/username configured, tests cannot be
# run
pass
try:
swift_test_user[1] = '%s%s' % (
'%s:' % config['account2'] if 'account2' in config else '',
config['username2'])
swift_test_key[1] = config['password2']
except KeyError:
pass # old config, no second account tests can be run
try:
swift_test_user[2] = '%s%s' % (
'%s:' % config['account'] if 'account'
in config else '', config['username3'])
swift_test_key[2] = config['password3']
except KeyError:
pass # old config, no third account tests can be run
try:
swift_test_user[4] = '%s%s' % (
'%s:' % config['account5'], config['username5'])
swift_test_key[4] = config['password5']
swift_test_tenant[4] = config['account5']
except KeyError:
pass # no service token tests can be run
for _ in range(3):
swift_test_perm[_] = swift_test_user[_]
else:
swift_test_user[0] = config['username']
swift_test_tenant[0] = config['account']
swift_test_key[0] = config['password']
if 'domain' in config:
swift_test_domain[0] = config['domain']
swift_test_user[1] = config['username2']
swift_test_tenant[1] = config['account2']
swift_test_key[1] = config['password2']
if 'domain2' in config:
swift_test_domain[1] = config['domain2']
swift_test_user[2] = config['username3']
swift_test_tenant[2] = config['account']
swift_test_key[2] = config['password3']
if 'domain3' in config:
swift_test_domain[2] = config['domain3']
if 'username4' in config:
swift_test_user[3] = config['username4']
swift_test_tenant[3] = config['account4']
swift_test_key[3] = config['password4']
swift_test_domain[3] = config['domain4']
if 'username5' in config:
swift_test_user[4] = config['username5']
swift_test_tenant[4] = config['account5']
swift_test_key[4] = config['password5']
if 'domain5' in config:
swift_test_domain[4] = config['domain5']
if 'username6' in config:
swift_test_user[5] = config['username6']
swift_test_tenant[5] = config['account6']
swift_test_key[5] = config['password6']
if 'domain6' in config:
swift_test_domain[5] = config['domain6']
for _ in range(5):
swift_test_perm[_] = swift_test_tenant[_] + ':' \
+ swift_test_user[_]
global skip
if not skip:
skip = not all([swift_test_auth, swift_test_user[0],
swift_test_key[0]])
if skip:
print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG',
file=sys.stderr)
global skip2
if not skip2:
skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
if not skip and skip2:
print('SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS '
'DUE TO NO CONFIG FOR THEM', file=sys.stderr)
global skip3
if not skip3:
skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
if not skip and skip3:
print('SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS '
'DUE TO NO CONFIG FOR THEM', file=sys.stderr)
global skip_if_not_v3
if not skip_if_not_v3:
skip_if_not_v3 = (swift_test_auth_version != '3'
or not all([not skip,
swift_test_user[3],
swift_test_key[3]]))
if not skip and skip_if_not_v3:
print('SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3',
file=sys.stderr)
global skip_service_tokens
if not skip_service_tokens:
skip_service_tokens = not all([not skip, swift_test_user[4],
swift_test_key[4], swift_test_tenant[4],
swift_test_service_prefix])
if not skip and skip_service_tokens:
print(
'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS',
file=sys.stderr)
if policy_specified:
policies = FunctionalStoragePolicyCollection.from_info()
for p in policies:
# policy names are case-insensitive
if policy_specified.lower() == p['name'].lower():
_info('Using specified policy %s' % policy_specified)
FunctionalStoragePolicyCollection.policy_specified = p
Container.policy_specified = policy_specified
break
else:
_info(
'SKIPPING FUNCTIONAL TESTS: Failed to find specified policy %s'
% policy_specified)
raise Exception('Failed to find specified policy %s'
% policy_specified)
global skip_if_no_reseller_admin
if not skip_if_no_reseller_admin:
skip_if_no_reseller_admin = not all([not skip, swift_test_user[5],
swift_test_key[5],
swift_test_tenant[5]])
if not skip and skip_if_no_reseller_admin:
print('SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG FOR '
'RESELLER ADMIN', file=sys.stderr)
get_cluster_info()
def teardown_package():
global orig_collate
locale.setlocale(locale.LC_COLLATE, orig_collate)
# clean up containers and objects left behind after running tests
global config
if config:
try:
conn = Connection(config)
conn.authenticate()
account = Account(conn, config.get('account', config['username']))
account.delete_containers()
except SkipTest:
pass
global in_process
global _test_socks
if in_process:
try:
for i, server in enumerate(_test_coros):
server.kill()
if not server.dead:
# kill it from the socket level
_test_socks[i].close()
except Exception:
pass
try:
rmtree(os.path.dirname(_testdir))
except Exception:
pass
reset_globals()
class AuthError(Exception):
pass
class InternalServerError(Exception):
pass
url = [None, None, None, None, None]
token = [None, None, None, None, None]
service_token = [None, None, None, None, None]
parsed = [None, None, None, None, None]
conn = [None, None, None, None, None]
def reset_globals():
global url, token, service_token, parsed, conn, config
url = [None, None, None, None, None]
token = [None, None, None, None, None]
service_token = [None, None, None, None, None]
parsed = [None, None, None, None, None]
conn = [None, None, None, None, None]
if config:
config = {}
def connection(url):
if has_insecure:
parsed_url, http_conn = http_connection(url, insecure=insecure)
else:
parsed_url, http_conn = http_connection(url)
orig_request = http_conn.request
# Add the policy header if policy_specified is set
def request_with_policy(method, url, body=None, headers=None):
# avoid a shared mutable default dict, since headers is mutated below
if headers is None:
headers = {}
version, account, container, obj = split_path(url, 1, 4, True)
if policy_specified and method == 'PUT' and container and not obj \
and 'X-Storage-Policy' not in headers:
headers['X-Storage-Policy'] = policy_specified
return orig_request(method, url, body, headers)
http_conn.request = request_with_policy
return parsed_url, http_conn
def get_url_token(user_index, os_options):
authargs = dict(snet=False,
tenant_name=swift_test_tenant[user_index],
auth_version=swift_test_auth_version,
os_options=os_options,
insecure=insecure)
url, token = get_auth(swift_test_auth,
swift_test_user[user_index],
swift_test_key[user_index],
**authargs)
if six.PY2 and not isinstance(url, bytes):
url = url.encode('utf-8')
if six.PY2 and not isinstance(token, bytes):
token = token.encode('utf-8')
return url, token
def retry(func, *args, **kwargs):
"""
You can use the kwargs to override:
'retries' (default: 5)
'use_account' (default: 1) - which user's token to pass
'url_account' (default: matches 'use_account') - which user's storage URL
'resource' (default: url[url_account]) - URL to connect to; retry()
will interpolate the variable :storage_url: if present
'service_user' - add a service token from this user (1 indexed)
"""
global url, token, service_token, parsed, conn
retries = kwargs.get('retries', 5)
attempts, backoff = 0, 1
# use account #1 by default; turn user's 1-indexed account into 0-indexed
use_account = kwargs.pop('use_account', 1) - 1
service_user = kwargs.pop('service_user', None)
if service_user:
service_user -= 1 # 0-index
# access our own account by default
url_account = kwargs.pop('url_account', use_account + 1) - 1
os_options = {'user_domain_name': swift_test_domain[use_account],
'project_domain_name': swift_test_domain[use_account]}
while attempts <= retries:
auth_failure = False
attempts += 1
try:
if not url[use_account] or not token[use_account]:
url[use_account], token[use_account] = get_url_token(
use_account, os_options)
parsed[use_account] = conn[use_account] = None
if not parsed[use_account] or not conn[use_account]:
parsed[use_account], conn[use_account] = \
connection(url[use_account])
# default resource is the account url[url_account]
resource = kwargs.pop('resource', '%(storage_url)s')
template_vars = {'storage_url': url[url_account]}
parsed_result = urlparse(resource % template_vars)
if isinstance(service_user, int):
if not service_token[service_user]:
dummy, service_token[service_user] = get_url_token(
service_user, os_options)
kwargs['service_token'] = service_token[service_user]
return func(url[url_account], token[use_account],
parsed_result, conn[url_account],
*args, **kwargs)
except (socket.error, HTTPException):
if attempts > retries:
raise
parsed[use_account] = conn[use_account] = None
if service_user:
service_token[service_user] = None
except AuthError:
auth_failure = True
url[use_account] = token[use_account] = None
if service_user:
service_token[service_user] = None
except InternalServerError:
pass
if attempts <= retries:
if not auth_failure:
sleep(backoff)
backoff *= 2
raise Exception('No result after %s retries.' % retries)
def check_response(conn):
resp = conn.getresponse()
if resp.status == 401:
resp.read()
raise AuthError()
elif resp.status // 100 == 5:
resp.read()
raise InternalServerError()
return resp
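# Illustrative sketch (hypothetical helper, mirroring reset_acl() below):
# tests typically combine retry() and check_response() like this.
def _example_head_account(url, token, parsed, conn):
# Issue a HEAD on the account and validate the response status.
conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
return check_response(conn)
# resp = retry(_example_head_account, use_account=1) # hypothetical call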
def load_constraint(name):
global cluster_info
try:
c = cluster_info['swift'][name]
except KeyError:
raise SkipTest("Missing constraint: %s" % name)
if not isinstance(c, int):
raise SkipTest("Bad value, %r, for constraint: %s" % (c, name))
return c
def get_storage_policy_from_cluster_info(info):
policies = info['swift'].get('policies', {})
default_policy = []
non_default_policies = []
for p in policies:
if p.get('default', {}):
default_policy.append(p)
else:
non_default_policies.append(p)
return default_policy, non_default_policies
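# Illustrative example (assumed /info payload shape):
# info = {'swift': {'policies': [{'name': 'gold', 'default': True},
# {'name': 'silver'}]}}
# get_storage_policy_from_cluster_info(info)
# -> ([{'name': 'gold', 'default': True}], [{'name': 'silver'}])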
def reset_acl():
def post(url, token, parsed, conn):
conn.request('POST', parsed.path, '', {
'X-Auth-Token': token,
'X-Account-Access-Control': '{}'
})
return check_response(conn)
resp = retry(post, use_account=1)
resp.read()
def requires_acls(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
global skip, cluster_info
if skip or not cluster_info:
raise SkipTest('Requires account ACLs')
# Determine whether this cluster has account ACLs; if not, skip test
if not cluster_info.get('tempauth', {}).get('account_acls'):
raise SkipTest('Requires account ACLs')
if swift_test_auth_version != '1':
# remove when keystoneauth supports account acls
raise SkipTest('Requires account ACLs')
reset_acl()
try:
rv = f(*args, **kwargs)
finally:
reset_acl()
return rv
return wrapper
class FunctionalStoragePolicyCollection(object):
# policy_specified is set in __init__.py when tests are being set up.
policy_specified = None
def __init__(self, policies):
self._all = policies
self.default = None
for p in self:
if p.get('default', False):
assert self.default is None, 'Found multiple default ' \
'policies %r and %r' % (self.default, p)
self.default = p
@classmethod
def from_info(cls, info=None):
if not (info or cluster_info):
get_cluster_info()
info = info or cluster_info
try:
policy_info = info['swift']['policies']
except KeyError:
raise AssertionError('Did not find any policy info in %r' % info)
policies = cls(policy_info)
assert policies.default, \
'Did not find default policy in %r' % policy_info
return policies
def __len__(self):
return len(self._all)
def __iter__(self):
return iter(self._all)
def __getitem__(self, index):
return self._all[index]
def filter(self, **kwargs):
return self.__class__([p for p in self if all(
p.get(k) == v for k, v in kwargs.items())])
def exclude(self, **kwargs):
return self.__class__([p for p in self if all(
p.get(k) != v for k, v in kwargs.items())])
def select(self):
# check that a policy was specified and that it is available
# in the current list (i.e., hasn't been excluded from the current list)
if self.policy_specified and self.policy_specified in self:
return self.policy_specified
else:
return random.choice(self)
def requires_policies(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if skip:
raise SkipTest
try:
self.policies = FunctionalStoragePolicyCollection.from_info()
except AssertionError:
raise SkipTest("Unable to determine available policies")
if len(self.policies) < 2:
raise SkipTest("Multiple policies not enabled")
return f(self, *args, **kwargs)
return wrapper
def requires_bulk(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if skip or not cluster_info:
raise SkipTest('Requires bulk middleware')
# Determine whether this cluster has bulk middleware; if not, skip test
if not cluster_info.get('bulk_upload', {}):
raise SkipTest('Requires bulk middleware')
return f(*args, **kwargs)
return wrapper
| apache-2.0 |
alvarolopez/nova | nova/virt/xenapi/image/utils.py | 62 | 3274 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shutil
import tarfile
from nova import image
IMAGE_API = image.API()
class GlanceImage(object):
def __init__(self, context, image_href_or_id):
self._context = context
self._image_id = image_href_or_id
self._cached_meta = None
@property
def meta(self):
if self._cached_meta is None:
self._cached_meta = IMAGE_API.get(self._context, self._image_id)
return self._cached_meta
def download_to(self, fileobj):
return IMAGE_API.download(self._context, self._image_id, fileobj)
def is_raw_tgz(self):
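# True only when the image metadata reports disk_format 'raw' and
# container_format 'tgz', in that order.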
return ['raw', 'tgz'] == [
self.meta.get(key) for key in ('disk_format', 'container_format')]
def data(self):
return IMAGE_API.download(self._context, self._image_id)
class RawImage(object):
def __init__(self, glance_image):
self.glance_image = glance_image
def get_size(self):
return int(self.glance_image.meta['size'])
def stream_to(self, fileobj):
return self.glance_image.download_to(fileobj)
class IterableToFileAdapter(object):
"""A degenerate file-like so that an iterable could be read like a file.
As Glance client returns an iterable, but tarfile requires a file like,
this is the adapter between the two. This allows tarfile to access the
glance stream.
"""
def __init__(self, iterable):
self.iterator = iterable.__iter__()
self.remaining_data = ''
def read(self, size):
chunk = self.remaining_data
try:
while not chunk:
chunk = next(self.iterator)
except StopIteration:
return ''
return_value = chunk[0:size]
self.remaining_data = chunk[size:]
return return_value
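# Illustrative example (assumed usage): the adapter lets tarfile consume a
# Glance download iterator as if it were a file object, e.g.:
# adapter = IterableToFileAdapter(iter(['chunk1', 'chunk2']))
# adapter.read(4) # -> 'chun'; the remainder is buffered internally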
class RawTGZImage(object):
def __init__(self, glance_image):
self.glance_image = glance_image
self._tar_info = None
self._tar_file = None
def _as_file(self):
return IterableToFileAdapter(self.glance_image.data())
def _as_tarfile(self):
return tarfile.open(mode='r|gz', fileobj=self._as_file())
def get_size(self):
if self._tar_file is None:
self._tar_file = self._as_tarfile()
self._tar_info = self._tar_file.next()
return self._tar_info.size
def stream_to(self, target_file):
if self._tar_file is None:
self._tar_file = self._as_tarfile()
self._tar_info = self._tar_file.next()
source_file = self._tar_file.extractfile(self._tar_info)
shutil.copyfileobj(source_file, target_file)
self._tar_file.close()
| apache-2.0 |
glewis17/cvxpy | doc/sphinxext/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
# signature rendering is intentionally disabled for Sphinx output
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
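# Illustrative usage sketch: render a NumPy-style docstring to reST for a
# Sphinx build (the wrapped object here is an arbitrary example).
# doc = get_doc_object(textwrap.dedent) # wrap any function or class
# rst = str(doc) # reST-formatted docstring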
| gpl-3.0 |
we7/vamp-aubio-plugins | aubio/interfaces/python/test_filter.py | 4 | 2001 | from numpy.testing import TestCase, run_module_suite
from numpy.testing import assert_equal, assert_almost_equal
from aubio import fvec, digital_filter
from numpy import array
def array_from_text_file(filename, dtype = 'float'):
return array([line.split() for line in open(filename).readlines()],
dtype = dtype)
class aubio_filter_test_case(TestCase):
def test_members(self):
f = digital_filter()
assert_equal (f.order, 7)
f = digital_filter(5)
assert_equal (f.order, 5)
f(fvec())
def test_cweighting_error(self):
f = digital_filter (2)
self.assertRaises ( ValueError, f.set_c_weighting, 44100 )
f = digital_filter (8)
self.assertRaises ( ValueError, f.set_c_weighting, 44100 )
f = digital_filter (5)
self.assertRaises ( ValueError, f.set_c_weighting, 4000 )
f = digital_filter (5)
self.assertRaises ( ValueError, f.set_c_weighting, 193000 )
f = digital_filter (7)
self.assertRaises ( ValueError, f.set_a_weighting, 193000 )
f = digital_filter (5)
self.assertRaises ( ValueError, f.set_a_weighting, 192000 )
def test_c_weighting(self):
expected = array_from_text_file('c_weighting_test_simple.expected')
f = digital_filter(5)
f.set_c_weighting(44100)
v = fvec(32)
v[12] = .5
u = f(v)
assert_almost_equal (expected[1], u)
def test_a_weighting(self):
expected = array_from_text_file('a_weighting_test_simple.expected')
f = digital_filter(7)
f.set_a_weighting(44100)
v = fvec(32)
v[12] = .5
u = f(v)
assert_almost_equal (expected[1], u)
def test_a_weighting_parted(self):
expected = array_from_text_file('a_weighting_test_simple.expected')
f = digital_filter(7)
f.set_a_weighting(44100)
v = fvec(16)
v[12] = .5
u = f(v)
assert_almost_equal (expected[1][:16], u)
# one more time
v = fvec(16)
u = f(v)
assert_almost_equal (expected[1][16:], u)
if __name__ == '__main__':
from unittest import main
main()
| gpl-2.0 |
Aryan-Barbarian/bigbang | bigbang/git_repo.py | 2 | 5894 | from git import *
import git
import pandas as pd
import numpy as np
from time import mktime
from datetime import datetime
from entity_resolution import entity_resolve
import networkx as nx
from config.config import CONFIG
ALL_ATTRIBUTES = CONFIG.all_attributes #["HEXSHA", "Committer Name", "Committer Email", "Commit Message", "Time", "Parent Commit", "Touched File"]
def cache_fixer(r): # Normalizes types (file list, timestamp) in a cached commit row
r["Touched File"] = [x.strip() for x in r["Touched File"][1:-1].split(",")]
r["Time"] = pd.to_datetime(r["Time"], unit = "s");
return r
"""
Class that stores an instance of a git repository given the address to that
repo relative to this file. It returns the data in multiple useful forms.
"""
class GitRepo(object):
""" A pandas DataFrame object indexed by time that stores
the raw form of the repo's commit data as a table where
each row is a commit and each col represents an attribute
of that commit (time, message, commiter name, committer email,
commit hexsha)
"""
def __init__(self, name, url=None, attribs = ALL_ATTRIBUTES, cache=None):
self._commit_data = None;
self.url = url;
self.repo = None
self.name = name;
if cache is None:
self.repo = Repo(url)
self.populate_data(ALL_ATTRIBUTES)
else:
cache = cache.apply(cache_fixer, axis=1)
cache = cache.set_index(cache["Time"]) # set_index returns a new frame
self._commit_data = cache;
missing = list();
cols = self.commit_data.columns
for attr in attribs:
if attr not in cols and unicode(attr) not in cols:
missing.append(attr);
if len(missing) > 0:
print("There were " + str(len(missing)) + " missing attributes: ")
print(missing);
if ("Committer Name" in attribs and "Committer Email" in attribs):
self._commit_data["Person-ID"] = None;
self._commit_data = self._commit_data.apply(lambda row: entity_resolve(row, "Committer Email", "Committer Name"), axis=1)
def gen_data(self, repo, raw):
if not repo.active_branch.is_valid():
print("Found an empty repo: " + str(self.name))
return;
first = repo.commit()
commit = first
firstHexSha = first.hexsha;
generator = git.Commit.iter_items(repo, firstHexSha);
if "Touched File" in raw:
print("WARNING: Currently going through file diffs. This will take a very long time (1 minute per 3000 commits.) We suggest using a small repository.")
for commit in generator:
try:
if "Touched File" in raw:
diff_list = list();
for diff in commit.diff(commit.parents[0]):
if diff.b_blob:
diff_list.append(diff.b_blob.path);
else:
diff_list.append(diff.a_blob.path);
raw["Touched File"].append(diff_list)
if "Committer Name" in raw:
raw["Committer Name"].append(commit.committer.name)
if "Committer Email" in raw:
raw["Committer Email"].append(commit.committer.email)
if "Commit Message" in raw:
raw["Commit Message"].append(commit.message)
if "Time" in raw or True: # TODO: For now, we always ask for the time
raw["Time"].append(pd.to_datetime(commit.committed_date, unit = "s"));
if "Parent Commit" in raw:
raw["Parent Commit"].append([par.hexsha for par in commit.parents])
if "HEXSHA" in raw:
raw["HEXSHA"].append(commit.hexsha)
except LookupError:
print("failed to add a commit because of an encoding error")
def populate_data(self, attribs = ALL_ATTRIBUTES):
raw = dict()
for attrib in attribs:
raw[attrib] = list();
repo = self.repo
self.gen_data(repo, raw);
print(type(raw["Time"]))
# TODO: NEEDS TIME
time_index = pd.DatetimeIndex(raw["Time"], periods = 24, freq = "H")
self._commit_data = pd.DataFrame(raw, index = time_index);
def by_committer(self):
return self.commit_data.groupby('Committer Name').size().order()
def commits_per_day(self):
ans = self.commit_data.groupby(self.commit_data.index).size()
ans = ans.resample("D", how=np.sum)
return ans;
def commits_per_week(self):
ans = self.commits_per_day();
ans = ans.resample("W", how=np.sum)
return ans;
def commits_per_day_full(self):
ans = self.commit_data.groupby([self.commit_data.index, "Committer Name" ]).size()
return ans;
@property
def commit_data(self):
return self._commit_data;
def commits_for_committer(self, committer_name):
full_info = self.commit_data
time_index = pd.DatetimeIndex(self.commit_data["Time"], periods = 24, freq = "H");
df = full_info.loc[full_info["Committer Name"] == committer_name]
df = df.groupby([df.index]).size()
df = df.resample("D", how = np.sum, axis = 0)
return df
def merge_with_repo(self, other):
# TODO: What if commits have the same time?
self._commit_data = self.commit_data.append(other.commit_data);
class MultiGitRepo(GitRepo):
"""
Repos must have a "Repo Name" column
"""
def __init__(self, repos, attribs=ALL_ATTRIBUTES):
self._commit_data = repos[0].commit_data.copy(deep=True);
for i in range(1, len(repos)):
self.merge_with_repo(repos[i]);
| gpl-2.0 |
chriskoz/OctoPrint | src/octoprint/plugins/softwareupdate/version_checks/commandline.py | 37 | 1727 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import logging
from ..exceptions import ConfigurationInvalid
from ..util import execute
def get_latest(target, check):
if not "command" in check:
raise ConfigurationInvalid("Update configuration for %s of type commandline needs command defined" % target)
returncode, stdout, stderr = execute(check["command"], evaluate_returncode=False)
# We expect command line check commands to
#
# * have a return code of 0 if an update is available, a value != 0 otherwise
# * return the display name of the new version as the final line on stdout
# * return the display name of the current version as the next to final line on stdout
#
# Example output:
# 1.1.0
# 1.1.1
#
# 1.1.0 is the current version, 1.1.1 is the remote version. If only one line is output, it's taken to be the
# display name of the new version
stdout_lines = filter(lambda x: len(x.strip()), stdout.splitlines())
local_name = stdout_lines[-2] if len(stdout_lines) >= 2 else "unknown"
remote_name = stdout_lines[-1] if len(stdout_lines) >= 1 else "unknown"
is_current = returncode != 0
information = dict(
local=dict(
name=local_name,
value=local_name,
),
remote=dict(
name=remote_name,
value=remote_name
)
)
logger = logging.getLogger("octoprint.plugins.softwareupdate.version_checks.commandline")
logger.debug("Target: %s, local: %s, remote: %s" % (target, local_name, remote_name))
return information, is_current
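# Illustrative sketch (hypothetical plugin config): the updater would call
# get_latest("myplugin", dict(command="/path/to/version_check.sh")) and, per
# the contract above, treat a non-zero exit status as already current.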
| agpl-3.0 |
rudkx/swift | utils/vim/swift-format.py | 29 | 3125 | # This file is a minimal swift-format vim-integration. To install:
# - Change 'binary' if swift-format is not on the path (see below).
# - Add to your .vimrc:
#
# map <C-I> :pyf <path-to-this-file>/swift-format.py<cr>
# imap <C-I> <c-o>:pyf <path-to-this-file>/swift-format.py<cr>
#
# The first line enables swift-format for NORMAL and VISUAL mode, the second
# line adds support for INSERT mode. Change "C-I" to another binding if you
# need swift-format on a different key (C-I stands for Ctrl+i).
#
# With this integration you can press the bound key and swift-format will
# format the current line in NORMAL and INSERT mode or the selected region in
# VISUAL mode. The line or region is extended to the next bigger syntactic
# entity.
#
# You can also pass in the variable "l:lines" to choose the range for
# formatting. This variable can either contain "<start line>:<end line> or
# "all" to format the full file. So, to format the full file, write a function
# like:
#
# :function FormatFile()
# : let l:lines="all"
# : pyf <path-to-this-file>/swift-format.py
# :endfunction
#
# It operates on the current, potentially unsaved buffer and does not create or
# save any files. To revert a formatting, just undo.
from __future__ import print_function
import difflib
import platform
import subprocess
import sys
import vim
binary = 'swift-format'
if vim.eval('exists("g:swift_format_path")') == "1":
binary = vim.eval('g:swift_format_path')
def get_buffer(encoding):
if platform.python_version_tuple()[0] == "3":
return vim.current.buffer
return [line.decode(encoding) for line in vim.current.buffer]
def main(argc, argv):
encoding = vim.eval("&encoding")
buf = get_buffer(encoding)
if vim.eval('exists("l:lines")') == '1':
lines = vim.eval('l:lines')
else:
lines = '%s:%s' % (vim.current.range.start + 1,
vim.current.range.end + 1)
cursor = int(vim.eval('line2byte(line(".")) + col(".")')) - 2
if cursor < 0:
print("Couldn't determine cursor position. Is your file empty?")
return
# avoid the cmd prompt on windows
SI = None
if sys.platform.startswith('win32'):
SI = subprocess.STARTUPINFO()
SI.dwFlags |= subprocess.STARTF_USESHOWWINDOW
SI.wShowWindow = subprocess.SW_HIDE
command = [binary]
if lines != 'all':
command.extend(['-line-range', lines])
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE, startupinfo=SI)
stdout, stderr = p.communicate(input='\n'.join(buf).encode(encoding))
if stderr:
print(stderr)
if not stdout:
print('No output from swift-format (crashed?).')
return
lines = stdout.decode(encoding).split('\n')
sequence = difflib.SequenceMatcher(None, buf, lines)
for op in reversed(sequence.get_opcodes()):
        if op[0] != 'equal':
vim.current.buffer[op[1]:op[2]] = lines[op[3]:op[4]]
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
| apache-2.0 |
chrys87/fenrir | src/fenrirscreenreader/utils/word_utils.py | 1 | 4100 | #!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributors.
from fenrirscreenreader.core import debug
import string
def getCurrentWord(currX, currY, currText):
lineBreak = False
endOfScreen = False
if currText == '':
return -1, -1, '', endOfScreen, lineBreak
    if currText.strip(string.whitespace) == '':
return currX, currY, '', endOfScreen, lineBreak
x = currX
y = currY
currWord = ''
wrappedLines = currText.split('\n')
currLine = wrappedLines[y]
Found = False
while(not Found):
        if currLine[x] not in string.whitespace:
if x == 0:
Found = True
else:
if currLine[x - 1] in string.whitespace:
Found = True
if not Found:
if x - 1 < 0:
if y - 1 < 0:
lineBreak = False
endOfScreen = True
return currX, currY, '', endOfScreen, lineBreak
else:
y -= 1
currLine = wrappedLines[y]
                    x = len(currLine) - 1
lineBreak = True
else:
x -= 1
if Found:
currWord = currLine[x:]
for d in string.whitespace:
delimiterPos = currWord.find(d)
if delimiterPos != -1:
currWord = currWord[:delimiterPos]
return x, y, currWord, endOfScreen, lineBreak
return currX, currY, '', False, False
def getPrevWord(currX, currY, currText):
lineBreak = False
endOfScreen = False
if currText == '':
return -1, -1, '', endOfScreen, lineBreak
    if currText.strip(string.whitespace) == '':
return currX, currY, '', endOfScreen, lineBreak
    x, y, currWord, endOfScreen, lineBreakCurrWord = getCurrentWord(currX, currY, currText)
if endOfScreen:
return x, y, currWord, endOfScreen, lineBreak
wrappedLines = currText.split('\n')
currLine = wrappedLines[y]
if x - 1 < 0:
if y - 1 < 0:
lineBreak = False
endOfScreen = True
return currX, currY, '', endOfScreen, lineBreak
else:
y -= 1
currLine = wrappedLines[y]
            x = len(currLine) - 1
lineBreak = True
else:
x -= 1
lineBreakCurrWord = lineBreak or lineBreakCurrWord
x, y, currWord, endOfScreen, lineBreak = getCurrentWord(x,y,currText)
lineBreak = lineBreak or lineBreakCurrWord
return x, y, currWord, endOfScreen, lineBreak
def getNextWord(currX, currY, currText):
lineBreak = False
endOfScreen = False
if currText == '':
return -1, -1, '', endOfScreen, lineBreak
    if currText.strip(string.whitespace) == '':
return currX, currY, '', endOfScreen, lineBreak
x = currX
y = currY
currWord = ''
wrappedLines = currText.split('\n')
currLine = wrappedLines[y]
Found = False
while(not Found):
if not Found:
            if x + 1 > len(currLine) - 1:
                if y + 1 > len(wrappedLines) - 1:
lineBreak = False
endOfScreen = True
return currX, currY, '', endOfScreen, lineBreak
else:
y += 1
currLine = wrappedLines[y]
x = 0
lineBreak = True
else:
x += 1
        if currLine[x] not in string.whitespace:
if x == 0:
Found = True
else:
if currLine[x - 1] in string.whitespace:
Found = True
if Found:
currWord = currLine[x:]
for d in string.whitespace:
delimiterPos = currWord.find(d)
if delimiterPos != -1:
currWord = currWord[:delimiterPos]
return x, y, currWord, endOfScreen, lineBreak
return currX, currY, '', False, False
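# Minimal usage sketch (hypothetical buffer text and cursor position):
#   x, y, word, endOfScreen, lineBreak = getCurrentWord(0, 0, 'hello world\nfoo')
#   # -> (0, 0, 'hello', False, False)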
| lgpl-3.0 |
kodybrown/lime | backend/testdata/Vintage/transformers.py | 14 | 2107 | import sublime
import sublime_plugin
from Vintageous.vi.constants import regions_transformer
from Vintageous.vi.utils import back_one_char
from Vintageous.vi.utils import forward_one_char
from Vintageous.vi.utils import is_at_bol
from Vintageous.vi.utils import is_at_eol
from Vintageous.vi.utils import is_at_hard_eol
from Vintageous.vi.utils import is_line_empty
from Vintageous.vi.utils import is_on_empty_line
from Vintageous.vi.utils import next_non_white_space_char
class ClipEndToLine(sublime_plugin.TextCommand):
def run(self, edit):
def f(view, s):
if not is_on_empty_line(self.view, s) and is_at_eol(self.view, s):
return back_one_char(s)
else:
return s
regions_transformer(self.view, f)
class DontStayOnEolForward(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
def f(view, s):
if is_at_eol(self.view, s):
return forward_one_char(s)
else:
return s
regions_transformer(self.view, f)
class DontStayOnEolBackward(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
def f(view, s):
if is_at_eol(self.view, s) and not self.view.line(s.b).empty():
return back_one_char(s)
else:
return s
regions_transformer(self.view, f)
class _vi_d_post_action(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
def f(view, s):
if is_at_eol(self.view, s) and not self.view.line(s.b).empty():
s = back_one_char(s)
# s = next_non_white_space_char(self.view, s.b)
return s
regions_transformer(self.view, f)
class DontOvershootLineLeft(sublime_plugin.TextCommand):
def run(self, edit, **kwargs):
def f(view, s):
if view.size() > 0 and is_at_eol(self.view, s):
return forward_one_char(s)
else:
return s
        regions_transformer(self.view, f)
| bsd-2-clause |
ageron/tensorflow | tensorflow/contrib/optimizer_v2/optimizer_v2_test.py | 9 | 12158 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for OptimizerV2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.optimizer_v2 import gradient_descent
from tensorflow.contrib.optimizer_v2 import optimizer_v2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class OptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testBasic(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
# Note that for eager execution, minimize expects a function instead of a
# Tensor.
global_step = resource_variable_ops.ResourceVariable(
array_ops.zeros([], dtypes.int64), name='global_step_%d' % i)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
opt_op = sgd_op.minimize(loss, global_step, [var0, var1])
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
def testAggregationMethod(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost,
global_step, [var0, var1],
aggregation_method=gradients_util.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-14., -13.], var0.eval())
self.assertAllClose([-6., -5.], var1.eval())
def testPrecomputedGradient(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
cost = 5 * var0 + 3 * var1
grad_loss = constant_op.constant([42, -42], dtype=dtype)
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(
cost, global_step, [var0, var1], grad_loss=grad_loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([1.0 - 3 * 5 * 42.0, 2.0 - 3 * 5 * (-42.0)],
var0.eval())
self.assertAllClose([3.0 - 3 * 3 * 42.0, 4.0 - 3 * 3 * (-42.0)],
var1.eval())
@test_util.run_in_graph_and_eager_modes
def testNoVariables(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# pylint: disable=cell-var-from-loop
def loss():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, trainable=False, name='a')
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, trainable=False, name='b')
return 5 * var0 + var1
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError, 'No.*variables'):
sgd_op.minimize(loss)
@test_util.run_in_graph_and_eager_modes
def testNoGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
# pylint: disable=cell-var-from-loop
def loss():
return 5 * var0
# pylint: enable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError, 'No gradients'):
# var1 has no gradient
sgd_op.minimize(loss, var_list=[var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_Minimize(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
def loss():
return constant_op.constant(5.0)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.minimize(loss, var_list=[var0, var1])
@test_util.run_in_graph_and_eager_modes
def testNoGradientsForAnyVariables_ApplyGradients(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
with self.assertRaisesRegexp(ValueError,
'No gradients provided for any variable'):
sgd_op.apply_gradients([(None, var0), (None, var1)])
@test_util.run_in_graph_and_eager_modes
def testGradientsAsVariables(self):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
def loss():
return 5 * var0 + 3 * var1 # pylint: disable=cell-var-from-loop
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(loss, [var0, var1])
# Convert gradients to tf.Variables
converted_grads = [
resource_variable_ops.ResourceVariable(array_ops.zeros([2], dtype),
name='c_%d_%d' % (i, j))
for j, gv in enumerate(grads_and_vars)
]
convert_ops = [
state_ops.assign(converted_grads[j], gv[0])
for j, gv in enumerate(grads_and_vars)
]
self.evaluate(variables.global_variables_initializer())
      # Run convert_ops to perform the gradient conversion
self.evaluate(convert_ops)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd through optimizer
converted_grads_and_vars = list(zip(converted_grads, [var0, var1]))
opt_op = sgd_op.apply_gradients(converted_grads_and_vars)
self.evaluate(opt_op)
# Validate updated params
self.assertAllClose([-14., -13.], self.evaluate(var0))
self.assertAllClose([-6., -5.], self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testComputeGradientsWithTensors(self):
x = ops.convert_to_tensor(1.0)
def f():
return x * x
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(f, [x])
self.assertEqual(1, len(grads_and_vars))
grad, x_as_var = grads_and_vars[0]
self.assertIs(x, x_as_var)
self.assertEqual(2.0, self.evaluate(grad))
with self.assertRaises(NotImplementedError):
sgd_op.apply_gradients(grads_and_vars)
def testTrainOp(self):
with self.cached_session():
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
self.assertTrue(opt_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
def testConstraint(self):
constraint_01 = lambda x: clip_ops.clip_by_value(x, -0.1, 0.)
constraint_0 = lambda x: clip_ops.clip_by_value(x, 0., 1.)
with self.cached_session():
var0 = variables.Variable([1.0, 2.0],
constraint=constraint_01)
var1 = variables.Variable([3.0, 4.0],
constraint=constraint_0)
cost = 5 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name='global_step')
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 1 step of sgd through optimizer
opt_op.run()
# Validate updated params
self.assertAllClose([-0.1, -0.1], var0.eval())
self.assertAllClose([0., 0.], var1.eval())
def testStopGradients(self):
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], name='var0')
var1 = variables.Variable([3.0, 4.0], name='var1')
var0_id = array_ops.identity(var0)
cost = 5 * var0_id + 3 * var1
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
grads_and_vars = sgd_op.compute_gradients(cost, [var0, var1],
stop_gradients=[var0_id])
grad_dict = {var.op.name: grad for grad, var in grads_and_vars}
self.assertIsNone(grad_dict['var0'])
self.assertIsNotNone(grad_dict['var1'])
def testDoNotOverrideCreateSlots(self):
class ShouldNotOverrideCreateSlots(optimizer_v2.OptimizerV2):
def _create_slots(self, var_list):
"""In OptimizerV2 _create_slots was renamed _create_vars."""
return var_list
with self.assertRaises(RuntimeError):
ShouldNotOverrideCreateSlots(True, 'name')
if __name__ == '__main__':
test.main()
| apache-2.0 |
hectord/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/null_fk_ordering/tests.py | 92 | 2012 | from django.test import TestCase
from regressiontests.null_fk_ordering.models import *
class NullFkOrderingTests(TestCase):
def test_ordering_across_null_fk(self):
"""
Regression test for #7512
ordering across nullable Foreign Keys shouldn't exclude results
"""
author_1 = Author.objects.create(name='Tom Jones')
author_2 = Author.objects.create(name='Bob Smith')
article_1 = Article.objects.create(title='No author on this article')
article_2 = Article.objects.create(author=author_1, title='This article written by Tom Jones')
article_3 = Article.objects.create(author=author_2, title='This article written by Bob Smith')
# We can't compare results directly (since different databases sort NULLs to
# different ends of the ordering), but we can check that all results are
# returned.
self.assertTrue(len(list(Article.objects.all())) == 3)
s = SystemInfo.objects.create(system_name='System Info')
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
c1 = Comment.objects.create(post=p, comment_text='My first comment')
c2 = Comment.objects.create(comment_text='My second comment')
s2 = SystemInfo.objects.create(system_name='More System Info')
f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
p2 = Post.objects.create(forum=f2, title='Second Post')
c3 = Comment.objects.create(comment_text='Another first comment')
c4 = Comment.objects.create(post=p2, comment_text='Another second comment')
# We have to test this carefully. Some databases sort NULL values before
# everything else, some sort them afterwards. So we extract the ordered list
# and check the length. Before the fix, this list was too short (some values
# were omitted).
self.assertTrue(len(list(Comment.objects.all())) == 4)
| gpl-3.0 |
sander76/home-assistant | homeassistant/components/flo/__init__.py | 4 | 2250 | """The flo integration."""
import asyncio
import logging
from aioflo import async_get_api
from aioflo.errors import RequestError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CLIENT, DOMAIN
from .device import FloDeviceDataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["binary_sensor", "sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the flo component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up flo from a config entry."""
session = async_get_clientsession(hass)
hass.data[DOMAIN][entry.entry_id] = {}
try:
hass.data[DOMAIN][entry.entry_id][CLIENT] = client = await async_get_api(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], session=session
)
except RequestError as err:
raise ConfigEntryNotReady from err
user_info = await client.user.get_info(include_location_info=True)
_LOGGER.debug("Flo user information with locations: %s", user_info)
hass.data[DOMAIN][entry.entry_id]["devices"] = devices = [
FloDeviceDataUpdateCoordinator(hass, client, location["id"], device["id"])
for location in user_info["locations"]
for device in location["devices"]
]
tasks = [device.async_refresh() for device in devices]
await asyncio.gather(*tasks)
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| apache-2.0 |
vivekmishra1991/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
ssut/flickr-dump | flickrdump/flickrdump.py | 1 | 2803 | from __future__ import division
import sys
if sys.version_info < (3, 4):
    raise Exception('flickr-dump requires Python 3.4 or later')
import asyncio
import aiohttp
import functools
import flickrapi
import itertools
import os
from ctypes import c_bool
def dump(api, secret, user_id, target='./data'):
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(_dump(api, secret, user_id, target))
except OSError:
return 1
except KeyboardInterrupt:
print('Stopped')
finally:
loop.close()
return 0
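# Usage sketch (hypothetical credentials and user id):
#   dump('API_KEY', 'API_SECRET', '12345678@N00', target='./photos')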
@asyncio.coroutine
def _dump(api, secret, user_id, target):
if not os.path.exists(target):
os.makedirs(target)
flickr = flickrapi.FlickrAPI(api, secret, format='parsed-json')
pages = flickr.people.getPublicPhotos(
user_id=user_id, per_page=500, page=1)['photos']['pages']
print('Total pages to download:', pages)
coros = []
for i in range(pages):
coros.append(asyncio.Task(_get_urls(flickr, user_id, i + 1, target)))
yield from asyncio.gather(*coros)
sys.stdout.write('\n')
print('done!')
COUNT_LOCK = asyncio.Lock()
COUNT_DLS = [0, 0]
@asyncio.coroutine
def _count(i, incr=False):
global COUNT_DLS
with (yield from COUNT_LOCK):
if incr:
COUNT_DLS[i] += 1
return COUNT_DLS[i]
@asyncio.coroutine
def _download(url, target):
filename = url.split('/')[-1]
dl = yield from _count(1, True)
cnt = yield from _count(0)
sys.stdout.write('[{}/{}, {:.02f}%] Downloading..\r'.format(
cnt, dl, (cnt / dl * 100.0)))
sys.stdout.flush()
target = '{}/{}'.format(target, filename)
r = yield from aiohttp.request('get', url)
with open(target, 'wb') as fd:
while True:
chunk = yield from r.content.read(1024)
if not chunk:
break
fd.write(chunk)
r.close()
cnt = yield from _count(0, True)
@asyncio.coroutine
def _get_urls(flickr, user_id, p, target):
items = flickr.people.getPublicPhotos(
user_id=user_id, per_page=500, page=p)['photos']['photo']
count = len(items)
for i, item in enumerate(items):
item_id, item_secret = item.get('id'), item.get('secret')
res = yield from asyncio.async(_get_url(flickr, item_id, item_secret))
asyncio.async(_download(res, target))
return count
@asyncio.coroutine
def _get_url(flickr, item_id, item_secret):
loop = asyncio.get_event_loop()
sizes_callback = functools.partial(flickr.photos.getSizes, photo_id=item_id)
sizes = loop.run_in_executor(None, sizes_callback)
sizes = yield from sizes
sizes = sizes.get('sizes').get('size')
    original = {}
    try:
        original = next(d for d in sizes if d['label'] == 'Original')
    except StopIteration:
        pass
return original.get('source', '')
| gpl-2.0 |
briansan/inv | server/inv/util.py | 1 | 3088 | """
@file inv/api/util.py
@author Brian Kim
@brief a script that defines utility methods for the server
"""
from auth import auth_inv
from datetime import timedelta
from functools import update_wrapper, wraps
from flask import current_app, make_response, request, Response
import base64
"""
convenience methods to determine the http method used
"""
def is_create():
return request.method == 'POST'
def is_read():
return request.method == 'GET'
def is_update():
return request.method == 'PUT'
def is_delete():
return request.method == 'DELETE'
"""
img methods
"""
def tag2name(x):
    # base64-encode and strip padding/unsafe chars (including the trailing
    # newline that encodestring appends) so the result is filename-safe
    return base64.encodestring(x).strip('/=+\n')
def img_path(asset_tag):
fname = tag2name(asset_tag)
return '/var/inv/img/'+fname+'.png'
def thumbnail_path(asset_tag):
fname = tag2name(asset_tag)
return '/var/inv/img/'+fname+'.thumbnail.png'
def receipt_path(asset_tag):
fname = tag2name(asset_tag)
return '/var/inv/receipt/'+fname+'.png'
"""
authentication decorator
"""
def authenticate():
"""sends a 401 resposne that enables basic auth"""
msg = 'Could not verify your access level for that URL.\n'
msg += 'You have to login with proper credentials'
header = {'WWW-Authenticate': 'Basic realm="Login Required"'}
return Response( msg, 401, header )
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not auth_inv(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
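# Usage sketch (hypothetical Flask view function; the app object is assumed
# to be defined elsewhere):
#   @app.route('/items')
#   @requires_auth
#   def items():
#       return 'ok'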
"""
cross-origin resource sharing decorator
"""
def crossdomain(origin='*', methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'] = 'X-Requested-With'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
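# Usage sketch (hypothetical route, shown for illustration; note OPTIONS must
# be allowed for the CORS preflight to reach the decorator):
#   @app.route('/api/items', methods=['GET', 'OPTIONS'])
#   @crossdomain(origin='*')
#   def api_items():
#       return 'ok'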
| mit |
x111ong/odoo | addons/mail/mail_thread.py | 111 | 109095 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from collections import OrderedDict
import datetime
import dateutil
import email
try:
import simplejson as json
except ImportError:
import json
from lxml import etree
import logging
import pytz
import re
import socket
import time
import xmlrpclib
from email.message import Message
from email.utils import formataddr
from urllib import urlencode
from openerp import api, tools
from openerp import SUPERUSER_ID
from openerp.addons.mail.mail_message import decode
from openerp.osv import fields, osv, orm
from openerp.osv.orm import BaseModel
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
mail_header_msgid_re = re.compile('<[^<>]+>')
def decode_header(message, header, separator=' '):
return separator.join(map(decode, filter(None, message.get_all(header, []))))
class mail_thread(osv.AbstractModel):
''' mail_thread model is meant to be inherited by any model that needs to
act as a discussion topic on which messages can be attached. Public
methods are prefixed with ``message_`` in order to avoid name
collisions with methods of the models that will inherit from this class.
``mail.thread`` defines fields used to handle and display the
communication history. ``mail.thread`` also manages followers of
inheriting classes. All features and expected behavior are managed
        by mail.thread. Widgets have been designed for the 7.0 and following
versions of OpenERP.
Inheriting classes are not required to implement any method, as the
default implementation will work for any model. However it is common
to override at least the ``message_new`` and ``message_update``
methods (calling ``super``) to add model-specific behavior at
creation and update of a thread when processing incoming emails.
Options:
- _mail_flat_thread: if set to True, all messages without parent_id
are automatically attached to the first message posted on the
                resource. If set to False, the display of Chatter is done using
threads, and no parent_id is automatically set.
'''
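    # A minimal sketch of a model inheriting mail.thread (hypothetical model
    # name and fields, written in this file's old-API style):
    #
    #   class my_task(osv.Model):
    #       _name = 'my.task'
    #       _inherit = ['mail.thread']
    #       _columns = {
    #           'name': fields.char('Name', track_visibility='onchange'),
    #       }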
_name = 'mail.thread'
_description = 'Email Thread'
_mail_flat_thread = True
_mail_post_access = 'write'
# Automatic logging system if mail installed
# _track = {
# 'field': {
# 'module.subtype_xml': lambda self, cr, uid, obj, context=None: obj[state] == done,
# 'module.subtype_xml2': lambda self, cr, uid, obj, context=None: obj[state] != done,
# },
# 'field2': {
# ...
# },
# }
# where
# :param string field: field name
# :param module.subtype_xml: xml_id of a mail.message.subtype (i.e. mail.mt_comment)
# :param obj: is a browse_record
# :param function lambda: returns whether the tracking should record using this subtype
_track = {}
# Mass mailing feature
_mail_mass_mailing = False
def get_empty_list_help(self, cr, uid, help, context=None):
""" Override of BaseModel.get_empty_list_help() to generate an help message
that adds alias information. """
model = context.get('empty_list_help_model')
res_id = context.get('empty_list_help_id')
ir_config_parameter = self.pool.get("ir.config_parameter")
catchall_domain = ir_config_parameter.get_param(cr, SUPERUSER_ID, "mail.catchall.domain", context=context)
document_name = context.get('empty_list_help_document_name', _('document'))
alias = None
if catchall_domain and model and res_id: # specific res_id -> find its alias (i.e. section_id specified)
object_id = self.pool.get(model).browse(cr, uid, res_id, context=context)
# check that the alias effectively creates new records
if object_id.alias_id and object_id.alias_id.alias_name and \
object_id.alias_id.alias_model_id and \
object_id.alias_id.alias_model_id.model == self._name and \
object_id.alias_id.alias_force_thread_id == 0:
alias = object_id.alias_id
if not alias and catchall_domain and model: # no res_id or res_id not linked to an alias -> generic help message, take a generic alias of the model
alias_obj = self.pool.get('mail.alias')
alias_ids = alias_obj.search(cr, uid, [("alias_parent_model_id.model", "=", model), ("alias_name", "!=", False), ('alias_force_thread_id', '=', False), ('alias_parent_thread_id', '=', False)], context=context, order='id ASC')
if alias_ids and len(alias_ids) == 1:
alias = alias_obj.browse(cr, uid, alias_ids[0], context=context)
add_arrow = not help or help.find("oe_view_nocontent_create") == -1
if alias:
email_link = "<a href='mailto:%(email)s'>%(email)s</a>" % {'email': alias.name_get()[0][1]}
if add_arrow:
return _("""<p class='oe_view_nocontent_create'>
Click here to add new %(document)s or send an email to: %(email)s.
</p>
%(static_help)s"""
) % {
'document': document_name, 'email': email_link, 'static_help': help or ''
}
return _("""%(static_help)s
<p>
You could also add a new %(document)s by sending an email to: %(email)s.
</p>""") % {
'document': document_name, 'email': email_link, 'static_help': help or ''
}
if add_arrow:
return _("<p class='oe_view_nocontent_create'>Click here to add new %(document)s</p>%(static_help)s") % {
'document': document_name, 'static_help': help or ''
}
return help
def _get_message_data(self, cr, uid, ids, name, args, context=None):
""" Computes:
- message_unread: has uid unread message for the document
- message_summary: html snippet summarizing the Chatter for kanban views """
res = dict((id, dict(message_unread=False, message_unread_count=0, message_summary=' ')) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
# search for unread messages, directly in SQL to improve performances
cr.execute(""" SELECT m.res_id FROM mail_message m
RIGHT JOIN mail_notification n
ON (n.message_id = m.id AND n.partner_id = %s AND (n.is_read = False or n.is_read IS NULL))
WHERE m.model = %s AND m.res_id in %s""",
(user_pid, self._name, tuple(ids),))
for result in cr.fetchall():
res[result[0]]['message_unread'] = True
res[result[0]]['message_unread_count'] += 1
for id in ids:
if res[id]['message_unread_count']:
title = res[id]['message_unread_count'] > 1 and _("You have %d unread messages") % res[id]['message_unread_count'] or _("You have one unread message")
res[id]['message_summary'] = "<span class='oe_kanban_mail_new' title='%s'><span class='oe_e'>9</span> %d %s</span>" % (title, res[id].pop('message_unread_count'), _("New"))
res[id].pop('message_unread_count', None)
return res
def read_followers_data(self, cr, uid, follower_ids, context=None):
result = []
for follower in self.pool.get('res.partner').browse(cr, uid, follower_ids, context=context):
is_editable = self.pool['res.users'].has_group(cr, uid, 'base.group_no_one')
is_uid = uid in map(lambda x: x.id, follower.user_ids)
data = (follower.id,
follower.name,
{'is_editable': is_editable, 'is_uid': is_uid},
)
result.append(data)
return result
def _get_subscription_data(self, cr, uid, ids, name, args, user_pid=None, context=None):
""" Computes:
- message_subtype_data: data about document subtypes: which are
available, which are followed if any """
res = dict((id, dict(message_subtype_data='')) for id in ids)
if user_pid is None:
user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
# find current model subtypes, add them to a dictionary
subtype_obj = self.pool.get('mail.message.subtype')
subtype_ids = subtype_obj.search(
cr, uid, [
'&', ('hidden', '=', False), '|', ('res_model', '=', self._name), ('res_model', '=', False)
], context=context)
subtype_dict = OrderedDict(
(subtype.name, {
'default': subtype.default,
'followed': False,
'parent_model': subtype.parent_id and subtype.parent_id.res_model or self._name,
'id': subtype.id}
) for subtype in subtype_obj.browse(cr, uid, subtype_ids, context=context))
for id in ids:
res[id]['message_subtype_data'] = subtype_dict.copy()
# find the document followers, update the data
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, uid, [
('partner_id', '=', user_pid),
('res_id', 'in', ids),
('res_model', '=', self._name),
], context=context)
for fol in fol_obj.browse(cr, uid, fol_ids, context=context):
thread_subtype_dict = res[fol.res_id]['message_subtype_data']
for subtype in [st for st in fol.subtype_ids if st.name in thread_subtype_dict]:
thread_subtype_dict[subtype.name]['followed'] = True
res[fol.res_id]['message_subtype_data'] = thread_subtype_dict
return res
def _search_message_unread(self, cr, uid, obj=None, name=None, domain=None, context=None):
return [('message_ids.to_read', '=', True)]
def _get_followers(self, cr, uid, ids, name, arg, context=None):
fol_obj = self.pool.get('mail.followers')
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)])
res = dict((id, dict(message_follower_ids=[], message_is_follower=False)) for id in ids)
user_pid = self.pool.get('res.users').read(cr, uid, [uid], ['partner_id'], context=context)[0]['partner_id'][0]
for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids):
res[fol.res_id]['message_follower_ids'].append(fol.partner_id.id)
if fol.partner_id.id == user_pid:
res[fol.res_id]['message_is_follower'] = True
return res
def _set_followers(self, cr, uid, id, name, value, arg, context=None):
if not value:
return
partner_obj = self.pool.get('res.partner')
fol_obj = self.pool.get('mail.followers')
# read the old set of followers, and determine the new set of followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', '=', id)])
old = set(fol.partner_id.id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids))
new = set(old)
for command in value or []:
if isinstance(command, (int, long)):
new.add(command)
elif command[0] == 0:
new.add(partner_obj.create(cr, uid, command[2], context=context))
elif command[0] == 1:
partner_obj.write(cr, uid, [command[1]], command[2], context=context)
new.add(command[1])
elif command[0] == 2:
partner_obj.unlink(cr, uid, [command[1]], context=context)
new.discard(command[1])
elif command[0] == 3:
new.discard(command[1])
elif command[0] == 4:
new.add(command[1])
elif command[0] == 5:
new.clear()
elif command[0] == 6:
new = set(command[2])
# remove partners that are no longer followers
self.message_unsubscribe(cr, uid, [id], list(old-new), context=context)
# add new followers
self.message_subscribe(cr, uid, [id], list(new-old), context=context)
def _search_followers(self, cr, uid, obj, name, args, context):
"""Search function for message_follower_ids
Do not use with operator 'not in'. Use instead message_is_followers
"""
fol_obj = self.pool.get('mail.followers')
res = []
for field, operator, value in args:
assert field == name
# TOFIX make it work with not in
assert operator != "not in", "Do not search message_follower_ids with 'not in'"
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('partner_id', operator, value)])
res_ids = [fol.res_id for fol in fol_obj.browse(cr, SUPERUSER_ID, fol_ids)]
res.append(('id', 'in', res_ids))
return res
def _search_is_follower(self, cr, uid, obj, name, args, context):
"""Search function for message_is_follower"""
res = []
for field, operator, value in args:
assert field == name
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if (operator == '=' and value) or (operator == '!=' and not value): # is a follower
res_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
else: # is not a follower or unknown domain
mail_ids = self.search(cr, uid, [('message_follower_ids', 'in', [partner_id])], context=context)
res_ids = self.search(cr, uid, [('id', 'not in', mail_ids)], context=context)
res.append(('id', 'in', res_ids))
return res
_columns = {
'message_is_follower': fields.function(_get_followers, type='boolean',
            fnct_search=_search_is_follower, string='Is a Follower', multi='_get_followers'),
'message_follower_ids': fields.function(_get_followers, fnct_inv=_set_followers,
fnct_search=_search_followers, type='many2many', priority=-10,
obj='res.partner', string='Followers', multi='_get_followers'),
'message_ids': fields.one2many('mail.message', 'res_id',
domain=lambda self: [('model', '=', self._name)],
auto_join=True,
string='Messages',
help="Messages and communication history"),
'message_last_post': fields.datetime('Last Message Date',
help='Date of the last message posted on the record.'),
'message_unread': fields.function(_get_message_data,
fnct_search=_search_message_unread, multi="_get_message_data",
type='boolean', string='Unread Messages',
help="If checked new messages require your attention."),
'message_summary': fields.function(_get_message_data, method=True,
type='text', string='Summary', multi="_get_message_data",
help="Holds the Chatter summary (number of messages, ...). "\
"This summary is directly in html format in order to "\
"be inserted in kanban views."),
}
def _get_user_chatter_options(self, cr, uid, context=None):
options = {
'display_log_button': False
}
is_employee = self.pool['res.users'].has_group(cr, uid, 'base.group_user')
if is_employee:
options['display_log_button'] = True
return options
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(mail_thread, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='message_ids']"):
options = json.loads(node.get('options', '{}'))
options.update(self._get_user_chatter_options(cr, uid, context=context))
node.set('options', json.dumps(options))
res['arch'] = etree.tostring(doc)
return res
#------------------------------------------------------
# CRUD overrides for automatic subscription and logging
#------------------------------------------------------
def create(self, cr, uid, values, context=None):
""" Chatter override :
- subscribe uid
- subscribe followers of parent
- log a creation message
"""
if context is None:
context = {}
if context.get('tracking_disable'):
return super(mail_thread, self).create(
cr, uid, values, context=context)
# subscribe uid unless asked not to
if not context.get('mail_create_nosubscribe'):
pid = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid).partner_id.id
message_follower_ids = values.get('message_follower_ids') or [] # webclient can send None or False
message_follower_ids.append([4, pid])
values['message_follower_ids'] = message_follower_ids
thread_id = super(mail_thread, self).create(cr, uid, values, context=context)
# automatic logging unless asked not to (mainly for various testing purpose)
if not context.get('mail_create_nolog'):
ir_model_pool = self.pool['ir.model']
ids = ir_model_pool.search(cr, uid, [('model', '=', self._name)], context=context)
name = ir_model_pool.read(cr, uid, ids, ['name'], context=context)[0]['name']
self.message_post(cr, uid, thread_id, body=_('%s created') % name, context=context)
# track values
track_ctx = dict(context)
if 'lang' not in track_ctx:
track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
if not context.get('mail_notrack'):
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
if tracked_fields:
initial_values = {thread_id: dict.fromkeys(tracked_fields, False)}
self.message_track(cr, uid, [thread_id], tracked_fields, initial_values, context=track_ctx)
# auto_subscribe: take values and defaults into account
create_values = dict(values)
for key, val in context.iteritems():
if key.startswith('default_') and key[8:] not in create_values:
create_values[key[8:]] = val
self.message_auto_subscribe(cr, uid, [thread_id], create_values.keys(), context=context, values=create_values)
return thread_id
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if context.get('tracking_disable'):
return super(mail_thread, self).write(
cr, uid, ids, values, context=context)
# Track initial values of tracked fields
track_ctx = dict(context)
if 'lang' not in track_ctx:
track_ctx['lang'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).lang
tracked_fields = None
if not context.get('mail_notrack'):
tracked_fields = self._get_tracked_fields(cr, uid, values.keys(), context=track_ctx)
if tracked_fields:
records = self.browse(cr, uid, ids, context=track_ctx)
initial_values = dict((record.id, dict((key, getattr(record, key)) for key in tracked_fields))
for record in records)
# Perform write
result = super(mail_thread, self).write(cr, uid, ids, values, context=context)
# Perform the tracking
if tracked_fields:
self.message_track(cr, uid, ids, tracked_fields, initial_values, context=track_ctx)
# update followers
self.message_auto_subscribe(cr, uid, ids, values.keys(), context=context, values=values)
return result
def unlink(self, cr, uid, ids, context=None):
""" Override unlink to delete messages and followers. This cannot be
cascaded, because link is done through (res_model, res_id). """
msg_obj = self.pool.get('mail.message')
fol_obj = self.pool.get('mail.followers')
if isinstance(ids, (int, long)):
ids = [ids]
# delete messages and notifications
msg_ids = msg_obj.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)], context=context)
msg_obj.unlink(cr, uid, msg_ids, context=context)
# delete
res = super(mail_thread, self).unlink(cr, uid, ids, context=context)
# delete followers
fol_ids = fol_obj.search(cr, SUPERUSER_ID, [('res_model', '=', self._name), ('res_id', 'in', ids)], context=context)
fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
return res
def copy_data(self, cr, uid, id, default=None, context=None):
# avoid tracking multiple temporary changes during copy
context = dict(context or {}, mail_notrack=True)
return super(mail_thread, self).copy_data(cr, uid, id, default=default, context=context)
#------------------------------------------------------
# Automatically log tracked fields
#------------------------------------------------------
def _get_tracked_fields(self, cr, uid, updated_fields, context=None):
""" Return a structure of tracked fields for the current model.
:param list updated_fields: modified field names
:return dict: a dict mapping field name to description, containing
always tracked fields and modified on_change fields
"""
tracked_fields = []
for name, field in self._fields.items():
visibility = getattr(field, 'track_visibility', False)
if visibility == 'always' or (visibility == 'onchange' and name in updated_fields) or name in self._track:
tracked_fields.append(name)
if tracked_fields:
return self.fields_get(cr, uid, tracked_fields, context=context)
return {}
def message_track(self, cr, uid, ids, tracked_fields, initial_values, context=None):
def convert_for_display(value, col_info):
if not value and col_info['type'] == 'boolean':
return 'False'
if not value:
return ''
if col_info['type'] == 'many2one':
return value.name_get()[0][1]
if col_info['type'] == 'selection':
return dict(col_info['selection'])[value]
return value
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, change in tracked_values.items():
message += '<div> • <b>%s</b>: ' % change.get('col_info')
if change.get('old_value'):
message += '%s → ' % change.get('old_value')
message += '%s</div>' % change.get('new_value')
return message
if not tracked_fields:
return True
for browse_record in self.browse(cr, uid, ids, context=context):
initial = initial_values[browse_record.id]
changes = set()
tracked_values = {}
# generate tracked_values data structure: {'col_name': {col_info, new_value, old_value}}
for col_name, col_info in tracked_fields.items():
field = self._fields[col_name]
initial_value = initial[col_name]
record_value = getattr(browse_record, col_name)
if record_value == initial_value and getattr(field, 'track_visibility', None) == 'always':
tracked_values[col_name] = dict(
col_info=col_info['string'],
new_value=convert_for_display(record_value, col_info),
)
elif record_value != initial_value and (record_value or initial_value): # because browse null != False
if getattr(field, 'track_visibility', None) in ['always', 'onchange']:
tracked_values[col_name] = dict(
col_info=col_info['string'],
old_value=convert_for_display(initial_value, col_info),
new_value=convert_for_display(record_value, col_info),
)
if col_name in self._track:
changes.add(col_name)
if not changes:
continue
# find subtypes and post messages or log if no subtype found
subtypes = []
# Passing this key allows leaving the subtype empty, so that no email is sent because partners_to_notify from mail_message._notify will be empty
if not context.get('mail_track_log_only'):
for field, track_info in self._track.items():
if field not in changes:
continue
for subtype, method in track_info.items():
if method(self, cr, uid, browse_record, context):
subtypes.append(subtype)
posted = False
for subtype in subtypes:
subtype_rec = self.pool.get('ir.model.data').xmlid_to_object(cr, uid, subtype, context=context)
if not (subtype_rec and subtype_rec.exists()):
_logger.debug('subtype %s not found', subtype)
continue
message = format_message(subtype_rec.description if subtype_rec.description else subtype_rec.name, tracked_values)
self.message_post(cr, uid, browse_record.id, body=message, subtype=subtype, context=context)
posted = True
if not posted:
message = format_message('', tracked_values)
self.message_post(cr, uid, browse_record.id, body=message, context=context)
return True
#------------------------------------------------------
# mail.message wrappers and tools
#------------------------------------------------------
def _needaction_domain_get(self, cr, uid, context=None):
if self._needaction:
return [('message_unread', '=', True)]
return []
def _garbage_collect_attachments(self, cr, uid, context=None):
""" Garbage collect lost mail attachments. Those are attachments
- linked to res_model 'mail.compose.message', the composer wizard
- with res_id 0, because they were created outside of an existing
wizard (typically user input through Chatter or reports
created on-the-fly by the templates)
- unused since at least one day (create_date and write_date)
"""
limit_date = datetime.datetime.utcnow() - datetime.timedelta(days=1)
limit_date_str = datetime.datetime.strftime(limit_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
ir_attachment_obj = self.pool.get('ir.attachment')
attach_ids = ir_attachment_obj.search(cr, uid, [
('res_model', '=', 'mail.compose.message'),
('res_id', '=', 0),
('create_date', '<', limit_date_str),
('write_date', '<', limit_date_str),
], context=context)
ir_attachment_obj.unlink(cr, uid, attach_ids, context=context)
return True
@api.cr_uid_ids_context
def check_mail_message_access(self, cr, uid, mids, operation, model_obj=None, context=None):
""" mail.message check permission rules for related document. This method is
meant to be inherited in order to implement addons-specific behavior.
A common behavior would be to allow creating messages when having read
access rule on the document, for portal document such as issues. """
if not model_obj:
model_obj = self
if hasattr(self, '_mail_post_access'):
create_allow = self._mail_post_access
else:
create_allow = 'write'
if operation in ['write', 'unlink']:
check_operation = 'write'
elif operation == 'create' and create_allow in ['create', 'read', 'write', 'unlink']:
check_operation = create_allow
elif operation == 'create':
check_operation = 'write'
else:
check_operation = operation
model_obj.check_access_rights(cr, uid, check_operation)
model_obj.check_access_rule(cr, uid, mids, check_operation, context=context)
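# Illustrative sketch: a document model may relax message creation rights by
# defining _mail_post_access, e.g. to allow posting with read access only:
#
#     class project_issue(osv.Model):  # hypothetical override
#         _inherit = 'project.issue'
#         _mail_post_access = 'read'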
def _get_inbox_action_xml_id(self, cr, uid, context=None):
""" When redirecting towards the Inbox, choose which action xml_id has
to be fetched. This method is meant to be inherited, at least in portal
because portal users have a different Inbox action than classic users. """
return ('mail', 'action_mail_inbox_feeds')
def message_redirect_action(self, cr, uid, context=None):
""" For a given message, return an action that either
- opens the form view of the related document if model, res_id, and
read access to the document
- opens the Inbox with a default search on the conversation if model,
res_id
- opens the Inbox with context propagated
"""
if context is None:
context = {}
# default action is the Inbox action
self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
act_model, act_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, *self._get_inbox_action_xml_id(cr, uid, context=context))
action = self.pool.get(act_model).read(cr, uid, [act_id], [])[0]
params = context.get('params')
msg_id = model = res_id = None
if params:
msg_id = params.get('message_id')
model = params.get('model')
res_id = params.get('res_id', params.get('id')) # signup automatically generated id instead of res_id
if not msg_id and not (model and res_id):
return action
if msg_id and not (model and res_id):
msg = self.pool.get('mail.message').browse(cr, uid, msg_id, context=context)
if msg.exists():
model, res_id = msg.model, msg.res_id
# if model + res_id found: try to redirect to the document or fallback on the Inbox
if model and res_id:
model_obj = self.pool.get(model)
if model_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
try:
model_obj.check_access_rule(cr, uid, [res_id], 'read', context=context)
action = model_obj.get_access_action(cr, uid, res_id, context=context)
except (osv.except_osv, orm.except_orm):
pass
action.update({
'context': {
'search_default_model': model,
'search_default_res_id': res_id,
}
})
return action
def _get_access_link(self, cr, uid, mail, partner, context=None):
# the parameters to encode for the query and fragment part of url
query = {'db': cr.dbname}
fragment = {
'login': partner.user_ids[0].login,
'action': 'mail.action_mail_redirect',
}
if mail.notification:
fragment['message_id'] = mail.mail_message_id.id
elif mail.model and mail.res_id:
fragment.update(model=mail.model, res_id=mail.res_id)
return "/web?%s#%s" % (urlencode(query), urlencode(fragment))
#------------------------------------------------------
# Email specific
#------------------------------------------------------
def message_get_default_recipients(self, cr, uid, ids, context=None):
if context and context.get('thread_model') and context['thread_model'] in self.pool and context['thread_model'] != self._name:
if hasattr(self.pool[context['thread_model']], 'message_get_default_recipients'):
sub_ctx = dict(context)
sub_ctx.pop('thread_model')
return self.pool[context['thread_model']].message_get_default_recipients(cr, uid, ids, context=sub_ctx)
res = {}
for record in self.browse(cr, SUPERUSER_ID, ids, context=context):
recipient_ids, email_to, email_cc = set(), False, False
if 'partner_id' in self._fields and record.partner_id:
recipient_ids.add(record.partner_id.id)
elif 'email_from' in self._fields and record.email_from:
email_to = record.email_from
elif 'email' in self._fields:
email_to = record.email
res[record.id] = {'partner_ids': list(recipient_ids), 'email_to': email_to, 'email_cc': email_cc}
return res
def message_get_reply_to(self, cr, uid, ids, default=None, context=None):
""" Returns the preferred reply-to email address that is basically
the alias of the document, if it exists. """
if context is None:
context = {}
model_name = context.get('thread_model') or self._name
alias_domain = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.domain", context=context)
res = dict.fromkeys(ids, False)
# alias domain: check for aliases and catchall
aliases = {}
doc_names = {}
if alias_domain:
if model_name and model_name != 'mail.thread':
alias_ids = self.pool['mail.alias'].search(
cr, SUPERUSER_ID, [
('alias_parent_model_id.model', '=', model_name),
('alias_parent_thread_id', 'in', ids),
('alias_name', '!=', False)
], context=context)
aliases.update(
dict((alias.alias_parent_thread_id, '%s@%s' % (alias.alias_name, alias_domain))
for alias in self.pool['mail.alias'].browse(cr, SUPERUSER_ID, alias_ids, context=context)))
doc_names.update(
dict((ng_res[0], ng_res[1])
for ng_res in self.pool[model_name].name_get(cr, SUPERUSER_ID, aliases.keys(), context=context)))
# left ids: use catchall
left_ids = set(ids).difference(set(aliases.keys()))
if left_ids:
catchall_alias = self.pool['ir.config_parameter'].get_param(cr, uid, "mail.catchall.alias", context=context)
if catchall_alias:
aliases.update(dict((res_id, '%s@%s' % (catchall_alias, alias_domain)) for res_id in left_ids))
# compute name of reply-to
company_name = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).company_id.name
for res_id in aliases.keys():
email_name = '%s%s' % (company_name, doc_names.get(res_id) and (' ' + doc_names[res_id]) or '')
email_addr = aliases[res_id]
res[res_id] = formataddr((email_name, email_addr))
left_ids = set(ids).difference(set(aliases.keys()))
if left_ids and default:
res.update(dict((res_id, default) for res_id in left_ids))
return res
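# Illustrative result of message_get_reply_to (alias names, domain, company
# and document names are hypothetical):
#
#     res = self.message_get_reply_to(cr, uid, [1, 2])
#     # {1: '"YourCompany Lead #1" <sales@example.com>',
#     #  2: '"YourCompany" <catchall@example.com>'}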
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
""" Get specific notification email values to store on the notification
mail_mail. Void method, inherit it to add custom values. """
res = dict()
return res
#------------------------------------------------------
# Mail gateway
#------------------------------------------------------
def message_capable_models(self, cr, uid, context=None):
""" Used by the plugin addon, based for plugin_outlook and others. """
ret_dict = {}
for model_name in self.pool.obj_list():
model = self.pool[model_name]
if hasattr(model, "message_process") and hasattr(model, "message_post"):
ret_dict[model_name] = model._description
return ret_dict
def _message_find_partners(self, cr, uid, message, header_fields=['From'], context=None):
""" Find partners related to some header fields of the message.
:param string message: an email.message instance """
s = ', '.join([decode(message.get(h)) for h in header_fields if message.get(h)])
return filter(lambda x: x, self._find_partner_from_emails(cr, uid, None, tools.email_split(s), context=context))
def message_route_verify(self, cr, uid, message, message_dict, route, update_author=True, assert_model=True, create_fallback=True, allow_private=False, context=None):
""" Verify route validity. Check and rules:
1 - if thread_id -> check that document effectively exists; otherwise
fallback on a message_new by resetting thread_id
2 - check that message_update exists if thread_id is set; or at least
that message_new exist
[ - find author_id if udpate_author is set]
3 - if there is an alias, check alias_contact:
'followers' and thread_id:
check on target document that the author is in the followers
'followers' and alias_parent_thread_id:
check on alias parent document that the author is in the
followers
'partners': check that author_id id set
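Illustrative example (model name, ids and alias are hypothetical)::

    route = ('crm.lead', 42, {}, uid, alias_record)
    checked = self.message_route_verify(
        cr, uid, message, message_dict, route,
        update_author=True, assert_model=False)
    # checked is a corrected 5-tuple, or ()/False when the route is rejected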
"""
assert isinstance(route, (list, tuple)), 'A route should be a list or a tuple'
assert len(route) == 5, 'A route should contain 5 elements: model, thread_id, custom_values, uid, alias record'
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
author_id = message_dict.get('author_id')
model, thread_id, alias = route[0], route[1], route[4]
model_pool = None
def _create_bounce_email():
mail_mail = self.pool.get('mail.mail')
mail_id = mail_mail.create(cr, uid, {
'body_html': '<div><p>Hello,</p>'
'<p>The following email sent to %s cannot be accepted because this is '
'a private email address. Only allowed people can contact us at this address.</p></div>'
'<blockquote>%s</blockquote>' % (message.get('to'), message_dict.get('body')),
'subject': 'Re: %s' % message.get('subject'),
'email_to': message.get('from'),
'auto_delete': True,
}, context=context)
mail_mail.send(cr, uid, [mail_id], context=context)
def _warn(message):
_logger.warning('Routing mail with Message-Id %s: route %s: %s',
message_id, route, message)
# Wrong model
if model and model not in self.pool:
if assert_model:
assert model in self.pool, 'Routing: unknown target model %s' % model
_warn('unknown target model %s' % model)
return ()
elif model:
model_pool = self.pool[model]
# Private message: should not contain any thread_id
if not model and thread_id:
if assert_model:
if thread_id:
raise ValueError('Routing: posting a message without model should be with a null res_id (private message), received %s.' % thread_id)
_warn('posting a message without model should be with a null res_id (private message), received %s, resetting thread_id' % thread_id)
thread_id = 0
# Private message: should have a parent_id (only answers)
if not model and not message_dict.get('parent_id'):
if assert_model:
if not message_dict.get('parent_id'):
raise ValueError('Routing: posting a message without model should be with a parent_id (private message).')
_warn('posting a message without model should be with a parent_id (private message), skipping')
return False
# Existing Document: check if exists; if not, fallback on create if allowed
if thread_id and not model_pool.exists(cr, uid, thread_id):
if create_fallback:
_warn('reply to missing document (%s,%s), fall back on new document creation' % (model, thread_id))
thread_id = None
elif assert_model:
assert model_pool.exists(cr, uid, thread_id), 'Routing: reply to missing document (%s,%s)' % (model, thread_id)
else:
_warn('reply to missing document (%s,%s), skipping' % (model, thread_id))
return False
# Existing Document: check model accepts the mailgateway
if thread_id and model and not hasattr(model_pool, 'message_update'):
if create_fallback:
_warn('model %s does not accept document update, fall back on document creation' % model)
thread_id = None
elif assert_model:
assert hasattr(model_pool, 'message_update'), 'Routing: model %s does not accept document update, crashing' % model
else:
_warn('model %s does not accept document update, skipping' % model)
return False
# New Document: check model accepts the mailgateway
if not thread_id and model and not hasattr(model_pool, 'message_new'):
if assert_model:
if not hasattr(model_pool, 'message_new'):
raise ValueError(
'Model %s does not accept document creation, crashing' % model
)
_warn('model %s does not accept document creation, skipping' % model)
return False
# Update message author if asked
# We do it now because we need it for aliases (contact settings)
if not author_id and update_author:
author_ids = self._find_partner_from_emails(cr, uid, thread_id, [email_from], model=model, context=context)
if author_ids:
author_id = author_ids[0]
message_dict['author_id'] = author_id
# Alias: check alias_contact settings
if alias and alias.alias_contact == 'followers' and (thread_id or alias.alias_parent_thread_id):
if thread_id:
obj = self.pool[model].browse(cr, uid, thread_id, context=context)
else:
obj = self.pool[alias.alias_parent_model_id.model].browse(cr, uid, alias.alias_parent_thread_id, context=context)
if not author_id or not author_id in [fol.id for fol in obj.message_follower_ids]:
_warn('alias %s restricted to internal followers, skipping' % alias.alias_name)
_create_bounce_email()
return False
elif alias and alias.alias_contact == 'partners' and not author_id:
_warn('alias %s does not accept unknown author, skipping' % alias.alias_name)
_create_bounce_email()
return False
if not model and not thread_id and not alias and not allow_private:
return ()
return (model, thread_id, route[2], route[3], None if context.get('drop_alias', False) else route[4])
def message_route(self, cr, uid, message, message_dict, model=None, thread_id=None,
custom_values=None, context=None):
"""Attempt to figure out the correct target model, thread_id,
custom_values and user_id to use for an incoming message.
Multiple values may be returned, if a message had multiple
recipients matching existing mail.aliases, for example.
The following heuristics are used, in this order:
1. If the message replies to an existing thread_id, and
properly contains the thread model in the 'In-Reply-To'
header, use this model/thread_id pair, and ignore
custom_value (not needed as no creation will take place)
2. Look for a mail.alias entry matching the message
recipient, and use the corresponding model, thread_id,
custom_values and user_id.
3. Fallback to the ``model``, ``thread_id`` and ``custom_values``
provided.
4. If all the above fails, raise an exception.
:param string message: an email.message instance
:param dict message_dict: dictionary holding message variables
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:param dict custom_values: optional dictionary of default field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. Only used if the message
does not reply to an existing thread and does not match any mail alias.
:return: list of [model, thread_id, custom_values, user_id, alias]
:raises: ValueError, TypeError
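Illustrative return value (model, ids and alias are hypothetical)::

    [('crm.lead', 42, {}, 1, alias_record)]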
"""
if not isinstance(message, Message):
raise TypeError('message must be an email.message.Message at this point')
mail_msg_obj = self.pool['mail.message']
mail_alias = self.pool.get('mail.alias')
fallback_model = model
# Get email.message.Message variables for future processing
message_id = message.get('Message-Id')
email_from = decode_header(message, 'From')
email_to = decode_header(message, 'To')
references = decode_header(message, 'References')
in_reply_to = decode_header(message, 'In-Reply-To').strip()
thread_references = references or in_reply_to
# 0. First check if this is a bounce message or not.
# See http://datatracker.ietf.org/doc/rfc3462/?include_text=1
# Since not all MTAs respect this RFC (googlemail is one of them),
# we also need to verify whether the message comes from "mailer-daemon"
localpart = (tools.email_split(email_from) or [''])[0].split('@', 1)[0].lower()
if message.get_content_type() == 'multipart/report' or localpart == 'mailer-daemon':
_logger.info("Not routing bounce email from %s to %s with Message-Id %s",
email_from, email_to, message_id)
return []
# 1. message is a reply to an existing message (exact match of message_id)
ref_match = thread_references and tools.reference_re.search(thread_references)
msg_references = mail_header_msgid_re.findall(thread_references)
mail_message_ids = mail_msg_obj.search(cr, uid, [('message_id', 'in', msg_references)], context=context)
if ref_match and mail_message_ids:
original_msg = mail_msg_obj.browse(cr, SUPERUSER_ID, mail_message_ids[0], context=context)
model, thread_id = original_msg.model, original_msg.res_id
alias_ids = mail_alias.search(cr, uid, [('alias_name', '=', (tools.email_split(email_to) or [''])[0].split('@', 1)[0].lower())])
alias = None
if alias_ids:
alias = mail_alias.browse(cr, uid, [alias_ids[0]], context=context)
route = self.message_route_verify(
cr, uid, message, message_dict,
(model, thread_id, custom_values, uid, alias),
update_author=True, assert_model=False, create_fallback=True, context=dict(context, drop_alias=True))
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct reply to msg: model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [route]
elif route is False:
return []
# 2. message is a reply to an existing thread (6.1 compatibility)
if ref_match:
reply_thread_id = int(ref_match.group(1))
reply_model = ref_match.group(2) or fallback_model
reply_hostname = ref_match.group(3)
local_hostname = socket.gethostname()
# do not match forwarded emails from another OpenERP system (thread_id collision!)
if local_hostname == reply_hostname:
thread_id, model = reply_thread_id, reply_model
if thread_id and model in self.pool:
model_obj = self.pool[model]
compat_mail_msg_ids = mail_msg_obj.search(
cr, uid, [
('message_id', '=', False),
('model', '=', model),
('res_id', '=', thread_id),
], context=context)
if compat_mail_msg_ids and model_obj.exists(cr, uid, thread_id) and hasattr(model_obj, 'message_update'):
route = self.message_route_verify(
cr, uid, message, message_dict,
(model, thread_id, custom_values, uid, None),
update_author=True, assert_model=True, create_fallback=True, context=context)
if route:
# parent is invalid for a compat-reply
message_dict.pop('parent_id', None)
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct thread reply (compat-mode) to model: %s, thread_id: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, model, thread_id, custom_values, uid)
return [route]
elif route is False:
return []
# 3. Reply to a private message
if in_reply_to:
mail_message_ids = mail_msg_obj.search(cr, uid, [
('message_id', '=', in_reply_to),
'!', ('message_id', 'ilike', 'reply_to')
], limit=1, context=context)
if mail_message_ids:
mail_message = mail_msg_obj.browse(cr, uid, mail_message_ids[0], context=context)
route = self.message_route_verify(cr, uid, message, message_dict,
(mail_message.model, mail_message.res_id, custom_values, uid, None),
update_author=True, assert_model=True, create_fallback=True, allow_private=True, context=context)
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct reply to a private message: %s, custom_values: %s, uid: %s',
email_from, email_to, message_id, mail_message.id, custom_values, uid)
return [route]
elif route is False:
return []
# no route found for a matching reference (or reply), so parent is invalid
message_dict.pop('parent_id', None)
# 4. Look for a matching mail.alias entry
# Delivered-To is a safe bet in most modern MTAs, but we have to fallback on To + Cc values
# for all the odd MTAs out there, as there is no standard header for the envelope's `rcpt_to` value.
rcpt_tos = \
','.join([decode_header(message, 'Delivered-To'),
decode_header(message, 'To'),
decode_header(message, 'Cc'),
decode_header(message, 'Resent-To'),
decode_header(message, 'Resent-Cc')])
local_parts = [e.split('@')[0] for e in tools.email_split(rcpt_tos)]
if local_parts:
alias_ids = mail_alias.search(cr, uid, [('alias_name', 'in', local_parts)])
if alias_ids:
routes = []
for alias in mail_alias.browse(cr, uid, alias_ids, context=context):
user_id = alias.alias_user_id.id
if not user_id:
# TDE note: this could cause crashes, because there is no guarantee that the user
# that sent the email has the right to create or modify a new document
# Fallback on user_id = uid
# Note: recognized partners will be added as followers anyway
# user_id = self._message_find_user_id(cr, uid, message, context=context)
user_id = uid
_logger.info('No matching user_id for the alias %s', alias.alias_name)
route = (alias.alias_model_id.model, alias.alias_force_thread_id, eval(alias.alias_defaults), user_id, alias)
route = self.message_route_verify(cr, uid, message, message_dict, route,
update_author=True, assert_model=True, create_fallback=True, context=context)
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: direct alias match: %r',
email_from, email_to, message_id, route)
routes.append(route)
return routes
# 5. Fallback to the provided parameters, if they work
if not thread_id:
# Legacy: fallback to matching [ID] in the Subject
match = tools.res_re.search(decode_header(message, 'Subject'))
thread_id = match and match.group(1)
# Convert into int (bug spotted in 7.0 because of str)
try:
thread_id = int(thread_id)
except (ValueError, TypeError):
thread_id = False
route = self.message_route_verify(cr, uid, message, message_dict,
(fallback_model, thread_id, custom_values, uid, None),
update_author=True, assert_model=True, context=context)
if route:
_logger.info(
'Routing mail from %s to %s with Message-Id %s: fallback to model:%s, thread_id:%s, custom_values:%s, uid:%s',
email_from, email_to, message_id, fallback_model, thread_id, custom_values, uid)
return [route]
# ValueError if no routes found and no bounce occurred
raise ValueError(
'No possible route found for incoming message from %s to %s (Message-Id %s:). '
'Create an appropriate mail.alias or force the destination model.' %
(email_from, email_to, message_id)
)
def message_route_process(self, cr, uid, message, message_dict, routes, context=None):
# postpone setting message_dict.partner_ids after message_post, to avoid double notifications
context = dict(context or {})
partner_ids = message_dict.pop('partner_ids', [])
thread_id = False
for model, thread_id, custom_values, user_id, alias in routes or ():
if self._name == 'mail.thread':
context['thread_model'] = model
if model:
model_pool = self.pool[model]
if not (thread_id and hasattr(model_pool, 'message_update') or hasattr(model_pool, 'message_new')):
raise ValueError(
"Undeliverable mail with Message-Id %s, model %s does not accept incoming emails" %
(message_dict['message_id'], model)
)
# disable subscriptions during message_new/update to avoid having the system user running the
# email gateway become a follower of all inbound messages
nosub_ctx = dict(context, mail_create_nosubscribe=True, mail_create_nolog=True)
if thread_id and hasattr(model_pool, 'message_update'):
model_pool.message_update(cr, user_id, [thread_id], message_dict, context=nosub_ctx)
else:
# if a new thread is created, parent is irrelevant
message_dict.pop('parent_id', None)
thread_id = model_pool.message_new(cr, user_id, message_dict, custom_values, context=nosub_ctx)
else:
if thread_id:
raise ValueError("Posting a message without model should be with a null res_id, to create a private message.")
model_pool = self.pool.get('mail.thread')
if not hasattr(model_pool, 'message_post'):
context['thread_model'] = model
model_pool = self.pool['mail.thread']
new_msg_id = model_pool.message_post(cr, uid, [thread_id], context=context, subtype='mail.mt_comment', **message_dict)
if partner_ids:
# postponed after message_post, because this is an external message and we don't want to create
# duplicate emails due to notifications
self.pool.get('mail.message').write(cr, uid, [new_msg_id], {'partner_ids': partner_ids}, context=context)
return thread_id
def message_process(self, cr, uid, model, message, custom_values=None,
save_original=False, strip_attachments=False,
thread_id=None, context=None):
""" Process an incoming RFC2822 email message, relying on
``mail.message.parse()`` for the parsing operation,
and ``message_route()`` to figure out the target model.
Once the target model is known, its ``message_new`` method
is called with the new message (if the thread record did not exist)
or its ``message_update`` method (if it did).
There is a special case where the target model is False: a reply
to a private message. In this case, we skip the message_new /
message_update step, to just post a new message using mail_thread
message_post.
:param string model: the fallback model to use if the message
does not match any of the currently configured mail aliases
(may be None if a matching alias is supposed to be present)
:param message: source of the RFC2822 message
:type message: string or xmlrpclib.Binary
:param dict custom_values: optional dictionary of field values
to pass to ``message_new`` if a new record needs to be created.
Ignored if the thread record already exists, and also if a
matching mail.alias was found (aliases define their own defaults)
:param bool save_original: whether to keep a copy of the original
email source attached to the message after it is imported.
:param bool strip_attachments: whether to strip all attachments
before processing the message, in order to save some space.
:param int thread_id: optional ID of the record/thread from ``model``
to which this mail should be attached. When provided, this
overrides the automatic detection based on the message
headers.
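Illustrative call, as a mail gateway script might perform it (the
registry access, model name and email source are hypothetical)::

    thread_id = registry['mail.thread'].message_process(
        cr, uid, 'crm.lead', raw_rfc2822_source)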
"""
if context is None:
context = {}
# extract message bytes - we are forced to pass the message as binary because
# we don't know its encoding until we parse its headers and hence can't
# convert it to utf-8 for transport between the mailgate script and here.
if isinstance(message, xmlrpclib.Binary):
message = str(message.data)
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
if isinstance(message, unicode):
message = message.encode('utf-8')
msg_txt = email.message_from_string(message)
# parse the message, verify we are not in a loop by checking message_id is not duplicated
msg = self.message_parse(cr, uid, msg_txt, save_original=save_original, context=context)
if strip_attachments:
msg.pop('attachments', None)
if msg.get('message_id'): # should always be True, as message_parse generates one if missing
existing_msg_ids = self.pool.get('mail.message').search(cr, SUPERUSER_ID, [
('message_id', '=', msg.get('message_id')),
], context=context)
if existing_msg_ids:
_logger.info('Ignored mail from %s to %s with Message-Id %s: found duplicated Message-Id during processing',
msg.get('from'), msg.get('to'), msg.get('message_id'))
return False
# find possible routes for the message
routes = self.message_route(cr, uid, msg_txt, msg, model, thread_id, custom_values, context=context)
thread_id = self.message_route_process(cr, uid, msg_txt, msg, routes, context=context)
return thread_id
def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
"""Called by ``message_process`` when a new message is received
for a given thread model, if the message did not belong to
an existing thread.
The default behavior is to create a new record of the corresponding
model (based on some very basic info extracted from the message).
Additional behavior may be implemented by overriding this method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse`` for details.
:param dict custom_values: optional dictionary of additional
field values to pass to create()
when creating the new thread record.
Be careful, these values may override
any other values coming from the message.
:param dict context: if a ``thread_model`` value is present
in the context, its value will be used
to determine the model of the record
to create (instead of the current model).
:rtype: int
:return: the id of the newly created thread object
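Illustrative override (``my_model`` is hypothetical), pre-filling a
field from the email before delegating to the default implementation::

    def message_new(self, cr, uid, msg_dict, custom_values=None, context=None):
        defaults = {'email_from': msg_dict.get('from')}
        defaults.update(custom_values or {})
        return super(my_model, self).message_new(
            cr, uid, msg_dict, custom_values=defaults, context=context)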
"""
if context is None:
context = {}
data = {}
if isinstance(custom_values, dict):
data = custom_values.copy()
model = context.get('thread_model') or self._name
model_pool = self.pool[model]
fields = model_pool.fields_get(cr, uid, context=context)
if 'name' in fields and not data.get('name'):
data['name'] = msg_dict.get('subject', '')
res_id = model_pool.create(cr, uid, data, context=context)
return res_id
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
"""Called by ``message_process`` when a new message is received
for an existing thread. The default behavior is to update the record
with update_vals taken from the incoming email.
Additional behavior may be implemented by overriding this
method.
:param dict msg_dict: a map containing the email details and
attachments. See ``message_process`` and
``mail.message.parse()`` for details.
:param dict update_vals: a dict containing values to update records
given their ids; if the dict is None or is
void, no write operation is performed.
"""
if update_vals:
self.write(cr, uid, ids, update_vals, context=context)
return True
def _message_extract_payload(self, message, save_original=False):
"""Extract body as HTML and attachments from the mail message"""
attachments = []
body = u''
if save_original:
attachments.append(('original_email.eml', message.as_string()))
# Be careful, content-type may contain tricky content like in the
# following example so test the MIME type with startswith()
#
# Content-Type: multipart/related;
# boundary="_004_3f1e4da175f349248b8d43cdeb9866f1AMSPR06MB343eurprd06pro_";
# type="text/html"
if not message.is_multipart() or message.get('content-type', '').startswith("text/"):
encoding = message.get_content_charset()
body = message.get_payload(decode=True)
body = tools.ustr(body, encoding, errors='replace')
if message.get_content_type() == 'text/plain':
# text/plain -> <pre/>
body = tools.append_content_to_html(u'', body, preserve=True)
else:
alternative = False
mixed = False
html = u''
for part in message.walk():
if part.get_content_type() == 'multipart/alternative':
alternative = True
if part.get_content_type() == 'multipart/mixed':
mixed = True
if part.get_content_maintype() == 'multipart':
continue # skip container
# part.get_filename returns a decoded value if it is able to decode it, the encoded value otherwise.
# The original get_filename is not able to decode iso-8859-1 (for instance);
# therefore, ISO-encoded attachment names cannot be decoded properly with get_filename.
# The code here partially copies the original get_filename method, but handles more encodings.
filename = part.get_param('filename', None, 'content-disposition')
if not filename:
filename = part.get_param('name', None)
if filename:
if isinstance(filename, tuple):
# RFC2231
filename = email.utils.collapse_rfc2231_value(filename).strip()
else:
filename = decode(filename)
encoding = part.get_content_charset() # None if attachment
# 1) Explicit Attachments -> attachments
if filename or part.get('content-disposition', '').strip().startswith('attachment'):
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
continue
# 2) text/plain -> <pre/>
if part.get_content_type() == 'text/plain' and (not alternative or not body):
body = tools.append_content_to_html(body, tools.ustr(part.get_payload(decode=True),
encoding, errors='replace'), preserve=True)
# 3) text/html -> raw
elif part.get_content_type() == 'text/html':
# multipart/alternative has one text part and one HTML part; keep only the latter
# mixed allows several html parts, append html content
append_content = not alternative or (html and mixed)
html = tools.ustr(part.get_payload(decode=True), encoding, errors='replace')
if not append_content:
body = html
else:
body = tools.append_content_to_html(body, html, plaintext=False)
# 4) Anything else -> attachment
else:
attachments.append((filename or 'attachment', part.get_payload(decode=True)))
return body, attachments
def message_parse(self, cr, uid, message, save_original=False, context=None):
"""Parses a string or email.message.Message representing an
RFC-2822 email, and returns a generic dict holding the
message details.
:param message: the message to parse
:type message: email.message.Message | string | unicode
:param bool save_original: whether the returned dict
should include an ``original`` attachment containing
the source of the message
:rtype: dict
:return: A dict with the following structure, where each
field may not be present if missing in original
message::
{ 'message_id': msg_id,
'subject': subject,
'from': from,
'to': to,
'cc': cc,
'body': unified_body,
'attachments': [('file1', 'bytes'),
('file2', 'bytes')],
}
"""
msg_dict = {
'type': 'email',
}
if not isinstance(message, Message):
if isinstance(message, unicode):
# Warning: message_from_string doesn't always work correctly on unicode,
# we must use utf-8 strings here :-(
message = message.encode('utf-8')
message = email.message_from_string(message)
message_id = message['message-id']
if not message_id:
# Very unusual situation, but we should be fault-tolerant here
message_id = "<%s@localhost>" % time.time()
_logger.debug('Parsing Message without message-id, generating a random one: %s', message_id)
msg_dict['message_id'] = message_id
if message.get('Subject'):
msg_dict['subject'] = decode(message.get('Subject'))
# Envelope fields not stored in mail.message but made available for message_new()
msg_dict['from'] = decode(message.get('from'))
msg_dict['to'] = decode(message.get('to'))
msg_dict['cc'] = decode(message.get('cc'))
msg_dict['email_from'] = decode(message.get('from'))
partner_ids = self._message_find_partners(cr, uid, message, ['To', 'Cc'], context=context)
msg_dict['partner_ids'] = [(4, partner_id) for partner_id in partner_ids]
if message.get('Date'):
try:
date_hdr = decode(message.get('Date'))
parsed_date = dateutil.parser.parse(date_hdr, fuzzy=True)
if parsed_date.utcoffset() is None:
# naive datetime, so we arbitrarily decide to make it
# UTC, there's no better choice. Should not happen,
# as RFC2822 requires timezone offset in Date headers.
stored_date = parsed_date.replace(tzinfo=pytz.utc)
else:
stored_date = parsed_date.astimezone(tz=pytz.utc)
except Exception:
_logger.warning('Failed to parse Date header %r in incoming mail '
'with message-id %r, assuming current date/time.',
message.get('Date'), message_id)
stored_date = datetime.datetime.now()
msg_dict['date'] = stored_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
if message.get('In-Reply-To'):
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', '=', decode(message['In-Reply-To'].strip()))])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
if message.get('References') and 'parent_id' not in msg_dict:
msg_list = mail_header_msgid_re.findall(decode(message['References']))
parent_ids = self.pool.get('mail.message').search(cr, uid, [('message_id', 'in', [x.strip() for x in msg_list])])
if parent_ids:
msg_dict['parent_id'] = parent_ids[0]
msg_dict['body'], msg_dict['attachments'] = self._message_extract_payload(message, save_original=save_original)
return msg_dict
#------------------------------------------------------
# Recipients management tools
#------------------------------------------------------
def _message_add_suggested_recipient(self, cr, uid, result, obj, partner=None, email=None, reason='', context=None):
""" Called by message_get_suggested_recipients, to add a suggested
recipient in the result dictionary. The form is :
partner_id, partner_name<partner_email> or partner_name, reason """
if email and not partner:
# get partner info from email
partner_info = self.message_partner_info_from_emails(cr, uid, obj.id, [email], context=context)[0]
if partner_info.get('partner_id'):
partner = self.pool.get('res.partner').browse(cr, SUPERUSER_ID, [partner_info['partner_id']], context=context)[0]
if email and email in [val[1] for val in result[obj.id]]: # already existing email -> skip
return result
if partner and partner in obj.message_follower_ids: # recipient already in the followers -> skip
return result
if partner and partner.id in [val[0] for val in result[obj.id]]: # already existing partner ID -> skip
return result
if partner and partner.email: # complete profile: id, name <email>
result[obj.id].append((partner.id, '%s<%s>' % (partner.name, partner.email), reason))
elif partner: # incomplete profile: id, name
result[obj.id].append((partner.id, '%s' % (partner.name), reason))
else: # unknown partner, we are probably managing an email address
result[obj.id].append((False, email, reason))
return result
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
""" Returns suggested recipients for ids. Those are a list of
tuple (partner_id, partner_name, reason), to be managed by Chatter. """
result = dict((res_id, []) for res_id in ids)
if 'user_id' in self._fields:
for obj in self.browse(cr, SUPERUSER_ID, ids, context=context): # SUPERUSER because of a read on res.users that would crash otherwise
if not obj.user_id or not obj.user_id.partner_id:
continue
self._message_add_suggested_recipient(cr, uid, result, obj, partner=obj.user_id.partner_id, reason=self._fields['user_id'].string, context=context)
return result
def _find_partner_from_emails(self, cr, uid, id, emails, model=None, context=None, check_followers=True):
""" Utility method to find partners from email addresses. The rules are :
1 - check in document (model | self, id) followers
2 - try to find a matching partner that is also an user
3 - try to find a matching partner
:param list emails: list of email addresses
:param string model: model to fetch related record; by default self
is used.
:param boolean check_followers: check in document followers
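Illustrative call (addresses and ids are hypothetical); unresolved
emails yield False entries, so the result stays index-aligned with
``emails``::

    pids = self._find_partner_from_emails(
        cr, uid, 42, ['known@example.com', 'unknown@example.com'])
    # e.g. [7, False]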
"""
partner_obj = self.pool['res.partner']
partner_ids = []
obj = None
if id and (model or self._name != 'mail.thread') and check_followers:
if model:
obj = self.pool[model].browse(cr, uid, id, context=context)
else:
obj = self.browse(cr, uid, id, context=context)
for contact in emails:
partner_id = False
email_address = tools.email_split(contact)
if not email_address:
partner_ids.append(partner_id)
continue
email_address = email_address[0]
# first try: check in document's followers
if obj:
for follower in obj.message_follower_ids:
if follower.email == email_address:
partner_id = follower.id
# second try: check in partners that are also users
# Escape special SQL characters in email_address to avoid invalid matches
email_address = (email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_'))
email_brackets = "<%s>" % email_address
if not partner_id:
# exact, case-insensitive match
ids = partner_obj.search(cr, SUPERUSER_ID,
[('email', '=ilike', email_address),
('user_ids', '!=', False)],
limit=1, context=context)
if not ids:
# if no match with addr-spec, attempt substring match within name-addr pair
ids = partner_obj.search(cr, SUPERUSER_ID,
[('email', 'ilike', email_brackets),
('user_ids', '!=', False)],
limit=1, context=context)
if ids:
partner_id = ids[0]
# third try: check in partners
if not partner_id:
# exact, case-insensitive match
ids = partner_obj.search(cr, SUPERUSER_ID,
[('email', '=ilike', email_address)],
limit=1, context=context)
if not ids:
# if no match with addr-spec, attempt substring match within name-addr pair
ids = partner_obj.search(cr, SUPERUSER_ID,
[('email', 'ilike', email_brackets)],
limit=1, context=context)
if ids:
partner_id = ids[0]
partner_ids.append(partner_id)
return partner_ids
def message_partner_info_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
""" Convert a list of emails into a list partner_ids and a list
new_partner_ids. The return value is non conventional because
it is meant to be used by the mail widget.
:return dict: partner_ids and new_partner_ids """
mail_message_obj = self.pool.get('mail.message')
partner_ids = self._find_partner_from_emails(cr, uid, id, emails, context=context)
result = list()
for idx in range(len(emails)):
email_address = emails[idx]
partner_id = partner_ids[idx]
partner_info = {'full_name': email_address, 'partner_id': partner_id}
result.append(partner_info)
# link mails whose email_from matches this email to the found partner id
if link_mail and partner_info['partner_id']:
# Escape special SQL characters in email_address to avoid invalid matches
email_address = (email_address.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_'))
email_brackets = "<%s>" % email_address
message_ids = mail_message_obj.search(cr, SUPERUSER_ID, [
'|',
('email_from', '=ilike', email_address),
('email_from', 'ilike', email_brackets),
('author_id', '=', False)
], context=context)
if message_ids:
mail_message_obj.write(cr, SUPERUSER_ID, message_ids, {'author_id': partner_info['partner_id']}, context=context)
return result
def _message_preprocess_attachments(self, cr, uid, attachments, attachment_ids, attach_model, attach_res_id, context=None):
""" Preprocess attachments for mail_thread.message_post() or mail_mail.create().
:param list attachments: list of attachment tuples in the form ``(name,content)``,
where content is NOT base64 encoded
:param list attachment_ids: a list of attachment ids, not in tomany command form
:param str attach_model: the model of the attachments parent record
:param integer attach_res_id: the id of the attachments parent record
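Illustrative input (names, contents and ids are hypothetical)::

    attachments = [('report.txt', 'plain text content')]
    attachment_ids = [12, 13]  # existing ir.attachment ids
    # returns o2m/m2m commands such as [(4, 12), (4, 13), (0, 0, {...})]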
"""
Attachment = self.pool['ir.attachment']
m2m_attachment_ids = []
if attachment_ids:
filtered_attachment_ids = Attachment.search(cr, SUPERUSER_ID, [
('res_model', '=', 'mail.compose.message'),
('create_uid', '=', uid),
('id', 'in', attachment_ids)], context=context)
if filtered_attachment_ids:
Attachment.write(cr, SUPERUSER_ID, filtered_attachment_ids, {'res_model': attach_model, 'res_id': attach_res_id}, context=context)
m2m_attachment_ids += [(4, id) for id in attachment_ids]
# Handle the attachments parameter, a list of (name, content) tuples
for name, content in attachments:
if isinstance(content, unicode):
content = content.encode('utf-8')
data_attach = {
'name': name,
'datas': base64.b64encode(str(content)),
'datas_fname': name,
'description': name,
'res_model': attach_model,
'res_id': attach_res_id,
}
m2m_attachment_ids.append((0, 0, data_attach))
return m2m_attachment_ids
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification',
subtype=None, parent_id=False, attachments=None, context=None,
content_subtype='html', **kwargs):
""" Post a new message in an existing thread, returning the new
mail.message ID.
:param int thread_id: thread ID to post into, or list with one ID;
if False/0, mail.message model will also be set as False
:param str body: body of the message, usually raw HTML that will
be sanitized
:param str type: see mail_message.type field
:param str content_subtype: if 'plaintext', convert body into HTML
:param int parent_id: handle reply to a previous message by adding the
parent partners to the message in case of private discussion
:param list attachments: list of attachment tuples in the form
``(name, content)``, where content is NOT base64 encoded
Extra keyword arguments will be used as default column values for the
new mail.message record. Special cases:
- attachment_ids: supposed not attached to any document; attach them
to the related document. Should only be set by Chatter.
:return int: ID of newly created mail.message
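Illustrative call (model, ids and body are hypothetical)::

    msg_id = self.pool['crm.lead'].message_post(
        cr, uid, 42, body='<p>Order confirmed</p>',
        subtype='mail.mt_comment', partner_ids=[(4, 7)])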
"""
if context is None:
context = {}
if attachments is None:
attachments = {}
mail_message = self.pool.get('mail.message')
ir_attachment = self.pool.get('ir.attachment')
assert (not thread_id) or \
isinstance(thread_id, (int, long)) or \
(isinstance(thread_id, (list, tuple)) and len(thread_id) == 1), \
"Invalid thread_id; should be 0, False, an ID or a list with one ID"
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
# if we're processing a message directly coming from the gateway, the destination model was
# set in the context.
model = False
if thread_id:
model = context.get('thread_model', False) if self._name == 'mail.thread' else self._name
if model and model != self._name and hasattr(self.pool[model], 'message_post'):
del context['thread_model']
return self.pool[model].message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, content_subtype=content_subtype, **kwargs)
# 0: Find the message's author, because we need it for private discussion
author_id = kwargs.get('author_id')
if author_id is None: # keep False values
author_id = self.pool.get('mail.message')._get_default_author(cr, uid, context=context)
# 1: Handle content subtype: if plaintext, convert it to HTML
if content_subtype == 'plaintext':
body = tools.plaintext2html(body)
# 2: Private message: add recipients (recipients and author of parent message) - current author
# + legacy-code management (! we manage only 4 and 6 commands)
partner_ids = set()
kwargs_partner_ids = kwargs.pop('partner_ids', [])
for partner_id in kwargs_partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
partner_ids.add(partner_id[1])
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
partner_ids |= set(partner_id[2])
elif isinstance(partner_id, (int, long)):
partner_ids.add(partner_id)
else:
pass # we do not manage anything else
if parent_id and not model:
parent_message = mail_message.browse(cr, uid, parent_id, context=context)
private_followers = set([partner.id for partner in parent_message.partner_ids])
if parent_message.author_id:
private_followers.add(parent_message.author_id.id)
private_followers -= set([author_id])
partner_ids |= private_followers
# 3. Attachments
# - HACK TDE FIXME: Chatter: attachments linked to the document (not done JS-side), load the message
attachment_ids = self._message_preprocess_attachments(cr, uid, attachments, kwargs.pop('attachment_ids', []), model, thread_id, context)
# 4: mail.message.subtype
subtype_id = False
if subtype:
if '.' not in subtype:
subtype = 'mail.%s' % subtype
subtype_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, subtype)
# automatically subscribe recipients if asked to
if context.get('mail_post_autofollow') and thread_id and partner_ids:
partner_to_subscribe = partner_ids
if context.get('mail_post_autofollow_partner_ids'):
partner_to_subscribe = filter(lambda item: item in context.get('mail_post_autofollow_partner_ids'), partner_ids)
self.message_subscribe(cr, uid, [thread_id], list(partner_to_subscribe), context=context)
# _mail_flat_thread: automatically set free messages to the first posted message
if self._mail_flat_thread and model and not parent_id and thread_id:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model), ('type', '=', 'email')], context=context, order="id ASC", limit=1)
if not message_ids:
message_ids = mail_message.search(cr, uid, ['&', ('res_id', '=', thread_id), ('model', '=', model)], context=context, order="id ASC", limit=1)
parent_id = message_ids and message_ids[0] or False
# we want to set a parent: force to set the parent_id to the oldest ancestor, to avoid having more than 1 level of thread
elif parent_id:
message_ids = mail_message.search(cr, SUPERUSER_ID, [('id', '=', parent_id), ('parent_id', '!=', False)], context=context)
# avoid loops when finding ancestors
processed_list = []
if message_ids:
message = mail_message.browse(cr, SUPERUSER_ID, message_ids[0], context=context)
while (message.parent_id and message.parent_id.id not in processed_list):
processed_list.append(message.parent_id.id)
message = message.parent_id
parent_id = message.id
values = kwargs
values.update({
'author_id': author_id,
'model': model,
'res_id': model and thread_id or False,
'body': body,
'subject': subject or False,
'type': type,
'parent_id': parent_id,
'attachment_ids': attachment_ids,
'subtype_id': subtype_id,
'partner_ids': [(4, pid) for pid in partner_ids],
})
# Avoid warnings about non-existing fields
for x in ('from', 'to', 'cc'):
values.pop(x, None)
# Post the message
msg_id = mail_message.create(cr, uid, values, context=context)
# Post-process: subscribe author, update message_last_post
if model and model != 'mail.thread' and thread_id and subtype_id:
# done with SUPERUSER_ID, because on some models users can post only with read access, not necessarily write access
self.write(cr, SUPERUSER_ID, [thread_id], {'message_last_post': fields.datetime.now()}, context=context)
message = mail_message.browse(cr, uid, msg_id, context=context)
if message.author_id and model and thread_id and type != 'notification' and not context.get('mail_create_nosubscribe'):
self.message_subscribe(cr, uid, [thread_id], [message.author_id.id], context=context)
return msg_id
#------------------------------------------------------
# Followers API
#------------------------------------------------------
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
""" Wrapper to get subtypes data. """
return self._get_subscription_data(cr, uid, ids, None, None, user_pid=user_pid, context=context)
def message_subscribe_users(self, cr, uid, ids, user_ids=None, subtype_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, subscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
result = self.message_subscribe(cr, uid, ids, partner_ids, subtype_ids=subtype_ids, context=context)
if partner_ids and result:
self.pool['ir.ui.menu'].clear_cache()
return result
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
""" Add partners to the records followers. """
if context is None:
context = {}
# not necessary for computation, but saves an access right check
if not partner_ids:
return True
mail_followers_obj = self.pool.get('mail.followers')
subtype_obj = self.pool.get('mail.message.subtype')
user_pid = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
if set(partner_ids) == set([user_pid]):
try:
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
except (osv.except_osv, orm.except_orm):
return False
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
existing_pids_dict = {}
fol_ids = mail_followers_obj.search(cr, SUPERUSER_ID, ['&', '&', ('res_model', '=', self._name), ('res_id', 'in', ids), ('partner_id', 'in', partner_ids)])
for fol in mail_followers_obj.browse(cr, SUPERUSER_ID, fol_ids, context=context):
existing_pids_dict.setdefault(fol.res_id, set()).add(fol.partner_id.id)
# subtype_ids specified: update already subscribed partners
if subtype_ids and fol_ids:
mail_followers_obj.write(cr, SUPERUSER_ID, fol_ids, {'subtype_ids': [(6, 0, subtype_ids)]}, context=context)
# subtype_ids not specified: do not update already subscribed partner, fetch default subtypes for new partners
if subtype_ids is None:
subtype_ids = subtype_obj.search(
cr, uid, [
('default', '=', True), '|', ('res_model', '=', self._name), ('res_model', '=', False)], context=context)
for id in ids:
existing_pids = existing_pids_dict.get(id, set())
new_pids = set(partner_ids) - existing_pids
# subscribe new followers
for new_pid in new_pids:
mail_followers_obj.create(
cr, SUPERUSER_ID, {
'res_model': self._name,
'res_id': id,
'partner_id': new_pid,
'subtype_ids': [(6, 0, subtype_ids)],
}, context=context)
return True
def message_unsubscribe_users(self, cr, uid, ids, user_ids=None, context=None):
""" Wrapper on message_subscribe, using users. If user_ids is not
provided, unsubscribe uid instead. """
if user_ids is None:
user_ids = [uid]
partner_ids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, uid, user_ids, context=context)]
result = self.message_unsubscribe(cr, uid, ids, partner_ids, context=context)
if partner_ids and result:
self.pool['ir.ui.menu'].clear_cache()
return result
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
""" Remove partners from the records followers. """
# not necessary for computation, but saves an access right check
if not partner_ids:
return True
user_pid = self.pool.get('res.users').read(cr, uid, uid, ['partner_id'], context=context)['partner_id'][0]
if set(partner_ids) == set([user_pid]):
self.check_access_rights(cr, uid, 'read')
self.check_access_rule(cr, uid, ids, 'read')
else:
self.check_access_rights(cr, uid, 'write')
self.check_access_rule(cr, uid, ids, 'write')
fol_obj = self.pool['mail.followers']
fol_ids = fol_obj.search(
cr, SUPERUSER_ID, [
('res_model', '=', self._name),
('res_id', 'in', ids),
('partner_id', 'in', partner_ids)
], context=context)
return fol_obj.unlink(cr, SUPERUSER_ID, fol_ids, context=context)
def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields, auto_follow_fields=None, context=None):
""" Returns the list of relational fields linking to res.users that should
trigger an auto subscribe. The default list checks for the fields
- called 'user_id'
- linking to res.users
- with track_visibility set
In OpenERP V7, this is sufficient for all major addons such as opportunity,
project, issue, recruitment and sale.
Override this method if a custom behavior is needed about fields
that automatically subscribe users.
"""
if auto_follow_fields is None:
auto_follow_fields = ['user_id']
user_field_lst = []
for name, field in self._fields.items():
if name in auto_follow_fields and name in updated_fields and getattr(field, 'track_visibility', False) and field.comodel_name == 'res.users':
user_field_lst.append(name)
return user_field_lst
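# Hedged override sketch (the model name 'my_model' and the 'reviewer_id'
# field are hypothetical): a module that also tracks another many2one to
# res.users could widen the default field list like this:
#
#   def _message_get_auto_subscribe_fields(self, cr, uid, updated_fields,
#                                          auto_follow_fields=None, context=None):
#       if auto_follow_fields is None:
#           auto_follow_fields = ['user_id', 'reviewer_id']
#       return super(my_model, self)._message_get_auto_subscribe_fields(
#           cr, uid, updated_fields, auto_follow_fields=auto_follow_fields,
#           context=context)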
def _message_auto_subscribe_notify(self, cr, uid, ids, partner_ids, context=None):
""" Send notifications to the partners automatically subscribed to the thread
Override this method if a custom behavior is needed about partners
that should be notified or messages that should be sent
"""
# find the first email message and set it as unread for the auto-subscribed partners, so that they get a notification
if partner_ids:
for record_id in ids:
message_obj = self.pool.get('mail.message')
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id),
('type', '=', 'email')], limit=1, context=context)
if not msg_ids:
msg_ids = message_obj.search(cr, SUPERUSER_ID, [
('model', '=', self._name),
('res_id', '=', record_id)], limit=1, context=context)
if msg_ids:
notification_obj = self.pool.get('mail.notification')
notification_obj._notify(cr, uid, msg_ids[0], partners_to_notify=partner_ids, context=context)
message = message_obj.browse(cr, uid, msg_ids[0], context=context)
if message.parent_id:
partner_ids_to_parent_notify = set(partner_ids).difference(partner.id for partner in message.parent_id.notified_partner_ids)
for partner_id in partner_ids_to_parent_notify:
notification_obj.create(cr, uid, {
'message_id': message.parent_id.id,
'partner_id': partner_id,
'is_read': True,
}, context=context)
def message_auto_subscribe(self, cr, uid, ids, updated_fields, context=None, values=None):
""" Handle auto subscription. Two methods for auto subscription exist:
- tracked res.users relational fields, such as user_id fields. Those fields
must be relation fields toward a res.users record, and must have the
track_visibility attribute set.
- using subtypes parent relationship: check if the current model being
modified has an header record (such as a project for tasks) whose followers
can be added as followers of the current records. Example of structure
with project and task:
- st_project_1.parent_id = st_task_1
- st_project_1.res_model = 'project.project'
- st_project_1.relation_field = 'project_id'
- st_task_1.model = 'project.task'
:param list updated_fields: list of updated fields to track
:param dict values: updated values; if None, the first record will be browsed
to get the values. Added after releasing 7.0, therefore
not merged with the updated_fields argument.
"""
subtype_obj = self.pool.get('mail.message.subtype')
follower_obj = self.pool.get('mail.followers')
new_followers = dict()
# fetch auto_follow_fields: res.users relation fields whose changes are tracked for subscription
user_field_lst = self._message_get_auto_subscribe_fields(cr, uid, updated_fields, context=context)
# fetch header subtypes
header_subtype_ids = subtype_obj.search(cr, uid, ['|', ('res_model', '=', False), ('parent_id.res_model', '=', self._name)], context=context)
subtypes = subtype_obj.browse(cr, uid, header_subtype_ids, context=context)
# if no change in tracked field or no change in tracked relational field: quit
relation_fields = set([subtype.relation_field for subtype in subtypes if subtype.relation_field is not False])
if not any(relation in updated_fields for relation in relation_fields) and not user_field_lst:
return True
# legacy behavior: if values is not given, compute the values by browsing
# @TDENOTE: remove me in 8.0
if values is None:
record = self.browse(cr, uid, ids[0], context=context)
values = {}
for updated_field in updated_fields:
field_value = getattr(record, updated_field)
if isinstance(field_value, BaseModel):
field_value = field_value.id
values[updated_field] = field_value
# find followers of headers, update structure for new followers
headers = set()
for subtype in subtypes:
if subtype.relation_field and values.get(subtype.relation_field):
headers.add((subtype.res_model, values.get(subtype.relation_field)))
if headers:
header_domain = ['|'] * (len(headers) - 1)
for header in headers:
header_domain += ['&', ('res_model', '=', header[0]), ('res_id', '=', header[1])]
header_follower_ids = follower_obj.search(
cr, SUPERUSER_ID,
header_domain,
context=context
)
for header_follower in follower_obj.browse(cr, SUPERUSER_ID, header_follower_ids, context=context):
for subtype in header_follower.subtype_ids:
if subtype.parent_id and subtype.parent_id.res_model == self._name:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.parent_id.id)
elif subtype.res_model is False:
new_followers.setdefault(header_follower.partner_id.id, set()).add(subtype.id)
# add followers coming from res.users relational fields that are tracked
user_ids = [values[name] for name in user_field_lst if values.get(name)]
user_pids = [user.partner_id.id for user in self.pool.get('res.users').browse(cr, SUPERUSER_ID, user_ids, context=context)]
for partner_id in user_pids:
new_followers.setdefault(partner_id, None)
for pid, subtypes in new_followers.items():
subtypes = list(subtypes) if subtypes is not None else None
self.message_subscribe(cr, uid, ids, [pid], subtypes, context=context)
self._message_auto_subscribe_notify(cr, uid, ids, user_pids, context=context)
return True
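# Worked example of the OR-domain built above (values are illustrative): with
# headers = set([('project.project', 1), ('project.project', 4)]),
# len(headers) - 1 == 1 leading '|' is prepended, yielding the prefix-notation
# domain
#   ['|',
#    '&', ('res_model', '=', 'project.project'), ('res_id', '=', 1),
#    '&', ('res_model', '=', 'project.project'), ('res_id', '=', 4)]
# i.e. followers of either header record match; n '&' terms need n - 1 '|'
# operators.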
#------------------------------------------------------
# Thread state
#------------------------------------------------------
def message_mark_as_unread(self, cr, uid, ids, context=None):
""" Set as unread. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
is_read=false
WHERE
message_id IN (SELECT id from mail_message where res_id=any(%s) and model=%s limit 1) and
partner_id = %s
''', (ids, self._name, partner_id))
self.pool.get('mail.notification').invalidate_cache(cr, uid, ['is_read'], context=context)
return True
def message_mark_as_read(self, cr, uid, ids, context=None):
""" Set as read. """
partner_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).partner_id.id
cr.execute('''
UPDATE mail_notification SET
is_read=true
WHERE
message_id IN (SELECT id FROM mail_message WHERE res_id=ANY(%s) AND model=%s) AND
partner_id = %s
''', (ids, self._name, partner_id))
self.pool.get('mail.notification').invalidate_cache(cr, uid, ['is_read'], context=context)
return True
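# Note on the raw SQL above: psycopg2 adapts a Python list to a SQL array, so
# "res_id = ANY(%s)" matches any id in the list, e.g.:
#   cr.execute('SELECT id FROM mail_message WHERE res_id = ANY(%s)', ([1, 2, 3],))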
#------------------------------------------------------
# Thread suggestion
#------------------------------------------------------
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Return a list of suggested threads, sorted by the numbers of followers"""
if context is None:
context = {}
# TDE HACK: originally by MAT from portal/mail_mail.py but not working until the inheritance graph bug is solved in trunk
# TDE FIXME: relocate in portal when it is no longer necessary to reload the hr.employee model in an additional bridge module
if 'is_portal' in self.pool['res.groups']._fields:
user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
if any(group.is_portal for group in user.groups_id):
return []
threads = []
if removed_suggested_threads is None:
removed_suggested_threads = []
thread_ids = self.search(cr, uid, [('id', 'not in', removed_suggested_threads), ('message_is_follower', '=', False)], context=context)
for thread in self.browse(cr, uid, thread_ids, context=context):
data = {
'id': thread.id,
'popularity': len(thread.message_follower_ids),
'name': thread.name,
'image_small': thread.image_small
}
threads.append(data)
return sorted(threads, key=lambda x: (x['popularity'], x['id']), reverse=True)[:3]
def message_change_thread(self, cr, uid, id, new_res_id, new_model, context=None):
"""
Transfert the list of the mail thread messages from an model to another
:param id : the old res_id of the mail.message
:param new_res_id : the new res_id of the mail.message
:param new_model : the name of the new model of the mail.message
Example : self.pool.get("crm.lead").message_change_thread(self, cr, uid, 2, 4, "project.issue", context)
will transfert thread of the lead (id=2) to the issue (id=4)
"""
# get the subtype id of the comment message
subtype_res_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'mail.mt_comment', raise_if_not_found=True)
# get the ids of the comment and non-comment messages of the thread
message_obj = self.pool.get('mail.message')
msg_ids_comment = message_obj.search(cr, uid, [
('model', '=', self._name),
('res_id', '=', id),
('subtype_id', '=', subtype_res_id)], context=context)
msg_ids_not_comment = message_obj.search(cr, uid, [
('model', '=', self._name),
('res_id', '=', id),
('subtype_id', '!=', subtype_res_id)], context=context)
# update the messages
message_obj.write(cr, uid, msg_ids_comment, {"res_id" : new_res_id, "model" : new_model}, context=context)
message_obj.write(cr, uid, msg_ids_not_comment, {"res_id" : new_res_id, "model" : new_model, "subtype_id" : None}, context=context)
return True
| agpl-3.0 |
nikkuman/jsunpack-n | debug.py | 23 | 5831 | #!/usr/bin/python
'''
Jsunpackn - A generic JavaScript Unpacker Network Edition
Copyright (C) 2010 Blake Hartstein
http://jsunpack.jeek.org/
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
'''
from time import time
class DebugStats:
'''Used to track performance statistics
Within the application being debugged, you should not modify the members
directly. Instead, modify those elements by using the add* functions like
add_launch and add_detect
Attributes:
js_launches: One element for each time the SpiderMonkey js was called,
whether because of a script error or when evaluating other
versions of the environment.
rule_detects: One element for each time YARA detection was run.
'''
# class-level totals shared across instances, read and reset by the
# static helpers below
total_js_launches = []
total_rule_detects = []
ignored_main = 0
def __init__(self, name, tmpdir):
self.name = name
self.tmpdir = tmpdir
self.js_launches = []
self.rule_detects = []
self.html_parsing = []
self.total_js_launches = []
self.total_rule_detects = []
self.ignored_main = 0
self.before_decode = None
self.during_decode = None
self.responsibility = None
self.start = None
def add_launch(self, script_filename):
'''
Adds a launch
'''
self.js_launches.append([{'filename':script_filename},
self.end_timer()])
self.total_js_launches.append([{'filename':script_filename},
self.end_timer()])
if self.end_timer() > 3:
print 'add_launch %s/%s took %.02f ' % (self.name, script_filename,
self.end_timer())
def add_detect(self, contents):
'''
Adds a detection
'''
self.rule_detects.append([{'len':len(contents)}, self.end_timer()])
self.total_rule_detects.append([{'len':len(contents)},
self.end_timer()])
fout = open('%s/signature_%03d' % (self.tmpdir, self.end_timer()), 'wb')
fout.write(contents)
fout.close()
def start_main(self):
'''
Start main
'''
self.before_decode = self.during_decode = time()
self.responsibility = { 'init': 0, 'decoding':0, 'shellcode':0 }
def record_main(self, record_type):
'''
Record main
'''
right_now = time()
self.responsibility[record_type] = right_now - self.during_decode
self.during_decode = right_now
def finalize_main(self):
'''
Finalize main
'''
if time() - self.before_decode > 3:
print ('main_decoder %s took %.02f seconds (ignored %d urls since '
'last print)' % (self.name[0:20],
time() - self.before_decode,
DebugStats.ignored_main))
if (self.responsibility['init'] > 0.5 or
self.responsibility['shellcode'] > 1):
print ('\t(%.02f init, %.02f decoding, %.02f sc)'
% (self.responsibility['init'],
self.responsibility['decoding'],
self.responsibility['shellcode']))
DebugStats.ignored_main = 0
elif DebugStats.ignored_main >= 10:
print ('main_decoder ignored %d urls since they are below threshold'
% (DebugStats.ignored_main))
DebugStats.ignored_main = 0
else:
DebugStats.ignored_main += 1
def add_timer(self):
'''
Add timer
'''
self.start = time()
def end_timer(self):
'''
Get the time elapsed
'''
return (time() - self.start)
def number_launches(self):
'''
Get the number of js launches
'''
return len(self.js_launches)
def js_time(self):
'''
Js time
'''
secs = 0
for js_launch in self.js_launches:
secs += js_launch[1]
return secs
def detect_time(self):
'''
Detect times
'''
secs = 0
for rule_detects in self.rule_detects:
secs += rule_detects[1]
return secs
@staticmethod
def number_total_launches():
'''
Get the total number of times js has been launched
'''
return len(DebugStats.total_js_launches)
@staticmethod
def total_js_time():
'''
Get the total runtime of all js launches combined
'''
secs = 0
for launches in DebugStats.total_js_launches:
secs += launches[1]
return secs
@staticmethod
def total_detect_time():
'''
Total detect time
'''
secs = 0
for detects in DebugStats.total_rule_detects:
secs += detects[1]
return secs
@staticmethod
def reset_total_stats():
'''
Reset total stats
'''
DebugStats.total_js_launches = []
DebugStats.total_rule_detects = []
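# Minimal usage sketch (the url and filename below are hypothetical):
#
#   stats = DebugStats('http://example.com/page', '/tmp/jsunpack')
#   stats.add_timer()
#   # ... run SpiderMonkey on decoded.js ...
#   stats.add_launch('decoded.js')
#   print 'js time so far: %.02f over %d launches' % (stats.js_time(),
#       stats.number_launches())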
| gpl-2.0 |
GitHublong/hue | desktop/core/ext-py/Django-1.6.10/django/core/files/uploadedfile.py | 223 | 4156 | """
Classes representing uploaded files.
"""
import os
from io import BytesIO
from django.conf import settings
from django.core.files.base import File
from django.core.files import temp as tempfile
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2**10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != 2:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def close(self):
pass
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None)
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
from_dict = classmethod(from_dict)
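# Usage sketch (illustrative values): building an in-memory upload, e.g. for
# tests.
#
#   upload = SimpleUploadedFile.from_dict({
#       'filename': 'hello.txt',
#       'content': b'hello world',
#       'content-type': 'text/plain',
#   })
#   assert upload.name == 'hello.txt' and upload.size == 11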
| apache-2.0 |
tesidroni/mp | Lib/lib2to3/fixes/fix_execfile.py | 324 | 1998 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for execfile.
This converts usages of the execfile function into calls to the built-in
exec() function.
"""
from .. import fixer_base
from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
ArgList, String, syms)
class FixExecfile(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
|
power< 'execfile' trailer< '(' filename=any ')' > >
"""
def transform(self, node, results):
assert results
filename = results["filename"]
globals = results.get("globals")
locals = results.get("locals")
# Copy over the prefix from the right parentheses end of the execfile
# call.
execfile_paren = node.children[-1].children[-1].clone()
# Construct open().read().
open_args = ArgList([filename.clone()], rparen=execfile_paren)
open_call = Node(syms.power, [Name(u"open"), open_args])
read = [Node(syms.trailer, [Dot(), Name(u'read')]),
Node(syms.trailer, [LParen(), RParen()])]
open_expr = [open_call] + read
# Wrap the open call in a compile call. This is so the filename will be
# preserved in the execed code.
filename_arg = filename.clone()
filename_arg.prefix = u" "
exec_str = String(u"'exec'", u" ")
compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
compile_call = Call(Name(u"compile"), compile_args, u"")
# Finally, replace the execfile call with an exec call.
args = [compile_call]
if globals is not None:
args.extend([Comma(), globals.clone()])
if locals is not None:
args.extend([Comma(), locals.clone()])
return Call(Name(u"exec"), args, prefix=node.prefix)
| gpl-3.0 |
thresholdsoftware/asylum | openerp/addons/account/wizard/account_open_closed_fiscalyear.py | 14 | 2510 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_open_closed_fiscalyear(osv.osv_memory):
_name = "account.open.closed.fiscalyear"
_description = "Choose Fiscal Year"
_columns = {
'fyear_id': fields.many2one('account.fiscalyear', \
'Fiscal Year', required=True, help='Select the fiscal year for which you want to remove the entries of its End of Year Entries Journal'),
}
def remove_entries(self, cr, uid, ids, context=None):
move_obj = self.pool.get('account.move')
data = self.browse(cr, uid, ids, context=context)[0]
period_journal = data.fyear_id.end_journal_period_id or False
if not period_journal:
raise osv.except_osv(_('Error!'), _("You have to set the 'End of Year Entries Journal' for this Fiscal Year which is set after generating opening entries from 'Generate Opening Entries'."))
if period_journal.period_id.state == 'done':
raise osv.except_osv(_('Error!'), _("You can not cancel closing entries if the 'End of Year Entries Journal' period is closed."))
ids_move = move_obj.search(cr, uid, [('journal_id','=',period_journal.journal_id.id),('period_id','=',period_journal.period_id.id)])
if ids_move:
cr.execute('delete from account_move where id IN %s', (tuple(ids_move),))
return {'type': 'ir.actions.act_window_close'}
account_open_closed_fiscalyear()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
derickr/mongo-c-driver | build/generate-evergreen-config.py | 2 | 2332 | #!/usr/bin/env python
#
# Copyright 2018-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate C Driver's config.yml for Evergreen testing.
We find that generating configuration from Python data structures is more
legible than Evergreen's matrix syntax or a handwritten file.
Written for Python 2.6+. Requires the evergreen_config_generator package.
"""
from collections import OrderedDict as OD
from os.path import dirname, join as joinpath, normpath
import sys
try:
from evergreen_config_generator import generate
except ImportError:
sys.stderr.write("""\
Could not find evergreen_config_generator package, try:
python -m pip install -e git+https://github.com/mongodb-labs/\
drivers-evergreen-tools#subdirectory=evergreen_config_generator\
&egg=evergreen_config_generator
""")
raise
from evergreen_config_lib.functions import all_functions
from evergreen_config_lib.tasks import all_tasks
from evergreen_config_lib.variants import all_variants
config = OD([
('stepback', True),
('command_type', 'system'),
# 40 minute max except valgrind tasks, which get 2 hours.
('exec_timeout_secs', 2400),
('functions', all_functions),
('pre', [
OD([('func', 'fetch source')]),
OD([('func', 'windows fix')]),
OD([('func', 'make files executable')]),
OD([('func', 'prepare kerberos')]),
]),
('post', [
OD([('func', 'backtrace')]),
OD([('func', 'upload working dir')]),
OD([('func', 'upload mo artifacts')]),
OD([('func', 'upload test results')]),
OD([('func', 'cleanup')]),
]),
('tasks', all_tasks),
('buildvariants', all_variants),
])
this_dir = dirname(__file__)
evergreen_dir = normpath(joinpath(this_dir, '../.evergreen'))
generate(config, joinpath(evergreen_dir, 'config.yml'))
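# Hypothetical extension sketch (the 'lint' function name is made up): extra
# steps can be prepended to the ordered config before calling generate(), e.g.
#   config['post'].insert(0, OD([('func', 'lint')]))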
| apache-2.0 |
dpac-vlsi/SynchroTrace | src/arch/x86/isa/insts/simd64/integer/data_conversion.py | 91 | 2209 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# CVTPI2PS and CVTPI2PD are implemented in simd128
# PI2FW
# PI2FD
'''
| bsd-3-clause |
thruflo/pyramid_weblayer | src/pyramid_weblayer/request_logger.py | 1 | 5109 | # -*- coding: utf-8 -*-
"""WSGI middleware to log all requests to Dynamodb on AWS.
The username and API key are inferred from the following environment variables:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
The user needs a policy on AWS that allows it to PUT to DynamoDB.
You can ignore the body of requests by controlling the regexes set by these env vars:
- REQUEST_LOGGER_MIMETYPE_IGNORE_REGEX
- REQUEST_LOGGER_PATH_IGNORE_REGEX
"""
import logging
logger = logging.getLogger(__name__)
import threading
import os
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from boto import dynamodb2
import re
import datetime
DEFAULTS = {
'request_logger.max_body_size_in_bytes': os.environ.get('REQUEST_LOGGER_MAX_BODY_SIZE_IN_BYTES', 2000),
'request_logger.path_ignore_regex': os.environ.get('REQUEST_LOGGER_PATH_IGNORE_REGEX', '.*/auth/.*'),
'request_logger.mimetype_ignore_regex': os.environ.get('REQUEST_LOGGER_MIMETYPE_IGNORE_REGEX', '.*multipart.*'),
'request_logger.request_id_header_name': os.environ.get('REQUEST_LOGGER_REQUEST_ID_HEADER_NAME', 'X_REQUEST_ID'),
}
# 20 KB.
MAX_BODY_SIZE_IN_BYTES = 20000
IGNORE_PATH_VALIDATOR = re.compile('{0}'.format(DEFAULTS['request_logger.path_ignore_regex']))
IGNORE_MIMETYPE_VALIDATOR = re.compile('{0}'.format(DEFAULTS['request_logger.mimetype_ignore_regex']))
WRITE_METHODS = ('POST', 'PUT', 'DELETE', 'PATCH')
REQUEST_ID_HEADER_NAME = DEFAULTS['request_logger.request_id_header_name']
def client_factory():
"""Return an AmazonDB client that provides a
``put_item(data=data)`` method.
"""
return Table(
'request_storer',
schema=[HashKey('request_id')],
connection=dynamodb2.connect_to_region('eu-west-1')
)
class RequestLoggerTweenFactory(object):
"""Simple pyramid tween to log all of our requests by Heroku request id."""
def __init__(self, handler, registry, client=None):
self.handler = handler
self.registry = registry
self.settings = registry.settings
self.client = client
if not self.client:
# If we are testing and haven't supplied a client, mock it out.
if self.settings.get('mode') == 'testing':
from mock import Mock
self.client = Mock()
else:
self.client = client_factory()
def __call__(self, request):
"""Request logger pyramid tween, logs requests that have errored and
are of HTTP WRITE_METHODS to DynamoDB."""
# Unpack the request.
path = request.path
headers = request.headers
# Figure out if we'll want to log an exception or not.
request_id = headers.get(REQUEST_ID_HEADER_NAME, None)
should_log_exc = request_id and request.method.upper() in WRITE_METHODS
# Let the app actually handle the request.
try:
response = self.handler(request)
except Exception:
# Extract the information if needed and re-raise.
if should_log_exc:
body = self.get_body(request)
self.log_request(request_id, path, headers, body)
raise
# Then, if the request was interesting and resulted in
# an error response, spawn a background thread to log
# the request data.
if should_log_exc and response.status_int > 399:
body = self.get_body(request)
self.log_request(request_id, path, headers, body)
return response
def get_body(self, request):
"""Read the request body upto a maximum length."""
if IGNORE_MIMETYPE_VALIDATOR.match(request.content_type):
return 'N/a - multipart request.'
if IGNORE_PATH_VALIDATOR.match(request.path):
return 'N/a - may contain password'
# Get the body file and wind it back to the beginning.
sock = request.body_file_seekable
start_pos = sock.tell()
sock.seek(0)
# Read up to a maximum length.
body = sock.read(MAX_BODY_SIZE_IN_BYTES)
# And just for sanity's sake, put it back where we
# found it.
sock.seek(start_pos)
return body
def put_to_dynamodb(self, data):
"""Make a PUT request to dynamodb2 and insert the data"""
try:
self.client.put_item(data=data)
except UnicodeDecodeError:
data['body'] = 'Unicode error when parsing'
self.client.put_item(data=data)
def log_request(self, key, path, headers, body):
"""Log the path, headers and body of the HTTP request on a
key value store db, key is heroku request id.
"""
if not body:
body = {}
# Build a dict with key, headers and body.
now = datetime.datetime.now().isoformat()
data = {'request_id': key, 'body': body, 'path': path, 'created': now}
for k, v in headers.items():
data[k] = v
# Fire and forget.
t = threading.Thread(target=self.put_to_dynamodb, args=(data,))
t.start()
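# Wiring sketch (assumes a standard Pyramid app factory; the dotted path must
# match where this module actually lives):
#
#   from pyramid.config import Configurator
#
#   def main(global_config, **settings):
#       config = Configurator(settings=settings)
#       config.add_tween(
#           'pyramid_weblayer.request_logger.RequestLoggerTweenFactory')
#       return config.make_wsgi_app()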
| unlicense |
spraints/plugin.video.prime_instant | default.py | 1 | 63995 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import socket
import mechanize
import cookielib
import sys
import re
import os
import json
import time
import string
import random
import shutil
import subprocess
import xbmcplugin
import xbmcgui
import xbmcaddon
import xbmcvfs
addon = xbmcaddon.Addon()
addonID = addon.getAddonInfo('id')
addonFolder = downloadScript = xbmc.translatePath('special://home/addons/'+addonID).decode('utf-8')
addonUserDataFolder = xbmc.translatePath("special://profile/addon_data/"+addonID).decode('utf-8')
icon = os.path.join(addonFolder, "icon.png").encode('utf-8')
def translation(id):
return addon.getLocalizedString(id).encode('utf-8')
if not os.path.exists(os.path.join(addonUserDataFolder, "settings.xml")):
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30081)+',10000,'+icon+')')
addon.openSettings()
socket.setdefaulttimeout(30)
pluginhandle = int(sys.argv[1])
cj = cookielib.MozillaCookieJar()
downloadScript = os.path.join(addonFolder, "download.py").encode('utf-8')
downloadScriptTV = os.path.join(addonFolder, "downloadTV.py").encode('utf-8')
cacheFolder = os.path.join(addonUserDataFolder, "cache")
cacheFolderCoversTMDB = os.path.join(cacheFolder, "covers")
cacheFolderFanartTMDB = os.path.join(cacheFolder, "fanart")
addonFolderResources = os.path.join(addonFolder, "resources")
defaultFanart = os.path.join(addonFolderResources, "fanart.png")
libraryFolder = os.path.join(addonUserDataFolder, "library")
libraryFolderMovies = os.path.join(libraryFolder, "Movies")
libraryFolderTV = os.path.join(libraryFolder, "TV")
cookieFile = os.path.join(addonUserDataFolder, "cookies")
debugFile = os.path.join(addonUserDataFolder, "debug")
preferAmazonTrailer = addon.getSetting("preferAmazonTrailer") == "true"
showNotification = addon.getSetting("showNotification") == "true"
showOriginals = addon.getSetting("showOriginals") == "true"
showLibrary = addon.getSetting("showLibrary") == "true"
showKids = addon.getSetting("showKids") == "true"
forceView = addon.getSetting("forceView") == "true"
updateDB = addon.getSetting("updateDB") == "true"
useTMDb = addon.getSetting("useTMDb") == "true"
watchlistOrder = addon.getSetting("watchlistOrder")
watchlistOrder = ["DATE_ADDED_DESC", "TITLE_ASC"][int(watchlistOrder)]
maxBitrate = addon.getSetting("maxBitrate")
maxBitrate = [300, 600, 900, 1350, 2000, 2500, 4000, 6000, 10000, -1][int(maxBitrate)]
siteVersion = addon.getSetting("siteVersion")
apiMain = ["atv-ps", "atv-ps-eu", "atv-ps-eu"][int(siteVersion)]
rtmpMain = ["azusfms", "azeufms", "azeufms"][int(siteVersion)]
siteVersion = ["com", "co.uk", "de"][int(siteVersion)]
viewIdMovies = addon.getSetting("viewIdMovies")
viewIdShows = addon.getSetting("viewIdShows")
viewIdSeasons = addon.getSetting("viewIdSeasons")
viewIdEpisodes = addon.getSetting("viewIdEpisodes")
viewIdDetails = addon.getSetting("viewIdDetails")
urlMain = "http://www.amazon."+siteVersion
urlMainS = "https://www.amazon."+siteVersion
addon.setSetting('email', '')
addon.setSetting('password', '')
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
userAgent = "Mozilla/5.0 (Windows NT 6.3; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0"
opener.addheaders = [('User-agent', userAgent)]
if not os.path.isdir(addonUserDataFolder):
os.mkdir(addonUserDataFolder)
if not os.path.isdir(cacheFolder):
os.mkdir(cacheFolder)
if not os.path.isdir(cacheFolderCoversTMDB):
os.mkdir(cacheFolderCoversTMDB)
if not os.path.isdir(cacheFolderFanartTMDB):
os.mkdir(cacheFolderFanartTMDB)
if not os.path.isdir(libraryFolder):
os.mkdir(libraryFolder)
if not os.path.isdir(libraryFolderMovies):
os.mkdir(libraryFolderMovies)
if not os.path.isdir(libraryFolderTV):
os.mkdir(libraryFolderTV)
if os.path.exists(cookieFile):
cj.load(cookieFile)
def index():
loginResult = login()
if loginResult=="prime":
addDir(translation(30002), "", 'browseMovies', "")
addDir(translation(30003), "", 'browseTV', "")
xbmcplugin.endOfDirectory(pluginhandle)
elif loginResult=="noprime":
listOriginals()
elif loginResult=="none":
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30082)+',10000,'+icon+')')
def browseMovies():
addDir(translation(30004), urlMain+"/gp/video/watchlist/movie/?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if showLibrary:
addDir(translation(30005), urlMain+"/gp/video/library/movie?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if siteVersion=="de":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010075031%2Cn%3A3356018031&sort=popularity-rank", 'listMovies', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30014), "", 'listDecadesMovie', "")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3010075031%2Cn%3A!3010076031%2Cn%3A3015915031%2Cp_n_theme_browse-bin%3A3015972031%2Cp_85%3A3282148031&ie=UTF8", 'listMovies', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031&sort=date-desc-rank", 'listMovies', "")
addDir(translation(30009), urlMain+"/s/?n=4963842031", 'listMovies', "")
elif siteVersion=="com":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613704011&sort=popularity-rank", 'listMovies', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613704011&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30012), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613704011&pickerToList=feature_five_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30013), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613704011&pickerToList=feature_six_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30014), "", 'listDecadesMovie', "")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_theme_browse-bin%3A2650365011&ie=UTF8", 'listMovies', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613704011&sort=date-desc-rank", 'listMovies', "")
elif siteVersion=="co.uk":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356010031&sort=popularity-rank", 'listMovies', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "movie")
addDir(translation(30014), "", 'listDecadesMovie', "")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_theme_browse-bin%3A3046745031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356010031&sort=date-desc-rank", 'listMovies', "")
addDir(translation(30015), "movies", 'search', "")
xbmcplugin.endOfDirectory(pluginhandle)
def browseTV():
addDir(translation(30004), urlMain+"/gp/video/watchlist/tv/?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if showLibrary:
addDir(translation(30005), urlMain+"/gp/video/library/tv/?ie=UTF8&show=all&sort="+watchlistOrder, 'listWatchList', "")
if siteVersion=="de":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010075031%2Cn%3A3356019031&sort=popularity-rank", 'listShows', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356019031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "tv")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3010075031%2Cn%3A!3010076031%2Cn%3A3015916031%2Cp_n_theme_browse-bin%3A3015972031%2Cp_85%3A3282148031&ie=UTF8", 'listShows', "")
addDir(translation(30010), urlMain+"/gp/search/ajax/?_encoding=UTF8&keywords=[OV]&rh=n%3A3010075031%2Cn%3A3015916031%2Ck%3A[OV]%2Cp_85%3A3282148031&sort=date-desc-rank", 'listShows', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3279204031%2Cn%3A3010075031%2Cn%3A3015916031&sort=date-desc-rank", 'listShows', "")
elif siteVersion=="com":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613705011&sort=popularity-rank", 'listShows', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613705011&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "tv")
addDir(translation(30012), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613705011&pickerToList=feature_five_browse-bin&ie=UTF8", 'listGenres', "", "tv")
addDir(translation(30013), urlMain+"/gp/search/other/?rh=n%3A2676882011%2Cn%3A7613705011&pickerToList=feature_six_browse-bin&ie=UTF8", 'listGenres', "", "tv")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613705011%2Cp_n_theme_browse-bin%3A2650365011&sort=csrank&ie=UTF8", 'listShows', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A2858778011%2Cn%3A7613705011&sort=date-desc-rank", 'listShows', "")
elif siteVersion=="co.uk":
addDir(translation(30006), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356011031&sort=popularity-rank", 'listShows', "")
addDir(translation(30011), urlMain+"/gp/search/other/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356011031&pickerToList=theme_browse-bin&ie=UTF8", 'listGenres', "", "tv")
if showKids:
addDir(translation(30007), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356011031%2Cp_n_theme_browse-bin%3A3046745031&sort=popularity-rank&ie=UTF8", 'listShows', "")
addDir(translation(30008), urlMain+"/gp/search/ajax/?_encoding=UTF8&rh=n%3A3010085031%2Cn%3A3356011031&sort=date-desc-rank", 'listShows', "")
if showOriginals:
addDir("Amazon Originals: Pilot Season 2015", "", 'listOriginals', "")
addDir(translation(30015), "tv", 'search', "")
xbmcplugin.endOfDirectory(pluginhandle)
def listDecadesMovie():
if siteVersion=="de":
addDir(translation(30016), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289642031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30017), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289643031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30018), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289644031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30019), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289645031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30020), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289646031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30021), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289647031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30022), urlMain+"/gp/search/ajax/?rh=n%3A3279204031%2Cn%3A!3010076031%2Cn%3A3356018031%2Cp_n_feature_three_browse-bin%3A3289648031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
elif siteVersion=="com":
addDir(translation(30016), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651255011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30017), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651256011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30018), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651257011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30019), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651258011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30020), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651259011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30021), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651260011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30022), urlMain+"/gp/search/ajax/?rh=n%3A2676882011%2Cn%3A7613704011%2Cp_n_feature_three_browse-bin%3A2651261011&sort=popularity-rank&ie=UTF8", 'listMovies', "")
elif siteVersion=="co.uk":
addDir(translation(30016), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289666031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30017), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289667031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30018), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289668031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30019), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289669031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30020), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289670031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30021), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289671031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
addDir(translation(30022), urlMain+"/gp/search/ajax/?rh=n%3A3280626031%2Cn%3A!3010086031%2Cn%3A3356010031%2Cp_n_feature_three_browse-bin%3A3289672031&sort=popularity-rank&ie=UTF8", 'listMovies', "")
xbmcplugin.endOfDirectory(pluginhandle)
def listOriginals():
if siteVersion=="de":
content = opener.open(urlMain+"/b/?ie=UTF8&node=5457207031").read()
elif siteVersion=="com":
content = opener.open(urlMain+"/b/?ie=UTF8&node=9940930011").read()
elif siteVersion=="co.uk":
content = opener.open(urlMain+"/b/?ie=UTF8&node=5687760031").read()
debug(content)
match = re.compile("token : '(.+?)'", re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
content = content[content.find('<map name="pilots'):]
content = content[:content.find('</map>')]
spl = content.split('shape="rect"')
thumbs = {}
thumbs['maninthehighcastle'] = 'http://ecx.images-amazon.com/images/I/5114a5G6oQL.jpg'
thumbs['cocked'] = 'http://ecx.images-amazon.com/images/I/51ky16-xESL.jpg'
thumbs['maddogs'] = 'http://ecx.images-amazon.com/images/I/61mWRYn7U2L.jpg'
thumbs['thenewyorkerpresents'] = 'http://ecx.images-amazon.com/images/I/41Yb8SUjMzL.jpg'
thumbs['pointofhonor'] = 'http://ecx.images-amazon.com/images/I/51OBmT5ARUL.jpg'
thumbs['downdog'] = 'http://ecx.images-amazon.com/images/I/51N2zkhOxGL.jpg'
thumbs['salemrogers'] = 'http://ecx.images-amazon.com/images/I/510nXRWkoaL.jpg'
thumbs['table58'] = 'http://ecx.images-amazon.com/images/I/51AIPgzNiWL.jpg'
thumbs['buddytechdetective'] = 'http://ecx.images-amazon.com/images/I/513pbjgDLYL.jpg'
thumbs['sarasolvesit'] = 'http://ecx.images-amazon.com/images/I/51Y5G5RbLUL.jpg'
thumbs['stinkyanddirty'] = 'http://ecx.images-amazon.com/images/I/51WzytCUmdL.jpg'
thumbs['niko'] = 'http://ecx.images-amazon.com/images/I/51XjJrg9JLL.jpg'
thumbs['justaddmagic'] = 'http://ecx.images-amazon.com/images/I/5159YFd0hQL.jpg'
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile("/gp/product/(.+?)/", re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
titleT = title.lower().replace(' ', '').strip()
titleT = titleT.replace("pointofhonour", "pointofhonor")
titleT = titleT.replace("buddytechdective", "buddytechdetective")
titleT = titleT.replace("buddytechdetectives", "buddytechdetective")
titleT = titleT.replace("thestinkyanddirtyshow", "stinkyanddirty")
titleT = titleT.replace("nikkoandtheswordoflight", "niko")
titleT = titleT.replace("nikoandtheswordoflight", "niko")
thumb = ""
if titleT in thumbs:
thumb = thumbs[titleT]
addShowDir(title, videoID, "listSeasons", thumb, "tv")
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode(500)')
def listWatchList(url):
content = opener.open(url).read()
debug(content)
match = re.compile("token : '(.+?)'", re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('<div class="innerItem"')
dlParams = []
videoType = ""
showEntries = []
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</td>')]
if "/library/" in url or ("/watchlist/" in url and ("class='prime-meta'" in entry or 'class="prime-logo"' in entry)):
match = re.compile('data-prod-type="(.+?)"', re.DOTALL).findall(entry)
if match:
videoType = match[0]
match = re.compile('id="(.+?)"', re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
dlParams.append({'type':videoType, 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':''})
if videoType=="tv":
title = cleanSeasonTitle(title)
if title in showEntries:
continue
showEntries.append(title)
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = ""
if match:
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
if videoType == "movie":
addLinkR(title, videoID, "playVideo", thumbUrl, videoType)
else:
addShowDirR(title, videoID, "listSeasons", thumbUrl, videoType)
if videoType == "movie":
xbmcplugin.setContent(pluginhandle, "movies")
else:
xbmcplugin.setContent(pluginhandle, "tvshows")
if useTMDb and videoType == "movie":
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScript+', '+urllib.quote_plus(str(dlParams))+')')
elif useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScriptTV+', '+urllib.quote_plus(str(dlParams))+')')
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
if videoType == "movie":
xbmc.executebuiltin('Container.SetViewMode('+viewIdMovies+')')
else:
xbmc.executebuiltin('Container.SetViewMode('+viewIdShows+')')
def listMovies(url):
xbmcplugin.setContent(pluginhandle, "movies")
content = opener.open(url).read()
debug(content)
content = content.replace("\\","")
if 'id="catCorResults"' in content:
content = content[:content.find('id="catCorResults"')]
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('id="result_')
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('asin="(.+?)"', re.DOTALL).findall(entry)
if match and ">Prime Instant Video<" in entry:
videoID = match[0]
match1 = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
match2 = re.compile('class="ilt2">(.+?)<', re.DOTALL).findall(entry)
title = ""
if match1:
title = match1[0]
elif match2:
title = match2[0]
title = cleanTitle(title)
match1 = re.compile('class="a-size-small a-color-secondary">(.+?)<', re.DOTALL).findall(entry)
match2 = re.compile('class="med reg subt">(.+?)<', re.DOTALL).findall(entry)
year = ""
if match1:
year = match1[0].strip()
if match2:
year = match2[0].strip()
dlParams.append({'type':'movie', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':year})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
match = re.compile('data-action="s-watchlist-add".+?class="a-button a-button-small(.+?)"', re.DOTALL).findall(entry)
if match and match[0]==" s-hidden":
addLinkR(title, videoID, "playVideo", thumbUrl, "movie", "", "", year)
else:
addLink(title, videoID, "playVideo", thumbUrl, "movie", "", "", year)
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScript+', '+urllib.quote_plus(str(dlParams))+')')
match = re.compile('class="pagnNext".*?href="(.+?)"', re.DOTALL).findall(content)
if match:
addDir(translation(30001), urlMain+match[0].replace("&amp;","&"), "listMovies", "DefaultTVShows.png")
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdMovies+')')
def listShows(url):
xbmcplugin.setContent(pluginhandle, "tvshows")
content = opener.open(url).read()
debug(content)
content = content.replace("\\","")
if 'id="catCorResults"' in content:
content = content[:content.find('id="catCorResults"')]
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('id="result_')
showEntries = []
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('asin="(.+?)"', re.DOTALL).findall(entry)
if match and ">Prime Instant Video<" in entry:
videoID = match[0]
match1 = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
match2 = re.compile('class="ilt2">(.+?)<', re.DOTALL).findall(entry)
title = ""
if match1:
title = match1[0]
elif match2:
title = match2[0]
title = cleanTitle(title)
title = cleanSeasonTitle(title)
if title in showEntries:
continue
showEntries.append(title)
match1 = re.compile('class="a-size-small a-color-secondary">(.+?)<', re.DOTALL).findall(entry)
match2 = re.compile('class="med reg subt">(.+?)<', re.DOTALL).findall(entry)
year = ""
if match1:
year = match1[0].strip()
if match2:
year = match2[0].strip()
dlParams.append({'type':'tv', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':year})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
match = re.compile('data-action="s-watchlist-add".+?class="a-button a-button-small(.*?)"', re.DOTALL).findall(entry)
if match and match[0]==" s-hidden":
addShowDirR(title, videoID, "listSeasons", thumbUrl, "tv")
else:
addShowDir(title, videoID, "listSeasons", thumbUrl, "tv")
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScriptTV+', '+urllib.quote_plus(str(dlParams))+')')
match = re.compile('class="pagnNext".*?href="(.+?)"', re.DOTALL).findall(content)
if match:
addDir(translation(30001), urlMain+match[0].replace("&amp;","&"), "listShows", "DefaultTVShows.png")
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdShows+')')
def listSimilarMovies(videoID):
xbmcplugin.setContent(pluginhandle, "movies")
content = opener.open(urlMain+"/gp/product/"+videoID).read()
debug(content)
match = re.compile("token : '(.+?)'", re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('<li class="packshot')
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</li>')]
if 'packshot-sash-prime' in entry:
match = re.compile("data-type='downloadable_(.+?)'", re.DOTALL).findall(entry)
if match:
videoType = match[0]
match = re.compile("asin='(.+?)'", re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
dlParams.append({'type':'movie', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':''})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = ""
if match:
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
if videoType == "movie":
addLinkR(title, videoID, "playVideo", thumbUrl, videoType)
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScript+', '+urllib.quote_plus(str(dlParams))+')')
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdMovies+')')
def listSimilarShows(videoID):
xbmcplugin.setContent(pluginhandle, "tvshows")
content = opener.open(urlMain+"/gp/product/"+videoID).read()
debug(content)
match = re.compile("token : '(.+?)'", re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
spl = content.split('<li class="packshot')
showEntries = []
dlParams = []
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</li>')]
if 'packshot-sash-prime' in entry:
match = re.compile("data-type='downloadable_(.+?)'", re.DOTALL).findall(entry)
if match:
videoType = match[0]
match = re.compile("asin='(.+?)'", re.DOTALL).findall(entry)
videoID = match[0]
match = re.compile('alt="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
dlParams.append({'type':'tv', 'id':videoID, 'title':cleanTitleTMDB(cleanSeasonTitle(title)), 'year':''})
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumbUrl = ""
if match:
thumbUrl = match[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
if videoType=="tv_season":
videoType="tv"
title = cleanSeasonTitle(title)
if title in showEntries:
continue
showEntries.append(title)
addShowDirR(title, videoID, "listSeasons", thumbUrl, videoType)
if useTMDb:
dlParams = json.dumps(dlParams)
xbmc.executebuiltin('XBMC.RunScript('+downloadScriptTV+', '+urllib.quote_plus(str(dlParams))+')')
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdShows+')')
def listSeasons(seriesName, seriesID, thumb):
xbmcplugin.setContent(pluginhandle, "seasons")
content = opener.open(urlMain+"/gp/product/"+seriesID).read()
debug(content)
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
contentMain = content
content = content[content.find('<select name="seasonAsinAndRef"'):]
content = content[:content.find('</select>')]
match = re.compile('<option value="(.+?):.+?data-a-html-content="(.+?)"', re.DOTALL).findall(content)
if match:
for seasonID, title in match:
if "dv-dropdown-prime" in title:
if "\n" in title:
title = title[:title.find("\n")]
addSeasonDir(title, seasonID, 'listEpisodes', thumb, seriesName, seriesID)
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdSeasons+')')
else:
listEpisodes(seriesID, seriesID, thumb, contentMain)
def listEpisodes(seriesID, seasonID, thumb, content="", seriesName=""):
xbmcplugin.setContent(pluginhandle, "episodes")
if not content:
content = opener.open(urlMain+"/gp/product/"+seasonID).read()
debug(content)
match = re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
if match:
addon.setSetting('csrfToken', match[0])
matchSeason = re.compile('"seasonNumber":"(.+?)"', re.DOTALL).findall(content)
spl = content.split('href="'+urlMain+'/gp/product')
for i in range(1, len(spl), 1):
entry = spl[i]
entry = entry[:entry.find('</li>')]
match = re.compile('class="episode-title">(.+?)<', re.DOTALL).findall(entry)
if match and ('class="prime-logo-small"' in entry or 'class="episode-status cell-free"' in entry):
title = match[0]
title = cleanTitle(title)
episodeNr = title[:title.find('.')]
title = title[title.find('.')+1:].strip()
match = re.compile('/(.+?)/', re.DOTALL).findall(entry)
episodeID = match[0]
match = re.compile('<p>.+?</span>(.+?)</p>', re.DOTALL).findall(entry)
desc = ""
if match:
desc = cleanTitle(match[0])
length = ""
match1 = re.compile('class="dv-badge runtime">(.+?)h(.+?)min<', re.DOTALL).findall(entry)
match2 = re.compile('class="dv-badge runtime">(.+?)Std.(.+?)Min.<', re.DOTALL).findall(entry)
match3 = re.compile('class="dv-badge runtime">(.+?)min<', re.DOTALL).findall(entry)
match4 = re.compile('class="dv-badge runtime">(.+?)Min.<', re.DOTALL).findall(entry)
if match1:
length = str(int(match1[0][0].strip())*60+int(match1[0][1].strip()))
elif match2:
length = str(int(match2[0][0].strip())*60+int(match2[0][1].strip()))
elif match3:
length = match3[0].strip()
elif match4:
length = match4[0].strip()
match = re.compile('class="dv-badge release-date">(.+?)<', re.DOTALL).findall(entry)
aired = ""
if match:
aired = match[0]+"-01-01"
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
if match:
thumb = match[0].replace("._SX133_QL80_.jpg","._SX400_.jpg")
match = re.compile('class="progress-bar">.+?width: (.+?)%', re.DOTALL).findall(entry)
playcount = 0
if match:
percentage = match[0]
if int(percentage)>95:
playcount = 1
addEpisodeLink(title, episodeID, 'playVideo', thumb, desc, length, matchSeason[0], episodeNr, seriesID, playcount, aired, seriesName)
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdEpisodes+')')
def listGenres(url, videoType):
content = opener.open(url).read()
debug(content)
content = content[content.find('<ul class="column vPage1">'):]
content = content[:content.find('</div>')]
match = re.compile('href="(.+?)">.+?>(.+?)</span>.+?>(.+?)<', re.DOTALL).findall(content)
for url, title, nr in match:
if videoType=="movie":
addDir(cleanTitle(title)+nr.replace("&nbsp;"," "), urlMain+url.replace("/s/","/mn/search/ajax/").replace("&amp;","&"), 'listMovies', "")
else:
addDir(cleanTitle(title), urlMain+url.replace("/s/","/mn/search/ajax/").replace("&amp;","&"), 'listShows', "")
xbmcplugin.endOfDirectory(pluginhandle)
def playVideo(videoID, selectQuality=False, playTrailer=False):
streamTitles = []
streamURLs = []
cMenu = False
if selectQuality:
cMenu = True
if maxBitrate==-1:
selectQuality = True
content=opener.open(urlMain+"/dp/"+videoID).read()
hasTrailer = False
if '"hasTrailer":true' in content:
hasTrailer = True
matchCID=re.compile('"customerID":"(.+?)"').findall(content)
if matchCID:
matchTitle=re.compile('"product":.+?"title":"(.+?)"', re.DOTALL).findall(content)
matchThumb=re.compile('"product":.+?"image":"(.+?)"', re.DOTALL).findall(content)
matchToken=re.compile('"csrfToken":"(.+?)"', re.DOTALL).findall(content)
matchSWFUrl=re.compile('\<script[\ \t]+type[\ \t]*=[\ \t]*"text\/javascript\"[\ \t]+src[\ \t]*=[\ \t]*"(.+?webplayer.+?)"\>\<\/script\>', re.DOTALL).findall(content)
flashContent=opener.open(matchSWFUrl[0]).read()
matchTitle=re.compile('"contentRating":".+?","name":"(.+?)"', re.DOTALL).findall(content)
matchThumb=re.compile('"video":.+?"thumbnailUrl":"(.+?)"', re.DOTALL).findall(content)
matchSWF=re.compile('LEGACY_FLASH_SWF="(.+?)"').findall(flashContent)
matchMID=re.compile('"marketplaceID":"(.+?)"').findall(content)
matchDID=re.compile('FLASH="(.+?)"').findall(flashContent)
if not playTrailer or (playTrailer and hasTrailer and preferAmazonTrailer and siteVersion!="com"):
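# fetch a fresh streaming token; the jQuery callback name and millisecond timestamps mimic the site's own AJAX request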
content=opener.open(urlMainS+'/gp/video/streaming/player-token.json?callback=jQuery1640'+''.join(random.choice(string.digits) for x in range(18))+'_'+str(int(time.time()*1000))+'&csrftoken='+urllib.quote_plus(matchToken[0])+'&_='+str(int(time.time()*1000))).read()
matchToken=re.compile('"token":"(.+?)"', re.DOTALL).findall(content)
content = ""
if playTrailer and hasTrailer and preferAmazonTrailer and siteVersion!="com":
content = opener.open('https://'+apiMain+'.amazon.com/cdp/catalog/GetStreamingTrailerUrls?version=1&format=json&firmware=WIN%2011,7,700,224%20PlugIn&marketplaceID='+urllib.quote_plus(matchMID[0])+'&token='+urllib.quote_plus(matchToken[0])+'&deviceTypeID='+urllib.quote_plus(matchDID[0])+'&asin='+videoID+'&customerID='+urllib.quote_plus(matchCID[0])+'&deviceID='+urllib.quote_plus(matchCID[0])+str(int(time.time()*1000))+videoID).read()
elif not playTrailer:
content = opener.open('https://'+apiMain+'.amazon.com/cdp/catalog/GetStreamingUrlSets?version=1&format=json&firmware=WIN%2011,7,700,224%20PlugIn&marketplaceID='+urllib.quote_plus(matchMID[0])+'&token='+urllib.quote_plus(matchToken[0])+'&deviceTypeID='+urllib.quote_plus(matchDID[0])+'&asin='+videoID+'&customerID='+urllib.quote_plus(matchCID[0])+'&deviceID='+urllib.quote_plus(matchCID[0])+str(int(time.time()*1000))+videoID).read()
elif playTrailer:
try:
strT = ""
if siteVersion=="de":
strT = "+german"
contentT = opener.open("http://gdata.youtube.com/feeds/api/videos?vq="+cleanTitle(matchTitle[0]).replace(" ", "+")+"+trailer"+strT+"&racy=include&orderby=relevance").read()
match = re.compile('<id>http://gdata.youtube.com/feeds/api/videos/(.+?)</id>', re.DOTALL).findall(contentT.split('<entry>')[1])
xbmc.Player().play("plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=" + match[0])
except:
pass
debug(content)
if content:
content = json.loads(content)
thumbUrl = matchThumb[0].replace(".jpg", "")
thumbUrl = thumbUrl[:thumbUrl.rfind(".")]+".jpg"
if "CDP.Authorization.InvalidGeoIP" in str(content):
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30083)+',10000,'+icon+')')
elif "CDP.Playback.NotOwned" in str(content):
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30084)+',10000,'+icon+')')
elif "CDP.Playback.TemporarilyUnavailable" in str(content):
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30087)+',5000,'+icon+')')
else:
contentT = ""
try:
contentT = content['message']['body']['urlSets']['streamingURLInfoSet'][0]['streamingURLInfo']
except:
contentT = content['message']['body']['streamingURLInfoSet']['streamingURLInfo']
if contentT:
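# either collect every available bitrate for the quality-selection dialog, or keep the best stream at or below maxBitrate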
for item in contentT:
if selectQuality:
streamTitles.append(str(item['bitrate'])+"kb")
streamURLs.append(item['url'])
url = item['url']
elif item['bitrate']<=maxBitrate:
url = item['url']
if rtmpMain not in url:
try:
if selectQuality:
streamTitles = []
streamURLs = []
for item in content['message']['body']['urlSets']['streamingURLInfoSet'][1]['streamingURLInfo']:
if selectQuality:
streamTitles.append(str(item['bitrate'])+"kb")
streamURLs.append(item['url'])
elif item['bitrate']<=maxBitrate:
url = item['url']
except:
pass
if selectQuality:
dialog = xbmcgui.Dialog()
nr=dialog.select(translation(30059), streamTitles)
if nr>=0:
url=streamURLs[nr]
if url.startswith("rtmpe"):
url = url.replace('rtmpe','rtmp')+' swfVfy=1 swfUrl='+matchSWF[0]+' pageUrl='+urlMain+'/dp/'+videoID+' app='+rtmpMain+'-vod playpath='+url[url.find('mp4:'):]+' tcUrl=rtmpe://'+rtmpMain+'-vodfs.fplive.net:1935/'+rtmpMain+'-vod/'
if playTrailer or (selectQuality and cMenu):
listitem = xbmcgui.ListItem(cleanTitle(matchTitle[0]), path=url, thumbnailImage=thumbUrl)
xbmc.Player().play(url, listitem)
else:
listitem = xbmcgui.ListItem(cleanTitle(matchTitle[0]), path=url, thumbnailImage=thumbUrl)
xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
elif url.startswith("http"):
dialog = xbmcgui.Dialog()
if dialog.yesno('Info', translation(30085)):
content=opener.open(urlMainS+"/gp/video/settings/ajax/player-preferences-endpoint.html", "rurl="+urllib.quote_plus(urlMainS+"/gp/video/settings")+"&csrfToken="+urllib.quote_plus(addon.getSetting('csrfToken'))+"&aiv-pp-toggle=flash").read()
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30086)+',10000,'+icon+')')
playVideo(videoID, selectQuality)
else:
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30082)+',10000,'+icon+')')
def showInfo(videoID):
xbmcplugin.setContent(pluginhandle, "movies")
content=opener.open(urlMain+"/dp/"+videoID).read()
match=re.compile('"product":.+?"title":"(.+?)"', re.DOTALL).findall(content)
title = match[0]
match=re.compile('class="release-year".*?>(.+?)<', re.DOTALL).findall(content)
year = match[0]
title = title+" ("+year+")"
title = cleanTitle(title)
match=re.compile('"product":.+?"image":"(.+?)"', re.DOTALL).findall(content)
thumb = match[0].replace(".jpg", "")
thumb = thumb[:thumb.rfind(".")]+".jpg"
match=re.compile('"director":.+?"name":"(.+?)"', re.DOTALL).findall(content)
director = match[0].replace(",",", ")
match=re.compile('"actor":.+?"name":"(.+?)"', re.DOTALL).findall(content)
actors = match[0].replace(",",", ")
match=re.compile('property="og:duration" content="(.+?)"', re.DOTALL).findall(content)
length = str(int(match[0])/60)+" min."
match=re.compile('"ratingValue":"(.+?)"', re.DOTALL).findall(content)
rating = match[0]
match=re.compile('"reviewCount":"(.+?)"', re.DOTALL).findall(content)
ratingCount = match[0]
match=re.compile('"description":"(.+?)"', re.DOTALL).findall(content)
description = cleanTitle(match[0])
match=re.compile('"genre":"(.+?)"', re.DOTALL).findall(content)
genre = ""
if match:
genre = cleanTitle(match[0])
addLink(title, videoID, "playVideo", thumb, videoType="movie", desc=description, duration=length, year=year, mpaa="", director=director, genre=genre, rating=rating)
xbmcplugin.endOfDirectory(pluginhandle)
xbmc.sleep(100)
if forceView:
xbmc.executebuiltin('Container.SetViewMode('+viewIdDetails+')')
try:
wnd = xbmcgui.Window(xbmcgui.getCurrentWindowId())
wnd.getControl(wnd.getFocusId()).selectItem(1)
except:
pass
def deleteCookies():
if os.path.exists(cookieFile):
os.remove(cookieFile)
def deleteCache():
if os.path.exists(cacheFolder):
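# rmtree can fail transiently (e.g. a thumbnail still being read); retry once before giving up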
try:
shutil.rmtree(cacheFolder)
except:
shutil.rmtree(cacheFolder)
def search(type):
keyboard = xbmc.Keyboard('', translation(30015))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
search_string = keyboard.getText().replace(" ", "+")
if siteVersion=="de":
if type=="movies":
listMovies(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356018031&field-keywords="+search_string)
elif type=="tv":
listShows(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356019031&field-keywords="+search_string)
elif siteVersion=="com":
if type=="movies":
listMovies(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D7613704011&field-keywords="+search_string)
elif type=="tv":
listShows(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D7613705011&field-keywords="+search_string)
elif siteVersion=="co.uk":
if type=="movies":
listMovies(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356010031&field-keywords="+search_string)
elif type=="tv":
listShows(urlMain+"/mn/search/ajax/?_encoding=UTF8&url=node%3D3356011031&field-keywords="+search_string)
def addToQueue(videoID, videoType):
if videoType=="tv":
videoType = "tv_episode"
content = opener.open(urlMain+"/gp/video/watchlist/ajax/addRemove.html/ref=sr_1_1_watchlist_add?token="+urllib.quote_plus(addon.getSetting('csrfToken'))+"&dataType=json&prodType="+videoType+"&ASIN="+videoID+"&pageType=Search&subPageType=SASLeafSingleSearch&store=instant-video").read()
if showNotification:
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30088)+',3000,'+icon+')')
def removeFromQueue(videoID, videoType):
if videoType=="tv":
videoType = "tv_episode"
content = opener.open(urlMain+"/gp/video/watchlist/ajax/addRemove.html/ref=sr_1_1_watchlist_remove?token="+urllib.quote_plus(addon.getSetting('csrfToken'))+"&dataType=json&prodType="+videoType+"&ASIN="+videoID+"&pageType=Search&subPageType=SASLeafSingleSearch&store=instant-video").read()
xbmc.executebuiltin("Container.Refresh")
if showNotification:
xbmc.executebuiltin('XBMC.Notification(Info:,'+translation(30089)+',3000,'+icon+')')
def login():
content = opener.open(urlMain).read()
if '"isPrime":1' in content:
return "prime"
elif 'id="nav-item-signout"' in content:
return "noprime"
else:
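# no active session: prompt for credentials, submit the sign-in form via mechanize, then persist the cookies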
content = ""
keyboard = xbmc.Keyboard('', translation(30090))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
email = keyboard.getText()
keyboard = xbmc.Keyboard('', translation(30091))
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
password = keyboard.getText()
br = mechanize.Browser()
br.set_cookiejar(cj)
br.set_handle_robots(False)
br.addheaders = [('User-agent', userAgent)]
content = br.open(urlMainS+"/gp/sign-in.html")
br.select_form(name="signIn")
br["email"] = email
br["password"] = password
content = br.submit().read()
cj.save(cookieFile)
content = opener.open(urlMain).read()
if '"isPrime":1' in content:
return "prime"
elif 'id="nav-item-signout"' in content:
return "noprime"
else:
return "none"
def cleanTitle(title):
if "[HD]" in title:
title = title[:title.find("[HD]")]
title = title.replace("&","&").replace("'","'").replace("é","é").replace("ä","ä").replace("ö","ö").replace("ü","ü").replace("Ä","Ä").replace("Ö","Ö").replace("Ü","Ü").replace("ß","ß").replace("…","…")
title = title.replace("é","é").replace("ä","ä").replace("ö","ö").replace("ü","ü").replace("Ä","Ä").replace("Ö","Ö").replace("Ü","Ü").replace("ß","ß")
return title.replace("\xe4","ä").replace("\xf6","ö").replace("\xfc","ü").replace("\xc4","Ä").replace("\xd6","Ö").replace("\xdc","Ü").replace("\xdf","ß").strip()
def cleanSeasonTitle(title):
if ": The Complete" in title:
title = title[:title.rfind(": The Complete")]
if "Season" in title:
title = title[:title.rfind("Season")]
if "Staffel" in title:
title = title[:title.rfind("Staffel")]
if "Volume" in title:
title = title[:title.rfind("Volume")]
if "Series" in title:
title = title[:title.rfind("Series")]
return title.strip(" -,")
def cleanTitleTMDB(title):
if "[" in title:
title = title[:title.find("[")]
if " OmU" in title:
title = title[:title.find(" OmU")]
return title
def addMovieToLibrary(movieID, title):
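# strip characters that are invalid in file names so the .strm file can be written on any platform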
movieFolderName = (''.join(c for c in unicode(title, 'utf-8') if c not in '/\\:?"*|<>')).strip(' .')
dir = os.path.join(libraryFolderMovies, movieFolderName)
if not os.path.isdir(dir):
xbmcvfs.mkdir(dir)
fh = xbmcvfs.File(os.path.join(dir, "movie.strm"), 'w')
fh.write('plugin://'+addonID+'/?mode=playVideo&url='+movieID)
fh.close()
if updateDB:
xbmc.executebuiltin('UpdateLibrary(video)')
def addSeasonToLibrary(seriesID, seriesTitle, seasonID):
seriesFolderName = (''.join(c for c in unicode(seriesTitle, 'utf-8') if c not in '/\\:?"*|<>')).strip(' .')
seriesDir = os.path.join(libraryFolderTV, seriesFolderName)
if not os.path.isdir(seriesDir):
xbmcvfs.mkdir(seriesDir)
content = opener.open(urlMain+"/gp/product/"+seasonID).read()
matchSeason = re.compile('"seasonNumber":"(.+?)"', re.DOTALL).findall(content)
spl = content.split('href="'+urlMain+'/gp/product')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('class="episode-title">(.+?)<', re.DOTALL).findall(entry)
if match:
title = match[0]
title = cleanTitle(title)
episodeNr = title[:title.find('.')]
title = title[title.find('.')+1:].strip()
match = re.compile('/(.+?)/', re.DOTALL).findall(entry)
episodeID = match[0]
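# zero-pad season and episode numbers so library scrapers recognise the SxxExx file-name pattern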
if len(episodeNr) == 1:
episodeNr = "0"+episodeNr
seasonNr = matchSeason[0]
if len(seasonNr) == 1:
seasonNr = "0"+seasonNr
filename = "S"+seasonNr+"E"+episodeNr+" - "+title+".strm"
filename = (''.join(c for c in unicode(filename, 'utf-8') if c not in '/\\:?"*|<>')).strip(' .')
fh = xbmcvfs.File(os.path.join(seriesDir, filename), 'w')
fh.write('plugin://'+addonID+'/?mode=playVideo&url='+episodeID)
fh.close()
if updateDB:
xbmc.executebuiltin('UpdateLibrary(video)')
def debug(content):
fh=open(debugFile, "w")
fh.write(content)
fh.close()
def parameters_string_to_dict(parameters):
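# turn the plugin's "?key=value&key=value" query string into a dict; malformed pairs are skipped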
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
def addDir(name, url, mode, iconimage, videoType=""):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&videoType="+urllib.quote_plus(videoType)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name})
liz.setProperty("fanart_image", defaultFanart)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addShowDir(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
if os.path.exists(coverFile):
iconimage = coverFile
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&name="+urllib.quote_plus(name)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30052), 'RunPlugin(plugin://'+addonID+'/?mode=addToQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addShowDirR(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
coverFile = os.path.join(cacheFolderCoversTMDB, filename)
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
if os.path.exists(coverFile):
iconimage = coverFile
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&thumb="+urllib.quote_plus(iconimage)+"&name="+urllib.quote_plus(name)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30053), 'RunPlugin(plugin://'+addonID+'/?mode=removeFromQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addLink(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30054), 'RunPlugin(plugin://'+addonID+'/?mode=playVideo&url='+urllib.quote_plus(url)+'&selectQuality=true)',))
if videoType != "episode":
entries.append((translation(30060), 'Container.Update(plugin://'+addonID+'/?mode=showInfo&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30052), 'RunPlugin(plugin://'+addonID+'/?mode=addToQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
if videoType == "movie":
titleTemp = name.strip()
if year:
titleTemp += ' ('+year+')'
entries.append((translation(30055), 'RunPlugin(plugin://'+addonID+'/?mode=addMovieToLibrary&url='+urllib.quote_plus(url)+'&name='+urllib.quote_plus(titleTemp)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
liz.setProperty('IsPlayable', 'true')
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
def addLinkR(name, url, mode, iconimage, videoType="", desc="", duration="", year="", mpaa="", director="", genre="", rating=""):
filename = (''.join(c for c in unicode(url, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "year": year, "mpaa": mpaa, "director": director, "genre": genre, "rating": rating})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30054), 'RunPlugin(plugin://'+addonID+'/?mode=playVideo&url='+urllib.quote_plus(url)+'&selectQuality=true)',))
entries.append((translation(30060), 'Container.Update(plugin://'+addonID+'/?mode=showInfo&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30051), 'RunPlugin(plugin://'+addonID+'/?mode=playTrailer&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30053), 'RunPlugin(plugin://'+addonID+'/?mode=removeFromQueue&url='+urllib.quote_plus(url)+'&videoType='+urllib.quote_plus(videoType)+')',))
if videoType == "movie":
titleTemp = name.strip()
if year:
titleTemp += ' ('+year+')'
entries.append((translation(30055), 'RunPlugin(plugin://'+addonID+'/?mode=addMovieToLibrary&url='+urllib.quote_plus(url)+'&name='+urllib.quote_plus(titleTemp)+')',))
entries.append((translation(30057), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarMovies&url='+urllib.quote_plus(url)+')',))
entries.append((translation(30058), 'Container.Update(plugin://'+addonID+'/?mode=listSimilarShows&url='+urllib.quote_plus(url)+')',))
liz.addContextMenuItems(entries)
liz.setProperty('IsPlayable', 'true')
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
def addSeasonDir(name, url, mode, iconimage, seriesName, seriesID):
filename = (''.join(c for c in unicode(seriesID, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&seriesID="+urllib.quote_plus(seriesID)+"&thumb="+urllib.quote_plus(iconimage)+"&name="+urllib.quote_plus(seriesName)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "TVShowTitle": seriesName})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30056), 'RunPlugin(plugin://'+addonID+'/?mode=addSeasonToLibrary&url='+urllib.quote_plus(url)+'&seriesID='+urllib.quote_plus(seriesID)+'&name='+urllib.quote_plus(seriesName.strip())+')',))
liz.addContextMenuItems(entries)
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def addEpisodeLink(name, url, mode, iconimage, desc="", duration="", season="", episodeNr="", seriesID="", playcount="", aired="", seriesName=""):
filename = (''.join(c for c in unicode(seriesID, 'utf-8') if c not in '/\\:?"*|<>')).strip()+".jpg"
fanartFile = os.path.join(cacheFolderFanartTMDB, filename)
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultTVShows.png", thumbnailImage=iconimage)
liz.setInfo(type="video", infoLabels={"title": name, "plot": desc, "duration": duration, "season": season, "episode": episodeNr, "aired": aired, "playcount": playcount, "TVShowTitle": seriesName})
liz.setProperty("fanart_image", fanartFile)
entries = []
entries.append((translation(30054), 'RunPlugin(plugin://'+addonID+'/?mode=playVideo&url='+urllib.quote_plus(url)+'&selectQuality=true)',))
liz.addContextMenuItems(entries)
liz.setProperty('IsPlayable', 'true')
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))
thumb = urllib.unquote_plus(params.get('thumb', ''))
name = urllib.unquote_plus(params.get('name', ''))
season = urllib.unquote_plus(params.get('season', ''))
seriesID = urllib.unquote_plus(params.get('seriesID', ''))
videoType = urllib.unquote_plus(params.get('videoType', ''))
selectQuality = urllib.unquote_plus(params.get('selectQuality', ''))
if mode == 'listMovies':
listMovies(url)
elif mode == 'listShows':
listShows(url)
elif mode == 'listWatchList':
listWatchList(url)
elif mode == 'listGenres':
listGenres(url, videoType)
elif mode == 'addToQueue':
addToQueue(url, videoType)
elif mode == 'removeFromQueue':
removeFromQueue(url, videoType)
elif mode == 'playVideo':
playVideo(url, selectQuality=="true")
elif mode == 'playVideoSelect':
playVideo(url, True)
elif mode == 'browseMovies':
browseMovies()
elif mode == 'browseTV':
browseTV()
elif mode == 'search':
search(url)
elif mode == 'login':
login()
elif mode == 'listDecadesMovie':
listDecadesMovie()
elif mode == 'listOriginals':
listOriginals()
elif mode == 'listSeasons':
listSeasons(name, url, thumb)
elif mode == 'listEpisodes':
listEpisodes(seriesID, url, thumb, "", name)
elif mode == 'deleteCookies':
deleteCookies()
elif mode == 'deleteCache':
deleteCache()
elif mode == 'playTrailer':
playVideo(url, selectQuality=="true", True)
elif mode == 'listSimilarMovies':
listSimilarMovies(url)
elif mode == 'listSimilarShows':
listSimilarShows(url)
elif mode == 'showInfo':
showInfo(url)
elif mode == 'addMyListToLibrary':
addMyListToLibrary()
elif mode == 'addMovieToLibrary':
addMovieToLibrary(url, name)
elif mode == 'addSeasonToLibrary':
addSeasonToLibrary(seriesID, name, url)
else:
index()
| gpl-2.0 |
gomi1992/tiny210v2-uboot | tools/patman/test.py | 66 | 7785 | #
# Copyright (c) 2011 The Chromium OS Authors.
#
# See file CREDITS for list of people who contributed to this
# project.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import os
import tempfile
import unittest
import checkpatch
import gitutil
import patchstream
import series
class TestPatch(unittest.TestCase):
"""Test this program
TODO: Write tests for the rest of the functionality
"""
def testBasic(self):
"""Test basic filter operation"""
data='''
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
BUG=chromium-os:13875
TEST=build U-Boot for Seaboard, boot
Change-Id: I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413
Review URL: http://codereview.chromium.org/6900006
Signed-off-by: Simon Glass <sjg@chromium.org>
---
arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
expected='''
From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 28 Apr 2011 09:58:51 -0700
Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
This adds functions to enable/disable clocks and reset to on-chip peripherals.
Signed-off-by: Simon Glass <sjg@chromium.org>
---
arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
'''
out = ''
inhandle, inname = tempfile.mkstemp()
infd = os.fdopen(inhandle, 'w')
infd.write(data)
infd.close()
exphandle, expname = tempfile.mkstemp()
expfd = os.fdopen(exphandle, 'w')
expfd.write(expected)
expfd.close()
patchstream.FixPatch(None, inname, series.Series(), None)
rc = os.system('diff -u %s %s' % (inname, expname))
self.assertEqual(rc, 0)
os.remove(inname)
os.remove(expname)
def GetData(self, data_type):
data='''
From 4924887af52713cabea78420eff03badea8f0035 Mon Sep 17 00:00:00 2001
From: Simon Glass <sjg@chromium.org>
Date: Thu, 7 Apr 2011 10:14:41 -0700
Subject: [PATCH 1/4] Add microsecond boot time measurement
This defines the basics of a new boot time measurement feature. This allows
logging of very accurate time measurements as the boot proceeds, by using
an available microsecond counter.
%s
---
README | 11 ++++++++
common/bootstage.c | 50 ++++++++++++++++++++++++++++++++++++
include/bootstage.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
include/common.h | 8 ++++++
5 files changed, 141 insertions(+), 0 deletions(-)
create mode 100644 common/bootstage.c
create mode 100644 include/bootstage.h
diff --git a/README b/README
index 6f3748d..f9e4e65 100644
--- a/README
+++ b/README
@@ -2026,6 +2026,17 @@ The following options need to be configured:
example, some LED's) on your board. At the moment,
the following checkpoints are implemented:
+- Time boot progress
+ CONFIG_BOOTSTAGE
+
+ Define this option to enable microsecond boot stage timing
+ on supported platforms. For this to work your platform
+ needs to define a function timer_get_us() which returns the
+ number of microseconds since reset. This would normally
+ be done in your SOC or board timer.c file.
+
+ You can add calls to bootstage_mark() to set time markers.
+
- Standalone program support:
CONFIG_STANDALONE_LOAD_ADDR
diff --git a/common/bootstage.c b/common/bootstage.c
new file mode 100644
index 0000000..2234c87
--- /dev/null
+++ b/common/bootstage.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Google Inc. All rights reserved.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+
+/*
+ * This module records the progress of boot and arbitrary commands, and
+ * permits accurate timestamping of each. The records can optionally be
+ * passed to kernel in the ATAGs
+ */
+
+#include <common.h>
+
+
+struct bootstage_record {
+ uint32_t time_us;
+ const char *name;
+};
+
+static struct bootstage_record record[BOOTSTAGE_COUNT];
+
+uint32_t bootstage_mark(enum bootstage_id id, const char *name)
+{
+ struct bootstage_record *rec = &record[id];
+
+ /* Only record the first event for each */
+%sif (!rec->name) {
+ rec->time_us = (uint32_t)timer_get_us();
+ rec->name = name;
+ }
+%sreturn rec->time_us;
+}
--
1.7.3.1
'''
signoff = 'Signed-off-by: Simon Glass <sjg@chromium.org>\n'
tab = ' '
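# the template above has three %s slots: the Signed-off-by line and the two indentation runs inside bootstage_mark()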
if data_type == 'good':
pass
elif data_type == 'no-signoff':
signoff = ''
elif data_type == 'spaces':
tab = ' '
else:
print 'not implemented'
return data % (signoff, tab, tab)
def SetupData(self, data_type):
inhandle, inname = tempfile.mkstemp()
infd = os.fdopen(inhandle, 'w')
data = self.GetData(data_type)
infd.write(data)
infd.close()
return inname
def testCheckpatch(self):
"""Test checkpatch operation"""
inf = self.SetupData('good')
result, problems, err, warn, lines, stdout = checkpatch.CheckPatch(inf)
self.assertEqual(result, True)
self.assertEqual(problems, [])
self.assertEqual(err, 0)
self.assertEqual(warn, 0)
self.assertEqual(lines, 67)
os.remove(inf)
inf = self.SetupData('no-signoff')
result, problems, err, warn, lines, stdout = checkpatch.CheckPatch(inf)
self.assertEqual(result, False)
self.assertEqual(len(problems), 1)
self.assertEqual(err, 1)
self.assertEqual(warn, 0)
self.assertEqual(lines, 67)
os.remove(inf)
inf = self.SetupData('spaces')
result, problems, err, warn, lines, stdout = checkpatch.CheckPatch(inf)
self.assertEqual(result, False)
self.assertEqual(len(problems), 2)
self.assertEqual(err, 0)
self.assertEqual(warn, 2)
self.assertEqual(lines, 67)
os.remove(inf)
if __name__ == "__main__":
unittest.main()
gitutil.RunTests()
| gpl-2.0 |
pattisdr/osf.io | scripts/analytics/addon_snapshot.py | 5 | 5536 | from __future__ import absolute_import
import logging
# App must be initialized before models or ADDONS_AVAILABLE are available
from website.app import init_app
init_app()
from osf.models import OSFUser, AbstractNode
from framework.database import paginated
from scripts.analytics.base import SnapshotAnalytics
from website.settings import ADDONS_AVAILABLE
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Modified from scripts/analytics/benchmarks.py
def get_enabled_authorized_linked(user_settings_list, has_external_account, short_name):
""" Gather the number of users who have at least one node in each of the stages for an addon
:param user_settings_list: list of user_settings for a particualr addon
:param has_external_account: where addon is derrived from, determines method to load node settings
:param short_name: short name of addon to get correct node_settings
:return: dict with number of users that have at least one project at each stage
"""
from addons.forward.models import NodeSettings as ForwardNodeSettings
num_enabled = 0 # of users w/ 1+ addon account connected
num_authorized = 0 # of users w/ 1+ addon account connected to 1+ node
num_linked = 0 # of users w/ 1+ addon account connected to 1+ node and configured
# osfstorage and wiki don't have user_settings, so always assume they're enabled, authorized, linked
if short_name == 'osfstorage' or short_name == 'wiki':
num_enabled = num_authorized = num_linked = OSFUser.objects.filter(
is_registered=True,
password__isnull=False,
merged_by__isnull=True,
date_disabled__isnull=True,
date_confirmed__isnull=False
).count()
elif short_name == 'forward':
num_enabled = num_authorized = ForwardNodeSettings.objects.count()
num_linked = ForwardNodeSettings.objects.filter(url__isnull=False).count()
else:
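# generic add-ons: walk each user's settings and inspect the node settings of every node they have authorized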
for user_settings in paginated(user_settings_list):
node_settings_list = []
if has_external_account:
if user_settings.has_auth:
num_enabled += 1
node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.oauth_grants.keys()]
else:
num_enabled += 1
node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.nodes_authorized]
if any([ns.has_auth for ns in node_settings_list if ns]):
num_authorized += 1
if any([(ns.complete and ns.configured) for ns in node_settings_list if ns]):
num_linked += 1
return {
'enabled': num_enabled,
'authorized': num_authorized,
'linked': num_linked
}
class AddonSnapshot(SnapshotAnalytics):
@property
def collection_name(self):
return 'addon_snapshot'
def get_events(self, date=None):
super(AddonSnapshot, self).get_events(date)
counts = []
addons_available = {addon.short_name: addon for addon in ADDONS_AVAILABLE}
for short_name, addon in addons_available.items():
has_external_account = hasattr(addon.models.get('nodesettings'), 'external_account')
connected_count = 0
deleted_count = 0
disconnected_count = 0
node_settings_model = addon.models.get('nodesettings')
if node_settings_model:
for node_settings in paginated(node_settings_model):
if node_settings.owner and not node_settings.owner.all_tags.filter(name='old_node_collection', system=True).exists():
connected_count += 1
deleted_count = addon.models['nodesettings'].objects.filter(deleted=True).count() if addon.models.get('nodesettings') else 0
if has_external_account:
disconnected_count = addon.models['nodesettings'].objects.filter(external_account__isnull=True, deleted=False).count() if addon.models.get('nodesettings') else 0
else:
if addon.models.get('nodesettings'):
for nsm in addon.models['nodesettings'].objects.filter(deleted=False):
if nsm.configured and not nsm.complete:
disconnected_count += 1
total = connected_count + deleted_count + disconnected_count
usage_counts = get_enabled_authorized_linked(addon.models.get('usersettings'), has_external_account, addon.short_name)
counts.append({
'provider': {
'name': short_name
},
'users': usage_counts,
'nodes': {
'total': total,
'connected': connected_count,
'deleted': deleted_count,
'disconnected': disconnected_count
}
})
logger.info(
'{} counted. Users with a linked node: {}, Total connected nodes: {}.'.format(
addon.short_name,
usage_counts['linked'],
total
)
)
return counts
def get_class():
return AddonSnapshot
if __name__ == '__main__':
addon_snapshot = AddonSnapshot()
events = addon_snapshot.get_events()
addon_snapshot.send_events(events)
| apache-2.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/macurl2path.py | 110 | 2732 | """Macintosh-specific module for conversion between pathnames and URLs.
Do not import directly; use urllib instead."""
import urllib.parse
import os
__all__ = ["url2pathname","pathname2url"]
def url2pathname(pathname):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
#
# XXXX The .. handling should be fixed...
#
tp = urllib.parse.splittype(pathname)[0]
if tp and tp != 'file':
raise RuntimeError('Cannot convert non-local URL to pathname')
# Turn starting /// into /, an empty hostname means current host
if pathname[:3] == '///':
pathname = pathname[2:]
elif pathname[:2] == '//':
raise RuntimeError('Cannot convert non-local URL to pathname')
components = pathname.split('/')
# Remove . and embedded ..
i = 0
while i < len(components):
if components[i] == '.':
del components[i]
elif components[i] == '..' and i > 0 and \
components[i-1] not in ('', '..'):
del components[i-1:i+1]
i = i-1
elif components[i] == '' and i > 0 and components[i-1] != '':
del components[i]
else:
i = i+1
if not components[0]:
# Absolute unix path, don't start with colon
rv = ':'.join(components[1:])
else:
# relative unix path, start with colon. First replace
# leading .. by empty strings (giving ::file)
i = 0
while i < len(components) and components[i] == '..':
components[i] = ''
i = i + 1
rv = ':' + ':'.join(components)
# and finally unquote slashes and other funny characters
return urllib.parse.unquote(rv)
def pathname2url(pathname):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
if '/' in pathname:
raise RuntimeError("Cannot convert pathname containing slashes")
components = pathname.split(':')
# Remove empty first and/or last component
if components[0] == '':
del components[0]
if components[-1] == '':
del components[-1]
# Replace empty string ('::') by .. (will result in '/../' later)
for i in range(len(components)):
if components[i] == '':
components[i] = '..'
# Truncate names longer than 31 bytes
components = map(_pncomp2url, components)
if os.path.isabs(pathname):
return '/' + '/'.join(components)
else:
return '/'.join(components)
def _pncomp2url(component):
# We want to quote slashes
return urllib.parse.quote(component[:31], safe='')
| lgpl-3.0 |
krez13/scikit-learn | sklearn/ensemble/tests/test_forest.py | 26 | 41675 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
assert_less(0 < X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
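# analytical MDI of feature X_m: average, over all conditioning sets B of each size k, the information gain of X_m weighted by P(B=b) (Louppe et al., 2013)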
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
    # Test random forest hashing on the circles dataset: make sure that it
    # is linearly separable even after being projected to two SVD dimensions.
    # Note: not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
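    # Why five: the root cuts {0,1,2,3} at one of three thresholds, each with
    # probability 1/3. The middle cut 01|23 forces the rest of the tree
    # (probability 1/3), while each outer cut (0|123 or 012|3) leaves a
    # three-value side with two equally likely subtrees, yielding four trees
    # of probability 1/3 * 1/2 = 1/6 each.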
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    # Use the seeded rng (not the global np.random) so the test is reproducible.
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
    # Test that leaves contain at least min_samples_leaf training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    # No explicit memory order requested
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
    # Check that class_weight mirrors the behavior of sample_weight.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
    # Test that a second warm-start fit with equal n_estimators raises a
    # warning and leaves the forest unchanged.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
    # Assert that the leaf indices are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
grevutiu-gabriel/sympy | sympy/ntheory/multinomial.py | 91 | 6870 | from __future__ import print_function, division
from collections import defaultdict
from sympy.core.compatibility import range
def binomial_coefficients(n):
"""Return a dictionary containing pairs :math:`{(k1,k2) : C_kn}` where
:math:`C_kn` are binomial coefficients and :math:`n=k1+k2`.
Examples
========
>>> from sympy.ntheory import binomial_coefficients
>>> binomial_coefficients(9)
{(0, 9): 1, (1, 8): 9, (2, 7): 36, (3, 6): 84,
(4, 5): 126, (5, 4): 126, (6, 3): 84, (7, 2): 36, (8, 1): 9, (9, 0): 1}
See Also
========
binomial_coefficients_list, multinomial_coefficients
"""
d = {(0, n): 1, (n, 0): 1}
a = 1
for k in range(1, n//2 + 1):
a = (a * (n - k + 1))//k
d[k, n - k] = d[n - k, k] = a
return d
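# Implementation note: the loop above uses the multiplicative identity
# C(n, k) = C(n, k - 1) * (n - k + 1) / k together with the symmetry
# C(n, k) == C(n, n - k), so only n//2 products are needed.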
def binomial_coefficients_list(n):
""" Return a list of binomial coefficients as rows of the Pascal's
triangle.
Examples
========
>>> from sympy.ntheory import binomial_coefficients_list
>>> binomial_coefficients_list(9)
[1, 9, 36, 84, 126, 126, 84, 36, 9, 1]
See Also
========
binomial_coefficients, multinomial_coefficients
"""
d = [1] * (n + 1)
a = 1
for k in range(1, n//2 + 1):
a = (a * (n - k + 1))//k
d[k] = d[n - k] = a
return d
def multinomial_coefficients0(m, n, _tuple=tuple, _zip=zip):
"""Return a dictionary containing pairs ``{(k1,k2,..,km) : C_kn}``
where ``C_kn`` are multinomial coefficients such that
``n=k1+k2+..+km``.
For example:
>>> from sympy import multinomial_coefficients
>>> multinomial_coefficients(2, 5) # indirect doctest
{(0, 5): 1, (1, 4): 5, (2, 3): 10, (3, 2): 10, (4, 1): 5, (5, 0): 1}
The algorithm is based on the following result:
Consider a polynomial and its ``n``-th exponent::
P(x) = sum_{i=0}^m p_i x^i
P(x)^n = sum_{k=0}^{m n} a(n,k) x^k
The coefficients ``a(n,k)`` can be computed using the
J.C.P. Miller Pure Recurrence [see D.E.Knuth, Seminumerical
    Algorithms, The Art of Computer Programming v.2, Addison
Wesley, Reading, 1981;]::
a(n,k) = 1/(k p_0) sum_{i=1}^m p_i ((n+1)i-k) a(n,k-i),
where ``a(n,0) = p_0^n``.
"""
if not m:
if n:
return {}
return {(): 1}
if m == 2:
return binomial_coefficients(n)
symbols = [(0,)*i + (1,) + (0,)*(m - i - 1) for i in range(m)]
s0 = symbols[0]
p0 = [_tuple(aa - bb for aa, bb in _zip(s, s0)) for s in symbols]
r = {_tuple(aa*n for aa in s0): 1}
l = [0] * (n*(m - 1) + 1)
l[0] = r.items()
for k in range(1, n*(m - 1) + 1):
d = defaultdict(int)
for i in range(1, min(m, k + 1)):
nn = (n + 1)*i - k
if not nn:
continue
t = p0[i]
for t2, c2 in l[k - i]:
tt = _tuple([aa + bb for aa, bb in _zip(t2, t)])
d[tt] += nn*c2
if not d[tt]:
del d[tt]
r1 = [(t, c//k) for (t, c) in d.items()]
l[k] = r1
r.update(r1)
return r
def multinomial_coefficients(m, n):
r"""Return a dictionary containing pairs ``{(k1,k2,..,km) : C_kn}``
where ``C_kn`` are multinomial coefficients such that
``n=k1+k2+..+km``.
For example:
>>> from sympy.ntheory import multinomial_coefficients
>>> multinomial_coefficients(2, 5) # indirect doctest
{(0, 5): 1, (1, 4): 5, (2, 3): 10, (3, 2): 10, (4, 1): 5, (5, 0): 1}
The algorithm is based on the following result:
.. math::
\binom{n}{k_1, \ldots, k_m} =
\frac{k_1 + 1}{n - k_1} \sum_{i=2}^m \binom{n}{k_1 + 1, \ldots, k_i - 1, \ldots}
Code contributed to Sage by Yann Laigle-Chapuy, copied with permission
of the author.
See Also
========
binomial_coefficients_list, binomial_coefficients
"""
if not m:
if n:
return {}
return {(): 1}
if m == 2:
return binomial_coefficients(n)
if m >= 2*n and n > 1:
return dict(multinomial_coefficients_iterator(m, n))
t = [n] + [0] * (m - 1)
r = {tuple(t): 1}
if n:
j = 0 # j will be the leftmost nonzero position
else:
j = m
# enumerate tuples in co-lex order
while j < m - 1:
# compute next tuple
tj = t[j]
if j:
t[j] = 0
t[0] = tj
if tj > 1:
t[j + 1] += 1
j = 0
start = 1
v = 0
else:
j += 1
start = j + 1
v = r[tuple(t)]
t[j] += 1
# compute the value
# NB: the initialization of v was done above
for k in range(start, m):
if t[k]:
t[k] -= 1
v += r[tuple(t)]
t[k] += 1
t[0] -= 1
r[tuple(t)] = (v * tj) // (n - t[0])
return r
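# Worked example: multinomial_coefficients(3, 2) gives the coefficients of
# (x + y + z)**2, i.e. {(2, 0, 0): 1, (1, 1, 0): 2, (0, 2, 0): 1,
# (1, 0, 1): 2, (0, 1, 1): 2, (0, 0, 2): 1}.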
def multinomial_coefficients_iterator(m, n, _tuple=tuple):
"""multinomial coefficient iterator
This routine has been optimized for `m` large with respect to `n` by taking
advantage of the fact that when the monomial tuples `t` are stripped of
zeros, their coefficient is the same as that of the monomial tuples from
``multinomial_coefficients(n, n)``. Therefore, the latter coefficients are
precomputed to save memory and time.
>>> from sympy.ntheory.multinomial import multinomial_coefficients
>>> m53, m33 = multinomial_coefficients(5,3), multinomial_coefficients(3,3)
>>> m53[(0,0,0,1,2)] == m53[(0,0,1,0,2)] == m53[(1,0,2,0,0)] == m33[(0,1,2)]
True
Examples
========
>>> from sympy.ntheory.multinomial import multinomial_coefficients_iterator
>>> it = multinomial_coefficients_iterator(20,3)
>>> next(it)
((3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), 1)
"""
if m < 2*n or n == 1:
mc = multinomial_coefficients(m, n)
for k, v in mc.items():
yield(k, v)
else:
mc = multinomial_coefficients(n, n)
mc1 = {}
for k, v in mc.items():
mc1[_tuple(filter(None, k))] = v
mc = mc1
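        # mc now maps zero-stripped exponent tuples to coefficients, matching
        # the lookups of tuple(filter(None, t1)) performed below.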
t = [n] + [0] * (m - 1)
t1 = _tuple(t)
b = _tuple(filter(None, t1))
yield (t1, mc[b])
if n:
j = 0 # j will be the leftmost nonzero position
else:
j = m
# enumerate tuples in co-lex order
while j < m - 1:
# compute next tuple
tj = t[j]
if j:
t[j] = 0
t[0] = tj
if tj > 1:
t[j + 1] += 1
j = 0
else:
j += 1
t[j] += 1
t[0] -= 1
t1 = _tuple(t)
b = _tuple(filter(None, t1))
yield (t1, mc[b])
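if __name__ == "__main__":
    # A minimal self-check, not part of the original module: compare the
    # recurrence-based coefficients with the direct factorial formula
    # n! / (k1! * ... * km!) for one small case.
    from math import factorial
    from functools import reduce

    _m, _n = 3, 4
    for _k, _c in multinomial_coefficients(_m, _n).items():
        _denom = reduce(lambda acc, ki: acc * factorial(ki), _k, 1)
        assert _c == factorial(_n) // _denom, (_k, _c)
    print("multinomial_coefficients(%d, %d) matches the factorial formula"
          % (_m, _n))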
| bsd-3-clause |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/lib2to3/fixes/fix_print.py | 5 | 2941 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for print.
Change:
'print' into 'print()'
'print ...' into 'print(...)'
'print ... ,' into 'print(..., end=" ")'
'print >>x, ...' into 'print(..., file=x)'
No changes are applied if print_function is imported from __future__
"""
# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple
parend_expr = patcomp.compile_pattern(
"""atom< '(' [atom|STRING|NAME] ')' >"""
)
class FixPrint(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
simple_stmt< any* bare='print' any* > | print_stmt
"""
def transform(self, node, results):
assert results
bare_print = results.get("bare")
if bare_print:
# Special-case print all by itself
bare_print.replace(Call(Name("print"), [],
prefix=bare_print.prefix))
return
assert node.children[0] == Name("print")
args = node.children[1:]
if len(args) == 1 and parend_expr.match(args[0]):
# We don't want to keep sticking parens around an
# already-parenthesised expression.
return
sep = end = file = None
if args and args[-1] == Comma():
args = args[:-1]
end = " "
if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
assert len(args) >= 2
file = args[1].clone()
args = args[3:] # Strip a possible comma after the file expression
# Now synthesize a print(args, sep=..., end=..., file=...) node.
l_args = [arg.clone() for arg in args]
if l_args:
l_args[0].prefix = ""
if sep is not None or end is not None or file is not None:
if sep is not None:
self.add_kwarg(l_args, "sep", String(repr(sep)))
if end is not None:
self.add_kwarg(l_args, "end", String(repr(end)))
if file is not None:
self.add_kwarg(l_args, "file", file)
n_stmt = Call(Name("print"), l_args)
n_stmt.prefix = node.prefix
return n_stmt
def add_kwarg(self, l_nodes, s_kwd, n_expr):
# XXX All this prefix-setting may lose comments (though rarely)
n_expr.prefix = ""
n_argument = pytree.Node(self.syms.argument,
(Name(s_kwd),
pytree.Leaf(token.EQUAL, "="),
n_expr))
if l_nodes:
l_nodes.append(Comma())
n_argument.prefix = " "
l_nodes.append(n_argument)
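if __name__ == "__main__":
    # Hedged usage sketch, not part of the original fixer: run only this
    # fixer through lib2to3's RefactoringTool to reproduce the rewrites
    # listed in the module docstring. Because of the relative imports above,
    # invoke it as `python -m lib2to3.fixes.fix_print`.
    import sys
    from lib2to3.refactor import RefactoringTool
    tool = RefactoringTool(["lib2to3.fixes.fix_print"])
    for src in ('print\n', 'print "x"\n', 'print "x",\n', 'print >>f, "x"\n'):
        sys.stdout.write(str(tool.refactor_string(src, "<example>")))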
| gpl-2.0 |
mbernasocchi/inasafe | safe_extras/raven/__init__.py | 6 | 1237 | """
raven
~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import os.path
__all__ = ('VERSION', 'Client', 'get_version')
VERSION = '6.7.0'
def _get_git_revision(path):
revision_file = os.path.join(path, 'refs', 'heads', 'master')
if os.path.exists(revision_file):
with open(revision_file) as fh:
return fh.read().strip()[:7]
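# Note: _get_git_revision falls through to an implicit None when
# refs/heads/master is absent (e.g. packed refs or a non-git source tree).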
def get_revision():
"""
:returns: Revision number of this branch/checkout, if available. None if
no revision number can be determined.
"""
package_dir = os.path.dirname(__file__)
checkout_dir = os.path.normpath(os.path.join(package_dir, os.pardir, os.pardir))
path = os.path.join(checkout_dir, '.git')
if os.path.exists(path):
return _get_git_revision(path)
def get_version():
base = VERSION
if __build__:
base = '%s (%s)' % (base, __build__)
return base
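# Example: with VERSION = '6.7.0' and a checkout whose master ref starts with
# abc1234, get_version() returns '6.7.0 (abc1234)'; when no git metadata is
# found, __build__ is None and the bare VERSION string is returned.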
__build__ = get_revision()
__docformat__ = 'restructuredtext en'
# Declare child imports last to prevent recursion
from raven.base import * # NOQA
from raven.conf import * # NOQA
from raven.versioning import * # NOQA
| gpl-3.0 |
sebmarchand/syzygy | third_party/numpy/files/numpy/core/tests/test_numeric.py | 11 | 49172 | import sys
import platform
from decimal import Decimal
import numpy as np
from numpy.core import *
from numpy.random import rand, randint, randn
from numpy.testing import *
from numpy.testing.utils import WarningManager
from numpy.core.multiarray import dot as dot_
import warnings
class Vec:
def __init__(self,sequence=None):
if sequence is None:
sequence=[]
self.array=array(sequence)
def __add__(self,other):
out=Vec()
out.array=self.array+other.array
return out
def __sub__(self,other):
out=Vec()
out.array=self.array-other.array
return out
def __mul__(self,other): # with scalar
out=Vec(self.array.copy())
out.array*=other
return out
def __rmul__(self,other):
return self*other
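# Vec is a deliberately minimal vector type: just enough arithmetic support
# (+, -, scalar *) for object-dtype arrays, exercised in test_vecobject below.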
class TestDot(TestCase):
def setUp(self):
self.A = rand(10,8)
self.b1 = rand(8,1)
self.b2 = rand(8)
self.b3 = rand(1,8)
self.b4 = rand(10)
self.N = 14
def test_matmat(self):
A = self.A
c1 = dot(A.transpose(), A)
c2 = dot_(A.transpose(), A)
assert_almost_equal(c1, c2, decimal=self.N)
def test_matvec(self):
A, b1 = self.A, self.b1
c1 = dot(A, b1)
c2 = dot_(A, b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_matvec2(self):
A, b2 = self.A, self.b2
c1 = dot(A, b2)
c2 = dot_(A, b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat(self):
A, b4 = self.A, self.b4
c1 = dot(b4, A)
c2 = dot_(b4, A)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat2(self):
b3, A = self.b3, self.A
c1 = dot(b3, A.transpose())
c2 = dot_(b3, A.transpose())
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecmat3(self):
A, b4 = self.A, self.b4
c1 = dot(A.transpose(),b4)
c2 = dot_(A.transpose(),b4)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecvecouter(self):
b1, b3 = self.b1, self.b3
c1 = dot(b1, b3)
c2 = dot_(b1, b3)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecvecinner(self):
b1, b3 = self.b1, self.b3
c1 = dot(b3, b1)
c2 = dot_(b3, b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_columnvect1(self):
b1 = ones((3,1))
b2 = [5.3]
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_columnvect2(self):
b1 = ones((3,1)).transpose()
b2 = [6.2]
c1 = dot(b2,b1)
c2 = dot_(b2,b1)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecscalar(self):
b1 = rand(1,1)
b2 = rand(1,8)
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecscalar2(self):
b1 = rand(8,1)
b2 = rand(1,1)
c1 = dot(b1,b2)
c2 = dot_(b1,b2)
assert_almost_equal(c1, c2, decimal=self.N)
def test_all(self):
dims = [(),(1,),(1,1)]
for dim1 in dims:
for dim2 in dims:
arg1 = rand(*dim1)
arg2 = rand(*dim2)
c1 = dot(arg1, arg2)
c2 = dot_(arg1, arg2)
assert_(c1.shape == c2.shape)
assert_almost_equal(c1, c2, decimal=self.N)
def test_vecobject(self):
U_non_cont = transpose([[1.,1.],[1.,2.]])
U_cont = ascontiguousarray(U_non_cont)
x = array([Vec([1.,0.]),Vec([0.,1.])])
zeros = array([Vec([0.,0.]),Vec([0.,0.])])
zeros_test = dot(U_cont,x) - dot(U_non_cont,x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
class TestResize(TestCase):
def test_copies(self):
A = array([[1,2],[3,4]])
Ar1 = array([[1,2,3,4],[1,2,3,4]])
assert_equal(resize(A, (2,4)), Ar1)
Ar2 = array([[1,2],[3,4],[1,2],[3,4]])
assert_equal(resize(A, (4,2)), Ar2)
Ar3 = array([[1,2,3],[4,1,2],[3,4,1],[2,3,4]])
assert_equal(resize(A, (4,3)), Ar3)
def test_zeroresize(self):
A = array([[1,2],[3,4]])
Ar = resize(A, (0,))
assert_equal(Ar, array([]))
class TestNonarrayArgs(TestCase):
# check that non-array arguments to functions wrap them in arrays
def test_squeeze(self):
A = [[[1,1,1],[2,2,2],[3,3,3]]]
assert_(squeeze(A).shape == (3,3))
def test_cumproduct(self):
A = [[1,2,3],[4,5,6]]
assert_(all(cumproduct(A) == array([1,2,6,24,120,720])))
def test_size(self):
A = [[1,2,3],[4,5,6]]
assert_(size(A) == 6)
assert_(size(A,0) == 2)
assert_(size(A,1) == 3)
def test_mean(self):
A = [[1,2,3],[4,5,6]]
assert_(mean(A) == 3.5)
assert_(all(mean(A,0) == array([2.5,3.5,4.5])))
assert_(all(mean(A,1) == array([2.,5.])))
def test_std(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(std(A), 1.707825127659933)
assert_almost_equal(std(A,0), array([1.5, 1.5, 1.5]))
assert_almost_equal(std(A,1), array([0.81649658, 0.81649658]))
def test_var(self):
A = [[1,2,3],[4,5,6]]
assert_almost_equal(var(A), 2.9166666666666665)
assert_almost_equal(var(A,0), array([2.25, 2.25, 2.25]))
assert_almost_equal(var(A,1), array([0.66666667, 0.66666667]))
class TestBoolScalar(TestCase):
def test_logical(self):
f = False_
t = True_
s = "xyz"
self.assertTrue((t and s) is s)
self.assertTrue((f and s) is f)
def test_bitwise_or(self):
f = False_
t = True_
self.assertTrue((t | t) is t)
self.assertTrue((f | t) is t)
self.assertTrue((t | f) is t)
self.assertTrue((f | f) is f)
def test_bitwise_and(self):
f = False_
t = True_
self.assertTrue((t & t) is t)
self.assertTrue((f & t) is f)
self.assertTrue((t & f) is f)
self.assertTrue((f & f) is f)
def test_bitwise_xor(self):
f = False_
t = True_
self.assertTrue((t ^ t) is f)
self.assertTrue((f ^ t) is t)
self.assertTrue((t ^ f) is t)
self.assertTrue((f ^ f) is f)
class TestSeterr(TestCase):
def test_default(self):
err = geterr()
self.assertEqual(err, dict(
divide='warn',
invalid='warn',
over='warn',
under='ignore',
))
def test_set(self):
err = seterr()
try:
old = seterr(divide='print')
self.assertTrue(err == old)
new = seterr()
self.assertTrue(new['divide'] == 'print')
seterr(over='raise')
self.assertTrue(geterr()['over'] == 'raise')
self.assertTrue(new['divide'] == 'print')
seterr(**old)
self.assertTrue(geterr() == old)
finally:
seterr(**err)
def test_divide_err(self):
err = seterr(divide='raise')
try:
try:
array([1.]) / array([0.])
except FloatingPointError:
pass
else:
self.fail()
seterr(divide='ignore')
array([1.]) / array([0.])
finally:
seterr(**err)
class TestFloatExceptions(TestCase):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
        except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
"""Check that fpe exception is raised.
Given a floating operation `flop` and two scalar values, check that
the operation raises the floating point exception specified by
`fpeerr`. Tests all variants with 0-d array scalars as well.
"""
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
        self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
        self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
@dec.knownfailureif((sys.platform == "darwin") and
("powerpc" in platform.processor()),
"See ticket 1755")
def test_floating_exceptions(self):
"""Test basic arithmetic function errors"""
oldsettings = np.seterr(all='raise')
try:
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a,b:a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a,b:a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a,b:a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a,b:a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a,b:a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a,b:a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(divbyzero,
lambda a,b:a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a,b:a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a,b:a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a,b:a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a,b:a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a,b:a*b, ftype(0), ftype(np.inf))
finally:
np.seterr(**oldsettings)
@dec.knownfailureif(sys.platform.startswith('win') or
(sys.platform == "darwin" and "powerpc" in platform.processor()),
"See ticket 1755")
def test_floating_exceptions_power(self):
"""Test basic arithmetic function errors"""
oldsettings = np.seterr(all='raise')
try:
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
overflow = 'overflow'
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
finally:
np.seterr(**oldsettings)
class TestTypes(TestCase):
def check_promotion_cases(self, promote_func):
"""Tests that the scalars get coerced correctly."""
b = np.bool_(0)
i8, i16, i32, i64 = int8(0), int16(0), int32(0), int64(0)
u8, u16, u32, u64 = uint8(0), uint16(0), uint32(0), uint64(0)
f32, f64, fld = float32(0), float64(0), longdouble(0)
c64, c128, cld = complex64(0), complex128(0), clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8,i16), np.dtype(int16))
assert_equal(promote_func(i32,i8), np.dtype(int32))
assert_equal(promote_func(i16,i64), np.dtype(int64))
assert_equal(promote_func(u8,u32), np.dtype(uint32))
assert_equal(promote_func(f32,f64), np.dtype(float64))
assert_equal(promote_func(fld,f32), np.dtype(longdouble))
assert_equal(promote_func(f64,fld), np.dtype(longdouble))
assert_equal(promote_func(c128,c64), np.dtype(complex128))
assert_equal(promote_func(cld,c128), np.dtype(clongdouble))
assert_equal(promote_func(c64,fld), np.dtype(clongdouble))
# coercion between kinds
assert_equal(promote_func(b,i32), np.dtype(int32))
assert_equal(promote_func(b,u8), np.dtype(uint8))
assert_equal(promote_func(i8,u8), np.dtype(int16))
assert_equal(promote_func(u8,i32), np.dtype(int32))
assert_equal(promote_func(i64,u32), np.dtype(int64))
assert_equal(promote_func(u64,i32), np.dtype(float64))
assert_equal(promote_func(i32,f32), np.dtype(float64))
assert_equal(promote_func(i64,f32), np.dtype(float64))
assert_equal(promote_func(f32,i16), np.dtype(float32))
assert_equal(promote_func(f32,u32), np.dtype(float64))
assert_equal(promote_func(f32,c64), np.dtype(complex64))
assert_equal(promote_func(c128,f32), np.dtype(complex128))
assert_equal(promote_func(cld,f64), np.dtype(clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(array([b]),i8), np.dtype(int8))
assert_equal(promote_func(array([b]),u8), np.dtype(uint8))
assert_equal(promote_func(array([b]),i32), np.dtype(int32))
assert_equal(promote_func(array([b]),u32), np.dtype(uint32))
assert_equal(promote_func(array([i8]),i64), np.dtype(int8))
assert_equal(promote_func(u64,array([i32])), np.dtype(int32))
assert_equal(promote_func(i64,array([u32])), np.dtype(uint32))
assert_equal(promote_func(int32(-1),array([u64])), np.dtype(float64))
assert_equal(promote_func(f64,array([f32])), np.dtype(float32))
assert_equal(promote_func(fld,array([f32])), np.dtype(float32))
assert_equal(promote_func(array([f64]),fld), np.dtype(float64))
assert_equal(promote_func(fld,array([c64])), np.dtype(complex64))
assert_equal(promote_func(c64,array([f64])), np.dtype(complex128))
assert_equal(promote_func(complex64(3j),array([f64])),
np.dtype(complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(array([b]),f64), np.dtype(float64))
assert_equal(promote_func(array([b]),i64), np.dtype(int64))
assert_equal(promote_func(array([b]),u64), np.dtype(uint64))
assert_equal(promote_func(array([i8]),f64), np.dtype(float64))
assert_equal(promote_func(array([u16]),f64), np.dtype(float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(array([u16]), i32), np.dtype(uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(array([f32]),c128), np.dtype(complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True,False]), np.array([-3,12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
## Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
## a float32, shouldn't promote to float64
#a = np.array([1.0, 1.5], dtype=np.float32)
#t = np.array([True, False])
#b = t*a
#assert_equal(b, [1.0, 0.0])
#assert_equal(b.dtype, np.dtype('f4'))
#b = (1-t)*a
#assert_equal(b, [0.0, 1.5])
#assert_equal(b.dtype, np.dtype('f4'))
## Probably ~t (bitwise negation) is more proper to use here,
## but this is arguably less intuitive to understand at a glance, and
## would fail if 't' is actually an integer array instead of boolean:
#b = (~t)*a
#assert_equal(b, [0.0, 1.5])
#assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, np.complex))
assert_(not np.can_cast(np.complex, np.float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S4'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
class TestFromiter(TestCase):
def makegen(self):
for x in xrange(24):
yield x**2
def test_types(self):
ai32 = fromiter(self.makegen(), int32)
ai64 = fromiter(self.makegen(), int64)
af = fromiter(self.makegen(), float)
self.assertTrue(ai32.dtype == dtype(int32))
self.assertTrue(ai64.dtype == dtype(int64))
self.assertTrue(af.dtype == dtype(float))
def test_lengths(self):
expected = array(list(self.makegen()))
a = fromiter(self.makegen(), int)
a20 = fromiter(self.makegen(), int, 20)
self.assertTrue(len(a) == len(expected))
self.assertTrue(len(a20) == 20)
try:
fromiter(self.makegen(), int, len(expected) + 10)
except ValueError:
pass
else:
self.fail()
def test_values(self):
expected = array(list(self.makegen()))
a = fromiter(self.makegen(), int)
a20 = fromiter(self.makegen(), int, 20)
self.assertTrue(alltrue(a == expected,axis=0))
self.assertTrue(alltrue(a20 == expected[:20],axis=0))
class TestNonzero(TestCase):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(array([])), 0)
assert_equal(np.nonzero(array([])), ([],))
assert_equal(np.count_nonzero(array(0)), 0)
assert_equal(np.nonzero(array(0)), ([],))
assert_equal(np.count_nonzero(array(1)), 1)
assert_equal(np.nonzero(array(1)), ([0],))
def test_nonzero_onedim(self):
x = array([1,0,2,-1,0,0,8])
assert_equal(np.count_nonzero(x), 4)
assert_equal(np.nonzero(x), ([0, 2, 3, 6],))
x = array([(1,2),(0,0),(1,1),(-1,3),(0,7)],
dtype=[('a','i4'),('b','i2')])
assert_equal(np.count_nonzero(x['a']), 3)
assert_equal(np.count_nonzero(x['b']), 4)
assert_equal(np.nonzero(x['a']), ([0,2,3],))
assert_equal(np.nonzero(x['b']), ([0,2,3,4],))
def test_nonzero_twodim(self):
x = array([[0,1,0],[2,0,3]])
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0,1,1],[1,0,2]))
x = np.eye(3)
assert_equal(np.count_nonzero(x), 3)
assert_equal(np.nonzero(x), ([0,1,2],[0,1,2]))
x = array([[(0,1),(0,0),(1,11)],
[(1,1),(1,0),(0,0)],
[(0,0),(1,5),(0,1)]], dtype=[('a','f4'),('b','u1')])
assert_equal(np.count_nonzero(x['a']), 4)
assert_equal(np.count_nonzero(x['b']), 5)
assert_equal(np.nonzero(x['a']), ([0,1,1,2],[2,0,1,1]))
assert_equal(np.nonzero(x['b']), ([0,0,1,2,2],[0,2,0,1,2]))
assert_equal(np.count_nonzero(x['a'].T), 4)
assert_equal(np.count_nonzero(x['b'].T), 5)
assert_equal(np.nonzero(x['a'].T), ([0,1,1,2],[1,1,2,0]))
assert_equal(np.nonzero(x['b'].T), ([0,0,1,2,2],[0,1,2,0,2]))
class TestIndex(TestCase):
def test_boolean(self):
a = rand(3,5,8)
V = rand(5,8)
g1 = randint(0,5,size=15)
g2 = randint(0,8,size=15)
V[g1,g2] = -V[g1,g2]
assert_((array([a[0][V>0],a[1][V>0],a[2][V>0]]) == a[:,V>0]).all())
class TestBinaryRepr(TestCase):
def test_zero(self):
assert_equal(binary_repr(0),'0')
def test_large(self):
assert_equal(binary_repr(10736848),'101000111101010011010000')
def test_negative(self):
assert_equal(binary_repr(-1), '-1')
assert_equal(binary_repr(-1, width=8), '11111111')
class TestBaseRepr(TestCase):
def test_base3(self):
assert_equal(base_repr(3**5, 3), '100000')
def test_positive(self):
assert_equal(base_repr(12, 10), '12')
assert_equal(base_repr(12, 10, 4), '000012')
assert_equal(base_repr(12, 4), '30')
assert_equal(base_repr(3731624803700888, 36), '10QR0ROFCEW')
def test_negative(self):
assert_equal(base_repr(-12, 10), '-12')
assert_equal(base_repr(-12, 10, 4), '-000012')
assert_equal(base_repr(-12, 4), '-30')
class TestArrayComparisons(TestCase):
def test_array_equal(self):
res = array_equal(array([1,2]), array([1,2]))
assert_(res)
assert_(type(res) is bool)
res = array_equal(array([1,2]), array([1,2,3]))
assert_(not res)
assert_(type(res) is bool)
res = array_equal(array([1,2]), array([3,4]))
assert_(not res)
assert_(type(res) is bool)
res = array_equal(array([1,2]), array([1,3]))
assert_(not res)
assert_(type(res) is bool)
def test_array_equiv(self):
res = array_equiv(array([1,2]), array([1,2]))
assert_(res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([1,2,3]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([3,4]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([1,3]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,1]), array([1]))
assert_(res)
assert_(type(res) is bool)
res = array_equiv(array([1,1]), array([[1],[1]]))
assert_(res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([2]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([[1],[2]]))
assert_(not res)
assert_(type(res) is bool)
res = array_equiv(array([1,2]), array([[1,2,3],[4,5,6],[7,8,9]]))
assert_(not res)
assert_(type(res) is bool)
def assert_array_strict_equal(x, y):
assert_array_equal(x, y)
# Check flags
assert_(x.flags == y.flags)
# check endianness
assert_(x.dtype.isnative == y.dtype.isnative)
class TestClip(TestCase):
def setUp(self):
self.nr = 5
self.nc = 3
def fastclip(self, a, m, M, out=None):
if out is None:
return a.clip(m,M)
else:
return a.clip(m,M,out)
def clip(self, a, m, M, out=None):
# use slow-clip
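        # selector is 0 where m <= a <= M, 1 where a < m and 2 where a > M,
        # so choose() picks from (a, m, M) element-wise -- a slow reference
        # implementation to compare against ndarray.clip.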
selector = less(a, m)+2*greater(a, M)
return selector.choose((a, m, M), out=out)
# Handy functions
def _generate_data(self, n, m):
return randn(n, m)
def _generate_data_complex(self, n, m):
return randn(n, m) + 1.j *rand(n, m)
def _generate_flt_data(self, n, m):
return (randn(n, m)).astype(float32)
def _neg_byteorder(self, a):
a = asarray(a)
if sys.byteorder == 'little':
a = a.astype(a.dtype.newbyteorder('>'))
else:
a = a.astype(a.dtype.newbyteorder('<'))
return a
def _generate_non_native_data(self, n, m):
data = randn(n, m)
data = self._neg_byteorder(data)
assert_(not data.dtype.isnative)
return data
def _generate_int_data(self, n, m):
return (10 * rand(n, m)).astype(int64)
def _generate_int32_data(self, n, m):
return (10 * rand(n, m)).astype(int32)
# Now the real test cases
def test_simple_double(self):
"""Test native double input with scalar min/max."""
a = self._generate_data(self.nr, self.nc)
m = 0.1
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_int(self):
"""Test native int input with scalar min/max."""
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_array_double(self):
"""Test native double input with array min/max."""
a = self._generate_data(self.nr, self.nc)
m = zeros(a.shape)
M = m + 0.5
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_simple_nonnative(self):
"""Test non native double input with scalar min/max.
Test native double input with non native double scalar min/max."""
a = self._generate_non_native_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
"Test native double input with non native double scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = self._neg_byteorder(0.6)
assert_(not M.dtype.isnative)
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_equal(ac, act)
def test_simple_complex(self):
"""Test native complex input with native double scalar min/max.
Test native input with complex double scalar min/max.
"""
a = 3 * self._generate_data_complex(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
"Test native input with complex double scalar min/max."
a = 3 * self._generate_data(self.nr, self.nc)
m = -0.5 + 1.j
M = 1. + 2.j
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_clip_non_contig(self):
"""Test clip for non contiguous native input and native scalar min/max."""
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = self.fastclip(a, -1.6, 1.7)
act = self.clip(a, -1.6, 1.7)
assert_array_strict_equal(ac, act)
def test_simple_out(self):
"""Test native double input with scalar min/max."""
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = zeros(a.shape)
act = zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_inout(self):
"""Test native int32 input with double min/max and int32 out."""
a = self._generate_int32_data(self.nr, self.nc)
m = float64(0)
M = float64(2)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_out(self):
"""Test native int32 input with int32 scalar min/max and int64 out."""
a = self._generate_int32_data(self.nr, self.nc)
m = int32(-1)
M = int32(1)
ac = zeros(a.shape, dtype = int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int64_inout(self):
"""Test native int32 input with double array min/max and int32 out."""
a = self._generate_int32_data(self.nr, self.nc)
m = zeros(a.shape, float64)
M = float64(1)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_int32_out(self):
"""Test native double input with scalar min/max and int out."""
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_simple_inplace_01(self):
"""Test native double input with array min/max in-place."""
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_simple_inplace_02(self):
"""Test native double input with scalar min/max in-place."""
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
"""Test non contiguous double input with double scalar min/max in-place."""
a = self._generate_data(self.nr * 2, self.nc * 3)
a = a[::2, ::3]
assert_(not a.flags['F_CONTIGUOUS'])
assert_(not a.flags['C_CONTIGUOUS'])
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
"Test native double input with scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_02(self):
"Test native int32 input with int32 scalar min/max."
a = self._generate_int_data(self.nr, self.nc)
a = a.astype(int32)
m = -2
M = 4
ac = self.fastclip(a, m, M)
act = self.clip(a, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_03(self):
"Test native int32 input with float64 scalar min/max."
a = self._generate_int32_data(self.nr, self.nc)
m = -2
M = 4
ac = self.fastclip(a, float64(m), float64(M))
act = self.clip(a, float64(m), float64(M))
assert_array_strict_equal(ac, act)
def test_type_cast_04(self):
"Test native int32 input with float32 scalar min/max."
a = self._generate_int32_data(self.nr, self.nc)
m = float32(-2)
M = float32(4)
act = self.fastclip(a,m,M)
ac = self.clip(a,m,M)
assert_array_strict_equal(ac, act)
def test_type_cast_05(self):
"Test native int32 with double arrays min/max."
a = self._generate_int_data(self.nr, self.nc)
m = -0.5
M = 1.
ac = self.fastclip(a, m * zeros(a.shape), M)
act = self.clip(a, m * zeros(a.shape), M)
assert_array_strict_equal(ac, act)
def test_type_cast_06(self):
"Test native with NON native scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = 0.5
m_s = self._neg_byteorder(m)
M = 1.
act = self.clip(a, m_s, M)
ac = self.fastclip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_07(self):
"Test NON native with native array min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5 * ones(a.shape)
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
act = a_s.clip(m, M)
ac = self.fastclip(a_s, m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_08(self):
"Test NON native with native scalar min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 1.
a_s = self._neg_byteorder(a)
assert_(not a_s.dtype.isnative)
ac = self.fastclip(a_s, m, M)
act = a_s.clip(m, M)
assert_array_strict_equal(ac, act)
def test_type_cast_09(self):
"Test native with NON native array min/max."
a = self._generate_data(self.nr, self.nc)
m = -0.5 * ones(a.shape)
M = 1.
m_s = self._neg_byteorder(m)
assert_(not m_s.dtype.isnative)
ac = self.fastclip(a, m_s, M)
act = self.clip(a, m_s, M)
assert_array_strict_equal(ac, act)
def test_type_cast_10(self):
"""Test native int32 with float min/max and float out for output argument."""
a = self._generate_int_data(self.nr, self.nc)
b = zeros(a.shape, dtype = float32)
m = float32(-0.5)
M = float32(1)
act = self.clip(a, m, M, out = b)
ac = self.fastclip(a, m, M, out = b)
assert_array_strict_equal(ac, act)
def test_type_cast_11(self):
"Test non native with native scalar, min/max, out non native"
a = self._generate_non_native_data(self.nr, self.nc)
b = a.copy()
b = b.astype(b.dtype.newbyteorder('>'))
bt = b.copy()
m = -0.5
M = 1.
self.fastclip(a, m, M, out = b)
self.clip(a, m, M, out = bt)
assert_array_strict_equal(b, bt)
def test_type_cast_12(self):
"Test native int32 input and min/max and float out"
a = self._generate_int_data(self.nr, self.nc)
b = zeros(a.shape, dtype = float32)
m = int32(0)
M = int32(1)
act = self.clip(a, m, M, out = b)
ac = self.fastclip(a, m, M, out = b)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple(self):
"Test native double input with scalar min/max"
a = self._generate_data(self.nr, self.nc)
m = -0.5
M = 0.6
ac = zeros(a.shape)
act = zeros(a.shape)
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple2(self):
"Test native int32 input with double min/max and int32 out"
a = self._generate_int32_data(self.nr, self.nc)
m = float64(0)
M = float64(2)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_simple_int32(self):
"Test native int32 input with int32 scalar min/max and int64 out"
a = self._generate_int32_data(self.nr, self.nc)
m = int32(-1)
M = int32(1)
ac = zeros(a.shape, dtype = int64)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_int32(self):
"Test native int32 input with double array min/max and int32 out"
a = self._generate_int32_data(self.nr, self.nc)
m = zeros(a.shape, float64)
M = float64(1)
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_with_out_array_outint32(self):
"Test native double input with scalar min/max and int out"
a = self._generate_data(self.nr, self.nc)
m = -1.0
M = 2.0
ac = zeros(a.shape, dtype = int32)
act = ac.copy()
self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
def test_clip_inplace_array(self):
"Test native double input with array min/max"
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = zeros(a.shape)
M = 1.0
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_inplace_simple(self):
"Test native double input with scalar min/max"
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a, ac)
def test_clip_func_takes_out(self):
""" Ensure that the clip() function takes an out= argument.
"""
a = self._generate_data(self.nr, self.nc)
ac = a.copy()
m = -0.5
M = 0.6
a2 = clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
self.assertTrue(a2 is a)
class TestAllclose:
rtol = 1e-5
atol = 1e-8
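# allclose(x, y) checks abs(x - y) <= atol + rtol * abs(y) elementwise,
# which is why the data sets below are built to straddle the atol/rtol
# boundaries.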
def tst_allclose(self,x,y):
assert_(allclose(x,y), "%s and %s not close" % (x,y))
def tst_not_allclose(self,x,y):
assert_(not allclose(x,y), "%s and %s shouldn't be close" % (x,y))
def test_ip_allclose(self):
"""Parametric test factory."""
arr = array([100,1000])
aran = arange(125).reshape((5,5,5))
atol = self.atol
rtol = self.rtol
data = [([1,0], [1,0]),
([atol], [0]),
([1], [1+rtol+atol]),
(arr, arr + arr*rtol),
(arr, arr + arr*rtol + atol*2),
(aran, aran + aran*rtol),
(inf, inf),
(inf, [inf])]
for (x,y) in data:
yield (self.tst_allclose,x,y)
def test_ip_not_allclose(self):
"""Parametric test factory."""
aran = arange(125).reshape((5,5,5))
atol = self.atol
rtol = self.rtol
data = [([inf,0], [1,inf]),
([inf,0], [1,0]),
([inf,inf], [1,inf]),
([inf,inf], [1,0]),
([-inf, 0], [inf, 0]),
([nan,0], [nan,0]),
([atol*2], [0]),
([1], [1+rtol+atol*2]),
(aran, aran + aran*atol + atol*2),
(array([inf,1]), array([0,inf]))]
for (x,y) in data:
yield (self.tst_not_allclose,x,y)
def test_no_parameter_modification(self):
x = array([inf,1])
y = array([0,inf])
allclose(x,y)
assert_array_equal(x,array([inf,1]))
assert_array_equal(y,array([0,inf]))
class TestStdVar(TestCase):
def setUp(self):
self.A = array([1,-1,1,-1])
self.real_var = 1
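# [1, -1, 1, -1] has mean 0, so its population variance is exactly 1;
# var/std with ddof=d rescale this by len(A) / (len(A) - d), which the
# ddof tests below check against that closed form.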
def test_basic(self):
assert_almost_equal(var(self.A),self.real_var)
assert_almost_equal(std(self.A)**2,self.real_var)
def test_ddof1(self):
assert_almost_equal(var(self.A,ddof=1),
self.real_var*len(self.A)/float(len(self.A)-1))
assert_almost_equal(std(self.A,ddof=1)**2,
self.real_var*len(self.A)/float(len(self.A)-1))
def test_ddof2(self):
assert_almost_equal(var(self.A,ddof=2),
self.real_var*len(self.A)/float(len(self.A)-2))
assert_almost_equal(std(self.A,ddof=2)**2,
self.real_var*len(self.A)/float(len(self.A)-2))
class TestStdVarComplex(TestCase):
def test_basic(self):
A = array([1,1.j,-1,-1.j])
real_var = 1
assert_almost_equal(var(A),real_var)
assert_almost_equal(std(A)**2,real_var)
class TestLikeFuncs(TestCase):
'''Test ones_like, zeros_like, and empty_like'''
def setUp(self):
self.data = [
# Array scalars
(array(3.), None),
(array(3), 'f8'),
# 1D arrays
(arange(6, dtype='f4'), None),
(arange(6), 'c16'),
# 2D C-layout arrays
(arange(6).reshape(2,3), None),
(arange(6).reshape(3,2), 'i1'),
# 2D F-layout arrays
(arange(6).reshape((2,3), order='F'), None),
(arange(6).reshape((3,2), order='F'), 'i1'),
# 3D C-layout arrays
(arange(24).reshape(2,3,4), None),
(arange(24).reshape(4,3,2), 'f4'),
# 3D F-layout arrays
(arange(24).reshape((2,3,4), order='F'), None),
(arange(24).reshape((4,3,2), order='F'), 'f4'),
# 3D non-C/F-layout arrays
(arange(24).reshape(2,3,4).swapaxes(0,1), None),
(arange(24).reshape(4,3,2).swapaxes(0,1), '?'),
]
def check_like_function(self, like_function, value):
for d, dtype in self.data:
# default (K) order, dtype
dz = like_function(d, dtype=dtype)
assert_equal(dz.shape, d.shape)
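# K order should preserve the input's memory layout; cross-multiplying
# each array's strides by the other's itemsize compares element strides
# without division, so differing dtypes still match.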
assert_equal(array(dz.strides)*d.dtype.itemsize,
array(d.strides)*dz.dtype.itemsize)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
if value is not None:
assert_(all(dz == value))
# C order, default dtype
dz = like_function(d, order='C', dtype=dtype)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
if value is not None:
assert_(all(dz == value))
# F order, default dtype
dz = like_function(d, order='F', dtype=dtype)
assert_equal(dz.shape, d.shape)
assert_(dz.flags.f_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
if value is not None:
assert_(all(dz == value))
# A order
dz = like_function(d, order='A', dtype=dtype)
assert_equal(dz.shape, d.shape)
if d.flags.f_contiguous:
assert_(dz.flags.f_contiguous)
else:
assert_(dz.flags.c_contiguous)
if dtype is None:
assert_equal(dz.dtype, d.dtype)
else:
assert_equal(dz.dtype, np.dtype(dtype))
if value is not None:
assert_(all(dz == value))
# Test the 'subok' parameter
a = np.matrix([[1,2],[3,4]])
b = like_function(a)
assert_(type(b) is np.matrix)
b = like_function(a, subok=False)
assert_(not (type(b) is np.matrix))
def test_ones_like(self):
self.check_like_function(np.ones_like, 1)
def test_zeros_like(self):
self.check_like_function(np.zeros_like, 0)
def test_empty_like(self):
self.check_like_function(np.empty_like, None)
class _TestCorrelate(TestCase):
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.y = np.array([-1, -2, -3], dtype=dt)
self.z1 = np.array([ -3., -8., -14., -20., -26., -14., -5.], dtype=dt)
self.z2 = np.array([ -5., -14., -26., -20., -14., -8., -3.], dtype=dt)
def test_float(self):
self._setup(np.float)
z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z2)
def test_object(self):
self._setup(Decimal)
z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z2)
class TestCorrelate(_TestCorrelate):
old_behavior = True
def _setup(self, dt):
# The old correlate behavior uses an unconventional definition under which
# correlate(a, b) == correlate(b, a), so force the corresponding expected
# outputs to be the same as well.
_TestCorrelate._setup(self, dt)
self.z2 = self.z1
@dec.deprecated()
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
y = np.array([-1, -2j, 3+1j], dtype=np.complex)
r_z = np.array([3+1j, 6, 8-1j, 9+1j, -1-8j, -4-1j], dtype=np.complex)
z = np.correlate(x, y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, r_z)
@dec.deprecated()
def test_float(self):
_TestCorrelate.test_float(self)
@dec.deprecated()
def test_object(self):
_TestCorrelate.test_object(self)
class TestCorrelateNew(_TestCorrelate):
old_behavior = False
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
y = np.array([-1, -2j, 3+1j], dtype=np.complex)
r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex)
#z = np.acorrelate(x, y, 'full')
#assert_array_almost_equal(z, r_z)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, r_z)
class TestArgwhere:
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
[[0, 2],
[1, 0],
[1, 1],
[1, 2]])
def test_list(self):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
class TestStringFunction:
def test_set_string_function(self):
a = np.array([1])
np.set_string_function(lambda x: "FOO", repr=True)
assert_equal(repr(a), "FOO")
np.set_string_function(None, repr=True)
assert_equal(repr(a), "array([1])")
np.set_string_function(lambda x: "FOO", repr=False)
assert_equal(str(a), "FOO")
np.set_string_function(None, repr=False)
assert_equal(str(a), "[1]")
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
aparo/django-nonrel | django/contrib/syndication/views.py | 20 | 8227 | import datetime
from django.conf import settings
from django.contrib.sites.models import Site, RequestSite
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import HttpResponse, Http404
from django.template import loader, Template, TemplateDoesNotExist, RequestContext
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import force_unicode, iri_to_uri, smart_unicode
from django.utils.html import escape
def add_domain(domain, url):
if not (url.startswith('http://')
or url.startswith('https://')
or url.startswith('mailto:')):
# 'url' must already be ASCII and URL-quoted, so no need for encoding
# conversions here.
url = iri_to_uri(u'http://%s%s' % (domain, url))
return url
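# A quick sketch of the behavior (hypothetical values): add_domain('example.com',
# '/feed/1/') returns u'http://example.com/feed/1/', while URLs that already
# carry an http://, https:// or mailto: scheme pass through unchanged.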
class FeedDoesNotExist(ObjectDoesNotExist):
pass
class Feed(object):
feed_type = feedgenerator.DefaultFeed
title_template = None
description_template = None
def __call__(self, request, *args, **kwargs):
try:
obj = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
raise Http404('Feed object does not exist.')
feedgen = self.get_feed(obj, request)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response
def item_title(self, item):
# Titles should be double escaped by default (see #6533)
return escape(force_unicode(item))
def item_description(self, item):
return force_unicode(item)
def item_link(self, item):
try:
return item.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured('Give your %s class a get_absolute_url() method, or define an item_link() method in your Feed class.' % item.__class__.__name__)
def __get_dynamic_attr(self, attname, obj, default=None):
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr):
# Check func_code.co_argcount rather than try/excepting the
# function and catching the TypeError, because something inside
# the function may raise the TypeError. This technique is more
# accurate.
if hasattr(attr, 'func_code'):
argcount = attr.func_code.co_argcount
else:
argcount = attr.__call__.func_code.co_argcount
if argcount == 2: # one argument is 'self'
return attr(obj)
else:
return attr()
return attr
def feed_extra_kwargs(self, obj):
"""
Returns an extra keyword arguments dictionary that is used when
initializing the feed generator.
"""
return {}
def item_extra_kwargs(self, item):
"""
Returns an extra keyword arguments dictionary that is used with
the `add_item` call of the feed generator.
"""
return {}
def get_object(self, request, *args, **kwargs):
return None
def get_feed(self, obj, request):
"""
Returns a feedgenerator.DefaultFeed object, fully populated, for
this feed. Raises FeedDoesNotExist for invalid parameters.
"""
if Site._meta.installed:
current_site = Site.objects.get_current()
else:
current_site = RequestSite(request)
link = self.__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link)
feed = self.feed_type(
title = self.__get_dynamic_attr('title', obj),
subtitle = self.__get_dynamic_attr('subtitle', obj),
link = link,
description = self.__get_dynamic_attr('description', obj),
language = settings.LANGUAGE_CODE.decode(),
feed_url = add_domain(current_site.domain,
self.__get_dynamic_attr('feed_url', obj) or request.path),
author_name = self.__get_dynamic_attr('author_name', obj),
author_link = self.__get_dynamic_attr('author_link', obj),
author_email = self.__get_dynamic_attr('author_email', obj),
categories = self.__get_dynamic_attr('categories', obj),
feed_copyright = self.__get_dynamic_attr('feed_copyright', obj),
feed_guid = self.__get_dynamic_attr('feed_guid', obj),
ttl = self.__get_dynamic_attr('ttl', obj),
**self.feed_extra_kwargs(obj)
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = loader.get_template(self.title_template)
except TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = loader.get_template(self.description_template)
except TemplateDoesNotExist:
pass
for item in self.__get_dynamic_attr('items', obj):
if title_tmp is not None:
title = title_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
title = self.__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(RequestContext(request, {'obj': item, 'site': current_site}))
else:
description = self.__get_dynamic_attr('item_description', item)
link = add_domain(current_site.domain, self.__get_dynamic_attr('item_link', item))
enc = None
enc_url = self.__get_dynamic_attr('item_enclosure_url', item)
if enc_url:
enc = feedgenerator.Enclosure(
url = smart_unicode(enc_url),
length = smart_unicode(self.__get_dynamic_attr('item_enclosure_length', item)),
mime_type = smart_unicode(self.__get_dynamic_attr('item_enclosure_mime_type', item))
)
author_name = self.__get_dynamic_attr('item_author_name', item)
if author_name is not None:
author_email = self.__get_dynamic_attr('item_author_email', item)
author_link = self.__get_dynamic_attr('item_author_link', item)
else:
author_email = author_link = None
pubdate = self.__get_dynamic_attr('item_pubdate', item)
if pubdate and not pubdate.tzinfo:
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self.__get_dynamic_attr('item_guid', item, link),
enclosure = enc,
pubdate = pubdate,
author_name = author_name,
author_email = author_email,
author_link = author_link,
categories = self.__get_dynamic_attr('item_categories', item),
item_copyright = self.__get_dynamic_attr('item_copyright', item),
**self.item_extra_kwargs(item)
)
return feed
def feed(request, url, feed_dict=None):
"""Provided for backwards compatibility."""
import warnings
warnings.warn('The syndication feed() view is deprecated. Please use the '
'new class based view API.',
category=PendingDeprecationWarning)
if not feed_dict:
raise Http404("No feeds are registered.")
try:
slug, param = url.split('/', 1)
except ValueError:
slug, param = url, ''
try:
f = feed_dict[slug]
except KeyError:
raise Http404("Slug %r isn't registered." % slug)
try:
feedgen = f(slug, request).get_feed(param)
except FeedDoesNotExist:
raise Http404("Invalid feed parameters. Slug %r is valid, but other parameters, or lack thereof, are not." % slug)
response = HttpResponse(mimetype=feedgen.mime_type)
feedgen.write(response, 'utf-8')
return response
| bsd-3-clause |
ngpestelos/ansible-for-devops | dynamic-inventory/custom/inventory.py | 7 | 2005 | #!/usr/bin/env python
'''
Example custom dynamic inventory script for Ansible, in Python.
'''
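# Typical invocation (assumed, matching read_cli_args below): Ansible runs
# `inventory.py --list` for the full inventory and `inventory.py --host <name>`
# per host, e.g. `ansible all -i inventory.py -m ping`.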
import os
import sys
import argparse
try:
import json
except ImportError:
import simplejson as json
class ExampleInventory(object):
def __init__(self):
self.inventory = {}
self.read_cli_args()
# Called with `--list`.
if self.args.list:
self.inventory = self.example_inventory()
# Called with `--host [hostname]`.
elif self.args.host:
# Not implemented, since we return _meta info with `--list`.
self.inventory = self.empty_inventory()
# If no groups or vars are present, return an empty inventory.
else:
self.inventory = self.empty_inventory()
print json.dumps(self.inventory)
# Example inventory for testing.
def example_inventory(self):
return {
'group': {
'hosts': ['192.168.28.71', '192.168.28.72'],
'vars': {
'ansible_ssh_user': 'vagrant',
'ansible_ssh_private_key_file':
'~/.vagrant.d/insecure_private_key',
'example_variable': 'value'
}
},
'_meta': {
'hostvars': {
'192.168.28.71': {
'host_specific_var': 'foo'
},
'192.168.28.72': {
'host_specific_var': 'bar'
}
}
}
}
# Empty inventory for testing.
def empty_inventory(self):
return {'_meta': {'hostvars': {}}}
# Read the command line args passed to the script.
def read_cli_args(self):
parser = argparse.ArgumentParser()
parser.add_argument('--list', action = 'store_true')
parser.add_argument('--host', action = 'store')
self.args = parser.parse_args()
# Get the inventory.
ExampleInventory()
| mit |
seadsystem/website | web2py/scripts/standalone_exe_cxfreeze.py | 38 | 1722 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
Install cx_Freeze: http://cx-freeze.sourceforge.net/
Copy script to the web2py directory
c:\Python27\python standalone_exe_cxfreeze.py build_exe
"""
from cx_Freeze import setup, Executable
from gluon.import_all import base_modules, contributed_modules
from gluon.fileutils import readlines_file
from glob import glob
import fnmatch
import os
import shutil
import sys
import re
#read web2py version from VERSION file
web2py_version_line = readlines_file('VERSION')[0]
#use regular expression to get just the version number
v_re = re.compile('[0-9]+\.[0-9]+\.[0-9]+')
web2py_version = v_re.search(web2py_version_line).group(0)
base = None
if sys.platform == 'win32':
base = "Win32GUI"
base_modules.remove('macpath')
buildOptions = dict(
compressed=True,
excludes=["macpath", "PyQt4"],
includes=base_modules,
include_files=[
'applications',
'ABOUT',
'LICENSE',
'VERSION',
'logging.example.conf',
'options_std.py',
'app.example.yaml',
'queue.example.yaml',
],
# append any extra module by extending the list below -
# "contributed_modules+["lxml"]"
packages=contributed_modules,
)
setup(
name="Web2py",
version=web2py_version,
author="Massimo DiPierro",
description="web2py web framework",
license="LGPL v3",
options=dict(build_exe=buildOptions),
executables=[Executable("web2py.py",
base=base,
compress=True,
icon="web2py.ico",
targetName="web2py.exe",
copyDependentFiles=True)],
)
| mit |
jhaux/tensorflow | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows the description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
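# Each 2x2, stride-2 pool halves the spatial dimensions, so a 28x28 MNIST
# image becomes 14x14 after the first pool and 7x7 after the second;
# hence the 7 * 7 * 64 reshape in conv_model below.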
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10), with
# an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to a 4d tensor with the 2nd and 3rd dimensions being
# image width and height, and the final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
windmill/windmill | windmill/dep/_simplejson/scanner.py | 6 | 2130 | """JSON token scanner
"""
import re
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
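# Example of what NUMBER_RE captures (sketch): matching '-3.14e10' yields
# the groups ('-3', '.14', 'e10'); the fraction and exponent groups are
# None for plain integers such as '42'.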
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
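# With c_make_scanner pinned to None above, the `or` always selects the
# pure-Python scanner here; upstream simplejson would import the C
# extension speedup when available.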
| apache-2.0 |
jnishi/chainer | tests/chainer_tests/distributions_tests/test_bernoulli.py | 2 | 5265 | import unittest
import numpy
from chainer.backends import cuda
from chainer import distributions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), (1,)],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
'extreme_values': [True, False],
'binary_check': [True, False],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestBernoulli(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.Bernoulli
self.scipy_dist = stats.bernoulli
self.options = {"binary_check": self.binary_check}
self.test_targets = set([
"batch_shape", "entropy", "log_prob", "mean", "prob", "sample",
"stddev", "support", "variance"])
if self.extreme_values:
p = numpy.random.randint(0, 2, self.shape).astype(numpy.float32)
else:
p = numpy.random.uniform(0, 1, self.shape).astype(numpy.float32)
self.params = {"p": p}
self.scipy_params = {"p": p}
self.support = '{0, 1}'
self.continuous = False
self.old_settings = None
if self.extreme_values:
self.old_settings = numpy.seterr(divide='ignore', invalid='ignore')
def tearDown(self):
if self.old_settings is not None:
numpy.seterr(**self.old_settings)
def sample_for_test(self):
smp = numpy.random.randint(
2, size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
def sample_for_binary_check_test(self):
smp = numpy.random.uniform(
low=0.1, high=0.9,
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
def check_log_prob_binary_check(self, is_gpu):
smp = self.sample_for_binary_check_test()
if is_gpu:
log_prob = self.gpu_dist.log_prob(cuda.to_gpu(smp)).data
else:
log_prob = self.cpu_dist.log_prob(smp).data
xp = cuda.get_array_module(log_prob)
if self.binary_check:
self.assertTrue(xp.all(log_prob == -xp.inf))
else:
self.assertTrue(xp.all(xp.isfinite(log_prob)))
def test_log_prob_binary_check_cpu(self):
self.check_log_prob_binary_check(False)
@attr.gpu
def test_log_prob_binary_check_gpu(self):
self.check_log_prob_binary_check(True)
def check_prob_binary_check(self, is_gpu):
smp = self.sample_for_binary_check_test()
if is_gpu:
prob = self.gpu_dist.prob(cuda.to_gpu(smp)).data
else:
prob = self.cpu_dist.prob(smp).data
xp = cuda.get_array_module(prob)
if self.binary_check:
self.assertTrue(xp.all(prob == 0))
else:
self.assertTrue(xp.all(prob > 0))
def test_prob_binary_check_cpu(self):
self.check_prob_binary_check(False)
@attr.gpu
def test_prob_binary_check_gpu(self):
self.check_prob_binary_check(True)
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'dtype': [numpy.float32, numpy.float64],
}))
class TestBernoulliLogProb(unittest.TestCase):
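# For x in {0, 1} the Bernoulli log-likelihood is
# log p(x) = x * log(p) + (1 - x) * log(1 - p), which in terms of the
# logit l (with p = sigmoid(l)) equals -softplus((1 - 2x) * l). This is a
# standard identity; the chainer implementation under test may compute it
# differently internally.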
def setUp(self):
self.logit = numpy.random.normal(size=self.shape).astype(self.dtype)
self.x = numpy.random.randint(0, 2, size=self.shape).astype(self.dtype)
self.gy = numpy.random.normal(size=self.shape).astype(self.dtype)
self.ggx = numpy.random.normal(size=self.shape).astype(self.dtype)
self.backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def check_forward(self, logit_data, x_data):
distributions.bernoulli._bernoulli_log_prob(logit_data, x_data)
def test_forward_cpu(self):
self.check_forward(self.logit, self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.logit), cuda.to_gpu(self.x))
def check_backward(self, logit_data, x_data, y_grad):
def f(logit):
return distributions.bernoulli._bernoulli_log_prob(
logit, x_data)
gradient_check.check_backward(
f, logit_data, y_grad, **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.logit, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.logit), cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
def check_double_backward(self, logit_data, x_data, y_grad, x_grad_grad):
def f(logit):
return distributions.bernoulli._bernoulli_log_prob(
logit, x_data)
gradient_check.check_double_backward(
f, logit_data, y_grad, x_grad_grad, dtype=numpy.float64,
**self.backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.logit, self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.logit), cuda.to_gpu(self.x),
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
testing.run_module(__name__, __file__)
| mit |
iMichka/mini-iw | pygccxml/binary_parsers/get_dll_exported_symbols.py | 12 | 9547 | # The content of this file was contributed by leppton
# (http://mail.python.org/pipermail/patches/2006-November/020942.html) to
# ctypes project, under MIT License.
# This example shows how to use ctypes module to read all
# function names from dll export directory
import os
if os.name != "nt":
raise Exception("Wrong OS")
import ctypes as ctypes
import ctypes.wintypes as wintypes
def convert_cdef_to_pydef(line):
"""convert_cdef_to_pydef(line_from_c_header_file) -> python_tuple_string
'DWORD var_name[LENGTH];' -> '("var_name", DWORD*LENGTH)'
doesn't work for all valid c/c++ declarations"""
l = line[:line.find(';')].split()
if len(l) != 2:
return None
type_ = l[0]
name = l[1]
i = name.find('[')
if i != -1:
name, brac = name[:i], name[i:][1:-1]
return '("%s", %s*%s)' % (name, type_, brac)
else:
return '("%s", %s)' % (name, type_)
def convert_cdef_to_structure(cdef, name, data_dict=ctypes.__dict__):
"""convert_cdef_to_structure(struct_definition_from_c_header_file)
-> python class derived from ctypes.Structure
limited support for c/c++ syntax"""
py_str = '[\n'
for line in cdef.split('\n'):
field = convert_cdef_to_pydef(line)
if field is not None:
py_str += ' ' * 4 + field + ',\n'
py_str += ']\n'
pyarr = eval(py_str, data_dict)
class ret_val(ctypes.Structure):
_fields_ = pyarr
ret_val.__name__ = name
ret_val.__module__ = None
return ret_val
# struct definitions we need to read dll file export table
winnt = (
('IMAGE_DOS_HEADER', """\
WORD e_magic;
WORD e_cblp;
WORD e_cp;
WORD e_crlc;
WORD e_cparhdr;
WORD e_minalloc;
WORD e_maxalloc;
WORD e_ss;
WORD e_sp;
WORD e_csum;
WORD e_ip;
WORD e_cs;
WORD e_lfarlc;
WORD e_ovno;
WORD e_res[4];
WORD e_oemid;
WORD e_oeminfo;
WORD e_res2[10];
LONG e_lfanew;
"""),
('IMAGE_FILE_HEADER', """\
WORD Machine;
WORD NumberOfSections;
DWORD TimeDateStamp;
DWORD PointerToSymbolTable;
DWORD NumberOfSymbols;
WORD SizeOfOptionalHeader;
WORD Characteristics;
"""),
('IMAGE_DATA_DIRECTORY', """\
DWORD VirtualAddress;
DWORD Size;
"""),
('IMAGE_OPTIONAL_HEADER32', """\
WORD Magic;
BYTE MajorLinkerVersion;
BYTE MinorLinkerVersion;
DWORD SizeOfCode;
DWORD SizeOfInitializedData;
DWORD SizeOfUninitializedData;
DWORD AddressOfEntryPoint;
DWORD BaseOfCode;
DWORD BaseOfData;
DWORD ImageBase;
DWORD SectionAlignment;
DWORD FileAlignment;
WORD MajorOperatingSystemVersion;
WORD MinorOperatingSystemVersion;
WORD MajorImageVersion;
WORD MinorImageVersion;
WORD MajorSubsystemVersion;
WORD MinorSubsystemVersion;
DWORD Win32VersionValue;
DWORD SizeOfImage;
DWORD SizeOfHeaders;
DWORD CheckSum;
WORD Subsystem;
WORD DllCharacteristics;
DWORD SizeOfStackReserve;
DWORD SizeOfStackCommit;
DWORD SizeOfHeapReserve;
DWORD SizeOfHeapCommit;
DWORD LoaderFlags;
DWORD NumberOfRvaAndSizes;
IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES];
""",
{'IMAGE_NUMBEROF_DIRECTORY_ENTRIES': 16}),
('IMAGE_NT_HEADERS', """\
DWORD Signature;
IMAGE_FILE_HEADER FileHeader;
IMAGE_OPTIONAL_HEADER32 OptionalHeader;
"""),
('IMAGE_EXPORT_DIRECTORY', """\
DWORD Characteristics;
DWORD TimeDateStamp;
WORD MajorVersion;
WORD MinorVersion;
DWORD Name;
DWORD Base;
DWORD NumberOfFunctions;
DWORD NumberOfNames;
DWORD AddressOfFunctions;
DWORD AddressOfNames;
DWORD AddressOfNameOrdinals;
"""),
)
# Construct python ctypes.Structures from above definitions
data_dict = dict(wintypes.__dict__)
for definition in winnt:
name = definition[0]
def_str = definition[1]
if len(definition) == 3:
data_dict.update(definition[2])
type_ = convert_cdef_to_structure(def_str, name, data_dict)
data_dict[name] = type_
globals()[name] = type_
ptype = ctypes.POINTER(type_)
pname = 'P' + name
data_dict[pname] = ptype
globals()[pname] = ptype
del data_dict
del winnt
class DllException(Exception):
pass
def read_export_table(dll_name, mmap=False, use_kernel=False):
"""
read_export_table(dll_name [,mmap=False [,use_kernel=False]]])
-> list of exported names
default is to load the dll into memory: dll sections are aligned to
page boundaries, the dll entry point is called, etc...
with mmap=True dll file image is mapped to memory, Relative Virtual
Addresses (RVAs) must be mapped to real addresses manually
with use_kernel=True direct kernel32.dll calls are used,
instead of python mmap module
see http://www.windowsitlibrary.com/Content/356/11/1.html
for details on Portable Executable (PE) file format
"""
if not mmap:
dll = ctypes.cdll.LoadLibrary(dll_name)
if dll is None:
raise DllException("Cant load dll")
base_addr = dll._handle
else:
if not use_kernel:
fileH = open(dll_name)
if fileH is None:
raise DllException("Cant load dll")
import mmap
m = mmap.mmap(fileH.fileno(), 0, None, mmap.ACCESS_READ)
# id(m)+8 is a fragile CPython-layout hack; is there a better way?
base_addr = ctypes.cast(id(m) + 8, ctypes.POINTER(ctypes.c_int))[0]
else:
kernel32 = ctypes.windll.kernel32
if kernel32 is None:
raise DllException("cant load kernel")
fileH = kernel32.CreateFileA(dll_name, 0x00120089, 1, 0, 3, 0, 0)
if fileH == 0:
raise DllException(
"Cant open, errcode = %d" %
kernel32.GetLastError())
mapH = kernel32.CreateFileMappingW(fileH, 0, 0x8000002, 0, 0, 0)
if mapH == 0:
raise DllException(
"Cant mmap, errocode = %d" %
kernel32.GetLastError())
base_addr = ctypes.windll.kernel32.MapViewOfFile(
mapH, 0x4, 0, 0, 0)
if base_addr == 0:
raise DllException(
"Cant mmap(2), errocode = %d" %
kernel32.GetLastError())
dbghelp = ctypes.windll.dbghelp
if dbghelp is None:
raise DllException("dbghelp.dll not installed")
pimage_nt_header = dbghelp.ImageNtHeader(base_addr)
if pimage_nt_header == 0:
raise DllException("Cant find IMAGE_NT_HEADER")
# Functions like dbghelp.ImageNtHeader above have no type information
# let's make one prototype for extra buzz
# PVOID ImageRvaToVa(PIMAGE_NT_HEADERS NtHeaders, PVOID Base,
# ULONG Rva, PIMAGE_SECTION_HEADER* LastRvaSection)
# we use integers instead of pointers, because integers are easier
# for pointer arithmetic
prototype = ctypes.WINFUNCTYPE(
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int)
paramflags = (
(1, "NtHeaders", pimage_nt_header),
(1, "Base", base_addr),
(1, "Rva"),
(1, "LastRvaSection", 0))
ImageRvaToVa = prototype(('ImageRvaToVa', dbghelp), paramflags)
def cast_rva(rva, type_):
va = base_addr + rva
if mmap and va > pimage_nt_header:
va = ImageRvaToVa(Rva=rva)
if va == 0:
raise DllException("ImageRvaToVa failed")
return ctypes.cast(va, type_)
if not mmap:
dos_header = cast_rva(0, PIMAGE_DOS_HEADER)[0]
if dos_header.e_magic != 0x5A4D:
raise DllException("IMAGE_DOS_HEADER.e_magic error")
nt_header = cast_rva(dos_header.e_lfanew, PIMAGE_NT_HEADERS)[0]
else:
nt_header = ctypes.cast(pimage_nt_header, PIMAGE_NT_HEADERS)[0]
if nt_header.Signature != 0x00004550:
raise DllException("IMAGE_NT_HEADERS.Signature error")
opt_header = nt_header.OptionalHeader
if opt_header.Magic != 0x010b:
raise DllException("IMAGE_OPTIONAL_HEADERS32.Magic error")
ret_val = []
exports_dd = opt_header.DataDirectory[0]
if opt_header.NumberOfRvaAndSizes > 0 or exports_dd != 0:
export_dir = cast_rva(
exports_dd.VirtualAddress,
PIMAGE_EXPORT_DIRECTORY)[0]
nNames = export_dir.NumberOfNames
if nNames > 0:
PNamesType = ctypes.POINTER(ctypes.c_int * nNames)
names = cast_rva(export_dir.AddressOfNames, PNamesType)[0]
for rva in names:
name = cast_rva(rva, ctypes.c_char_p).value
ret_val.append(name)
if mmap:
if use_kernel:
kernel32.UnmapViewOfFile(base_addr)
kernel32.CloseHandle(mapH)
kernel32.CloseHandle(fileH)
else:
m.close()
fileH.close()
return ret_val
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('usage: %s dll_file_name' % sys.argv[0])
sys.exit()
names = read_export_table(sys.argv[1], mmap=False, use_kernel=False)
for name in names:
print(name)
| apache-2.0 |
pashango2/sphinxcontrib-pandoc-markdown | setup.py | 1 | 1386 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from sphinxcontrib.pandoc_markdown import __version__
# long_desc = open('README.md').read()
requires = ['Sphinx>=0.6', "pandocfilters"]
setup(
name='sphinxcontrib-pandoc-markdown',
version=__version__,
url='https://github.com/pashango2/sphinxcontrib-pandoc-markdown/',
license='BSD',
author='Toshiyuki Ishii',
author_email='pashango2@gmail.com',
description='Yet another markdown processor for Sphinx',
long_description="",
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Framework :: Sphinx :: Extension',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Documentation',
'Topic :: Documentation :: Sphinx',
'Topic :: Utilities',
],
platforms='any',
packages=find_packages(),
include_package_data=True,
install_requires=requires,
namespace_packages=['sphinxcontrib'],
)
| bsd-2-clause |
sogelink/ansible | lib/ansible/modules/identity/opendj/opendj_backendprop.py | 72 | 6925 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: opendj_backendprop
short_description: Will update the backend configuration of OpenDJ via the dsconfig set-backend-prop command.
description:
- This module will update settings for OpenDJ with the command set-backend-prop.
- It will first check via get-backend-prop whether the configuration needs to be applied.
version_added: "2.2"
author:
- Werner Dijkerman
options:
opendj_bindir:
description:
- The path to the bin directory of OpenDJ.
required: false
default: /opt/opendj/bin
hostname:
description:
- The hostname of the OpenDJ server.
required: true
port:
description:
- The Admin port on which the OpenDJ instance is available.
required: true
username:
description:
- The username to connect to.
required: false
default: cn=Directory Manager
password:
description:
- The password for the cn=Directory Manager user.
- Either password or passwordfile is needed.
required: false
passwordfile:
description:
- Location to the password file which holds the password for the cn=Directory Manager user.
- Either password or passwordfile is needed.
required: false
backend:
description:
- The name of the backend on which the property needs to be updated.
required: true
name:
description:
- The configuration setting to update.
required: true
value:
description:
- The value for the configuration item.
required: true
state:
description:
- If configuration needs to be added/updated
required: false
default: "present"
'''
EXAMPLES = '''
- name: "Add or update OpenDJ backend properties"
action: opendj_backendprop
hostname=localhost
port=4444
username="cn=Directory Manager"
password=password
backend=userRoot
name=index-entry-limit
value=5000
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
class BackendProp(object):
def __init__(self, module):
self._module = module
def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
my_command = [
opendj_bindir + '/dsconfig',
'get-backend-prop',
'-h', hostname,
'--port', str(port),
'--bindDN', username,
'--backend-name', backend_name,
'-n', '-X', '-s'
] + password_method
rc, stdout, stderr = self._module.run_command(my_command)
if rc == 0:
return stdout
else:
self._module.fail_json(msg="Error message: " + str(stderr))
def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
my_command = [
opendj_bindir + '/dsconfig',
'set-backend-prop',
'-h', hostname,
'--port', str(port),
'--bindDN', username,
'--backend-name', backend_name,
'--set', name + ":" + value,
'-n', '-X'
] + password_method
rc, stdout, stderr = self._module.run_command(my_command)
if rc == 0:
return True
else:
self._module.fail_json(msg="Error message: " + stderr)
def validate_data(self, data=None, name=None, value=None):
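# `dsconfig get-backend-prop -s` prints one whitespace-separated
# "property value" pair per line (assumed format); compare the first
# token against the property name and the second against the value.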
for config_line in data.split('\n'):
if config_line:
split_line = config_line.split()
if split_line[0] == name:
if split_line[1] == value:
return True
return False
def main():
module = AnsibleModule(
argument_spec=dict(
opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
hostname=dict(required=True),
port=dict(required=True),
username=dict(default="cn=Directory Manager", required=False),
password=dict(required=False, no_log=True),
passwordfile=dict(required=False, type="path"),
backend=dict(required=True),
name=dict(required=True),
value=dict(required=True),
state=dict(default="present"),
),
supports_check_mode=True,
mutually_exclusive=[['password', 'passwordfile']],
required_one_of=[['password', 'passwordfile']]
)
opendj_bindir = module.params['opendj_bindir']
hostname = module.params['hostname']
port = module.params['port']
username = module.params['username']
password = module.params['password']
passwordfile = module.params['passwordfile']
backend_name = module.params['backend']
name = module.params['name']
value = module.params['value']
state = module.params['state']
if module.params["password"] is not None:
password_method = ['-w', password]
elif module.params["passwordfile"] is not None:
password_method = ['-j', passwordfile]
opendj = BackendProp(module)
validate = opendj.get_property(opendj_bindir=opendj_bindir,
hostname=hostname,
port=port,
username=username,
password_method=password_method,
backend_name=backend_name)
if validate:
if not opendj.validate_data(data=validate, name=name, value=value):
if module.check_mode:
module.exit_json(changed=True)
if opendj.set_property(opendj_bindir=opendj_bindir,
hostname=hostname,
port=port,
username=username,
password_method=password_method,
backend_name=backend_name,
name=name,
value=value):
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.exit_json(changed=False)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
0359xiaodong/viewfinder | backend/www/test/new_client_log_url_test.py | 13 | 3479 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
# -*- coding: utf-8 -*-
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import datetime
import json
import time
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.client_log import ClientLog, CLIENT_LOG_CONTENT_TYPE
from viewfinder.backend.www import json_schema
from viewfinder.backend.www.test import service_base_test
class NewClientLogUrlTestCase(service_base_test.ServiceBaseTestCase):
def setUp(self):
super(NewClientLogUrlTestCase, self).setUp()
self._validate = False
def testNewClientLogUrl(self):
"""Verify the put url can be fetched via /service/get_client_log."""
timestamp = time.time()
log_timestamp = timestamp - 24 * 60 * 60
response_dict = self._SendRequest('new_client_log_url', self._cookie,
{'headers': {'op_id': 'o1', 'op_timestamp': timestamp},
'timestamp': log_timestamp,
'client_log_id': 'log1'})
exp_put_url = ClientLog.GetPutUrl(
self._user.user_id, self._device_ids[0], log_timestamp, 'log1')
self.assertEqual(exp_put_url, response_dict['client_log_put_url'])
def testContentType(self):
"""Verify that content type can be set explicitly and used."""
request_dict = {'headers': {'op_id': 'o1', 'op_timestamp': time.time()},
'timestamp': time.time(),
'client_log_id': 'log_content_type',
'content_type': 'text/plain'}
self._GetNewLogUrlAndVerify(request_dict, 'test log file',
content_type='text/plain',
content_md5=None)
def testDefaultContentType(self):
"""Verify default content type."""
request_dict = {'headers': {'op_id': 'o1', 'op_timestamp': time.time()},
'timestamp': time.time(),
'client_log_id': 'default_content_type'}
self._GetNewLogUrlAndVerify(request_dict, 'test log file',
content_type=CLIENT_LOG_CONTENT_TYPE,
content_md5=None)
def testMD5ClientLog(self):
"""Verify MD5 validation for client logs."""
log_body = 'test log file'
content_md5 = util.ComputeMD5Hex(log_body)
request_dict = {'headers': {'op_id': 'o1', 'op_timestamp': time.time()},
'timestamp': time.time(),
'client_log_id': 'log1',
'content_md5': content_md5}
self._GetNewLogUrlAndVerify(request_dict, log_body,
content_type=CLIENT_LOG_CONTENT_TYPE,
content_md5=content_md5)
def _GetNewLogUrlAndVerify(self, request_dict, log_body, content_type, content_md5):
"""Get a new client log url based on "request_dict" and verify
the URL can be PUT using the specified content-type and md5.
"""
response_dict = self._SendRequest('new_client_log_url', self._cookie, request_dict)
url = response_dict['client_log_put_url']
headers = {'Content-Type': content_type}
if content_md5 is not None:
headers['Content-MD5'] = content_md5
response = self._RunAsync(self._tester.http_client.fetch, url, method='PUT',
body=log_body, follow_redirects=False, headers=headers)
self.assertEqual(200, response.code)
| apache-2.0 |
Nepherhotep/django | tests/template_tests/syntax_tests/test_firstof.py | 199 | 3728 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango110Warning
from ..utils import setup
class FirstOfTagTests(SimpleTestCase):
libraries = {'future': 'django.templatetags.future'}
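# {% firstof a b c %} renders the first argument that evaluates truthy
# (or nothing if all are falsy), which is the behavior exercised below.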
@setup({'firstof01': '{% firstof a b c %}'})
def test_firstof01(self):
output = self.engine.render_to_string('firstof01', {'a': 0, 'c': 0, 'b': 0})
self.assertEqual(output, '')
@setup({'firstof02': '{% firstof a b c %}'})
def test_firstof02(self):
output = self.engine.render_to_string('firstof02', {'a': 1, 'c': 0, 'b': 0})
self.assertEqual(output, '1')
@setup({'firstof03': '{% firstof a b c %}'})
def test_firstof03(self):
output = self.engine.render_to_string('firstof03', {'a': 0, 'c': 0, 'b': 2})
self.assertEqual(output, '2')
@setup({'firstof04': '{% firstof a b c %}'})
def test_firstof04(self):
output = self.engine.render_to_string('firstof04', {'a': 0, 'c': 3, 'b': 0})
self.assertEqual(output, '3')
@setup({'firstof05': '{% firstof a b c %}'})
def test_firstof05(self):
output = self.engine.render_to_string('firstof05', {'a': 1, 'c': 3, 'b': 2})
self.assertEqual(output, '1')
@setup({'firstof06': '{% firstof a b c %}'})
def test_firstof06(self):
output = self.engine.render_to_string('firstof06', {'c': 3, 'b': 0})
self.assertEqual(output, '3')
@setup({'firstof07': '{% firstof a b "c" %}'})
def test_firstof07(self):
output = self.engine.render_to_string('firstof07', {'a': 0})
self.assertEqual(output, 'c')
@setup({'firstof08': '{% firstof a b "c and d" %}'})
def test_firstof08(self):
output = self.engine.render_to_string('firstof08', {'a': 0, 'b': 0})
self.assertEqual(output, 'c and d')
@setup({'firstof09': '{% firstof %}'})
def test_firstof09(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('firstof09')
@setup({'firstof10': '{% firstof a %}'})
def test_firstof10(self):
output = self.engine.render_to_string('firstof10', {'a': '<'})
self.assertEqual(output, '<')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof11': '{% load firstof from future %}{% firstof a b %}'})
def test_firstof11(self):
output = self.engine.render_to_string('firstof11', {'a': '<', 'b': '>'})
self.assertEqual(output, '<')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof12': '{% load firstof from future %}{% firstof a b %}'})
def test_firstof12(self):
output = self.engine.render_to_string('firstof12', {'a': '', 'b': '>'})
self.assertEqual(output, '>')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof13': '{% load firstof from future %}'
'{% autoescape off %}{% firstof a %}{% endautoescape %}'})
def test_firstof13(self):
output = self.engine.render_to_string('firstof13', {'a': '<'})
self.assertEqual(output, '<')
@ignore_warnings(category=RemovedInDjango110Warning)
@setup({'firstof14': '{% load firstof from future %}{% firstof a|safe b %}'})
def test_firstof14(self):
output = self.engine.render_to_string('firstof14', {'a': '<'})
self.assertEqual(output, '<')
@setup({'firstof15': '{% firstof a b c as myvar %}'})
def test_firstof15(self):
ctx = {'a': 0, 'b': 2, 'c': 3}
output = self.engine.render_to_string('firstof15', ctx)
self.assertEqual(ctx['myvar'], '2')
self.assertEqual(output, '')
| bsd-3-clause |
ludoo/wpkit | attic/wpfrontman/wp_frontman/wpf_cached_template_loader.py | 1 | 2690 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
Basically identical to the django default caching loader, but takes
into account the blog id.
"""
from django.core.exceptions import ImproperlyConfigured
from django.template import TemplateDoesNotExist
from django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin
from django.utils.hashcompat import sha_constructor
from django.utils.importlib import import_module
from wp_frontman.blog import Blog
class WPFLoader(BaseLoader):
is_usable = True
def __init__(self, loaders):
self.template_cache = {}
self._loaders = loaders
self._cached_loaders = []
@property
def loaders(self):
# Resolve loaders on demand to avoid circular imports
if not self._cached_loaders:
for loader in self._loaders:
self._cached_loaders.append(find_template_loader(loader))
return self._cached_loaders
def find_template(self, name, dirs=None):
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
return (template, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
blog_id = Blog.get_active().blog_id
key = '%s-%s' % (blog_id, template_name)
if template_dirs:
# If template directories were specified, use a hash to differentiate
key = '-'.join([blog_id, template_name, sha_constructor('|'.join(template_dirs)).hexdigest()])
if key not in self.template_cache:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = get_template_from_string(template, origin, template_name)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
return template, origin
self.template_cache[key] = template
return self.template_cache[key], None
def reset(self):
"Empty the template cache."
self.template_cache.clear()
| bsd-3-clause |
liuquansheng47/ansible | lib/ansible/plugins/lookup/redis_kv.py | 251 | 2433 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
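# Usage sketch inside a playbook (the URL part may be left empty for the
# default): {{ lookup('redis_kv', 'redis://localhost:6379,somekey') }}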
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
ret = []
for term in terms:
(url,key) = term.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
ret.append(res)
except:
ret.append("") # connection failed or key not found
return ret
| gpl-3.0 |
stefanw/froide | froide/upload/migrations/0001_initial.py | 1 | 1428 | # Generated by Django 2.1.9 on 2019-06-24 13:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Upload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('guid', models.UUIDField(default=uuid.uuid4, unique=True, verbose_name='GUID')),
('state', django_fsm.FSMField(default='initial', max_length=50)),
('upload_offset', models.BigIntegerField(default=0)),
('upload_length', models.BigIntegerField(default=-1)),
('upload_metadata', models.TextField(blank=True)),
('filename', models.CharField(blank=True, max_length=255)),
('temporary_file_path', models.CharField(max_length=4096, null=True)),
('expires', models.DateTimeField(blank=True, null=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| mit |
havard024/prego | crm/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/introspection.py | 624 | 1426 | from MySQLdb.constants import FIELD_TYPE
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.mysql.introspection import DatabaseIntrospection
class MySQLIntrospection(DatabaseIntrospection):
# Updating the data_types_reverse dictionary with the appropriate
# type for Geometry fields.
data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
data_types_reverse[FIELD_TYPE.GEOMETRY] = 'GeometryField'
def get_geometry_type(self, table_name, geo_col):
cursor = self.connection.cursor()
try:
# In order to get the specific geometry type of the field,
# we introspect on the table definition using `DESCRIBE`.
cursor.execute('DESCRIBE %s' %
self.connection.ops.quote_name(table_name))
# Increment over description info until we get to the geometry
# column.
for column, typ, null, key, default, extra in cursor.fetchall():
if column == geo_col:
# Using OGRGeomType to convert from OGC name to Django field.
# MySQL does not support 3D or SRIDs, so the field params
# are empty.
field_type = OGRGeomType(typ).django
field_params = {}
break
finally:
cursor.close()
return field_type, field_params
| mit |
Probely/CTF-Challenges | WebHacking/100-Rolodex/tests/t_app.py | 1 | 9779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, Bright Pixel
#
# For these tests to work, remember to start the service
# from a clean slate. Otherwise, if a test fails, it may
# leave stuff around and mess up the next testing round.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import unittest
import requests
import json
import os
TEST_USERNAME = "team0"
TEST_PASSWORD = "abcd1234-1"
# In this challenge, setting the participant's position attribute
# to "admin" (case-insensitive) gives it privileged access...
PRIVILEGED_POSITION = "admin"
# Valid employee UID...
EMPLOYEE_UID = 1001
class TestApp(unittest.TestCase):
def setUp(self):
r = requests.get("http://127.0.0.1:30878/token", auth=(TEST_USERNAME, TEST_PASSWORD))
response = r.json()
self.token = response["token"]
self.uid = response["uid"]
def test_failed_auth(self):
r = requests.get("http://127.0.0.1:30878/token", auth=(TEST_USERNAME, TEST_PASSWORD + "--bad--"))
self.assertEqual(r.status_code, 401)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
r = requests.get("http://127.0.0.1:30878/token", auth=(TEST_USERNAME + "--bad--", TEST_PASSWORD))
self.assertEqual(r.status_code, 401)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
def test_empty_auth(self):
r = requests.get("http://127.0.0.1:30878/token", auth=(TEST_USERNAME, ""))
self.assertEqual(r.status_code, 401)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
r = requests.get("http://127.0.0.1:30878/token", auth=("", TEST_PASSWORD))
self.assertEqual(r.status_code, 401)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
r = requests.get("http://127.0.0.1:30878/token", auth=("", ""))
self.assertEqual(r.status_code, 401)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
def test_invalid_auth_header(self):
for auth in (b"", b"--garbage--", b"--áççêntèd--"):
r = requests.get(b"http://127.0.0.1:30878/token", headers={b"Authorization": auth})
self.assertEqual(r.status_code, 401)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
def test_successful_auth(self):
r = requests.get("http://127.0.0.1:30878/token", auth=(TEST_USERNAME, TEST_PASSWORD))
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertFalse("error" in response)
self.assertTrue("uid" in response)
self.assertTrue("token" in response)
self.assertEqual(len(response["token"]), 64)
def test_missing_token(self):
r = requests.get("http://127.0.0.1:30878/users/1")
self.assertEqual(r.status_code, 403)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
def test_failed_token_header(self):
for token in (b"", b"--garbage--", b"--áççêntèd--"):
r = requests.get(b"http://127.0.0.1:30878/users/1",
headers={b"X-API-Token": token})
self.assertEqual(r.status_code, 403)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
def test_successful_token_header(self):
r = requests.get(b"http://127.0.0.1:30878/users/%d" % self.uid,
headers={b"X-API-Token": self.token})
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertFalse("error" in response)
def test_failed_token_qs(self):
for token in (b"", b"--garbage--", b"--áççêntèd--"):
r = requests.get(b"http://127.0.0.1:30878/users/1?token=%s" % token)
self.assertEqual(r.status_code, 403)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertTrue("error" in response)
def test_successful_token_qs(self):
r = requests.get(b"http://127.0.0.1:30878/users/%s?token=%s" % (self.uid, self.token))
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertFalse("error" in response)
def test_set_participant_attributes(self):
random_name = os.urandom(10).encode("hex")
r = requests.put(b"http://127.0.0.1:30878/users/%d" % self.uid,
headers={b"X-API-Token": self.token},
json={"name": random_name})
self.assertTrue(r.status_code in (200, 304))
r = requests.get(b"http://127.0.0.1:30878/users/%s?token=%s" % (self.uid, self.token))
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["user"]["name"], random_name)
def test_set_unknown_participant_attributes(self):
r = requests.put(b"http://127.0.0.1:30878/users/%d" % self.uid,
headers={b"X-API-Token": self.token},
json={"whatisthisattribute": "Testing Set"})
self.assertEqual(r.status_code, 400)
def test_set_employee_attributes(self):
r = requests.put(b"http://127.0.0.1:30878/users/%d" % EMPLOYEE_UID,
headers={b"X-API-Token": self.token},
json={"name": "Testing Set"})
self.assertEqual(r.status_code, 403) # ...cannot change employees.
def test_get_all_users_unprivileged(self):
r = requests.put(b"http://127.0.0.1:30878/users/%d" % self.uid,
headers={b"X-API-Token": self.token},
json={"position": "unprivileged"})
self.assertTrue(r.status_code in (200, 304))
r = requests.get(b"http://127.0.0.1:30878/users",
headers={b"X-API-Token": self.token})
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertFalse("error" in response)
self.assertTrue("users" in response)
self.assertTrue(isinstance(response["users"], list))
self.assertTrue(len(response["users"]) > 0)
for user in response["users"]:
self.assertTrue("uid" in user)
if user["uid"] != self.uid:
self.assertFalse("notes" in user) # ...privileged attribute.
def test_get_user_by_uid_unprivileged(self):
r = requests.put(b"http://127.0.0.1:30878/users/%d" % self.uid,
headers={b"X-API-Token": self.token},
json={"position": "unprivileged"})
self.assertTrue(r.status_code in (200, 304))
r = requests.get(b"http://127.0.0.1:30878/users/%d" % EMPLOYEE_UID,
headers={b"X-API-Token": self.token})
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertFalse("error" in response)
self.assertTrue("user" in response)
self.assertEqual(response["user"]["uid"], EMPLOYEE_UID)
self.assertTrue("notes" not in response["user"])
def test_get_all_users_privileged(self):
r = requests.put(b"http://127.0.0.1:30878/users/%d" % self.uid,
headers={b"X-API-Token": self.token},
json={"position": PRIVILEGED_POSITION})
self.assertTrue(r.status_code in (200, 304))
r = requests.get(b"http://127.0.0.1:30878/users",
headers={b"X-API-Token": self.token})
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertFalse("error" in response)
self.assertTrue("users" in response)
self.assertTrue(isinstance(response["users"], list))
self.assertTrue(len(response["users"]) > 0)
for user in response["users"]:
self.assertTrue("uid" in user)
if user["uid"] != self.uid:
self.assertTrue("notes" in user) # ...privileged attribute.
def test_get_user_by_uid_privileged(self):
r = requests.put(b"http://127.0.0.1:30878/users/%d" % self.uid,
headers={b"X-API-Token": self.token},
json={"position": PRIVILEGED_POSITION})
self.assertTrue(r.status_code in (200, 304))
r = requests.get(b"http://127.0.0.1:30878/users/%d" % EMPLOYEE_UID,
headers={b"X-API-Token": self.token})
self.assertEqual(r.status_code, 200)
response = r.json()
self.assertEqual(response["status"], r.status_code)
self.assertFalse("error" in response)
self.assertTrue("user" in response)
self.assertEqual(response["user"]["uid"], EMPLOYEE_UID)
self.assertTrue("notes" in response["user"])
if __name__ == "__main__":
unittest.main()
# vim: set expandtab ts=4 sw=4:
| apache-2.0 |
rukku/inasafe | safe/impact_functions/inundation/flood_OSM_building_impact.py | 1 | 15830 | from safe.impact_functions.core import (FunctionProvider,
get_hazard_layer,
get_exposure_layer,
get_question)
from safe.storage.vector import Vector
from safe.storage.utilities import DEFAULT_ATTRIBUTE
from safe.common.utilities import (ugettext as tr,
format_int,
verify)
from safe.common.tables import Table, TableRow
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
from third_party.odict import OrderedDict
import logging
LOGGER = logging.getLogger('InaSAFE')
class FloodBuildingImpactFunction(FunctionProvider):
"""Inundation impact on building data
:author Ole Nielsen, Kristy van Putten
# this rating below is only for testing a function, not the real one
:rating 0
:param requires category=='hazard' and \
subcategory in ['flood', 'tsunami']
:param requires category=='exposure' and \
subcategory=='structure' and \
layertype=='vector'
"""
# Function documentation
target_field = 'INUNDATED'
title = tr('Be flooded')
synopsis = tr('To assess the impacts of (flood or tsunami) inundation '
'on building footprints originating from OpenStreetMap '
'(OSM).')
actions = tr('Provide details about where critical infrastructure might '
'be flooded')
# citations must be a list
# This is just an example; a fictional citation, actually
#citations = [tr('Hutchings, Field & Parks. Assessment of Flood impacts '
# 'on buildings. Impact. Vol 66(2). 2012')]
detailed_description = \
tr('The inundation status is calculated for each '
'building (using the centroid if it is a polygon) based on the '
'hazard levels provided. If the hazard is given as a raster, a '
'threshold of 1 meter is used. This is configurable through the '
'InaSAFE interface. If the hazard is given as a vector polygon '
'layer buildings are considered to be impacted depending on the '
'value of hazard attributes (in order) "affected" or "FLOODPRONE":'
' If a building is in a region that has attribute "affected" set '
'to True (or 1) it is impacted. If attribute "affected" does not '
'exist but "FLOODPRONE" does, then the building is considered '
'impacted if "FLOODPRONE" is '
'"yes". If neither "affected" nor "FLOODPRONE" is available, a '
'building will be impacted if it belongs to any polygon. The latter'
' behaviour is implemented through the attribute "inapolygon" which'
' is automatically assigned.')
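# In short, the per-building decision for vector hazards, as implemented
# in run() below (a sketch of the precedence described above):
#   if 'affected' in atts:      flooded = bool(atts['affected'])
#   elif 'FLOODPRONE' in atts:  flooded = atts['FLOODPRONE'].lower() == 'yes'
#   else:                       flooded = atts['inapolygon']  # auto-assigned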
hazard_input = \
tr('A hazard raster layer where each cell '
'represents flood depth (in meters), or a vector polygon layer '
'where each polygon represents an inundated area. In the latter '
'case, the following attributes are recognised '
'(in order): "affected" (True or False) or "FLOODPRONE" '
'(Yes or No). (True may be represented as 1, False as 0.)')
exposure_input = \
tr('Vector polygon layer extracted from OSM '
'where each polygon represents the footprint of a building.')
output = tr('Vector layer containing buildings estimated to be flooded '
'and a breakdown of the buildings by type.')
limitation = tr('This function only flags buildings as impacted or not, '
'either based on a fixed threshold in the case of a '
'raster hazard or on the attributes mentioned under '
'input in the case of a vector hazard.')
# parameters = {'postprocessors': {'BuildingType': {'on': True}}}
parameters = OrderedDict([
('threshold [m]', 1.0),
('postprocessors', OrderedDict([('BuildingType', {'on': True})]))])
def run(self, layers):
"""Flood impact to buildings (e.g. from Open Street Map)
"""
threshold = self.parameters['threshold [m]'] # Flood threshold [m]
verify(isinstance(threshold, float),
'Expected thresholds to be a float. Got %s' % str(threshold))
# Extract data
H = get_hazard_layer(layers) # Depth
E = get_exposure_layer(layers) # Building locations
question = get_question(H.get_name(),
E.get_name(),
self)
# Determine attribute name for hazard levels
if H.is_raster:
mode = 'grid'
hazard_attribute = 'depth'
else:
mode = 'regions'
hazard_attribute = None
# Interpolate hazard level to building locations
I = assign_hazard_values_to_exposure_data(
H, E, attribute_name=hazard_attribute)
# Extract relevant exposure data
attribute_names = I.get_attribute_names()
attributes = I.get_data()
N = len(I)
# Calculate building impact
count = 0
buildings = {}
affected_buildings = {}
for i in range(N):
if mode == 'grid':
# Get the interpolated depth
x = float(attributes[i]['depth'])
x = x >= threshold
elif mode == 'regions':
# Use interpolated polygon attribute
atts = attributes[i]
# FIXME (Ole): Need to agree whether to use one or the
# other as this can be very confusing!
# For now look for 'affected' first
if 'affected' in atts:
# E.g. from flood forecast
# Assume that building is wet if inside polygon
# as flagged by the attribute "affected"
res = atts['affected']
if res is None:
x = False
else:
x = bool(res)
#if x:
# print 'Got affected', x
elif 'FLOODPRONE' in atts:
res = atts['FLOODPRONE']
if res is None:
x = False
else:
x = res.lower() == 'yes'
elif DEFAULT_ATTRIBUTE in atts:
# Check the default attribute assigned for points
# covered by a polygon
res = atts[DEFAULT_ATTRIBUTE]
if res is None:
x = False
else:
x = res
else:
# there is no flood related attribute
msg = ('No flood related attribute found in %s. '
'I was looking for either "affected", "FLOODPRONE" '
'or "inapolygon". The latter should have been '
'automatically set by call to '
'assign_hazard_values_to_exposure_data(). '
'Sorry I can\'t help more.')
raise Exception(msg)
else:
msg = (tr('Unknown hazard type %s. '
'Must be either "grid" or "regions"')
% mode)
raise Exception(msg)
# Count affected buildings by usage type if available
if 'type' in attribute_names:
usage = attributes[i]['type']
else:
usage = None
if 'amenity' in attribute_names and (usage is None or usage == 0):
usage = attributes[i]['amenity']
if 'building_t' in attribute_names and (usage is None
or usage == 0):
usage = attributes[i]['building_t']
if 'office' in attribute_names and (usage is None or usage == 0):
usage = attributes[i]['office']
if 'tourism' in attribute_names and (usage is None or usage == 0):
usage = attributes[i]['tourism']
if 'leisure' in attribute_names and (usage is None or usage == 0):
usage = attributes[i]['leisure']
if 'building' in attribute_names and (usage is None or usage == 0):
usage = attributes[i]['building']
if usage == 'yes':
usage = 'building'
#LOGGER.debug('usage ')
if usage is not None and usage != 0:
key = usage
else:
key = 'unknown'
if key not in buildings:
buildings[key] = 0
affected_buildings[key] = 0
# Count all buildings by type
buildings[key] += 1
if x is True:
# Count affected buildings by type
affected_buildings[key] += 1
# Count total affected buildings
count += 1
# Add calculated impact to existing attributes
attributes[i][self.target_field] = x
# Lump small entries and 'unknown' into 'other' category
for usage in buildings.keys():
x = buildings[usage]
if x < 25 or usage == 'unknown':
if 'other' not in buildings:
buildings['other'] = 0
affected_buildings['other'] = 0
buildings['other'] += x
affected_buildings['other'] += affected_buildings[usage]
del buildings[usage]
del affected_buildings[usage]
# Generate csv file of results
## fid = open('C:\dki_table_%s.csv' % H.get_name(), 'wb')
## fid.write('%s, %s, %s\n' % (tr('Building type'),
## tr('Temporarily closed'),
## tr('Total')))
## fid.write('%s, %i, %i\n' % (tr('All'), count, N))
# Generate simple impact report
table_body = [question,
TableRow([tr('Building type'),
tr('Number flooded'),
tr('Total')],
header=True),
TableRow([tr('All'), format_int(count), format_int(N)])]
## fid.write('%s, %s, %s\n' % (tr('Building type'),
## tr('Temporarily closed'),
## tr('Total')))
school_closed = 0
hospital_closed = 0
# Generate break down by building usage type is available
list_type_attribute = ['type',
'amenity',
'building_t',
'office',
'tourism',
'leisure',
'building']
intersect_type = set(attribute_names) & set(list_type_attribute)
if len(intersect_type) > 0:
# Make list of building types
building_list = []
for usage in buildings:
building_type = usage.replace('_', ' ')
# Lookup internationalised value if available
building_type = tr(building_type)
#==============================================================
# print ('WARNING: %s could not be translated'
# % building_type)
#==============================================================
# FIXME (Sunni): I changed affected_buildings[usage] to a string
# because it will be replaced with a non-breaking space in html
building_list.append([building_type.capitalize(),
format_int(affected_buildings[usage]),
format_int(buildings[usage])])
if building_type == 'school':
school_closed = affected_buildings[usage]
if building_type == 'hospital':
hospital_closed = affected_buildings[usage]
## fid.write('%s, %i, %i\n' % (building_type.capitalize(),
## affected_buildings[usage],
## buildings[usage]))
# Sort alphabetically
building_list.sort()
#table_body.append(TableRow([tr('Building type'),
# tr('Temporarily closed'),
# tr('Total')], header=True))
table_body.append(TableRow(tr('Breakdown by building type'),
header=True))
for row in building_list:
s = TableRow(row)
table_body.append(s)
## fid.close()
table_body.append(TableRow(tr('Action Checklist:'), header=True))
table_body.append(TableRow(tr('Are the critical facilities still '
'open?')))
table_body.append(TableRow(tr('Which structures have warning capacity '
'(eg. sirens, speakers, etc.)?')))
table_body.append(TableRow(tr('Which buildings will be evacuation '
'centres?')))
table_body.append(TableRow(tr('Where will we locate the operations '
'centre?')))
table_body.append(TableRow(tr('Where will we locate warehouse and/or '
'distribution centres?')))
if school_closed > 0:
table_body.append(TableRow(tr('Where will the students from the %s'
' closed schools go to study?') %
format_int(school_closed)))
if hospital_closed > 0:
table_body.append(TableRow(tr('Where will the patients from the %s'
' closed hospitals go for treatment '
'and how will we transport them?') %
format_int(hospital_closed)))
table_body.append(TableRow(tr('Notes'), header=True))
assumption = tr('Buildings are said to be flooded when ')
if mode == 'grid':
assumption += tr('flood levels exceed %.1f m') % threshold
else:
assumption += tr('they are in regions marked as affected')
table_body.append(assumption)
impact_summary = Table(table_body).toNewlineFreeString()
impact_table = impact_summary
map_title = tr('Buildings inundated')
# Create style
style_classes = [dict(label=tr('Not Flooded'), min=0, max=0,
colour='#1EFC7C', transparency=0, size=1),
dict(label=tr('Flooded'), min=1, max=1,
colour='#F31A1C', transparency=0, size=1)]
style_info = dict(target_field=self.target_field,
style_classes=style_classes)
# Create vector layer and return
V = Vector(data=attributes,
projection=I.get_projection(),
geometry=I.get_geometry(),
name=tr('Estimated buildings affected'),
keywords={'impact_summary': impact_summary,
'impact_table': impact_table,
'map_title': map_title,
'target_field': self.target_field},
style_info=style_info)
return V
| gpl-3.0 |
sergecodd/FireFox-OS | B2G/gecko/python/virtualenv/virtualenv.py | 5 | 114569 | #!/usr/bin/env python
"""Create a "virtual" Python installation
"""
# If you change the version here, change it in setup.py
# and docs/conf.py as well.
__version__ = "1.8.4" # following best practices
virtualenv_version = __version__ # legacy, again
import base64
import sys
import os
import codecs
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import glob
import distutils.sysconfig
from distutils.util import strtobool
import struct
import subprocess
if sys.version_info < (2, 5):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.5 or greater.')
sys.exit(101)
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
is_cygwin = (sys.platform == 'cygwin')
is_darwin = (sys.platform == 'darwin')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if is_win:
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
if minver <= 3:
REQUIRED_MODULES.extend(['sets', '__future__'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy', 'tokenize', 'token',
'functools', 'heapq', 'bisect', 'weakref',
'reprlib'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver == 3:
import sysconfig
platdir = sysconfig.get_config_var('PLATDIR')
REQUIRED_FILES.append(platdir)
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
#"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
#"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
"imp",
"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
#"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
#"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line scripts. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
sys.stdout.write('.')
sys.stdout.flush()
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
#@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
level_for_integer = classmethod(level_for_integer)
# create a silent logger just to prevent this from being undefined
# will be overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
if not os.path.exists(path):
logger.info('Creating %s', path)
os.makedirs(path)
else:
logger.info('Directory %s already exists', path)
def copyfileordir(src, dest):
if os.path.isdir(src):
shutil.copytree(src, dest, True)
else:
shutil.copy2(src, dest)
def copyfile(src, dest, symlink=True):
if not os.path.exists(src):
# Some bad symlink in the src
logger.warn('Cannot find file %s (bad symlink)', src)
return
if os.path.exists(dest):
logger.debug('File %s already exists', dest)
return
if not os.path.exists(os.path.dirname(dest)):
logger.info('Creating parent directories for %s' % os.path.dirname(dest))
os.makedirs(os.path.dirname(dest))
if not os.path.islink(src):
srcpath = os.path.abspath(src)
else:
srcpath = os.readlink(src)
if symlink and hasattr(os, 'symlink') and not is_win:
logger.info('Symlinking %s', dest)
try:
os.symlink(srcpath, dest)
except (OSError, NotImplementedError):
logger.info('Symlinking failed, copying to %s', dest)
copyfileordir(src, dest)
else:
logger.info('Copying to %s', dest)
copyfileordir(src, dest)
def writefile(dest, content, overwrite=True):
if not os.path.exists(dest):
logger.info('Writing %s', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
return
else:
f = open(dest, 'rb')
c = f.read()
f.close()
if c != content.encode("utf-8"):
if not overwrite:
logger.notify('File %s exists with different content; not overwriting', dest)
return
logger.notify('Overwriting %s with new content', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
else:
logger.info('Content %s already in place', dest)
def rmtree(dir):
if os.path.exists(dir):
logger.notify('Deleting tree %s', dir)
shutil.rmtree(dir)
else:
logger.info('Do not need to delete %s; already gone', dir)
def make_exe(fn):
if hasattr(os, 'chmod'):
oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
os.chmod(fn, newmode)
logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
for dir in reversed(dirs):
files = glob.glob(os.path.join(dir, filename))
if files and os.path.isfile(files[0]):
return True, files[0]
return False, filename
def _install_req(py_executable, unzip=False, distribute=False,
search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
if not distribute:
egg_path = 'setuptools-*-py%s.egg' % sys.version[:3]
found, egg_path = _find_file(egg_path, search_dirs)
project_name = 'setuptools'
bootstrap_script = EZ_SETUP_PY
tgz_path = None
else:
# Look for a distribute egg (these are not distributed by default,
# but can be made available by the user)
egg_path = 'distribute-*-py%s.egg' % sys.version[:3]
found, egg_path = _find_file(egg_path, search_dirs)
project_name = 'distribute'
if found:
tgz_path = None
bootstrap_script = DISTRIBUTE_FROM_EGG_PY
else:
# Fall back to sdist
# NB: egg_path is not None iff tgz_path is None
# iff bootstrap_script is a generic setup script accepting
# the standard arguments.
egg_path = None
tgz_path = 'distribute-*.tar.gz'
found, tgz_path = _find_file(tgz_path, search_dirs)
bootstrap_script = DISTRIBUTE_SETUP_PY
if is_jython and os._name == 'nt':
# Jython's .bat sys.executable can't handle a command line
# argument with newlines
fd, ez_setup = tempfile.mkstemp('.py')
os.write(fd, bootstrap_script)
os.close(fd)
cmd = [py_executable, ez_setup]
else:
cmd = [py_executable, '-c', bootstrap_script]
if unzip and egg_path:
cmd.append('--always-unzip')
env = {}
remove_from_env = ['__PYVENV_LAUNCHER__']
if logger.stdout_level_matches(logger.DEBUG) and egg_path:
cmd.append('-v')
old_chdir = os.getcwd()
if egg_path is not None and os.path.exists(egg_path):
logger.info('Using existing %s egg: %s' % (project_name, egg_path))
cmd.append(egg_path)
if os.environ.get('PYTHONPATH'):
env['PYTHONPATH'] = egg_path + os.path.pathsep + os.environ['PYTHONPATH']
else:
env['PYTHONPATH'] = egg_path
elif tgz_path is not None and os.path.exists(tgz_path):
# Found a tgz source dist, let's chdir
logger.info('Using existing %s egg: %s' % (project_name, tgz_path))
os.chdir(os.path.dirname(tgz_path))
# in this case, we want to be sure that PYTHONPATH is unset (not
# just empty, really unset), else CPython tries to import the
# site.py that it's in virtualenv_support
remove_from_env.append('PYTHONPATH')
elif never_download:
logger.fatal("Can't find any local distributions of %s to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a %s "
"distribution (%s) in one of these "
"locations: %r" % (project_name, project_name,
egg_path or tgz_path,
search_dirs))
sys.exit(1)
elif egg_path:
logger.info('No %s egg found; downloading' % project_name)
cmd.extend(['--always-copy', '-U', project_name])
else:
logger.info('No %s tgz found; downloading' % project_name)
logger.start_progress('Installing %s...' % project_name)
logger.indent += 2
cwd = None
if project_name == 'distribute':
env['DONT_PATCH_SETUPTOOLS'] = 'true'
def _filter_ez_setup(line):
return filter_ez_setup(line, project_name)
if not os.access(os.getcwd(), os.W_OK):
cwd = tempfile.mkdtemp()
if tgz_path is not None and os.path.exists(tgz_path):
# the current working dir is hostile, let's copy the
# tarball to a temp dir
target = os.path.join(cwd, os.path.split(tgz_path)[-1])
shutil.copy(tgz_path, target)
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_ez_setup,
extra_env=env,
remove_from_env=remove_from_env,
cwd=cwd)
finally:
logger.indent -= 2
logger.end_progress()
if cwd is not None:
shutil.rmtree(cwd)
if os.getcwd() != old_chdir:
os.chdir(old_chdir)
if is_jython and os._name == 'nt':
os.remove(ez_setup)
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = ['.', here,
join(here, 'virtualenv_support')]
if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except ImportError:
pass
else:
dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
def install_setuptools(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip,
search_dirs=search_dirs, never_download=never_download)
def install_distribute(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip, distribute=True,
search_dirs=search_dirs, never_download=never_download)
_pip_re = re.compile(r'^pip-.*(zip|tar.gz|tar.bz2|tgz|tbz)$', re.I)
def install_pip(py_executable, search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
filenames = []
for dir in search_dirs:
filenames.extend([join(dir, fn) for fn in os.listdir(dir)
if _pip_re.search(fn)])
filenames = [(os.path.basename(filename).lower(), i, filename) for i, filename in enumerate(filenames)]
filenames.sort()
filenames = [filename for basename, i, filename in filenames]
if not filenames:
filename = 'pip'
else:
filename = filenames[-1]
easy_install_script = 'easy_install'
if is_win:
easy_install_script = 'easy_install-script.py'
# There are two subtle issues here when invoking easy_install.
# 1. On unix-like systems the easy_install script can *only* be executed
# directly if its full filesystem path is no longer than 78 characters.
# 2. A work around to [1] is to use the `python path/to/easy_install foo`
# pattern, but that breaks if the path contains non-ASCII characters, as
# you can't put the file encoding declaration before the shebang line.
# The solution is to use Python's -x flag to skip the first line of the
# script (and any ASCII decoding errors that may have occurred in that line)
cmd = [py_executable, '-x', join(os.path.dirname(py_executable), easy_install_script), filename]
# jython and pypy don't yet support -x
if is_jython or is_pypy:
cmd.remove('-x')
if filename == 'pip':
if never_download:
logger.fatal("Can't find any local distributions of pip to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a pip "
"source distribution (zip/tar.gz/tar.bz2) in one of these "
"locations: %r" % search_dirs)
sys.exit(1)
logger.info('Installing pip from network...')
else:
logger.info('Installing existing %s distribution: %s' % (
os.path.basename(filename), filename))
logger.start_progress('Installing pip...')
logger.indent += 2
def _filter_setup(line):
return filter_ez_setup(line, 'pip')
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_setup)
finally:
logger.indent -= 2
logger.end_progress()
def filter_ez_setup(line, project_name='setuptools'):
if not line.strip():
return Logger.DEBUG
if project_name == 'distribute':
for prefix in ('Extracting', 'Now working', 'Installing', 'Before',
'Scanning', 'Setuptools', 'Egg', 'Already',
'running', 'writing', 'reading', 'installing',
'creating', 'copying', 'byte-compiling', 'removing',
'Processing'):
if line.startswith(prefix):
return Logger.DEBUG
return Logger.DEBUG
for prefix in ['Reading ', 'Best match', 'Processing setuptools',
'Copying setuptools', 'Adding setuptools',
'Installing ', 'Installed ']:
if line.startswith(prefix):
return Logger.DEBUG
return Logger.INFO
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
"""
Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing
"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
"""
Custom option parser which updates its defaults by checking the
configuration files and environmental variables
"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.files = self.get_config_files()
self.config.read(self.files)
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
"""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
config.update(dict(self.get_config_section('virtualenv')))
# 2. environmental variables
config.update(dict(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action == 'store_false':
val = not strtobool(val)
elif option.action in ('store_true', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occured during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
def get_config_section(self, name):
"""
Get a section of a configuration
"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
Returns a generator over all environment variables with the prefix VIRTUALENV_
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""
Overriding to make updating the defaults after instantiation of
the option parser possible, update_defaults() does the dirty work.
"""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, basestring):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def main():
parser = ConfigOptionParser(
version=virtualenv_version,
usage="%prog [OPTIONS] DEST_DIR",
formatter=UpdatingDefaultsHelpFormatter())
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=0,
help="Increase verbosity")
parser.add_option(
'-q', '--quiet',
action='count',
dest='quiet',
default=0,
help='Decrease verbosity')
parser.add_option(
'-p', '--python',
dest='python',
metavar='PYTHON_EXE',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable)
parser.add_option(
'--clear',
dest='clear',
action='store_true',
help="Clear out the non-root install and start from scratch")
parser.set_defaults(system_site_packages=False)
parser.add_option(
'--no-site-packages',
dest='system_site_packages',
action='store_false',
help="Don't give access to the global site-packages dir to the "
"virtual environment (default)")
parser.add_option(
'--system-site-packages',
dest='system_site_packages',
action='store_true',
help="Give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
'--unzip-setuptools',
dest='unzip_setuptools',
action='store_true',
help="Unzip Setuptools or Distribute when installing it")
parser.add_option(
'--relocatable',
dest='relocatable',
action='store_true',
help='Make an EXISTING virtualenv environment relocatable. '
'This fixes up scripts and makes all .pth files relative')
parser.add_option(
'--distribute', '--use-distribute', # the second option is for legacy reasons here. Hi Kenneth!
dest='use_distribute',
action='store_true',
help='Use Distribute instead of Setuptools. Set environ variable '
'VIRTUALENV_DISTRIBUTE to make it the default ')
parser.add_option(
'--setuptools',
dest='use_distribute',
action='store_false',
help='Use Setuptools instead of Distribute. Set environ variable '
'VIRTUALENV_SETUPTOOLS to make it the default ')
# Set this to True to use distribute by default, even in Python 2.
parser.set_defaults(use_distribute=False)
default_search_dirs = file_search_dirs()
parser.add_option(
'--extra-search-dir',
dest="search_dirs",
action="append",
default=default_search_dirs,
help="Directory to look for setuptools/distribute/pip distributions in. "
"You can add any number of additional --extra-search-dir paths.")
parser.add_option(
'--never-download',
dest="never_download",
action="store_true",
help="Never download anything from the network. Instead, virtualenv will fail "
"if local distributions of setuptools/distribute/pip are not present.")
parser.add_option(
'--prompt',
dest='prompt',
help='Provides an alternative prompt prefix for this environment')
if 'extend_parser' in globals():
extend_parser(parser)
options, args = parser.parse_args()
global logger
if 'adjust_options' in globals():
adjust_options(options, args)
verbosity = options.verbose - options.quiet
logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)])
if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
env = os.environ.copy()
interpreter = resolve_interpreter(options.python)
if interpreter == sys.executable:
logger.warn('Already using interpreter %s' % interpreter)
else:
logger.notify('Running virtualenv with interpreter %s' % interpreter)
env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
file = __file__
if file.endswith('.pyc'):
file = file[:-1]
popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
raise SystemExit(popen.wait())
# Force --distribute on Python 3, since setuptools is not available.
if majver > 2:
options.use_distribute = True
if os.environ.get('PYTHONDONTWRITEBYTECODE') and not options.use_distribute:
print(
"The PYTHONDONTWRITEBYTECODE environment variable is "
"not compatible with setuptools. Either use --distribute "
"or unset PYTHONDONTWRITEBYTECODE.")
sys.exit(2)
if not args:
print('You must provide a DEST_DIR')
parser.print_help()
sys.exit(2)
if len(args) > 1:
print('There must be only one argument: DEST_DIR (you gave %s)' % (
' '.join(args)))
parser.print_help()
sys.exit(2)
home_dir = args[0]
if os.environ.get('WORKING_ENV'):
logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
logger.fatal('Please deactivate your workingenv, then re-run this script')
sys.exit(3)
if 'PYTHONHOME' in os.environ:
logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
del os.environ['PYTHONHOME']
if options.relocatable:
make_environment_relocatable(home_dir)
return
create_environment(home_dir,
site_packages=options.system_site_packages,
clear=options.clear,
unzip_setuptools=options.unzip_setuptools,
use_distribute=options.use_distribute,
prompt=options.prompt,
search_dirs=options.search_dirs,
never_download=options.never_download)
if 'after_install' in globals():
after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None):
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % cmd_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False, use_distribute=False,
prompt=None, search_dirs=None, never_download=False):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear))
install_distutils(home_dir)
if use_distribute:
install_distribute(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
else:
install_setuptools(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
install_pip(py_executable, search_dirs=search_dirs, never_download=never_download)
install_activate(home_dir, bin_dir, prompt)
def is_executable_file(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
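# e.g. on POSIX CPython 2.7 with home_dir='/tmp/env' this returns
# ('/tmp/env', '/tmp/env/lib/python2.7',
#  '/tmp/env/include/python2.7', '/tmp/env/bin')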
if is_win:
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
import ctypes
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
size = max(len(home_dir)+1, 256)
buf = ctypes.create_unicode_buffer(size)
try:
u = unicode
except NameError:
u = str
ret = GetShortPathName(u(home_dir), buf, size)
if not ret:
print('Error: the path "%s" has a space in it' % home_dir)
print('We could not determine the short pathname for it.')
print('Exiting.')
sys.exit(3)
home_dir = str(buf.value)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
if is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
elif is_pypy:
lib_dir = home_dir
inc_dir = join(home_dir, 'include')
bin_dir = join(home_dir, 'bin')
elif not is_win:
lib_dir = join(home_dir, 'lib', py_version)
multiarch_exec = '/usr/bin/multiarch-platform'
if is_executable_file(multiarch_exec):
# In Mageia (2) and Mandriva distros the include dir must be like:
# virtualenv/include/multiarch-x86_64-linux/python2.7
# instead of being virtualenv/include/python2.7
p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# stdout.strip is needed to remove newline character
inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
else:
inc_dir = join(home_dir, 'include', py_version + abiflags)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
def change_prefix(filename, dst_prefix):
prefixes = [sys.prefix]
if is_darwin:
prefixes.extend((
os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(sys.prefix, "Extras", "lib", "python"),
os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"),
# Python 2.6 no-frameworks
os.path.join("~", ".local", "lib","python", sys.version[:3], "site-packages"),
# System Python 2.7 on OSX Mountain Lion
os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages")))
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
if hasattr(sys, 'base_prefix'):
prefixes.append(sys.base_prefix)
prefixes = list(map(os.path.expanduser, prefixes))
prefixes = list(map(os.path.abspath, prefixes))
# Check longer prefixes first so we don't split in the middle of a filename
prefixes = sorted(prefixes, key=len, reverse=True)
filename = os.path.abspath(filename)
for src_prefix in prefixes:
if filename.startswith(src_prefix):
_, relpath = filename.split(src_prefix, 1)
if src_prefix != os.sep: # sys.prefix == "/"
assert relpath[0] == os.sep
relpath = relpath[1:]
return join(dst_prefix, relpath)
assert False, "Filename %s does not start with any of these prefixes: %s" % \
(filename, prefixes)
def copy_required_modules(dst_prefix):
import imp
# If we are running under -p, we need to remove the current
# directory from sys.path temporarily here, so that we
# definitely get the modules from the site directory of
# the interpreter we are running under, not the one
# virtualenv.py is installed under (which might lead to py2/py3
# incompatibility issues)
_prev_sys_path = sys.path
if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
sys.path = sys.path[1:]
try:
for modname in REQUIRED_MODULES:
if modname in sys.builtin_module_names:
logger.info("Ignoring built-in bootstrap module: %s" % modname)
continue
try:
f, filename, _ = imp.find_module(modname)
except ImportError:
logger.info("Cannot import bootstrap module: %s" % modname)
else:
if f is not None:
f.close()
# special-case custom readline.so on OS X:
if modname == 'readline' and sys.platform == 'darwin' and not filename.endswith(join('lib-dynload', 'readline.so')):
dst_filename = join(dst_prefix, 'lib', 'python%s' % sys.version[:3], 'readline.so')
else:
dst_filename = change_prefix(filename, dst_prefix)
copyfile(filename, dst_filename)
if filename.endswith('.pyc'):
pyfile = filename[:-1]
if os.path.exists(pyfile):
copyfile(pyfile, dst_filename[:-1])
finally:
sys.path = _prev_sys_path
def subst_path(prefix_path, prefix, home_dir):
prefix_path = os.path.normpath(prefix_path)
prefix = os.path.normpath(prefix)
home_dir = os.path.normpath(home_dir)
if not prefix_path.startswith(prefix):
logger.warn('Path not in prefix %r %r', prefix_path, prefix)
return
return prefix_path.replace(prefix, home_dir, 1)
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
"""Install just the base environment, no distutils patches etc"""
if sys.executable.startswith(bin_dir):
print('Please use the *system* python to run this script')
return
if clear:
rmtree(lib_dir)
## FIXME: why not delete it?
## Maybe it should delete everything with #!/path/to/venv/python in it
logger.notify('Not deleting %s', bin_dir)
if hasattr(sys, 'real_prefix'):
logger.notify('Using real prefix %r' % sys.real_prefix)
prefix = sys.real_prefix
elif hasattr(sys, 'base_prefix'):
logger.notify('Using base prefix %r' % sys.base_prefix)
prefix = sys.base_prefix
else:
prefix = sys.prefix
mkdir(lib_dir)
fix_lib64(lib_dir)
stdlib_dirs = [os.path.dirname(os.__file__)]
if is_win:
stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
elif is_darwin:
stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
if hasattr(os, 'symlink'):
logger.info('Symlinking Python bootstrap modules')
else:
logger.info('Copying Python bootstrap modules')
logger.indent += 2
try:
# copy required files...
for stdlib_dir in stdlib_dirs:
if not os.path.isdir(stdlib_dir):
continue
for fn in os.listdir(stdlib_dir):
bn = os.path.splitext(fn)[0]
if fn != 'site-packages' and bn in REQUIRED_FILES:
copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
# ...and modules
copy_required_modules(home_dir)
finally:
logger.indent -= 2
mkdir(join(lib_dir, 'site-packages'))
import site
site_filename = site.__file__
if site_filename.endswith('.pyc'):
site_filename = site_filename[:-1]
elif site_filename.endswith('$py.class'):
site_filename = site_filename.replace('$py.class', '.py')
site_filename_dst = change_prefix(site_filename, home_dir)
site_dir = os.path.dirname(site_filename_dst)
writefile(site_filename_dst, SITE_PY)
writefile(join(site_dir, 'orig-prefix.txt'), prefix)
site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
if not site_packages:
writefile(site_packages_filename, '')
if is_pypy or is_win:
stdinc_dir = join(prefix, 'include')
else:
stdinc_dir = join(prefix, 'include', py_version + abiflags)
if os.path.exists(stdinc_dir):
copyfile(stdinc_dir, inc_dir)
else:
logger.debug('No include dir %s' % stdinc_dir)
platinc_dir = distutils.sysconfig.get_python_inc(plat_specific=1)
if platinc_dir != stdinc_dir:
platinc_dest = distutils.sysconfig.get_python_inc(
plat_specific=1, prefix=home_dir)
if platinc_dir == platinc_dest:
# Do platinc_dest manually due to a CPython bug;
# not http://bugs.python.org/issue3386 but a close cousin
platinc_dest = subst_path(platinc_dir, prefix, home_dir)
if platinc_dest:
# PyPy's stdinc_dir and prefix are relative to the original binary
# (traversing virtualenvs), whereas the platinc_dir is relative to
# the inner virtualenv and ignores the prefix argument.
# This seems more evolved than designed.
copyfile(platinc_dir, platinc_dest)
# pypy never uses exec_prefix, just ignore it
if sys.exec_prefix != prefix and not is_pypy:
if is_win:
exec_dir = join(sys.exec_prefix, 'lib')
elif is_jython:
exec_dir = join(sys.exec_prefix, 'Lib')
else:
exec_dir = join(sys.exec_prefix, 'lib', py_version)
for fn in os.listdir(exec_dir):
copyfile(join(exec_dir, fn), join(lib_dir, fn))
if is_jython:
# Jython has either jython-dev.jar and a javalib/ dir, or just
# jython.jar
for name in 'jython-dev.jar', 'javalib', 'jython.jar':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(home_dir, name))
# XXX: registry should always exist after Jython 2.5rc1
src = join(prefix, 'registry')
if os.path.exists(src):
copyfile(src, join(home_dir, 'registry'), symlink=False)
copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
symlink=False)
mkdir(bin_dir)
py_executable = join(bin_dir, os.path.basename(sys.executable))
if 'Python.framework' in prefix:
# OS X framework builds cause validation to break
# https://github.com/pypa/virtualenv/issues/322
if os.environ.get('__PYVENV_LAUNCHER__'):
os.unsetenv('__PYVENV_LAUNCHER__')
if re.search(r'/Python(?:-32|-64)*$', py_executable):
# The name of the python executable is not quite what
# we want, so rename it.
py_executable = os.path.join(
os.path.dirname(py_executable), 'python')
logger.notify('New %s executable in %s', expected_exe, py_executable)
pcbuild_dir = os.path.dirname(sys.executable)
pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
logger.notify('Detected python running from build directory %s', pcbuild_dir)
logger.notify('Writing .pth file linking to build directory for *.pyd files')
writefile(pyd_pth, pcbuild_dir)
else:
pcbuild_dir = None
if os.path.exists(pyd_pth):
logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
os.unlink(pyd_pth)
if sys.executable != py_executable:
## FIXME: could I just hard link?
executable = sys.executable
if is_cygwin and os.path.exists(executable + '.exe'):
# Cygwin misreports sys.executable sometimes
executable += '.exe'
py_executable += '.exe'
logger.info('Executable actually exists in %s' % executable)
shutil.copyfile(executable, py_executable)
make_exe(py_executable)
if is_win or is_cygwin:
pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
if os.path.exists(pythonw):
logger.info('Also created pythonw.exe')
shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
if os.path.exists(python_d):
logger.info('Also created python_d.exe')
shutil.copyfile(python_d, python_d_dest)
elif os.path.exists(python_d_dest):
logger.info('Removed python_d.exe as it is no longer at the source')
os.unlink(python_d_dest)
# We need to copy the DLL to ensure that Windows will load the correct one.
# It may not exist if we are running under Cygwin.
py_executable_dll = 'python%s%s.dll' % (
sys.version_info[0], sys.version_info[1])
py_executable_dll_d = 'python%s%s_d.dll' % (
sys.version_info[0], sys.version_info[1])
pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
if os.path.exists(pythondll):
logger.info('Also created %s' % py_executable_dll)
shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
if os.path.exists(pythondll_d):
logger.info('Also created %s' % py_executable_dll_d)
shutil.copyfile(pythondll_d, pythondll_d_dest)
elif os.path.exists(pythondll_d_dest):
logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
os.unlink(pythondll_d_dest)
if is_pypy:
# make a symlink python --> pypy-c
python_executable = os.path.join(os.path.dirname(py_executable), 'python')
if sys.platform in ('win32', 'cygwin'):
python_executable += '.exe'
logger.info('Also created executable %s' % python_executable)
copyfile(py_executable, python_executable)
if is_win:
for name in 'libexpat.dll', 'libpypy.dll', 'libpypy-c.dll', 'libeay32.dll', 'ssleay32.dll', 'sqlite.dll':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(bin_dir, name))
if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
secondary_exe = os.path.join(os.path.dirname(py_executable),
expected_exe)
py_executable_ext = os.path.splitext(py_executable)[1]
if py_executable_ext == '.exe':
# python2.4 gives an extension of '.4' :P
secondary_exe += py_executable_ext
if os.path.exists(secondary_exe):
logger.warn('Not overwriting existing %s script %s (you must use %s)'
% (expected_exe, secondary_exe, py_executable))
else:
logger.notify('Also creating executable in %s' % secondary_exe)
shutil.copyfile(sys.executable, secondary_exe)
make_exe(secondary_exe)
if '.framework' in prefix:
if 'Python.framework' in prefix:
logger.debug('MacOSX Python framework detected')
# Make sure we use the embedded interpreter inside
# the framework, even if sys.executable points to
# the stub executable in ${sys.prefix}/bin
# See http://groups.google.com/group/python-virtualenv/
# browse_thread/thread/17cab2f85da75951
original_python = os.path.join(
prefix, 'Resources/Python.app/Contents/MacOS/Python')
if 'EPD' in prefix:
logger.debug('EPD framework detected')
original_python = os.path.join(prefix, 'bin/python')
shutil.copy(original_python, py_executable)
# Copy the framework's dylib into the virtual
# environment
virtual_lib = os.path.join(home_dir, '.Python')
if os.path.exists(virtual_lib):
os.unlink(virtual_lib)
copyfile(
os.path.join(prefix, 'Python'),
virtual_lib)
# And then change the install_name of the copied python executable
try:
mach_o_change(py_executable,
os.path.join(prefix, 'Python'),
'@executable_path/../.Python')
except:
e = sys.exc_info()[1]
logger.warn("Could not call mach_o_change: %s. "
"Trying to call install_name_tool instead." % e)
try:
call_subprocess(
["install_name_tool", "-change",
os.path.join(prefix, 'Python'),
'@executable_path/../.Python',
py_executable])
except:
logger.fatal("Could not call install_name_tool -- you must "
"have Apple's development tools installed")
raise
if not is_win:
# Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist
py_exe_version_major = 'python%s' % sys.version_info[0]
py_exe_version_major_minor = 'python%s.%s' % (
sys.version_info[0], sys.version_info[1])
py_exe_no_version = 'python'
required_symlinks = [ py_exe_no_version, py_exe_version_major,
py_exe_version_major_minor ]
py_executable_base = os.path.basename(py_executable)
if py_executable_base in required_symlinks:
# Don't try to symlink to yourself.
required_symlinks.remove(py_executable_base)
for pth in required_symlinks:
full_pth = join(bin_dir, pth)
if os.path.exists(full_pth):
os.unlink(full_pth)
os.symlink(py_executable_base, full_pth)
if is_win and ' ' in py_executable:
# There's a bug with subprocess on Windows when using a first
# argument that has a space in it. Instead we have to quote
# the value:
py_executable = '"%s"' % py_executable
# NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc_stdout, proc_stderr = proc.communicate()
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EACCES:
logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
sys.exit(100)
else:
raise e
proc_stdout = proc_stdout.strip().decode("utf-8")
proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
if hasattr(norm_home_dir, 'decode'):
norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
if proc_stdout != norm_home_dir:
logger.fatal(
'ERROR: The executable %s is not functioning' % py_executable)
logger.fatal(
'ERROR: It thinks sys.prefix is %r (should be %r)'
% (proc_stdout, norm_home_dir))
logger.fatal(
'ERROR: virtualenv is not compatible with this system or executable')
if is_win:
logger.fatal(
'Note: some Windows users have reported this error when they '
'installed Python for "Only this user" or have multiple '
'versions of Python installed. Copying the appropriate '
'PythonXX.dll to the virtualenv Scripts/ directory may fix '
'this problem.')
sys.exit(100)
else:
logger.info('Got sys.prefix result: %r' % proc_stdout)
pydistutils = os.path.expanduser('~/.pydistutils.cfg')
if os.path.exists(pydistutils):
logger.notify('Please make sure you remove any previous custom paths from '
'your %s file.' % pydistutils)
## FIXME: really this should be calculated earlier
fix_local_scheme(home_dir)
if site_packages:
if os.path.exists(site_packages_filename):
logger.info('Deleting %s' % site_packages_filename)
os.unlink(site_packages_filename)
return py_executable
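# install_activate writes the shell-specific activation scripts (bash, fish,
# csh, plus batch/PowerShell on Windows) from the embedded templates below,
# substituting the __VIRTUAL_ENV__ / __VIRTUAL_PROMPT__ style placeholders.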
def install_activate(home_dir, bin_dir, prompt=None):
home_dir = os.path.abspath(home_dir)
if is_win or is_jython and os._name == 'nt':
files = {
'activate.bat': ACTIVATE_BAT,
'deactivate.bat': DEACTIVATE_BAT,
'activate.ps1': ACTIVATE_PS,
}
# MSYS needs paths of the form /c/path/to/file
drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)
# Run-time conditional enables (basic) Cygwin compatibility
home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
(home_dir, home_dir_msys))
files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)
else:
files = {'activate': ACTIVATE_SH}
# supplying activate.fish in addition to, not instead of, the
# bash script support.
files['activate.fish'] = ACTIVATE_FISH
# same for csh/tcsh support...
files['activate.csh'] = ACTIVATE_CSH
files['activate_this.py'] = ACTIVATE_THIS
if hasattr(home_dir, 'decode'):
home_dir = home_dir.decode(sys.getfilesystemencoding())
vname = os.path.basename(home_dir)
for name, content in files.items():
content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
content = content.replace('__VIRTUAL_ENV__', home_dir)
content = content.replace('__VIRTUAL_NAME__', vname)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
writefile(os.path.join(bin_dir, name), content)
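# install_distutils shadows the stdlib distutils package inside the virtualenv
# with a patched __init__.py and a distutils.cfg, so that distutils-based
# installs respect the environment.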
def install_distutils(home_dir):
distutils_path = change_prefix(distutils.__path__[0], home_dir)
mkdir(distutils_path)
## FIXME: maybe this prefix setting should only be put in place if
## there's a local distutils.cfg with a prefix setting?
home_dir = os.path.abspath(home_dir)
## FIXME: this is breaking things, removing for now:
#distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir):
"""
Platforms that use the "posix_local" install scheme (like Ubuntu with
Python 2.7) need to be given an additional "local" location, sigh.
"""
try:
import sysconfig
except ImportError:
pass
else:
if sysconfig._get_default_scheme() == 'posix_local':
local_path = os.path.join(home_dir, 'local')
if not os.path.exists(local_path):
os.mkdir(local_path)
for subdir_name in os.listdir(home_dir):
if subdir_name == 'local':
continue
os.symlink(os.path.abspath(os.path.join(home_dir, subdir_name)), \
os.path.join(local_path, subdir_name))
def fix_lib64(lib_dir):
"""
Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
instead of lib/pythonX.Y. If this is such a platform we'll just create a
symlink so lib64 points to lib
"""
if [p for p in distutils.sysconfig.get_config_vars().values()
if isinstance(p, basestring) and 'lib64' in p]:
logger.debug('This system uses lib64; symlinking lib64 to lib')
assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
"Unexpected python lib dir: %r" % lib_dir)
lib_parent = os.path.dirname(lib_dir)
top_level = os.path.dirname(lib_parent)
lib_dir = os.path.join(top_level, 'lib')
lib64_link = os.path.join(top_level, 'lib64')
assert os.path.basename(lib_parent) == 'lib', (
"Unexpected parent dir: %r" % lib_parent)
if os.path.lexists(lib64_link):
return
os.symlink('lib', lib64_link)
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
raise SystemExit(3)
if not is_executable(exe):
logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
raise SystemExit(3)
return exe
def is_executable(exe):
"""Checks a file is executable"""
return os.access(exe, os.X_OK)
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
"""
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
activate_this = os.path.join(bin_dir, 'activate_this.py')
if not os.path.exists(activate_this):
logger.fatal(
'The environment doesn\'t have a file %s -- please re-run virtualenv '
'on this environment to update it' % activate_this)
fixup_scripts(home_dir)
fixup_pth_and_egg_link(home_dir)
## FIXME: need to fix up distutils.cfg
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
'activate', 'activate.bat', 'activate_this.py']
def fixup_scripts(home_dir):
# This is what we expect at the top of scripts:
shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
# This is what we'll put:
new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
if is_win:
bin_suffix = 'Scripts'
else:
bin_suffix = 'bin'
bin_dir = os.path.join(home_dir, bin_suffix)
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
for filename in os.listdir(bin_dir):
filename = os.path.join(bin_dir, filename)
if not os.path.isfile(filename):
# ignore subdirs, e.g. .svn ones.
continue
f = open(filename, 'rb')
try:
try:
lines = f.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
# This is probably a binary program instead
# of a script, so just ignore it.
continue
finally:
f.close()
if not lines:
logger.warn('Script %s is an empty file' % filename)
continue
if not lines[0].strip().startswith(shebang):
if os.path.basename(filename) in OK_ABS_SCRIPTS:
logger.debug('Cannot make script %s relative' % filename)
elif lines[0].strip() == new_shebang:
logger.info('Script %s has already been made relative' % filename)
else:
logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
% (filename, shebang))
continue
logger.notify('Making script %s relative' % filename)
script = relative_script([new_shebang] + lines[1:])
f = open(filename, 'wb')
f.write('\n'.join(script).encode('utf-8'))
f.close()
def relative_script(lines):
"Return a script that'll work in a relocatable environment."
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
# Find the last future statement in the script. If we insert the activation
# line before a future statement, Python will raise a SyntaxError.
activate_at = None
for idx, line in reversed(list(enumerate(lines))):
if line.split()[:3] == ['from', '__future__', 'import']:
activate_at = idx + 1
break
if activate_at is None:
# Activate after the shebang.
activate_at = 1
return lines[:activate_at] + ['', activate, ''] + lines[activate_at:]
def fixup_pth_and_egg_link(home_dir, sys_path=None):
"""Makes .pth and .egg-link files use relative paths"""
home_dir = os.path.normcase(os.path.abspath(home_dir))
if sys_path is None:
sys_path = sys.path
for path in sys_path:
if not path:
path = '.'
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if not path.startswith(home_dir):
logger.debug('Skipping system (non-environment) directory %s' % path)
continue
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if filename.endswith('.pth'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .pth file %s, skipping' % filename)
else:
fixup_pth_file(filename)
if filename.endswith('.egg-link'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .egg-link file %s, skipping' % filename)
else:
fixup_egg_link(filename)
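# fixup_pth_file rewrites absolute paths in a .pth file as paths relative to
# the file itself; blank lines, comments, 'import ...' lines and paths that
# are already relative are left untouched.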
def fixup_pth_file(filename):
lines = []
prev_lines = []
f = open(filename)
prev_lines = f.readlines()
f.close()
for line in prev_lines:
line = line.strip()
if (not line or line.startswith('#') or line.startswith('import ')
or os.path.abspath(line) != line):
lines.append(line)
else:
new_value = make_relative_path(filename, line)
if line != new_value:
logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
lines.append(new_value)
if lines == prev_lines:
logger.info('No changes to .pth file %s' % filename)
return
logger.notify('Making paths in .pth file %s relative' % filename)
f = open(filename, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
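# fixup_egg_link performs the same rewrite for the single absolute path
# stored in an .egg-link file.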
def fixup_egg_link(filename):
f = open(filename)
link = f.readline().strip()
f.close()
if os.path.abspath(link) != link:
logger.debug('Link in %s already relative' % filename)
return
new_link = make_relative_path(filename, link)
logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
f = open(filename, 'w')
f.write(new_link)
f.close()
def make_relative_path(source, dest, dest_is_directory=True):
"""
Make a filename relative, where the filename is dest, and it is
being referred to from the filename source.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
"""
source = os.path.dirname(source)
if not dest_is_directory:
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ['..']*len(source_parts) + dest_parts
if not dest_is_directory:
full_parts.append(dest_filename)
if not full_parts:
# Special case for the current directory (otherwise it'd be '')
return './'
return os.path.sep.join(full_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
"""
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.5'`` then the
script will start with ``#!/usr/bin/env python2.5`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
"""
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:-1]
f = codecs.open(filename, 'r', encoding='utf-8')
content = f.read()
f.close()
py_exe = 'python%s' % python_version
content = (('#!/usr/bin/env %s\n' % py_exe)
+ '## WARNING: This file is generated\n'
+ content)
return content.replace('##EXT' 'END##', extra_text)
##EXTEND##
def convert(s):
b = base64.b64decode(s.encode('ascii'))
return zlib.decompress(b).decode('utf-8')
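# Each ##file section below embeds a support file (site.py, the activate
# scripts, the distutils shims, and the setuptools/distribute bootstrap
# scripts) as a zlib-compressed, base64-encoded blob decoded by convert().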
##file site.py
SITE_PY = convert("""
eJzFPf1z2zaWv/OvwMqToeTKdOJ0eztO3RsncVrfuYm3SWdz63q0lARZrCmSJUjL6s3d337vAwAB
kpLtTXdO04klEnh4eHhfeHgPHQwGp0Uhs7lY5fM6lULJuJwtRRFXSyUWeSmqZVLOD4q4rDbwdHYb
30glqlyojYqwVRQE+1/4CfbFp2WiDArwLa6rfBVXySxO041IVkVeVnIu5nWZZDciyZIqidPkd2iR
Z5HY/3IMgvNMwMzTRJbiTpYK4CqRL8TlplrmmRjWBc75RfTn+OVoLNSsTIoKGpQaZ6DIMq6CTMo5
oAktawWkTCp5oAo5SxbJzDZc53U6F0Uaz6T4xz94atQ0DAOVr+R6KUspMkAGYEqAVSAe8DUpxSyf
y0iI13IW4wD8vCFWwNDGuGYKyZjlIs2zG5hTJmdSqbjciOG0rggQoSzmOeCUAAZVkqbBOi9v1QiW
lNZjDY9EzOzhT4bZA+aJ43c5B3D8kAU/Z8n9mGED9yC4aslsU8pFci9iBAs/5b2cTfSzYbIQ82Sx
ABpk1QibBIyAEmkyPSxoOb7VK/TdIWFluTKGMSSizI35JfWIgvNKxKkCtq0LpJEizN/KaRJnQI3s
DoYDiEDSoG+ceaIqOw7NTuQAoMR1rEBKVkoMV3GSAbP+GM8I7b8l2TxfqxFRAFZLiV9rVbnzH/YQ
AFo7BBgHuFhmNessTW5luhkBAp8A+1KqOq1QIOZJKWdVXiZSEQBAbSPkPSA9FnEpNQmZM43cjon+
RJMkw4VFAUOBx5dIkkVyU5ckYWKRAOcCV7z78JN4e/b6/PS95jEDjGX2ZgU4AxRaaAcnGEAc1qo8
THMQ6Ci4wD8ins9RyG5wfMCraXD44EoHQ5h7EbX7OAsOZNeLq4eBOVagTGisgPr9N3QZqyXQ538e
WO8gON1GFZo4f1svc5DJLF5JsYyZv5Azgm81nO+iolq+Am5QCKcCUilcHEQwQXhAEpdmwzyTogAW
S5NMjgKg0JTa+qsIrPA+zw5orVucABDKIIOXzrMRjZhJmGgX1ivUF6bxhmammwR2nVd5SYoD+D+b
kS5K4+yWcFTEUPxtKm+SLEOEkBeCcC+kgdVtApw4j8QFtSK9YBqJkLUXt0SRqIGXkOmAJ+V9vCpS
OWbxRd26W43QYLISZq1T5jhoWZF6pVVrptrLe0fR5xbXEZrVspQAvJ56QrfI87GYgs4mbIp4xeJV
rXPinKBHnqgT8gS1hL74HSh6qlS9kvYl8gpoFmKoYJGnab4Gkh0HgRB72MgYZZ854S28g38BLv6b
ymq2DAJnJAtYg0Lkt4FCIGASZKa5WiPhcZtm5baSSTLWFHk5lyUN9ThiHzLij2yMcw3e55U2ajxd
XOV8lVSokqbaZCZs8bKwYv34iucN0wDLrYhmpmlDpxVOLy2W8VQal2QqFygJepFe2WWHMYOeMckW
V2LFVgbeAVlkwhakX7Gg0llUkpwAgMHCF2dJUafUSCGDiRgGWhUEfxWjSc+1swTszWY5QIXE5nsG
9gdw+x3EaL1MgD4zgAAaBrUULN80qUp0EBp9FPhG3/Tn8YFTzxfaNvGQizhJtZWPs+CcHp6VJYnv
TBbYa6yJoWCGWYWu3U0GdEQxHwwGQWDcoY0yX3MVVOXmGFhBmHEmk2mdoOGbTNDU6x8q4FGEM7DX
zbaz8EBDmE7vgUpOl0WZr/C1ndtHUCYwFvYI9sQlaRnJDrLHia+QfK5KL0xTtN0OOwvUQ8HlT2fv
zj+ffRQn4qpRaeO2PruGMc+yGNiaLAIwVWvYRpdBS1R8Ceo+8Q7MOzEF2DPqTeIr46oG3gXUP5U1
vYZpzLyXwdn709cXZ5OfP579NPl4/ukMEAQ7I4M9mjKaxxocRhWBcABXzlWk7WvQ6UEPXp9+tA+C
SaImxabYwAMwlMDC5RDmOxYhPpxoGzxJskUejqjxr+yEn7Ba0R7X1fHX1+LkRIS/xndxGIDX0zTl
RfyRBODTppDQtYI/w1yNgmAuFyAstxJFarhPnuyIOwARoWWuLeuveZKZ98xH7hAk8UPqAThMJrM0
VgobTyYhkJY69HygQ8TuMMrJEDoWG7frSKOCn1LCUmTYZYz/9KAYT6kfosEoul1MIxCw1SxWklvR
9KHfZIJaZjIZ6gFB/IjHwUVixREK0wS1TJmAJ0q8glpnqvIUfyJ8lFsSGdwMoV7DRdKbneguTmup
hs6kgIjDYYuMqBoTRRwETsUQbGezdKNRm5qGZ6AZkC/NQe+VLcrhZw88FFAwZtuFWzPeLTHNENP/
4L0B4QBOYogKWqkWFNZn4vLsUrx8fnSADgjsEueWOl5ztKlJVkv7cAGrdSMrB2HuFY5JGEYuXRao
GfHp8W6Yq8iuTJcVFnqJS7nK7+QcsEUGdlZZ/ERvYC8N85jFsIqgicmCs9Iznl6MO0eePUgLWnug
3oqgmPU3S7/H23eZKVAdvBUmUut9OhuvoszvEnQPphv9EqwbqDe0ccYVCZyF85gMjRhoCPBbM6TU
WoagwMqaXUzCG0Gihp83qjAicBeoW6/p622Wr7MJ711PUE0OR5Z1UbA082KDZgn2xDuwG4BkDlux
hmgMBZx0gbJ1AMjD9GG6QFnaDQAgMM2KNlgOLLM5oynyPg+HRRijV4KEt5Ro4e/MELQ5MsRwINHb
yD4wWgIhweSsgrOqQTMZypppBgM7JPG57iLiTaMPoEXFCAzdUEPjRoZ+V8egnMWFq5ScfmhDP3/+
zGyjlhQRQcSmOGk0+gsyb1GxAVOYgE4wPhTHV4gN1uCOAphaadYUBx9FXrD/BOt5qWUbLDfsx6qq
OD48XK/XkY4H5OXNoVoc/vkv33zzl+esE+dz4h+YjiMtOjgWHdI79EGjb40F+s6sXIsfk8znRoI1
lORHkfOI+H1fJ/NcHB+MrP5ELm4MK/5rnA9QIBMzKFMZaDtoMHqmDp5FL9VAPBNDt+1wxJ6ENqnW
ivlWF3pUOdhu8DRmeZ1VoaNIlfgKrBvsqedyWt+EdnDPRpofMFWU06HlgYMX14iBzxmGr4wpnqCW
ILZAi++Q/idmm5j8Ga0hkLxoojr73U2/FjPEnT9e3q136AiNmWGikDtQIvwmjxZA27grOfjRzija
PccZNR/PEBhLHxjm1a7gEAWHcMG1GLtS53A1+qggTWtWzaBgXNUIChrcuilTW4MjGxkiSPRuzPac
F1a3ADq1Yn1mR29WAVq443SsXZsVHE8IeeBEvKAnEpza486757y0dZpSCKbFox5VGLC30Ginc+DL
oQEwFoPy5wG3NBuLD61F4TXoAZZz1AYZbNFhJnzj+oCDvUEPO3Ws/rbeTOM+ELhIQ44ePQo6oXzC
I5QKpKkY+j23sbglbnewnRaHmMosE4m02iJcbeXRK1Q7DdMiyVD3OosUzdIcvGKrFYmRmve+s0A7
F3zcZ8y0BGoyNORwGp2Qt+cJYKjbYcDrpsbQgBs5QYxWiSLrhmRawj/gVlBEgsJHQEuCZsE8Vsr8
if0BMmfnq79sYQ9LaGQIt+k2N6RjMTQc835PIBl1/ASkIAMSdoWRXEdWBylwmLNAnnDDa7lVuglG
pEDOHAtCagZwp5feBgefRHhmQRKKkO8rJQvxlRjA8rVF9XG6+w/lUhM2GDoNyFXQ8YYTNxbhxCFO
WnEJn6H9iASdbhQ5cPAUXB43NO+yuWFaGyYBZ91X3BYp0MgUORmMXFSvDWHc8O+fTpwWDbHMIIah
vIG8Qxgz0iiwy61Bw4IbMN6at8fSj92IBfYdhrk6kqt72P+FZaJmuQpxg9uJXbgfzRVd2lhsL5Lp
AP54CzAYXXuQZKrjFxhl+ScGCT3oYR90b7IYFpnH5TrJQlJgmn4n/tJ08LCk9Izc4UfaTh3CRDFG
efiuBAGhw8pDECfUBLArl6HSbn8X7M4pDixc7j7w3Oar45fXXeKOt0V47Kd/qc7uqzJWuFopLxoL
Ba5W14Cj0oXJxdlGHzfq42jcJpS5gl2k+PDxs0BCcCR2HW+eNvWG4RGbB+fkfQzqoNceJFdrdsQu
gAhqRWSUw/DpHPl4ZJ86uR0TewKQJzHMjkUxkDSf/DNwdi0UjHEw32QYR2urDPzA62++nvRERl0k
v/l68MAoLWL0if2w5QnakSkNQPRO23QpZZySr+F0oqhi1vCObVOMWKHT/k8z2XWP06nxonhmfK+S
36X4Thzt77886m+LHwMP4+ES3IXn44aC3Vnjx/estsLTJmsHsM7G1Xz2aAqwzc+nv8JmWen42l2c
pHQoAGQ4OEA9a/b5HLroVyQepJ26xiFB31ZMXT0Hxgg5sDDqTkf7Zacm9tyzITafIlZdVPZ0AkBz
fuUd6rtnl12oetkNDz/nk4ajHi3lzbjPr/gSDYufP0QZWWAeDQZ9ZN/Kjv8fs3z+r5nkI6dijp3/
QMP4REB/1EzYjjA0bUd8WP3L7ppgA+wRlmqLB7rDQ+wOzQeNC+PnsUpSYh91175YU64BhVnx0Aig
zNkF7IGDy6hPrN/UZcnnzqQCC1ke4FnqWGCalXECKXurC+bwvawQE9tsRmFrJykn71MjoQ5E25mE
zRaiX86WuQlfyewuKaEvKNxh+MOHH8/CfhOCIV3o9Hgr8ngXAuE+gWlDTZzwKX2YQk/p8s9LVXsX
4xPUHOeb4LAmW6998GPCJqTQvwYPBIe8s1s+I8fN+mwpZ7cTSef+yKbY1YmPv8HXiIlNB/CTt1S8
oAw0mMksrZFW7INj6uCizmZ0VFJJcLV0ni/m/dBpPkcCF2l8I4bUeY5RKM2NFKi6i0vtiBZljpml
ok7mhzfJXMjf6jjFHb5cLAAXPMfSryIenoJR4i0nJHDGoZKzukyqDZAgVrk+BqTcBafhdMMTHXpI
8okPExCzGY7FR5w2vmfCzQ25TJzAP/zASeIWHDuYc1rkLnoO77N8gqNOKEF3zEh1j+jpcdAeIQcA
AwAK8x+MdBDPfyPplXvWSGvuEhW1pEdKN+iQkzOGUIYjDHrwb/rpM6LLW1uwvNmO5c1uLG/aWN70
YnnjY3mzG0tXJHBhbfzKSEJfDKt9wtGbLOOGn3iYs3i25HaYuIkJmgBRFGavbWSK85e9IBcf9REQ
UtvO2TM9bJJfEs4ILXOOhmuQyP14nqX39Sbz3OlMqTS6M0/FuLPbEoD8vodRRHleU+rO8jaPqzjy
5OImzacgthbdcQNgLNq5QBw2ze4mUw70tizV4PK/Pv3w4T02R1ADk+lA3XAR0bDgVIb7cXmjutLU
7AMLYEdq6efkUDcNcO+RQTYeZY//eUvJHsg4Yk3JCrkowAOglCzbzE1cCsPWc53hpJ8zk/O504kY
ZNWgmdQWIp1eXr49/XQ6oOjf4H8HrsAY2vrS4eJjWtgGXf/NbW4pjn1AqHWYqzF+7pw8Wjcc8bCN
NWA7PnzrwdFjDHZvxMCf5b+UUrAkQKhIx4GfQqhH74G+aJvQoY9hxE7mnnvIxs6KfefInuOiOKLf
dwLRHsA/98Q9xgQIoF2oodkZNJupltK35nUHRR2gj/T1vngL1t56tXcyPkXtJHocQIeyttl29887
smGjp2T1+uz78/cX568vTz/94LiA6Mp9+Hh4JM5+/CwoVQQNGPtEMWZJVJiUBIbFrVIS8xz+qzHy
M68rjhdDr7cXF/rQZoV1Kpi4jDYnguec0WShcfiMA9L2oU5FQoxSvUFyCoIoc4cKhnC/tOJiFJXr
5GaqM5qis1rrrZcu9DIFYXTEHYH0QWOXFAyCs83gFaWgV2ZXWPJhoC6S6kFK22ibI5JSeLCTOeAc
hZmDFi9mSp3hSdNZK/qr0MU1vI5UkSawk3sVWlnS3TBTpmEc/dCedTNefRrQ6Q4j64Y8661YoNV6
FfLcdP9Rw2i/1YBhw2BvYd6ZpEwRSp/GPDcRYiM+1AnlPXy1S6/XQMGC4ZlfhYtomC6B2cewuRbL
BDYQwJNLsL64TwAIrZXwzwaOneiAzDFRI3yzmh/8NdQE8Vv/8ktP86pMD/4uCtgFCc4qCnuI6TZ+
CxufSEbi7MO7UcjIUZau+GuNuf3gkFAA1JF2SmXiw/TJUMl0oVNNfH2AL7SfQK9b3UtZlLp7v2sc
ogQ8U0PyGp4pQ78QM78s7DFOZdQCjWULFjMs/3MzEsxnT3xcyjTVyernby/OwHfESgqUID6CO4Ph
OF6Cp+k6D4/LE1ug8KwdXpfIxiW6sJRvMY+8Zr1BaxQ56u2laNh1osBwt1cnClzGiXLRHuK0GZap
BYiQlWEtzLIya7faIIXdNkRxlBlmicllSbmsPk8AL9PTmLdEsFfCOg0TgecD5SSrTPJimsxAj4LK
BYU6BiFB4mIxInFennEMPC+VqWGCh8WmTG6WFZ5zQOeI6iew+Y+nny/O31NNwdHLxuvuYc4x7QTG
nE9ygtmCGO2AL24GIHLVZNLHs/oVwkDtA3/arzhR5YQH6PTjwCL+ab/iIrITZyfIMwAFVRdt8cAN
gNOtT24aWWBc7T4YP242YIOZD4ZikVizonM+3Pl1OdG2bJkSivuYl084wFkUNjtNd3az09ofPcdF
gSde82F/I3jbJ1vmM4Wut5032/Lg3E9HCrGeFTDqtvbHMIlinaZ6Og6zbeMW2tZqG/tL5sRKnXaz
DEUZiDh0O49cJutXwro5c6CXUd0BJr7V6BpJ7FXlg1+ygfYwPEwssTv7FNORKlrICGAkQ+rCnRp8
KLQIoEAoEXjoSO54tH/kzdGxBg/PUesusI0/gCLUablU7pGXwInw5Td2HPkVoYV69FiEjqeSySy3
KWP4WS/Rq3zhz7FXBiiIiWJXxtmNHDKssYH5lU/sLSFY0rYeqa+S6z7DIs7BOb3fwuFdueg/ODGo
tRih0+5WbtrqyCcPNuitCtlJMB98Ga9B9xd1NeSV3HIO3VsItx3qwxAxERGa6nP4YYjhmN/CLevT
AO6lhoaFHt5vW05heW2MI2vtY6vAKbQvtAc7K2FrVik6lnEqC40var2AxuCeNCZ/YJ/qnCH7u6dk
zIkJWaA8uAvSm9tAN2iFEwZcucRlnwllxjeFNfrdXN7JFIwGGNkhFj78agsfRpENp/SmhH0xdpeb
y00/bvSmwezYVGagZ6YKWL8ok9UhotMb8dmFWYMMLvUveksSZ7fkJb/52/lYvHn/E/z7Wn4AU4qV
i2Pxd0BDvMlL2F9y6S3diYBFHRVvHPNaYXkjQaOjCr4+At22S4/OeCyiq038MhOrKQUm2JYrvrME
UOQJUjl64yeYGgr4bYq8Wt6o8RT7FmWgXyINtte9YK3IoW4ZLatVivbCiZI0q3k1uDh/c/b+41lU
3SOHm58DJ4ri52bhdPQZcYnnYWNhn8xqfHLtOM4/yLTo8Zv1ptPU0OCmU4SwLynsRpPvw4jt5iIu
MXIgis08n0XYEliey/aqNTjSI2d/+aCh96wswhqO9Cla483jY6CG+KWtiAbQkProOVFPQiieYv0Y
P44G/aZ4LCi0DX/2b9dzNzKuC4Fogm1Mm1kP/e5WFy6Zzhqe5STC68Qug6kNTZNYraYzt2bwQyb0
dSag5eicQy7iOq2EzEByaZNP90qApnfL/FhCmFXYnFHtG4Vp0nW8UU4SUqzEAEcdUGk8HshQxBD2
4D/Gt2wPsP5Q1FzIDNAJUdo/5U5XVc+WLMG8JSLq9SQerJPspZvPoynMg/IOedY4sjBPdBsZoxtZ
6fnzg+Ho6kWT6UBR6ZlX5DsrwOq5bLIHqrPY398fiH9/2PthVKI0z2/BLQPYvV7LBb3eYrn15Oxq
dT178yYCfpwt5RU8uKbouX1eZxSa3NGVFkTavwZGiGsTWmY07Vt2mYN2JR80cws+sNKW4+csoUuL
MLQkUdnqu58w7GSkiVgSFEMYq1mShBymgPXY5DXW52GYUfOLvAeOTxDMGN/iCRlvtZfoYVIureUe
i86JGBDgAeWW8WhU4EwVaoDn5HKj0ZycZ0nVlJY8dw9PdSF/Ze8i0nwl4jVKhplHixhOqafHqo2H
ne9kUW/Hks+u3IBja5b8+iHcgbVB0vLFwmAKD80izXJZzow5xRVLZknlgDHtEA53piuYyPpEQQ9K
A1DvZBXmVqLt2z/ZdXEx/UDnyAdmJJ0+VNlrrTg4FGetBMMoasanMJQlpOVb82UEo7ynsLb2BLyx
xJ90UBXrCrzbN9wSxzrTt2pw/kZz1QbAoZucrIK07OjpCOf6MAufmXbLXRj4oS064XaXlFUdpxN9
ecMEHbaJPVjXeNrSuJ1Fn9ZbASc/Bw/4QGfxg+NgsmyQnpiEa6o0TsRChygit9rML8wqcvTyjthX
Ap8CKTOfmBppE0S6suxqi091zqaj4hHUV6agaYtnbippOkUoLuZjynMyJRBbvGiDS/tOC/HdiRi+
GIs/tzZCs2KDtzIBys/m0bN56Ptk1PXq+KixM92NZwvCViAvr5883TSZ0vTCvvTzh/vqpEPcAK5A
dhaJnB88U4gd4/ylUDWUZl7bOYVmjNXpezbrSRdmN+UqVJU2Ba9+3SgUR7UY/9MOYiq+tR7g4lgU
WrZAtqDfJJ60kv/spWuYGKjR81cWPZdJ3+EUfsOLU+C9Jqjr6Gw9tNQZ9hZsz55cl1HyEVTTsOH4
Bz2qJ4lSO0e80wqPJxuJc1n/CTwc0iUzxDRftY6F/5XMw0n72w4XO1h+8/UuPF0F01sx0bOgj61i
4EVvly40C28+/UEvj2X6un6R6DhQGiEC/8/c4Uie2zFmBvNVL7nNEF7hLYl06otWJpWuJ8K7U74O
C10D2o5TohasCjgn9QIPae/o0sdTRQlleBPM10cvxqYAlwHpCbyMXn6l70akbuaOAd9tHevtELzU
/Y6if3OAJZXf277qbBBoWidu6uKYyu6dwUKKTI2iiaVDY0MerrFo1iwWeJlgQ0x2V+09G+/AgZiC
62BuBc3BB4I9XtSHrqs7GjnepuS2ifLWOKL9bBOiR+SAuDoVsd4ld9v06C4d+mRZ0L36jQluy5H1
lzG/QfeqLo5t2MqYFzxWHLqpkvipbnvru9i0oJ2tbsNd0+f+u+auWwR25iZToDN3v47TpYHuu92g
9tGAQwaaEI+qP2iUf79dcU3CWCeo9Ie/9QfJ73bCmW6xMA+BMekwGEwn9tTYGw9gFc/c75htdcBX
Fbtlu71+R2vaHfPR5vjHOA2cWYLPbSU/pQvNNQfyHoTunptwAupE3tyoSYz3Hk5o40rJUp0didkK
vaNr62SsNmYHg5cDAQjDTTqt1c1cB8YAZcn3L3OukXMLjKChKSPXSfFVyZxtgt4uAbiIo5DU30S5
OUd3kEoMHqu6LErYYQ70Lb2cBNOXN9wANUGxVaxuDeqmx1hf2kqalTKvTKkvxz5ZzXrUAUKwJe/Y
h8nEvgN+ed5caZCMLUPIrF7JMq6a63z8I/cEvIhmBCrwxgV2wl6NZLY4xUUssfzhIIWBJ/v9K71h
8zasO+4xGFmt93i2oh46nPqUKxIcq2S4Obm31/m510jN6fJn12G0zRrus1cYwVKYu+lIV+o4FL/V
92XxLawcJUeX0+EjkHu3vNFP9bZqjR1ei4bzzFUM3QuSsEKfrHTfvYcdLujecNjatNrxfb1hmaXj
am5pbKmo3fRexdOMNmqiM5iV+UB0xs+8f2J0xoP/yOiMvqUSDI7GR+uD3lz8B8I4rCbcKx8bRoA+
EyASnhK3Lgw0JnPoZrYXuUruB/bKZdaZTrmcMRPIkd27wAgEX+2o3DRg7+q1XdEZX7ro8fcXH16f
XhAtJpenb/7z9HvKfMKjiJbNenT4KssPmNoHXo61G8rS2Sp9gzfY9tyhyoVCGkLnfeegvwdCf1FY
34K2FZn7eluHTnFNtxMgvnvaLajbVHYv5I5/pgs53ByVVjJ0oJ9y5qr55Rz/m0fmFIzFoTnlMu+b
gwkto5247baFc3JIu+pE+6361s0tMeWRzWSmFcDzCOQvexxhylJs0Jsdlfb/CIPSr7Gkz9xYA1I4
k87NiXRpIoOq/P/jRgnKLsZNHjuMY3t7NbXjoxdlr2XHc9WZjAxBvJq6QXd+rrDPZbqFCkHACk/f
C8iIGP2nDyvt0f4zJa4OqHr3AJXNtf2Fa6ad3L8leIBf2fu1FGcB8REmNF7UqXsob/t0OpDzRyc9
+cIpFwHNdwh0bsRTAXujz8QKcboRIWwg9eEzZqASHfXleA7yaDcd7A2tnouDbbWdbm2jEC+2N5y3
yid1jyPuoR7ooWpTQedYYEyF3Fa0Kb4jyHxUKOhCLc/5wPNpvf+Hr3dXL45t4B75HV87ioRStgaO
Yb9yUh53XuLodCdmKceUE4d7NqfkV7e4dqCyT7Btj9Op+9iyDzI5BAxp4L3vj52ZHt7l9IM2ppb1
jmFKYvhMjWhaTqWMxt0+GXWn26itLhCu+nkEkI4KBFgIpqMbnSzMDadSD5/rXeG0putv3dOb0JEK
ysjyOYJ7GN+vwa/dnap1H9WdUTT9uUGbA/WSg3Cz78CtRl5IZLtJaE+94YMtLgAXPvf3f/GI/t1c
Qdv9aJdfbVu97C22Y18W00sx66ZFIvM4AiMDenNI2hprEoyg410vDR1dhmrmhnyBjh+lrOLl1rTB
IGd2oj0AaxSC/wPOBKoy
""")
##file ez_setup.py
EZ_SETUP_PY = convert("""
eJzNWmmP20YS/a5fwSgYSIJlDu9DhrzIJg5gIMgGuYCFPavpc8SYIhWS8li7yH/f181DJDWcJIt8
WAbOzJDN6qpXVa+qWvr8s+O52ufZbD6f/z3Pq7IqyNEoRXU6VnmelkaSlRVJU1IlWDR7K41zfjIe
SVYZVW6cSjFcq54WxpGwD+RBLMr6oXk8r41fTmWFBSw9cWFU+6ScySQV6pVqDyHkIAyeFIJVeXE2
HpNqbyTV2iAZNwjn+gW1oVpb5Ucjl/VOrfzNZjYzcMkiPxji3zt930gOx7yolJa7i5Z63fDWcnVl
WSF+PUEdgxjlUbBEJsz4KIoSIKi9L6+u1e9YxfPHLM0Jnx2SosiLtZEXGh2SGSStRJGRSnSLLpau
9aYMq3hulLlBz0Z5Oh7Tc5I9zJSx5Hgs8mORqNfzo3KCxuH+fmzB/b05m/2oYNK4Mr2xkiiM4oTf
S2UKK5KjNq/xqtby+FAQ3vejqYJh1oBXnsvZV2++/uKnb37c/fzm+x/e/uNbY2vMLTNgtj3vHv30
/TcKV/VoX1XHze3t8XxMzDq4zLx4uG2Cory9KW/xX7fb7dy4UbuYDb7vNu7dbHbg/o6TikDgf7TH
Fpc3XmJzar88nh3TNcXDw2JjLKLIcRiRsWU7vsUjL6JxHNBQOj4LRMDIYv2MFK+VQsOYRMSzXOH5
liMpjXwhXGnHnh26PqMTUpyhLn7gh6Ef84gEPJLM86zQIjG3Qid0eBw/L6XTxYMBJOJ2EHOHiiCw
JXEdEgjfEZ6MnCmL3KEulLo2syQL3TgmgeuHcRz6jPBY+sQK7OhZKZ0ubkQihrs8EIw7juOF0g5j
GXISBLEkbEKKN9QlcCzPJ44nuCdsQVkYSmG5MSGeCGQo/GelXHBh1CF25EOPiBMmJXW4DX0sl7rU
Zt7TUtgoXqgrHer7bswD+DWUoUd4GNsOBJHYiiYsYuN4gT1ccCAZhNzhjpTC9iwrdgNPOsSb8DSz
raEyDHA4hPrcJZbjB54fwD/MdiPLIqEVW8+L6bTxQ44X4aOYRlYYOsyPie+SyHNd4nM+iUwtxm/F
cOEFhEXAMg5ZFPt+6AhfRD7CUdCIhc+LCTptIoFMIkJaAQBymAg824M0B0YC8Alvg1SG2DiUCIIc
tl2O95FGTiRCSnzqE2jExfNiLp7igRvLmFoQ5jHP8eLQcj0umCOYxZxJT9lDbAKPxZ50qQxJiCh0
BYtcYVEH7g69mDrPi+mwoZLEjm1ZlMNNHDkBSYJzF44PPCsKJsSMeEZaVuBRGRDi0JBbUAvIeghs
K7JD5kw5asQzgR3YsSMEc33phQJeswPGA2I7kOqEU1JGPCPtCAQF8uUSoUIcP2YxpEibhzSM5ARb
sRHPCEvw0Asih8VxRCUNgXRkIXot+Dy0p5ztDp1EqJB2IDmHYb7v217k2SwEf/E4igN/SsqIrahF
Y9u1CSPUdSyAAZ4LpecxH0QR2vJZKZ1FCBKJPQPuSSpdZBSVsRcwC1CB9cRUwHhDiyLF1iB+12Gc
xix0KJMe6MsJpBMROcVW/tAiIWLJIwvqICERsdIV4HQ/BGHwyA6mPO0PLSISXMUlqoodWrYQADdE
cfIpQ8EjwRTL+CMfRdyVAQjBY4yQKLQ9BA53Q8oYd7nPJ6QEQ4uQMBGqfGTbASpRFHmhAxGomL4X
I7WniDMYVTfmB0T6IQW+6B6QDYEFQzzPRYL5ZIobgqFF1JERCX0HxR60S10UaQuu5sKXaCV8d0JK
OKI7Cz6SMeHMJYHtC9+2faQhWooIFDgZL+GoEpBIxr6HKsDB5ZakQcikLR24AY+cqQwIhxZ5qLEE
fCvRMiABPdezbVtyEbk2/oVTukSjbshSvZATA5GYo36oEASBR66lGivreSmdRYwSNwI3oOfwIpdZ
KmYRbQCbobJMloFoaJEdOnYIkoOjY85s3/Jji/gRdQXyPPanPB0PLYLuzLPQzNgKYerFgfCYpMKK
YCuzpjwdj5gBQYbGDrXVjSIegJ2IEFYA8mKB6031d42UziIp4FpX+MQOqe0wuIn5nk1D1F5UfjFV
SeJhPWIEaWNLxZrEERzEZMcuKltI/dhBjwMpv816EwHGm3JWFedNPXDtSblPE9rOW+jdZ+ITExg1
3uo7b9RI1KzFw/66GRfS2H0kaYJuX+xwawmddhnmwbWhBoDVRhuQSKO9r2bGdjyoH6qLJ5gtKowL
SoR+0dyLT/VdzHftMshpVn627aS8a0XfXeSpC3MXpsHXr9V0UlZcFJjrloMV6porkxoLmvnwBlMY
wRjGPzOM5Xd5WSY07Y1/GOnw9+Fvq/mVsJvOzMGj1eAvpY/4lFRLp75fwLlFpuGqAR0Nh3pRM15t
R8PculNrR0kptr2Bbo1JcYdRdZuXJjsV+K0Opu4FLlJy3tr+rHESxsYvTlV+AA4M0+UZo2jGbzuz
eycFaq4/kA/wJYbnj4CKKIAAnjLtSKp9Pc7fN0rfG+U+P6VcTbOkxrovrZ3Ms9OBisKo9qQyMAh3
grUsNQFnCl1DYurtlDplXL8ijPsBEPeGGmmXj/uE7dvdBbRWRxO1PGNxu1iZULJG6V5tqeT0jjH2
ohgckDwmmLnpJRIEXyMi6wDXKmc58EgLQfj5oj72eCt76mnY9XbN2YQWUzVaamlUaFUaQPSJBcsz
XtbYtGocCQJFgQpEVFolVQLXZQ+984za4439eSb0eUJ9NsJrvQBqnioMnzwfUVo2hw2iEabPcor8
hJ1ErUqdZ8Q4iLIkD6I+4Lgk3f29jpeCJKUwfjiXlTi8+aTwympHZAapcK8+2SBUUYsyXoWgMqY+
9TDbCNU/H0m5q1kI9m+NxfHDw64QZX4qmCgXimHU9oecn1JRqlOSHoGOH9c5gazjiIMGtuXqwiQq
5LaXpOnlZYPYKAXbtFuPEu3CAW2SmEBWFNXSWqtNeiTXEHW306v+6Q5tj/l2jWN2mpi3SkbtIBD7
WNYAIP3wCYbvXmoJqQ9I8+h6h4Foswmu5fyi8evt/EUD1epVI7uvwlDAz/XKL/NMpgmrAM2mz/59
z/9Ztp//uL9E/0S8L19vb8pVl8ttDuujzPfZkPDnjGSLSqVUlyLgDHV8p3OkOa5T2XLKMoSyaXyX
CkRIu/xKnsohlcogIAFbWg1lUpQA4lSqdFhAwrl1vfHyp57yC3Mk7332Plt+eSoKSAOd1wJuilHd
WqFqXWJZmKR4KN9Zd8/XrCd991WCwEzoSdXRb/Pq6xzs3AsUUpazJtvS4ZvrfkK+G6XznXrlc4Ci
CT//MKiZ/RCti+dTmfpXV1CVz8i4Qen86ok6qTOTXHjeSHNWdxmaEWsbkqo+9NVdw/9p3axZVx3r
t3Xz98qmuqd2va6ZNZXfX8rgRKnL6wLX1jdVJ1h1IunFiKZuDGtD+6lBgfJBHUTWHvGY1kHbtqBb
o8dPL29KtNM3peqm5/1cGJ1q14EPuf1yoDAzXgy7vpJ8FNB+iy675vlf8iRbtlWhXVqLKwumxOnW
91sU6LZbVuzTvo68K6tyWYtdbVQyfPExT1QAHQVRJbBVp+ySbUDR6tKhyCFIoVG2KKX5w2CV6q+V
X4bvqgsrzUdSZEuF88u/7qo/9Gi4siHn8qkov9EhoT4MWYqPIlN/wJwjlJ3tRXpUrdzbOtp67UQX
Kug3VPyrj2uWCooZWH5tgKpm6tYB6ZwJAIlXkIeqmQXpikdFsQQTalnqt/u0rknZnDVbgo2btuWy
I1TmbTSbs9kSjCg2CmEt5kDYXnVQPBd1rdnDvVCiesyLD82ma+NYF4ycVqT5qE0xhWaJG5CpYhEg
wHQjrhdA8iUTm8wpRFOA+gaYq7/SiwiK9VXI9Ej3qkfSUbZW2XT1GpoEHaxVoobFphdKhTi+qn8s
R+3UMDpbGtalrpzrLUalTKdcww8mfuZHkS2vln1ufI8+/vaxSCqQD3wMfHUHDQ7/sFaf9j0q76kO
gBUqDUGNLC+Kkw6OVIyEab/3w0M11pXQ61tObK/mk7OpuRoGmGrGWK6GGtcsoq2puWI9f6RzwIkH
prajnqy7lzDfqTlvM6YAbLDRu7A0L8VydUURZbXRQvvPm2rWkhYUTNUvLW3N/sil6vcBkb5ED/Jx
PVWxLzX37XOfg+oa+wbdUrOqLRBP9cejz5efa47reaDj6iuJlzXPzwx6+Lauu6zhZDAYDLTPVGr0
xgGWHw4w1By0he0JDWlmrPZqfKQhTlELNM6rF+oA5W6lw/RRLAod1sJQZfx3Q0VZqnAe1Sql9nUN
waJThqHuw7IzS6TlsMHvmbbbNWjtdsYWU55lWqa9+NNd/z9B8Jpc1ahLyzwVyNWJabft41FM6l79
qkcvxCH/qPlWe6L+GoMealE5KlBv+ju8O2q+J7vsJql+HTYrvWGq3+1cz3d/YEbDz2ea+dEgtpmO
9v85JJ9Ls07w70q5iuan8q5Nt7vhGK7BtlYIfFilqj8cx3SkqCdPR6ja5S8CoFNfa37BZbCldqAO
8/kPV23RfN0yyhwk+KALUaFOdBGEaJIuAT1/Qt5i+T3aqXn7hRvzeB4OlPP6qzTX3zYxV4vmpPLY
1ad2hCkv9PyTfmqoFKGnJK1e1ke/EPmgJsWzYuR+FBfN/KN6rfaouBN7AUT33JfuWv2pViwvXbUW
0tZCXTQXBV1cnnUnx+rdu+bUWbZF9cmTZ9kVu3oErEv0u7n646bY4N8aXIHxoek064as3chE8T2U
y9Vd97JZwuKudB7VUDGf15NCXaT7wMADGCGrdmLQXxHatnfNB1HVSavuL/uT9E53DLtdE/UdJI2M
taFhedW0RC0Ar8bGHkiFaXALPc1SkILtl/P3Wf8rPu+z5bt//Xb3YvXbXLcnq/4Yo9/ucdETjI1C
rr9klRpCscBn8+skbRmxVhX/f7fRgk3dei/t1R3GMA3kC/20fojRFY82d0+bv3hsYkI27VGneg+A
GcxocdxuF7udStjdbtF9sJEqiVBT5/BrR5fD9u939h3eefkSYNWp0itfvdzpljubu6fqouaIi0y1
qL7+C1AkCcw=
""")
##file distribute_from_egg.py
DISTRIBUTE_FROM_EGG_PY = convert("""
eJw9j8tqAzEMRfcG/4MgmxQyptkGusonZBmGoGTUGYFfWPKE6dfXTkM3gqt7rh47OKP3NMF3SQFW
LlrRU1zhybpAxoKBlIqcrNnBdRjQP3GTocYfzmNrrCPQPN9iwzpxSQfQhWBi0cL3qtRtYIG/4Mv0
KApY5hooqrOGQ05FQTaxptF9Fnx16Rq0XofjaE1XGXVxHIWK7j8P8EY/rHndLqQ1a0pe3COFgHFy
hLLdWkDbi/DeEpCjNb3u/zccT2Ob8gtnwVyI
""")
##file distribute_setup.py
DISTRIBUTE_SETUP_PY = convert("""
eJztPF1z2ziS7/oVOLlcpHISE2fm5q5cp6nKTDyzrs0mqTjZfUhcMkRCEsf8GpC0ov31190ACICk
ZOdm9uGqzrtjS0Sj0ejvboA5+7fq0OzKYjKdTn8qy6ZuJK9YksLfdN02gqVF3fAs400KQJPrDTuU
LdvzomFNydpasFo0bdWUZVYDLI5KVvH4nm9FUKvBqDrM2W9t3QBAnLWJYM0urSebNEP08AWQ8FzA
qlLETSkPbJ82O5Y2c8aLhPEkoQm4IMI2ZcXKjVrJ4L+8nEwY/GxkmTvUr2icpXlVygapXVlqCd5/
FM4GO5Ti9xbIYpzVlYjTTRqzByFrYAbSYKfO8TNAJeW+yEqeTPJUylLOWSmJS7xgPGuELDjw1ADZ
Hc9p0RigkpLVJVsfWN1WVXZIi+0EN82rSpaVTHF6WaEwiB93d/0d3N1Fk8lHZBfxN6aFEaNgsoXP
NW4llmlF29PSJSqrreSJK88IlWKimVfW5lO9a5s0674duoEmzYX5vCly3sS7bkjkFdLTfefS/Qo7
qrisxWTSCRDXqI3ksnI7mTTycGmFXKeonGr4083Vh9XN9cerifgaC9jZNT2/QgmoKR0EW7K3ZSEc
bGYf7Ro4HIu6VpqUiA1bKdtYxXkSPuNyW8/UFPzBr4AshP1H4quI24avMzGfsX+noQ5OAjtl4aCP
YmB4SNjYcsleTI4SfQZ2ALIByYGQE7YBISmC2Mvouz+VyDP2e1s2oGv4uM1F0QDrN7B8AapqweAR
YqrAGwAxOZIfAMx3LwO7pCELEQrc5swf03gC+B/YPowPhx22BdPzehqwcwQcwGmY/pDe9GdLAbEO
PugV69u+dMo6qisORhnCp/erf7y6/jhnPaaxZ67MXl/98urTm4+rv199uLl+9xbWm76Ifoi+u5h2
Q58+vMHHu6apLp8/rw5VGilRRaXcPtc+sn5egx+LxfPkuXVbz6eTm6uPn95/fPfuzc3ql1d/vXrd
Wyi+gIVcoPd//XV1/faXdzg+nX6Z/E00POENX/xdeatLdhG9mLwFN3vpWPikGz2vJzdtnnOwCvYV
fiZ/KXOxqIBC+j551QLl0v28EDlPM/XkTRqLotagr4XyL4QXHwBBIMFjO5pMJqTG2hWF4BrW8Hdu
fNMK2b4MZzNjFOIrxKiYtJXCgYKnwSavwKUCD4y/ifL7BD+DZ8dx8CPRnssiDK4sElCK8zqY68kK
sMyS1T4BRKAPW9HE+0Rj6NwGQYEx72BO6E4lKE5EKCcXlZUozLYszErvQ+/ZmxzFWVkLDEfWQrel
JhY33QWODgAcjNo6EFXxZhf9BvCasDk+zEC9HFo/v7idDTeisNgBy7C35Z7tS3nvcsxAO1RqoWHY
GuK47gbZ607Zg5nrX4qy8TxaYCI8LBdo5PDxmascPQ9j17sBHYbMAZbbg0tje1nCx6SVRnXc3CZy
6OhhEYKgBXpmloMLB6tgfF0+iP4kVM60iUsIo8Z1v/QAtL9RDzdpAauP6ZNSP4tbhdxI5o0UotM2
bTjrNgVwsd2G8N+cdfbTlCsE+3+z+T9gNiRDir8FAymOIPqpg3BsB2GtIJS8LaeOmdHid/y9xniD
akOPFvgNfkkH0Z+ipGp/Su+N7klRt1njqxYQooC1EzDyAIOqm5qGLQ2Sp5BTX7+jZCkMfi7bLKFZ
xEdlrdstWqe2kQS2pJPuUOfv8y4NX615Lcy2nceJyPhBr4qM7iuJhg9s4F6c14vqcJ5E8H/k7Ghq
Az/nzFKBaYb+AjFwU4KGjTy8uJ09nT3aaIDgbi9OiXBk/8do7f0c4ZLVukfcEQFSFonkgwcWsglf
zJmVv87H/ULNqUrWpkw1KcOKCoIlGY6Sd68o0jte9pK2HgeWTuI2yg21gyUaQCtHmLC8+I85CGe1
4fdi+VG2ovO9OScHULdQSe4pnScd5eu6zNCMkRcTu4SjaQCCf0OXe3terxSXBPraoLrfrsCkKI+s
Ka1G/uZl0maixtLuS7ebwHKlDzj0094XRzTeej6AUs4dr3nTyNADBENZJU7UHy0LcLbm4HhdQEN+
yd4H0c7BVlMdxLFCq5upovMf8RbHmecxI9J9hXBqWfLjcgp1mV5vNkJYfx8+Rp3K/1wWmyyNG39x
AXqi6pmY/Ek4A4/SF52rV0Pu43QIhZAFRXsJxXc4gJh+JN9OG0vcNonTTgp/XJ5DEZXWJGr+ACUE
VVdfiukQH3Z/Yl4EDSZS2tgB836HnQ1qCelOBnySbYHxJWLvMwECGsVnuh2c5aVEUmNMCw2hm1TW
zRyME9CMTg8A8cE4Hbb45OwriEbgvxRfivDnVkpYJTsoxOxczgC5FwFEhFksZhZDZVZCS5vwpT8m
snrEQkAHWc/oHAv/3PMUtzgFYzP1osr7YwX2t9jDk6LIMZsZ1esu24FV35bNL2VbJH/YbB8lc4zE
QSp0ymGtYil4I/r+aoWbIwvssiyKWCcC9R8NW/QzErt0yNKOGIr017Yt2dkrhdau+QnGl5Ux1UvU
mtWcTxvVbSx4LlTWeKdpv4OskJKzNbZQH3iWetiN6RVtvhYSTJqTLXdugXBhy5KyYmrjdL1TUAOa
Itidx487ho2XEJxEvDOriyJRkRP7ypwFz4NZxO4UT+5wRa84AAcjpDBZZFfJmVVEEqk9Ege76XoP
1BWOyyKh/mzFMdavxQb9DbZi46blme0S0/4aLLWayIjhX5IzeOGIhNpKqMTXFIgEtuZ1j1xmWHdN
HHMcDZcOipdjc5vtP1eoDtiP8vLjCOu07T/RA2rpq0a89NJVFCQEQ4NFpYD8QQBLj2ThBlQnmDJG
dLAv3e91zLWXOiu0s0vk+auHMkWtrtB0k44cm+QMonpXv3TWQ06+ns5xS77PVkRpLoWD4TP2QfDk
OQVXhhEG8jMgna3B5O7neCqwRyXEcKh8C2hyXEoJ7oKsr4cMdktabewlxfOZRhC8UWHzg51CzBBk
DPrAk15SpdhIRCtmzdl0v54OgHRegMjs2MBpaknAWiM5BhBgavgePOAfiXewqAtv27kkYdhLRpag
ZWyqQXDYNbivdfk13LRFjO5Me0Eadsep6Ttnz57d72cnMmN1JGFrFD3dWMZr41pu1PNTSXMfFvNm
KLXHEmak9iEtVQNr0Px3fype14OB/koRrgOSHj7vFnkCjg4WMB2fV+HpEJUvWCg9IbWxE37hAPDk
nL4/77gMtfIYjfBE/6g662WGdJ9m0KgIRtO6cUhX6129NZpOZK3QO4RoCHNwGOADisYG/X9QdOPx
fVuRv9io3FoUaksQ201IIn8J3m2lcRifgIhnrt8Adgxhl2Zpy6Iz8HI47WC4N9L2euVDuA1XvW2r
DnbWe4TGaiAyEyChxOiwIndAFKuUzt0EWNo+GAuX2rEZ3o0ng5sxT0TKPXHEAOu57sUZ6bwTnoUb
vo1KzXi5PvMdJhtcg10rDIXYm+iMTyHSBtG7N6+j8xrP2vAcN8Jfg/bvB0SnAhxmN9R2VBQajLoP
jAUufg3HRjX95qGlNS8fIGEG41i5nfmwyngsdqDuwnSze5E8rbEfOQTzif9U3EMs9Jr+kHvpTThz
jyvYBmsPzwNhRmruMTjN4nFSgGp9LB7pvyHOnbtdmWfYN1xggdB3+Gbxgb9cg/TvXbZs/BLJcsD2
SSmLd8/63XV7DJj0lOBv5QOqgMiEOigu2wazXnQee36wJmcqnX7G5jBnzpTma+J78tTzHT5YZ64N
B4heebDKU3kRZDBJuUM9Y85GTlF171vzc+DbLS/ADnjfQ82ZT82oKp0B5j3LRBPUDNW+8719fnZq
pvmNmha6bbx5rwGom/x4PwI/OtwzGE7JQ8N4Z3L9XrMG6dW7rqsZYBnG9DGtBJ+qmvfAVkOs5sSR
VnpwY28fJU6jIOjtxHfHxzxN3zkfg+tcNd9AQt2dXCMBmitOAEOQ7p5N17vujMQyHwsWwIAHZ+D+
8xyoWJXr38Lu2HMWmYZ3BUUhVF4qsj3WaPB8myb8W+Z4LtelF5RypJ56zA2PiNtwx/QWhi6IWHV4
ICaB0elAFT757EQVhXajOhQ7dqSPbmrrB2GBL57WhceuMMwVbd/g9nqkDDyg4eXQBY76HgV+wvP0
ffjPKH8VyAez/NynS5A6f9klSTr1vioeUlkWaGy9/NstjrFs3UEZxioh87SuzQ02Ve6eY6fyPq0q
oGl6YhtD+nRuNurECeB4nqbE1XSJ2XFxOXoSwYSgnxf12NnsHKlaDurHj6WZHhlOw66vM4/v7zEz
7/m7J7mTycyvLboIbLPLMx3XIBzG96jVKX4by/WP2orKxq9+/XWBksR4BlJVn7/BVtJBNn0y6B8L
UE8N8lZPnUB/pPAA4vP7jm/+o5OsmD3iZR7l3CmL/tNMy2GFVwJpbRmvgvSgvdhCbdMuvA5C60+q
rXo0to6cFWrM1DteVVJs0q+hiTo20HURl8KUPiblcvtw2fNHNhnXlw4N4GfzAUJ2Ir46MRxqrYvL
2y6ro+G5uZwoijYXkqtri24vB0HVtV+V/y0WEnarbm6obfTLBdgG4IhgVdnU2PdGPV5iUFN4RhpF
TVlp4dDMKkubMMB1lsHs86J3XugwwTDQXUzj6h9aKaqwUFVUjB4CZ6Cc6q7lj4o/4z0tj9z6M0Ei
d4d0fiutlkpgb1sLGdBph71ErI8vsbM82kMaW6WbPWIdSisH6tpX+JuY0yGncxZqrpGOGfDR4/pT
PbMzthcBWFUMJIwkHU6+DSrp3ERKSqGYUguRY2B3j2yHbRv6ukeT8YsXfVcK2TDckBOOMFOGyfs6
wizSP4v2MX5QB9KYnkR0ybxXPUlBoR7Hl+S2fZ31Up2Ph0oM+IVNU+dM69X7638lwZY6W6T2lwH1
9FXTvY/mvrDhlkyqbTAuqDOWiEboe38Yz/GuQBcUUW+TfobdnRMu++RFZqiv3e6LJE5RppYGXTfN
mpFVNC/o1EP5RlRP8o3pVyK2kuVDmohEvVOSbjS8+/ZK7bRGEn1lMJ/bUxfTEHXrIT+UjFE2LgWN
DRg67xMMiNRhzdhl2aFvU/fogZYdVEfHKygvMwMbVXKs3QuHeksjm4hEkeggQvfajmyqWKj7iFZ4
Hh1o7ce7fKNSNZM1aYBjzN+ONH2cK6vHSTqWRI2Qcjqn0iSGx1JS1Dm/W/INaenRvPREb7zHG3/e
sDvu6kZ3tohmTQfgykPSYbTj/QvRF61fEPxReQ7phZiUV0CkcJr6GW+LeGczO/ukHzw/6BFv4xjt
VFlK73opCOpJmJeBFFSVVizn8h5vHJSM0zExtxPW7VYXT3lyge+eBIvYv7AOiQRe/8nEQrcmFuIr
vQ4GCfQi5wXE8CS47ZC8PIZEiriUBlK/j0MJ5+V3t5iwKArAlYwNvHRCqRl+cdv1QbBd6Cazn/03
YG4huTLTJgYH3U0afbmpE4lzYbsW2UadGCynEdT5ucA7E/USo5U9ktKXzOkMXEOoA1a6/yBBhEpe
+DVW16vMHWuzP3uXA709vppX7gus5PMywZf4VGTBMw4CcHsS9rDSIElBvanTB4qU1BG7ww0E3Z0Y
fKMOkG4EETK4Yg6Eag7AR5isdxSgj1dJMM+IiBzfkKR7MsBPIplanwYPni1o+4DotD6wrWg0rnDm
Xx7RiV9cVgf3O1R9UFvo+5CKoeqqvQHQjLeXJl0OgD7cdhmHEcsg0zADGPWzzaSrc2Al8rQQqzSI
V6brYd3573m8M0OYR4++y1PzjUCpit6NBgsZ8QrK3STUa/hO0tC1JG5F+OskIN6lw17R99//l0qL
4jQH+VF9BgS++M8XL5zsL9tEWvYGqdL+Ll35INAdCFYj+12aXft2m5nsv1n4cs6+d1iERobzhQwB
w8Uc8bycjdYlcV4RTIQtCQUY2XO5Pt8QaagwjwNIRX04duoyQHQvDkujgRHedAD9RZoDJCCYYSJO
2NTNacMgSArpkgvg6ky4M1vUXZIHZol95vW0zhn3iKTzz9EmipG4z6DBtQGScrwD4qyMNd7ZELCl
c9UnAMY72NkJQNN8dUz2f3HlV6koTs6A+xkU3BfDYpsuVPcK+bErGoRslay3ISjhVPsWfLUQL3uJ
3vtK7gtcoX6j2YYA+vtT9zKHfSsVvGmgX4I1MYt13ZrSvOXTFWO6PPa9o7Oy8mqaGZqKCCt+Q5/n
pY4Y4w/HMrSp6h6YO9E1e29e3/0BQzTko0L2rlGpy+s3h7oR+RXG1gsnaXIIN07NNCi8poIL2DVr
wbQUs3tcfo8jKpaqQyeINIVwOk61B06I6Lahfmc7ekdQhEZqV6CAIp4kK4XD1ruGYLyAWjfLwGU2
POR092YZ1A22/hpwBQS54W2my3N7x3Unsmpp0iO0cWI2vRiu5c7CU6yfBU+h1lygW+CdxI5s76Zi
gJlMwx+4XE4/fXgztSQaykfv6Cr6zT8LgEkN3lylwKxvoJb2+t64YusdaEHNTeamd+QK3SSyJfBH
5xydUXHsom4L4HjiqpERP2lQzsExHrmRbDXq+tS/J0A++4rXBw1lVMr8ewZLX01V/+fkq0z+RWhj
v95TzzCGLxmf8kbgsVK6Doi12oragasV8mG10i+8dxkwcQcm/A9nRa43
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVVFvokAQfudXTLEPtTlLeo9tvMSmJpq02hSvl7u2wRUG2QR2DSxSe7n/frOACEVNLlceRHa+
nfl25pvZDswCnoDPQ4QoTRQsENIEPci4CsBMZBq7CAsuLOYqvmYKTTj3YxnBgiXBudGBjUzBZUJI
BXEqgCvweIyuCjeG4eF2F5x14bcB9KQiQQWrjSddI1/oQIx6SYYeoFjzWIoIhYI1izlbhJjkKO7D
M/QEmKfO9O7WeRo/zr4P7pyHwWxkwitcgwpQ5Ej96OX+PmiFwLeVjFUOrNYKaq1Nud3nR2n8nI2m
k9H0friPTGVsUdptaxGrTEfpNVFEskxpXtUkkCkl1UNF9cgLBkx48J4EXyALuBtAwNYIjF5kcmUU
abMKmMq1ULoiRbgsDEkTSsKSGFCJ6Z8vY/2xYiSacmtyAfCDdCNTVZoVF8vSTQOoEwSnOrngBkws
MYGMBMg8/bMBLSYKS7pYEXP0PqT+ZmBT0Xuy+Pplj5yn4aM9nk72JD8/Wi+Gr98sD9eWSMOwkapD
BbUv91XSvmyVkICt2tmXR4tWmrcUCsjWOpw87YidEC8i0gdTSOFhouJUNxR+4NYBG0MftoCTD9F7
2rTtxG3oPwY1b2HncYwhrlmj6Wq924xtGDWqfdNxap+OYxplEurnMVo9RWks+rH8qKEtx7kZT5zJ
4H7oOFclrN6uFe+d+nW2aIUsSgs/42EIPuOhXq+jEo3S6tX6w2ilNkDnIpHCWdEQhFgwj9pkk7FN
l/y5eQvRSIQ5+TrL05lewxWpt/Lbhes5cJF3mLET1MGhcKCF+40tNWnUulxrpojwDo2sObdje3Bz
N3QeHqf3D7OjEXMVV8LN3ZlvuzoWHqiUcNKHtwNd0IbvPGKYYM31nPKCgkUILw3KL+Y8l7aO1ArS
Ad37nIU0fCj5NE5gQCuC5sOSu+UdI2NeXg/lFkQIlFpdWVaWZRfvqGiirC9o6liJ9FXGYrSY9mI1
D/Ncozgn13vJvsznr7DnkJWXsyMH7e42ljdJ+aqNDF1bFnKWFLdj31xtaJYK6EXFgqmV/ymD/ROG
+n8O9H8f5vsGOWXsL1+1k3g=
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJyVVWFv2jAQ/c6vuBoqQVWC9nVSNVGVCaS2VC2rNLWVZZILWAs2s52wVvvxsyEJDrjbmgpK7PP5
3bt3d22YLbmGlGcIq1wbmCPkGhPYcLMEEsGciwGLDS+YwSjlekngLFVyBe73GXSXxqw/DwbuTS8x
yyKpFr1WG15lDjETQhpQuQBuIOEKY5O9tlppLqxHKSDByjVAPwEy+mXtCq5MzjIUBTCRgEKTKwFG
gpBqxTLYXgN2myspVigMaYF92tZSowGZJf4mFExxNs9Qb614CgZtmH0BpEOn11f0cXI/+za8pnfD
2ZjA1sg9zlV/8QvcMhxbNu0QwgYokn/d+n02nt6Opzcjcnx1vXcIoN74O4ymWQXmHURfJw9jenc/
vbmb0enj6P5+cuVhqlKm3S0u2XRtRbA2QQAhV7VhBF0rsgUX9Ur1rBUXJgVSy8O751k8mzY5OrKH
RW3eaQhYGTr8hrXO59ALhxQ83mCsDLAid3T72CCSdJhaFE+fXgicXAARUiR2WeVO37gH3oYHzFKo
9k7CaPZ1UeNwH1tWuXA4uFKYYcEa8vaKqXl7q1UpygMPhFLvlVKyNzsSM3S2km7UBOl4xweUXk5u
6e3wZmQ9leY1XE/Ili670tr9g/5POBBpGIJXCCF79L1siarl/dbESa8mD8PL61GpzqpzuMS7tqeB
1YkALrRBloBMbR9yLcVx7frQAgUqR7NZIuzkEu110gbNit1enNs82Rx5utq7Z3prU78HFRgulqNC
OTwbqJa9vkJFclQgZSjbKeBgSsUtCtt9D8OwAbIVJuewQdfvQRaoFE9wd1TmCuRG7OgJ1bVXGHc7
z5WDL/WW36v2oi37CyVBak61+yPBA9C1qqGxzKQqZ0oPuocU9hpud0PIp8sDHkXR1HKkNlzjuUWA
a0enFUyzOWZA4yXGP+ZMI3Tdt2OuqU/SO4q64526cPE0A7ZyW2PMbWZiZ5HamIZ2RcCKLXhcDl2b
vXL+eccQoRzem80mekPDEiyiWK4GWqZmwxQOmPM0eIfgp1P9cqrBsewR2p/DPMtt+pfcYM+Ls2uh
hALufTAdmGl8B1H3VPd2af8fQAc4PgqjlIBL9cGQqNpXaAwe3LrtVn8AkZTUxg==
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9VG1P2zAQ/u5fcYQKNgTNPtN1WxlIQ4KCUEGaxuQ6yYVYSuzKdhqVX7+zk3bpy5YPUXL3PPfc
ne98DLNCWshliVDV1kGCUFvMoJGugMjq2qQIiVSxSJ1cCofD1BYRnOVGV0CfZ0N2DD91DalQSjsw
tQLpIJMGU1euvPe7QeJlkKzgWixlhnAt4aoUVsLnLBiy5NtbJWQ5THX1ZciYKKWwkOFaE04dUm6D
r/zh7pq/3D7Nnid3/HEy+wFHY/gEJydg0aFaQrBFgz1c5DG1IhTs+UZgsBC2GMFBlaeH+8dZXwcW
VPvCjXdlAvCfQsE7al0+07XjZvrSCUevR5dnkVeKlFYZmUztG4BdzL2u9KyLVabTU0bdfg7a0hgs
cSmUg6UwUiQl2iHrcbcVGNvPCiLOe7+cRwG13z9qRGgx2z6DHjfm/Op2yqeT+xvOLzs0PTKHDz2V
tkckFHoQfQRXoGJAj9el0FyJCmEMhzgMS4sB7KPOE2ExoLcSieYwDvR+cP8cg11gKkVJc2wRcm1g
QhYFlXiTaTfO2ki0fQoiFM4tLuO4aZrhOzqR4dIPcWx17hphMBY+Srwh7RTyN83XOWkcSPh1Pg/k
TXX/jbJTbMtUmcxZ+/bbqOsy82suFQg/BhdSOTRhMNBHlUarCpU7JzBhmkKmRejKOQzayQe6MWoa
n1wqWmuh6LZAaHxcdeqIlVLhIBJdO9/kbl0It2oEXQj+eGjJOuvOIR/YGRqvFhttUB2XTvLXYN2H
37CBdbW2W7j2r2+VsCn0doVWcFG1/4y1VwBjfwAyoZhD
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJx9UdEKgjAUfW6wfxjiIH+hEDKUFHSKLCMI7kNOEkIf9P9pTJ3OLJ/03HPPPed4Es9XS9qqwqgT
PbGKKOdXL4aAFS7A4gvAwgijuiKlqOpGlATS2NeMLE+TjJM9RkQ+SmqAXLrBo1LLIeLdiWlD6jZt
r7VNubWkndkXaxg5GO3UaOOKS6drO3luDDiO5my3iA0YAKGzPRV1ack8cOdhysI0CYzIPzjSiH5X
0QcvC8Lfaj0emsVKYF2rhL5L3fCkVjV76kShi59NHwDniAHzkgDgqBcwOgTMx+gDQQqXCw==
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4spMU0hJTcvMS01RiPf3cYkP8wwKCXX0iQ8I8vcNCFHQ4FIAguLUEgUliIit
KhZlqkpcnCA1WKRsuTTxWBIZ4uHv5+Hv64piEVwU3TK4BNBCmHIcKvDb6xjigWIjkI9uF1AIu7dA
akGGW7n6uXABALCXXUI=
""")
##file activate.ps1
ACTIVATE_PS = convert("""
eJylWdmS40Z2fVeE/oHT6rCloNUEAXDThB6wAyQAEjsB29GBjdgXYiWgmC/zgz/Jv+AEWNVd3S2N
xuOKYEUxM+/Jmzfvcm7W//zXf/+wUMOoXtyi1F9kbd0sHH/hFc2iLtrK9b3FrSqyxaVQwr8uhqJd
uHaeg9mqzRdR8/13Pyy8qPLdJh0+LMhi0QCoXxYfFh9WtttEnd34H8p6/f1300KauwrULws39e18
0ZaLNm9rgN/ZVf3h++/e124Vlc0vKsspHy+Yyi5+XbzPhijvCtduoiL/kA1ukWV27n0o7Sb8LIFj
CvWR5GQgUJdp1Pw8TS9+rPy6SDv/+e3d+0+4qw8f3v20+PliV37efEYBAB9FTKC+RHn/Cfxn3rdv
00Fube5O+iyCtHDs9BfPfz3q4sfFv9d91Ljhfy7ei0VO+nVTtdOkv/jpt0l2AX6iG1jXgKnnDuD4
ke2k/i8fzzz5UedkVcP4pwF+Wvz2FJl+3vt598urXf5Y6LNA5WcFOP7r0sW7b9a+W/xcu0Xpv5zk
Kfq3P9Dz9di/fCxS72MXVU1rpx9L4Bxl85Wmn5a+zP76Zuh3pL9ROWr87PN+//GHIl+oOtvn9XSU
qH+p0gQBFnx1uV+JLH5O5zv+PXW+WepXVVHZT0+oQezkIATcIm+ivPV/z5J/+cYj3ir4w0Lx09vC
e5n/y5/Y5LPPfdrqb88ga/PabxZRVfmp39l588m/6u+/e+OpP+dF7n1WZpJ9//Z4v372fDDz9eHB
7Juvs/BLMHzrxL9+9twXpJfhd1/DrpQ5Euu/vlss3wp9HXC/54C/Ld69m6zwdx3tC0d8daSv0V8B
n4b9YYF53sJelJV/ix6LZspw/sJtqyl5LJ5r/23htA1Imfm/gt9R7dqVB1LjhydAX4Gb+zksQF59
9+P7H//U+376afFuvh2/T6P85Xr/5c8C6OXyFY4BGuN+EE0+GeR201b+wkkLN5mmBY5TfMw8ngqL
CztXxCSXKMCYrRIElWkEJlEPYsSOeKBVZCAQTKBhApMwRFQzmCThE0YQu2CdEhgjbgmk9GluHpfR
/hhwJCZhGI5jt5FsAkOrObVyE6g2y1snyhMGFlDY1x+BoHpCMulTj5JYWNAYJmnKpvLxXgmQ8az1
4fUGxxcitMbbhDFcsiAItg04E+OSBIHTUYD1HI4FHH4kMREPknuYRMyhh3AARWMkfhCketqD1CWJ
mTCo/nhUScoQcInB1hpFhIKoIXLo5jLpwFCgsnLCx1QlEMlz/iFEGqzH3vWYcpRcThgWnEKm0QcS
rA8ek2a2IYYeowUanOZOlrbWSJUC4c7y2EMI3uJPMnMF/SSXdk6E495VLhzkWHps0rOhKwqk+xBI
DhJirhdUCTamMfXz2Hy303hM4DFJ8QL21BcPBULR+gcdYxoeiDqOFSqpi5B5PUISfGg46gFZBPo4
jdh8lueaWuVSMTURfbAUnLINr/QYuuYoMQV6l1aWxuZVTjlaLC14UzqZ+ziTGDzJzhiYoPLrt3uI
tXkVR47kAo09lo5BD76CH51cTt1snVpMOttLhY93yxChCQPI4OBecS7++h4p4Bdn4H97bJongtPk
s9gQnXku1vzsjjmX4/o4YUDkXkjHwDg5FXozU0fW4y5kyeYW0uJWlh536BKr0kMGjtzTkng6Ep62
uTWnQtiIqKnEsx7e1hLtzlXs7Upw9TwEnp0t9yzCGgUJIZConx9OHJArLkRYW0dW42G9OeR5Nzwk
yk1mX7du5RGHT7dka7N3AznmSif7y6tuKe2N1Al/1TUPRqH6E2GLVc27h9IptMLkCKQYRqPQJgzV
2m6WLsSipS3v3b1/WmXEYY1meLEVIU/arOGVkyie7ZsH05ZKpjFW4cpY0YkjySpSExNG2TS8nnJx
nrQmWh2WY3cP1eISP9wbaVK35ZXc60yC3VN/j9n7UFoK6zvjSTE2+Pvz6Mx322rnftfP8Y0XKIdv
Qd7AfK0nexBTMqRiErvCMa3Hegpfjdh58glW2oNMsKeAX8x6YJLZs9K8/ozjJkWL+JmECMvhQ54x
9rsTHwcoGrDi6Y4I+H7yY4/rJVPAbYymUH7C2D3uiUS3KQ1nrCAUkE1dJMneDQIJMQQx5SONxoEO
OEn1/Ig1eBBUeEDRuOT2WGGGE4bNypBLFh2PeIg3bEbg44PHiqNDbGIQm50LW6MJU62JHCGBrmc9
2F7WBJrrj1ssnTAK4sxwRgh5LLblhwNAclv3Gd+jC/etCfyfR8TMhcWQz8TBIbG8IIyAQ81w2n/C
mHWAwRzxd3WoBY7BZnsqGOWrOCKwGkMMNfO0Kci/joZgEocLjNnzgcmdehPHJY0FudXgsr+v44TB
I3jnMGnsK5veAhgi9iXGifkHMOC09Rh9cAw9sQ0asl6wKMk8mpzFYaaDSgG4F0wisQDDBRpjCINg
FIxhlhQ31xdSkkk6odXZFpTYOQpOOgw9ugM2cDQ+2MYa7JsEirGBrOuxsQy5nPMRdYjsTJ/j1iNw
FeSt1jY2+dd5yx1/pzZMOQXUIDcXeAzR7QlDRM8AMkUldXOmGmvYXPABjxqkYKO7VAY6JRU7kpXr
+Epu2BU3qFFXClFi27784LrDZsJwbNlDw0JzhZ6M0SMXE4iBHehCpHVkrQhpTFn2dsvsZYkiPEEB
GSEAwdiur9LS1U6P2U9JhGp4hnFpJo4FfkdJHcwV6Q5dV1Q9uNeeu7rV8PAjwdFg9RLtroifOr0k
uOiRTo/obNPhQIf42Fr4mtThWoSjitEdAmFW66UCe8WFjPk1YVNpL9srFbond7jrLg8tqAasIMpy
zkH0SY/6zVAwJrEc14zt14YRXdY+fcJ4qOd2XKB0/Kghw1ovd11t2o+zjt+txndo1ZDZ2T+uMVHT
VSXhedBAHoJIID9xm6wPQI3cXY+HR7vxtrJuCKh6kbXaW5KkVeJsdsjqsYsOwYSh0w5sMbu7LF8J
5T7U6LJdiTx+ca7RKlulGgS5Z1JSU2Llt32cHFipkaurtBrvNX5UtvNZjkufZ/r1/XyLl6yOpytL
Km8Fn+y4wkhlqZP5db0rooqy7xdL4wxzFVTX+6HaxuQJK5E5B1neSSovZ9ALB8091dDbbjVxhWNY
Ve5hn1VnI9OF0wpvaRm7SZuC1IRczwC7GnkhPt3muHV1YxUJfo+uh1sYnJy+vI0ZwuPV2uqWJYUH
bmBsi1zmFSxHrqwA+WIzLrHkwW4r+bad7xbOzJCnKIa3S3YvrzEBK1Dc0emzJW+SqysQfdEDorQG
9ZJlbQzEHQV8naPaF440YXzJk/7vHGK2xwuP+Gc5xITxyiP+WQ4x18oXHjFzCBy9kir1EFTAm0Zq
LYwS8MpiGhtfxiBRDXpxDWxk9g9Q2fzPPAhS6VFDAc/aiNGatUkPtZIStZFQ1qD0IlJa/5ZPAi5J
ySp1ETDomZMnvgiysZSBfMikrSDte/K5lqV6iwC5q7YN9I1dBZXUytDJNqU74MJsUyNNLAPopWK3
tzmLkCiDyl7WQnj9sm7Kd5kzgpoccdNeMw/6zPVB3pUwMgi4C7hj4AMFAf4G27oXH8NNT9zll/sK
S6wVlQwazjxWKWy20ZzXb9ne8ngGalPBWSUSj9xkc1drsXkZ8oOyvYT3e0rnYsGwx85xZB9wKeKg
cJKZnamYwiaMymZvzk6wtDUkxmdUg0mPad0YHtvzpjEfp2iMxvORhnx0kCVLf5Qa43WJsVoyfEyI
pzmf8ruM6xBr7dnBgzyxpqXuUPYaKahOaz1LrxNkS/Q3Ae5AC+xl6NbxAqXXlzghZBZHmOrM6Y6Y
ctAkltwlF7SKEsShjVh7QHuxMU0a08/eiu3x3M+07OijMcKFFltByXrpk8w+JNnZpnp3CfgjV1Ax
gUYCnWwYow42I5wHCcTzLXK0hMZN2DrPM/zCSqe9jRSlJnr70BPE4+zrwbk/xVIDHy2FAQyHoomT
Tt5jiM68nBQut35Y0qLclLiQrutxt/c0OlSqXAC8VrxW97lGoRWzhOnifE2zbF05W4xuyhg7JTUL
aqJ7SWDywhjlal0b+NLTpERBgnPW0+Nw99X2Ws72gOL27iER9jgzj7Uu09JaZ3n+hmCjjvZpjNst
vOWWTbuLrg+/1ltX8WpPauEDEvcunIgTxuMEHweWKCx2KQ9DU/UKdO/3za4Szm2iHYL+ss9AAttm
gZHq2pkUXFbV+FiJCKrpBms18zH75vax5jSo7FNunrVWY3Chvd8KKnHdaTt/6ealwaA1x17yTlft
8VBle3nAE+7R0MScC3MJofNCCkA9PGKBgGMYEwfB2QO5j8zUqa8F/EkWKCzGQJ5EZ05HTly1B01E
z813G5BY++RZ2sxbQS8ZveGPJNabp5kXAeoign6Tlt5+L8i5ZquY9+S+KEUHkmYMRFBxRrHnbl2X
rVemKnG+oB1yd9+zT+4c43jQ0wWmQRR6mTCkY1q3VG05Y120ZzKOMBe6Vy7I5Vz4ygPB3yY4G0FP
8RxiMx985YJPXsgRU58EuHj75gygTzejP+W/zKGe78UQN3yOJ1aMQV9hFH+GAfLRsza84WlPLAI/
9G/5JdcHftEfH+Y3/fHUG7/o8bv98dzzy3e8S+XCvgqB+VUf7sH0yDHpONdbRE8tAg9NWOzcTJ7q
TuAxe/AJ07c1Rs9okJvl1/0G60qvbdDzz5zO0FuPFQIHNp9y9Bd1CufYVx7dB26mAxwa8GMNrN/U
oGbNZ3EQ7inLzHy5tRg9AXJrN8cB59cCUBeCiVO7zKM0jU0MamhnRThkg/NMmBOGb6StNeD9tDfA
7czsAWopDdnGoXUHtA+s/k0vNPkBcxEI13jVd/axp85va3LpwGggXXWw12Gwr/JGAH0b8CPboiZd
QO1l0mk/UHukud4C+w5uRoNzpCmoW6GbgbMyaQNkga2pQINB18lOXOCJzSWPFOhZcwzdgrsQnne7
nvjBi+7cP2BbtBeDOW5uOLGf3z94FasKIguOqJl+8ss/6Kumns4cuWbqq5592TN/RNIbn5Qo6qbi
O4F0P9txxPAwagqPlftztO8cWBzdN/jz3b7GD6JHYP/Zp4ToAMaA74M+EGSft3hEGMuf8EwjnTk/
nz/P7SLipB/ogQ6xNX0fDqNncMCfHqGLCMM0ZzFa+6lPJYQ5p81vW4HkCvidYf6kb+P/oB965g8K
C6uR0rdjX1DNKc5pOSTquI8uQ6KXxYaKBn+30/09tK4kMpJPgUIQkbENEPbuezNPPje2Um83SgyX
GTCJb6MnGVIpgncdQg1qz2bvPfxYD9fewCXDomx9S+HQJuX6W3VAL+v5WZMudRQZk9ZdOk6GIUtC
PqEb/uwSIrtR7/edzqgEdtpEwq7p2J5OQV+RLrmtTvFwFpf03M/VrRyTZ73qVod7v7Jh2Dwe5J25
JqFOU2qEu1sP+CRotklediycKfLjeIZzjJQsvKmiGSNQhxuJpKa+hoWUizaE1PuIRGzJqropwgVB
oo1hr870MZLgnXF5ZIpr6mF0L8aSy2gVnTAuoB4WEd4d5NPVC9TMotYXERKlTcwQ2KiB/C48AEfH
Qbyq4CN8xTFnTvf/ebOc3isnjD95s0QF0nx9s+y+zMmz782xL0SgEmRpA3x1w1Ff9/74xcxKEPdS
IEFTz6GgU0+BK/UZ5Gwbl4gZwycxEw+Kqa5QmMkh4OzgzEVPnDAiAOGBFaBW4wkDmj1G4RyElKgj
NlLCq8zsp085MNh/+R4t1Q8yxoSv8PUpTt7izZwf2BTHZZ3pIZpUIpuLkL1nNL6sYcHqcKm237wp
T2+RCjgXweXd2Zp7ZM8W6dG5bZsqo0nrJBTx8EC0+CQQdzEGnabTnkzofu1pYkWl4E7XSniECdxy
vLYavPMcL9LW5SToJFNnos+uqweOHriUZ1ntIYZUonc7ltEQ6oTRtwOHNwez2sVREskHN+bqG3ua
eaEbJ8XpyO8CeD9QJc8nbLP2C2R3A437ISUNyt5Yd0TbDNcl11/DSsOzdbi/VhCC0KE6v1vqVNkq
45ZnG6fiV2NwzInxCNth3BwL0+8814jE6+1W1EeWtpWbSZJOJNYXmWRXa7vLnAljE692eHjZ4y5u
y1u63De0IzKca7As48Z3XshVF+3XiLNz0JIMh/JOpbiNLlMi672uO0wYzOCZjRxcxj3D+gVenGIE
MvFUGGXuRps2RzMcgWIRolHXpGUP6sMsQt1hspUBnVKUn/WQj2u6j3SXd9Xz0QtEzoM7qTu5y7gR
q9gNNsrlEMLdikBt9bFvBnfbUIh6voTw7eDsyTmPKUvF0bHqWLbHe3VRHyRZnNeSGKsB73q66Vsk
taxWYmwz1tYVFG/vOQhlM0gUkyvIab3nv2caJ1udU1F3pDMty7stubTE4OJqm0i0ECfrJIkLtraC
HwRWKzlqpfhEIqYH09eT9WrOhQyt8YEoyBlnXtAT37WHIQ03TIuEHbnRxZDdLun0iok9PUC79prU
m5beZzfQUelEXnhzb/pIROKx3F7qCttYIFGh5dXNzFzID7u8vKykA8Uejf7XXz//S4nKvW//ofS/
QastYw==
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV1uL4zYUfvevOE0ottuMW9q3gVDa3aUMXXbLMlDKMBiNrSTqOJKRlMxkf33PkXyRbGe7Dw2E
UXTu37lpxLFV2oIyifAncxmOL0xLIfcG+gv80x9VW6maw7o/CANSWWBwFtqeWMPlGY6qPjV8A0bB
C4eKSTgZ5LRgFeyErMEeOBhbN+Ipgeizhjtnhkn7DdyjuNLPoCS0l/ayQTG0djwZC08cLXozeMss
aG5EzQ0IScpnWtHSTXuxByV/QCmxE7y+eS0uxWeoheaVVfqSJHiU7Mhhi6gULbOHorshkrEnKxpT
0n3A8Y8SMpuwZx6aoix3ouFlmW8gHRSkeSJ2g7hU+kiHLDaQw3bmRDaTGfTnty7gPm0FHbIBg9U9
oh1kZzAFLaue2R6htPCtAda2nGlDSUJ4PZBgCJBGVcwKTAMz/vJiLD+Oin5Z5QlvDPdulC6EsiyE
NFzb7McNTKJzbJqzphx92VKRFY1idenzmq3K0emRcbWBD0ryqc4NZGmKOOOX9Pz5x+/l27tP797c
f/z0d+4NruGNai8uAM0bfsYaw8itFk8ny41jsfpyO+BWlpqfhcG4yxLdi/0tQqoT4a8Vby382mt8
p7XSo7aWGdPBc+b6utaBmCQ7rQKQoWtAuthQCiold2KfJIPTT8xwg9blPumc+YDZC/wYGdAyHpJk
vUbHbHWAp5No6pK/WhhLEWrFjUwtPEv1Agf8YmnsuXUQYkeZoHm8ogP16gt2uHoxcEMdf2C6pmbw
hUMsWGhanboh4IzzmsIpWs134jVPqD/c74bZHdY69UKKSn/+KfVhxLgUlToemayLMYQOqfEC61bh
cbhwaqoGUzIyZRFHPmau5juaWqwRn3mpWmoEA5nhzS5gog/5jbcFQqOZvmBasZtwYlG93k5GEiyw
buHhMWLjDarEGpMGB2LFs5nIJkhp/nUmZneFaRth++lieJtHepIvKgx6PJqIlD9X2j6pG1i9x3pZ
5bHuCPFiirGHeO7McvoXkz786GaKVzC9DSpnOxJdc4xm6NSVq7lNEnKdVlnpu9BNYoKX2Iq3wvgh
gGEUM66kK6j4NiyoneuPLSwaCWDxczgaolEWpiMyDVDb7dNuLAbriL8ig8mmeju31oNvQdpnvEPC
1vAXbWacGRVrGt/uXN/gU0CDDwgooKRrHfTBb1/s9lYZ8ZqOBU0yLvpuP6+K9hLFsvIjeNhBi0KL
MlOuWRn3FRwx5oHXjl0YImUx0+gLzjGchrgzca026ETmYJzPD+IpuKzNi8AFn048Thd63OdD86M6
84zE8yQm0VqXdbbgvub2pKVnS76icBGdeTHHXTKspUmr4NYo/furFLKiMdQzFjHJNcdAnMhltBJK
0/IKX3DVFqvPJ2dLE7bDBkH0l/PJ29074+F0CsGYOxsb7U3myTUncYfXqnLLfa6sJybX4g+hmcjO
kMRBfA1JellfRRKJcyRpxdS4rIl6FdmQCWjo/o9Qz7yKffoP4JHjOvABcRn4CZIT2RH4jnxmfpVG
qgLaAvQBNfuO6X0/Ux02nb4FKx3vgP+XnkX0QW9pLy/NsXgdN24dD3LxO2Nwil7Zlc1dqtP3d7/h
kzp1/+7hGBuY4pk0XD/0Ao/oTe/XGrfyM773aB7iUhgkpy+dwAMalxMP0DrBcsVw/6p25+/hobP9
GBknrWExDhLJ1bwt1NcCNblaFbMKCyvmX0PeRaQ=
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNU01v2zAMvetXEB4K21jmDOstQA4dMGCHbeihlyEIDMWmG62yJEiKE//7kXKdpN2KzYBt8euR
fKSyLPs8wiEo8wh4wqZTGou4V6Hm0wJa1cSiTkJdr8+GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe
5a3p0cRKiAe2NtLADikftnDco0ko/SFEVgEZ8aRC5GLux7i3BpSJ6J1H+i7A2CjiHq9z7JRZuuQq
siwTIvpxJYCeuWaBpwZdhB+yxy/eWz+ZvVSU8C4E9FFZkyxFsvCT/ZzL8gcz9aXVE14Yyp2M+2W0
y7n5mp0qN+avKXvbsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCZN9UzlJr+/e/iab8WfqsmPI6pWeUPd
FrMsd4H/55poeO9n54COhUs+sZNEzNtg/wanpjpuqHJaxs76HtZryI/K3H7KJ/KDIhqcbJ7kI4ar
XL+sMgXnX0D+Te2Iy5xdP8yueSlQB/x/ED2BTAtyE3K4SYUN6AMNfbO63f4lBW3bUJPbTL+mjSxS
PyRfJkZRgj+VbFv+EzHFi5pKwUEepa4JslMnwkowSRCXI+m5XvEOvtuBrxHdhLalG0JofYBok6qj
YdN2dEngUlbC4PG60M1WEN0piu7Nq7on0mgyyUw3iV1etLo6r/81biWdQ9MWHFaePWZYaq+nmp+t
s3az+sj7eA0jfgPfeoN1
""")
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
FAT_MAGIC = 0xcafebabe
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
LC_LOAD_DYLIB = 0xc
maxint = majver == 3 and getattr(sys, 'maxsize') or getattr(sys, 'maxint')
class fileview(object):
"""
A proxy for file-like objects that exposes a given view of a file.
Modified from macholib.
"""
def __init__(self, fileobj, start=0, size=maxint):
if isinstance(fileobj, fileview):
self._fileobj = fileobj._fileobj
else:
self._fileobj = fileobj
self._start = start
self._end = start + size
self._pos = 0
def __repr__(self):
return '<fileview [%d, %d] %r>' % (
self._start, self._end, self._fileobj)
def tell(self):
return self._pos
def _checkwindow(self, seekto, op):
if not (self._start <= seekto <= self._end):
raise IOError("%s to offset %d is outside window [%d, %d]" % (
op, seekto, self._start, self._end))
def seek(self, offset, whence=0):
seekto = offset
if whence == os.SEEK_SET:
seekto += self._start
elif whence == os.SEEK_CUR:
seekto += self._start + self._pos
elif whence == os.SEEK_END:
seekto += self._end
else:
raise IOError("Invalid whence argument to seek: %r" % (whence,))
self._checkwindow(seekto, 'seek')
self._fileobj.seek(seekto)
self._pos = seekto - self._start
def write(self, bytes):
here = self._start + self._pos
self._checkwindow(here, 'write')
self._checkwindow(here + len(bytes), 'write')
self._fileobj.seek(here, os.SEEK_SET)
self._fileobj.write(bytes)
self._pos += len(bytes)
def read(self, size=maxint):
assert size >= 0
here = self._start + self._pos
self._checkwindow(here, 'read')
size = min(size, self._end - here)
self._fileobj.seek(here, os.SEEK_SET)
bytes = self._fileobj.read(size)
self._pos += len(bytes)
return bytes
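# Usage sketch (added for illustration; not part of the original module).
# fileview confines every seek/read/write to the window
# [start, start + size), which is how do_file() below addresses a single
# architecture slice inside a fat binary. Offsets here are invented:
#
#     with open('example.bin', 'rb') as fp:
#         view = fileview(fp, start=4096, size=1024)
#         view.seek(0)                 # lands on absolute offset 4096
#         head = view.read(16)         # bytes 4096..4111 of the real file
#         view.seek(0, os.SEEK_END)    # absolute offset 5120 (window end)
#         view.read()                  # returns '' - nothing left in window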
def read_data(file, endian, num=1):
"""
Read the given number of 32-bit unsigned integers from the given file
with the given endianness.
"""
res = struct.unpack(endian + 'L' * num, file.read(num * 4))
if len(res) == 1:
return res[0]
return res
def mach_o_change(path, what, value):
"""
Replace a given name (what) in any LC_LOAD_DYLIB command found in
the given binary with a new name (value), provided the new name is
no longer than the old one.
def do_macho(file, bits, endian):
# Read Mach-O header (the magic number is assumed read by the caller)
cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = read_data(file, endian, 6)
# The 64-bit header has one extra (reserved) field.
if bits == 64:
read_data(file, endian)
# The header is followed by ncmds commands
for n in range(ncmds):
where = file.tell()
# Read command header
cmd, cmdsize = read_data(file, endian, 2)
if cmd == LC_LOAD_DYLIB:
# The first data field in LC_LOAD_DYLIB commands is the
# offset of the name, starting from the beginning of the
# command.
name_offset = read_data(file, endian)
file.seek(where + name_offset, os.SEEK_SET)
# Read the NUL terminated string
load = file.read(cmdsize - name_offset).decode()
load = load[:load.index('\0')]
# If the string is what is being replaced, overwrite it.
if load == what:
file.seek(where + name_offset, os.SEEK_SET)
file.write(value.encode() + '\0'.encode())
# Seek to the next command
file.seek(where + cmdsize, os.SEEK_SET)
def do_file(file, offset=0, size=maxint):
file = fileview(file, offset, size)
# Read magic number
magic = read_data(file, BIG_ENDIAN)
if magic == FAT_MAGIC:
# Fat binaries contain nfat_arch Mach-O binaries
nfat_arch = read_data(file, BIG_ENDIAN)
for n in range(nfat_arch):
# Read arch header
cputype, cpusubtype, offset, size, align = read_data(file, BIG_ENDIAN, 5)
do_file(file, offset, size)
elif magic == MH_MAGIC:
do_macho(file, 32, BIG_ENDIAN)
elif magic == MH_CIGAM:
do_macho(file, 32, LITTLE_ENDIAN)
elif magic == MH_MAGIC_64:
do_macho(file, 64, BIG_ENDIAN)
elif magic == MH_CIGAM_64:
do_macho(file, 64, LITTLE_ENDIAN)
assert(len(what) >= len(value))
do_file(open(path, 'r+b'))
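# Illustrative call (the binary path and dylib names are invented): rewrite
# the install name recorded by an LC_LOAD_DYLIB command; the assert above
# guarantees this only succeeds when the new name is no longer than the old:
#
#     mach_o_change('/tmp/env/bin/python',
#                   '/Library/Frameworks/Python.framework/Python',
#                   '@executable_path/../Python')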
if __name__ == '__main__':
main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
| apache-2.0 |
repotvsupertuga/repo | script.module.requests/lib/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
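# Minimal usage sketch (not part of the upstream module; the byte string is
# just a stand-in for real input):
#
#     prober = EUCJPProber()
#     prober.feed(b'\xa4\xb3\xa4\xf3')  # two EUC-JP encoded characters
#     name, conf = prober.get_charset_name(), prober.get_confidence()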
| gpl-2.0 |
zhelfinstein/ABOUT | todo/models.py | 1 | 1131 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from social_django.models import USER_MODEL
class Item(models.Model):
user = models.ForeignKey(USER_MODEL)
priority = models.PositiveSmallIntegerField()
title = models.CharField(max_length=100)
description = models.TextField(blank=True)
def __str__(self):
return '%s (%s #%d)' % (self.title, self.user, self.priority)
def url(self):
return "item/id=%s" % str(self.pk)
class Restriction(models.Model):
MIN_DUR = "min"
MAX_DUR = "max"
CHOICES = ((MIN_DUR, "Minimum Duration (minutes)"), (MAX_DUR, "Maximum Duration (minutes)"))
item = models.ForeignKey(Item)
rtype = models.CharField(max_length=3, choices=CHOICES)
cvalue = models.CharField(max_length=20, blank=True)
ivalue = models.IntegerField(blank=True)
def __str__(self):
if self.rtype == Restriction.MIN_DUR or self.rtype == Restriction.MAX_DUR:
return '%s of %dmin for %s' % (self.rtype, self.ivalue, self.item)
else:
return '%s (%s)' % (self.rtype, self.item)
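# ORM usage sketch (illustrative; assumes a configured Django project and an
# existing `some_user` instance):
#
#     item = Item.objects.create(user=some_user, priority=1,
#                                title='Write docs')
#     Restriction.objects.create(item=item, rtype=Restriction.MAX_DUR,
#                                cvalue='', ivalue=30)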
| agpl-3.0 |
atsao72/sympy | sympy/utilities/tests/test_lambdify.py | 28 | 19078 | from sympy.utilities.pytest import XFAIL, raises
from sympy import (
symbols, lambdify, sqrt, sin, cos, tan, pi, acos, acosh, Rational,
Float, Matrix, Lambda, Piecewise, exp, Integral, oo, I, Abs, Function,
true, false, And, Or, Not)
from sympy.printing.lambdarepr import LambdaPrinter
import mpmath
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.pytest import skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.external import import_module
import math
import sympy
MutableDenseMatrix = Matrix
numpy = import_module('numpy')
numexpr = import_module('numexpr')
w, x, y, z = symbols('w,x,y,z')
#================== Test different arguments =======================
def test_no_args():
f = lambdify([], 1)
raises(TypeError, lambda: f(-1))
assert f() == 1
def test_single_arg():
f = lambdify(x, 2*x)
assert f(1) == 2
def test_list_args():
f = lambdify([x, y], x + y)
assert f(1, 2) == 3
def test_str_args():
f = lambdify('x,y,z', 'z,y,x')
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_own_namespace():
myfunc = lambda x: 1
f = lambdify(x, sin(x), {"sin": myfunc})
assert f(0.1) == 1
assert f(100) == 1
def test_own_module():
f = lambdify(x, sin(x), math)
assert f(0) == 0.0
def test_bad_args():
# no vargs given
raises(TypeError, lambda: lambdify(1))
# same with vector exprs
raises(TypeError, lambda: lambdify([1, 2]))
def test_atoms():
# Non-Symbol atoms should not be pulled out from the expression namespace
f = lambdify(x, pi + x, {"pi": 3.14})
assert f(0) == 3.14
f = lambdify(x, I + x, {"I": 1j})
assert f(1) == 1 + 1j
#================== Test different modules =========================
# high precision output of sin(0.2) is used to detect unintended loss of precision
@conserve_mpmath_dps
def test_sympy_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "sympy")
assert f(x) == sin(x)
prec = 1e-15
assert -prec < f(Rational(1, 5)).evalf() - Float(str(sin02)) < prec
# arctan is in numpy module and should not be available
raises(NameError, lambda: lambdify(x, arctan(x), "sympy"))
@conserve_mpmath_dps
def test_math_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "math")
prec = 1e-15
assert -prec < f(0.2) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a python math function
@conserve_mpmath_dps
def test_mpmath_lambda():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin(x), "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(mpmath.mpf("0.2")) - sin02 < prec
raises(TypeError, lambda: f(x))
# if this succeeds, it can't be a mpmath function
@conserve_mpmath_dps
@XFAIL
def test_number_precision():
mpmath.mp.dps = 50
sin02 = mpmath.mpf("0.19866933079506121545941262711838975037020672954020")
f = lambdify(x, sin02, "mpmath")
prec = 1e-49 # mpmath precision is around 50 decimal places
assert -prec < f(0) - sin02 < prec
#================== Test Translations ==============================
# We can only check if all translated functions are valid. It has to be checked
# by hand if they are complete.
def test_math_transl():
from sympy.utilities.lambdify import MATH_TRANSLATIONS
for sym, mat in MATH_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert mat in math.__dict__
def test_mpmath_transl():
from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
for sym, mat in MPMATH_TRANSLATIONS.items():
assert sym in sympy.__dict__ or sym == 'Matrix'
assert mat in mpmath.__dict__
def test_numpy_transl():
if not numpy:
skip("numpy not installed.")
from sympy.utilities.lambdify import NUMPY_TRANSLATIONS
for sym, nump in NUMPY_TRANSLATIONS.items():
assert sym in sympy.__dict__
assert nump in numpy.__dict__
def test_numpy_translation_abs():
if not numpy:
skip("numpy not installed.")
f = lambdify(x, Abs(x), "numpy")
assert f(-1) == 1
assert f(1) == 1
def test_numexpr_printer():
if not numexpr:
skip("numexpr not installed.")
# if translation/printing is done incorrectly then evaluating
# a lambdified numexpr expression will throw an exception
from sympy.printing.lambdarepr import NumExprPrinter
from sympy import S
blacklist = ('where', 'complex', 'contains')
arg_tuple = (x, y, z) # some functions take more than one argument
for sym in NumExprPrinter._numexpr_functions.keys():
if sym in blacklist:
continue
ssym = S(sym)
if hasattr(ssym, '_nargs'):
nargs = ssym._nargs[0]
else:
nargs = 1
args = arg_tuple[:nargs]
f = lambdify(args, ssym(*args), modules='numexpr')
assert f(*(1, )*nargs) is not None
def test_issue_9334():
if not numexpr:
skip("numexpr not installed.")
if not numpy:
skip("numpy not installed.")
expr = sympy.S('b*a - sqrt(a**2)')
a, b = sorted(expr.free_symbols, key=lambda s: s.name)
func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False)
foo, bar = numpy.random.random((2, 4))
func_numexpr(foo, bar)
#================== Test some functions ============================
def test_exponentiation():
f = lambdify(x, x**2)
assert f(-1) == 1
assert f(0) == 0
assert f(1) == 1
assert f(-2) == 4
assert f(2) == 4
assert f(2.5) == 6.25
def test_sqrt():
f = lambdify(x, sqrt(x))
assert f(0) == 0.0
assert f(1) == 1.0
assert f(4) == 2.0
assert abs(f(2) - 1.414) < 0.001
assert f(6.25) == 2.5
def test_trig():
f = lambdify([x], [cos(x), sin(x)])
d = f(pi)
prec = 1e-11
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
d = f(3.14159)
prec = 1e-5
assert -prec < d[0] + 1 < prec
assert -prec < d[1] < prec
#================== Test vectors ===================================
def test_vector_simple():
f = lambdify((x, y, z), (z, y, x))
assert f(3, 2, 1) == (1, 2, 3)
assert f(1.0, 2.0, 3.0) == (3.0, 2.0, 1.0)
# make sure correct number of args required
raises(TypeError, lambda: f(0))
def test_vector_discontinuous():
f = lambdify(x, (-1/x, 1/x))
raises(ZeroDivisionError, lambda: f(0))
assert f(1) == (-1.0, 1.0)
assert f(2) == (-0.5, 0.5)
assert f(-2) == (0.5, -0.5)
def test_trig_symbolic():
f = lambdify([x], [cos(x), sin(x)])
d = f(pi)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_trig_float():
f = lambdify([x], [cos(x), sin(x)])
d = f(3.14159)
assert abs(d[0] + 1) < 0.0001
assert abs(d[1] - 0) < 0.0001
def test_docs():
f = lambdify(x, x**2)
assert f(2) == 4
f = lambdify([x, y, z], [z, y, x])
assert f(1, 2, 3) == [3, 2, 1]
f = lambdify(x, sqrt(x))
assert f(4) == 2.0
f = lambdify((x, y), sin(x*y)**2)
assert f(0, 5) == 0
def test_math():
f = lambdify((x, y), sin(x), modules="math")
assert f(0, 5) == 0
def test_sin():
f = lambdify(x, sin(x)**2)
assert isinstance(f(2), float)
f = lambdify(x, sin(x)**2, modules="math")
assert isinstance(f(2), float)
def test_matrix():
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol = Matrix([[1, 2], [sin(3) + 4, 1]])
f = lambdify((x, y, z), A, modules="sympy")
assert f(1, 2, 3) == sol
f = lambdify((x, y, z), (A, [A]), modules="sympy")
assert f(1, 2, 3) == (sol, [sol])
J = Matrix((x, x + y)).jacobian((x, y))
v = Matrix((x, y))
sol = Matrix([[1, 0], [1, 1]])
assert lambdify(v, J, modules='sympy')(1, 2) == sol
assert lambdify(v.T, J, modules='sympy')(1, 2) == sol
def test_numpy_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
#Lambdify array first, to ensure return to array as default
f = lambdify((x, y, z), A, ['numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
#Check that the types are arrays and matrices
assert isinstance(f(1, 2, 3), numpy.ndarray)
def test_numpy_transpose():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A.T, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, 0], [2, 1]]))
def test_numpy_inverse():
if not numpy:
skip("numpy not installed.")
A = Matrix([[1, x], [0, 1]])
f = lambdify((x), A**-1, modules="numpy")
numpy.testing.assert_array_equal(f(2), numpy.array([[1, -2], [0, 1]]))
def test_numpy_old_matrix():
if not numpy:
skip("numpy not installed.")
A = Matrix([[x, x*y], [sin(z) + 4, x**z]])
sol_arr = numpy.array([[1, 2], [numpy.sin(3) + 4, 1]])
f = lambdify((x, y, z), A, [{'ImmutableMatrix': numpy.matrix}, 'numpy'])
numpy.testing.assert_allclose(f(1, 2, 3), sol_arr)
assert isinstance(f(1, 2, 3), numpy.matrix)
def test_numpy_piecewise():
if not numpy:
skip("numpy not installed.")
pieces = Piecewise((x, x < 3), (x**2, x > 5), (0, True))
f = lambdify(x, pieces, modules="numpy")
numpy.testing.assert_array_equal(f(numpy.arange(10)),
numpy.array([0, 1, 2, 0, 0, 0, 36, 49, 64, 81]))
# If we evaluate somewhere all conditions are False, we should get back NaN
nodef_func = lambdify(x, Piecewise((x, x > 0), (-x, x < 0)))
numpy.testing.assert_array_equal(nodef_func(numpy.array([-1, 0, 1])),
numpy.array([1, numpy.nan, 1]))
def test_numpy_logical_ops():
if not numpy:
skip("numpy not installed.")
and_func = lambdify((x, y), And(x, y), modules="numpy")
or_func = lambdify((x, y), Or(x, y), modules="numpy")
not_func = lambdify((x), Not(x), modules="numpy")
arr1 = numpy.array([True, True])
arr2 = numpy.array([False, True])
numpy.testing.assert_array_equal(and_func(arr1, arr2), numpy.array([False, True]))
numpy.testing.assert_array_equal(or_func(arr1, arr2), numpy.array([True, True]))
numpy.testing.assert_array_equal(not_func(arr2), numpy.array([True, False]))
def test_numpy_matmul():
if not numpy:
skip("numpy not installed.")
xmat = Matrix([[x, y], [z, 1+z]])
ymat = Matrix([[x**2], [Abs(x)]])
mat_func = lambdify((x, y, z), xmat*ymat, modules="numpy")
numpy.testing.assert_array_equal(mat_func(0.5, 3, 4), numpy.array([[1.625], [3.5]]))
numpy.testing.assert_array_equal(mat_func(-0.5, 3, 4), numpy.array([[1.375], [3.5]]))
# Multiple matrices chained together in multiplication
f = lambdify((x, y, z), xmat*xmat*xmat, modules="numpy")
numpy.testing.assert_array_equal(f(0.5, 3, 4), numpy.array([[72.125, 119.25],
[159, 251]]))
def test_numpy_numexpr():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b, c = numpy.random.randn(3, 128, 128)
# ensure that numpy and numexpr return same value for complicated expression
expr = sin(x) + cos(y) + tan(z)**2 + Abs(z-y)*acos(sin(y*z)) + \
Abs(y-z)*acosh(2+exp(y-x))- sqrt(x**2+I*y**2)
npfunc = lambdify((x, y, z), expr, modules='numpy')
nefunc = lambdify((x, y, z), expr, modules='numexpr')
assert numpy.allclose(npfunc(a, b, c), nefunc(a, b, c))
def test_numexpr_userfunctions():
if not numpy:
skip("numpy not installed.")
if not numexpr:
skip("numexpr not installed.")
a, b = numpy.random.randn(2, 10)
uf = type('uf', (Function, ),
{'eval' : classmethod(lambda x, y : y**2+1)})
func = lambdify(x, 1-uf(x), modules='numexpr')
assert numpy.allclose(func(a), -(a**2))
uf = implemented_function(Function('uf'), lambda x, y : 2*x*y+1)
func = lambdify((x, y), uf(x, y), modules='numexpr')
assert numpy.allclose(func(a, b), 2*a*b+1)
def test_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(x) == Integral(exp(-x**2), (x, -oo, oo))
#================== Test symbolic ==================================
def test_sym_single_arg():
f = lambdify(x, x * y)
assert f(z) == z * y
def test_sym_list_args():
f = lambdify([x, y], x + y + z)
assert f(1, 2) == 3 + z
def test_sym_integral():
f = Lambda(x, exp(-x**2))
l = lambdify(x, Integral(f(x), (x, -oo, oo)), modules="sympy")
assert l(y).doit() == sqrt(pi)
def test_namespace_order():
# lambdify had a bug, such that module dictionaries or cached module
# dictionaries would pull earlier namespaces into themselves.
# Because the module dictionaries form the namespace of the
# generated lambda, this meant that the behavior of a previously
# generated lambda function could change as a result of later calls
# to lambdify.
n1 = {'f': lambda x: 'first f'}
n2 = {'f': lambda x: 'second f',
'g': lambda x: 'function g'}
f = sympy.Function('f')
g = sympy.Function('g')
if1 = lambdify(x, f(x), modules=(n1, "sympy"))
assert if1(1) == 'first f'
if2 = lambdify(x, g(x), modules=(n2, "sympy"))
# previously gave 'second f'
assert if1(1) == 'first f'
def test_imps():
# Here we check if the default returned functions are anonymous - in
# the sense that we can have more than one function with the same name
f = implemented_function('f', lambda x: 2*x)
g = implemented_function('f', lambda x: math.sqrt(x))
l1 = lambdify(x, f(x))
l2 = lambdify(x, g(x))
assert str(f(x)) == str(g(x))
assert l1(3) == 6
assert l2(3) == math.sqrt(3)
# check that we can pass in a Function as input
func = sympy.Function('myfunc')
assert not hasattr(func, '_imp_')
my_f = implemented_function(func, lambda x: 2*x)
assert hasattr(func, '_imp_')
# Error for functions with same name and different implementation
f2 = implemented_function("f", lambda x: x + 101)
raises(ValueError, lambda: lambdify(x, f(f2(x))))
def test_imps_wrong_args():
raises(ValueError, lambda: implemented_function(sin, lambda x: x))
def test_lambdify_imps():
# Test lambdify with implemented functions
# first test basic (sympy) lambdify
f = sympy.cos
assert lambdify(x, f(x))(0) == 1
assert lambdify(x, 1 + f(x))(0) == 2
assert lambdify((x, y), y + f(x))(0, 1) == 2
# make an implemented function and test
f = implemented_function("f", lambda x: x + 100)
assert lambdify(x, f(x))(0) == 100
assert lambdify(x, 1 + f(x))(0) == 101
assert lambdify((x, y), y + f(x))(0, 1) == 101
# Can also handle tuples, lists, dicts as expressions
lam = lambdify(x, (f(x), x))
assert lam(3) == (103, 3)
lam = lambdify(x, [f(x), x])
assert lam(3) == [103, 3]
lam = lambdify(x, [f(x), (f(x), x)])
assert lam(3) == [103, (103, 3)]
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {f(x): x})
assert lam(3) == {103: 3}
lam = lambdify(x, {x: f(x)})
assert lam(3) == {3: 103}
# Check that imp preferred to other namespaces by default
d = {'f': lambda x: x + 99}
lam = lambdify(x, f(x), d)
assert lam(3) == 103
# Unless flag passed
lam = lambdify(x, f(x), d, use_imps=False)
assert lam(3) == 102
def test_dummification():
t = symbols('t')
F = Function('F')
G = Function('G')
#"\alpha" is not a valid python variable name
#lambdify should sub in a dummy for it, and return
#without a syntax error
alpha = symbols(r'\alpha')
some_expr = 2 * F(t)**2 / G(t)
lam = lambdify((F(t), G(t)), some_expr)
assert lam(3, 9) == 2
lam = lambdify(sin(t), 2 * sin(t)**2)
assert lam(F(t)) == 2 * F(t)**2
#Test that \alpha was properly dummified
lam = lambdify((alpha, t), 2*alpha + t)
assert lam(2, 1) == 5
raises(SyntaxError, lambda: lambdify(F(t) * G(t), F(t) * G(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))
raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))
def test_python_keywords():
# Test for issue 7452. The automatic dummification should ensure use of
# Python reserved keywords as symbol names will create valid lambda
# functions. This is an additional regression test.
python_if = symbols('if')
expr = python_if / 2
f = lambdify(python_if, expr)
assert f(4.0) == 2.0
def test_lambdify_docstring():
func = lambdify((w, x, y, z), w + x + y + z)
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(w, x, y, z)\n\n"
"Expression:\n\n"
"w + x + y + z")
syms = symbols('a1:26')
func = lambdify(syms, sum(syms))
assert func.__doc__ == (
"Created with lambdify. Signature:\n\n"
"func(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,\n"
" a16, a17, a18, a19, a20, a21, a22, a23, a24, a25)\n\n"
"Expression:\n\n"
"a1 + a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19 + a2 + a20 +...")
#================== Test special printers ==========================
def test_special_printers():
class IntervalPrinter(LambdaPrinter):
"""Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
def _print_Integer(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Integer(expr)
def _print_Rational(self, expr):
return "mpi('%s')" % super(IntervalPrinter, self)._print_Rational(expr)
def intervalrepr(expr):
return IntervalPrinter().doprint(expr)
expr = sympy.sqrt(sympy.sqrt(2) + sympy.sqrt(3)) + sympy.S(1)/2
func0 = lambdify((), expr, modules="mpmath", printer=intervalrepr)
func1 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter)
func2 = lambdify((), expr, modules="mpmath", printer=IntervalPrinter())
mpi = type(mpmath.mpi(1, 2))
assert isinstance(func0(), mpi)
assert isinstance(func1(), mpi)
assert isinstance(func2(), mpi)
def test_true_false():
# We want exact is comparison here, not just ==
assert lambdify([], true)() is True
assert lambdify([], false)() is False
def test_issue_2790():
assert lambdify((x, (y, z)), x + y)(1, (2, 4)) == 3
assert lambdify((x, (y, (w, z))), w + x + y + z)(1, (2, (3, 4))) == 10
assert lambdify(x, x + 1, dummify=False)(1) == 2
| bsd-3-clause |
ross/memcache-debug-panel | memcache_toolbar/panels/__init__.py | 1 | 3882 | # work around modules with the same name
from __future__ import absolute_import
from datetime import datetime
from debug_toolbar.panels import Panel
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from os.path import dirname, realpath
import django
import logging
import SocketServer
import traceback
logger = logging.getLogger(__name__)
class Calls:
def __init__(self):
self.reset()
def reset(self):
self._calls = []
def append(self, call):
self._calls.append(call)
def calls(self):
return self._calls
def size(self):
return len(self._calls)
def last(self):
return self._calls[-1]
# NOTE this is not even close to thread-safe/aware
instance = Calls()
# based on the function with the same name in ddt's sql; I'd rather just use
# it than copy it, but I can't import it without things blowing up
django_path = realpath(dirname(django.__file__))
socketserver_path = realpath(dirname(SocketServer.__file__))
def tidy_stacktrace(strace):
trace = []
for s in strace[:-1]:
s_path = realpath(s[0])
if getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}).get(
'HIDE_DJANGO_SQL', True) \
and django_path in s_path and not 'django/contrib' in s_path:
continue
if socketserver_path in s_path:
continue
trace.append((s[0], s[1], s[2], s[3]))
return trace
def record(func):
def recorder(*args, **kwargs):
if getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}).get(
'ENABLE_STACKTRACES', True):
stacktrace = tidy_stacktrace(traceback.extract_stack())
else:
stacktrace = []
call = {'function': func.__name__, 'args': None,
'stacktrace': stacktrace}
# the try here is just being extra safe, it should not happen
try:
a = None
# first arg is self, do we have another
if len(args) > 1:
a = args[1]
# is it a dictionary (most likely multi)
if isinstance(a, dict):
# just use it's keys
a = a.keys()
# store the args
call['args'] = a
except Exception:
logger.exception('tracking of call args failed')
ret = None
try:
# the clock starts now
call['start'] = datetime.now()
ret = func(*args, **kwargs)
finally:
# the clock stops now
dur = datetime.now() - call['start']
call['duration'] = (dur.seconds * 1000) + (dur.microseconds / 1000.0)
instance.append(call)
return ret
return recorder
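# How the decorator is meant to be applied (sketch; the client class and
# method names are placeholders, not a guaranteed integration point):
#
#     import memcache
#     memcache.Client.get = record(memcache.Client.get)
#     memcache.Client.set = record(memcache.Client.set)
#
# Every wrapped call then lands in `instance` with its function name, args,
# duration and (optionally) a tidied stacktrace, which BasePanel renders.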
class BasePanel(Panel):
name = 'Memcache'
has_content = True
def process_request(self, request):
instance.reset()
@property
def nav_title(self):
return _('Memcache')
@property
def nav_subtitle(self):
duration = 0
calls = instance.calls()
for call in calls:
duration += call['duration']
n = len(calls)
if (n > 0):
return "%d calls, %0.2fms" % (n, duration)
else:
return "0 calls"
@property
def title(self):
return _('Memcache Calls')
@property
def url(self):
return ''
@property
def template(self):
return 'memcache_toolbar/panels/memcache.html'
@property
def content(self):
duration = 0
calls = instance.calls()
for call in calls:
duration += call['duration']
context = {
'calls': calls,
'count': len(calls),
'duration': duration,
}
return render_to_string(self.template, context)
| bsd-3-clause |
yanlend/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
machine-intelligence/rl-teacher-atari | agents/pposgd-mpi/pposgd_mpi/mlp_policy.py | 1 | 2614 | from pposgd_mpi.common.mpi_running_mean_std import RunningMeanStd
import pposgd_mpi.common.tf_util as U
import tensorflow as tf
import gym
from pposgd_mpi.common.distributions import make_pdtype
class MlpPolicy(object):
recurrent = False
def __init__(self, name, *args, **kwargs):
with tf.variable_scope(name):
self._init(*args, **kwargs)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, hid_size, num_hid_layers, gaussian_fixed_var=True):
assert isinstance(ob_space, gym.spaces.Box)
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="obs", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
with tf.variable_scope("obfilter"):
self.ob_rms = RunningMeanStd(shape=ob_space.shape)
obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(U.dense(last_out, hid_size, "vffc%i" % (i + 1), weight_init=U.normc_initializer(1.0)))
self.vpred = U.dense(last_out, 1, "vffinal", weight_init=U.normc_initializer(1.0))[:, 0]
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(U.dense(last_out, hid_size, "polfc%i" % (i + 1), weight_init=U.normc_initializer(1.0)))
if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
mean = U.dense(last_out, pdtype.param_shape()[0] // 2, "polfinal", U.normc_initializer(0.01))
logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0] // 2], initializer=tf.zeros_initializer())
pdparam = U.concatenate([mean, mean * 0.0 + logstd], axis=1)
else:
pdparam = U.dense(last_out, pdtype.param_shape()[0], "polfinal", U.normc_initializer(0.01))
self.pd = pdtype.pdfromflat(pdparam)
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
self._act = U.function([stochastic, ob], [ac, self.vpred])
def act(self, stochastic, ob):
ac1, vpred1 = self._act(stochastic, ob[None])
return ac1[0], vpred1[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
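# Construction sketch (illustrative; the spaces are invented and an
# initialized default TensorFlow session is assumed to exist):
#
#     import numpy as np
#     ob_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(4,))
#     ac_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,))
#     pi = MlpPolicy('pi', ob_space, ac_space, hid_size=64, num_hid_layers=2)
#     action, value = pi.act(stochastic=True, ob=np.zeros(4))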
| mit |
vheon/YouCompleteMe | third_party/pythonfutures/crawl.py | 85 | 2422 | """Compare the speed of downloading URLs sequentially vs. using futures."""
import functools
import time
import timeit
import sys
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from concurrent.futures import (as_completed, ThreadPoolExecutor,
ProcessPoolExecutor)
URLS = ['http://www.google.com/',
'http://www.apple.com/',
'http://www.ibm.com',
'http://www.thisurlprobablydoesnotexist.com',
'http://www.slashdot.org/',
'http://www.python.org/',
'http://www.bing.com/',
'http://www.facebook.com/',
'http://www.yahoo.com/',
'http://www.youtube.com/',
'http://www.blogger.com/']
def load_url(url, timeout):
kwargs = {'timeout': timeout} if sys.version_info >= (2, 6) else {}
return urlopen(url, **kwargs).read()
def download_urls_sequential(urls, timeout=60):
url_to_content = {}
for url in urls:
try:
url_to_content[url] = load_url(url, timeout=timeout)
except:
pass
return url_to_content
def download_urls_with_executor(urls, executor, timeout=60):
try:
url_to_content = {}
future_to_url = dict((executor.submit(load_url, url, timeout), url)
for url in urls)
for future in as_completed(future_to_url):
try:
url_to_content[future_to_url[future]] = future.result()
except:
pass
return url_to_content
finally:
executor.shutdown()
def main():
for name, fn in [('sequential',
functools.partial(download_urls_sequential, URLS)),
('processes',
functools.partial(download_urls_with_executor,
URLS,
ProcessPoolExecutor(10))),
('threads',
functools.partial(download_urls_with_executor,
URLS,
ThreadPoolExecutor(10)))]:
sys.stdout.write('%s: ' % name.ljust(12))
start = time.time()
url_map = fn()
sys.stdout.write('%.2f seconds (%d of %d downloaded)\n' %
(time.time() - start, len(url_map), len(URLS)))
if __name__ == '__main__':
main()
| gpl-3.0 |
donald-pinckney/EM-Simulator | EM Sim/EM SimContent/Lib/distutils/command/install_headers.py | 251 | 1346 | """distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
__revision__ = "$Id$"
from distutils.core import Command
# XXX force is never used
class install_headers(Command):
description = "install C/C++ header files"
user_options = [('install-dir=', 'd',
"directory to install header files to"),
('force', 'f',
"force installation (overwrite existing files)"),
]
boolean_options = ['force']
def initialize_options(self):
self.install_dir = None
self.force = 0
self.outfiles = []
def finalize_options(self):
self.set_undefined_options('install',
('install_headers', 'install_dir'),
('force', 'force'))
def run(self):
headers = self.distribution.headers
if not headers:
return
self.mkpath(self.install_dir)
for header in headers:
(out, _) = self.copy_file(header, self.install_dir)
self.outfiles.append(out)
def get_inputs(self):
return self.distribution.headers or []
def get_outputs(self):
return self.outfiles
# class install_headers
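# setup.py sketch: the command installs whatever the distribution lists
# under `headers` (the project name and header path below are invented):
#
#     from distutils.core import setup
#     setup(name='spam', version='1.0',
#           headers=['include/spammodule.h'])
#
# then: python setup.py install_headers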
| apache-2.0 |
technologiescollege/Blockly-rduino-communication | scripts/Lib/site-packages/werkzeug/_compat.py | 342 | 6311 | # flake8: noqa
# This whole file is full of lint errors
import codecs
import sys
import operator
import functools
import warnings
try:
import builtins
except ImportError:
import __builtin__ as builtins
PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith('win')
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
int_to_byte = chr
iter_bytes = iter
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def fix_tuple_repr(obj):
def __repr__(self):
cls = self.__class__
return '%s(%s)' % (cls.__name__, ', '.join(
'%s=%r' % (field, self[index])
for index, field in enumerate(cls._fields)
))
obj.__repr__ = __repr__
return obj
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def native_string_result(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs).encode('utf-8')
return functools.update_wrapper(wrapper, func)
def implements_bool(cls):
cls.__nonzero__ = cls.__bool__
del cls.__bool__
return cls
from itertools import imap, izip, ifilter
range_type = xrange
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
NativeStringIO = BytesIO
def make_literal_wrapper(reference):
return _identity
def normalize_string_tuple(tup):
"""Normalizes a string tuple to a common type. Following Python 2
rules, upgrades to unicode are implicit.
"""
if any(isinstance(x, text_type) for x in tup):
return tuple(to_unicode(x) for x in tup)
return tup
def try_coerce_native(s):
"""Try to coerce a unicode string to native if possible. Otherwise,
leave it as unicode.
"""
try:
return to_native(s)
except UnicodeError:
return s
wsgi_get_bytes = _identity
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, bytes):
return s
return s.encode(charset, errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.encode(charset, errors)
else:
unichr = chr
text_type = str
string_types = (str, )
integer_types = (int, )
iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
int_to_byte = operator.methodcaller('to_bytes', 1, 'big')
iter_bytes = functools.partial(map, int_to_byte)
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
fix_tuple_repr = _identity
implements_iterator = _identity
implements_to_string = _identity
implements_bool = _identity
native_string_result = _identity
imap = map
izip = zip
ifilter = filter
range_type = range
from io import StringIO, BytesIO
NativeStringIO = StringIO
_latin1_encode = operator.methodcaller('encode', 'latin1')
def make_literal_wrapper(reference):
if isinstance(reference, text_type):
return _identity
return _latin1_encode
def normalize_string_tuple(tup):
"""Ensures that all types in the tuple are either strings
or bytes.
"""
tupiter = iter(tup)
is_text = isinstance(next(tupiter, None), text_type)
for arg in tupiter:
if isinstance(arg, text_type) != is_text:
raise TypeError('Cannot mix str and bytes arguments (got %s)'
% repr(tup))
return tup
try_coerce_native = _identity
wsgi_get_bytes = _latin1_encode
def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
return s.encode('latin1').decode(charset, errors)
def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
if isinstance(s, text_type):
s = s.encode(charset)
return s.decode('latin1', errors)
def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)): # noqa
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Expected bytes')
def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
if x is None or isinstance(x, str):
return x
return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
allow_none_charset=False):
if x is None:
return None
if not isinstance(x, bytes):
return text_type(x)
if charset is None and allow_none_charset:
return x
return x.decode(charset, errors)
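# Behaviour sketch of the three coercion helpers on Python 3 (illustrative):
#
#     to_bytes(u'caf\xe9', 'utf-8')        # -> b'caf\xc3\xa9'
#     to_native(b'caf\xc3\xa9', 'utf-8')   # -> 'caf\xe9' (str on Python 3)
#     to_unicode(b'caf\xc3\xa9', 'utf-8')  # -> u'caf\xe9'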
| gpl-3.0 |
djpnewton/beerme | beerme/__init__.py | 1 | 5457 | # coding: utf-8
import os
from datetime import datetime, timedelta
from flask import Flask, session
from flask_sqlalchemy import SQLAlchemy
from flask_kvsession import KVSessionExtension
from simplekv.db.sql import SQLAlchemyStore
from sqlalchemy import create_engine, MetaData
from flask_seasurf import SeaSurf
from flask_limiter import Limiter
import time
import random
import decimal
import uuid
import requests
import json
import config
import utils  # assumed sibling module providing send_email_beer_alert() used below
# create global config
config = config.Config(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.cfg'))
# create flask wsgi app
app = Flask(__name__, template_folder='templates')
app.debug = False
app.config.update({
'SQLALCHEMY_DATABASE_URI': config.main.db_connection,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': config.main.secure_cookie,
'PERMANENT_SESSION_LIFETIME': timedelta(minutes=config.main.session_lifetime),
'SECRET_KEY': config.main.secret_key
})
# mandrill middlelware
if config.email.use_mandrill:
app.config['MANDRILL_API_KEY'] = config.email.mandrill_api_key
app.config['MANDRILL_DEFAULT_FROM'] = config.email.from_
# add sqlalchemy middleware
db = SQLAlchemy(app)
# add flask_kvsession middleware
app.config['SESSION_KEY_BITS'] = 128
engine = create_engine('sqlite:///beerme/sessions.sqlite')
metadata = MetaData(bind=engine)
store = SQLAlchemyStore(engine, metadata, 'kvstore')
metadata.create_all()
KVSessionExtension(store, app)
# add flask csrf middleware
csrf = SeaSurf(app)
# add rate limiting middleware
limiter = Limiter(app)
auth_limit = limiter.shared_limit("5/minute;1/second", scope="auth")
# app constants
SATOSHIS = 1e8
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String, unique=True)
class Beer(db.Model):
id = db.Column(db.Integer, primary_key=True)
brew = db.Column(db.String)
table = db.Column(db.String)
name = db.Column(db.String)
price_satoshis = db.Column(db.Integer)
address = db.Column(db.String)
guid = db.Column(db.String, unique=True)
txid = db.Column(db.String, unique=True)
paid = db.Column(db.Boolean)
processed = db.Column(db.Boolean)
def init_db():
db.create_all()
@app.before_first_request
def init_all_on_first_request():
init_db()
@app.before_request
def refresh_session():
if session.has_key('gen_time'):
gen_time = session['gen_time']
lifetime = app.config['PERMANENT_SESSION_LIFETIME'].seconds
if time.time() > gen_time + lifetime / 2:
session.regenerate()
session['gen_time'] = time.time()
else:
session['gen_time'] = time.time()
def user_create(email):
user = User(email=email)
db.session.add(user)
db.session.commit()
return user
def beer_callback_url(guid):
callback_url = "http://beerme.djpsoft.com/payment?beer_guid=%s&secret=%s" % (guid, config.main.payment_secret)
return callback_url
def beer_new_address(guid):
callback_url = beer_callback_url(guid)
bc_payment_url = 'https://api.blockcypher.com/v1/btc/main/payments'
payload = {'token': config.main.blockcypher_token, 'destination': config.main.payment_address, 'callback_url': callback_url}
print 'callback_url:', callback_url
print 'bc_payment_url:', bc_payment_url
r = requests.post(bc_payment_url, data=json.dumps(payload))
if r.status_code == 200:
data = r.json()
print 'returned callback_url:', data['callback_url']
if data['callback_url'] != callback_url or data['destination'] != config.main.payment_address:
return None
return data['input_address']
def beer_price():
bbb_url = 'https://bitpay.com/api/rates/nzd'
r = requests.get(bbb_url)
if r.status_code == 200:
rate = r.json()['rate']
beer_price_nzd = config.main.beer_price
# convert to satoshis
return int(beer_price_nzd / rate * SATOSHIS)
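# Worked example (rate invented): with BTC trading at 600 NZD and
# config.main.beer_price == 9 NZD, this returns
# int(9 / 600.0 * 1e8) == 1500000 satoshis, i.e. 0.015 BTC.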
def beer_add(brew, table, name, price_satoshis):
# get input address
guid = uuid.uuid4()
address = beer_new_address(guid)
if not address:
return None
# create new entry
beer = Beer(brew=brew, table=table, name=name, price_satoshis=price_satoshis, address=address, guid=str(guid), paid=False, processed=False)
db.session.add(beer)
db.session.commit()
return beer
def beer_payment(req):
json = req.get_json(force=True)
satoshis = json['value']
address = json['input_address']
txid = json['input_transaction_hash']
guid = req.args.get('beer_guid')
secret = req.args.get('secret')
print 'satoshis:', satoshis
print 'address:', address
print 'txid:', txid
print 'guid:', guid
print 'secret:', secret
if secret != config.main.payment_secret:
print 'secret (%s) does not match payment_secret' % secret
return
beer = Beer.query.filter_by(guid=guid).first()
if not beer:
print 'invalid beer guid'
return
if address != beer.address:
print 'invalid address (should be %s)' % beer.address
return
if satoshis < beer.price_satoshis - config.main.payment_txcost_threshold:
print 'payment insufficient (should be %s)' % (beer.price_satoshis - config.main.payment_txcost_threshold)
return
beer.txid = txid
beer.paid = True
db.session.add(beer)
db.session.commit()
# send email
utils.send_email_beer_alert(config, beer)
import beerme.views
| mit |
st135yle/django-site | dbenv/lib/python3.4/site-packages/psycopg2/tests/testconfig.py | 23 | 1431 | # Configure the test suite from the env variables.
import os
dbname = os.environ.get('PSYCOPG2_TESTDB', 'psycopg2_test')
dbhost = os.environ.get('PSYCOPG2_TESTDB_HOST', None)
dbport = os.environ.get('PSYCOPG2_TESTDB_PORT', None)
dbuser = os.environ.get('PSYCOPG2_TESTDB_USER', None)
dbpass = os.environ.get('PSYCOPG2_TESTDB_PASSWORD', None)
# Check if we want to test psycopg's green path.
green = os.environ.get('PSYCOPG2_TEST_GREEN', None)
if green:
if green == '1':
from psycopg2.extras import wait_select as wait_callback
elif green == 'eventlet':
from eventlet.support.psycopg2_patcher import eventlet_wait_callback \
as wait_callback
else:
raise ValueError("please set 'PSYCOPG2_TEST_GREEN' to a valid value")
import psycopg2.extensions
psycopg2.extensions.set_wait_callback(wait_callback)
# Construct a DSN to connect to the test database:
dsn = 'dbname=%s' % dbname
if dbhost is not None:
dsn += ' host=%s' % dbhost
if dbport is not None:
dsn += ' port=%s' % dbport
if dbuser is not None:
dsn += ' user=%s' % dbuser
if dbpass is not None:
dsn += ' password=%s' % dbpass
# Don't run replication tests if REPL_DSN is not set, default to normal DSN if
# set to empty string.
repl_dsn = os.environ.get('PSYCOPG2_TEST_REPL_DSN', None)
if repl_dsn == '':
repl_dsn = dsn
repl_slot = os.environ.get('PSYCOPG2_TEST_REPL_SLOT', 'psycopg2_test_slot')
| mit |
food52/thumbor | thumbor/detectors/local_detector.py | 14 | 2401 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from os.path import join, dirname, abspath, isabs
try:
import cv
except ImportError:
import cv2.cv as cv
from thumbor.point import FocalPoint
from thumbor.detectors import BaseDetector
class CascadeLoaderDetector(BaseDetector):
def load_cascade_file(self, module_path, cascade_file_path):
if not hasattr(self.__class__, 'cascade'):
if isabs(cascade_file_path):
cascade_file = cascade_file_path
else:
cascade_file = join(abspath(dirname(module_path)), cascade_file_path)
self.__class__.cascade = cv.Load(cascade_file)
def get_min_size_for(self, size):
ratio = int(min(size) / 15)
ratio = max(20, ratio)
return (ratio, ratio)
def get_features(self):
engine = self.context.modules.engine
mode, converted_image = engine.image_data_as_rgb(False)
size = engine.size
image = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
cv.SetData(image, converted_image)
gray = cv.CreateImage(size, 8, 1)
convert_mode = getattr(cv, 'CV_%s2GRAY' % mode)
cv.CvtColor(image, gray, convert_mode)
min_size = self.get_min_size_for(size)
haar_scale = 1.2
min_neighbors = 3
cv.EqualizeHist(gray, gray)
faces = cv.HaarDetectObjects(
gray,
self.__class__.cascade, cv.CreateMemStorage(0),
haar_scale, min_neighbors,
cv.CV_HAAR_DO_CANNY_PRUNING, min_size)
faces_scaled = []
for ((x, y, w, h), n) in faces:
# the input to cv.HaarDetectObjects was resized, so scale the
# bounding box of each face and convert it to two CvPoints
x2, y2 = (x + w), (y + h)
faces_scaled.append(((x, y, x2 - x, y2 - y), n))
return faces_scaled
def detect(self, callback):
features = self.get_features()
if features:
for square, neighbors in features:
self.context.request.focal_points.append(FocalPoint.from_square(*square))
callback()
else:
self.next(callback)
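# Subclassing sketch (illustrative; the constructor signature is assumed to
# match BaseDetector and the cascade filename is hypothetical): concrete
# detectors only point the loader at an OpenCV Haar cascade file and reuse
# get_features()/detect() unchanged.
#
#     class FaceDetector(CascadeLoaderDetector):
#         def __init__(self, context, index, detectors):
#             super(FaceDetector, self).__init__(context, index, detectors)
#             self.load_cascade_file(__file__,
#                                    'haarcascade_frontalface_alt.xml')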
| mit |
s-hertel/ansible | packaging/sdist/check-link-behavior.py | 114 | 1290 | #!/usr/bin/env python
"""Checks for link behavior required for sdist to retain symlinks."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import shutil
import sys
import tempfile
def main():
"""Main program entry point."""
temp_dir = tempfile.mkdtemp()
target_path = os.path.join(temp_dir, 'file.txt')
symlink_path = os.path.join(temp_dir, 'symlink.txt')
hardlink_path = os.path.join(temp_dir, 'hardlink.txt')
try:
with open(target_path, 'w'):
pass
os.symlink(target_path, symlink_path)
os.link(symlink_path, hardlink_path)
if not os.path.islink(symlink_path):
abort('Symbolic link not created.')
if not os.path.islink(hardlink_path):
# known issue on MacOS (Darwin)
abort('Hard link of symbolic link created as a regular file.')
finally:
shutil.rmtree(temp_dir)
def abort(reason):
"""
:type reason: str
"""
sys.exit('ERROR: %s\n'
'This will prevent symbolic links from being preserved in the resulting tarball.\n'
'Aborting creation of sdist on platform: %s'
% (reason, platform.system()))
if __name__ == '__main__':
main()
| gpl-3.0 |
shastah/spacewalk | client/debian/packages-already-in-debian/rhn-client-tools/src/actions/up2date_config.py | 12 | 2988 | #!/usr/bin/python
# Copyright (c) 1999--2015 Red Hat, Inc. Distributed under GPLv2.
#
# Author: Adrian Likins <alikins@redhat.com>
#
import os
import re
import sys
sys.path.append("/usr/share/rhn")
from up2date_client import config
cfg = config.initUp2dateConfig()
__rhnexport__ = [
'update',
'rpmmacros',
'get']
argVerbose = 0
def update(configdict, cache_only=None):
"""Invoke this to change the ondisk configuration of up2date"""
if cache_only:
return (0, "no-ops for caching", {})
if argVerbose > 1:
print "called update_up2date_config"
if type(configdict) != type({}):
return (13, "Invalid arguments passed to function", {})
unknownparams = []
if cfg['disallowConfChanges']:
skipParams = cfg['disallowConfChanges']
else:
skipParams = []
for param in configdict.keys():
# dont touch params in the skip params list
if param in skipParams:
continue
# write out all params, even ones we dont know about
# could be useful
cfg.set(param, configdict[param])
if len(unknownparams):
return unknownparams
cfg.save()
return (0, "config updated", {})
def get(cache_only=None):
"""Reterieve the current configuration of up2date"""
if cache_only:
return (0, "no-ops for caching", {})
if argVerbose > 1:
print "called get_up2date_config"
ret = {}
for k in cfg.keys():
ret[k] = cfg[k]
return (0, "configuration retrived", {'data' : ret})
def rpmmacros(macroName, macroValue, cache_only):
if cache_only:
return (0, "no-ops for caching", {})
writeUp2dateMacro(macroName, macroValue)
return (0, "%s set to %s" % (macroName, macroValue), {})
def writeUp2dateMacro(macroName, macroValue):
if os.access("/etc/rpm/macros.up2date", os.R_OK):
f = open("/etc/rpm/macros.up2date", "r")
lines = f.readlines()
f.close()
else:
lines = []
comment_r = re.compile("\s*#.*")
value_r = re.compile("%s.*" % macroName)
blank_r = re.compile("\s*")
newfile = []
for line in lines:
m = value_r.match(line)
if m:
continue
m = comment_r.match(line)
if m:
newfile.append(line)
continue
newfile.append(line)
# dont care about blank lines...
newfile.append("\n")
newfile.append("%s %s" % (macroName, macroValue))
f = open("/etc/rpm/macros.up2date", "w")
for line in newfile:
f.write(line)
f.write("\n")
f.close()
def main():
configdatatup = get()
configdata = configdatatup[2]['data']
import time
timestamp = time.time()
configdata['timeStampTest'] = timestamp
print configdata
import pprint
pprint.pprint(update(configdata))
configdata['serverURL'] = "http://localhost/XMLRPC"
pprint.pprint(update(configdata))
if __name__ == "__main__":
main()
| gpl-2.0 |
joshloyal/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 24 | 2080 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
jordigh/mercurial-crew | hgext/churn.py | 2 | 6887 | # churn.py - create a graph of revisions count grouped by template
#
# Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
# Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to display statistics about repository history'''
from mercurial.i18n import _
from mercurial import patch, cmdutil, scmutil, util, templater, commands
import os
import time, datetime
testedwith = 'internal'
def maketemplater(ui, repo, tmpl):
tmpl = templater.parsestring(tmpl, quoted=False)
try:
t = cmdutil.changeset_templater(ui, repo, False, None, None, False)
except SyntaxError, inst:
raise util.Abort(inst.args[0])
t.use_template(tmpl)
return t
def changedlines(ui, repo, ctx1, ctx2, fns):
added, removed = 0, 0
fmatch = scmutil.matchfiles(repo, fns)
diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
for l in diff.split('\n'):
if l.startswith("+") and not l.startswith("+++ "):
added += 1
elif l.startswith("-") and not l.startswith("--- "):
removed += 1
return (added, removed)
def countrate(ui, repo, amap, *pats, **opts):
"""Calculate stats"""
if opts.get('dateformat'):
def getkey(ctx):
t, tz = ctx.date()
date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
return date.strftime(opts['dateformat'])
else:
tmpl = opts.get('template', '{author|email}')
tmpl = maketemplater(ui, repo, tmpl)
def getkey(ctx):
ui.pushbuffer()
tmpl.show(ctx)
return ui.popbuffer()
state = {'count': 0}
rate = {}
df = False
if opts.get('date'):
df = util.matchdate(opts['date'])
m = scmutil.match(repo[None], pats, opts)
def prep(ctx, fns):
rev = ctx.rev()
if df and not df(ctx.date()[0]): # doesn't match date format
return
key = getkey(ctx).strip()
key = amap.get(key, key) # alias remap
if opts.get('changesets'):
rate[key] = (rate.get(key, (0,))[0] + 1, 0)
else:
parents = ctx.parents()
if len(parents) > 1:
ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
return
ctx1 = parents[0]
lines = changedlines(ui, repo, ctx1, ctx, fns)
rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
state['count'] += 1
ui.progress(_('analyzing'), state['count'], total=len(repo))
for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
continue
ui.progress(_('analyzing'), None)
return rate
def churn(ui, repo, *pats, **opts):
'''histogram of changes to the repository
This command will display a histogram representing the number
of changed lines or revisions, grouped according to the given
template. The default template will group changes by author.
The --dateformat option may be used to group the results by
date instead.
Statistics are based on the number of changed lines, or
alternatively the number of matching revisions if the
--changesets option is specified.
Examples::
# display count of changed lines for every committer
hg churn -t '{author|email}'
# display daily activity graph
hg churn -f '%H' -s -c
# display activity of developers by month
hg churn -f '%Y-%m' -s -c
# display count of lines changed in every year
hg churn -f '%Y' -s
It is possible to map alternate email addresses to a main address
by providing a file using the following format::
<alias email> = <actual email>
Such a file may be specified with the --aliases option, otherwise
a .hgchurn file will be looked for in the working directory root.
Aliases will be split from the rightmost "=".
'''
def pad(s, l):
return (s + " " * l)[:l]
amap = {}
aliases = opts.get('aliases')
if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
aliases = repo.wjoin('.hgchurn')
if aliases:
for l in open(aliases, "r"):
try:
alias, actual = l.rsplit('=' in l and '=' or None, 1)
amap[alias.strip()] = actual.strip()
except ValueError:
l = l.strip()
if l:
ui.warn(_("skipping malformed alias: %s\n") % l)
continue
rate = countrate(ui, repo, amap, *pats, **opts).items()
if not rate:
return
if opts.get('sort'):
rate.sort()
else:
rate.sort(key=lambda x: (-sum(x[1]), x))
# Be careful not to have a zero maxcount (issue833)
maxcount = float(max(sum(v) for k, v in rate)) or 1.0
maxname = max(len(k) for k, v in rate)
ttywidth = ui.termwidth()
ui.debug("assuming %i character terminal\n" % ttywidth)
width = ttywidth - maxname - 2 - 2 - 2
if opts.get('diffstat'):
width -= 15
def format(name, diffstat):
added, removed = diffstat
return "%s %15s %s%s\n" % (pad(name, maxname),
'+%d/-%d' % (added, removed),
ui.label('+' * charnum(added),
'diffstat.inserted'),
ui.label('-' * charnum(removed),
'diffstat.deleted'))
else:
width -= 6
def format(name, count):
return "%s %6d %s\n" % (pad(name, maxname), sum(count),
'*' * charnum(sum(count)))
def charnum(count):
return int(round(count * width / maxcount))
for name, count in rate:
ui.write(format(name, count))
cmdtable = {
"churn":
(churn,
[('r', 'rev', [],
_('count rate for the specified revision or range'), _('REV')),
('d', 'date', '',
_('count rate for revisions matching date spec'), _('DATE')),
('t', 'template', '{author|email}',
_('template to group changesets'), _('TEMPLATE')),
('f', 'dateformat', '',
_('strftime-compatible format for grouping by date'), _('FORMAT')),
('c', 'changesets', False, _('count rate by number of changesets')),
('s', 'sort', False, _('sort by key (default: sort by count)')),
('', 'diffstat', False, _('display added/removed lines separately')),
('', 'aliases', '',
_('file with email aliases'), _('FILE')),
] + commands.walkopts,
_("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]")),
}
commands.inferrepo += " churn"
| gpl-2.0 |
globality-corp/microcosm-pubsub | microcosm_pubsub/consumer.py | 1 | 5739 | """
Message consumer.
"""
from os.path import exists
from urllib.parse import urlparse
from boto3 import Session
from microcosm.api import defaults, typed
from microcosm_logging.decorators import logger
from microcosm_pubsub.backoff import BackoffPolicy
from microcosm_pubsub.reader import SQSFileReader, SQSJsonReader, SQSStdInReader
STDIN = "STDIN"
def is_file(url):
if exists(url):
return True
return exists(urlparse(url).path)
@logger
class SQSConsumer:
"""
    Consume messages from a (single) SQS queue.
"""
def __init__(
self,
sqs_client,
sqs_envelope,
sqs_queue_url,
limit,
wait_seconds,
backoff_policy,
):
self.sqs_client = sqs_client
self.sqs_envelope = sqs_envelope
self.sqs_queue_url = sqs_queue_url
self.limit = limit
self.wait_seconds = wait_seconds
self.backoff_policy = backoff_policy
def consume(self):
"""
Consume a batch of messages.
:returns: a list of `SQSMessage`
"""
return [
self.sqs_envelope.parse_raw_message(self, raw_message)
for raw_message in self.sqs_client.receive_message(
AttributeNames=[
"ApproximateReceiveCount",
],
MaxNumberOfMessages=self.limit,
QueueUrl=self.sqs_queue_url,
WaitTimeSeconds=self.wait_seconds,
).get("Messages", [])
]
def ack(self, message):
"""
Acknowledge that a message was processed successfully.
Deletes the message from the queue.
"""
self.sqs_client.delete_message(
QueueUrl=self.sqs_queue_url,
ReceiptHandle=message.receipt_handle,
)
def nack(self, message, visibility_timeout_seconds=None):
"""
Acknowledge that a message was NOT processed successfully.
(Re)sets the retry visibility timeout on the message.
There are three cases here:
1. We raised `Nack`; in this case, we explicitly wish to reprocess the message in the
near future; setting the visibility timeout is the right thing to do.
2a. We raised some non-`Nack` error and the `BackoffPolicy` HAS a configured value for its
`message_retry_visibility_timeout_seconds` meaning an operator has chosen to reprocess
all messages within that time limit; setting the visibility timeout is the right thing to do.
2b. We raised some non-`Nack` error and the `BackoffPolicy` DOES NOT HAVE a configured value for its
`message_retry_visibility_timeout_seconds` config meaning that the system will fallback to
its default behavior. We can either fallback to the SQS queue default (usually 30s) -- meaning
not setting the visibility timeout -- or we can enforce a default in this library -- using
the sqs consumer's config and override the SQS queue's visibility with a known, smallish value.
We choose the latter under the assumption that 30s is too long to reprocess most messages
as a default and that long-running handlers will be configured accordingly (see: 2a).
Therefore: we always invoke `change_message_visibility`
"""
timeout = self.backoff_policy.compute_backoff_timeout(message, visibility_timeout_seconds)
self.sqs_client.change_message_visibility(
QueueUrl=self.sqs_queue_url,
ReceiptHandle=message.receipt_handle,
VisibilityTimeout=timeout,
)
def configure_sqs_client(graph):
endpoint_url = graph.config.sqs_consumer.endpoint_url
profile_name = graph.config.sqs_consumer.profile_name
region_name = graph.config.sqs_consumer.region_name
session = Session(profile_name=profile_name)
return session.client(
"sqs",
endpoint_url=endpoint_url,
region_name=region_name,
)
@defaults(
endpoint_url=None,
profile_name=None,
region_name=None,
# backoff policy
backoff_policy="NaiveBackoffPolicy",
# SQS will not return more than ten messages at a time
limit=typed(int, default_value=10),
# SQS will only return a few messages at time unless long polling is enabled (>0)
wait_seconds=typed(int, default_value=1),
# On error, change the visibility timeout when nacking
message_retry_visibility_timeout_seconds=typed(int, default_value=5),
)
def configure_sqs_consumer(graph):
"""
Configure an SQS consumer.
"""
sqs_queue_url = graph.config.sqs_consumer.sqs_queue_url
sqs_event = graph.config.sqs_consumer.sqs_event
if graph.metadata.testing or sqs_queue_url == "test":
from unittest.mock import MagicMock
sqs_client = MagicMock()
elif sqs_event: # AWS Lambda invocation
sqs_client = SQSJsonReader(sqs_event)
elif sqs_queue_url == STDIN:
sqs_client = SQSStdInReader()
elif is_file(sqs_queue_url):
sqs_client = SQSFileReader(sqs_queue_url)
else:
sqs_client = configure_sqs_client(graph)
backoff_policy_class = BackoffPolicy.choose_backoff_policy(
graph.config.sqs_consumer.backoff_policy,
)
backoff_policy = backoff_policy_class(
message_retry_visibility_timeout_seconds=graph.config.sqs_consumer.message_retry_visibility_timeout_seconds,
)
return SQSConsumer(
backoff_policy=backoff_policy,
limit=graph.config.sqs_consumer.limit,
sqs_client=sqs_client,
sqs_envelope=graph.sqs_envelope,
sqs_queue_url=sqs_queue_url,
wait_seconds=graph.config.sqs_consumer.wait_seconds,
)
| apache-2.0 |
stefan-andritoiu/upm | tests/check_file_encoding.py | 12 | 1990 | #!/usr/bin/python
import unittest
import os
import chardet
target_exts = ['.h', '.hpp', '.hxx', '.txt']
valid_encodings = ['ascii', 'utf-8']
class EncodingTests(unittest.TestCase):
    '''Non-ASCII/UTF-8 encodings can cause failures in downstream tools
    such as documentation generation and Python 2 module loading. This
    class helps find files that could cause an encoding problem.'''
def test_headers_ascii(self):
'''Assert/print list of:
file:linenumber offending line
for all lines of matching files which are not in valid_encodings'''
        # Keep a map of files with alternate encodings to report
        invalid_files = {}
        # Recursively search cwd for files with target_exts
for root, dirs, files in os.walk(os.curdir):
# Work on full paths
for file in files:
file = os.path.join(root, file)
# Skip any files not ending with target_exts
if not any(file.lower().endswith(x) for x in target_exts):
continue
# Check each with chardet
                with open(file, 'rb') as f:
                    for ndx, line in enumerate(f):
                        result = chardet.detect(line)
                        if result['encoding'] not in valid_encodings:
                            if file not in invalid_files:
invalid_files[file] = []
invalid_files[file].append([ndx,line])
# Sort the failures by filename
skeys = list(invalid_files.keys())
skeys.sort()
invalid_lines = ''
for fn in skeys:
for line in invalid_files[fn]:
invalid_lines += '%s:%d %s' % (fn, line[0], line[1])
self.assertEqual( len(invalid_files), 0,
"\nThe following modules have alternate encodings:\n" + \
invalid_lines)
if __name__ == '__main__':
unittest.main()
| mit |
dfdx/masque | others/faceless-python-old/faceless/lucaskanade.py | 1 | 7321 |
from PIL import Image
from scipy.ndimage.interpolation import affine_transform
from numpy import *
from matplotlib import pylab as plt
from matplotlib import gridspec
# nabla_Ix = array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]])
# nabla_Iy = array([[1, 1, 3, 3], [1, 1, 3, 3], [1, 1, 3, 3]])
# im_grad = (nabla_Ix, nabla_Iy)
# w, h = (4, 3)
N_p = 6
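# NOTE: ij2xy is used by inv_comp and the test scenarios below but was missing
# from this file. A minimal sketch is provided here, assuming points are given
# as (i, j) = (row, col) pairs that must be swapped into (x, y) order.
def ij2xy(pts):
    """Swap (row, col) point coordinates into (x, y) order."""
    pts = asarray(pts)
    return pts[:, ::-1]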
def imgrad(im):
"""[nabla(I_x), nabla(I_y)]"""
if len(im.shape) != 2:
raise Exception("Can work only with grayscale images")
grad = [g.astype(int32) for g in gradient(im.astype(int32))]
grad.reverse()
return grad
def flatten_params(A, b):
M = hstack([A, b.reshape((b.size, 1))])
return M.flatten()
def structure_params(p):
p = p.reshape(2, 3)
return p[:, 0:2], p[:, -1]
def interp_im(im, y, x):
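    # Bilinear interpolation: sample im at fractional (y, x) positions by
    # blending the four surrounding pixels, weighted by distance to each.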
x = asarray(x)
y = asarray(y)
x0 = floor(x).astype(int)
x1 = x0 + 1
y0 = floor(y).astype(int)
y1 = y0 + 1
x0 = clip(x0, 0, im.shape[1]-1);
x1 = clip(x1, 0, im.shape[1]-1);
y0 = clip(y0, 0, im.shape[0]-1);
y1 = clip(y1, 0, im.shape[0]-1);
Ia = im[ y0, x0 ]
Ib = im[ y1, x0 ]
Ic = im[ y0, x1 ]
Id = im[ y1, x1 ]
wa = (x1-x) * (y1-y)
wb = (x1-x) * (y-y0)
wc = (x-x0) * (y1-y)
wd = (x-x0) * (y-y0)
return wa*Ia + wb*Ib + wc*Ic + wd*Id
# TODO: Visualize!
def quadtobox(im, dst, M):
# Dimensions of destination image - integers, assume rectangle
minv = amin(dst.T, axis=0)
maxv = amax(dst.T, axis=0)
# xg, yg = meshgrid(range(maxv[0] + 1), range(maxv[1] + 1))
xg, yg = meshgrid(range(minv[0], maxv[0]), range(minv[1], maxv[1]))
xy = vstack([xg.T.flatten(), yg.T.flatten()])
xy = vstack([xy, ones((1, xy.shape[1]))])
# Transform into source
uv = dot(M, xy)
# Remove homogeneous
uv = uv[0:2,:].T
# Sample
xi = uv[:, 0].reshape((maxv[0] - minv[0], maxv[1] - minv[1])).T
yi = uv[:, 1].reshape((maxv[0] - minv[0], maxv[1] - minv[1])).T
wimg = interp_im(im, yi, xi)
return wimg
def warp_a(im, p, dst):
p = asarray(p).reshape(2, 3)
M = vstack([p, [0, 0, 1]])
M[0, 0] += 1
M[1, 1] += 1
wimg = quadtobox(im, dst, M)
return wimg
def jacobian(nx, ny):
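    # Jacobian dW/dp of the affine warp, evaluated at every pixel and laid out
    # as a (2*ny, 6*nx) block matrix: the top block row holds dW_x/dp_i and
    # the bottom block row holds dW_y/dp_i for the six affine parameters.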
jac_x = kron(array([range(0, nx)]), ones((ny, 1)))
jac_y = kron(array([range(0, ny)]).T, ones((1, nx)))
jac_zero = zeros((ny, nx))
jac_one = ones((ny, nx))
row_1 = hstack([jac_x, jac_zero, jac_y, jac_zero, jac_one, jac_zero])
row_2 = hstack([jac_zero, jac_x, jac_zero, jac_y, jac_zero, jac_one])
dW_dp = vstack([row_1, row_2])
return dW_dp
def sd_images(dW_dp, im_grad, N_p, h, w):
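    # Steepest-descent images: for each parameter p_i, combine the template
    # gradient with the corresponding Jacobian block, nabla(T) * dW/dp_i.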
nabla_Ix, nabla_Iy = im_grad # TODO: swap axes
VI_dW_dp = zeros((h, w * N_p))
for p in range(0, N_p):
Tx = nabla_Ix * dW_dp[0:h, p * w : p * w + w]
Ty = nabla_Iy * dW_dp[h:, p * w : p * w + w]
VI_dW_dp[:, p * w : p * w + w] = Tx + Ty
return VI_dW_dp
def sd_update(VI_dW_dp, error_im, N_p, w):
sd_delta_p = zeros((N_p, 1))
for p in range(N_p):
h1 = VI_dW_dp[:, p*w : p*w + w]
sd_delta_p[p] = sum(h1 * error_im)
return sd_delta_p
def hessian(VI_dW_dp, N_p, w):
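    # Gauss-Newton approximation of the Hessian: H[i, j] is the sum over all
    # pixels of the product of steepest-descent images i and j.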
H = zeros((N_p, N_p))
for i in range(N_p):
h1 = VI_dW_dp[:, i*w : i*w + w]
for j in range(N_p):
h2 = VI_dW_dp[:, j*w : j*w + w]
H[i, j] = sum(h1 * h2)
return H
def update_step(p, delta_p):
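    # Inverse compositional update: compose the current warp with the inverse
    # of the incremental warp, W(x; p) <- W(x; p) o W(x; delta_p)^-1.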
p = p.reshape(2, 3)
delta_p = delta_p.reshape((2, 3))
# print '[0] p =', p
# print '[1] delta_p = ', delta_p
delta_M = vstack([delta_p, array([0, 0, 1])])
delta_M[0, 0] = delta_M[0, 0] + 1
delta_M[1, 1] = delta_M[1, 1] + 1
# print '[2] delta_M =', delta_M
delta_M = linalg.inv(delta_M)
# print '[3] inv(delta_M) =', delta_M
warp_M = vstack([p, array([0, 0, 1])])
warp_M[0, 0] += 1
warp_M[1, 1] += 1
comp_M = dot(warp_M, delta_M)
# print '[4] comp_M =', comp_M
p = comp_M[0:2, :]
p[0, 0] -= 1
p[1, 1] -= 1
return p.flatten()
def inv_comp(im, tmpl, n_iter=10, p_init=zeros((6,))):
"""Applies inverse compositional approach to aligning im to tmpl.
Estimates vector of parameters p = [p_1, p_2, p_3, p_4, p_5, p_6]"""
im = im.astype(int64)
tmpl = tmpl.astype(int64)
h, w = tmpl.shape
# tmpl_pts = array([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).T
tmpl_pts = ij2xy(array([[90, 260], [90, 530], [400, 530], [400, 260]])).T
nabla_T = imgrad(tmpl)
dW_dp = jacobian(w, h)
VT_dW_dp = sd_images(dW_dp, nabla_T, N_p, h, w)
# show_sd_images(VT_dW_dp, w)
# import time; time.sleep(5)
H = hessian(VT_dW_dp, N_p, w)
H_inv = linalg.inv(H)
warp_p = p_init.copy()
fit_err = []
for i in range(n_iter):
print 'iteration %s' % i
IWxp = warp_a(im, warp_p, tmpl_pts)
plot_imgs([im, IWxp], ratios=[2, 1])
plt.show()
error_im = IWxp - tmpl
fit_err.append(sqrt(mean(error_im * error_im)))
print "MSE: ", fit_err[-1]
sd_delta_p = sd_update(VT_dW_dp, error_im, N_p, w)
delta_p = dot(H_inv, sd_delta_p)
warp_p = update_step(warp_p, delta_p)
return warp_p
######### REPL Helpers ############
def show(im, gray=True):
plt.figure()
if gray:
plt.gray()
plt.imshow(im)
plt.show()
def show_pil(im, gray=None):
Image.fromarray(uint8(im)).show()
def show_sd_images(sd_imgs, w):
for i in xrange(6):
show_pil(sd_imgs[:, i*w : (i + 1)*w])
def add_rect(i, j, h, w):
plt.gca().add_patch(plt.Rectangle((j, i), w, h, fill=False))
def plot_imgs(imgs, ratios=[1, 1]):
plt.gray()
gs = gridspec.GridSpec(1, len(imgs), width_ratios=ratios)
for i in range(len(imgs)):
plt.subplot(gs[i])
plt.imshow(imgs[i])
return gs
######## Test Scenarios ###########
face_dst = array([[90, 260], [90, 530], [400, 530], [400, 260]])
def test_warp_a():
im = array(Image.open('face.bmp').convert('L'))
dst = face_dst
p = array([0, 0, 0, 0, 0, 0])
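    # NOTE: assumed completion - the original body ended here; apply the
    # identity warp and return the sampled patch (mirrors test_inv_comp).
    return warp_a(im, p, ij2xy(dst).T)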
def test_inv_comp(p_real=[0, .1, .1, 0, 0, 0], n_iter=10):
im = asarray(Image.open('face.bmp').convert('L'))
imh, imw = im.shape
dst = array([[90, 260], [90, 530], [400, 530], [400, 260]])
i0, j0 = dst.min(axis=0)
i1, j1 = dst.max(axis=0)
# tmpl = im[i0:i1, j0:j1]
tmpl = warp_a(im, p_real, ij2xy(dst).T)
return inv_comp(im, tmpl, n_iter)
def test_rect():
im = array(Image.open('face.bmp').convert('L'))
pts = array([[90, 260], [90, 530], [400, 530], [400, 260]])
i0, j0 = pts.min(axis=0)
i1, j1 = pts.max(axis=0)
plt.figure()
plt.subplot(1, 2, 1)
plt.gray()
plt.imshow(im)
add_rect(i0, j0, i1 - i0, j1 - j0)
plt.subplot(1, 2, 2)
box = im[i0:i1, j0:j1]
plt.imshow(box)
plt.show()
def test_rect2():
im = array(Image.open('face.bmp').convert('L'))
pts = array([[90, 260], [90, 530], [400, 530], [400, 260]])
i0, j0 = pts.min(axis=0)
i1, j1 = pts.max(axis=0)
box = im[i0:i1, j0:j1]
gs = plot_imgs([im, box], ratios=[2, 1])
# plt.subplot(gs[0])
# add_rect(i0, j0, i1 - i0, j1 - j0)
plt.show()
return
| mit |
trabacus-softapps/openerp-8.0-cc | openerp/addons/account_voucher/invoice.py | 382 | 2496 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class invoice(osv.osv):
_inherit = 'account.invoice'
def invoice_pay_customer(self, cr, uid, ids, context=None):
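        # Open the "Pay Invoice" voucher dialog, pre-filled with the invoice's
        # accounting partner, residual amount, currency and voucher type.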
if not ids: return []
dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account_voucher', 'view_vendor_receipt_dialog_form')
inv = self.browse(cr, uid, ids[0], context=context)
return {
'name':_("Pay Invoice"),
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'account.voucher',
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': {
'payment_expected_currency': inv.currency_id.id,
'default_partner_id': self.pool.get('res.partner')._find_accounting_partner(inv.partner_id).id,
'default_amount': inv.type in ('out_refund', 'in_refund') and -inv.residual or inv.residual,
'default_reference': inv.name,
'close_after_process': True,
'invoice_type': inv.type,
'invoice_id': inv.id,
'default_type': inv.type in ('out_invoice','out_refund') and 'receipt' or 'payment',
'type': inv.type in ('out_invoice','out_refund') and 'receipt' or 'payment'
}
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
IQSS/geoconnect | gc_apps/gis_tabular/tests/test_json_field.py | 1 | 2515 | from __future__ import print_function
from os.path import abspath, dirname, isfile, join, isdir
from unittest import skip
from django.test import TestCase
from gc_apps.gis_tabular.models import TestIt
from msg_util import msgt
class JSONFieldTester(TestCase):
def setUp(self):
pass
#self.upload_california_census()
#self.upload_mass_census()
def tearDown(self):
pass
def test_1_dict(self):
"""Save data dict to JSONField, check type pre/post"""
msgt(self.test_1_dict.__doc__)
# attributes
#
tname = 'Obj 1'
col_names_val = dict(a=1, b=2, c=3) #range(1,10)
d = dict(name=tname,
column_names=col_names_val)
# make object and save it
#
tunsaved = TestIt(**d)
tunsaved.save()
# Retrieve object
#
tsaved = TestIt.objects.first()
print (tsaved.column_names)
print (type(tsaved.column_names))
self.assertEqual(tsaved.column_names, col_names_val)
self.assertEqual(tsaved.name, tname)
#tsaved.delete()
def test_2_string(self):
"""Save data string to JSONField, check type pre/post"""
msgt(self.test_2_string.__doc__)
#print TestIt.objects.count()
# attributes
#
tname = 'Obj 1'
col_names_val = 'ze string'
d = dict(name=tname,
column_names=col_names_val)
# make object and save it
#
tunsaved = TestIt(**d)
tunsaved.save()
# Retrieve object
#
tsaved = TestIt.objects.first()
print (tsaved.column_names)
print (type(tsaved.column_names))
self.assertEqual(tsaved.column_names, col_names_val)
self.assertEqual(tsaved.name, tname)
def test_3_list(self):
"""Save data list to JSONField, check type pre/post"""
msgt(self.test_3_list.__doc__)
#print TestIt.objects.count()
# attributes
#
tname = 'Obj 1'
        col_names_val = list(range(10))
d = dict(name=tname,
column_names=col_names_val)
# make object and save it
#
tunsaved = TestIt(**d)
tunsaved.save()
# Retrieve object
#
tsaved = TestIt.objects.first()
print (tsaved.column_names)
print (type(tsaved.column_names))
self.assertEqual(tsaved.column_names, col_names_val)
self.assertEqual(tsaved.name, tname)
| apache-2.0 |
zenodo/zenodo | zenodo/modules/deposit/views.py | 1 | 13211 | # -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016, 2017 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Redirects for legacy URLs."""
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
from functools import wraps
from flask import Blueprint, Markup, abort, current_app, flash, redirect, \
render_template, request, url_for
from flask_babelex import gettext as _
from flask_security import current_user, login_required
from invenio_accounts.models import User
from invenio_communities.models import Community
from invenio_db import db
from invenio_pidrelations.contrib.versioning import PIDVersioning
from invenio_pidstore.errors import PIDDeletedError, PIDDoesNotExistError
from invenio_pidstore.models import PersistentIdentifier
from invenio_pidstore.resolver import Resolver
from invenio_records_ui.signals import record_viewed
from zenodo.modules.deposit.utils import delete_record
from zenodo.modules.openaire import current_openaire
from zenodo.modules.records import current_zenodo_records
from zenodo.modules.records.permissions import has_admin_permission, \
record_permission_factory
from zenodo.modules.utils import obj_or_import_string
from .api import ZenodoDeposit
from .fetchers import zenodo_deposit_fetcher
from .forms import RecordDeleteForm
blueprint = Blueprint(
'zenodo_deposit',
__name__,
url_prefix='',
template_folder='templates',
static_folder='static',
)
@blueprint.errorhandler(PIDDeletedError)
def tombstone_errorhandler(error):
"""Render tombstone page."""
return render_template(
current_app.config['RECORDS_UI_TOMBSTONE_TEMPLATE'],
pid=error.pid,
record=error.record or {},
), 410
def pass_record(action, deposit_cls=ZenodoDeposit):
"""Pass record and deposit record to function."""
def decorator(f):
@wraps(f)
def inner(pid_value):
# Resolve pid_value to record pid and record
pid, record = pid_value.data
# Check permissions.
permission = record_permission_factory(
record=record, action=action)
if not permission.can():
abort(403)
# Fetch deposit id from record and resolve deposit record and pid.
depid = zenodo_deposit_fetcher(None, record)
if not depid:
abort(404)
depid, deposit = Resolver(
pid_type=depid.pid_type,
object_type='rec',
getter=deposit_cls.get_record,
).resolve(depid.pid_value)
return f(pid=pid, record=record, depid=depid, deposit=deposit)
return inner
return decorator
def can_user_create(f):
"""Check if the user is verified for the appropriate amount of time."""
@wraps(f)
def inner(*args, **kwargs):
can, error_message = True, ''
permission_func = obj_or_import_string(current_app.config.get(
'ZENODO_DEPOSIT_CREATE_PERMISSION'))
if permission_func and callable(permission_func):
can, error_message = permission_func()
if can or has_admin_permission(None, None):
return f(*args, **kwargs)
flash(error_message, category='warning')
abort(403)
return inner
@login_required
def legacy_index():
"""Legacy deposit."""
c_id = request.args.get('c', type=str)
if c_id:
return redirect(url_for('invenio_deposit_ui.new', c=c_id))
return render_template(current_app.config['DEPOSIT_UI_INDEX_TEMPLATE'])
@login_required
@can_user_create
def new():
"""Create a new deposit."""
c = Community.get(request.args.get('c', type=str))
return render_template(current_app.config['DEPOSIT_UI_NEW_TEMPLATE'],
record={'_deposit': {'id': None}}, community=c)
@blueprint.route(
'/record/<pid(recid,record_class='
'"zenodo.modules.records.api:ZenodoRecord"):pid_value>',
methods=['POST']
)
@login_required
@pass_record('update')
def edit(pid=None, record=None, depid=None, deposit=None):
"""Edit a record."""
# If the record doesn't have a DOI, its deposit shouldn't be editable.
if 'doi' not in record:
abort(404)
return redirect(url_for(
'invenio_deposit_ui.{0}'.format(depid.pid_type),
pid_value=depid.pid_value
))
@blueprint.route(
'/record/<pid(recid,record_class='
'"zenodo.modules.records.api:ZenodoRecord"):pid_value>'
'/newversion',
methods=['POST']
)
@login_required
@pass_record('newversion')
def newversion(pid=None, record=None, depid=None, deposit=None):
"""Create a new version of a record."""
# If the record doesn't have a DOI, its deposit shouldn't be editable.
if 'doi' not in record:
abort(404)
# FIXME: Maybe this has to go inside the API (`ZenodoDeposit.newversion`)
# If this is not the latest version, get the latest and extend it
latest_pid = PIDVersioning(child=pid).last_child
if pid != latest_pid:
        # We still want to do a POST, so we specify a 307 redirect code
return redirect(url_for('zenodo_deposit.newversion',
pid_value=latest_pid.pid_value), code=307)
deposit.newversion()
db.session.commit()
new_version_deposit = PIDVersioning(child=pid).draft_child_deposit
return redirect(url_for(
'invenio_deposit_ui.{0}'.format(new_version_deposit.pid_type),
pid_value=new_version_deposit.pid_value
))
@blueprint.route(
'/record/<pid(recid,record_class='
'"zenodo.modules.records.api:ZenodoRecord"):pid_value>'
'/registerconceptdoi',
methods=['POST']
)
@login_required
@pass_record('registerconceptdoi')
def registerconceptdoi(pid=None, record=None, depid=None, deposit=None):
"""Register the Concept DOI for the record."""
    # Abort if the record already has a Concept DOI - nothing to register.
    if 'conceptdoi' in record:
abort(404) # TODO: Abort with better code if record is versioned
deposit.registerconceptdoi()
db.session.commit()
return redirect(url_for('invenio_records_ui.recid',
pid_value=pid.pid_value))
@blueprint.route(
'/record'
'/<pid(recid,record_class='
'"zenodo.modules.records.api:ZenodoRecord"):pid_value>'
'/admin/delete',
methods=['GET', 'POST']
)
@login_required
@pass_record('delete')
def delete(pid=None, record=None, depid=None, deposit=None):
"""Delete a record."""
# View disabled until properly implemented and tested.
try:
doi = PersistentIdentifier.get('doi', record['doi'])
except PIDDoesNotExistError:
doi = None
owners = User.query.filter(User.id.in_(record.get('owners', []))).all()
pids = [pid, depid, doi]
if 'conceptdoi' in record:
conceptdoi = PersistentIdentifier.get('doi', record['conceptdoi'])
pids.append(conceptdoi)
else:
conceptdoi = None
if 'conceptrecid' in record:
conceptrecid = PersistentIdentifier.get('recid',
record['conceptrecid'])
pids.append(conceptrecid)
else:
conceptrecid = None
form = RecordDeleteForm()
form.standard_reason.choices = current_app.config['ZENODO_REMOVAL_REASONS']
if form.validate_on_submit():
reason = form.reason.data or dict(
current_app.config['ZENODO_REMOVAL_REASONS']
)[form.standard_reason.data]
delete_record(record.id, reason, int(current_user.get_id()))
flash(
_('Record %(recid)s and associated objects successfully deleted.',
recid=pid.pid_value),
category='success'
)
return redirect(url_for('zenodo_frontpage.index'))
return render_template(
'zenodo_deposit/delete.html',
form=form,
owners=owners,
pid=pid,
pids=pids,
record=record,
deposit=deposit,
)
@blueprint.app_context_processor
def current_datetime():
"""Template contex processor which adds current datetime to the context."""
now = datetime.utcnow()
return {
'current_datetime': now,
'current_date': now.date(),
'current_time': now.time(),
}
@blueprint.app_context_processor
def current_openaire_ctx():
"""Current OpenAIRE context."""
return dict(current_openaire=current_openaire)
@blueprint.app_template_filter('tolinksjs')
def to_links_js(pid, deposit=None):
"""Get API links."""
if not isinstance(deposit, ZenodoDeposit):
return []
self_url = current_app.config['DEPOSIT_RECORDS_API'].format(
pid_value=pid.pid_value)
links = {
'self': self_url,
'html': url_for(
'invenio_deposit_ui.{}'.format(pid.pid_type),
pid_value=pid.pid_value),
'bucket': current_app.config['DEPOSIT_FILES_API'] + '/{0}'.format(
str(deposit.files.bucket.id)),
'discard': self_url + '/actions/discard',
'edit': self_url + '/actions/edit',
'publish': self_url + '/actions/publish',
'newversion': self_url + '/actions/newversion',
'registerconceptdoi': self_url + '/actions/registerconceptdoi',
'files': self_url + '/files',
}
# Add versioning links
conceptrecid = deposit.get('conceptrecid')
if conceptrecid:
conceptrecid = PersistentIdentifier.get('recid', conceptrecid)
pv = PIDVersioning(parent=conceptrecid)
latest_record = pv.last_child
if latest_record:
links['latest'] = current_app.config['RECORDS_API'].format(
pid_value=latest_record.pid_value)
links['latest_html'] = url_for(
'invenio_records_ui.recid', pid_value=latest_record.pid_value)
draft_child_depid = pv.draft_child_deposit
if draft_child_depid:
links['latest_draft'] = (
current_app.config['DEPOSIT_RECORDS_API']
.format(pid_value=draft_child_depid.pid_value))
links['latest_draft_html'] = url_for(
'invenio_deposit_ui.{}'.format(draft_child_depid.pid_type),
pid_value=draft_child_depid.pid_value)
return links
@blueprint.app_template_filter('tofilesjs')
def to_files_js(deposit):
"""List files in a deposit."""
if not isinstance(deposit, ZenodoDeposit):
return []
res = []
for f in deposit.files:
res.append({
'key': f.key,
'version_id': f.version_id,
'checksum': f.file.checksum,
'size': f.file.size,
'completed': True,
'progress': 100,
'links': {
'self': (
current_app.config['DEPOSIT_FILES_API'] +
u'/{bucket}/{key}?versionId={version_id}'.format(
bucket=f.bucket_id,
key=f.key,
version_id=f.version_id,
)),
}
})
for f in deposit.multipart_files.all():
res.append({
'key': f.key,
'size': f.size,
'multipart': True,
'completed': f.completed,
'processing': True,
'progress': 100,
'links': {
'self': (
current_app.config['DEPOSIT_FILES_API'] +
u'/{bucket}/{key}?uploadId={upload_id}'.format(
bucket=f.bucket_id,
key=f.key,
upload_id=f.upload_id,
)),
}
})
return res
def default_view_method(pid, record, template=None):
"""Default view method for updating record.
Sends ``record_viewed`` signal and renders template.
:param pid: PID object ('depid'-type PID).
:param record: Record object (Deposit API).
:param template: Template to render.
"""
# Put deposit in edit mode if not already.
if record['_deposit']['status'] != 'draft':
record = record.edit()
db.session.commit()
record_viewed.send(
current_app._get_current_object(),
pid=pid,
record=record,
)
return render_template(
template,
pid=pid,
record=record,
)
| gpl-2.0 |
Jcing95/iop-hd | test/functional/replace-by-fee.py | 2 | 22471 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RBF code."""
from test_framework.test_framework import IoPTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
MAX_REPLACEMENT_LIMIT = 100
def txToHex(tx):
return bytes_to_hex_str(tx.serialize())
def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])):
"""Create a txout with a given amount and scriptPubKey
Mines coins as needed.
confirmed - txouts created will be confirmed in the blockchain;
unconfirmed otherwise.
"""
fee = 1*COIN
while node.getbalance() < satoshi_round((amount + fee)/COIN):
node.generate(100)
new_addr = node.getnewaddress()
txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN))
tx1 = node.getrawtransaction(txid, 1)
txid = int(txid, 16)
i = None
for i, txout in enumerate(tx1['vout']):
if txout['scriptPubKey']['addresses'] == [new_addr]:
break
assert i is not None
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(txid, i))]
tx2.vout = [CTxOut(amount, scriptPubKey)]
tx2.rehash()
signed_tx = node.signrawtransaction(txToHex(tx2))
txid = node.sendrawtransaction(signed_tx['hex'], True)
# If requested, ensure txouts are confirmed.
if confirmed:
mempool_size = len(node.getrawmempool())
while mempool_size > 0:
node.generate(1)
new_size = len(node.getrawmempool())
# Error out if we have something stuck in the mempool, as this
# would likely be a bug.
assert(new_size < mempool_size)
mempool_size = new_size
return COutPoint(int(txid, 16), 0)
class ReplaceByFeeTest(IoPTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args= [["-maxorphantx=1000",
"-whitelist=127.0.0.1",
"-limitancestorcount=50",
"-limitancestorsize=101",
"-limitdescendantcount=200",
"-limitdescendantsize=101"],
["-mempoolreplacement=0"]]
def run_test(self):
# Leave IBD
self.nodes[0].generate(1)
make_utxo(self.nodes[0], 1*COIN)
# Ensure nodes are synced
self.sync_all()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
self.log.info("Running test doublespend chain...")
self.test_doublespend_chain()
self.log.info("Running test doublespend tree...")
self.test_doublespend_tree()
self.log.info("Running test replacement feeperkb...")
self.test_replacement_feeperkb()
self.log.info("Running test spends of conflicting outputs...")
self.test_spends_of_conflicting_outputs()
self.log.info("Running test new unconfirmed inputs...")
self.test_new_unconfirmed_inputs()
self.log.info("Running test too many replacements...")
self.test_too_many_replacements()
self.log.info("Running test opt-in...")
self.test_opt_in()
self.log.info("Running test RPC...")
self.test_rpc()
self.log.info("Running test prioritised transactions...")
self.test_prioritised_transactions()
self.log.info("Passed")
def test_simple_doublespend(self):
"""Simple doublespend"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# make_utxo may have generated a bunch of blocks, so we need to sync
# before we can spend the coins generated, or else the resulting
# transactions might not be accepted by our peers.
self.sync_all()
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
self.sync_all()
# Should fail because we haven't changed the fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# This will raise an exception due to transaction replacement being disabled
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Extra 0.1 IOP fee
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
# Replacement still disabled even with "enough fee"
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[1].sendrawtransaction, tx1b_hex, True)
# Works when enabled
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
mempool = self.nodes[0].getrawmempool()
assert (tx1a_txid not in mempool)
assert (tx1b_txid in mempool)
assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid))
# Second node is running mempoolreplacement=0, will not replace originally-seen txn
mempool = self.nodes[1].getrawmempool()
assert tx1a_txid in mempool
assert tx1b_txid not in mempool
def test_doublespend_chain(self):
"""Doublespend of a long chain"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
prevout = tx0_outpoint
remaining_value = initial_nValue
chain_txids = []
while remaining_value > 10*COIN:
remaining_value -= 1*COIN
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = [CTxOut(remaining_value, CScript([1]))]
tx_hex = txToHex(tx)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
chain_txids.append(txid)
prevout = COutPoint(int(txid, 16), 0)
# Whether the double-spend is allowed is evaluated by including all
# child fees - 40 IOP - so this attempt is rejected.
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# Accepted with sufficient fee
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for doublespent_txid in chain_txids:
assert(doublespent_txid not in mempool)
def test_doublespend_tree(self):
"""Doublespend of a big tree of transactions"""
initial_nValue = 50*COIN
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None):
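            # Recursively build a tree of transactions: each node splits its
            # remaining value across tree_width outputs, stopping once max_txs
            # transactions exist or the per-output value drops below the fee.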
if _total_txs is None:
_total_txs = [0]
if _total_txs[0] >= max_txs:
return
txout_value = (initial_value - fee) // tree_width
if txout_value < fee:
return
vout = [CTxOut(txout_value, CScript([i+1]))
for i in range(tree_width)]
tx = CTransaction()
tx.vin = [CTxIn(prevout, nSequence=0)]
tx.vout = vout
tx_hex = txToHex(tx)
assert(len(tx.serialize()) < 100000)
txid = self.nodes[0].sendrawtransaction(tx_hex, True)
yield tx
_total_txs[0] += 1
txid = int(txid, 16)
for i, txout in enumerate(tx.vout):
for x in branch(COutPoint(txid, i), txout_value,
max_txs,
tree_width=tree_width, fee=fee,
_total_txs=_total_txs):
yield x
fee = int(0.0001*COIN)
n = MAX_REPLACEMENT_LIMIT
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
# Attempt double-spend, will fail because too little fee paid
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
# 1 IOP fee is enough
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
self.nodes[0].sendrawtransaction(dbl_tx_hex, True)
mempool = self.nodes[0].getrawmempool()
for tx in tree_txs:
tx.rehash()
assert (tx.hash not in mempool)
# Try again, but with more total transactions than the "max txs
# double-spent at once" anti-DoS limit.
for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2):
fee = int(0.0001*COIN)
tx0_outpoint = make_utxo(self.nodes[0], initial_nValue)
tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee))
assert_equal(len(tree_txs), n)
dbl_tx = CTransaction()
dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)]
dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))]
dbl_tx_hex = txToHex(dbl_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, True)
for tx in tree_txs:
tx.rehash()
self.nodes[0].getrawtransaction(tx.hash)
def test_replacement_feeperkb(self):
"""Replacement requires fee-per-KB to be higher"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the fee per KB is much lower, so the replacement is
# rejected.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception due to insufficient fee
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
def test_spends_of_conflicting_outputs(self):
"""Replacements that spend conflicting tx outputs are rejected"""
utxo1 = make_utxo(self.nodes[0], int(1.2*COIN))
utxo2 = make_utxo(self.nodes[0], 3*COIN)
tx1a = CTransaction()
tx1a.vin = [CTxIn(utxo1, nSequence=0)]
tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
tx1a_txid = int(tx1a_txid, 16)
# Direct spend an output of the transaction we're replacing.
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)]
tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0))
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
# Spend tx1a's output to test the indirect case.
tx1b = CTransaction()
tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1b_hex = txToHex(tx1b)
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
tx1b_txid = int(tx1b_txid, 16)
tx2 = CTransaction()
tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0),
CTxIn(COutPoint(tx1b_txid, 0))]
tx2.vout = tx1a.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_new_unconfirmed_inputs(self):
"""Replacements that add new unconfirmed inputs are rejected"""
confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN))
unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False)
tx1 = CTransaction()
tx1.vin = [CTxIn(confirmed_utxo)]
tx1.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1_hex = txToHex(tx1)
tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True)
tx2 = CTransaction()
tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)]
tx2.vout = tx1.vout
tx2_hex = txToHex(tx2)
# This will raise an exception
assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, True)
def test_too_many_replacements(self):
"""Replacements that evict too many transactions are rejected"""
# Try directly replacing more than MAX_REPLACEMENT_LIMIT
# transactions
# Start by creating a single transaction with many outputs
initial_nValue = 10*COIN
utxo = make_utxo(self.nodes[0], initial_nValue)
fee = int(0.0001*COIN)
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
outputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
splitting_tx.vin = [CTxIn(utxo, nSequence=0)]
splitting_tx.vout = outputs
splitting_tx_hex = txToHex(splitting_tx)
txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True)
txid = int(txid, 16)
# Now spend each of those outputs individually
for i in range(MAX_REPLACEMENT_LIMIT+1):
tx_i = CTransaction()
tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)]
tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))]
tx_i_hex = txToHex(tx_i)
self.nodes[0].sendrawtransaction(tx_i_hex, True)
# Now create doublespend of the whole lot; should fail.
# Need a big enough fee to cover all spending transactions and have
# a higher fee rate
double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1)
inputs = []
for i in range(MAX_REPLACEMENT_LIMIT+1):
inputs.append(CTxIn(COutPoint(txid, i), nSequence=0))
double_tx = CTransaction()
double_tx.vin = inputs
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
# This will raise an exception
assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, True)
# If we remove an input, it should pass
double_tx = CTransaction()
double_tx.vin = inputs[0:-1]
double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))]
double_tx_hex = txToHex(double_tx)
self.nodes[0].sendrawtransaction(double_tx_hex, True)
def test_opt_in(self):
"""Replacing should only work if orig tx opted in"""
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a non-opting in transaction
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Shouldn't be able to double-spend
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx1b_hex = txToHex(tx1b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, True)
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
# Create a different non-opting in transaction
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Still shouldn't be able to double-spend
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))]
tx2b_hex = txToHex(tx2b)
# This will raise an exception
assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now create a new transaction that spends from tx1a and tx2a
# opt-in on one of the inputs
# Transaction should be replaceable on either input
tx1a_txid = int(tx1a_txid, 16)
tx2a_txid = int(tx2a_txid, 16)
tx3a = CTransaction()
tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff),
CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)]
tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))]
tx3a_hex = txToHex(tx3a)
self.nodes[0].sendrawtransaction(tx3a_hex, True)
tx3b = CTransaction()
tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)]
tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))]
tx3b_hex = txToHex(tx3b)
tx3c = CTransaction()
tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)]
tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))]
tx3c_hex = txToHex(tx3c)
self.nodes[0].sendrawtransaction(tx3b_hex, True)
# If tx3b was accepted, tx3c won't look like a replacement,
# but make sure it is accepted anyway
self.nodes[0].sendrawtransaction(tx3c_hex, True)
def test_prioritised_transactions(self):
# Ensure that fee deltas used via prioritisetransaction are
# correctly used by replacement logic
# 1. Check that feeperkb uses modified fees
tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx1a = CTransaction()
tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx1a_hex = txToHex(tx1a)
tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True)
# Higher fee, but the actual fee per KB is much lower.
tx1b = CTransaction()
tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)]
tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))]
tx1b_hex = txToHex(tx1b)
# Verify tx1b cannot replace tx1a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, True)
# Use prioritisetransaction to set tx1a's fee to 0.
self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN))
# Now tx1b should be able to replace tx1a
tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True)
assert(tx1b_txid in self.nodes[0].getrawmempool())
# 2. Check that absolute fee checks use modified fee.
tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN))
tx2a = CTransaction()
tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))]
tx2a_hex = txToHex(tx2a)
tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True)
# Lower fee, but we'll prioritise it
tx2b = CTransaction()
tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)]
tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))]
tx2b.rehash()
tx2b_hex = txToHex(tx2b)
# Verify tx2b cannot replace tx2a.
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, True)
# Now prioritise tx2b to have a higher modified fee
self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN))
# tx2b should now be accepted
tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True)
assert(tx2b_txid in self.nodes[0].getrawmempool())
def test_rpc(self):
us0 = self.nodes[0].listunspent()[0]
ins = [us0]
outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)}
rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True)
rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False)
json0 = self.nodes[0].decoderawtransaction(rawtx0)
json1 = self.nodes[0].decoderawtransaction(rawtx1)
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967295)
rawtx2 = self.nodes[0].createrawtransaction([], outs)
frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True})
frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False})
json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex'])
json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex'])
assert_equal(json0["vin"][0]["sequence"], 4294967293)
assert_equal(json1["vin"][0]["sequence"], 4294967294)
if __name__ == '__main__':
ReplaceByFeeTest().main()
| mit |
KeyWeeUsr/plyer | plyer/platforms/android/gravity.py | 2 | 2145 | '''
Android gravity
---------------------
'''
from jnius import autoclass
from jnius import cast
from jnius import java_method
from jnius import PythonJavaClass
from plyer.facades import Gravity
from plyer.platforms.android import activity
Context = autoclass('android.content.Context')
Sensor = autoclass('android.hardware.Sensor')
SensorManager = autoclass('android.hardware.SensorManager')
class GravitySensorListener(PythonJavaClass):
__javainterfaces__ = ['android/hardware/SensorEventListener']
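    # Implements the Java SensorEventListener interface from Python via
    # PyJNIus; Android invokes onSensorChanged with each new gravity reading.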
def __init__(self):
super(GravitySensorListener, self).__init__()
service = activity.getSystemService(Context.SENSOR_SERVICE)
self.SensorManager = cast('android.hardware.SensorManager', service)
self.sensor = self.SensorManager.getDefaultSensor(
Sensor.TYPE_GRAVITY
)
self.values = [None, None, None]
def enable(self):
self.SensorManager.registerListener(
self,
self.sensor,
SensorManager.SENSOR_DELAY_NORMAL
)
def disable(self):
self.SensorManager.unregisterListener(self, self.sensor)
@java_method('(Landroid/hardware/SensorEvent;)V')
def onSensorChanged(self, event):
self.values = event.values[:3]
@java_method('(Landroid/hardware/Sensor;I)V')
def onAccuracyChanged(self, sensor, accuracy):
pass
class AndroidGravity(Gravity):
def __init__(self):
super(AndroidGravity, self).__init__()
self.state = False
def _enable(self):
if not self.state:
self.listener = GravitySensorListener()
self.listener.enable()
self.state = True
def _disable(self):
if self.state:
self.state = False
self.listener.disable()
del self.listener
def _get_gravity(self):
if self.state:
return tuple(self.listener.values)
else:
return (None, None, None)
    def __del__(self):
        if self.state:
            self._disable()
def instance():
return AndroidGravity()
| mit |
Aloomaio/googleads-python-lib | examples/adwords/v201809/targeting/add_demographic_targeting_criteria.py | 1 | 2990 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds demographic criteria to an ad group.
To get a list of ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
GENDER_MALE = '10'
AGE_RANGE_UNDETERMINED = '503999'
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service.
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201809')
# Create the ad group criteria.
ad_group_criteria = [
# Targeting criterion.
{
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Gender',
# Create gender criteria. The IDs can be found in the
# documentation:
# https://developers.google.com/adwords/api/docs/appendix/genders.
'id': GENDER_MALE
}
},
# Exclusion criterion.
{
'xsi_type': 'NegativeAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'AgeRange',
# Create age range criteria. The IDs can be found in the
# documentation:
# https://developers.google.com/adwords/api/docs/appendix/ages.
'id': AGE_RANGE_UNDETERMINED
}
}
]
# Create operations.
operations = []
for criterion in ad_group_criteria:
operations.append({
'operator': 'ADD',
'operand': criterion
})
response = ad_group_criterion_service.mutate(operations)
if response and response['value']:
criteria = response['value']
for ad_group_criterion in criteria:
criterion = ad_group_criterion['criterion']
print ('Ad group criterion with ad group ID %s, criterion ID %s and '
'type "%s" was added.' %
(ad_group_criterion['adGroupId'], criterion['id'],
criterion['type']))
else:
print 'No criteria were returned.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
| apache-2.0 |
M4rtinK/pyside-bb10 | tests/QtCore/qflags_test.py | 6 | 3405 | #!/usr/bin/python
'''Test cases for QFlags'''
import unittest
from PySide.QtCore import Qt, QTemporaryFile, QFile, QIODevice, QObject
class QFlagTest(unittest.TestCase):
'''Test case for usage of flags'''
def testCallFunction(self):
f = QTemporaryFile()
self.assertTrue(f.open())
fileName = f.fileName()
f.close()
f = QFile(fileName)
self.assertEqual(f.open(QIODevice.Truncate | QIODevice.Text | QIODevice.ReadWrite), True)
om = f.openMode()
self.assertEqual(om & QIODevice.Truncate, QIODevice.Truncate)
self.assertEqual(om & QIODevice.Text, QIODevice.Text)
self.assertEqual(om & QIODevice.ReadWrite, QIODevice.ReadWrite)
self.assertTrue(om == QIODevice.Truncate | QIODevice.Text | QIODevice.ReadWrite)
f.close()
class QFlagOperatorTest(unittest.TestCase):
'''Test case for operators in QFlags'''
def testInvert(self):
'''QFlags ~ (invert) operator'''
self.assertEqual(type(~QIODevice.ReadOnly), QIODevice.OpenMode)
def testOr(self):
'''QFlags | (or) operator'''
self.assertEqual(type(QIODevice.ReadOnly | QIODevice.WriteOnly), QIODevice.OpenMode)
def testAnd(self):
'''QFlags & (and) operator'''
self.assertEqual(type(QIODevice.ReadOnly & QIODevice.WriteOnly), QIODevice.OpenMode)
def testIOr(self):
'''QFlags |= (ior) operator'''
flag = Qt.WindowFlags()
self.assertTrue(Qt.Widget == 0)
self.assertFalse(flag & Qt.Widget)
result = flag & Qt.Widget
self.assertTrue(result == 0)
flag |= Qt.WindowMinimizeButtonHint
self.assertTrue(flag & Qt.WindowMinimizeButtonHint)
def testInvertOr(self):
'''QFlags ~ (invert) operator over the result of an | (or) operator'''
self.assertEqual(type(~(Qt.ItemIsSelectable | Qt.ItemIsEditable)), Qt.ItemFlags)
def testEqual(self):
'''QFlags == operator'''
flags = Qt.Window
flags |= Qt.WindowMinimizeButtonHint
flag_type = (flags & Qt.WindowType_Mask)
self.assertEqual(flag_type, Qt.Window)
self.assertEqual(Qt.KeyboardModifiers(Qt.ControlModifier), Qt.ControlModifier)
def testOperatorBetweenFlags(self):
'''QFlags & QFlags'''
flags = Qt.NoItemFlags | Qt.ItemIsUserCheckable
newflags = Qt.NoItemFlags | Qt.ItemIsUserCheckable
self.assertTrue(flags & newflags)
def testOperatorDifferentOrder(self):
'''Different ordering of arguments'''
flags = Qt.NoItemFlags | Qt.ItemIsUserCheckable
self.assertEqual(flags | Qt.ItemIsEnabled, Qt.ItemIsEnabled | flags)
class QFlagsOnQVariant(unittest.TestCase):
def testQFlagsOnQVariant(self):
o = QObject()
o.setProperty("foo", QIODevice.ReadOnly | QIODevice.WriteOnly)
self.assertEqual(type(o.property("foo")), QIODevice.OpenMode)
class QFlagsWrongType(unittest.TestCase):
def testWrongType(self):
'''Wrong type passed to QFlags binary operators'''
        self.assertRaises(TypeError, lambda: Qt.NoItemFlags | '43')
        self.assertRaises(TypeError, lambda: Qt.NoItemFlags & '43')
        self.assertRaises(TypeError, lambda: 'jabba' & Qt.NoItemFlags)
        self.assertRaises(TypeError, lambda: 'hut' & Qt.NoItemFlags)
        self.assertRaises(TypeError, lambda: Qt.NoItemFlags & QObject())
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |