| repo_name | ref | path | copies | content |
|---|---|---|---|---|
nhenezi/kuma
|
refs/heads/master
|
vendor/packages/nose/unit_tests/test_twisted_testcase.py
|
10
|
try:
from twisted.trial import unittest
except ImportError:
from nose import SkipTest
raise SkipTest('twisted not available; skipping')
class TestTwisted(unittest.TestCase):
def test(self):
pass
|
DuoLife/script
|
refs/heads/master
|
python/scapy-2.3.1/scapy/sendrecv.py
|
8
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Functions to send and receive packets.
"""
import cPickle,os,sys,time,subprocess
from select import select
from data import *
import arch
from config import conf
from packet import Gen
from utils import warning,get_temp_file,PcapReader,wrpcap
import plist
from error import log_runtime,log_interactive
from base_classes import SetGen
#################
## Debug class ##
#################
class debug:
recv=[]
sent=[]
match=[]
####################
## Send / Receive ##
####################
def sndrcv(pks, pkt, timeout = None, inter = 0, verbose=None, chainCC=0, retry=0, multi=0):
if not isinstance(pkt, Gen):
pkt = SetGen(pkt)
if verbose is None:
verbose = conf.verb
debug.recv = plist.PacketList([],"Unanswered")
debug.sent = plist.PacketList([],"Sent")
debug.match = plist.SndRcvList([])
nbrecv=0
ans = []
# do it here to fix random fields, so that parent and child have the same
all_stimuli = tobesent = [p for p in pkt]
notans = len(tobesent)
hsent={}
for i in tobesent:
h = i.hashret()
if h in hsent:
hsent[h].append(i)
else:
hsent[h] = [i]
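# Stimuli are bucketed by hashret() so that, on receive, a candidate
# reply only needs to be matched via answers() against the few packets
# sharing its hash rather than against everything sent.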
if retry < 0:
retry = -retry
autostop=retry
else:
autostop=0
while retry >= 0:
found=0
if timeout < 0:
timeout = None
rdpipe,wrpipe = os.pipe()
rdpipe=os.fdopen(rdpipe)
wrpipe=os.fdopen(wrpipe,"w")
pid=1
try:
pid = os.fork()
if pid == 0:
try:
sys.stdin.close()
rdpipe.close()
try:
i = 0
if verbose:
print "Begin emission:"
for p in tobesent:
pks.send(p)
i += 1
time.sleep(inter)
if verbose:
print "Finished to send %i packets." % i
except SystemExit:
pass
except KeyboardInterrupt:
pass
except:
log_runtime.exception("--- Error in child %i" % os.getpid())
log_runtime.info("--- Error in child %i" % os.getpid())
finally:
try:
os.setpgrp() # Change process group to avoid ctrl-C
sent_times = [p.sent_time for p in all_stimuli if p.sent_time]
cPickle.dump( (conf.netcache,sent_times), wrpipe )
wrpipe.close()
except:
pass
elif pid < 0:
log_runtime.error("fork error")
else:
wrpipe.close()
stoptime = 0
remaintime = None
inmask = [rdpipe,pks]
try:
try:
while 1:
if stoptime:
remaintime = stoptime-time.time()
if remaintime <= 0:
break
r = None
if arch.FREEBSD or arch.DARWIN:
inp, out, err = select(inmask,[],[], 0.05)
if len(inp) == 0 or pks in inp:
r = pks.nonblock_recv()
else:
inp, out, err = select(inmask,[],[], remaintime)
if len(inp) == 0:
break
if pks in inp:
r = pks.recv(MTU)
if rdpipe in inp:
if timeout:
stoptime = time.time()+timeout
del(inmask[inmask.index(rdpipe)])
if r is None:
continue
ok = 0
h = r.hashret()
if h in hsent:
hlst = hsent[h]
for i in range(len(hlst)):
if r.answers(hlst[i]):
ans.append((hlst[i],r))
if verbose > 1:
os.write(1, "*")
ok = 1
if not multi:
del(hlst[i])
notans -= 1
else:
if not hasattr(hlst[i], '_answered'):
notans -= 1
hlst[i]._answered = 1
break
if notans == 0 and not multi:
break
if not ok:
if verbose > 1:
os.write(1, ".")
nbrecv += 1
if conf.debug_match:
debug.recv.append(r)
except KeyboardInterrupt:
if chainCC:
raise
finally:
try:
nc,sent_times = cPickle.load(rdpipe)
except EOFError:
warning("Child died unexpectedly. Packets may have not been sent %i"%os.getpid())
else:
conf.netcache.update(nc)
for p,t in zip(all_stimuli, sent_times):
p.sent_time = t
os.waitpid(pid,0)
finally:
if pid == 0:
os._exit(0)
remain = reduce(list.__add__, hsent.values(), [])
if multi:
remain = filter(lambda p: not hasattr(p, '_answered'), remain)
if autostop and len(remain) > 0 and len(remain) != len(tobesent):
retry = autostop
tobesent = remain
if len(tobesent) == 0:
break
retry -= 1
if conf.debug_match:
debug.sent=plist.PacketList(remain[:],"Sent")
debug.match=plist.SndRcvList(ans[:])
# clean the ans list to delete the field _answered
if multi:
for s,r in ans:
if hasattr(s, '_answered'):
del(s._answered)
if verbose:
print "\nReceived %i packets, got %i answers, remaining %i packets" % (nbrecv+len(ans), len(ans), notans)
return plist.SndRcvList(ans),plist.PacketList(remain,"Unanswered")
def __gen_send(s, x, inter=0, loop=0, count=None, verbose=None, realtime=None, *args, **kargs):
if type(x) is str:
x = conf.raw_layer(load=x)
if not isinstance(x, Gen):
x = SetGen(x)
if verbose is None:
verbose = conf.verb
n = 0
if count is not None:
loop = -count
elif not loop:
loop=-1
try:
while loop:
dt0 = None
for p in x:
if realtime:
ct = time.time()
if dt0:
st = dt0+p.time-ct
if st > 0:
time.sleep(st)
else:
dt0 = ct-p.time
s.send(p)
n += 1
if verbose:
os.write(1,".")
time.sleep(inter)
if loop < 0:
loop += 1
except KeyboardInterrupt:
pass
s.close()
if verbose:
print "\nSent %i packets." % n
@conf.commands.register
def send(x, inter=0, loop=0, count=None, verbose=None, realtime=None, *args, **kargs):
"""Send packets at layer 3
send(packets, [inter=0], [loop=0], [verbose=conf.verb]) -> None"""
__gen_send(conf.L3socket(*args, **kargs), x, inter=inter, loop=loop, count=count,verbose=verbose, realtime=realtime)
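# Usage sketch (illustrative, not part of the original file; assumes an
# interactive Scapy session where IP and ICMP are loaded and the caller
# has sufficient privileges):
#
#   >>> send(IP(dst="192.0.2.1")/ICMP(), count=3, inter=0.5)
#   ...
#   Sent 3 packets.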
@conf.commands.register
def sendp(x, inter=0, loop=0, iface=None, iface_hint=None, count=None, verbose=None, realtime=None, *args, **kargs):
"""Send packets at layer 2
sendp(packets, [inter=0], [loop=0], [verbose=conf.verb]) -> None"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
__gen_send(conf.L2socket(iface=iface, *args, **kargs), x, inter=inter, loop=loop, count=count, verbose=verbose, realtime=realtime)
@conf.commands.register
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None):
"""Send packets at layer 2 using tcpreplay for performance
pps: packets per second
mbps: Mbits per second
realtime: replay packets at their original timing, multiplied by this value
loop: number of times to process the packet list
file_cache: cache packets in RAM instead of reading from disk at each iteration
iface: output interface """
if iface is None:
iface = conf.iface
argv = [conf.prog.tcpreplay, "--intf1=%s" % iface ]
if pps is not None:
argv.append("--pps=%i" % pps)
elif mbps is not None:
argv.append("--mbps=%i" % mbps)
elif realtime is not None:
argv.append("--multiplier=%i" % realtime)
else:
argv.append("--topspeed")
if loop:
argv.append("--loop=%i" % loop)
if file_cache:
argv.append("--enable-file-cache")
f = get_temp_file()
argv.append(f)
wrpcap(f, x)
try:
subprocess.check_call(argv)
except KeyboardInterrupt:
log_interactive.info("Interrupted by user")
except Exception,e:
log_interactive.error("while trying to exec [%s]: %s" % (argv[0],e))
finally:
os.unlink(f)
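# Usage sketch (illustrative; assumes tcpreplay is installed and that
# conf.prog.tcpreplay points at it, in an interactive Scapy session):
#
#   >>> sendpfast(rdpcap("capture.pcap"), pps=1000, loop=1)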
@conf.commands.register
def sr(x,filter=None, iface=None, nofilter=0, *args,**kargs):
"""Send and receive packets at layer 3
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
s = conf.L3socket(filter=filter, iface=iface, nofilter=nofilter)
a,b=sndrcv(s,x,*args,**kargs)
s.close()
return a,b
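# Usage sketch (illustrative; assumes a Scapy session with IP/ICMP
# loaded):
#
#   >>> ans, unans = sr(IP(dst="192.0.2.1")/ICMP(), timeout=2)
#   >>> ans.summary(lambda (s,r): r.sprintf("%IP.src% answered"))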
@conf.commands.register
def sr1(x,filter=None,iface=None, nofilter=0, *args,**kargs):
"""Send packets at layer 3 and return only the first answer
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
s=conf.L3socket(filter=filter, nofilter=nofilter, iface=iface)
a,b=sndrcv(s,x,*args,**kargs)
s.close()
if len(a) > 0:
return a[0][1]
else:
return None
@conf.commands.register
def srp(x,iface=None, iface_hint=None, filter=None, nofilter=0, type=ETH_P_ALL, *args,**kargs):
"""Send and receive packets at layer 2
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
s = conf.L2socket(iface=iface, filter=filter, nofilter=nofilter, type=type)
a,b=sndrcv(s ,x,*args,**kargs)
s.close()
return a,b
@conf.commands.register
def srp1(*args,**kargs):
"""Send and receive packets at layer 2 and return only the first answer
nofilter: put 1 to avoid use of bpf filters
retry: if positive, how many times to resend unanswered packets
if negative, how many times to retry when no more packets are answered
timeout: how much time to wait after the last packet has been sent
verbose: set verbosity level
multi: whether to accept multiple answers for the same stimulus
filter: provide a BPF filter
iface: work only on the given interface"""
if not kargs.has_key("timeout"):
kargs["timeout"] = -1
a,b=srp(*args,**kargs)
if len(a) > 0:
return a[0][1]
else:
return None
def __sr_loop(srfunc, pkts, prn=lambda x:x[1].summary(), prnfail=lambda x:x.summary(), inter=1, timeout=None, count=None, verbose=None, store=1, *args, **kargs):
n = 0
r = 0
ct = conf.color_theme
if verbose is None:
verbose = conf.verb
parity = 0
ans=[]
unans=[]
if timeout is None:
timeout = min(2*inter, 5)
try:
while 1:
parity ^= 1
col = [ct.even,ct.odd][parity]
if count is not None:
if count == 0:
break
count -= 1
start = time.time()
print "\rsend...\r",
res = srfunc(pkts, timeout=timeout, verbose=0, chainCC=1, *args, **kargs)
n += len(res[0])+len(res[1])
r += len(res[0])
if verbose > 1 and prn and len(res[0]) > 0:
msg = "RECV %i:" % len(res[0])
print "\r"+ct.success(msg),
for p in res[0]:
print col(prn(p))
print " "*len(msg),
if verbose > 1 and prnfail and len(res[1]) > 0:
msg = "fail %i:" % len(res[1])
print "\r"+ct.fail(msg),
for p in res[1]:
print col(prnfail(p))
print " "*len(msg),
if verbose > 1 and not (prn or prnfail):
print "recv:%i fail:%i" % tuple(map(len, res[:2]))
if store:
ans += res[0]
unans += res[1]
end=time.time()
if end-start < inter:
time.sleep(inter+start-end)
except KeyboardInterrupt:
pass
if verbose and n>0:
print ct.normal("\nSent %i packets, received %i packets. %3.1f%% hits." % (n,r,100.0*r/n))
return plist.SndRcvList(ans),plist.PacketList(unans)
@conf.commands.register
def srloop(pkts, *args, **kargs):
"""Send a packet at layer 3 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(sr, pkts, *args, **kargs)
@conf.commands.register
def srploop(pkts, *args, **kargs):
"""Send a packet at layer 2 in loop and print the answer each time
srloop(pkts, [prn], [inter], [count], ...) --> None"""
return __sr_loop(srp, pkts, *args, **kargs)
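# Usage sketch (illustrative): probe a host once per second, five times,
# printing each answer (assumes IP/ICMP are loaded):
#
#   >>> srloop(IP(dst="192.0.2.1")/ICMP(), inter=1, count=5)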
def sndrcvflood(pks, pkt, prn=lambda (s,r):r.summary(), chainCC=0, store=1, unique=0):
if not isinstance(pkt, Gen):
pkt = SetGen(pkt)
tobesent = [p for p in pkt]
received = plist.SndRcvList()
seen = {}
hsent={}
for i in tobesent:
h = i.hashret()
if h in hsent:
hsent[h].append(i)
else:
hsent[h] = [i]
def send_in_loop(tobesent):
while 1:
for p in tobesent:
yield p
packets_to_send = send_in_loop(tobesent)
ssock = rsock = pks.fileno()
try:
while 1:
readyr,readys,_ = select([rsock],[ssock],[])
if ssock in readys:
pks.send(packets_to_send.next())
if rsock in readyr:
p = pks.recv(MTU)
if p is None:
continue
h = p.hashret()
if h in hsent:
hlst = hsent[h]
for i in hlst:
if p.answers(i):
res = prn((i,p))
if unique:
if res in seen:
continue
seen[res] = None
if res is not None:
print res
if store:
received.append((i,p))
except KeyboardInterrupt:
if chainCC:
raise
return received
@conf.commands.register
def srflood(x,filter=None, iface=None, nofilter=None, *args,**kargs):
"""Flood and receive packets at layer 3
prn: function applied to packets received. Ret val is printed if not None
store: if 1 (default), store answers and return them
unique: only consider packets whose print is unique
nofilter: put 1 to avoid use of bpf filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
s = conf.L3socket(filter=filter, iface=iface, nofilter=nofilter)
r=sndrcvflood(s,x,*args,**kargs)
s.close()
return r
@conf.commands.register
def srpflood(x,filter=None, iface=None, iface_hint=None, nofilter=None, *args,**kargs):
"""Flood and receive packets at layer 2
prn: function applied to packets received. Ret val is printed if not None
store: if 1 (default), store answers and return them
unique: only consider packets whose print is unique
nofilter: put 1 to avoid use of bpf filters
filter: provide a BPF filter
iface: listen answers only on the given interface"""
if iface is None and iface_hint is not None:
iface = conf.route.route(iface_hint)[0]
s = conf.L2socket(filter=filter, iface=iface, nofilter=nofilter)
r=sndrcvflood(s,x,*args,**kargs)
s.close()
return r
@conf.commands.register
def sniff(count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None,
opened_socket=None, stop_filter=None, *arg, **karg):
"""Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args) -> list of packets
count: number of packets to capture. 0 means infinity
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned,
it is displayed.
ex: prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
offline: pcap file to read packets from, instead of sniffing them
timeout: stop sniffing after a given time (default: None)
L2socket: use the provided L2socket
opened_socket: provide an object ready to use .recv() on
stop_filter: python function applied to each packet to determine
if we have to stop the capture after this packet
ex: stop_filter = lambda x: x.haslayer(TCP)
"""
c = 0
if opened_socket is not None:
s = opened_socket
else:
if offline is None:
if L2socket is None:
L2socket = conf.L2listen
s = L2socket(type=ETH_P_ALL, *arg, **karg)
else:
s = PcapReader(offline)
lst = []
if timeout is not None:
stoptime = time.time()+timeout
remain = None
try:
while 1:
if timeout is not None:
remain = stoptime-time.time()
if remain <= 0:
break
sel = select([s],[],[],remain)
if s in sel[0]:
p = s.recv(MTU)
if p is None:
break
if lfilter and not lfilter(p):
continue
if store:
lst.append(p)
c += 1
if prn:
r = prn(p)
if r is not None:
print r
if stop_filter and stop_filter(p):
break
if count > 0 and c >= count:
break
except KeyboardInterrupt:
pass
if opened_socket is None:
s.close()
return plist.PacketList(lst,"Sniffed")
@conf.commands.register
def bridge_and_sniff(if1, if2, count=0, store=1, offline=None, prn = None, lfilter=None, L2socket=None, timeout=None,
stop_filter=None, *args, **kargs):
"""Forward traffic between two interfaces and sniff packets exchanged
bridge_and_sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2Socket args) -> list of packets
count: number of packets to capture. 0 means infinity
store: whether to store sniffed packets or discard them
prn: function to apply to each packet. If something is returned,
it is displayed.
ex: prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
if further action may be done
ex: lfilter = lambda x: x.haslayer(Padding)
timeout: stop sniffing after a given time (default: None)
L2socket: use the provided L2socket
stop_filter: python function applied to each packet to determine
if we have to stop the capture after this packet
ex: stop_filter = lambda x: x.haslayer(TCP)
"""
c = 0
if L2socket is None:
L2socket = conf.L2socket
s1 = L2socket(iface=if1)
s2 = L2socket(iface=if2)
peerof={s1:s2,s2:s1}
label={s1:if1, s2:if2}
lst = []
if timeout is not None:
stoptime = time.time()+timeout
remain = None
try:
while True:
if timeout is not None:
remain = stoptime-time.time()
if remain <= 0:
break
ins,outs,errs = select([s1,s2],[],[], remain)
for s in ins:
p = s.recv()
if p is not None:
peerof[s].send(p.original)
if lfilter and not lfilter(p):
continue
if store:
p.sniffed_on = label[s]
lst.append(p)
c += 1
if prn:
r = prn(p)
if r is not None:
print "%s: %s" % (label[s],r)
if stop_filter and stop_filter(p):
break
if count > 0 and c >= count:
break
except KeyboardInterrupt:
pass
finally:
return plist.PacketList(lst,"Sniffed")
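# Usage sketch (illustrative; interface names are placeholders):
#
#   >>> bridge_and_sniff("eth0", "eth1", prn=lambda p: p.summary())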
@conf.commands.register
def tshark(*args,**kargs):
"""Sniff packets and print them calling pkt.show(), a bit like text wireshark"""
sniff(prn=lambda x: x.display(),*args,**kargs)
|
sdu-cfei/modest-py
|
refs/heads/master
|
examples/simple/simple.py
|
1
|
"""
Copyright (c) 2017, University of Southern Denmark
All rights reserved.
This code is licensed under BSD 2-clause license.
See LICENSE file in the project root for license terms.
"""
import logging
import json
import os
import pandas as pd
from modestpy import Estimation
from modestpy.utilities.sysarch import get_sys_arch
logging.basicConfig(level='INFO', filename='test.log', filemode='w')
if __name__ == "__main__":
"""
This file is supposed to be run from the root directory.
Otherwise the paths have to be corrected.
"""
# DATA PREPARATION ==============================================
# Resources
platform = get_sys_arch()
assert platform, 'Unsupported platform type!'
fmu_file = 'Simple2R1C_ic_' + platform + '.fmu'
fmu_path = os.path.join('examples', 'simple', 'resources', fmu_file)
inp_path = os.path.join('examples', 'simple', 'resources', 'inputs.csv')
ideal_path = os.path.join('examples', 'simple', 'resources', 'result.csv')
est_path = os.path.join('examples', 'simple', 'resources', 'est.json')
known_path = os.path.join('examples', 'simple', 'resources', 'known.json')
# Working directory
workdir = os.path.join('examples', 'simple', 'workdir')
if not os.path.exists(workdir):
os.mkdir(workdir)
assert os.path.exists(workdir), "Work directory does not exist"
# Load inputs
inp = pd.read_csv(inp_path).set_index('time')
# Load measurements (ideal results)
ideal = pd.read_csv(ideal_path).set_index('time')
# Load definition of estimated parameters (name, initial value, bounds)
with open(est_path) as f:
est = json.load(f)
# Load definition of known parameters (name, value)
with open(known_path) as f:
known = json.load(f)
# MODEL IDENTIFICATION ==========================================
# Comparing parallel GA against GA using different population sizes
case_workdir = os.path.join(workdir, "modestga")
if not os.path.exists(case_workdir):
os.mkdir(case_workdir)
session = Estimation(
case_workdir,
fmu_path,
inp,
known,
est,
ideal,
lp_n=1,
lp_len=50000,
lp_frame=(0, 50000),
vp=(0, 50000),
ic_param={'Tstart': 'T'},
methods=('MODESTGA',),
modestga_opts={
'generations': 20, # Max. number of generations
'pop_size': 60, # Population size
'trm_size': 7, # Tournament size
'tol': 1e-3, # Absolute tolerance
'workers': 3 # Number of CPUs to use
},
ftype='RMSE',
default_log=True,
logfile='simple.log'
)
estimates = session.estimate()
err, res = session.validate()
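# Note (hedged): per the modestpy documentation, estimate() returns the
# estimated parameter values and validate() returns validation error
# metrics together with the corresponding simulation result.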
|
mytliulei/DCNRobotInstallPackages
|
refs/heads/master
|
windows/win32/scapy-2/scapy/layers/dot11.py
|
15
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Wireless LAN according to IEEE 802.11.
"""
import re,struct
from scapy.packet import *
from scapy.fields import *
from scapy.plist import PacketList
from scapy.layers.l2 import *
try:
from Crypto.Cipher import ARC4
except ImportError:
log_loading.info("Can't import python Crypto lib. Won't be able to decrypt WEP.")
### Fields
class Dot11AddrMACField(MACField):
def is_applicable(self, pkt):
return 1
def addfield(self, pkt, s, val):
if self.is_applicable(pkt):
return MACField.addfield(self, pkt, s, val)
else:
return s
def getfield(self, pkt, s):
if self.is_applicable(pkt):
return MACField.getfield(self, pkt, s)
else:
return s,None
class Dot11Addr2MACField(Dot11AddrMACField):
def is_applicable(self, pkt):
if pkt.type == 1:
return pkt.subtype in [ 0xb, 0xa, 0xe, 0xf] # RTS, PS-Poll, CF-End, CF-End+CF-Ack
return 1
class Dot11Addr3MACField(Dot11AddrMACField):
def is_applicable(self, pkt):
if pkt.type in [0,2]:
return 1
return 0
class Dot11Addr4MACField(Dot11AddrMACField):
def is_applicable(self, pkt):
if pkt.type == 2:
if pkt.FCfield & 0x3 == 0x3: # To-DS and From-DS are set
return 1
return 0
### Layers
class PrismHeader(Packet):
""" iwpriv wlan0 monitor 3 """
name = "Prism header"
fields_desc = [ LEIntField("msgcode",68),
LEIntField("len",144),
StrFixedLenField("dev","",16),
LEIntField("hosttime_did",0),
LEShortField("hosttime_status",0),
LEShortField("hosttime_len",0),
LEIntField("hosttime",0),
LEIntField("mactime_did",0),
LEShortField("mactime_status",0),
LEShortField("mactime_len",0),
LEIntField("mactime",0),
LEIntField("channel_did",0),
LEShortField("channel_status",0),
LEShortField("channel_len",0),
LEIntField("channel",0),
LEIntField("rssi_did",0),
LEShortField("rssi_status",0),
LEShortField("rssi_len",0),
LEIntField("rssi",0),
LEIntField("sq_did",0),
LEShortField("sq_status",0),
LEShortField("sq_len",0),
LEIntField("sq",0),
LEIntField("signal_did",0),
LEShortField("signal_status",0),
LEShortField("signal_len",0),
LESignedIntField("signal",0),
LEIntField("noise_did",0),
LEShortField("noise_status",0),
LEShortField("noise_len",0),
LEIntField("noise",0),
LEIntField("rate_did",0),
LEShortField("rate_status",0),
LEShortField("rate_len",0),
LEIntField("rate",0),
LEIntField("istx_did",0),
LEShortField("istx_status",0),
LEShortField("istx_len",0),
LEIntField("istx",0),
LEIntField("frmlen_did",0),
LEShortField("frmlen_status",0),
LEShortField("frmlen_len",0),
LEIntField("frmlen",0),
]
def answers(self, other):
if isinstance(other, PrismHeader):
return self.payload.answers(other.payload)
else:
return self.payload.answers(other)
class RadioTap(Packet):
name = "RadioTap dummy"
fields_desc = [ ByteField('version', 0),
ByteField('pad', 0),
FieldLenField('len', None, 'notdecoded', '<H', adjust=lambda pkt,x:x+8),
FlagsField('present', None, -32, ['TSFT','Flags','Rate','Channel','FHSS','dBm_AntSignal',
'dBm_AntNoise','Lock_Quality','TX_Attenuation','dB_TX_Attenuation',
'dBm_TX_Power', 'Antenna', 'dB_AntSignal', 'dB_AntNoise',
'b14', 'b15','b16','b17','b18','b19','b20','b21','b22','b23',
'b24','b25','b26','b27','b28','b29','b30','Ext']),
StrLenField('notdecoded', "", length_from= lambda pkt:pkt.len-8) ]
class PPI(Packet):
name = "Per-Packet Information header (partial)"
fields_desc = [ ByteField("version", 0),
ByteField("flags", 0),
FieldLenField("len", None, fmt="<H", length_of="fields", adjust=lambda pkt,x:x+8),
LEIntField("dlt", 0),
StrLenField("notdecoded", "", length_from = lambda pkt:pkt.len-8)
]
class Dot11SCField(LEShortField):
def is_applicable(self, pkt):
return pkt.type != 1 # control frame
def addfield(self, pkt, s, val):
if self.is_applicable(pkt):
return LEShortField.addfield(self, pkt, s, val)
else:
return s
def getfield(self, pkt, s):
if self.is_applicable(pkt):
return LEShortField.getfield(self, pkt, s)
else:
return s,None
class Dot11(Packet):
name = "802.11"
fields_desc = [
BitField("subtype", 0, 4),
BitEnumField("type", 0, 2, ["Management", "Control", "Data", "Reserved"]),
BitField("proto", 0, 2),
FlagsField("FCfield", 0, 8, ["to-DS", "from-DS", "MF", "retry", "pw-mgt", "MD", "wep", "order"]),
ShortField("ID",0),
MACField("addr1", ETHER_ANY),
Dot11Addr2MACField("addr2", ETHER_ANY),
Dot11Addr3MACField("addr3", ETHER_ANY),
Dot11SCField("SC", 0),
Dot11Addr4MACField("addr4", ETHER_ANY)
]
def mysummary(self):
return self.sprintf("802.11 %Dot11.type% %Dot11.subtype% %Dot11.addr2% > %Dot11.addr1%")
def guess_payload_class(self, payload):
if self.type == 0x02 and (self.subtype >= 0x08 and self.subtype <=0xF and self.subtype != 0xD):
return Dot11QoS
elif self.FCfield & 0x40:
return Dot11WEP
else:
return Packet.guess_payload_class(self, payload)
def answers(self, other):
if isinstance(other,Dot11):
if self.type == 0: # management
if self.addr1.lower() != other.addr2.lower(): # check resp DA w/ req SA
return 0
if (other.subtype,self.subtype) in [(0,1),(2,3),(4,5)]:
return 1
if self.subtype == other.subtype == 11: # auth
return self.payload.answers(other.payload)
elif self.type == 1: # control
return 0
elif self.type == 2: # data
return self.payload.answers(other.payload)
elif self.type == 3: # reserved
return 0
return 0
def unwep(self, key=None, warn=1):
if self.FCfield & 0x40 == 0:
if warn:
warning("No WEP to remove")
return
if isinstance(self.payload.payload, NoPayload):
if key or conf.wepkey:
self.payload.decrypt(key)
if isinstance(self.payload.payload, NoPayload):
if warn:
warning("Dot11 can't be decrypted. Check conf.wepkey.")
return
self.FCfield &= ~0x40
self.payload=self.payload.payload
class Dot11QoS(Packet):
name = "802.11 QoS"
fields_desc = [ BitField("TID",None,4),
BitField("EOSP",None,1),
BitField("Ack Policy",None,2),
BitField("Reserved",None,1),
ByteField("TXOP",None) ]
def guess_payload_class(self, payload):
if isinstance(self.underlayer, Dot11):
if self.underlayer.FCfield & 0x40:
return Dot11WEP
return Packet.guess_payload_class(self, payload)
capability_list = [ "res8", "res9", "short-slot", "res11",
"res12", "DSSS-OFDM", "res14", "res15",
"ESS", "IBSS", "CFP", "CFP-req",
"privacy", "short-preamble", "PBCC", "agility"]
reason_code = {0:"reserved",1:"unspec", 2:"auth-expired",
3:"deauth-ST-leaving",
4:"inactivity", 5:"AP-full", 6:"class2-from-nonauth",
7:"class3-from-nonass", 8:"disas-ST-leaving",
9:"ST-not-auth"}
status_code = {0:"success", 1:"failure", 10:"cannot-support-all-cap",
11:"inexist-asso", 12:"asso-denied", 13:"algo-unsupported",
14:"bad-seq-num", 15:"challenge-failure",
16:"timeout", 17:"AP-full",18:"rate-unsupported" }
class Dot11Beacon(Packet):
name = "802.11 Beacon"
fields_desc = [ LELongField("timestamp", 0),
LEShortField("beacon_interval", 0x0064),
FlagsField("cap", 0, 16, capability_list) ]
class Dot11Elt(Packet):
name = "802.11 Information Element"
fields_desc = [ ByteEnumField("ID", 0, {0:"SSID", 1:"Rates", 2: "FHset", 3:"DSset", 4:"CFset", 5:"TIM", 6:"IBSSset", 16:"challenge",
42:"ERPinfo", 46:"QoS Capability", 47:"ERPinfo", 48:"RSNinfo", 50:"ESRates",221:"vendor",68:"reserved"}),
FieldLenField("len", None, "info", "B"),
StrLenField("info", "", length_from=lambda x:x.len) ]
def mysummary(self):
if self.ID == 0:
return "SSID=%s"%repr(self.info),[Dot11]
else:
return ""
class Dot11ATIM(Packet):
name = "802.11 ATIM"
class Dot11Disas(Packet):
name = "802.11 Disassociation"
fields_desc = [ LEShortEnumField("reason", 1, reason_code) ]
class Dot11AssoReq(Packet):
name = "802.11 Association Request"
fields_desc = [ FlagsField("cap", 0, 16, capability_list),
LEShortField("listen_interval", 0x00c8) ]
class Dot11AssoResp(Packet):
name = "802.11 Association Response"
fields_desc = [ FlagsField("cap", 0, 16, capability_list),
LEShortField("status", 0),
LEShortField("AID", 0) ]
class Dot11ReassoReq(Packet):
name = "802.11 Reassociation Request"
fields_desc = [ FlagsField("cap", 0, 16, capability_list),
LEShortField("listen_interval", 0x00c8),
MACField("current_AP", ETHER_ANY) ]
class Dot11ReassoResp(Dot11AssoResp):
name = "802.11 Reassociation Response"
class Dot11ProbeReq(Packet):
name = "802.11 Probe Request"
class Dot11ProbeResp(Packet):
name = "802.11 Probe Response"
fields_desc = [ LELongField("timestamp", 0),
LEShortField("beacon_interval", 0x0064),
FlagsField("cap", 0, 16, capability_list) ]
class Dot11Auth(Packet):
name = "802.11 Authentication"
fields_desc = [ LEShortEnumField("algo", 0, ["open", "sharedkey"]),
LEShortField("seqnum", 0),
LEShortEnumField("status", 0, status_code) ]
def answers(self, other):
if self.seqnum == other.seqnum+1:
return 1
return 0
class Dot11Deauth(Packet):
name = "802.11 Deauthentication"
fields_desc = [ LEShortEnumField("reason", 1, reason_code) ]
class Dot11WEP(Packet):
name = "802.11 WEP packet"
fields_desc = [ StrFixedLenField("iv", "\0\0\0", 3),
ByteField("keyid", 0),
StrField("wepdata",None,remain=4),
IntField("icv",None) ]
def post_dissect(self, s):
# self.icv, = struct.unpack("!I",self.wepdata[-4:])
# self.wepdata = self.wepdata[:-4]
self.decrypt()
def build_payload(self):
if self.wepdata is None:
return Packet.build_payload(self)
return ""
def post_build(self, p, pay):
if self.wepdata is None:
key = conf.wepkey
if key:
if self.icv is None:
pay += struct.pack("<I",crc32(pay))
icv = ""
else:
icv = p[4:8]
c = ARC4.new(self.iv+key)
p = p[:4]+c.encrypt(pay)+icv
else:
warning("No WEP key set (conf.wepkey).. strange results expected..")
return p
def decrypt(self,key=None):
if key is None:
key = conf.wepkey
if key:
c = ARC4.new(self.iv+key)
self.add_payload(LLC(c.decrypt(self.wepdata)))
bind_layers( PrismHeader, Dot11, )
bind_layers( RadioTap, Dot11, )
bind_layers( PPI, Dot11, dlt=105)
bind_layers( Dot11, LLC, type=2)
bind_layers( Dot11QoS, LLC, )
bind_layers( Dot11, Dot11AssoReq, subtype=0, type=0)
bind_layers( Dot11, Dot11AssoResp, subtype=1, type=0)
bind_layers( Dot11, Dot11ReassoReq, subtype=2, type=0)
bind_layers( Dot11, Dot11ReassoResp, subtype=3, type=0)
bind_layers( Dot11, Dot11ProbeReq, subtype=4, type=0)
bind_layers( Dot11, Dot11ProbeResp, subtype=5, type=0)
bind_layers( Dot11, Dot11Beacon, subtype=8, type=0)
bind_layers( Dot11, Dot11ATIM, subtype=9, type=0)
bind_layers( Dot11, Dot11Disas, subtype=10, type=0)
bind_layers( Dot11, Dot11Auth, subtype=11, type=0)
bind_layers( Dot11, Dot11Deauth, subtype=12, type=0)
bind_layers( Dot11Beacon, Dot11Elt, )
bind_layers( Dot11AssoReq, Dot11Elt, )
bind_layers( Dot11AssoResp, Dot11Elt, )
bind_layers( Dot11ReassoReq, Dot11Elt, )
bind_layers( Dot11ReassoResp, Dot11Elt, )
bind_layers( Dot11ProbeReq, Dot11Elt, )
bind_layers( Dot11ProbeResp, Dot11Elt, )
bind_layers( Dot11Auth, Dot11Elt, )
bind_layers( Dot11Elt, Dot11Elt, )
conf.l2types.register(105, Dot11)
conf.l2types.register_num2layer(801, Dot11)
conf.l2types.register(119, PrismHeader)
conf.l2types.register_num2layer(802, PrismHeader)
conf.l2types.register(127, RadioTap)
conf.l2types.register(0xc0, PPI)
conf.l2types.register_num2layer(803, RadioTap)
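# Usage sketch (illustrative; MAC addresses and interface below are
# placeholders): with the bindings above, a beacon frame can be built
# layer by layer in a Scapy session:
#
#   >>> dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff",
#   ...               addr2="00:11:22:33:44:55", addr3="00:11:22:33:44:55")
#   >>> frame = RadioTap()/dot11/Dot11Beacon(cap="ESS")/Dot11Elt(ID="SSID", info="test-net")
#   >>> sendp(frame, iface="wlan0mon", count=3)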
class WiFi_am(AnsweringMachine):
"""Before using this, initialize "iffrom" and "ifto" interfaces:
iwconfig iffrom mode monitor
iwpriv orig_ifto hostapd 1
ifconfig ifto up
note: if ifto=wlan0ap then orig_ifto=wlan0
note: ifto and iffrom must be set on the same channel
ex:
ifconfig eth1 up
iwconfig eth1 mode monitor
iwconfig eth1 channel 11
iwpriv wlan0 hostapd 1
ifconfig wlan0ap up
iwconfig wlan0 channel 11
iwconfig wlan0 essid dontexist
iwconfig wlan0 mode managed
"""
function_name = "airpwn"
filter = None
def parse_options(self, iffrom, ifto, replace, pattern="", ignorepattern=""):
self.iffrom = iffrom
self.ifto = ifto
# store on the instance: is_request() and make_reply() read these attributes
self.replace = replace
self.ptrn = re.compile(pattern)
self.iptrn = re.compile(ignorepattern)
def is_request(self, pkt):
if not isinstance(pkt,Dot11):
return 0
if not pkt.FCfield & 1:
return 0
if not pkt.haslayer(TCP):
return 0
ip = pkt.getlayer(IP)
tcp = pkt.getlayer(TCP)
pay = str(tcp.payload)
if not self.ptrn.match(pay):
return 0
if self.iptrn.match(pay):
return 0
def make_reply(self, p):
ip = p.getlayer(IP)
tcp = p.getlayer(TCP)
pay = str(tcp.payload)
del(p.payload.payload.payload)
p.FCfield="from-DS"
p.addr1,p.addr2 = p.addr2,p.addr1
p /= IP(src=ip.dst,dst=ip.src)
p /= TCP(sport=tcp.dport, dport=tcp.sport,
seq=tcp.ack, ack=tcp.seq+len(pay),
flags="PA")
q = p.copy()
p /= self.replace
q.ID += 1
q.getlayer(TCP).flags="RA"
q.getlayer(TCP).seq += len(self.replace)
return [p,q]
def print_reply(self, req, reply):
# reply is the [p,q] list built by make_reply()
print reply[0].sprintf("Sent %IP.src%:%IP.sport% > %IP.dst%:%TCP.dport%")
def send_reply(self, reply):
sendp(reply, iface=self.ifto, **self.optsend)
def sniff(self):
sniff(iface=self.iffrom, **self.optsniff)
plst=[]
def get_toDS():
global plst
while 1:
p,=sniff(iface="eth1",count=1)
if not isinstance(p,Dot11):
continue
if p.FCfield & 1:
plst.append(p)
print "."
# if not ifto.endswith("ap"):
# print "iwpriv %s hostapd 1" % ifto
# os.system("iwpriv %s hostapd 1" % ifto)
# ifto += "ap"
#
# os.system("iwconfig %s mode monitor" % iffrom)
#
def airpwn(iffrom, ifto, replace, pattern="", ignorepattern=""):
"""Before using this, initialize "iffrom" and "ifto" interfaces:
iwconfig iffrom mode monitor
iwpriv orig_ifto hostapd 1
ifconfig ifto up
note: if ifto=wlan0ap then orig_ifto=wlan0
note: ifto and iffrom must be set on the same channel
ex:
ifconfig eth1 up
iwconfig eth1 mode monitor
iwconfig eth1 channel 11
iwpriv wlan0 hostapd 1
ifconfig wlan0ap up
iwconfig wlan0 channel 11
iwconfig wlan0 essid dontexist
iwconfig wlan0 mode managed
"""
ptrn = re.compile(pattern)
iptrn = re.compile(ignorepattern)
def do_airpwn(p, ifto=ifto, replace=replace, ptrn=ptrn, iptrn=iptrn):
if not isinstance(p,Dot11):
return
if not p.FCfield & 1:
return
if not p.haslayer(TCP):
return
ip = p.getlayer(IP)
tcp = p.getlayer(TCP)
pay = str(tcp.payload)
# print "got tcp"
if not ptrn.match(pay):
return
# print "match 1"
if iptrn.match(pay):
return
# print "match 2"
del(p.payload.payload.payload)
p.FCfield="from-DS"
p.addr1,p.addr2 = p.addr2,p.addr1
q = p.copy()
p /= IP(src=ip.dst,dst=ip.src)
p /= TCP(sport=tcp.dport, dport=tcp.sport,
seq=tcp.ack, ack=tcp.seq+len(pay),
flags="PA")
q = p.copy()
p /= replace
q.ID += 1
q.getlayer(TCP).flags="RA"
q.getlayer(TCP).seq+=len(replace)
sendp([p,q], iface=ifto, verbose=0)
# print "send",repr(p)
# print "send",repr(q)
print p.sprintf("Sent %IP.src%:%IP.sport% > %IP.dst%:%TCP.dport%")
sniff(iface=iffrom,prn=do_airpwn)
conf.stats_dot11_protocols += [Dot11WEP, Dot11Beacon, ]
class Dot11PacketList(PacketList):
def __init__(self, res=None, name="Dot11List", stats=None):
if stats is None:
stats = conf.stats_dot11_protocols
PacketList.__init__(self, res, name, stats)
def toEthernet(self):
data = map(lambda x:x.getlayer(Dot11), filter(lambda x : x.haslayer(Dot11) and x.type == 2, self.res))
r2 = []
for p in data:
q = p.copy()
q.unwep()
r2.append(Ether()/q.payload.payload.payload) #Dot11/LLC/SNAP/IP
return PacketList(r2,name="Ether from %s"%self.listname)
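# Usage sketch (illustrative; interface name is a placeholder):
#
#   >>> dlst = Dot11PacketList(sniff(iface="wlan0mon", count=100))
#   >>> elst = dlst.toEthernet()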
|
Distrotech/scons
|
refs/heads/distrotech-scons
|
test/LINK/LINKCOM.py
|
5
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ability to configure the $LINKCOM construction variable.
"""
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write('mylink.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in [l for l in infile.readlines() if l != '/*link*/\n']:
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(LINKCOM = r'%(_python_)s mylink.py $TARGET $SOURCES',
OBJSUFFIX = '.obj',
PROGSUFFIX = '.exe')
env.Program(target = 'test1', source = ['test1.obj', 'test2.obj'])
""" % locals())
test.write('test1.obj', """\
test1.obj
/*link*/
""")
test.write('test2.obj', """\
test2.obj
/*link*/
""")
test.run()
test.must_match('test1.exe', "test1.obj\ntest2.obj\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
deadman96385/Deathly_Kernel_D2
|
refs/heads/5.1
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
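# Note (hedged): this script is normally driven by perf's Makefile, which
# exports PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP and runs something like:
#
#   python2 util/setup.py --quiet build_ext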
|
rossburton/yocto-autobuilder
|
refs/heads/ross
|
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_schedulers_base.py
|
4
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import sys
import mock
import twisted
from twisted.trial import unittest
from twisted.internet import defer
from buildbot import config
from buildbot.schedulers import base
from buildbot.process import properties
from buildbot.test.util import scheduler
from buildbot.test.fake import fakedb
class BaseScheduler(scheduler.SchedulerMixin, unittest.TestCase):
OBJECTID = 19
def setUp(self):
self.setUpScheduler()
def tearDown(self):
self.tearDownScheduler()
def makeScheduler(self, name='testsched', builderNames=['a', 'b'],
properties={}, codebases = {'':{}}):
sched = self.attachScheduler(
base.BaseScheduler(name=name, builderNames=builderNames,
properties=properties, codebases=codebases),
self.OBJECTID)
return sched
# tests
def test_constructor_builderNames(self):
self.assertRaises(config.ConfigErrors,
lambda : self.makeScheduler(builderNames='xxx'))
def test_constructor_builderNames_unicode(self):
self.makeScheduler(builderNames=[u'a'])
def test_constructor_codebases_valid(self):
codebases = {"codebase1": {"repository":"", "branch":"", "revision":""}}
self.makeScheduler(codebases = codebases)
def test_constructor_codebases_invalid(self):
# scheduler only accepts codebases with at least repository set
codebases = {"codebase1": {"dictionary":"", "that":"", "fails":""}}
self.assertRaises(config.ConfigErrors,
lambda : self.makeScheduler(codebases = codebases))
def test_listBuilderNames(self):
sched = self.makeScheduler(builderNames=['x', 'y'])
self.assertEqual(sched.listBuilderNames(), ['x', 'y'])
def test_getPendingBuildTimes(self):
sched = self.makeScheduler()
self.assertEqual(sched.getPendingBuildTimes(), [])
def test_addBuildsetForLatest_defaults(self):
sched = self.makeScheduler(name='testy', builderNames=['x'],
properties=dict(a='b'))
d = sched.addBuildsetForLatest(reason='because')
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='because', brids=brids,
external_idstring=None,
properties=[ ('a', ('b', 'Scheduler')),
('scheduler', ('testy', 'Scheduler')), ],
sourcestampsetid=100),
{'':
dict(branch=None, revision=None, repository='', codebase='',
project='', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_startConsumingChanges_fileIsImportant_check(self):
sched = self.makeScheduler()
self.assertRaises(AssertionError,
lambda : sched.startConsumingChanges(fileIsImportant="maybe"))
def do_test_change_consumption(self, kwargs, change, expected_result):
# (expected_result should be True (important), False (unimportant), or
# None (ignore the change))
sched = self.makeScheduler()
sched.startService()
change_received = [ None ]
def gotChange(got_change, got_important):
self.assertEqual(got_change, change)
change_received[0] = got_important
return defer.succeed(None)
sched.gotChange = gotChange
d = sched.startConsumingChanges(**kwargs)
def test(_):
# check that it registered a callback
callbacks = self.master.getSubscriptionCallbacks()
self.assertNotEqual(callbacks['changes'], None)
# invoke the callback with the change, and check the result
callbacks['changes'](change)
self.assertEqual(change_received[0], expected_result)
d.addCallback(test)
d.addCallback(lambda _ : sched.stopService())
return d
def test_change_consumption_defaults(self):
# all changes are important by default
return self.do_test_change_consumption(
dict(),
self.makeFakeChange(),
True)
def test_change_consumption_fileIsImportant_True(self):
return self.do_test_change_consumption(
dict(fileIsImportant=lambda c : True),
self.makeFakeChange(),
True)
def test_change_consumption_fileIsImportant_False(self):
return self.do_test_change_consumption(
dict(fileIsImportant=lambda c : False),
self.makeFakeChange(),
False)
def test_change_consumption_fileIsImportant_exception(self):
d = self.do_test_change_consumption(
dict(fileIsImportant=lambda c : 1/0),
self.makeFakeChange(),
None)
def check_err(_):
self.assertEqual(1, len(self.flushLoggedErrors(ZeroDivisionError)))
d.addCallback(check_err)
return d
if twisted.version.major <= 9 and sys.version_info[:2] >= (2,7):
test_change_consumption_fileIsImportant_exception.skip = \
"flushLoggedErrors does not work correctly on 9.0.0 and earlier with Python-2.7"
def test_change_consumption_change_filter_True(self):
cf = mock.Mock()
cf.filter_change = lambda c : True
return self.do_test_change_consumption(
dict(change_filter=cf),
self.makeFakeChange(),
True)
def test_change_consumption_change_filter_False(self):
cf = mock.Mock()
cf.filter_change = lambda c : False
return self.do_test_change_consumption(
dict(change_filter=cf),
self.makeFakeChange(),
None)
def test_change_consumption_fileIsImportant_False_onlyImportant(self):
return self.do_test_change_consumption(
dict(fileIsImportant=lambda c : False, onlyImportant=True),
self.makeFakeChange(),
None)
def test_change_consumption_fileIsImportant_True_onlyImportant(self):
return self.do_test_change_consumption(
dict(fileIsImportant=lambda c : True, onlyImportant=True),
self.makeFakeChange(),
True)
def test_addBuildsetForLatest_args(self):
sched = self.makeScheduler(name='xyz', builderNames=['y', 'z'])
d = sched.addBuildsetForLatest(reason='cuz', branch='default',
project='myp', repository='hgmo',
external_idstring='try_1234')
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='cuz', brids=brids,
external_idstring='try_1234',
properties=[('scheduler', ('xyz', 'Scheduler'))],
sourcestampsetid=100),
{'':
dict(branch='default', revision=None, repository='hgmo',
codebase='', project='myp', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_addBuildsetForLatest_properties(self):
props = properties.Properties(xxx="yyy")
sched = self.makeScheduler(name='xyz', builderNames=['y', 'z'])
d = sched.addBuildsetForLatest(reason='cuz', branch='default',
project='myp', repository='hgmo',
external_idstring='try_1234', properties=props)
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='cuz', brids=brids,
external_idstring='try_1234',
properties=[
('scheduler', ('xyz', 'Scheduler')),
('xxx', ('yyy', 'TEST')),
],
sourcestampsetid=100),
{'':
dict(branch='default', revision=None, repository='hgmo',
codebase='', project='myp', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_addBuildsetForLatest_builderNames(self):
sched = self.makeScheduler(name='xyz', builderNames=['y', 'z'])
d = sched.addBuildsetForLatest(reason='cuz', branch='default',
builderNames=['a', 'b'])
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='cuz', brids=brids,
external_idstring=None,
properties=[('scheduler', ('xyz', 'Scheduler'))],
sourcestampsetid=100),
{'':
dict(branch='default', revision=None, repository='',
codebase='', project='', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_addBuildsetForChanges_one_change(self):
sched = self.makeScheduler(name='n', builderNames=['b'])
self.db.insertTestData([
fakedb.Change(changeid=13, branch='trunk', revision='9283',
repository='svn://...', codebase='',
project='world-domination'),
])
d = sched.addBuildsetForChanges(reason='power', changeids=[13])
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='power', brids=brids,
external_idstring=None,
properties=[('scheduler', ('n', 'Scheduler'))],
sourcestampsetid=100),
{'':
dict(branch='trunk', repository='svn://...', codebase='',
changeids=set([13]), project='world-domination',
revision='9283', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_addBuildsetForChanges_properties(self):
props = properties.Properties(xxx="yyy")
sched = self.makeScheduler(name='n', builderNames=['c'])
self.db.insertTestData([
fakedb.Change(changeid=14, branch='default', revision='123:abc',
repository='', project='', codebase=''),
])
d = sched.addBuildsetForChanges(reason='downstream', changeids=[14],
properties=props)
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='downstream', brids=brids,
external_idstring=None,
properties=[
('scheduler', ('n', 'Scheduler')),
('xxx', ('yyy', 'TEST')),
],
sourcestampsetid=100),
{'':
dict(branch='default', revision='123:abc', repository='',
project='', changeids=set([14]), sourcestampsetid=100,
codebase='')
})
d.addCallback(check)
return d
def test_addBuildsetForChanges_one_change_builderNames(self):
sched = self.makeScheduler(name='n', builderNames=['b'])
self.db.insertTestData([
fakedb.Change(changeid=13, branch='trunk', revision='9283',
codebase='', repository='svn://...',
project='world-domination'),
])
d = sched.addBuildsetForChanges(reason='power', changeids=[13],
builderNames=['p'])
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='power', brids=brids,
external_idstring=None,
properties=[('scheduler', ('n', 'Scheduler'))],
sourcestampsetid=100),
{'':
dict(branch='trunk', repository='svn://...', codebase='',
changeids=set([13]), project='world-domination',
revision='9283', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_addBuildsetForChanges_multiple_changes_no_codebaseGenerator(self):
# This is a test for backwards compatibility
# Changes from different repositories come together in one build
sched = self.makeScheduler(name='n', builderNames=['b', 'c'])
# No codebaseGenerator means all changes have codebase == ''
self.db.insertTestData([
fakedb.Change(changeid=13, branch='trunk', revision='9283',
repository='svn://A..', project='knitting',
codebase=''),
fakedb.Change(changeid=14, branch='devel', revision='9284',
repository='svn://B..', project='making-tea',
codebase=''),
fakedb.Change(changeid=15, branch='trunk', revision='9285',
repository='svn://C..', project='world-domination',
codebase=''),
])
# note that the changeids are given out of order here; it should still
# use the most recent
d = sched.addBuildsetForChanges(reason='power', changeids=[14, 15, 13])
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='power', brids=brids,
external_idstring=None,
properties=[('scheduler', ('n', 'Scheduler'))],
sourcestampsetid=100),
{'':
dict(branch='trunk', repository='svn://C..', codebase='',
changeids=set([13,14,15]), project='world-domination',
revision='9285', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_addBuildsetForChanges_multiple_changes_single_codebase(self):
sched = self.makeScheduler(name='n', builderNames=['b', 'c'])
self.db.insertTestData([
fakedb.Change(changeid=13, branch='trunk', revision='9283',
repository='svn://...', project='knitting',
codebase=''),
fakedb.Change(changeid=14, branch='devel', revision='9284',
repository='svn://...', project='making-tea',
codebase=''),
fakedb.Change(changeid=15, branch='trunk', revision='9285',
repository='svn://...', project='world-domination',
codebase=''),
])
# note that the changeids are given out of order here; it should still
# use the most recent
d = sched.addBuildsetForChanges(reason='power', changeids=[14, 15, 13])
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='power', brids=brids,
external_idstring=None,
properties=[('scheduler', ('n', 'Scheduler'))],
sourcestampsetid=100),
{'':
dict(branch='trunk', repository='svn://...', codebase='',
changeids=set([13,14,15]), project='world-domination',
revision='9285', sourcestampsetid=100)
})
d.addCallback(check)
return d
def test_addBuildsetForChanges_codebases_set_multiple_changed_codebases(self):
codebases = { 'cbA':dict(
repository='svn://A..',
branch='stable',
revision='13579'),
'cbB':dict(
repository='svn://B..',
branch='stable',
revision='24680'),
'cbC':dict(
repository='svn://C..',
branch='stable',
revision='12345'),
'cbD':dict(
repository='svn://D..')}
# Scheduler gets codebases that can be used to create extra sourcestamps
# for repositories that have no changes
sched = self.makeScheduler(name='n', builderNames=['b', 'c'],
codebases=codebases)
self.db.insertTestData([
fakedb.Change(changeid=12, branch='trunk', revision='9282',
repository='svn://A..', project='playing',
codebase='cbA'),
fakedb.Change(changeid=13, branch='trunk', revision='9283',
repository='svn://A..', project='knitting',
codebase='cbA'),
fakedb.Change(changeid=14, branch='develop', revision='9284',
repository='svn://A..', project='making-tea',
codebase='cbA'),
fakedb.Change(changeid=15, branch='trunk', revision='8085',
repository='svn://B..', project='boxing',
codebase='cbB'),
fakedb.Change(changeid=16, branch='develop', revision='8086',
repository='svn://B..', project='playing soccer',
codebase='cbB'),
fakedb.Change(changeid=17, branch='develop', revision='8087',
repository='svn://B..', project='swimming',
codebase='cbB'),
])
# note that the changeids are given out of order here; it should still
# use the most recent for each codebase
d = sched.addBuildsetForChanges(reason='power', changeids=[14, 12, 17, 16, 13, 15])
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='power', brids=brids,
external_idstring=None,
properties=[('scheduler', ('n', 'Scheduler'))],
sourcestampsetid=100),
{'cbA':
dict(branch='develop', repository='svn://A..', codebase='cbA',
changeids=set([12,13,14]), project='making-tea',
revision='9284', sourcestampsetid=100),
'cbB':
dict(branch='develop', repository='svn://B..', codebase='cbB',
changeids=set([15,16,17]), project='swimming',
revision='8087', sourcestampsetid=100),
'cbC':
dict(branch='stable', repository='svn://C..', codebase='cbC',
project='', revision='12345', sourcestampsetid=100),
'cbD':
dict(branch=None, repository='svn://D..', codebase='cbD',
project='', revision=None, sourcestampsetid=100),
})
d.addCallback(check)
return d
def test_addBuildsetForSourceStamp(self):
sched = self.makeScheduler(name='n', builderNames=['b'])
d = self.db.insertTestData([
fakedb.SourceStampSet(id=1091),
fakedb.SourceStamp(id=91, sourcestampsetid=1091, branch='fixins',
revision='abc', patchid=None, repository='r',
project='p'),
])
d.addCallback(lambda _ :
sched.addBuildsetForSourceStamp(reason='whynot', setid=1091))
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='whynot', brids=brids,
external_idstring=None,
properties=[('scheduler', ('n', 'Scheduler'))],
sourcestampsetid=1091),
{'':
dict(branch='fixins', revision='abc', repository='r',
project='p', codebase='', sourcestampsetid=1091)
})
d.addCallback(check)
return d
def test_addBuildsetForSourceStamp_properties(self):
props = properties.Properties(xxx="yyy")
sched = self.makeScheduler(name='n', builderNames=['b'])
d = self.db.insertTestData([
fakedb.SourceStampSet(id=1091),
fakedb.SourceStamp(id=91, sourcestampsetid=1091, branch='fixins',
revision='abc', patchid=None, repository='r', codebase='cb',
project='p'),
])
d.addCallback(lambda _ :
sched.addBuildsetForSourceStamp(reason='whynot', setid=1091,
properties=props))
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='whynot', brids=brids,
external_idstring=None,
properties=[
('scheduler', ('n', 'Scheduler')),
('xxx', ('yyy', 'TEST')),
],
sourcestampsetid=1091),
{'cb':
dict(branch='fixins', revision='abc', repository='r',
codebase='cb', project='p', sourcestampsetid=1091)
})
d.addCallback(check)
return d
def test_addBuildsetForSourceStamp_builderNames(self):
sched = self.makeScheduler(name='n', builderNames=['k'])
d = self.db.insertTestData([
fakedb.SourceStampSet(id=1091),
fakedb.SourceStamp(id=91, sourcestampsetid=1091, branch='fixins',
revision='abc', patchid=None, repository='r', codebase='cb',
project='p'),
])
d.addCallback(lambda _ :
sched.addBuildsetForSourceStamp(reason='whynot', setid = 1091,
builderNames=['a', 'b']))
def check((bsid,brids)):
self.db.buildsets.assertBuildset(bsid,
dict(reason='whynot', brids=brids,
external_idstring=None,
properties=[('scheduler', ('n', 'Scheduler'))],
sourcestampsetid=1091),
{'cb':
dict(branch='fixins', revision='abc', repository='r',
codebase='cb', project='p', sourcestampsetid=1091)
})
d.addCallback(check)
return d
def test_findNewSchedulerInstance(self):
sched = self.makeScheduler(name='n', builderNames=['k'])
new_sched = self.makeScheduler(name='n', builderNames=['l'])
distractor = self.makeScheduler(name='x', builderNames=['l'])
config = mock.Mock()
config.schedulers = dict(dist=distractor, n=new_sched)
self.assertIdentical(sched.findNewSchedulerInstance(config), new_sched)
|
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/modules/messaging/rabbitmq/rabbitmq_binding.py
|
3
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: rabbitmq_binding
author: Manuel Sousa (@manuel-sousa)
version_added: "2.0"
short_description: Manage RabbitMQ bindings
description:
- This module uses RabbitMQ REST APIs to create / delete bindings.
requirements: [ "requests >= 1.0.0" ]
options:
state:
description:
- Whether the bindings should be present or absent.
choices: [ "present", "absent" ]
default: present
name:
description:
- source exchange to create binding on.
required: true
aliases: [ "src", "source" ]
destination:
description:
- destination exchange or queue for the binding.
required: true
aliases: [ "dst", "dest" ]
destination_type:
description:
- Either queue or exchange.
required: true
choices: [ "queue", "exchange" ]
aliases: [ "type", "dest_type" ]
routing_key:
description:
- routing key for the binding.
default: "#"
arguments:
description:
      - Extra arguments for the binding. If defined, this argument is a key/value dictionary.
required: false
default: {}
extends_documentation_fragment:
- rabbitmq
'''
EXAMPLES = '''
# Bind myQueue to directExchange with routing key info
- rabbitmq_binding:
name: directExchange
destination: myQueue
type: queue
routing_key: info
# Bind directExchange to topicExchange with routing key *.info
- rabbitmq_binding:
name: topicExchange
    destination: directExchange
type: exchange
routing_key: '*.info'
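# An illustrative sketch (the exchange/queue names here are hypothetical):
# bind with extra binding arguments, e.g. a headers-exchange match rule
- rabbitmq_binding:
    name: headersExchange
    destination: myQueue
    type: queue
    arguments:
      x-match: all
      format: json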
'''
import json
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.module_utils.six.moves.urllib import parse as urllib_parse
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rabbitmq import rabbitmq_argument_spec
class RabbitMqBinding(object):
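    """Manage a single RabbitMQ binding through the management HTTP API."""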
def __init__(self, module):
"""
:param module:
"""
self.module = module
self.name = self.module.params['name']
self.login_user = self.module.params['login_user']
self.login_password = self.module.params['login_password']
self.login_host = self.module.params['login_host']
self.login_port = self.module.params['login_port']
self.login_protocol = self.module.params['login_protocol']
self.vhost = self.module.params['vhost']
self.destination = self.module.params['destination']
self.destination_type = 'q' if self.module.params['destination_type'] == 'queue' else 'e'
self.routing_key = self.module.params['routing_key']
self.arguments = self.module.params['arguments']
self.verify = self.module.params['cacert']
self.cert = self.module.params['cert']
self.key = self.module.params['key']
self.base_url = '{0}://{1}:{2}/api/bindings'.format(self.login_protocol,
self.login_host,
self.login_port)
self.url = '{0}/{1}/e/{2}/{3}/{4}/{5}'.format(self.base_url,
urllib_parse.quote(self.vhost, safe=''),
urllib_parse.quote(self.name, safe=''),
self.destination_type,
urllib_parse.quote(self.destination, safe=''),
urllib_parse.quote(self.routing_key))
self.result = {
'changed': False,
'name': self.module.params['name'],
}
self.authentication = (
self.login_user,
self.login_password
)
self.request = requests
self.http_check_states = {
200: True,
404: False,
}
self.http_actionable_states = {
201: True,
204: True,
}
self.api_result = self.request.get(self.url, auth=self.authentication)
def run(self):
"""
:return:
"""
self.check_presence()
self.check_mode()
self.action_mode()
def check_presence(self):
"""
:return:
"""
if self.check_should_throw_fail():
self.fail()
def change_required(self):
"""
:return:
"""
if self.module.params['state'] == 'present':
if not self.is_present():
return True
elif self.module.params['state'] == 'absent':
if self.is_present():
return True
return False
def is_present(self):
"""
:return:
"""
return self.http_check_states.get(self.api_result.status_code, False)
def check_mode(self):
"""
:return:
"""
if self.module.check_mode:
result = self.result
result['changed'] = self.change_required()
result['details'] = self.api_result.json() if self.is_present() else self.api_result.text
result['arguments'] = self.module.params['arguments']
self.module.exit_json(**result)
def check_reply_is_correct(self):
"""
:return:
"""
if self.api_result.status_code in self.http_check_states:
return True
return False
def check_should_throw_fail(self):
"""
:return:
"""
if not self.is_present():
if not self.check_reply_is_correct():
return True
return False
def action_mode(self):
"""
:return:
"""
result = self.result
if self.change_required():
if self.module.params['state'] == 'present':
self.create()
if self.module.params['state'] == 'absent':
self.remove()
if self.action_should_throw_fail():
self.fail()
result['changed'] = True
result['destination'] = self.module.params['destination']
self.module.exit_json(**result)
else:
result['changed'] = False
self.module.exit_json(**result)
def action_reply_is_correct(self):
"""
:return:
"""
if self.api_result.status_code in self.http_actionable_states:
return True
return False
def action_should_throw_fail(self):
"""
:return:
"""
if not self.action_reply_is_correct():
return True
return False
def create(self):
"""
:return:
"""
self.url = '{0}/{1}/e/{2}/{3}/{4}'.format(self.base_url,
urllib_parse.quote(self.vhost, safe=''),
urllib_parse.quote(self.name, safe=''),
self.destination_type,
urllib_parse.quote(self.destination, safe=''))
self.api_result = self.request.post(self.url,
auth=self.authentication,
verify=self.verify,
cert=(self.cert, self.key),
headers={"content-type": "application/json"},
data=json.dumps({
'routing_key': self.routing_key,
'arguments': self.arguments
}))
def remove(self):
"""
:return:
"""
self.api_result = self.request.delete(self.url, auth=self.authentication)
def fail(self):
"""
:return:
"""
self.module.fail_json(
msg="Unexpected reply from API",
status=self.api_result.status_code,
details=self.api_result.text
)
def main():
argument_spec = rabbitmq_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, aliases=["src", "source"], type='str'),
destination=dict(required=True, aliases=["dst", "dest"], type='str'),
destination_type=dict(required=True, aliases=["type", "dest_type"], choices=["queue", "exchange"],
type='str'),
routing_key=dict(default='#', type='str'),
arguments=dict(default=dict(), type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_REQUESTS:
module.fail_json(msg="requests library is required for this module. To install, use `pip install requests`")
RabbitMqBinding(module).run()
if __name__ == '__main__':
main()
|
jiangzhixiao/odoo
|
refs/heads/8.0
|
addons/base_report_designer/plugin/openerp_report_designer/bin/script/About.py
|
293
|
#########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
from com.sun.star.task import XJobExecutor
if __name__ != 'package':
from lib.gui import *
class About(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
self.win = DBModalDialog(60, 50, 175, 115, "About Odoo Report Designer")
fdBigFont = createUnoStruct("com.sun.star.awt.FontDescriptor")
fdBigFont.Width = 20
fdBigFont.Height = 25
fdBigFont.Weight = 120
        fdBigFont.Family = 3
oLabelTitle1 = self.win.addFixedText("lblTitle1", 1, 1, 35, 30)
oLabelTitle1.Model.TextColor = 16056320
oLabelTitle1.Model.FontDescriptor = fdBigFont
oLabelTitle1.Model.FontRelief = 1
oLabelTitle1.Text = "Open"
oLabelTitle2 = self.win.addFixedText("lblTitle2", 35, 1, 30, 30)
oLabelTitle2.Model.TextColor = 1
oLabelTitle2.Model.FontDescriptor = fdBigFont
oLabelTitle2.Model.FontRelief = 1
oLabelTitle2.Text = "ERP"
oLabelProdDesc = self.win.addFixedText("lblProdDesc", 1, 30, 173, 75)
oLabelProdDesc.Model.TextColor = 1
fdBigFont.Width = 10
fdBigFont.Height = 11
fdBigFont.Weight = 76
oLabelProdDesc.Model.FontDescriptor = fdBigFont
oLabelProdDesc.Model.Align = 1
oLabelProdDesc.Model.FontRelief = 1
oLabelProdDesc.Model.MultiLine = True
oLabelProdDesc.Text = "This package helps you to create or modify\nreports in Odoo. Once connected to the\nserver, you can design your template of reports\nusing fields and expressions and browsing the\ncomplete structure of Odoo object database."
oLabelFooter = self.win.addFixedText("lblFooter", -1, -1, 173, 25)
oLabelFooter.Model.TextColor = 255
#oLabelFooter.Model.BackgroundColor = 1
oLabelFooter.Model.Border = 2
oLabelFooter.Model.BorderColor = 255
fdBigFont.Width = 8
fdBigFont.Height = 9
fdBigFont.Weight = 100
oLabelFooter.Model.FontDescriptor = fdBigFont
oLabelFooter.Model.Align = 1
oLabelFooter.Model.FontRelief = 1
oLabelFooter.Model.MultiLine = True
sMessage = "Odoo Report Designer v1.0 \nCopyright 2007-TODAY Tiny sprl \nThis product is free software, under the GNU Affero General Public License."
oLabelFooter.Text = sMessage
self.win.doModalDialog("",None)
if __name__<>"package" and __name__=="__main__":
About(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( About, "org.openoffice.openerp.report.about", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sebalix/OpenUpgrade
|
refs/heads/8.0
|
addons/l10n_br/__init__.py
|
430
|
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import account
|
SuriyaaKudoIsc/olympia
|
refs/heads/master
|
apps/addons/tests/test_buttons.py
|
2
|
from datetime import datetime
import json
import jinja2
import jingo
from mock import patch, Mock
from nose.tools import eq_
from pyquery import PyQuery
import amo
import amo.models
import amo.tests
from amo.urlresolvers import reverse
from addons.buttons import install_button, _install_button, big_install_button
from addons.models import Addon
def setup():
jingo.load_helpers()
class ButtonTest(amo.tests.TestCase):
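    """Shared fixtures: a mocked add-on, version, file and request context."""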
def setUp(self):
super(ButtonTest, self).setUp()
self.addon = Mock()
self.addon.is_featured.return_value = False
self.addon.is_unreviewed.return_value = False
self.addon.has_eula = False
self.addon.status = amo.STATUS_PUBLIC
self.addon.id = 2
self.addon.slug = 'slug'
self.addon.type = amo.ADDON_EXTENSION
self.addon.privacy_policy = None
self.addon.backup_version = None
self.version = v = Mock()
v.is_compatible = False
v.compat_override_app_versions.return_value = []
v.is_unreviewed = False
v.is_beta = False
v.is_lite = False
v.version = 'v1'
self.addon.current_version = v
self.file = self.get_file(amo.PLATFORM_ALL.id)
v.all_files = [self.file]
self.platforms = amo.PLATFORM_MAC.id, amo.PLATFORM_LINUX.id
self.platform_files = map(self.get_file, self.platforms)
self.request = Mock()
self.request.APP = amo.FIREFOX
# Make GET mutable.
self.request.GET = {}
user = self.request.user
user.get_and_delete_messages.__dict__['__name__'] = 'f'
user.is_authenticated.return_value = False
self.context = {
'APP': amo.FIREFOX,
'LANG': 'en-US',
'request': self.request,
}
@patch('addons.buttons.jingo.env.get_template')
def get_button(self, t_mock, **kwargs):
"""Proxy for calling install_button."""
template_mock = Mock()
t_mock.return_value = template_mock
if 'show_backup' not in kwargs:
kwargs['show_backup'] = True
install_button(self.context, self.addon, **kwargs)
        # Extract the button from the context of the first render call.
return template_mock.render.call_args[0][0]['button']
def render(self, **kwargs):
return PyQuery(_install_button(self.context, self.addon, **kwargs))
def get_file(self, platform):
file = Mock()
file.platform = platform
file.latest_xpi_url.return_value = 'xpi.latest'
file.get_url_path.return_value = 'xpi.url'
file.eula_url.return_value = 'eula.url'
file.status = amo.STATUS_PUBLIC
file.strict_compatibility = False
file.binary_components = False
return file
class TestButtonSetup(ButtonTest):
"""Tests for setup code inside install_button."""
def test_src(self):
"""src defaults to '', and can be in the context or request.GET."""
b = self.get_button()
eq_(b.src, '')
self.request.GET['src'] = 'zz'
b = self.get_button()
eq_(b.src, 'zz')
self.context['src'] = 'yy'
b = self.get_button()
eq_(b.src, 'yy')
b = self.get_button(src='xx')
eq_(b.src, 'xx')
def test_collection(self):
"""Same as src; looking for collection{,_id,_uuid} in request."""
b = self.get_button()
eq_(b.collection, None)
self.request.GET['collection_uuid'] = 'aa'
b = self.get_button()
eq_(b.collection, 'aa')
self.request.GET['collection_id'] = 'bb'
b = self.get_button()
eq_(b.collection, 'bb')
self.request.GET['collection'] = 'cc'
b = self.get_button()
eq_(b.collection, 'cc')
self.context['collection'] = 'dd'
b = self.get_button()
eq_(b.collection, 'dd')
b = self.get_button(collection='ee')
eq_(b.collection, 'ee')
c = Mock()
c.uuid = 'ff'
b = self.get_button(collection=c)
eq_(b.collection, 'ff')
class TestButton(ButtonTest):
"""Tests for the InstallButton class."""
def test_plain_button(self):
b = self.get_button()
eq_(b.button_class, ['download'])
eq_(b.install_class, [])
eq_(b.install_text, '')
eq_(b.version, self.version)
assert b.latest
assert not b.featured
assert not b.unreviewed
assert not b.show_contrib
assert not b.show_warning
def test_show_contrib(self):
b = self.get_button()
assert not b.show_contrib
self.addon.takes_contributions = True
b = self.get_button()
assert not b.show_contrib
self.addon.annoying = amo.CONTRIB_ROADBLOCK
b = self.get_button()
assert b.show_contrib
eq_(b.button_class, ['contrib', 'go'])
eq_(b.install_class, ['contrib'])
def test_show_warning(self):
b = self.get_button()
assert not b.show_warning
self.addon.is_unreviewed.return_value = True
b = self.get_button()
assert b.show_warning
b = self.get_button(show_warning=False)
assert not b.show_warning
def test_featured(self):
self.addon.is_featured.return_value = True
b = self.get_button()
assert b.featured
eq_(b.button_class, ['download'])
eq_(b.install_class, ['featuredaddon'])
eq_(b.install_text, 'Featured')
def test_unreviewed(self):
# Throw featured in there to make sure it's ignored.
self.addon.is_featured.return_value = True
self.addon.is_unreviewed.return_value = True
b = self.get_button()
assert not b.featured
assert b.unreviewed
eq_(b.button_class, ['download', 'caution'])
eq_(b.install_class, ['unreviewed'])
eq_(b.install_text, 'Not Reviewed')
def test_beta(self):
# Throw featured in there to make sure it's ignored.
self.addon.is_featured.return_value = True
self.version.is_beta = True
b = self.get_button()
assert not b.featured
assert b.is_beta
eq_(b.button_class, ['download', 'caution'])
eq_(b.install_class, ['unreviewed', 'beta'])
eq_(b.install_text, 'Not Reviewed')
def test_lite(self):
# Throw featured in there to make sure it's ignored.
self.addon.is_featured.return_value = True
self.addon.status = amo.STATUS_LITE
self.version.is_lite = True
b = self.get_button()
assert not b.featured
assert b.lite
eq_(b.button_class, ['caution'])
eq_(b.install_class, ['lite'])
eq_(b.install_text, 'Experimental')
def test_lite_and_nominated(self):
# Throw featured in there to make sure it's ignored.
self.addon.is_featured.return_value = True
self.addon.status = amo.STATUS_LITE_AND_NOMINATED
self.version.is_lite = True
b = self.get_button()
assert not b.featured
assert b.lite
eq_(b.button_class, ['caution'])
eq_(b.install_class, ['lite'])
eq_(b.install_text, 'Experimental')
def test_lite_unreviewed_version(self):
# Throw featured in there to make sure it's ignored.
self.addon.is_featured.return_value = True
self.addon.status = amo.STATUS_LITE
self.version.is_unreviewed = True
self.version.is_lite = False
b = self.get_button()
assert not b.featured
assert not b.lite
assert b.unreviewed
eq_(b.button_class, ['download', 'caution'])
eq_(b.install_class, ['unreviewed'])
eq_(b.install_text, 'Not Reviewed')
def test_public_with_lite_version(self):
# Throw featured in there to make sure it's ignored.
self.addon.is_featured.return_value = True
self.addon.status = amo.STATUS_PUBLIC
self.version.is_lite = True
b = self.get_button()
assert not b.featured
assert b.lite
eq_(b.button_class, ['caution'])
eq_(b.install_class, ['lite'])
eq_(b.install_text, 'Experimental')
def test_attrs(self):
b = self.get_button()
eq_(b.attrs(), {})
self.addon.takes_contributions = True
self.addon.annoying = amo.CONTRIB_AFTER
self.addon.type = amo.ADDON_SEARCH
b = self.get_button()
eq_(b.attrs(), {'data-after': 'contrib', 'data-search': 'true'})
def test_after_no_show_contrib(self):
self.addon.takes_contributions = True
self.addon.annoying = amo.CONTRIB_AFTER
b = self.get_button()
eq_(b.attrs(), {'data-after': 'contrib'})
b = self.get_button(show_contrib=False)
eq_(b.attrs(), {})
def test_file_details(self):
file = self.get_file(amo.PLATFORM_ALL.id)
self.addon.meet_the_dev_url.return_value = 'meet.dev'
b = self.get_button()
# Normal.
text, url, os = b.file_details(file)
eq_(text, 'Download Now')
eq_(url, 'xpi.latest')
eq_(os, None)
# Platformer.
file = self.get_file(amo.PLATFORM_MAC.id)
_, _, os = b.file_details(file)
eq_(os, amo.PLATFORM_MAC)
# Not the latest version.
b.latest = False
_, url, _ = b.file_details(file)
eq_(url, 'xpi.url')
# Contribution roadblock.
b.show_contrib = True
text, url, _ = b.file_details(file)
eq_(text, 'Continue to Download →')
eq_(url,
'/en-US/firefox/addon/2/contribute/roadblock/?version=v1')
def test_file_details_unreviewed(self):
file = self.get_file(amo.PLATFORM_ALL.id)
file.status = amo.STATUS_UNREVIEWED
b = self.get_button()
_, url, _ = b.file_details(file)
eq_(url, 'xpi.url')
def test_fix_link(self):
b = self.get_button()
eq_(b.fix_link('foo.com'), 'foo.com')
b = self.get_button(src='src')
eq_(b.fix_link('foo.com'), 'foo.com?src=src')
collection = Mock()
collection.uuid = 'xxx'
b = self.get_button(collection=collection)
eq_(b.fix_link('foo.com'), 'foo.com?collection_id=xxx')
b = self.get_button(collection=collection, src='src')
self.assertUrlEqual(b.fix_link('foo.com'),
'foo.com?src=src&collection_id=xxx')
def test_links(self):
self.version.all_files = self.platform_files
links = self.get_button().links()
eq_(len(links), len(self.platforms))
eq_([x.os.id for x in links], list(self.platforms))
def test_link_with_invalid_file(self):
self.version.all_files = self.platform_files
self.version.all_files[0].status = amo.STATUS_DISABLED
links = self.get_button().links()
expected_platforms = self.platforms[1:]
eq_(len(links), len(expected_platforms))
eq_([x.os.id for x in links], list(expected_platforms))
def test_no_version(self):
self.addon.current_version = None
eq_(self.get_button().links(), [])
class TestButtonHtml(ButtonTest):
def test_basics(self):
a = self.addon
a.id = '12345'
a.icon_url = 'icon url'
a.meet_the_dev_url.return_value = 'meet.dev'
a.name = 'addon name'
self.file.hash = 'file hash'
doc = self.render()
eq_(doc('.install-shell').length, 1)
        eq_(doc('.install').length, 1)
eq_(doc('.install-button').length, 1)
eq_(doc('.button').length, 1)
install = doc('.install')
eq_('12345', install.attr('data-addon'))
eq_('icon url', install.attr('data-icon'))
eq_('meet.dev', install.attr('data-developers'))
eq_(reverse('addons.versions', args=[a.id]),
install.attr('data-versions'))
eq_('addon name', install.attr('data-name'))
eq_(None, install.attr('data-min'))
eq_(None, install.attr('data-max'))
button = doc('.button')
eq_(['button', 'download'], button.attr('class').split())
eq_('file hash', button.attr('data-hash'))
eq_('xpi.latest', button.attr('href'))
def test_featured(self):
self.addon.is_featured.return_value = True
doc = self.render()
eq_(['install', 'featuredaddon'],
doc('.install').attr('class').split())
eq_('Featured', doc('.install strong:last-child').text())
def test_unreviewed(self):
self.addon.status = amo.STATUS_UNREVIEWED
self.addon.is_unreviewed.return_value = True
self.addon.get_url_path.return_value = 'addon.url'
button = self.render()('.button.caution')
eq_('addon.url', button.attr('href'))
eq_('xpi.url', button.attr('data-realurl'))
def test_detailed_privacy_policy(self):
policy = self.render(detailed=True)('.install-shell .privacy-policy')
eq_(policy.length, 0)
self.addon.privacy_policy = 'privacy!'
policy = self.render(detailed=True)('.install-shell .privacy-policy')
eq_(policy.text(), 'View privacy policy')
def test_unreviewed_detailed_warning(self):
self.addon.status = amo.STATUS_UNREVIEWED
self.addon.is_unreviewed.return_value = True
self.addon.get_url_path.return_value = 'addon.url'
warning = self.render(detailed=True)('.install-shell .warning')
eq_(warning.text(),
'This add-on has not been reviewed by Mozilla. Learn more')
def test_lite_detailed_warning(self):
self.addon.status = amo.STATUS_LITE
self.version.is_lite = True
warning = self.render(detailed=True)('.install-shell .warning')
eq_(warning.text(),
'This add-on has been preliminarily reviewed by Mozilla.'
' Learn more')
def test_lite_and_nom_detailed_warning(self):
self.addon.status = amo.STATUS_LITE_AND_NOMINATED
self.version.is_lite = True
warning = self.render(detailed=True)('.install-shell .warning')
eq_(warning.text(),
'This add-on has been preliminarily reviewed by Mozilla.'
' Learn more')
def test_multi_platform(self):
self.version.all_files = self.platform_files
doc = self.render()
eq_(doc('.button').length, 2)
for platform in self.platforms:
os = doc('.button.%s .os' %
amo.PLATFORMS[platform].shortname).attr('data-os')
eq_(amo.PLATFORMS[platform].name, os)
def test_compatible_apps(self):
compat = Mock()
compat.min.version = 'min version'
compat.max.version = 'max version'
self.version.compatible_apps = {amo.FIREFOX: compat}
self.version.is_compatible = (True, [])
self.version.is_compatible_app.return_value = True
self.version.created = datetime.now()
install = self.render()('.install')
eq_('min version', install.attr('data-min'))
eq_('max version', install.attr('data-max'))
def test_contrib_text_with_platform(self):
self.version.all_files = self.platform_files
self.addon.takes_contributions = True
self.addon.annoying = amo.CONTRIB_ROADBLOCK
self.addon.meet_the_dev_url.return_value = 'addon.url'
doc = self.render()
eq_(doc('.contrib .os').text(), '')
@patch('addons.buttons._install_button')
@patch('addons.helpers.statusflags')
def test_big_install_button_xss(self, flags_mock, button_mock):
# Make sure there's no xss in statusflags.
button_mock.return_value = jinja2.Markup('<b>button</b>')
flags_mock.return_value = xss = '<script src="x.js">'
s = big_install_button(self.context, self.addon)
assert xss not in s, s
def test_d2c_attrs(self):
compat = Mock()
compat.min.version = '4.0'
compat.max.version = '12.0'
self.version.compatible_apps = {amo.FIREFOX: compat}
self.version.is_compatible = (True, [])
self.version.is_compatible_app.return_value = True
doc = self.render(impala=True)
install_shell = doc('.install-shell')
install = doc('.install')
eq_(install.attr('data-min'), '4.0')
eq_(install.attr('data-max'), '12.0')
eq_(install.attr('data-is-compatible'), 'true')
eq_(install.attr('data-is-compatible-app'), 'true')
eq_(install.attr('data-compat-overrides'), '[]')
eq_(install_shell.find('.d2c-reasons-popup ul li').length, 0)
# Also test overrides.
override = [('10.0a1', '10.*')]
self.version.compat_override_app_versions.return_value = override
install = self.render(impala=True)('.install')
eq_(install.attr('data-is-compatible'), 'true')
eq_(install.attr('data-compat-overrides'), json.dumps(override))
def test_d2c_attrs_binary(self):
compat = Mock()
compat.min.version = '4.0'
compat.max.version = '12.0'
self.version.compatible_apps = {amo.FIREFOX: compat}
self.version.is_compatible = (False, ['Add-on binary components.'])
self.version.is_compatible_app.return_value = True
doc = self.render(impala=True)
install_shell = doc('.install-shell')
install = doc('.install')
eq_(install.attr('data-min'), '4.0')
eq_(install.attr('data-max'), '12.0')
eq_(install.attr('data-is-compatible'), 'false')
eq_(install.attr('data-is-compatible-app'), 'true')
eq_(install.attr('data-compat-overrides'), '[]')
eq_(install_shell.find('.d2c-reasons-popup ul li').length, 1)
def test_d2c_attrs_strict_and_binary(self):
compat = Mock()
compat.min.version = '4.0'
compat.max.version = '12.0'
self.version.compatible_apps = {amo.FIREFOX: compat}
self.version.is_compatible = (False, ['strict', 'binary'])
self.version.is_compatible_app.return_value = True
doc = self.render(impala=True)
install_shell = doc('.install-shell')
install = doc('.install')
eq_(install.attr('data-min'), '4.0')
eq_(install.attr('data-max'), '12.0')
eq_(install.attr('data-is-compatible'), 'false')
eq_(install.attr('data-is-compatible-app'), 'true')
eq_(install.attr('data-compat-overrides'), '[]')
eq_(install_shell.find('.d2c-reasons-popup ul li').length, 2)
class TestBackup(ButtonTest):
def setUp(self):
super(TestBackup, self).setUp()
self.backup_version = Mock()
self.backup_version.is_unreviewed = False
self.backup_version.is_beta = False
self.backup_version.is_lite = False
self.backup_file = self.get_backup_file()
self.backup_version.all_files = [self.backup_file]
self.addon.backup_version = self.backup_version
def get_backup_file(self):
file = Mock()
file.platform = amo.PLATFORM_ALL.id
file.latest_xpi_url.return_value = 'xpi.backup'
file.get_url_path.return_value = 'xpi.backup.url'
file.status = amo.STATUS_PUBLIC
return file
def test_backup_appears(self):
doc = PyQuery(install_button(self.context, self.addon))
eq_(len(doc('.install-shell')), 2)
eq_(len(doc('.backup-button')), 1)
def test_backup_not_appears(self):
doc = PyQuery(install_button(self.context, self.addon,
show_backup=False))
eq_(len(doc('.install-shell')), 1)
def test_backup_version(self):
doc = PyQuery(install_button(self.context, self.addon))
eq_(doc('a')[1].get('href'), 'xpi.backup.url')
def test_big_install_button(self):
doc = PyQuery(big_install_button(self.context, self.addon))
eq_(len(doc('.install-shell')), 2)
eq_(len(doc('.backup-button')), 1)
def test_big_install_button_backup_version(self):
doc = PyQuery(big_install_button(self.context, self.addon))
eq_(doc('.backup-button a.download')[0].get('href'), 'xpi.backup.url')
class TestViews(amo.tests.TestCase):
fixtures = ['addons/eula+contrib-addon']
def test_eula_with_contrib_roadblock(self):
url = reverse('addons.eula', args=[11730, 53612])
response = self.client.get(url, follow=True)
doc = PyQuery(response.content)
eq_(doc('[data-search]').attr('class'), 'install ')
def test_versions_no_backup(self):
url = reverse('addons.versions', args=['a11730'])
response = self.client.get(url)
doc = PyQuery(response.content)
eq_(len(doc('.backup-button')), 0)
def test_details_no_backup(self):
url = reverse('addons.detail', args=['a11730'])
response = self.client.get(url)
doc = PyQuery(response.content)
eq_(len(doc('.backup-button')), 0)
def test_details_backup(self):
addon = Addon.objects.get(id=11730)
addon.update(_backup_version=addon._current_version)
url = reverse('addons.detail', args=['a11730'])
response = self.client.get(url)
doc = PyQuery(response.content)
eq_(len(doc('.backup-button')), 1)
|
CiscoSystems/vespa
|
refs/heads/master
|
neutron/plugins/nicira/dhcp_meta/__init__.py
|
34
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
MarkWh1te/xueqiu_predict
|
refs/heads/master
|
python3_env/lib/python3.4/site-packages/pygments/formatters/terminal256.py
|
23
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.terminal256
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for 256-color terminal output with ANSI sequences.
RGB-to-XTERM color conversion routines adapted from xterm256-conv
tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
by Wolfgang Frisch.
Formatter version 1.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# TODO:
# - Options to map style's bold/underline/italic/border attributes
#   to some ANSI attributes (something like 'italic=underline')
# - An option to output "style RGB to xterm RGB/index" conversion table
# - An option to indicate that we are running in "reverse background"
# xterm. This means that default colors are white-on-black, not
#   black-on-white, so colors like "white background" need to be converted
# to "white background, black foreground", etc...
import sys
from pygments.formatter import Formatter
__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
class EscapeSequence:
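    """Render fg/bg colors and bold/underline flags as ANSI SGR escape strings."""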
def __init__(self, fg=None, bg=None, bold=False, underline=False):
self.fg = fg
self.bg = bg
self.bold = bold
self.underline = underline
def escape(self, attrs):
if len(attrs):
return "\x1b[" + ";".join(attrs) + "m"
return ""
def color_string(self):
attrs = []
if self.fg is not None:
attrs.extend(("38", "5", "%i" % self.fg))
if self.bg is not None:
attrs.extend(("48", "5", "%i" % self.bg))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
return self.escape(attrs)
def true_color_string(self):
attrs = []
if self.fg:
attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
if self.bg:
attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
return self.escape(attrs)
def reset_string(self):
attrs = []
if self.fg is not None:
attrs.append("39")
if self.bg is not None:
attrs.append("49")
if self.bold or self.underline:
attrs.append("00")
return self.escape(attrs)
class Terminal256Formatter(Formatter):
"""
Format tokens with ANSI color sequences, for output in a 256-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
The formatter takes colors from a style defined by the `style` option
and converts them to nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).
.. versionadded:: 0.9
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
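    A minimal usage sketch (assuming a 256-color capable terminal)::
        from pygments import highlight
        from pygments.lexers import PythonLexer
        from pygments.formatters import Terminal256Formatter
        print(highlight('x = 42', PythonLexer(), Terminal256Formatter()))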
"""
name = 'Terminal256'
aliases = ['terminal256', 'console256', '256']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.xterm_colors = []
self.best_match = {}
self.style_string = {}
self.usebold = 'nobold' not in options
self.useunderline = 'nounderline' not in options
self._build_color_table() # build an RGB-to-256 color conversion table
self._setup_styles() # convert selected style's colors to term. colors
def _build_color_table(self):
# colors 0..15: 16 basic colors
self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
# colors 16..232: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(217):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
self.xterm_colors.append((r, g, b))
# colors 233..253: grayscale
for i in range(1, 22):
v = 8 + i * 10
self.xterm_colors.append((v, v, v))
def _closest_color(self, r, g, b):
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 0
for i in range(0, 254):
values = self.xterm_colors[i]
rd = r - values[0]
gd = g - values[1]
bd = b - values[2]
d = rd*rd + gd*gd + bd*bd
if d < distance:
match = i
distance = d
return match
def _color_index(self, color):
index = self.best_match.get(color, None)
if index is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
index = self._closest_color(r, g, b)
self.best_match[color] = index
return index
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
if ndef['color']:
escape.fg = self._color_index(ndef['color'])
if ndef['bgcolor']:
escape.bg = self._color_index(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
self.style_string[str(ttype)] = (escape.color_string(),
escape.reset_string())
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty() and \
sys.version_info < (3,):
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
not_found = True
while ttype and not_found:
try:
# outfile.write( "<" + str(ttype) + ">" )
on, off = self.style_string[str(ttype)]
# Like TerminalFormatter, add "reset colors" escape sequence
# on newline.
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(on + line + off)
outfile.write('\n')
if spl[-1]:
outfile.write(on + spl[-1] + off)
not_found = False
# outfile.write( '#' + str(ttype) + '#' )
except KeyError:
# ottype = ttype
ttype = ttype[:-1]
# outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
if not_found:
outfile.write(value)
class TerminalTrueColorFormatter(Terminal256Formatter):
r"""
Format tokens with ANSI color sequences, for output in a true-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
.. versionadded:: 2.1
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
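    A minimal usage sketch (assuming a terminal with true-color support)::
        from pygments import highlight
        from pygments.lexers import PythonLexer
        from pygments.formatters import TerminalTrueColorFormatter
        print(highlight('x = 42', PythonLexer(), TerminalTrueColorFormatter()))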
"""
name = 'TerminalTrueColor'
aliases = ['terminal16m', 'console16m', '16m']
filenames = []
def _build_color_table(self):
pass
def _color_tuple(self, color):
try:
rgb = int(str(color), 16)
except ValueError:
return None
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
return (r, g, b)
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
if ndef['color']:
escape.fg = self._color_tuple(ndef['color'])
if ndef['bgcolor']:
escape.bg = self._color_tuple(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
self.style_string[str(ttype)] = (escape.true_color_string(),
escape.reset_string())
|
manipopopo/tensorflow
|
refs/heads/master
|
tensorflow/python/estimator/training.py
|
2
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions related to train_and_evaluate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import estimator_export
_MAX_DELAY_SECS = 60
_DELAY_SECS_PER_WORKER = 5
_TF_CONFIG_ENV = 'TF_CONFIG'
_ENVIRONMENT_KEY = 'environment'
_ENVIRONMENT_GOOGLE_VALUE = 'google'
_TRAINER_JOBS = (run_config_lib.TaskType.CHIEF, run_config_lib.TaskType.MASTER,
run_config_lib.TaskType.WORKER)
def _validate_input_fn(input_fn):
"""Validates the `input_fn`."""
if not callable(input_fn):
raise TypeError('`input_fn` must be callable, given: {}'.format(input_fn))
def _validate_hooks(hooks):
"""Validates the `hooks`."""
hooks = tuple(hooks or [])
for hook in hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
'All hooks must be `SessionRunHook` instances, given: {}'.format(
hook))
return hooks
def _validate_exporters(exporters):
"""Validates `exporters` and returns them as a tuple."""
if not exporters:
return ()
if isinstance(exporters, exporter_lib.Exporter):
exporters = [exporters]
unique_names = [] # `Exporter`s should have unique names.
try:
for exporter in exporters:
if not isinstance(exporter, exporter_lib.Exporter):
# Error message will be printed out by the outer try/except.
raise TypeError
if not exporter.name:
full_list_of_names = [e.name for e in exporters]
raise ValueError('An Exporter cannot have a name that is `None` or'
' empty. All exporter names:'
' {}'.format(full_list_of_names))
if not isinstance(exporter.name, six.string_types):
raise ValueError('An Exporter must have a string name. Given: '
'{}'.format(type(exporter.name)))
if exporter.name in unique_names:
full_list_of_names = [e.name for e in exporters]
raise ValueError(
'`exporters` must have unique names. Such a name cannot be `None`.'
' All exporter names: {}'.format(full_list_of_names))
unique_names.append(exporter.name)
except TypeError:
# Two possibilities:
# - `exporters` is neither `Exporter` nor iterable. Python has
# raised a `TypeError` when iterating over `exporters`.
# - an `exporter` was None or not of type `Exporter`, so we raised a
# `TypeError`.
raise TypeError('`exporters` must be an Exporter,'
' an iterable of Exporter, or `None`,'
' found %s.' % exporters)
return tuple(exporters)
def _is_google_env():
"""Detects whether current environment is google."""
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV) or '{}')
if not tf_config:
logging.warn('TF_CONFIG should not be empty in distributed environment.')
return tf_config.get(_ENVIRONMENT_KEY) == _ENVIRONMENT_GOOGLE_VALUE
@estimator_export('estimator.TrainSpec')
class TrainSpec(
collections.namedtuple('TrainSpec', ['input_fn', 'max_steps', 'hooks'])):
"""Configuration for the "train" part for the `train_and_evaluate` call.
`TrainSpec` determines the input data for the training, as well as the
duration. Optional hooks run at various stages of training.
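  A minimal sketch (`my_train_input_fn` stands in for a real training input
  function):
  ```python
  train_spec = tf.estimator.TrainSpec(input_fn=my_train_input_fn,
                                      max_steps=1000)
  ```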
"""
def __new__(cls, input_fn, max_steps=None, hooks=None):
"""Creates a validated `TrainSpec` instance.
Args:
input_fn: A function that provides input data for training as minibatches.
See @{$premade_estimators#create_input_functions} for more
information. The function should construct and return one of
the following:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple (features, labels) with same constraints as below.
* A tuple (features, labels): Where features is a `Tensor` or a
dictionary of string feature name to `Tensor` and labels is a
`Tensor` or a dictionary of string label name to `Tensor`.
max_steps: Int. Positive number of total steps for which to train model.
If `None`, train forever. The training `input_fn` is not expected to
generate `OutOfRangeError` or `StopIteration` exceptions. See the
`train_and_evaluate` stop condition section for details.
hooks: Iterable of `tf.train.SessionRunHook` objects to run
on all workers (including chief) during training.
Returns:
A validated `TrainSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate max_steps.
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
# Validate hooks.
hooks = _validate_hooks(hooks)
return super(TrainSpec, cls).__new__(
cls, input_fn=input_fn, max_steps=max_steps, hooks=hooks)
@estimator_export('estimator.EvalSpec')
class EvalSpec(
collections.namedtuple('EvalSpec', [
'input_fn', 'steps', 'name', 'hooks', 'exporters', 'start_delay_secs',
'throttle_secs'
])):
"""Configuration for the "eval" part for the `train_and_evaluate` call.
`EvalSpec` combines details of evaluation of the trained model as well as its
export. Evaluation consists of computing metrics to judge the performance of
the trained model. Export writes out the trained model on to external
storage.
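  A minimal sketch (`my_eval_input_fn` stands in for a real eval input
  function):
  ```python
  eval_spec = tf.estimator.EvalSpec(
      input_fn=my_eval_input_fn,
      steps=None,           # evaluate until input_fn signals end-of-input
      start_delay_secs=60,  # wait a minute before the first evaluation
      throttle_secs=300)    # then evaluate at most every five minutes
  ```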
"""
def __new__(cls,
input_fn,
steps=100,
name=None,
hooks=None,
exporters=None,
start_delay_secs=120,
throttle_secs=600):
"""Creates a validated `EvalSpec` instance.
Args:
input_fn: A function that constructs the input data for evaluation.
See @{$premade_estimators#create_input_functions} for more
information. The function should construct and return one of
the following:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple (features, labels) with same constraints as below.
* A tuple (features, labels): Where features is a `Tensor` or a
dictionary of string feature name to `Tensor` and labels is a
`Tensor` or a dictionary of string label name to `Tensor`.
steps: Int. Positive number of steps for which to evaluate model. If
`None`, evaluates until `input_fn` raises an end-of-input exception.
See `Estimator.evaluate` for details.
name: String. Name of the evaluation if user needs to run multiple
evaluations on different data sets. Metrics for different evaluations
are saved in separate folders, and appear separately in tensorboard.
hooks: Iterable of `tf.train.SessionRunHook` objects to run
during evaluation.
exporters: Iterable of `Exporter`s, or a single one, or `None`.
`exporters` will be invoked after each evaluation.
start_delay_secs: Int. Start evaluating after waiting for this many
seconds.
throttle_secs: Int. Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. Of course, evaluation does not
occur if no new checkpoints are available, hence, this is the minimum.
Returns:
A validated `EvalSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate steps.
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
# Validate name.
if name is not None and not isinstance(name, six.string_types):
raise TypeError('`name` must be string, given: {}'.format(name))
# Validate hooks.
hooks = _validate_hooks(hooks)
# Validate exporters.
exporters = _validate_exporters(exporters)
# Validate start_delay_secs.
if start_delay_secs < 0:
raise ValueError('Must specify start_delay_secs >= 0, given: {}'.format(
start_delay_secs))
# Validate throttle_secs.
if throttle_secs < 0:
raise ValueError(
'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))
return super(EvalSpec, cls).__new__(
cls,
input_fn=input_fn,
steps=steps,
name=name,
hooks=hooks,
exporters=exporters,
start_delay_secs=start_delay_secs,
throttle_secs=throttle_secs)
@estimator_export('estimator.train_and_evaluate')
def train_and_evaluate(estimator, train_spec, eval_spec):
"""Train and evaluate the `estimator`.
This utility function trains, evaluates, and (optionally) exports the model by
using the given `estimator`. All training related specification is held in
`train_spec`, including training `input_fn` and training max steps, etc. All
evaluation and export related specification is held in `eval_spec`, including
evaluation `input_fn`, steps, etc.
This utility function provides consistent behavior for both local
(non-distributed) and distributed configurations. Currently, the only
supported distributed training configuration is between-graph replication.
Overfitting: In order to avoid overfitting, it is recommended to set up the
training `input_fn` to shuffle the training data properly.
Stop condition: In order to support both distributed and non-distributed
configuration reliably, the only supported stop condition for model
training is `train_spec.max_steps`. If `train_spec.max_steps` is `None`, the
model is trained forever. *Use with care* if model stop condition is
different. For example, assume that the model is expected to be trained with
one epoch of training data, and the training `input_fn` is configured to throw
`OutOfRangeError` after going through one epoch, which stops the
`Estimator.train`. For a three-training-worker distributed configuration, each
training worker is likely to go through the whole epoch independently. So, the
model will be trained with three epochs of training data instead of one epoch.
Example of local (non-distributed) training:
```python
# Set up feature columns.
  categorical_feature_a = categorical_column_with_hash_bucket(...)
  categorical_feature_a_emb = embedding_column(
      categorical_column=categorical_feature_a, ...)
... # other feature columns
estimator = DNNClassifier(
      feature_columns=[categorical_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
# Or set up the model directory
# estimator = DNNClassifier(
# config=tf.estimator.RunConfig(
# model_dir='/my_model', save_summary_steps=100),
  #     feature_columns=[categorical_feature_a_emb, ...],
# hidden_units=[1024, 512, 256])
# Input pipeline for train and evaluate.
def train_input_fn(): # returns x, y
# please shuffle the data.
pass
def eval_input_fn(): # returns x, y
pass
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```
Note that in current implementation `estimator.evaluate` will be called
multiple times. This means that evaluation graph (including eval_input_fn)
will be re-created for each `evaluate` call. `estimator.train` will be called
only once.
Example of distributed training:
  For distributed training, the code above can be used without change (just
  make sure that `RunConfig.model_dir` for all workers points to the same
  directory, i.e., a directory on a shared file system that all workers can
  read and write). The only extra work is setting the environment variable
  `TF_CONFIG` appropriately for each worker.
Also see
[Distributed TensorFlow](https://www.tensorflow.org/deploy/distributed).
Setting environment variable depends on the platform. For example, on Linux,
it can be done as follows (`$` is the shell prompt):
```
$ TF_CONFIG='<replace_with_real_content>' python train_model.py
```
For the content in `TF_CONFIG`, assume that the training cluster spec looks
like:
```
cluster = {"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]}
```
Example of `TF_CONFIG` for chief training worker (must have one and only one):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "chief", "index": 0}
}'
```
Note that the chief worker also does the model training job, similar to other
non-chief training workers (see next paragraph). In addition to the model
training, it manages some extra work, e.g., checkpoint saving and restoring,
writing summaries, etc.
Example of `TF_CONFIG` for non-chief training worker (optional, could be
multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "worker", "index": 0}
}'
```
  where `task.index` should be set to 0, 1, and 2 respectively for the three
  non-chief training workers in this example.
Example of `TF_CONFIG` for parameter server, aka ps (could be multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "ps", "index": 0}
}'
```
  where `task.index` should be set to 0 and 1 respectively for the two
  parameter servers in this example.
  Example of `TF_CONFIG` for the evaluator task. The evaluator is a special
  task that is not part of the training cluster; there can be only one, and it
  is used for model evaluation.
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "evaluator", "index": 0}
}'
```
Args:
estimator: An `Estimator` instance to train and evaluate.
train_spec: A `TrainSpec` instance to specify the training specification.
eval_spec: A `EvalSpec` instance to specify the evaluation and export
specification.
Returns:
A tuple of the result of the `evaluate` call to the `Estimator` and the
export results using the specified `ExportStrategy`.
Currently, the return value is undefined for distributed training mode.
Raises:
ValueError: if environment variable `TF_CONFIG` is incorrectly set.
"""
_assert_eval_spec(eval_spec) # fail fast if eval_spec is invalid.
executor = _TrainingExecutor(
estimator=estimator, train_spec=train_spec, eval_spec=eval_spec)
config = estimator.config
if (config.task_type == run_config_lib.TaskType.EVALUATOR and
config.task_id > 0):
raise ValueError(
'For distributed training, there can only be one `evaluator` task '
'(with task id 0). Given task id {}'.format(config.task_id))
return executor.run()
class _StopAtSecsHook(session_run_hook.SessionRunHook):
"""Stops given secs after begin is called."""
def __init__(self, stop_after_secs):
self._stop_after_secs = stop_after_secs
self._start_time = None
def begin(self):
self._start_time = time.time()
def after_run(self, run_context, run_values):
del run_values
if time.time() - self._start_time >= self._stop_after_secs:
run_context.request_stop()
class _NewCheckpointListenerForEvaluate(
basic_session_run_hooks.CheckpointSaverListener):
"""A saver listener to run evaluate with every checkpoint."""
def __init__(self, evaluator, eval_throttle_secs, continuous_eval_listener):
self._evaluator = evaluator
self._eval_throttle_secs = eval_throttle_secs
self._continuous_eval_listener = continuous_eval_listener
self.eval_result, self.export_results = None, None
def begin(self):
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=self._eval_throttle_secs)
self._is_first_run = True
def after_save(self, session, global_step_value):
del session # unused; required by signature.
    # Skip the first run; the model is not trained yet.
if self._is_first_run:
self._is_first_run = False
return
if not self._continuous_eval_listener.before_eval():
logging.info('Exiting training and evaluation loop, as requested by '
'_ContinuousEvalListener.before_eval.')
return True
if self._timer.should_trigger_for_step(global_step_value):
self._evaluate(global_step_value) # updates self.eval_result
if not self._continuous_eval_listener.after_eval(self.eval_result):
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.after_eval.')
return True
else:
# TODO(ispir): add remaining time in the log.
logging.info('Skip the current checkpoint eval due to throttle secs '
'({} secs).'.format(self._eval_throttle_secs))
def end(self, session, global_step_value):
    # Evaluate if the last step has not been evaluated yet.
if global_step_value != self._timer.last_triggered_step():
if self._continuous_eval_listener.before_eval():
self._evaluate(global_step_value)
self._continuous_eval_listener.after_eval(self.eval_result)
def _evaluate(self, global_step_value):
self._timer.update_last_triggered_step(global_step_value)
self.eval_result, self.export_results = (
self._evaluator.evaluate_and_export())
if self.eval_result.status != _EvalStatus.EVALUATED:
# This is unexpected; should never happen.
# Training should always end with a new checkpoint.
raise RuntimeError('There was no new checkpoint after the training. '
'Eval status: {}'.format(self.eval_result.status))
class _TrainingExecutor(object):
"""The executor to run `Estimator` training and evaluation.
This implementation supports both distributed and non-distributed (aka local)
training and evaluation based on the setting in `tf.estimator.RunConfig`.
"""
def __init__(self,
estimator,
train_spec,
eval_spec,
train_hooks=None,
continuous_eval_listener=None):
if not isinstance(estimator, estimator_lib.Estimator):
raise TypeError(
'`estimator` must have type `tf.estimator.Estimator`. '
'Got: {}'.format(type(estimator)))
self._estimator = estimator
if not isinstance(train_spec, TrainSpec):
raise TypeError(
'`train_spec` must have type `tf.estimator.TrainSpec`. '
'Got: {}'.format(type(train_spec)))
self._train_spec = train_spec
if eval_spec and not isinstance(eval_spec, EvalSpec):
raise TypeError('`eval_spec` must be either `None` or have type '
'`tf.estimator.EvalSpec`. Got: {}'.format(
type(eval_spec)))
self._eval_spec = eval_spec
self._train_hooks = _validate_hooks(train_hooks)
if (continuous_eval_listener and
not isinstance(continuous_eval_listener, _ContinuousEvalListener)):
raise TypeError('`continuous_eval_listener` must have type '
'`_ContinuousEvalListener`.')
self._continuous_eval_listener = (
continuous_eval_listener or _ContinuousEvalListener())
@property
def estimator(self):
return self._estimator
def run(self):
"""Executes the run_foo for task type `foo`.
`_TrainingExecutor` predefines the procedure for task type 'chief',
'worker', 'ps', and 'evaluator'. For task type `foo`, the corresponding
procedure is `run_foo'. This `run` method invoke the procedure base on the
`RunConfig.task_type`.
Returns:
A tuple of the result of the `evaluate` call to the `Estimator` and the
export results using the specified `ExportStrategy`.
Currently undefined for distributed training mode.
Raises:
ValueError: if the estimator.config is mis-configured.
"""
config = self._estimator.config
if (not config.cluster_spec and
config.task_type != run_config_lib.TaskType.EVALUATOR):
logging.info('Running training and evaluation locally (non-distributed).')
return self.run_local()
# Distributed case.
if not config.task_type:
# TODO(xiejw): Improve the error message about how to set the TF_CONFIG
# correctly.
raise ValueError(
'`estimator.config` must have task_type set. This usually means '
'TF_CONFIG environment is not set correctly.')
if config.task_type == 'local':
raise ValueError(
'`task.type` in TF_CONFIG cannot be `local`. Leaving `cluster` and '
'`task` properties in TF_CONFIG absent triggers train and evaluate '
'`Estimator` locally (non-distributed).')
# For task type foo, call executor.run_foo.
available_tasks = [
x for x in dir(self)
if x.startswith('run_') and x != 'run_local' and
callable(getattr(self, x))
]
task_to_run = 'run_' + config.task_type
if task_to_run not in available_tasks:
raise ValueError(
'Task type {} is not supported. Supported task types are {}'.format(
config.task_type, [x[len('run_'):] for x in available_tasks]))
getattr(self, task_to_run)()
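# Illustrative note (added): with config.task_type == 'worker', the dispatch
# above resolves to self.run_worker(); a task type with no matching
# run_<task_type> method raises the ValueError above.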
def run_chief(self):
"""Runs task chief."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_worker(self):
"""Runs task (training) worker."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_master(self):
"""Runs task master."""
_assert_eval_spec(self._eval_spec)
# Final export signal: For any eval result with global_step >= train
# max_steps, the evaluator will send the final export signal. There is a
# small chance that the Estimator.train stopping logic sees a different
# global_step value (due to a global step race condition and the fact that
# the saver sees a larger value for checkpoint saving), which does not end
# the training. When the training ends, a new checkpoint is generated, which
# triggers the listener again. So, it could be the case that the final
# export is triggered twice.
#
# But here, throttle_secs will skip the next intermediate checkpoint, so the
# chance of a double final export is very small.
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
# When the underlying `Estimator` object saves a new checkpoint, we would
# like this callback to be called so that evaluation and export can trigger.
saving_listeners = [
_NewCheckpointListenerForEvaluate(evaluator,
self._eval_spec.throttle_secs,
_ContinuousEvalListener())
]
self._start_distributed_training(saving_listeners=saving_listeners)
def run_evaluator(self):
"""Runs task evaluator."""
# TODO(xiejw): To allow execution framework to add continuous eval listener.
return self._start_continuous_evaluation()
def run_ps(self):
"""Runs task parameter server (in training cluster spec)."""
config = self._estimator.config
server = self._start_std_server(config)
server.join()
def run_local(self):
"""Runs training and evaluation locally (non-distributed)."""
_assert_eval_spec(self._eval_spec)
train_hooks = list(self._train_spec.hooks) + list(self._train_hooks)
logging.info('Start train and evaluate loop. Evaluation will happen '
'after every checkpoint. Checkpoint frequency is determined '
'based on RunConfig arguments: save_checkpoints_steps {} or '
'save_checkpoints_secs {}.'.format(
self._estimator.config.save_checkpoints_steps,
self._estimator.config.save_checkpoints_secs))
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
listener_for_eval = _NewCheckpointListenerForEvaluate(
evaluator, self._eval_spec.throttle_secs,
self._continuous_eval_listener)
saving_listeners = [listener_for_eval]
self._estimator.train(
input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=train_hooks,
saving_listeners=saving_listeners)
eval_result = listener_for_eval.eval_result or _EvalResult(
status=_EvalStatus.MISSING_CHECKPOINT)
return eval_result.metrics, listener_for_eval.export_results
def _start_std_server(self, config):
"""Creates, starts, and returns a server_lib.Server."""
if (not config.cluster_spec or not config.task_type or
config.task_id is None):
raise RuntimeError('Could not start server; be sure to specify '
'cluster_spec, task_type, and task in '
'RunConfig or set the TF_CONFIG environment variable.')
if not config.master:
jobs = config.cluster_spec.jobs
if (len(jobs) == 1 and
len(config.cluster_spec.job_tasks(jobs[0])) == 1 and
config.task_type in _TRAINER_JOBS):
# For distributed training, config.master is empty if and only if it has
# a single node in the cluster spec. In this case, we should not start
# the server.
logging.info('Skip starting TensorFlow server as there is only one '
'node in the cluster.')
return
else:
raise RuntimeError(
'Could not start server; be sure to specify master in '
'RunConfig or set the TF_CONFIG environment variable.')
logging.info('Start TensorFlow server.')
if config.session_config is None:
session_config = config_pb2.ConfigProto(log_device_placement=False)
else:
session_config = config_pb2.ConfigProto(
log_device_placement=False,
gpu_options=config.session_config.gpu_options)
server = server_lib.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
config=session_config,
start=False,
protocol=config.protocol)
server.start()
return server
def _start_distributed_training(self, saving_listeners=None):
"""Calls `Estimator` train in a distributed setting."""
config = self._estimator.config
# Start in-process TensorFlow server if needed. It's important to start the
# server before we (optionally) sleep. Otherwise, the servers will wait to
# connect to each other before starting to train.
if not _is_google_env():
self._start_std_server(config)
# Delay the worker's start. For asynchronous training, this usually helps
# the model converge faster. The chief starts training immediately, so a
# worker with task id x (0-based) should wait (x+1) * _DELAY_SECS_PER_WORKER.
start_delay_secs = 0
if config.task_type == run_config_lib.TaskType.WORKER:
# TODO(xiejw): Replace the hard code logic (task_id + 1) with unique id in
# training cluster.
start_delay_secs = min(_MAX_DELAY_SECS,
(config.task_id + 1) * _DELAY_SECS_PER_WORKER)
if start_delay_secs > 0:
logging.info('Waiting %d secs before starting training.',
start_delay_secs)
time.sleep(start_delay_secs)
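# Worked example (illustrative; the actual constants are defined elsewhere in
# this file): if _DELAY_SECS_PER_WORKER were 5 and _MAX_DELAY_SECS were 60,
# the worker with task_id=2 would wait min(60, (2 + 1) * 5) = 15 secs.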
self._estimator.train(
input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=list(self._train_spec.hooks) + list(self._train_hooks),
saving_listeners=saving_listeners)
def _start_continuous_evaluation(self):
"""Repeatedly calls `Estimator` evaluate and export until training ends."""
_assert_eval_spec(self._eval_spec)
start_delay_secs = self._eval_spec.start_delay_secs
if start_delay_secs:
logging.info('Waiting %f secs before starting eval.', start_delay_secs)
time.sleep(start_delay_secs)
latest_eval_result = None
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
should_early_stop = False
while not should_early_stop:
if (latest_eval_result and
latest_eval_result.status == _EvalStatus.EVALUATED):
global_step = latest_eval_result.metrics.get(ops.GraphKeys.GLOBAL_STEP)
if (global_step and self._train_spec.max_steps and
global_step >= self._train_spec.max_steps):
logging.info(
'Exiting evaluation, global_step=%s >= train max_steps=%s',
global_step, self._train_spec.max_steps)
return
latest_eval_result, should_early_stop = self._execute_evaluator_once(
evaluator, self._continuous_eval_listener,
self._eval_spec.throttle_secs)
def _execute_evaluator_once(self, evaluator, continuous_eval_listener,
throttle_secs):
"""Executes the `evaluator`."""
_assert_eval_spec(self._eval_spec)
start = time.time()
eval_result = None
should_early_stop = False
if not continuous_eval_listener.before_eval():
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.before_eval.')
should_early_stop = True
return (eval_result, should_early_stop)
# Final export signal: For any eval result with global_step >= train
# max_steps, the evaluator will send the final export signal. The next
# iteration of the while loop will end the continuous eval, as the stopping
# condition is satisfied (both checks use the same global_step value,
# i.e., no race condition).
eval_result, _ = evaluator.evaluate_and_export()
if not self._continuous_eval_listener.after_eval(eval_result):
logging.info('Exiting evaluation, as requested by '
'_ContinuousEvalListener.after_eval.')
should_early_stop = True
return (eval_result, should_early_stop)
# Throttle if necessary.
elapsed_time = time.time() - start
difference = throttle_secs - elapsed_time
if difference > 0:
logging.info('Waiting %f secs before starting next eval run.', difference)
time.sleep(difference)
return (eval_result, should_early_stop)
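# Example of the throttle above (added): with throttle_secs=60 and an
# evaluation pass that took 20 secs, the loop sleeps the remaining 40 secs;
# if the pass took 60 secs or longer, the next evaluation starts immediately.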
class _Evaluator(object):
"""A helper class to call `Estimator.evaluate` and export model."""
def __init__(self, estimator, eval_spec, max_training_steps):
self._estimator = estimator
_assert_eval_spec(eval_spec)
self._eval_spec = eval_spec
self._is_final_export_triggered = False
self._previous_ckpt_path = None
self._last_warning_time = 0
self._max_training_steps = max_training_steps
@property
def is_final_export_triggered(self):
return self._is_final_export_triggered
def evaluate_and_export(self):
"""Evaluate and (maybe) export the current model.
Returns:
A tuple of `EvalResult` instance and the export results.
Raises:
RuntimeError: for any unexpected internal error.
TypeError: if evaluation result has wrong type.
"""
latest_ckpt_path = self._estimator.latest_checkpoint()
if not latest_ckpt_path:
self._log_err_msg('Estimator is not trained yet. Will start an '
'evaluation when a checkpoint is ready.')
return _EvalResult(status=_EvalStatus.MISSING_CHECKPOINT), []
if latest_ckpt_path == self._previous_ckpt_path:
self._log_err_msg(
'No new checkpoint ready for evaluation. Skip the current '
'evaluation pass as evaluation results are expected to be the '
'same for the same checkpoint.')
return _EvalResult(status=_EvalStatus.NO_NEW_CHECKPOINT), []
metrics = self._estimator.evaluate(
input_fn=self._eval_spec.input_fn,
steps=self._eval_spec.steps,
name=self._eval_spec.name,
checkpoint_path=latest_ckpt_path,
hooks=self._eval_spec.hooks)
# _EvalResult validates the metrics.
eval_result = _EvalResult(
status=_EvalStatus.EVALUATED,
metrics=metrics,
checkpoint_path=latest_ckpt_path)
is_the_final_export = (
eval_result.metrics[ops.GraphKeys.GLOBAL_STEP] >=
self._max_training_steps if self._max_training_steps else False)
export_results = self._export_eval_result(eval_result,
is_the_final_export)
if is_the_final_export:
logging.debug('Calling exporter with the `is_the_final_export=True`.')
self._is_final_export_triggered = True
self._last_warning_time = 0
self._previous_ckpt_path = latest_ckpt_path
return eval_result, export_results
def _log_err_msg(self, message):
"""Prints warning `message` every 10 mins."""
current_time = time.time()
if current_time - self._last_warning_time > 600:
logging.warning(message)
self._last_warning_time = current_time
def _export_eval_result(self, eval_result, is_the_final_export):
"""Export `eval_result` according to exporters in `EvalSpec`."""
export_dir_base = os.path.join(
compat.as_str_any(self._estimator.model_dir),
compat.as_str_any('export'))
export_results = []
for exporter in self._eval_spec.exporters:
export_results.append(
exporter.export(
estimator=self._estimator,
export_path=os.path.join(
compat.as_str_any(export_dir_base),
compat.as_str_any(exporter.name)),
checkpoint_path=eval_result.checkpoint_path,
eval_result=eval_result.metrics,
is_the_final_export=is_the_final_export))
return export_results
class _EvalStatus(object):
"""The status of an evaluation event.
For local training and evaluation, the status can only be `EVALUATED` as
`Estimator.train` always generates a new checkpoint.
For distributed training and evaluation, a separate evaluator keeps looking
for new checkpoints. So, multiple situations might occur:
- EVALUATED: A new checkpoint is found since last evaluation.
`Estimator.evaluate` will be invoked.
- MISSING_CHECKPOINT: No checkpoint can be found. Typically, this means
the trainer has not yet produced any checkpoint.
- NO_NEW_CHECKPOINT: No new checkpoint can be found since last evaluation.
Typically, this means the trainer has not yet produced any new checkpoint.
"""
EVALUATED = 'evaluated'
MISSING_CHECKPOINT = 'missing checkpoint'
NO_NEW_CHECKPOINT = 'no new checkpoint'
class _EvalResult(
collections.namedtuple('EvalResult',
['status', 'metrics', 'checkpoint_path'])):
"""_EvalResult holds the result of an evaluation event."""
def __new__(cls, status, metrics=None, checkpoint_path=None):
"""Creates a validated `_EvalResult`.
Args:
status: See `_EvalStatus`.
metrics: The evaluation results returned by `Estimator.evaluate`. Only set
if status is `EVALUATED`.
checkpoint_path: The corresponding checkpoint path for the `metrics`. Only
set if status is `EVALUATED`.
Returns:
A validated `_EvalResult` object.
Raises:
ValueError: If validation fails.
TypeError: If any of the arguments is not the expected type.
"""
if status != _EvalStatus.EVALUATED:
if metrics:
raise ValueError(
'metrics must be `None` if status is not {}; got status {},'
' metrics {}'.format(_EvalStatus.EVALUATED, status, metrics))
if checkpoint_path:
raise ValueError(
'checkpoint must be `None` if status is not {}; got status {}, '
'checkpoint_path {}'.format(_EvalStatus.EVALUATED, status,
checkpoint_path))
return super(_EvalResult, cls).__new__(cls, status, metrics,
checkpoint_path)
# Now, evaluated case.
assert status == _EvalStatus.EVALUATED
# Validates metrics.
if not metrics:
raise ValueError(
'Internal error: `Estimator.evaluate` should never return empty '
'metrics.')
if not isinstance(metrics, dict):
raise TypeError(
'`Estimator.evaluate` should return dict. Given {}.'.format(
type(metrics)))
if ops.GraphKeys.GLOBAL_STEP not in metrics:
raise ValueError(
'Internal error: `Estimator.evaluate` result should have '
'`global_step` in result. Given {}'.format(metrics))
# Validates checkpoint_path.
if not checkpoint_path:
raise ValueError(
'Internal error: `checkpoint_path` should never be empty.')
return super(_EvalResult, cls).__new__(cls, status, metrics,
checkpoint_path)
class _ContinuousEvalListener(object):
"""Interface for listeners that take action before or after evaluation."""
def before_eval(self):
"""Called before evaluation.
Returns:
`False` if you want to skip the current evaluation and early stop the
continuous evaluation; `True` otherwise.
"""
return True
def after_eval(self, eval_result):
"""Called after the evaluation is executed.
Args:
eval_result: An `_EvalResult` instance.
Returns:
`False` if you want to early stop continuous evaluation; `True` otherwise.
"""
del eval_result
return True
def _assert_eval_spec(eval_spec):
"""Raise error if `eval_spec` is not of the right type."""
if not isinstance(eval_spec, EvalSpec):
raise TypeError('`eval_spec` must have type `tf.estimator.EvalSpec`. '
'Got: {}'.format(type(eval_spec)))
|
coursemdetw/2014cdb
|
refs/heads/master
|
wsgi/static/Brython2.1.3-20140704-213726/Lib/multiprocessing/util.py
|
696
|
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocessing.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
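# Typical use (sketch, not part of the original module):
#   from multiprocessing.util import log_to_stderr
#   log_to_stderr(10)  # 10 == logging.DEBUG; emits '[DEBUG/MainProcess] ...'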
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
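# Usage sketch (added): ForkAwareThreadLock below relies on this to re-create
# its lock inside a freshly forked child:
#   register_after_fork(self, ForkAwareThreadLock._reset)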
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
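# Usage sketch (added; mirrors get_temp_dir above): schedule a directory for
# removal at interpreter exit,
#   Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
# or pass a real object instead of None to tie the cleanup to its lifetime.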
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority.
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
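# Ordering example (added): a finalizer registered with exitpriority 10 runs
# before one with exitpriority 0; among equal priorities the most recently
# registered runs first, since the sort key (exitpriority, creation counter)
# is sorted in descending order.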
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
|
dominikl/bioformats
|
refs/heads/develop
|
cpp/ext/gtest-1.7.0/test/gtest_xml_outfiles_test.py
|
2526
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def testOutfile2(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
# TODO(wan@google.com): libtool causes the built test binary to be
# named lt-gtest_xml_outfiles_test_ instead of
# gtest_xml_outfiles_test_. To account for this possibility, we
# allow both names in the following code. We should remove this
# hack when Chandler Carruth's libtool replacement tool is ready.
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main()
|
openstack/poppy
|
refs/heads/master
|
tests/unit/provider/mock/test_certificates.py
|
2
|
# Copyright (c) 2016 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from poppy.provider.mock import certificates
from tests.unit import base
class TestCertificates(base.TestCase):
def setUp(self):
super(TestCertificates, self).setUp()
self.driver = mock.Mock()
self.driver.provider_name = 'Mock'
self.controller = certificates.CertificateController(self.driver)
def test_create_certificate(self):
self.assertIsNone(self.controller.create_certificate({}))
|
litebitcoins/litebitcoin
|
refs/heads/master
|
test/functional/bip68-112-113-p2p.py
|
16
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test activation of the first version bits soft fork.
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks, only 100 of which signal readiness, in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
base_relative_locktime = 10
seq_disable_flag = 1<<31
seq_random_high_bit = 1<<25
seq_type_flag = 1<<22
seq_random_low_bit = 1<<18
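# Background note (added, per BIP 68): bit 31 disables the relative lock
# entirely, and bit 22 switches its units from blocks to 512-second time
# intervals; bits 25 and 18 are ignored by consensus, which is why the test
# treats them as "random" bits.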
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in range(2):
b25times = []
for b25 in range(2):
b22times = []
for b22 in range(2):
b18times = []
for b18 in range(2):
rlt = base_relative_locktime
if (b31):
rlt = rlt | seq_disable_flag
if (b25):
rlt = rlt | seq_random_high_bit
if (b22):
rlt = rlt | seq_type_flag
if (b18):
rlt = rlt | seq_random_low_bit
b18times.append(rlt)
b22times.append(b18times)
b25times.append(b22times)
relative_locktimes.append(b25times)
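# Worked example (added for clarity): relative_locktimes[1][0][1][0] equals
# base_relative_locktime | seq_disable_flag | seq_type_flag,
# i.e. 10 | (1 << 31) | (1 << 22).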
def all_rlt_txs(txarray):
txs = []
for b31 in range(2):
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
txs.append(txarray[b31][b25][b22][b18])
return txs
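# Note (added): all_rlt_txs flattens the 2x2x2x2 nested structure built above
# into a flat list of 16 transactions.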
class BIP68_112_113Test(ComparisonTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4']]
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def send_generic_input_tx(self, node, coinbases):
amount = Decimal("49.99")
return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks=None):
# Use None as the default to avoid the shared-mutable-default pitfall.
if test_blocks is None:
test_blocks = []
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
def create_test_block(self, txs, version = 536870912):
block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
block.nVersion = version
block.vtx.extend(txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
txs = []
assert(len(bip68inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
tx.nVersion = txversion
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
b18txs.append(self.sign_transaction(self.nodes[0], tx))
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def create_bip112special(self, input, txversion):
tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
return signtx
def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
txs = []
assert(len(bip112inputs) >= 16)
i = 0
for b31 in range(2):
b25txs = []
for b25 in range(2):
b22txs = []
for b22 in range(2):
b18txs = []
for b18 in range(2):
tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
i += 1
if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
tx.vin[0].nSequence = base_relative_locktime + locktime_delta
else: # vary nSequence instead, OP_CSV is fixed
tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
tx.nVersion = txversion
signtx = self.sign_transaction(self.nodes[0], tx)
if (varyOP_CSV):
signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
else:
signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
b18txs.append(signtx)
b22txs.append(b18txs)
b25txs.append(b22txs)
txs.append(b25txs)
return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN: 100 out of 144 blocks signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash, 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
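# (Added note: with one block every 600 secs, the prior block's median-time-
# past is the median of its last 11 timestamps, which is exactly
# last_block_time - 5 * 600.)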
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in range(2):
for b18 in range(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in range(2):
for b18 in range(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
# Remaining txs should pass; just test that masking works properly
success_txs = []
for b25 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test: check that comparison of two time types works properly
time_txs = []
for b25 in range(2):
for b18 in range(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/communication/azure-communication-sms/azure/communication/sms/_generated/operations/__init__.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._sms_operations import SmsOperations
__all__ = [
'SmsOperations',
]
|
6112/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/fetch/nosniff/resources/worker.py
|
219
|
def main(request, response):
type = request.GET.first("type", None)
content = "// nothing to see here"
content += "\n"
content += "this.postMessage('hi')"
response.add_required_headers = False
response.writer.write_status(200)
response.writer.write_header("x-content-type-options", "nosniff")
response.writer.write_header("content-length", len(content))
if type is not None:
response.writer.write_header("content-type", type)
response.writer.end_headers()
response.writer.write(content)
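# Illustrative behavior (added): requesting worker.py?type=text/javascript
# serves the body above with that Content-Type, while omitting the "type"
# query parameter sends no Content-Type header at all.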
|
Precis/Diamond
|
refs/heads/master
|
src/collectors/resqueweb/test/testresqueweb.py
|
31
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from resqueweb import ResqueWebCollector
##########################################################################
class TestResqueWebCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('ResqueWebCollector', {
'interval': 10
})
self.collector = ResqueWebCollector(config, None)
def test_import(self):
self.assertTrue(ResqueWebCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats.txt')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = {
'pending.current': 2,
'processed.total': 11686516,
'failed.total': 38667,
'workers.current': 9,
'working.current': 2,
'queue.low.current': 4,
'queue.mail.current': 3,
'queue.realtime.current': 9,
'queue.normal.current': 1,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats_blank.txt')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
himanshu-dixit/oppia
|
refs/heads/develop
|
core/storage/job/gae_models.py
|
4
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for long-running jobs."""
import random
from core.platform import models
import utils
from google.appengine.ext import ndb
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
# These are the possible status codes for a job.
STATUS_CODE_NEW = 'new'
STATUS_CODE_QUEUED = 'queued'
STATUS_CODE_STARTED = 'started'
STATUS_CODE_COMPLETED = 'completed'
STATUS_CODE_FAILED = 'failed'
STATUS_CODE_CANCELED = 'canceled'
class JobModel(base_models.BaseModel):
"""Class representing a datastore entity for a long-running job."""
@classmethod
def get_new_id(cls, entity_name):
"""Overwrites superclass method.
Args:
entity_name: str. The name of the entity to create a new job id for.
Returns:
str. A job id.
"""
job_type = entity_name
current_time_str = str(int(utils.get_current_time_in_millisecs()))
random_int = random.randint(0, 1000)
return '%s-%s-%s' % (job_type, current_time_str, random_int)
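# Illustrative (added): a generated id looks like
# 'JobModel-1536000000000-123' (job type, epoch millis, random int 0-1000).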
# The job type.
job_type = ndb.StringProperty(indexed=True)
# The time at which the job was queued, in milliseconds since the epoch.
time_queued_msec = ndb.FloatProperty(indexed=True)
# The time at which the job was started, in milliseconds since the epoch.
# This is never set if the job was canceled before it was started.
time_started_msec = ndb.FloatProperty(indexed=True)
# The time at which the job was completed, failed or canceled, in
# milliseconds since the epoch.
time_finished_msec = ndb.FloatProperty(indexed=True)
# The current status code for the job.
status_code = ndb.StringProperty(
indexed=True,
default=STATUS_CODE_NEW,
choices=[
STATUS_CODE_NEW, STATUS_CODE_QUEUED, STATUS_CODE_STARTED,
STATUS_CODE_COMPLETED, STATUS_CODE_FAILED, STATUS_CODE_CANCELED
])
# Any metadata for the job, such as the root pipeline id for mapreduce
# jobs.
metadata = ndb.JsonProperty(indexed=False)
# The output of the job. This is only populated if the job has status code
# STATUS_CODE_COMPLETED, and is None otherwise. If populated, this is
# expected to be a list of strings.
output = ndb.JsonProperty(indexed=False)
# The error message, if applicable. Only populated if the job has status
# code STATUS_CODE_FAILED or STATUS_CODE_CANCELED; None otherwise.
error = ndb.TextProperty(indexed=False)
# Whether the datastore models associated with this job have been cleaned
# up (i.e., deleted).
has_been_cleaned_up = ndb.BooleanProperty(default=False, indexed=True)
# Store additional params passed with job.
additional_job_params = ndb.JsonProperty(default=None)
@property
def is_cancelable(self):
        """Whether the job is currently in 'queued' or 'started' status."""
return self.status_code in [STATUS_CODE_QUEUED, STATUS_CODE_STARTED]
@classmethod
def get_recent_jobs(cls, limit, recency_msec):
"""Gets at most limit jobs with respect to a time after recency_msec.
Args:
limit: int. A limit on the number of jobs to return.
recency_msec: int. The number of milliseconds earlier
than the current time.
Returns:
            list(JobModel). A list of at most `limit` jobs queued no earlier
            than `recency_msec` milliseconds before the current time.
"""
earliest_time_msec = (
utils.get_current_time_in_millisecs() - recency_msec)
return cls.query().filter(
cls.time_queued_msec > earliest_time_msec
).order(-cls.time_queued_msec).fetch(limit)
@classmethod
def get_all_unfinished_jobs(cls, limit):
"""Gets at most `limit` unfinished jobs.
Args:
limit: int. A limit on the number of jobs to return.
Returns:
            list(JobModel). A list of at most `limit` unfinished jobs.
"""
return cls.query().filter(
JobModel.status_code.IN([STATUS_CODE_QUEUED, STATUS_CODE_STARTED])
).order(-cls.time_queued_msec).fetch(limit)
@classmethod
def get_unfinished_jobs(cls, job_type):
"""Gets jobs that are unfinished.
Args:
job_type: str. The type of jobs that may be unfinished.
Returns:
            ndb.Query. A query for the unfinished (queued or started) jobs
            of the given job_type.
"""
return cls.query().filter(cls.job_type == job_type).filter(
JobModel.status_code.IN([STATUS_CODE_QUEUED, STATUS_CODE_STARTED]))
@classmethod
def do_unfinished_jobs_exist(cls, job_type):
"""Checks if unfinished jobs exist.
Returns:
bool. True if unfinished jobs exist, otherwise false.
"""
return bool(cls.get_unfinished_jobs(job_type).count(limit=1))
# Allowed transitions: idle --> running --> stopping --> idle.
CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE = 'idle'
CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING = 'running'
CONTINUOUS_COMPUTATION_STATUS_CODE_STOPPING = 'stopping'
class ContinuousComputationModel(base_models.BaseModel):
"""Class representing a continuous computation.
The id of each instance of this model is the name of the continuous
computation manager class.
"""
# The current status code for the computation.
status_code = ndb.StringProperty(
indexed=True,
default=CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE,
choices=[
CONTINUOUS_COMPUTATION_STATUS_CODE_IDLE,
CONTINUOUS_COMPUTATION_STATUS_CODE_RUNNING,
CONTINUOUS_COMPUTATION_STATUS_CODE_STOPPING
])
# The realtime layer that is currently 'active' (i.e., the one that is
# going to be cleared immediately after the current batch job run
# completes).
active_realtime_layer_index = ndb.IntegerProperty(
default=0,
choices=[0, 1])
# The time at which a batch job for this computation was last kicked off,
# in milliseconds since the epoch.
last_started_msec = ndb.FloatProperty(indexed=True)
# The time at which a batch job for this computation was last completed or
# failed, in milliseconds since the epoch.
last_finished_msec = ndb.FloatProperty(indexed=True)
# The time at which a halt signal was last sent to this batch job, in
# milliseconds since the epoch.
last_stopped_msec = ndb.FloatProperty(indexed=True)
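# A minimal usage sketch (added for illustration, not part of the original
# module). It shows the id layout produced by JobModel.get_new_id; the job
# type below is an assumed example name, and actually persisting the model
# requires a running App Engine datastore.
#
#   >>> job_id = JobModel.get_new_id('SampleMapReduceJob')
#   >>> job_id.split('-')[0]
#   'SampleMapReduceJob'
#   >>> # e.g. 'SampleMapReduceJob-1405622744753-581'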
|
jelugbo/hebs_master
|
refs/heads/master
|
common/lib/xmodule/xmodule/modulestore/tests/test_split_modulestore_bulk_operations.py
|
6
|
import copy
import ddt
import unittest
from bson.objectid import ObjectId
from mock import MagicMock, Mock, call
from xmodule.modulestore.split_mongo.split import SplitBulkWriteMixin
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection
from opaque_keys.edx.locator import CourseLocator
class TestBulkWriteMixin(unittest.TestCase):
def setUp(self):
super(TestBulkWriteMixin, self).setUp()
self.bulk = SplitBulkWriteMixin()
self.bulk.SCHEMA_VERSION = 1
self.clear_cache = self.bulk._clear_cache = Mock(name='_clear_cache')
self.conn = self.bulk.db_connection = MagicMock(name='db_connection', spec=MongoConnection)
self.conn.get_course_index.return_value = {'initial': 'index'}
self.course_key = CourseLocator('org', 'course', 'run-a', branch='test')
self.course_key_b = CourseLocator('org', 'course', 'run-b', branch='test')
self.structure = {'this': 'is', 'a': 'structure', '_id': ObjectId()}
self.definition = {'this': 'is', 'a': 'definition', '_id': ObjectId()}
self.index_entry = {'this': 'is', 'an': 'index'}
def assertConnCalls(self, *calls):
self.assertEqual(list(calls), self.conn.mock_calls)
def assertCacheNotCleared(self):
self.assertFalse(self.clear_cache.called)
class TestBulkWriteMixinPreviousTransaction(TestBulkWriteMixin):
"""
Verify that opening and closing a transaction doesn't affect later behaviour.
"""
def setUp(self):
super(TestBulkWriteMixinPreviousTransaction, self).setUp()
self.bulk._begin_bulk_operation(self.course_key)
self.bulk.insert_course_index(self.course_key, MagicMock('prev-index-entry'))
self.bulk.update_structure(self.course_key, {'this': 'is', 'the': 'previous structure', '_id': ObjectId()})
self.bulk._end_bulk_operation(self.course_key)
self.conn.reset_mock()
self.clear_cache.reset_mock()
@ddt.ddt
class TestBulkWriteMixinClosed(TestBulkWriteMixin):
"""
Tests of the bulk write mixin when bulk operations aren't active.
"""
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_no_bulk_read_structure(self, version_guid):
# Reading a structure when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertConnCalls(call.get_structure(self.course_key.as_object_id(version_guid)))
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
def test_no_bulk_write_structure(self):
# Writing a structure when no bulk operation is active should just
# call through to the db_connection. It should also clear the
# system cache
self.bulk.update_structure(self.course_key, self.structure)
self.assertConnCalls(call.insert_structure(self.structure))
self.clear_cache.assert_called_once_with(self.structure['_id'])
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_no_bulk_read_definition(self, version_guid):
# Reading a definition when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertConnCalls(call.get_definition(self.course_key.as_object_id(version_guid)))
self.assertEqual(result, self.conn.get_definition.return_value)
def test_no_bulk_write_definition(self):
# Writing a definition when no bulk operation is active should just
# call through to the db_connection.
self.bulk.update_definition(self.course_key, self.definition)
self.assertConnCalls(call.insert_definition(self.definition))
@ddt.data(True, False)
def test_no_bulk_read_index(self, ignore_case):
# Reading a course index when no bulk operation is active should just call
# through to the db_connection
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertConnCalls(call.get_course_index(self.course_key, ignore_case))
self.assertEqual(result, self.conn.get_course_index.return_value)
self.assertCacheNotCleared()
def test_no_bulk_write_index(self):
# Writing a course index when no bulk operation is active should just call
# through to the db_connection
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls(call.insert_course_index(self.index_entry))
self.assertCacheNotCleared()
def test_out_of_order_end(self):
# Calling _end_bulk_operation without a corresponding _begin...
# is a noop
self.bulk._end_bulk_operation(self.course_key)
def test_write_new_index_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.conn.insert_course_index.assert_called_once_with(self.index_entry)
def test_write_updated_index_on_close(self):
old_index = {'this': 'is', 'an': 'old index'}
self.conn.get_course_index.return_value = old_index
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.insert_course_index(self.course_key, self.index_entry)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.conn.update_course_index.assert_called_once_with(self.index_entry, from_index=old_index)
def test_write_structure_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key, self.structure)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(call.insert_structure(self.structure))
def test_write_multiple_structures_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
other_structure = {'another': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[call.insert_structure(self.structure), call.insert_structure(other_structure)],
self.conn.mock_calls
)
def test_write_index_and_definition_on_close(self):
original_index = {'versions': {}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key, self.definition)
self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.definition['_id']}})
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(
call.insert_definition(self.definition),
call.update_course_index(
{'versions': {self.course_key.branch: self.definition['_id']}},
from_index=original_index
)
)
def test_write_index_and_multiple_definitions_on_close(self):
original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
other_definition = {'another': 'definition', '_id': ObjectId()}
self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}})
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[
call.insert_definition(self.definition),
call.insert_definition(other_definition),
call.update_course_index(
{'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}},
from_index=original_index
)
],
self.conn.mock_calls
)
def test_write_definition_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key, self.definition)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(call.insert_definition(self.definition))
def test_write_multiple_definitions_on_close(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
other_definition = {'another': 'definition', '_id': ObjectId()}
self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[call.insert_definition(self.definition), call.insert_definition(other_definition)],
self.conn.mock_calls
)
def test_write_index_and_structure_on_close(self):
original_index = {'versions': {}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key, self.structure)
self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.structure['_id']}})
self.assertConnCalls()
self.bulk._end_bulk_operation(self.course_key)
self.assertConnCalls(
call.insert_structure(self.structure),
call.update_course_index(
{'versions': {self.course_key.branch: self.structure['_id']}},
from_index=original_index
)
)
def test_write_index_and_multiple_structures_on_close(self):
original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
self.conn.get_course_index.return_value = copy.deepcopy(original_index)
self.bulk._begin_bulk_operation(self.course_key)
self.conn.reset_mock()
self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
other_structure = {'another': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}})
self.bulk._end_bulk_operation(self.course_key)
self.assertItemsEqual(
[
call.insert_structure(self.structure),
call.insert_structure(other_structure),
call.update_course_index(
{'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}},
from_index=original_index
)
],
self.conn.mock_calls
)
def test_version_structure_creates_new_version(self):
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_version_structure_new_course(self):
self.conn.get_course_index.return_value = None
self.bulk._begin_bulk_operation(self.course_key)
version_result = self.bulk.version_structure(self.course_key, self.structure, 'user_id')
get_result = self.bulk.get_structure(self.course_key, version_result['_id'])
self.assertEquals(version_result, get_result)
class TestBulkWriteMixinClosedAfterPrevTransaction(TestBulkWriteMixinClosed, TestBulkWriteMixinPreviousTransaction):
"""
    Test that operations with a closed transaction aren't affected by a previously executed transaction
"""
pass
@ddt.ddt
class TestBulkWriteMixinFindMethods(TestBulkWriteMixin):
"""
Tests of BulkWriteMixin methods for finding many structures or indexes
"""
def test_no_bulk_find_matching_course_indexes(self):
branch = Mock(name='branch')
search_targets = MagicMock(name='search_targets')
self.conn.find_matching_course_indexes.return_value = [Mock(name='result')]
result = self.bulk.find_matching_course_indexes(branch, search_targets)
self.assertConnCalls(call.find_matching_course_indexes(branch, search_targets))
self.assertEqual(result, self.conn.find_matching_course_indexes.return_value)
self.assertCacheNotCleared()
@ddt.data(
(None, None, [], []),
(
'draft',
None,
[{'versions': {'draft': '123'}}],
[
{'versions': {'published': '123'}},
{}
],
),
(
'draft',
{'f1': 'v1'},
[{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}}],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'value2'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1'},
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}, 'search_targets': {'f2': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1', 'f2': 2},
[
{'search_targets': {'f1': 'v1', 'f2': 2}},
{'search_targets': {'f1': 'v1', 'f2': 2}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}},
],
),
)
@ddt.unpack
def test_find_matching_course_indexes(self, branch, search_targets, matching, unmatching):
db_indexes = [Mock(name='from_db')]
for n, index in enumerate(matching + unmatching):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.insert_course_index(course_key, index)
expected = matching + db_indexes
self.conn.find_matching_course_indexes.return_value = db_indexes
result = self.bulk.find_matching_course_indexes(branch, search_targets)
self.assertItemsEqual(result, expected)
for item in unmatching:
self.assertNotIn(item, result)
def test_no_bulk_find_structures_by_id(self):
ids = [Mock(name='id')]
self.conn.find_structures_by_id.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_by_id(ids)
self.assertConnCalls(call.find_structures_by_id(ids))
self.assertEqual(result, self.conn.find_structures_by_id.return_value)
self.assertCacheNotCleared()
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_find_structures_by_id(self, search_ids, active_ids, db_ids):
db_structure = lambda _id: {'db': 'structure', '_id': _id}
active_structure = lambda _id: {'active': 'structure', '_id': _id}
db_structures = [db_structure(_id) for _id in db_ids if _id not in active_ids]
for n, _id in enumerate(active_ids):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.update_structure(course_key, active_structure(_id))
self.conn.find_structures_by_id.return_value = db_structures
results = self.bulk.find_structures_by_id(search_ids)
self.conn.find_structures_by_id.assert_called_once_with(list(set(search_ids) - set(active_ids)))
for _id in active_ids:
if _id in search_ids:
self.assertIn(active_structure(_id), results)
else:
self.assertNotIn(active_structure(_id), results)
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
self.assertIn(db_structure(_id), results)
else:
self.assertNotIn(db_structure(_id), results)
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_get_definitions(self, search_ids, active_ids, db_ids):
db_definition = lambda _id: {'db': 'definition', '_id': _id}
active_definition = lambda _id: {'active': 'definition', '_id': _id}
db_definitions = [db_definition(_id) for _id in db_ids if _id not in active_ids]
self.bulk._begin_bulk_operation(self.course_key)
for n, _id in enumerate(active_ids):
self.bulk.update_definition(self.course_key, active_definition(_id))
self.conn.get_definitions.return_value = db_definitions
results = self.bulk.get_definitions(self.course_key, search_ids)
self.conn.get_definitions.assert_called_once_with(list(set(search_ids) - set(active_ids)))
for _id in active_ids:
if _id in search_ids:
self.assertIn(active_definition(_id), results)
else:
self.assertNotIn(active_definition(_id), results)
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
self.assertIn(db_definition(_id), results)
else:
self.assertNotIn(db_definition(_id), results)
def test_no_bulk_find_structures_derived_from(self):
ids = [Mock(name='id')]
self.conn.find_structures_derived_from.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_derived_from(ids)
self.assertConnCalls(call.find_structures_derived_from(ids))
self.assertEqual(result, self.conn.find_structures_derived_from.return_value)
self.assertCacheNotCleared()
@ddt.data(
# Test values are:
# - previous_versions to search for
# - documents in the cache with $previous_version.$_id
# - documents in the db with $previous_version.$_id
([], [], []),
(['1', '2', '3'], ['1.a', '1.b', '2.c'], ['1.a', '2.c']),
(['1', '2', '3'], ['1.a'], ['1.a', '2.c']),
(['1', '2', '3'], [], ['1.a', '2.c']),
(['1', '2', '3'], ['4.d'], ['1.a', '2.c']),
)
@ddt.unpack
def test_find_structures_derived_from(self, search_ids, active_ids, db_ids):
def db_structure(_id):
previous, _, current = _id.partition('.')
return {'db': 'structure', 'previous_version': previous, '_id': current}
def active_structure(_id):
previous, _, current = _id.partition('.')
return {'active': 'structure', 'previous_version': previous, '_id': current}
db_structures = [db_structure(_id) for _id in db_ids]
active_structures = []
for n, _id in enumerate(active_ids):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
structure = active_structure(_id)
self.bulk.update_structure(course_key, structure)
active_structures.append(structure)
self.conn.find_structures_derived_from.return_value = db_structures
results = self.bulk.find_structures_derived_from(search_ids)
self.conn.find_structures_derived_from.assert_called_once_with(search_ids)
for structure in active_structures:
if structure['previous_version'] in search_ids:
self.assertIn(structure, results)
else:
self.assertNotIn(structure, results)
for structure in db_structures:
if (
structure['previous_version'] in search_ids and # We're searching for this document
not any(active.endswith(structure['_id']) for active in active_ids) # This document doesn't match any active _ids
):
self.assertIn(structure, results)
else:
self.assertNotIn(structure, results)
def test_no_bulk_find_ancestor_structures(self):
original_version = Mock(name='original_version')
block_id = Mock(name='block_id')
self.conn.find_ancestor_structures.return_value = [MagicMock(name='result')]
result = self.bulk.find_ancestor_structures(original_version, block_id)
self.assertConnCalls(call.find_ancestor_structures(original_version, block_id))
self.assertEqual(result, self.conn.find_ancestor_structures.return_value)
self.assertCacheNotCleared()
@ddt.data(
# Test values are:
# - original_version
# - block_id
# - matching documents in the cache
# - non-matching documents in the cache
# - expected documents returned from the db
# - unexpected documents returned from the db
('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], [], []),
('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}, '_id': 'foo'}], [], [], [{'_id': 'foo'}]),
('ov', 'bi', [], [{'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], []),
('ov', 'bi', [], [{'original_version': 'ov'}], [], []),
('ov', 'bi', [], [], [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], []),
(
'ov',
'bi',
[{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}],
[],
[{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'bar'}}}}],
[]
),
)
@ddt.unpack
def test_find_ancestor_structures(self, original_version, block_id, active_match, active_unmatch, db_match, db_unmatch):
for structure in active_match + active_unmatch + db_match + db_unmatch:
structure.setdefault('_id', ObjectId())
for n, structure in enumerate(active_match + active_unmatch):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.update_structure(course_key, structure)
self.conn.find_ancestor_structures.return_value = db_match + db_unmatch
results = self.bulk.find_ancestor_structures(original_version, block_id)
self.conn.find_ancestor_structures.assert_called_once_with(original_version, block_id)
self.assertItemsEqual(active_match + db_match, results)
@ddt.ddt
class TestBulkWriteMixinOpen(TestBulkWriteMixin):
"""
Tests of the bulk write mixin when bulk write operations are open
"""
def setUp(self):
super(TestBulkWriteMixinOpen, self).setUp()
self.bulk._begin_bulk_operation(self.course_key)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_without_write_from_db(self, version_guid):
# Reading a structure before it's been written (while in bulk operation mode)
# returns the structure from the database
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_without_write_only_reads_once(self, version_guid):
# Reading the same structure multiple times shouldn't hit the database
# more than once
for _ in xrange(2):
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.conn.get_structure.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_after_write_no_db(self, version_guid):
# Reading a structure that's already been written shouldn't hit the db at all
self.structure['_id'] = version_guid
self.bulk.update_structure(self.course_key, self.structure)
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 0)
self.assertEqual(result, self.structure)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_structure_after_write_after_read(self, version_guid):
# Reading a structure that's been updated after being pulled from the db should
# still get the updated value
self.structure['_id'] = version_guid
self.bulk.get_structure(self.course_key, version_guid)
self.bulk.update_structure(self.course_key, self.structure)
result = self.bulk.get_structure(self.course_key, version_guid)
self.assertEquals(self.conn.get_structure.call_count, 1)
self.assertEqual(result, self.structure)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_without_write_from_db(self, version_guid):
# Reading a definition before it's been written (while in bulk operation mode)
# returns the definition from the database
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.conn.get_definition.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_without_write_only_reads_once(self, version_guid):
# Reading the same definition multiple times shouldn't hit the database
# more than once
for _ in xrange(2):
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.conn.get_definition.return_value)
self.assertCacheNotCleared()
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_after_write_no_db(self, version_guid):
# Reading a definition that's already been written shouldn't hit the db at all
self.definition['_id'] = version_guid
self.bulk.update_definition(self.course_key, self.definition)
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 0)
self.assertEqual(result, self.definition)
@ddt.data('deadbeef1234' * 2, u'deadbeef1234' * 2, ObjectId())
def test_read_definition_after_write_after_read(self, version_guid):
# Reading a definition that's been updated after being pulled from the db should
# still get the updated value
self.definition['_id'] = version_guid
self.bulk.get_definition(self.course_key, version_guid)
self.bulk.update_definition(self.course_key, self.definition)
result = self.bulk.get_definition(self.course_key, version_guid)
self.assertEquals(self.conn.get_definition.call_count, 1)
self.assertEqual(result, self.definition)
@ddt.data(True, False)
def test_read_index_without_write_from_db(self, ignore_case):
# Reading the index without writing to it should pull from the database
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.conn.get_course_index.return_value, result)
@ddt.data(True, False)
def test_read_index_without_write_only_reads_once(self, ignore_case):
# Reading the index multiple times should only result in one read from
# the database
for _ in xrange(2):
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.conn.get_course_index.return_value, result)
@ddt.data(True, False)
def test_read_index_after_write(self, ignore_case):
# Reading the index after a write still should hit the database once to fetch the
# initial index, and should return the written index_entry
self.bulk.insert_course_index(self.course_key, self.index_entry)
result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.index_entry, result)
def test_read_index_ignore_case(self):
# Reading using ignore case should find an already written entry with a different case
self.bulk.insert_course_index(self.course_key, self.index_entry)
result = self.bulk.get_course_index(
self.course_key.replace(
org=self.course_key.org.upper(),
course=self.course_key.course.title(),
run=self.course_key.run.upper()
),
ignore_case=True
)
self.assertEquals(self.conn.get_course_index.call_count, 1)
self.assertEquals(self.index_entry, result)
def test_version_structure_creates_new_version_before_read(self):
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_version_structure_creates_new_version_after_read(self):
self.conn.get_structure.return_value = copy.deepcopy(self.structure)
self.bulk.get_structure(self.course_key, self.structure['_id'])
self.assertNotEquals(
self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'],
self.structure['_id']
)
def test_copy_branch_versions(self):
# Directly updating an index so that the draft branch points to the published index
# version should work, and should only persist a single structure
self.maxDiff = None
published_structure = {'published': 'structure', '_id': ObjectId()}
self.bulk.update_structure(self.course_key, published_structure)
index = {'versions': {'published': published_structure['_id']}}
self.bulk.insert_course_index(self.course_key, index)
index_copy = copy.deepcopy(index)
index_copy['versions']['draft'] = index['versions']['published']
self.bulk.update_course_index(self.course_key, index_copy)
self.bulk._end_bulk_operation(self.course_key)
self.conn.insert_structure.assert_called_once_with(published_structure)
self.conn.update_course_index.assert_called_once_with(index_copy, from_index=self.conn.get_course_index.return_value)
self.conn.get_course_index.assert_called_once_with(self.course_key)
class TestBulkWriteMixinOpenAfterPrevTransaction(TestBulkWriteMixinOpen, TestBulkWriteMixinPreviousTransaction):
"""
    Test that operations with an open transaction aren't affected by a previously executed transaction
"""
pass
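# A schematic usage sketch (illustrative only, not part of the test suite):
# the pattern these tests exercise. `store` is assumed to be a modulestore
# using SplitBulkWriteMixin with a real db_connection, and `structure` /
# `index_entry` are documents shaped like the fixtures above.
#
#   store._begin_bulk_operation(course_key)
#   try:
#       store.update_structure(course_key, structure)
#       store.insert_course_index(course_key, index_entry)
#   finally:
#       store._end_bulk_operation(course_key)  # flushes writes to the db once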
|
beni55/edx-platform
|
refs/heads/master
|
lms/djangoapps/shoppingcart/exceptions.py
|
191
|
"""
Exceptions for the shoppingcart app
"""
# (Exception class names are self-explanatory, so the docstring requirement is skipped.)
# pylint: disable=missing-docstring
class PaymentException(Exception):
pass
class PurchasedCallbackException(PaymentException):
pass
class InvalidCartItem(PaymentException):
pass
class ItemAlreadyInCartException(InvalidCartItem):
pass
class AlreadyEnrolledInCourseException(InvalidCartItem):
pass
class CourseDoesNotExistException(InvalidCartItem):
pass
class CouponDoesNotExistException(InvalidCartItem):
pass
class MultipleCouponsNotAllowedException(InvalidCartItem):
pass
class RedemptionCodeError(Exception):
"""An error occurs while processing redemption codes. """
pass
class ReportException(Exception):
pass
class ReportTypeDoesNotExistException(ReportException):
pass
class InvalidStatusToRetire(Exception):
pass
class UnexpectedOrderItemStatus(Exception):
pass
class ItemNotFoundInCartException(Exception):
pass
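# A hypothetical usage sketch (not part of the original module): because the
# concrete cart errors above subclass InvalidCartItem, callers can catch the
# whole family in one handler. `process_cart_item` and `log` are assumed
# placeholders.
#
#   try:
#       process_cart_item(item)
#   except InvalidCartItem as err:
#       log.warning("Rejected cart item: %s", err)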
|
mtat76/atm-py
|
refs/heads/master
|
build/lib/atmPy/tools/math_functions.py
|
7
|
import numpy as np
def gauss(x, amp, pos, sigma):
""" amp,x_center,sigma
x: array"""
return amp * np.exp(-(x - pos) ** 2 / (2. * sigma ** 2))
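# A short usage sketch (added for illustration, not in the original module):
# the grid and parameter values below are arbitrary assumptions chosen only
# to demonstrate the call signature.
if __name__ == "__main__":
    x = np.linspace(-3, 3, 7)
    y = gauss(x, amp=1.0, pos=0.0, sigma=1.0)
    print(y)  # peaks at amp where x == pos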
|
sanketdjain/box-python-sdk
|
refs/heads/master
|
test/functional/test_rate_limits.py
|
5
|
# coding: utf-8
from __future__ import unicode_literals
def test_too_many_requests_causes_retry(box_client, mock_box, monkeypatch):
monkeypatch.setattr(mock_box, 'RATE_LIMIT_THRESHOLD', 1)
box_client.folder('0').get()
box_client.folder('0').get()
assert len(mock_box.requests) == 6 # 3 auth requests, 2 real requests, and a retry
|
beni55/networkx
|
refs/heads/master
|
networkx/algorithms/centrality/eigenvector.py
|
8
|
# coding=utf8
"""
Eigenvector centrality.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (aric.hagberg@gmail.com)',
'Pieter Swart (swart@lanl.gov)',
'Sasha Gutfraind (ag362@cornell.edu)'])
__all__ = ['eigenvector_centrality',
'eigenvector_centrality_numpy']
def eigenvector_centrality(G, max_iter=100, tol=1.0e-6, nstart=None,
weight='weight'):
"""Compute the eigenvector centrality for the graph G.
Eigenvector centrality computes the centrality for a node based on the
centrality of its neighbors. The eigenvector centrality for node `i` is
.. math::
\mathbf{Ax} = \lambda \mathbf{x}
where `A` is the adjacency matrix of the graph G with eigenvalue `\lambda`.
By virtue of the Perron–Frobenius theorem, there is a unique and positive
solution if `\lambda` is the largest eigenvalue associated with the
eigenvector of the adjacency matrix `A` ([2]_).
Parameters
----------
G : graph
A networkx graph
max_iter : integer, optional
Maximum number of iterations in power method.
tol : float, optional
Error tolerance used to check convergence in power method iteration.
nstart : dictionary, optional
Starting value of eigenvector iteration for each node.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
nodes : dictionary
Dictionary of nodes with eigenvector centrality as the value.
Examples
--------
>>> G = nx.path_graph(4)
>>> centrality = nx.eigenvector_centrality(G)
>>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
['0 0.37', '1 0.60', '2 0.60', '3 0.37']
See Also
--------
eigenvector_centrality_numpy
pagerank
hits
Notes
------
The measure was introduced by [1]_ and is discussed in [2]_.
Eigenvector convergence: The power iteration method is used to compute
the eigenvector and convergence is not guaranteed. Our method stops after
``max_iter`` iterations or when the vector change is below an error
tolerance of ``number_of_nodes(G)*tol``. We actually use (A+I) rather
than the adjacency matrix A because it shifts the spectrum to enable
discerning the correct eigenvector even for networks with multiple
dominant eigenvalues.
For directed graphs this is "left" eigenvector centrality which corresponds
to the in-edges in the graph. For out-edges eigenvector centrality
first reverse the graph with ``G.reverse()``.
References
----------
.. [1] Phillip Bonacich:
Power and Centrality: A Family of Measures.
American Journal of Sociology 92(5):1170–1182, 1986
http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf
.. [2] Mark E. J. Newman:
Networks: An Introduction.
Oxford University Press, USA, 2010, pp. 169.
"""
from math import sqrt
if type(G) == nx.MultiGraph or type(G) == nx.MultiDiGraph:
raise nx.NetworkXException("Not defined for multigraphs.")
if len(G) == 0:
raise nx.NetworkXException("Empty graph.")
if nstart is None:
# choose starting vector with entries of 1/len(G)
x = dict([(n,1.0/len(G)) for n in G])
else:
x = nstart
# normalize starting vector
s = 1.0/sum(x.values())
for k in x:
x[k] *= s
nnodes = G.number_of_nodes()
# make up to max_iter iterations
for i in range(max_iter):
xlast = x
x = xlast.copy() # Start with xlast times I to iterate with (A+I)
# do the multiplication y^T = x^T A (left eigenvector)
for n in x:
for nbr in G[n]:
x[nbr] += xlast[n] * G[n][nbr].get(weight, 1)
# normalize vector
try:
s = 1.0/sqrt(sum(v**2 for v in x.values()))
# this should never be zero?
except ZeroDivisionError:
s = 1.0
for n in x:
x[n] *= s
# check convergence
err = sum([abs(x[n]-xlast[n]) for n in x])
if err < nnodes*tol:
return x
raise nx.NetworkXError("""eigenvector_centrality():
power iteration failed to converge in %d iterations."%(i+1))""")
def eigenvector_centrality_numpy(G, weight='weight'):
"""Compute the eigenvector centrality for the graph G.
Eigenvector centrality computes the centrality for a node based on the
centrality of its neighbors. The eigenvector centrality for node `i` is
.. math::
\mathbf{Ax} = \lambda \mathbf{x}
where `A` is the adjacency matrix of the graph G with eigenvalue `\lambda`.
By virtue of the Perron–Frobenius theorem, there is a unique and positive
solution if `\lambda` is the largest eigenvalue associated with the
eigenvector of the adjacency matrix `A` ([2]_).
Parameters
----------
G : graph
A networkx graph
weight : None or string, optional
The name of the edge attribute used as weight.
If None, all edge weights are considered equal.
Returns
-------
nodes : dictionary
Dictionary of nodes with eigenvector centrality as the value.
Examples
--------
>>> G = nx.path_graph(4)
>>> centrality = nx.eigenvector_centrality_numpy(G)
>>> print(['%s %0.2f'%(node,centrality[node]) for node in centrality])
['0 0.37', '1 0.60', '2 0.60', '3 0.37']
See Also
--------
eigenvector_centrality
pagerank
hits
Notes
------
The measure was introduced by [1]_.
This algorithm uses the SciPy sparse eigenvalue solver (ARPACK) to
find the largest eigenvalue/eigenvector pair.
For directed graphs this is "left" eigenvector centrality which corresponds
to the in-edges in the graph. For out-edges eigenvector centrality
first reverse the graph with G.reverse().
References
----------
.. [1] Phillip Bonacich:
Power and Centrality: A Family of Measures.
American Journal of Sociology 92(5):1170–1182, 1986
http://www.leonidzhukov.net/hse/2014/socialnetworks/papers/Bonacich-Centrality.pdf
.. [2] Mark E. J. Newman:
Networks: An Introduction.
Oxford University Press, USA, 2010, pp. 169.
"""
import scipy as sp
from scipy.sparse import linalg
if len(G) == 0:
raise nx.NetworkXException('Empty graph.')
M = nx.to_scipy_sparse_matrix(G, nodelist=list(G), weight=weight,
dtype=float)
eigenvalue, eigenvector = linalg.eigs(M.T, k=1, which='LR')
largest = eigenvector.flatten().real
norm = sp.sign(largest.sum())*sp.linalg.norm(largest)
centrality = dict(zip(G,map(float,largest/norm)))
return centrality
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import scipy
    except ImportError:
raise SkipTest("SciPy not available")
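# A minimal sanity-check sketch (added for illustration, not part of the
# module): on a small graph the power-iteration and ARPACK implementations
# should agree to within the iteration tolerance. Assumes SciPy is installed.
if __name__ == '__main__':
    G = nx.path_graph(4)
    c_power = eigenvector_centrality(G)
    c_numpy = eigenvector_centrality_numpy(G)
    assert all(abs(c_power[n] - c_numpy[n]) < 1e-4 for n in G)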
|
renaelectronics/linuxcnc
|
refs/heads/master
|
src/hal/user_comps/vismach/rotarydelta.py
|
6
|
#!/usr/bin/python
# Copyright 2013 Jeff Epler <jepler@unpythonic.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from vismach import *
import hal
import rotarydeltakins
import sys
# allow overriding variables here, using the command line, like:
# loadusr rotarydelta SOMENAME=123
for setting in sys.argv[1:]: exec setting
c = hal.component("rotarydelta")
c.newpin("joint0", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint1", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("joint2", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("pfr", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("tl", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("sl", hal.HAL_FLOAT, hal.HAL_IN)
c.newpin("fr", hal.HAL_FLOAT, hal.HAL_IN)
c['pfr'], c['tl'], c['sl'], c['fr'] = rotarydeltakins.get_geometry()
c.ready()
class DeltaTranslate(Collection):
def __init__(self, parts, comp):
self.comp = comp
self.parts = parts
self.x = self.y = self.z = 0
def apply(self):
glPushMatrix()
rotarydeltakins.set_geometry(self.comp['pfr'], self.comp['tl'], self.comp['sl'], self.comp['fr'])
f = rotarydeltakins.forward(self.comp['joint0'], self.comp['joint1'], self.comp['joint2'])
if f is not None:
self.x = x = f[0]
self.y = y = f[1]
self.z = z = f[2]
else:
x = self.x
y = self.y
z = self.z
glTranslatef(x, y, z)
def unapply(self):
glPopMatrix()
class HexPrismZ(CoordsBase):
def draw(self):
z0, z1, h = self.coords()
h /= cos(pi/6)
glBegin(GL_TRIANGLE_FAN)
glNormal3f(0, 0, 1)
glVertex3f(0, 0, z1)
for i in range(7):
d = (2*pi/6) * i
glVertex3f(h * cos(d), h * sin(d), z1)
glEnd()
glBegin(GL_TRIANGLE_FAN)
glNormal3f(0, 0, -1)
glVertex3f(0, 0, z0)
for i in range(7):
d = (2*pi/6) * i
glVertex3f(h * cos(d), h * sin(d), z0)
glEnd()
glBegin(GL_TRIANGLES)
for i in range(6):
d1 = (2*pi/6) * i
cd1 = h * cos(d1)
sd1 = h * sin(d1)
d2 = (2*pi/6) * (i+1)
cd2 = h * cos(d2)
sd2 = h * sin(d2)
glNormal3f(cos(d1), sin(d1), 0)
glVertex3f(cd1, sd1, z1)
glVertex3f(cd2, sd2, z0)
glVertex3f(cd2, sd2, z1)
glVertex3f(cd1, sd1, z1)
glVertex3f(cd1, sd1, z0)
glVertex3f(cd2, sd2, z0)
glEnd()
def build_joint(angle, joint):
return Rotate([
HalTranslate([
CylinderY(-1, 1, 6, 1),
HalRotate([
CylinderX(c, 0, .5, 'tl', .5)
], c, joint, 1, 0, 1, 0)
], c, "pfr", 1, 0, 0)
], angle, 0, 0, 1)
class Strut:
def __init__(self, platform, angle, component, joint):
self.platform = platform
self.angle = radians(angle)
self.component = component
self.joint = joint
self.q = gluNewQuadric()
def draw(self):
c = cos(self.angle)
s = sin(self.angle)
o = self.component['fr']
oo = .2 * o
x0 = self.platform.x + c*o
sx = oo * -s
y0 = self.platform.y + s*o
sy = oo * c
z0 = self.platform.z
j = -radians(self.component[self.joint])
r2 = self.component['pfr'] + self.component['tl'] * cos(j)
x1 = r2 * cos(self.angle)
y1 = r2 * sin(self.angle)
z1 = self.component['tl'] * sin(j)
d = x1-x0, y1-y0, z1-z0
mag = sqrt(sum(di*di for di in d))
dx, dy, dz = (di/mag for di in d)
L = self.component['sl']
theta = atan2(dz, hypot(dx,dy))
phi = atan2(dy, dx)
glPushMatrix()
glTranslatef(x0+sx, y0+sy, z0)
glRotatef(degrees(phi), 0, 0, 1)
glRotatef(90-degrees(theta), 0, 1, 0)
self.cylinder(L)
glPopMatrix()
glPushMatrix()
glTranslatef(x0-sx, y0-sy, z0)
glRotatef(degrees(phi), 0, 0, 1)
glRotatef(90-degrees(theta), 0, 1, 0)
self.cylinder(L)
glPopMatrix()
def cylinder(self, L):
gluCylinder(self.q, .5, .5, L, 32, 1)
# bottom cap
glRotatef(180,1,0,0)
gluDisk(self.q, 0, .5, 32, 1)
glRotatef(180,1,0,0)
        # the top cap needs to be flipped and translated
glTranslatef(0,0, L)
gluDisk(self.q, 0, .5, 32, 1)
tooltip = Capture()
tool = DeltaTranslate([
Translate([
Color((.5,.5,.5,0), [
Translate([tooltip], 0,0,-2),
HexPrismZ(c, 0, .5, 'fr'),
CylinderZ(-2, 0, -1.5, .25),
CylinderZ(-1.5, .25, 1, .25)
])
], 0, 0, -.5)], c)
red = (1,.5,.5,0)
green = (.5,1,.5,0)
blue = (.5,.5,1,0)
joint0 = Color(red, [build_joint(-90, "joint0")])
joint1 = Color(green, [build_joint(30, "joint1")])
joint2 = Color(blue, [build_joint(150, "joint2")])
work = Capture()
strut0 = Color(red, [Strut(tool, -90, c, "joint0")])
strut1 = Color(green, [Strut(tool, 30, c, "joint1")])
strut2 = Color(blue, [Strut(tool, 150, c, "joint2")])
table = Collection([
CylinderZ(-22, 8, -21, 8),
Translate([
CylinderZ(7, c['pfr']+3.5, 8, c['pfr']+3.5),
Rotate( [Box(1, -c['pfr']+3, 1, 5, -c['pfr']-2, 8)], 0, 0, 0, 1),
Rotate( [Box(1, -c['pfr']+3, 1, 5, -c['pfr']-2, 8)], 120, 0, 0, 1),
Rotate( [Box(1, -c['pfr']+3, 1, 5, -c['pfr']-2, 8)], 240, 0, 0, 1)],
0, 0, -3)
])
model = Collection([table, joint0, joint1, joint2, tool,
strut0, strut1, strut2,
work])
main(model, tooltip, work, 60)
|
avoinsystems/odoo
|
refs/heads/8.0
|
addons/email_template/wizard/mail_compose_message.py
|
197
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv, fields
def _reopen(self, res_id, model):
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
# save original model in context, because selecting the list of available
# templates requires a model in context
'context': {
'default_model': model,
},
}
class mail_compose_message(osv.TransientModel):
_inherit = 'mail.compose.message'
def default_get(self, cr, uid, fields, context=None):
""" Override to pre-fill the data when having a template in single-email mode
and not going through the view: the on_change is not called in that case. """
if context is None:
context = {}
res = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
if res.get('composition_mode') != 'mass_mail' and context.get('default_template_id') and res.get('model') and res.get('res_id'):
res.update(
self.onchange_template_id(
cr, uid, [], context['default_template_id'], res.get('composition_mode'),
res.get('model'), res.get('res_id'), context=context
)['value']
)
if fields is not None:
            for field in list(res.keys()):
                if field not in fields:
                    res.pop(field, None)
return res
_columns = {
'template_id': fields.many2one('email.template', 'Use template', select=True),
}
def send_mail(self, cr, uid, ids, context=None):
""" Override of send_mail to duplicate attachments linked to the email.template.
Indeed, basic mail.compose.message wizard duplicates attachments in mass
mailing mode. But in 'single post' mode, attachments of an email template
also have to be duplicated to avoid changing their ownership. """
if context is None:
context = {}
wizard_context = dict(context)
for wizard in self.browse(cr, uid, ids, context=context):
if wizard.template_id:
wizard_context['mail_notify_user_signature'] = False # template user_signature is added when generating body_html
wizard_context['mail_auto_delete'] = wizard.template_id.auto_delete # mass mailing: use template auto_delete value -> note, for emails mass mailing only
wizard_context['mail_server_id'] = wizard.template_id.mail_server_id.id
if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id:
continue
new_attachment_ids = []
for attachment in wizard.attachment_ids:
if attachment in wizard.template_id.attachment_ids:
new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context))
else:
new_attachment_ids.append(attachment.id)
self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=wizard_context)
def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None):
""" - mass_mailing: we cannot render, so return the template values
- normal mode: return rendered values """
if template_id and composition_mode == 'mass_mail':
fields = ['subject', 'body_html', 'email_from', 'reply_to', 'mail_server_id']
template = self.pool['email.template'].browse(cr, uid, template_id, context=context)
values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))
if template.attachment_ids:
values['attachment_ids'] = [att.id for att in template.attachment_ids]
if template.mail_server_id:
values['mail_server_id'] = template.mail_server_id.id
if template.user_signature and 'body_html' in values:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
values['body_html'] = tools.append_content_to_html(values['body_html'], signature, plaintext=False)
elif template_id:
values = self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context=context)[res_id]
            # transform attachments into attachment_ids; they are not attached to
            # the document here because that happens later in the posting process,
            # so the database can be cleaned up if the email is not sent
ir_attach_obj = self.pool.get('ir.attachment')
for attach_fname, attach_datas in values.pop('attachments', []):
data_attach = {
'name': attach_fname,
'datas': attach_datas,
'datas_fname': attach_fname,
'res_model': 'mail.compose.message',
'res_id': 0,
'type': 'binary', # override default_type from context, possibly meant for another model!
}
values.setdefault('attachment_ids', list()).append(ir_attach_obj.create(cr, uid, data_attach, context=context))
else:
default_context = dict(context, default_composition_mode=composition_mode, default_model=model, default_res_id=res_id)
default_values = self.default_get(cr, uid, ['composition_mode', 'model', 'res_id', 'parent_id', 'partner_ids', 'subject', 'body', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'], context=default_context)
values = dict((key, default_values[key]) for key in ['subject', 'body', 'partner_ids', 'email_from', 'reply_to', 'attachment_ids', 'mail_server_id'] if key in default_values)
if values.get('body_html'):
values['body'] = values.pop('body_html')
return {'value': values}
def save_as_template(self, cr, uid, ids, context=None):
""" hit save as template button: current form value will be a new
template attached to the current document. """
email_template = self.pool.get('email.template')
ir_model_pool = self.pool.get('ir.model')
for record in self.browse(cr, uid, ids, context=context):
model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model or 'mail.message')], context=context)
model_id = model_ids and model_ids[0] or False
model_name = ''
if model_id:
model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
values = {
'name': template_name,
'subject': record.subject or False,
'body_html': record.body or False,
'model_id': model_id or False,
'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
}
template_id = email_template.create(cr, uid, values, context=context)
# generate the saved template
template_values = record.onchange_template_id(template_id, record.composition_mode, record.model, record.res_id)['value']
template_values['template_id'] = template_id
record.write(template_values)
return _reopen(self, record.id, record.model)
#------------------------------------------------------
# Wizard validation and send
#------------------------------------------------------
def generate_email_for_composer_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
""" Call email_template.generate_email(), get fields relevant for
mail.compose.message, transform email_cc and email_to into partner_ids """
if context is None:
context = {}
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to', 'attachment_ids', 'mail_server_id']
returned_fields = fields + ['partner_ids', 'attachments']
values = dict.fromkeys(res_ids, False)
ctx = dict(context, tpl_partners_only=True)
template_values = self.pool.get('email.template').generate_email_batch(cr, uid, template_id, res_ids, fields=fields, context=ctx)
for res_id in res_ids:
res_id_values = dict((field, template_values[res_id][field]) for field in returned_fields if template_values[res_id].get(field))
res_id_values['body'] = res_id_values.pop('body_html', '')
values[res_id] = res_id_values
return values
def render_message_batch(self, cr, uid, wizard, res_ids, context=None):
""" Override to handle templates. """
# generate composer values
composer_values = super(mail_compose_message, self).render_message_batch(cr, uid, wizard, res_ids, context)
# generate template-based values
if wizard.template_id:
template_values = self.generate_email_for_composer_batch(
cr, uid, wizard.template_id.id, res_ids,
fields=['email_to', 'partner_to', 'email_cc', 'attachment_ids', 'mail_server_id'],
context=context)
else:
template_values = {}
for res_id in res_ids:
if template_values.get(res_id):
# recipients are managed by the template
composer_values[res_id].pop('partner_ids')
composer_values[res_id].pop('email_to')
composer_values[res_id].pop('email_cc')
# remove attachments from template values as they should not be rendered
template_values[res_id].pop('attachment_ids', None)
else:
template_values[res_id] = dict()
# update template values by composer values
template_values[res_id].update(composer_values[res_id])
return template_values
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
return self.pool.get('email.template').render_template_batch(cr, uid, template, model, res_ids, context=context, post_process=post_process)
# Compatibility methods
def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None):
return self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context)[res_id]
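# A hypothetical call sketch (added for illustration): rendering a template
# for a single document through the onchange defined above, using the
# old-style ORM signature. `cr`, `uid`, `template_id`, `partner_id` and
# `context` are assumed to come from the surrounding request, and 'comment'
# is the assumed single-post composition mode.
#
#   composer = self.pool['mail.compose.message']
#   values = composer.onchange_template_id(
#       cr, uid, [], template_id, 'comment', 'res.partner', partner_id,
#       context=context)['value']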
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
MacHu-GWU/filetool-project
|
refs/heads/master
|
filetool/meth.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import hashlib
def repr_data_size(size_in_bytes, precision=2):
"""Return human readable string represent of a file size. Doesn"t support
size greater than 1EB.
For example:
- 100 bytes => 100 B
- 100,000 bytes => 97.66 KB
- 100,000,000 bytes => 95.37 MB
- 100,000,000,000 bytes => 93.13 GB
- 100,000,000,000,000 bytes => 90.95 TB
- 100,000,000,000,000,000 bytes => 88.82 PB
...
Magnitude of data::
1000 kB kilobyte
1000 ** 2 MB megabyte
1000 ** 3 GB gigabyte
1000 ** 4 TB terabyte
1000 ** 5 PB petabyte
1000 ** 6 EB exabyte
1000 ** 7 ZB zettabyte
1000 ** 8 YB yottabyte
"""
if size_in_bytes < 1024:
return "%s B" % size_in_bytes
magnitude_of_data = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
index = 0
while 1:
index += 1
size_in_bytes, mod = divmod(size_in_bytes, 1024)
if size_in_bytes < 1024:
break
template = "{0:.%sf} {1}" % precision
s = template.format(size_in_bytes + mod/1024.0, magnitude_of_data[index])
return s
def md5file(abspath, nbytes=0):
"""Return md5 hash value of a piece of a file
Estimate processing time on:
:param abspath: the absolute path to the file
:param nbytes: only has first N bytes of the file. if 0, hash all file
CPU = i7-4600U 2.10GHz - 2.70GHz, RAM = 8.00 GB
1 second can process 0.25GB data
- 0.59G - 2.43 sec
- 1.3G - 5.68 sec
- 1.9G - 7.72 sec
- 2.5G - 10.32 sec
- 3.9G - 16.0 sec
"""
m = hashlib.md5()
with open(abspath, "rb") as f:
if nbytes:
data = f.read(nbytes)
if data:
m.update(data)
else:
while True:
data = f.read(4 * 1 << 16) # only use first 4GB data
if not data:
break
m.update(data)
return m.hexdigest()
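# An illustrative sketch (not in the original module): hashing only the first
# 1 MB of a file gives a fast, approximate fingerprint for change detection.
# The path below is an assumed placeholder.
#
#   fingerprint = md5file("/path/to/large.bin", nbytes=1 << 20)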
#--- Unittest ---
if __name__ == "__main__":
import unittest
class Unittest(unittest.TestCase):
def test_repr_data_size(self):
size_list = [100 * 1000 ** i for i in range(9)]
repr_list = [
"100 B", "97.66 KB", "95.37 MB",
"93.13 GB", "90.95 TB", "88.82 PB",
"86.74 EB", "84.70 ZB", "82.72 YB",
]
for size, str_repr in zip(size_list, repr_list):
self.assertEqual(repr_data_size(size, precision=2), str_repr)
def test_md5file(self):
            md5 = md5file("meth.py")
            self.assertEqual(len(md5), 32)  # md5 hex digest is always 32 chars
unittest.main()
|
kjc88/sl4a
|
refs/heads/master
|
python/src/Mac/Modules/te/tescan.py
|
34
|
# Scan an Apple header file, generating a Python file of generator calls.
import sys
from bgenlocations import TOOLBOXDIR, BGENDIR
sys.path.append(BGENDIR)
from scantools import Scanner
LONG = "TextEdit"
SHORT = "te"
OBJECT = "TEHandle"
def main():
input = LONG + ".h"
output = SHORT + "gen.py"
defsoutput = TOOLBOXDIR + LONG + ".py"
scanner = MyScanner(input, output, defsoutput)
scanner.scan()
scanner.close()
print "=== Testing definitions output code ==="
execfile(defsoutput, {}, {})
print "=== Done scanning and generating, now importing the generated code... ==="
exec "import " + SHORT + "support"
print "=== Done. It's up to you to compile it now! ==="
class MyScanner(Scanner):
def destination(self, type, name, arglist):
classname = "Function"
listname = "functions"
if arglist:
t, n, m = arglist[-1]
# This is non-functional today
if t == OBJECT and m == "InMode":
classname = "Method"
listname = "methods"
return classname, listname
def makeblacklistnames(self):
return [
"TEDispose",
"TEInit",
## "TEGetHiliteRgn",
]
def makeblacklisttypes(self):
return [
"TEClickLoopUPP",
"UniversalProcPtr",
"WordBreakUPP",
"TEDoTextUPP",
"TERecalcUPP",
"TEFindWordUPP",
]
def makerepairinstructions(self):
return [
([("void_ptr", "*", "InMode"), ("long", "*", "InMode")],
[("InBuffer", "*", "*")]),
# TEContinuousStyle
([("short", "mode", "OutMode"), ("TextStyle", "aStyle", "OutMode")],
[("short", "mode", "InOutMode"), ("TextStyle", "aStyle", "InOutMode")])
]
if __name__ == "__main__":
main()
|
moopet/django-flatpage-meta
|
refs/heads/master
|
flatpage_meta/templatetags/flatpage_meta_tags.py
|
1
|
from django import template
from django.contrib.sites.models import Site
register = template.Library()
@register.inclusion_tag('flatpage_meta_tags.html')
def flatpage_meta_tags(flatpage=None):
flatpage_tags = flatpage.meta_tag_set.all() if flatpage else None
site_tags = Site.objects.get_current().meta_tag_set.all()
if flatpage_tags:
site_tags = site_tags.exclude(meta_tag_type__in=[f.meta_tag_type for f in flatpage_tags])
return {
'site_tags': site_tags,
'flatpage_tags': flatpage_tags,
}
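# A minimal usage sketch in a Django template (hypothetical template code;
# assumes this tag library is installed and loaded):
#
#     {% load flatpage_meta_tags %}
#     {% flatpage_meta_tags flatpage %}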
|
eleonrk/SickRage
|
refs/heads/master
|
lib/github/GistComment.py
|
10
|
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class GistComment(github.GithubObject.CompletableGithubObject):
"""
This class represents GistComments as returned for example by http://developer.github.com/v3/todo
"""
def __repr__(self):
return self.get__repr__({"id": self._id.value, "user": self._user.value})
@property
def body(self):
"""
:type: string
"""
self._completeIfNotSet(self._body)
return self._body.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self):
"""
:type: :class:`github.NamedUser.NamedUser`
"""
self._completeIfNotSet(self._user)
return self._user.value
def delete(self):
"""
:calls: `DELETE /gists/:gist_id/comments/:id <http://developer.github.com/v3/gists/comments>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, body):
"""
:calls: `PATCH /gists/:gist_id/comments/:id <http://developer.github.com/v3/gists/comments>`_
:param body: string
:rtype: None
"""
assert isinstance(body, (str, unicode)), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def _initAttributes(self):
self._body = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
self._user = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
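# A minimal usage sketch (assumes an authenticated PyGithub client and an
# existing gist; the comment id is illustrative):
#
#     comment = gist.get_comment(42)
#     print comment.body, comment.user.login
#     comment.edit("updated text")
#     comment.delete()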
|
geodynamics/pylith
|
refs/heads/hackathon/static-greens-fns
|
tests/fullscale/linearelasticity/nofaults-3d/sheartraction_rate_gendb.py
|
1
|
#!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2016 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/linearelasticity/nofaults-3d/sheartraction_rate_gendb.py
#
# @brief Python script to generate spatial database with Dirichlet
# boundary conditions for the time-dependent shear test. The traction
# boundary conditions use UniformDB in the .cfg file.
import numpy
from pythia.pyre.units.time import year
class GenerateDB(object):
"""Python object to generate spatial database with Dirichlet
    boundary conditions for the time-dependent shear test.
"""
def __init__(self):
"""Constructor.
"""
return
def run(self):
"""Generate the database.
"""
# Domain
x = numpy.arange(-1.0e+4, 1.01e+4, 5.0e+3)
y = numpy.arange(-1.0e+4, 1.01e+4, 5.0e+3)
z = numpy.array([0])
x3, y3, z3 = numpy.meshgrid(x, y, z)
nptsX = x.shape[0]
nptsY = y.shape[0]
nptsZ = z.shape[0]
xyz = numpy.zeros((nptsX * nptsY * nptsZ, 3), dtype=numpy.float64)
xyz[:, 0] = x3.ravel()
xyz[:, 1] = y3.ravel()
xyz[:, 2] = z3.ravel()
from sheartraction_rate_soln import AnalyticalSoln
soln = AnalyticalSoln()
disp = soln.bc_initial_displacement(xyz)
velocity_time = soln.bc_rate_time(xyz) / year.value
velocity = soln.bc_velocity(xyz) * year.value
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 3
cs._configure()
data = {
"x": x,
"y": y,
"z": z,
'points': xyz,
'coordsys': cs,
'data_dim': 2,
'values': [
{'name': "initial_amplitude_x",
'units': "m",
'data': disp[0, :, 0].ravel()},
{'name': "initial_amplitude_y",
'units': "m",
'data': disp[0, :, 1].ravel()},
{'name': "initial_amplitude_z",
'units': "m",
'data': disp[0, :, 2].ravel()},
{'name': "rate_amplitude_x",
'units': "m/year",
'data': velocity[0, :, 0].ravel()},
{'name': "rate_amplitude_y",
'units': "m/year",
'data': velocity[0, :, 1].ravel()},
{'name': "rate_amplitude_z",
'units': "m/year",
'data': velocity[0, :, 2].ravel()},
{'name': "rate_start_time",
'units': "year",
'data': velocity_time[0, :, 0].ravel()},
]}
from spatialdata.spatialdb.SimpleGridAscii import SimpleGridAscii
io = SimpleGridAscii()
io.inventory.filename = "sheartraction_rate_disp.spatialdb"
io._configure()
io.write(data)
return
# ======================================================================
if __name__ == "__main__":
GenerateDB().run()
# End of file
|
leesavide/pythonista-docs
|
refs/heads/master
|
Documentation/matplotlib/mpl_examples/pylab_examples/legend_demo4.py
|
9
|
import matplotlib.pyplot as plt
ax = plt.subplot(311)
b1 = ax.bar([0, 1, 2], [0.2, 0.3, 0.1], width=0.4,
label="Bar 1", align="center")
b2 = ax.bar([0.5, 1.5, 2.5], [0.3, 0.2, 0.2], color="red", width=0.4,
label="Bar 2", align="center")
ax.legend()
ax = plt.subplot(312)
err1 = ax.errorbar([0, 1, 2], [2, 3, 1], xerr=0.4, fmt="s",
label="test 1")
err2 = ax.errorbar([0, 1, 2], [3, 2, 4], yerr=0.3, fmt="o",
label="test 2")
err3 = ax.errorbar([0, 1, 2], [1, 1, 3], xerr=0.4, yerr=0.3, fmt="^",
label="test 3")
ax.legend()
ax = plt.subplot(313)
ll = ax.stem([0.3, 1.5, 2.7], [1, 3.6, 2.7], label="stem test")
ax.legend()
plt.show()
|
grani/grpc
|
refs/heads/UnityClient.1.2.0
|
src/python/grpcio/grpc/framework/foundation/callable_util.py
|
29
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for working with callables."""
import abc
import collections
import enum
import functools
import logging
import six
class Outcome(six.with_metaclass(abc.ABCMeta)):
"""A sum type describing the outcome of some call.
Attributes:
kind: One of Kind.RETURNED or Kind.RAISED respectively indicating that the
call returned a value or raised an exception.
return_value: The value returned by the call. Must be present if kind is
Kind.RETURNED.
exception: The exception raised by the call. Must be present if kind is
Kind.RAISED.
"""
@enum.unique
class Kind(enum.Enum):
"""Identifies the general kind of the outcome of some call."""
RETURNED = object()
RAISED = object()
class _EasyOutcome(
collections.namedtuple('_EasyOutcome',
['kind', 'return_value', 'exception']), Outcome):
"""A trivial implementation of Outcome."""
def _call_logging_exceptions(behavior, message, *args, **kwargs):
try:
return _EasyOutcome(Outcome.Kind.RETURNED,
behavior(*args, **kwargs), None)
except Exception as e: # pylint: disable=broad-except
logging.exception(message)
return _EasyOutcome(Outcome.Kind.RAISED, None, e)
def with_exceptions_logged(behavior, message):
"""Wraps a callable in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
message: A string to log if the behavior raises an exception.
Returns:
A callable that when executed invokes the given behavior. The returned
callable takes the same arguments as the given behavior but returns a
future.Outcome describing whether the given behavior returned a value or
raised an exception.
"""
@functools.wraps(behavior)
def wrapped_behavior(*args, **kwargs):
return _call_logging_exceptions(behavior, message, *args, **kwargs)
return wrapped_behavior
def call_logging_exceptions(behavior, message, *args, **kwargs):
"""Calls a behavior in a try-except that logs any exceptions it raises.
Args:
behavior: Any callable.
message: A string to log if the behavior raises an exception.
*args: Positional arguments to pass to the given behavior.
**kwargs: Keyword arguments to pass to the given behavior.
Returns:
An Outcome describing whether the given behavior returned a value or raised
an exception.
"""
return _call_logging_exceptions(behavior, message, *args, **kwargs)
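# A minimal usage sketch (behavior and log message are illustrative):
#
#     def risky(x):
#         return 10 / x
#
#     safe = with_exceptions_logged(risky, 'risky() raised:')
#     outcome = safe(0)  # the ZeroDivisionError is logged, not raised
#     assert outcome.kind is Outcome.Kind.RAISED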
|
santod/android_GE_kernel_htc_m7vzw
|
refs/heads/master
|
tools/perf/util/setup.py
|
4998
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
ShassAro/ShassAro
|
refs/heads/master
|
Bl_project/blVirtualEnv/lib/python2.7/site-packages/pip/_vendor/colorama/__init__.py
|
197
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit
from .ansi import Fore, Back, Style
from .ansitowin32 import AnsiToWin32
__version__ = '0.3.1'
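# A minimal usage sketch (colors are illustrative):
#
#     from colorama import init, Fore, Style
#     init()
#     print(Fore.RED + 'error!' + Style.RESET_ALL)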
|
you21979/phantomjs
|
refs/heads/2.0
|
src/breakpad/src/tools/gyp/test/sibling/gyptest-all.py
|
151
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.build('build/all.gyp', test.ALL, chdir='src')
chdir = 'src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format == 'make':
chdir = 'src'
if test.format == 'xcode':
chdir = 'src/prog1'
test.run_built_executable('prog1',
chdir=chdir,
stdout="Hello from prog1.c\n")
if test.format == 'xcode':
chdir = 'src/prog2'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
|
hurricup/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractsuperclass/moveExtends/source_module.after.py
|
320
|
from dest_module import NewParent
class MyClass(NewParent):
pass
|
akosyakov/intellij-community
|
refs/heads/master
|
python/testData/override/py3k_after.py
|
83
|
class A:
def m(self):
pass
class B(A):
def m(self):
<selection>super().m()</selection>
|
ubgarbage/gae-blog
|
refs/heads/master
|
external_auth/urls.py
|
1
|
import django.conf.urls.defaults
import google_login.urls
import mailru_login.urls
import yandex_login.urls
import inboxru_login.urls
urlpatterns = django.conf.urls.defaults.patterns( '',
( 'google/', django.conf.urls.defaults.include(google_login.urls) ),
( 'mailru/', django.conf.urls.defaults.include(mailru_login.urls) ),
( 'inboxru/', django.conf.urls.defaults.include(inboxru_login.urls) ),
( 'yandex/', django.conf.urls.defaults.include(yandex_login.urls) ),
)
|
marado/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/krasview.py
|
5
|
# encoding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
)
class KrasViewIE(InfoExtractor):
IE_DESC = 'Красвью'
_VALID_URL = r'https?://krasview\.ru/(?:video|embed)/(?P<id>\d+)'
_TEST = {
'url': 'http://krasview.ru/video/512228',
'md5': '3b91003cf85fc5db277870c8ebd98eae',
'info_dict': {
'id': '512228',
'ext': 'mp4',
'title': 'Снег, лёд, заносы',
'description': 'Снято в городе Нягань, в Ханты-Мансийском автономном округе.',
'duration': 27,
'thumbnail': 're:^https?://.*\.jpg',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
flashvars = json.loads(js_to_json(self._search_regex(
r'video_Init\(({.+?})', webpage, 'flashvars')))
video_url = flashvars['url']
title = self._og_search_title(webpage)
description = self._og_search_description(webpage, default=None)
thumbnail = flashvars.get('image') or self._og_search_thumbnail(webpage)
duration = int_or_none(flashvars.get('duration'))
width = int_or_none(self._og_search_property('video:width', webpage, 'video width'))
height = int_or_none(self._og_search_property('video:height', webpage, 'video height'))
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'width': width,
'height': height,
}
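# A minimal usage sketch through youtube-dl's public API (URL taken from the
# test above):
#
#     from youtube_dl import YoutubeDL
#     with YoutubeDL() as ydl:
#         info = ydl.extract_info('http://krasview.ru/video/512228', download=False)
#         print(info['title'], info['duration'])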
|
xaime/sneaks
|
refs/heads/master
|
plugins/__init__.py
|
2
|
__author__ = 'Xaime'
|
mollstam/UnrealPy
|
refs/heads/master
|
UnrealPyEmbed/Source/Python/Lib/python27/test/pydocfodder.py
|
195
|
"""Something just to look at via pydoc."""
import types
class A_classic:
"A classic class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
class B_classic(A_classic):
"A classic class, derived from A_classic."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_classic(A_classic):
"A classic class, derived from A_classic."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_classic(B_classic, C_classic):
"A classic class, derived from B_classic and C_classic."
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class A_new(object):
"A new-style class."
def A_method(self):
"Method defined in A."
def AB_method(self):
"Method defined in A and B."
def AC_method(self):
"Method defined in A and C."
def AD_method(self):
"Method defined in A and D."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def A_classmethod(cls, x):
"A class method defined in A."
A_classmethod = classmethod(A_classmethod)
def A_staticmethod():
"A static method defined in A."
A_staticmethod = staticmethod(A_staticmethod)
def _getx(self):
"A property getter function."
def _setx(self, value):
"A property setter function."
def _delx(self):
"A property deleter function."
A_property = property(fdel=_delx, fget=_getx, fset=_setx,
doc="A sample property defined in A.")
A_int_alias = int
class B_new(A_new):
"A new-style class, derived from A_new."
def AB_method(self):
"Method defined in A and B."
def ABC_method(self):
"Method defined in A, B and C."
def ABD_method(self):
"Method defined in A, B and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def B_method(self):
"Method defined in B."
def BC_method(self):
"Method defined in B and C."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
class C_new(A_new):
"A new-style class, derived from A_new."
def AC_method(self):
"Method defined in A and C."
def ABC_method(self):
"Method defined in A, B and C."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BC_method(self):
"Method defined in B and C."
def BCD_method(self):
"Method defined in B, C and D."
def C_method(self):
"Method defined in C."
def CD_method(self):
"Method defined in C and D."
class D_new(B_new, C_new):
"""A new-style class, derived from B_new and C_new.
"""
def AD_method(self):
"Method defined in A and D."
def ABD_method(self):
"Method defined in A, B and D."
def ACD_method(self):
"Method defined in A, C and D."
def ABCD_method(self):
"Method defined in A, B, C and D."
def BD_method(self):
"Method defined in B and D."
def BCD_method(self):
"Method defined in B, C and D."
def CD_method(self):
"Method defined in C and D."
def D_method(self):
"Method defined in D."
class FunkyProperties(object):
"""From SF bug 472347, by Roeland Rengelink.
Property getters etc may not be vanilla functions or methods,
and this used to make GUI pydoc blow up.
"""
def __init__(self):
self.desc = {'x':0}
class get_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Get called', self, inst
return inst.desc[self.attr]
class set_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst, val):
print 'Set called', self, inst, val
inst.desc[self.attr] = val
class del_desc:
def __init__(self, attr):
self.attr = attr
def __call__(self, inst):
print 'Del called', self, inst
del inst.desc[self.attr]
x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
submodule = types.ModuleType(__name__ + '.submodule',
"""A submodule, which should appear in its parent's summary""")
|
amyvmiwei/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/lib2to3/refactor.py
|
71
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
from __future__ import with_statement
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
import sys
import logging
import operator
import collections
import io
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fixer_dir = os.path.dirname(pkg.__file__)
fix_names = []
for name in sorted(os.listdir(fixer_dir)):
if name.startswith("fix_") and name.endswith(".py"):
if remove_prefix:
name = name[4:]
fix_names.append(name[:-3])
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        #   or a type and content -- so they don't get any farther
        # Always return leaves
if pat.type is None:
raise _EveryNode
return set([pat.type])
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" %(pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(pygram.python_grammar.symbol2number.values(),
pygram.python_grammar.tokens):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [pkg_name + "." + fix_name
for fix_name in get_all_fix_names(pkg_name, False)]
def _identity(obj):
return obj
if sys.version_info < (3, 0):
import codecs
_open_with_encoding = codecs.open
# codecs.open doesn't translate newlines sadly.
def _from_system_newlines(input):
return input.replace("\r\n", "\n")
def _to_system_newlines(input):
if os.linesep != "\n":
return input.replace("\n", os.linesep)
else:
return input
else:
_open_with_encoding = open
_from_system_newlines = _identity
_to_system_newlines = _identity
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {"print_function" : False,
"write_unchanged_files" : False}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
            options: a dict with configuration.
explicit: a list of fixers to run even if they are explicit.
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
if self.options["print_function"]:
self.grammar = pygram.python_grammar_no_print_statement
else:
self.grammar = pygram.python_grammar
# When this is True, the refactor*() methods will call write_file() for
# files processed even if they were not changed during refactoring. If
# and only if the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(self.grammar,
convert=pytree.convert,
logger=self.logger)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX):]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name))
fixer = fix_class(self.options, self.fixer_log)
if fixer.explicit and self.explicit is not True and \
fix_mod_path not in self.explicit:
self.log_message("Skipping implicit fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if (not name.startswith(".") and
os.path.splitext(name)[1] == py_ext):
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except OSError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
return _from_system_newlines(f.read()), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(str(tree)[:-1], filename,
write=write, encoding=encoding)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s",
name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
#use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
#sort by depth; apply fixers from bottom(of the AST) to top
match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
if fixer.keep_line_order:
#some fixers(eg fix_imports) must be applied
#with the original file's line order
match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
# previous transformation ; skip
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
#new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
                                    if fxr not in match_set:
                                        match_set[fxr] = []
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(self, new_text, filename, old_text=None, write=False,
encoding=None):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
f = _open_with_encoding(filename, "w", encoding=encoding)
except OSError as err:
self.log_error("Can't create %s: %s", filename, err)
return
try:
f.write(_to_system_newlines(new_text))
except OSError as err:
self.log_error("Can't write %s: %s", filename, err)
finally:
f.close()
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif (indent is not None and
(line.startswith(indent + self.PS2) or
line == indent + self.PS2.rstrip() + "\n")):
block.append(line)
else:
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno,
indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error("Can't parse docstring in %s line %s: %s: %s",
filename, lineno, err.__class__.__name__, err)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[:lineno-1], new[lineno-1:]
assert clipped == ["\n"] * (lineno-1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix):]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False,
num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [multiprocessing.Process(target=self._child)
for i in range(num_processes)]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(items, write,
doctests_only)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs)
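# A minimal usage sketch (using the stock lib2to3 fixer package):
#
#     fixers = get_fixers_from_package("lib2to3.fixes")
#     rt = RefactoringTool(fixers)
#     tree = rt.refactor_string("print 'hello'\n", "<example>")
#     print(str(tree))  # -> print('hello')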
|
huntxu/fuel-web
|
refs/heads/master
|
nailgun/nailgun/objects/plugin_link.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.db.sqlalchemy.models import plugin_link as plugin_link_db_model
from nailgun.objects import base
from nailgun.objects.serializers import plugin_link
class PluginLink(base.NailgunObject):
model = plugin_link_db_model.PluginLink
serializer = plugin_link.PluginLinkSerializer
class PluginLinkCollection(base.NailgunCollection):
single = PluginLink
@classmethod
def get_by_plugin_id(cls, plugin_id):
if plugin_id is not None:
return cls.filter_by(None, plugin_id=plugin_id)
else:
return cls.all()
@classmethod
def create_with_plugin_id(cls, data, plugin_id):
data['plugin_id'] = plugin_id
return cls.create(data)
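# A minimal usage sketch (ids and data fields are illustrative):
#
#     links = PluginLinkCollection.get_by_plugin_id(5)
#     link = PluginLinkCollection.create_with_plugin_id(
#         {'title': 'Dashboard', 'url': 'http://example.com'}, 5)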
|
drzaeus77/pyroute2
|
refs/heads/master
|
examples/nl80211_interfaces.py
|
8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from pyroute2.iwutil import IW
iw = IW()
for q in iw.get_interfaces_dump():
phyname = 'phy%i' % int(q.get_attr('NL80211_ATTR_WIPHY'))
print('%i\t%s\t%s\t%s' % (q.get_attr('NL80211_ATTR_IFINDEX'), phyname,
q.get_attr('NL80211_ATTR_IFNAME'),
q.get_attr('NL80211_ATTR_MAC')))
iw.close()
|
2014c2g2/2014c2
|
refs/heads/master
|
exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/etree/ElementInclude.py
|
784
|
#
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding (UTF-8 by default for "text").
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
if parse == "xml":
file = open(href, 'rb')
data = ElementTree.parse(file).getroot()
else:
if not encoding:
encoding = 'UTF-8'
file = open(href, 'r', encoding=encoding)
data = file.read()
file.close()
return data
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text + (e.tail or "")
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
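# A minimal usage sketch (file name is illustrative; uses the stdlib import
# path for this module):
#
#     from xml.etree import ElementTree, ElementInclude
#
#     tree = ElementTree.parse("document.xml")
#     ElementInclude.include(tree.getroot())  # expand xi:include elements in place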
|
manthey/girder
|
refs/heads/master
|
girder/models/group.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
from .model_base import AccessControlledModel
from girder import events
from girder.constants import AccessType, CoreEventHandler
from girder.exceptions import ValidationException
class Group(AccessControlledModel):
"""
Groups are simply groups of users. The primary use of grouping users is
to simplify access control for resources in the system, but they can
be used for other purposes that require groupings of users as well.
Group membership is stored in the database on the user document only;
there is no "users" field in this model. This is to optimize for the most
common use case for querying membership, which involves checking access
control policies, which is always done relative to a specific user. The
task of querying all members within a group is much less common and
typically only performed on a single group at a time, so doing a find on the
indexed group list in the user collection is sufficiently fast.
Users with READ access on the group can see the group and its members.
Users with WRITE access on the group can add and remove members and
change the name or description.
Users with ADMIN access can promote group members to grant them WRITE or
ADMIN access, and can also delete the entire group.
This model uses a custom implementation of the access control methods,
because it uses only a subset of its capabilities and provides a more
optimized implementation for that subset. Specifically: read access is
implied by membership in the group or having an invitation to join the
group, so we don't store read access in the access document as normal.
Another constraint is that write and admin access on the group can only be
granted to members of the group. Also, group permissions are not allowed
on groups for the sake of simplicity.
"""
def initialize(self):
self.name = 'group'
self.ensureIndices(['lowerName'])
self.ensureTextIndex({
'name': 10,
'description': 1
})
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'name', 'public', 'description', 'created', 'updated',
'addAllowed', '_addToGroupPolicy'))
events.bind('model.group.save.created',
CoreEventHandler.GROUP_CREATOR_ACCESS,
self._grantCreatorAccess)
def validate(self, doc):
doc['name'] = doc['name'].strip()
doc['lowerName'] = doc['name'].lower()
doc['description'] = doc['description'].strip()
if not doc['name']:
raise ValidationException('Group name must not be empty.', 'name')
q = {
'lowerName': doc['lowerName'],
}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
duplicate = self.findOne(q, fields=['_id'])
if duplicate is not None:
raise ValidationException('A group with that name already exists.',
field='name')
return doc
def listMembers(self, group, offset=0, limit=0, sort=None):
"""
List members of the group.
"""
from .user import User
return User().find({
'groups': group['_id']
}, limit=limit, offset=offset, sort=sort)
def remove(self, group, **kwargs):
"""
Delete a group, and all references to it in the database.
:param group: The group document to delete.
:type group: dict
"""
# Remove references to this group from user group membership lists
from .user import User
User().update({
'groups': group['_id']
}, {
'$pull': {'groups': group['_id']}
})
# Finally, delete the document itself
AccessControlledModel.remove(self, group)
def getMembers(self, group, offset=0, limit=0, sort=None):
"""
Return the list of all users who belong to this group.
:param group: The group to list members on.
:param offset: Offset into the result set of users.
:param limit: Result set size limit.
:param sort: Sort parameter for the find query.
:returns: List of user documents.
"""
from .user import User
return User().find(
{'groups': group['_id']},
offset=offset, limit=limit, sort=sort)
def addUser(self, group, user, level=AccessType.READ):
"""
Add the user to the group. Records membership in the group in the
user document, and also grants the specified access level on the
group itself to the user. Any group member has at least read access on
the group. If the user already belongs to the group, this method can
be used to change their access level within it.
"""
from .user import User
if 'groups' not in user:
user['groups'] = []
if not group['_id'] in user['groups']:
user['groups'].append(group['_id'])
# saved again in setUserAccess...
user = User().save(user, validate=False)
# Delete outstanding request if one exists
self._deleteRequest(group, user)
self.setUserAccess(group, user, level, save=True)
return group
def _deleteRequest(self, group, user):
"""
Helper method to delete a request for the given user.
"""
if user['_id'] in group.get('requests', []):
group['requests'].remove(user['_id'])
self.save(group, validate=False)
def joinGroup(self, group, user):
"""
This method either accepts an invitation to join a group, or if the
given user has not been invited to the group, this will create an
invitation request that moderators and admins may grant or deny later.
"""
from .user import User
if 'groupInvites' not in user:
user['groupInvites'] = []
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
self.addUser(group, user, level=invite['level'])
user['groupInvites'].remove(invite)
User().save(user, validate=False)
break
else:
if 'requests' not in group:
group['requests'] = []
            if user['_id'] not in group['requests']:
group['requests'].append(user['_id'])
group = self.save(group, validate=False)
return group
def inviteUser(self, group, user, level=AccessType.READ):
"""
Invite a user to join the group. Inviting them automatically
grants the user read access to the group so that they can see it.
Once they accept the invitation, they will be given the specified level
of access.
If the user has requested an invitation to this group, calling this
will accept their request and add them to the group at the access
level specified.
"""
from .user import User
if group['_id'] in user.get('groups', []):
raise ValidationException('User is already in this group.')
        # If there is an outstanding request to join from this user, we
        # just add them to the group instead of inviting them.
if user['_id'] in group.get('requests', []):
return self.addUser(group, user, level)
if 'groupInvites' not in user:
user['groupInvites'] = []
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
invite['level'] = level
break
else:
user['groupInvites'].append({
'groupId': group['_id'],
'level': level
})
return User().save(user, validate=False)
def getInvites(self, group, limit=0, offset=0, sort=None):
"""
        Return a page of outstanding invitations to a group. This is simply
        a list of the users currently invited to the group.
:param group: The group to find invitations for.
:param limit: Result set size limit.
:param offset: Offset into the results.
:param sort: The sort field.
"""
from .user import User
return User().find(
{'groupInvites.groupId': group['_id']},
limit=limit, offset=offset, sort=sort)
def removeUser(self, group, user):
"""
Remove the user from the group. If the user is not in the group but
has an outstanding invitation to the group, the invitation will be
revoked. If the user has requested an invitation, calling this will
deny that request, thereby deleting it.
"""
from .user import User
# Remove group membership for this user.
if 'groups' in user and group['_id'] in user['groups']:
user['groups'].remove(group['_id'])
# Remove outstanding requests from this user
self._deleteRequest(group, user)
# Remove any outstanding invitations for this group
        user['groupInvites'] = [
            inv for inv in user.get('groupInvites', [])
            if inv['groupId'] != group['_id']]
user = User().save(user, validate=False)
# Remove all group access for this user on this group.
self.setUserAccess(group, user, level=None, save=True)
return group
def createGroup(self, name, creator, description='', public=True):
"""
Create a new group. The creator will be given admin access to it.
        :param name: The name of the group.
        :type name: str
        :param description: Description for the group.
:type description: str
:param public: Whether the group is publicly visible.
:type public: bool
:param creator: User document representing the creator of the group.
:type creator: dict
:returns: The group document that was created.
"""
assert isinstance(public, bool)
now = datetime.datetime.utcnow()
group = {
'name': name,
'description': description,
'creatorId': creator['_id'],
'created': now,
'updated': now,
'requests': []
}
self.setPublic(group, public, save=False)
return self.save(group)
def _grantCreatorAccess(self, event):
"""
This callback makes the group creator an administrator member of the
group.
This generally should not be called or overridden directly, but it may
be unregistered from the `model.group.save.created` event.
"""
from .user import User
group = event.info
creator = User().load(group['creatorId'], force=True, exc=True)
self.addUser(group, creator, level=AccessType.ADMIN)
def updateGroup(self, group):
"""
Updates a group.
:param group: The group document to update
:type group: dict
:returns: The group document that was edited.
"""
group['updated'] = datetime.datetime.utcnow()
# Validate and save the group
return self.save(group)
def getFullRequestList(self, group):
"""
Return the set of all outstanding requests, filled in with the login
and full names of the corresponding users.
:param group: The group to get requests for.
:type group: dict
"""
from .user import User
userModel = User()
for userId in group.get('requests', []):
user = userModel.load(userId, force=True, fields=['firstName', 'lastName', 'login'])
yield {
'id': userId,
'login': user['login'],
'name': '%s %s' % (user['firstName'], user['lastName'])
}
def hasAccess(self, doc, user=None, level=AccessType.READ):
"""
This overrides the default AccessControlledModel behavior for checking
access to perform an optimized subset of the access control behavior.
:param doc: The group to check permission on.
:type doc: dict
:param user: The user to check against.
:type user: dict
:param level: The access level.
:type level: AccessType
:returns: Whether the access is granted.
"""
if user is None:
# Short-circuit the case of anonymous users
return level == AccessType.READ and doc.get('public', False) is True
elif user['admin']:
# Short-circuit the case of admins
return True
elif level == AccessType.READ:
# For read access, just check user document for membership or public
return doc.get('public', False) is True or\
doc['_id'] in user.get('groups', []) or\
doc['_id'] in [i['groupId'] for i in
user.get('groupInvites', [])]
else:
# Check the actual permissions document for >=WRITE access
return self._hasUserAccess(doc.get('access', {}).get('users', []),
user['_id'], level)
def permissionClauses(self, user=None, level=None, prefix=''):
permission = super(Group, self).permissionClauses(user, level, prefix)
if user and level == AccessType.READ:
permission['$or'].extend([
{prefix + '_id': {'$in': user.get('groups', [])}},
{prefix + '_id': {'$in': [i['groupId'] for i in
user.get('groupInvites', [])]}},
])
return permission
def getAccessLevel(self, doc, user):
"""
Return the maximum access level for a given user on the group.
:param doc: The group to check access on.
:param user: The user to get the access level for.
:returns: The max AccessType available for the user on the object.
"""
if user is None:
if doc.get('public', False):
return AccessType.READ
else:
return AccessType.NONE
elif user['admin']:
return AccessType.ADMIN
else:
access = doc.get('access', {})
level = AccessType.NONE
if doc['_id'] in user.get('groups', []):
level = AccessType.READ
elif doc['_id'] in [i['groupId'] for i in
user.get('groupInvites', [])]:
return AccessType.READ
for userAccess in access.get('users', []):
if userAccess['id'] == user['_id']:
level = max(level, userAccess['level'])
if level == AccessType.ADMIN:
return level
return level
def setGroupAccess(self, doc, group, level, save=False):
raise NotImplementedError('Not implemented.')
def setUserAccess(self, doc, user, level, save=False):
"""
This override is used because we only need to augment the access
field in the case of WRITE access and above since READ access is
implied by membership or invitation.
"""
        # Note: the ``save`` argument is ignored; both branches below persist
        # the access document immediately.
if level is not None and level > AccessType.READ:
doc = AccessControlledModel.setUserAccess(
self, doc, user, level, save=True)
else:
doc = AccessControlledModel.setUserAccess(
self, doc, user, level=None, save=True)
return doc
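# --- Illustrative usage sketch, not part of the original module ---
# A minimal walk-through of the membership flow documented above, assuming
# ``admin`` and ``alice`` are pre-existing user documents (each carrying the
# usual 'admin' boolean flag). This sketches the expected behavior of the
# methods above; it is not a test from the codebase.
def _exampleMembershipFlow(admin, alice):
    groupModel = Group()
    group = groupModel.createGroup('devs', creator=admin, public=False)
    # Inviting alice at WRITE level only gives her implicit READ access...
    groupModel.inviteUser(group, alice, level=AccessType.WRITE)
    assert groupModel.hasAccess(group, alice, AccessType.READ)
    assert not groupModel.hasAccess(group, alice, AccessType.WRITE)
    # ...until she accepts; joining records membership and grants WRITE.
    group = groupModel.joinGroup(group, alice)
    assert groupModel.hasAccess(group, alice, AccessType.WRITE)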
|
josenavas/glowing-dangerzone
|
refs/heads/master
|
gd/config.py
|
1
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The biocore Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os import environ
from os.path import dirname, abspath, join
from future import standard_library
with standard_library.hooks():
from configparser import ConfigParser
class GDConfig(object):
"""Holds the glowing-dangerzone configuration
Attributes
----------
user : str
The postgres user to connect to the postgres server
password : str
The password for the previous user
database : str
The database to connect to
host : str
The host where the postgres server lives
    port : int
The port to use to connect to the postgres server
admin_user : str
The administrator user to connect to the postgres server
admin_password : str
The password for the administrator user
"""
def __init__(self):
# If GD_CONFIG_FP is not set, default to the example in the repo
try:
conf_fp = environ['GD_CONFIG_FP']
except KeyError:
conf_fp = join(dirname(abspath(__file__)),
'support_files', 'config.txt')
# parse the config bits
config = ConfigParser()
with open(conf_fp) as f:
config.readfp(f)
self.user = config.get('postgres', 'USER')
self.password = config.get('postgres', 'PASSWORD') or None
self.database = config.get('postgres', 'DATABASE')
self.host = config.get('postgres', 'HOST')
self.port = config.getint('postgres', 'PORT')
self.admin_user = config.get('postgres', 'ADMIN_USER') or None
self.admin_password = config.get('postgres', 'ADMIN_PASSWORD') or None
gd_config = GDConfig()
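# Illustrative sketch (not part of the original file): the file pointed to by
# GD_CONFIG_FP is a plain INI file with a single [postgres] section, e.g.:
#
#   [postgres]
#   USER = gd_user
#   PASSWORD =
#   DATABASE = gd_test
#   HOST = localhost
#   PORT = 5432
#   ADMIN_USER = postgres
#   ADMIN_PASSWORD =
#
# PORT is parsed as an int, and empty PASSWORD/ADMIN_* values come back as
# None through the ``or None`` fallbacks above. Setting the environment
# variable before import selects the file:
#
#   $ GD_CONFIG_FP=/path/to/config.txt python -c \
#         "from gd.config import gd_config; print(gd_config.port)"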
|
sani-coop/tinjaca
|
refs/heads/master
|
addons/propuestas/models/__init__.py
|
1
|
# -*- coding: utf-8 -*-
import solicitantes
import unidades_productivas
import propuestas
import garantias
import avalistas
import conyuges
import inversiones
import referencias_solicitante
import familiares
import referencias_avalistas
import cuentas_bancarias_avalista
import talleres
|
StrellaGroup/erpnext
|
refs/heads/develop
|
erpnext/education/doctype/student_admission/student_admission.py
|
22
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import nowdate
from frappe.website.website_generator import WebsiteGenerator
class StudentAdmission(WebsiteGenerator):
def autoname(self):
if not self.title:
self.title = self.get_title()
self.name = self.title
def validate(self):
if not self.route: #pylint: disable=E0203
self.route = "admissions/" + "-".join(self.title.split(" "))
def get_context(self, context):
context.no_cache = 1
context.show_sidebar = True
context.title = self.title
context.parents = [{'name': 'admissions', 'title': _('All Student Admissions'), 'route': 'admissions' }]
def get_title(self):
return _("Admissions for {0}").format(self.academic_year)
def get_list_context(context=None):
context.update({
"show_sidebar": True,
"title": _("Student Admissions"),
"get_list": get_admission_list,
"row_template": "education/doctype/student_admission/templates/student_admission_row.html",
})
def get_admission_list(doctype, txt, filters, limit_start, limit_page_length=20, order_by="modified"):
return frappe.db.sql('''select name, title, academic_year, modified, admission_start_date, route,
admission_end_date from `tabStudent Admission` where published=1 and admission_end_date >= %s
order by admission_end_date asc limit {0}, {1}
'''.format(limit_start, limit_page_length), [nowdate()], as_dict=1)
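# Illustrative sketch (not part of the original file): how naming and routing
# fit together. For a document whose academic_year is "2019-20", get_title()
# and autoname() produce the name "Admissions for 2019-20", and validate()
# then slugs spaces to hyphens for the website route:
#   "admissions/" + "-".join("Admissions for 2019-20".split(" "))
#   -> "admissions/Admissions-for-2019-20"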
|
alangwansui/mtl_ordercenter
|
refs/heads/master
|
openerp/addons/analytic_user_function/__init__.py
|
441
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import analytic_user_function
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jcoady9/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/crashers/nasty_eq_vs_dict.py
|
63
|
# from http://mail.python.org/pipermail/python-dev/2001-June/015239.html
# if you keep changing a dictionary while looking up a key, you can
# provoke an infinite recursion in C
# At the time neither Tim nor Michael could be bothered to think of a
# way to fix it.
class Yuck:
def __init__(self):
self.i = 0
def make_dangerous(self):
self.i = 1
def __hash__(self):
# direct to slot 4 in table of size 8; slot 12 when size 16
return 4 + 8
def __eq__(self, other):
if self.i == 0:
# leave dict alone
pass
elif self.i == 1:
# fiddle to 16 slots
self.__fill_dict(6)
self.i = 2
else:
# fiddle to 8 slots
self.__fill_dict(4)
self.i = 1
return 1
def __fill_dict(self, n):
self.i = 0
dict.clear()
for i in range(n):
dict[i] = i
dict[self] = "OK!"
y = Yuck()
dict = {y: "OK!"}
z = Yuck()
y.make_dangerous()
print(dict[z])
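# Roughly what happens (summary added for clarity, not in the original): the
# module-level name 'dict' shadows the builtin. Looking up dict[z] probes the
# slot y also hashes to, so y.__eq__ runs; __eq__ resizes the dictionary, the
# C lookup detects the mutation and restarts, which runs __eq__ again, and so
# the lookup recurses in C until the interpreter crashes.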
|
akretion/odoo
|
refs/heads/12-patch-paging-100-in-o2m
|
addons/account_facturx/__init__.py
|
36
|
# -*- encoding: utf-8 -*-
from . import models
|
general-language-syntax/GLS
|
refs/heads/master
|
test/integration/ListSliceIndex/zero to number.py
|
4
|
#
abc[0:7]
#
|
tensorflow/models
|
refs/heads/master
|
official/vision/image_classification/resnet/resnet_ctl_imagenet_main.py
|
1
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs a ResNet model on the ImageNet dataset using custom training loops."""
import math
import os
# Import libraries
from absl import app
from absl import flags
from absl import logging
import orbit
import tensorflow as tf
from official.common import distribute_utils
from official.modeling import performance
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
from official.utils.misc import model_helpers
from official.vision.image_classification.resnet import common
from official.vision.image_classification.resnet import imagenet_preprocessing
from official.vision.image_classification.resnet import resnet_runnable
flags.DEFINE_boolean(name='use_tf_function', default=True,
help='Wrap the train and test step inside a '
'tf.function.')
flags.DEFINE_boolean(name='single_l2_loss_op', default=False,
help='Calculate L2_loss on concatenated weights, '
'instead of using Keras per-layer L2 loss.')
def build_stats(runnable, time_callback):
"""Normalizes and returns dictionary of stats.
Args:
runnable: The module containing all the training and evaluation metrics.
time_callback: Time tracking callback instance.
Returns:
Dictionary of normalized results.
"""
stats = {}
if not runnable.flags_obj.skip_eval:
stats['eval_loss'] = runnable.test_loss.result().numpy()
stats['eval_acc'] = runnable.test_accuracy.result().numpy()
stats['train_loss'] = runnable.train_loss.result().numpy()
stats['train_acc'] = runnable.train_accuracy.result().numpy()
if time_callback:
timestamp_log = time_callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = time_callback.train_finish_time
if time_callback.epoch_runtime_log:
stats['avg_exp_per_second'] = time_callback.average_examples_per_second
return stats
def get_num_train_iterations(flags_obj):
"""Returns the number of training steps, train and test epochs."""
train_steps = (
imagenet_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)
train_epochs = flags_obj.train_epochs
if flags_obj.train_steps:
train_steps = min(flags_obj.train_steps, train_steps)
train_epochs = 1
eval_steps = math.ceil(1.0 * imagenet_preprocessing.NUM_IMAGES['validation'] /
flags_obj.batch_size)
return train_steps, train_epochs, eval_steps
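# Worked example (added for illustration, not in the original file): with the
# standard ImageNet split (1,281,167 train / 50,000 validation images) and
# --batch_size=1024, this yields train_steps = 1281167 // 1024 = 1251 and
# eval_steps = ceil(50000 / 1024) = 49; passing --train_steps both caps
# train_steps and forces train_epochs to 1.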
def run(flags_obj):
"""Run ResNet ImageNet training and eval loop using custom training loops.
Args:
flags_obj: An object containing parsed flag values.
Raises:
ValueError: If fp16 is passed as it is not currently supported.
Returns:
Dictionary of training and eval stats.
"""
keras_utils.set_session_config()
performance.set_mixed_precision_policy(flags_core.get_tf_dtype(flags_obj))
if tf.config.list_physical_devices('GPU'):
if flags_obj.tf_gpu_thread_mode:
keras_utils.set_gpu_thread_mode_and_count(
per_gpu_thread_count=flags_obj.per_gpu_thread_count,
gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
num_gpus=flags_obj.num_gpus,
datasets_num_private_threads=flags_obj.datasets_num_private_threads)
common.set_cudnn_batchnorm_mode()
data_format = flags_obj.data_format
if data_format is None:
data_format = ('channels_first' if tf.config.list_physical_devices('GPU')
else 'channels_last')
tf.keras.backend.set_image_data_format(data_format)
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=flags_obj.distribution_strategy,
num_gpus=flags_obj.num_gpus,
all_reduce_alg=flags_obj.all_reduce_alg,
num_packs=flags_obj.num_packs,
tpu_address=flags_obj.tpu)
per_epoch_steps, train_epochs, eval_steps = get_num_train_iterations(
flags_obj)
if flags_obj.steps_per_loop is None:
steps_per_loop = per_epoch_steps
elif flags_obj.steps_per_loop > per_epoch_steps:
steps_per_loop = per_epoch_steps
logging.warn('Setting steps_per_loop to %d to respect epoch boundary.',
steps_per_loop)
else:
steps_per_loop = flags_obj.steps_per_loop
logging.info(
'Training %d epochs, each epoch has %d steps, '
'total steps: %d; Eval %d steps', train_epochs, per_epoch_steps,
train_epochs * per_epoch_steps, eval_steps)
time_callback = keras_utils.TimeHistory(
flags_obj.batch_size,
flags_obj.log_steps,
logdir=flags_obj.model_dir if flags_obj.enable_tensorboard else None)
with distribute_utils.get_strategy_scope(strategy):
runnable = resnet_runnable.ResnetRunnable(flags_obj, time_callback,
per_epoch_steps)
eval_interval = flags_obj.epochs_between_evals * per_epoch_steps
checkpoint_interval = (
steps_per_loop * 5 if flags_obj.enable_checkpoint_and_export else None)
summary_interval = steps_per_loop if flags_obj.enable_tensorboard else None
checkpoint_manager = tf.train.CheckpointManager(
runnable.checkpoint,
directory=flags_obj.model_dir,
max_to_keep=10,
step_counter=runnable.global_step,
checkpoint_interval=checkpoint_interval)
resnet_controller = orbit.Controller(
strategy=strategy,
trainer=runnable,
evaluator=runnable if not flags_obj.skip_eval else None,
global_step=runnable.global_step,
steps_per_loop=steps_per_loop,
checkpoint_manager=checkpoint_manager,
summary_interval=summary_interval,
summary_dir=flags_obj.model_dir,
eval_summary_dir=os.path.join(flags_obj.model_dir, 'eval'))
time_callback.on_train_begin()
if not flags_obj.skip_eval:
resnet_controller.train_and_evaluate(
train_steps=per_epoch_steps * train_epochs,
eval_steps=eval_steps,
eval_interval=eval_interval)
else:
resnet_controller.train(steps=per_epoch_steps * train_epochs)
time_callback.on_train_end()
stats = build_stats(runnable, time_callback)
return stats
def main(_):
model_helpers.apply_clean(flags.FLAGS)
stats = run(flags.FLAGS)
logging.info('Run stats:\n%s', stats)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
common.define_keras_flags()
app.run(main)
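# Example invocation (illustrative; the flag values are placeholders.
# --use_tf_function and --single_l2_loss_op are defined in this file; the
# remaining flags come from common.define_keras_flags()):
#   python resnet_ctl_imagenet_main.py --model_dir=/tmp/resnet \
#       --num_gpus=8 --batch_size=1024 --train_epochs=90 \
#       --use_tf_function=true --single_l2_loss_op=false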
|
tseaver/gcloud-python
|
refs/heads/master
|
kms/tests/unit/gapic/v1/test_key_management_service_client_v1.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import pytest
from google.cloud import kms_v1
from google.cloud.kms_v1 import enums
from google.cloud.kms_v1.proto import resources_pb2
from google.cloud.kms_v1.proto import service_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import duration_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
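# Illustrative sketch (not part of the original tests): the stubs above fake
# the gRPC layer. ChannelStub records every (method, request) pair and
# MultiCallableStub pops canned responses, so client behavior can be asserted
# without a network. The '/Echo/Ping' method name is made up for this demo.
def _demo_channel_stub():
    channel = ChannelStub(responses=['pong'])
    call = channel.unary_unary('/Echo/Ping')
    assert call('ping-request') == 'pong'
    assert channel.requests == [('/Echo/Ping', 'ping-request')]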
class TestKeyManagementServiceClient(object):
def test_list_key_rings(self):
# Setup Expected Response
next_page_token = ''
total_size = 705419236
key_rings_element = {}
key_rings = [key_rings_element]
expected_response = {
'next_page_token': next_page_token,
'total_size': total_size,
'key_rings': key_rings
}
expected_response = service_pb2.ListKeyRingsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
paged_list_response = client.list_key_rings(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.key_rings[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListKeyRingsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_key_rings_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
parent = client.location_path('[PROJECT]', '[LOCATION]')
paged_list_response = client.list_key_rings(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_list_crypto_keys(self):
# Setup Expected Response
next_page_token = ''
total_size = 705419236
crypto_keys_element = {}
crypto_keys = [crypto_keys_element]
expected_response = {
'next_page_token': next_page_token,
'total_size': total_size,
'crypto_keys': crypto_keys
}
expected_response = service_pb2.ListCryptoKeysResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
parent = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
paged_list_response = client.list_crypto_keys(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.crypto_keys[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListCryptoKeysRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_crypto_keys_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
parent = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
paged_list_response = client.list_crypto_keys(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_list_crypto_key_versions(self):
# Setup Expected Response
next_page_token = ''
total_size = 705419236
crypto_key_versions_element = {}
crypto_key_versions = [crypto_key_versions_element]
expected_response = {
'next_page_token': next_page_token,
'total_size': total_size,
'crypto_key_versions': crypto_key_versions
}
expected_response = service_pb2.ListCryptoKeyVersionsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
parent = client.crypto_key_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]')
paged_list_response = client.list_crypto_key_versions(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.crypto_key_versions[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListCryptoKeyVersionsRequest(
parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_crypto_key_versions_exception(self):
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
parent = client.crypto_key_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]')
paged_list_response = client.list_crypto_key_versions(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_key_ring(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = resources_pb2.KeyRing(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
response = client.get_key_ring(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetKeyRingRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_key_ring_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
with pytest.raises(CustomException):
client.get_key_ring(name)
def test_get_crypto_key(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = resources_pb2.CryptoKey(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_path('[PROJECT]', '[LOCATION]', '[KEY_RING]',
'[CRYPTO_KEY]')
response = client.get_crypto_key(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetCryptoKeyRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_crypto_key_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_path('[PROJECT]', '[LOCATION]', '[KEY_RING]',
'[CRYPTO_KEY]')
with pytest.raises(CustomException):
client.get_crypto_key(name)
def test_get_crypto_key_version(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = resources_pb2.CryptoKeyVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
response = client.get_crypto_key_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetCryptoKeyVersionRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_crypto_key_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
with pytest.raises(CustomException):
client.get_crypto_key_version(name)
def test_create_key_ring(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = resources_pb2.KeyRing(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
parent = client.location_path('[PROJECT]', '[LOCATION]')
key_ring_id = 'keyRingId-2056646742'
key_ring = {}
response = client.create_key_ring(parent, key_ring_id, key_ring)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.CreateKeyRingRequest(
parent=parent, key_ring_id=key_ring_id, key_ring=key_ring)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_key_ring_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
parent = client.location_path('[PROJECT]', '[LOCATION]')
key_ring_id = 'keyRingId-2056646742'
key_ring = {}
with pytest.raises(CustomException):
client.create_key_ring(parent, key_ring_id, key_ring)
def test_create_crypto_key(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = resources_pb2.CryptoKey(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
parent = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
crypto_key_id = 'my-app-key'
purpose = enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
seconds = 2147483647
next_rotation_time = {'seconds': seconds}
seconds_2 = 604800
rotation_period = {'seconds': seconds_2}
crypto_key = {
'purpose': purpose,
'next_rotation_time': next_rotation_time,
'rotation_period': rotation_period
}
response = client.create_crypto_key(parent, crypto_key_id, crypto_key)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.CreateCryptoKeyRequest(
parent=parent, crypto_key_id=crypto_key_id, crypto_key=crypto_key)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_crypto_key_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
parent = client.key_ring_path('[PROJECT]', '[LOCATION]', '[KEY_RING]')
crypto_key_id = 'my-app-key'
purpose = enums.CryptoKey.CryptoKeyPurpose.ENCRYPT_DECRYPT
seconds = 2147483647
next_rotation_time = {'seconds': seconds}
seconds_2 = 604800
rotation_period = {'seconds': seconds_2}
crypto_key = {
'purpose': purpose,
'next_rotation_time': next_rotation_time,
'rotation_period': rotation_period
}
with pytest.raises(CustomException):
client.create_crypto_key(parent, crypto_key_id, crypto_key)
def test_create_crypto_key_version(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = resources_pb2.CryptoKeyVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
parent = client.crypto_key_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]')
crypto_key_version = {}
response = client.create_crypto_key_version(parent, crypto_key_version)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.CreateCryptoKeyVersionRequest(
parent=parent, crypto_key_version=crypto_key_version)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_crypto_key_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
parent = client.crypto_key_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]')
crypto_key_version = {}
with pytest.raises(CustomException):
client.create_crypto_key_version(parent, crypto_key_version)
def test_update_crypto_key(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = resources_pb2.CryptoKey(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
crypto_key = {}
update_mask = {}
response = client.update_crypto_key(crypto_key, update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateCryptoKeyRequest(
crypto_key=crypto_key, update_mask=update_mask)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_crypto_key_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
crypto_key = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_crypto_key(crypto_key, update_mask)
def test_update_crypto_key_version(self):
# Setup Expected Response
name = 'name3373707'
expected_response = {'name': name}
expected_response = resources_pb2.CryptoKeyVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
crypto_key_version = {}
update_mask = {}
response = client.update_crypto_key_version(crypto_key_version,
update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateCryptoKeyVersionRequest(
crypto_key_version=crypto_key_version, update_mask=update_mask)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_crypto_key_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
crypto_key_version = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_crypto_key_version(crypto_key_version, update_mask)
def test_encrypt(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
ciphertext = b'-72'
expected_response = {'name': name_2, 'ciphertext': ciphertext}
expected_response = service_pb2.EncryptResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_path_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY_PATH]')
plaintext = b'-9'
response = client.encrypt(name, plaintext)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.EncryptRequest(
name=name, plaintext=plaintext)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_encrypt_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_path_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY_PATH]')
plaintext = b'-9'
with pytest.raises(CustomException):
client.encrypt(name, plaintext)
def test_decrypt(self):
# Setup Expected Response
plaintext = b'-9'
expected_response = {'plaintext': plaintext}
expected_response = service_pb2.DecryptResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_path('[PROJECT]', '[LOCATION]', '[KEY_RING]',
'[CRYPTO_KEY]')
ciphertext = b'-72'
response = client.decrypt(name, ciphertext)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.DecryptRequest(
name=name, ciphertext=ciphertext)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_decrypt_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_path('[PROJECT]', '[LOCATION]', '[KEY_RING]',
'[CRYPTO_KEY]')
ciphertext = b'-72'
with pytest.raises(CustomException):
client.decrypt(name, ciphertext)
def test_update_crypto_key_primary_version(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = resources_pb2.CryptoKey(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_path('[PROJECT]', '[LOCATION]', '[KEY_RING]',
'[CRYPTO_KEY]')
crypto_key_version_id = 'cryptoKeyVersionId729489152'
response = client.update_crypto_key_primary_version(
name, crypto_key_version_id)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateCryptoKeyPrimaryVersionRequest(
name=name, crypto_key_version_id=crypto_key_version_id)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_crypto_key_primary_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_path('[PROJECT]', '[LOCATION]', '[KEY_RING]',
'[CRYPTO_KEY]')
crypto_key_version_id = 'cryptoKeyVersionId729489152'
with pytest.raises(CustomException):
client.update_crypto_key_primary_version(name,
crypto_key_version_id)
def test_destroy_crypto_key_version(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = resources_pb2.CryptoKeyVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
response = client.destroy_crypto_key_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.DestroyCryptoKeyVersionRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_destroy_crypto_key_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
with pytest.raises(CustomException):
client.destroy_crypto_key_version(name)
def test_restore_crypto_key_version(self):
# Setup Expected Response
name_2 = 'name2-1052831874'
expected_response = {'name': name_2}
expected_response = resources_pb2.CryptoKeyVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
response = client.restore_crypto_key_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.RestoreCryptoKeyVersionRequest(
name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_restore_crypto_key_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
with pytest.raises(CustomException):
client.restore_crypto_key_version(name)
def test_get_public_key(self):
# Setup Expected Response
pem = 'pem110872'
expected_response = {'pem': pem}
expected_response = resources_pb2.PublicKey(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
response = client.get_public_key(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetPublicKeyRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_public_key_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
with pytest.raises(CustomException):
client.get_public_key(name)
def test_asymmetric_decrypt(self):
# Setup Expected Response
plaintext = b'-9'
expected_response = {'plaintext': plaintext}
expected_response = service_pb2.AsymmetricDecryptResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
ciphertext = b'-72'
response = client.asymmetric_decrypt(name, ciphertext)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.AsymmetricDecryptRequest(
name=name, ciphertext=ciphertext)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_asymmetric_decrypt_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
ciphertext = b'-72'
with pytest.raises(CustomException):
client.asymmetric_decrypt(name, ciphertext)
def test_asymmetric_sign(self):
# Setup Expected Response
signature = b'-100'
expected_response = {'signature': signature}
expected_response = service_pb2.AsymmetricSignResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
digest = {}
response = client.asymmetric_sign(name, digest)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.AsymmetricSignRequest(
name=name, digest=digest)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_asymmetric_sign_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
name = client.crypto_key_version_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]', '[CRYPTO_KEY]',
'[CRYPTO_KEY_VERSION]')
digest = {}
with pytest.raises(CustomException):
client.asymmetric_sign(name, digest)
def test_set_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b'21'
expected_response = {'version': version, 'etag': etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
resource = client.key_ring_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]')
policy = {}
response = client.set_iam_policy(resource, policy)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.SetIamPolicyRequest(
resource=resource, policy=policy)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_set_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
resource = client.key_ring_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]')
policy = {}
with pytest.raises(CustomException):
client.set_iam_policy(resource, policy)
def test_get_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b'21'
expected_response = {'version': version, 'etag': etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
resource = client.key_ring_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]')
response = client.get_iam_policy(resource)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.GetIamPolicyRequest(
resource=resource)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
resource = client.key_ring_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]')
with pytest.raises(CustomException):
client.get_iam_policy(resource)
def test_test_iam_permissions(self):
# Setup Expected Response
expected_response = {}
expected_response = iam_policy_pb2.TestIamPermissionsResponse(
**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup Request
resource = client.key_ring_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]')
permissions = []
response = client.test_iam_permissions(resource, permissions)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_test_iam_permissions_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
client = kms_v1.KeyManagementServiceClient(channel=channel)
# Setup request
resource = client.key_ring_path('[PROJECT]', '[LOCATION]',
'[KEY_RING]')
permissions = []
with pytest.raises(CustomException):
client.test_iam_permissions(resource, permissions)
|
nodice73/hspipeline
|
refs/heads/master
|
web/env/lib/python2.7/site-packages/pip/exceptions.py
|
344
|
"""Exceptions used throughout package"""
from __future__ import absolute_import
from itertools import chain, groupby, repeat
from pip._vendor.six import iteritems
class PipError(Exception):
"""Base pip exception"""
class InstallationError(PipError):
"""General exception during installation"""
class UninstallationError(PipError):
"""General exception during uninstallation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
class RequirementsFileParseError(InstallationError):
"""Raised when a general error occurs parsing a requirements file line."""
class BestVersionAlreadyInstalled(PipError):
"""Raised when the most up-to-date version of a package is already
installed."""
class BadCommand(PipError):
"""Raised when virtualenv or a command is not found"""
class CommandError(PipError):
"""Raised when there is an error in command-line arguments"""
class PreviousBuildDirError(PipError):
"""Raised when there's a previous conflicting build directory"""
class InvalidWheelFilename(InstallationError):
"""Invalid wheel filename."""
class UnsupportedWheel(InstallationError):
"""Unsupported wheel."""
class HashErrors(InstallationError):
"""Multiple HashError instances rolled into one for reporting"""
def __init__(self):
self.errors = []
def append(self, error):
self.errors.append(error)
def __str__(self):
lines = []
self.errors.sort(key=lambda e: e.order)
for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
lines.append(cls.head)
lines.extend(e.body() for e in errors_of_cls)
if lines:
return '\n'.join(lines)
def __nonzero__(self):
return bool(self.errors)
def __bool__(self):
return self.__nonzero__()
class HashError(InstallationError):
"""
A failure to verify a package against known-good hashes
:cvar order: An int sorting hash exception classes by difficulty of
recovery (lower being harder), so the user doesn't bother fretting
about unpinned packages when he has deeper issues, like VCS
dependencies, to deal with. Also keeps error reports in a
deterministic order.
:cvar head: A section heading for display above potentially many
exceptions of this kind
:ivar req: The InstallRequirement that triggered this error. This is
pasted on after the exception is instantiated, because it's not
typically available earlier.
"""
req = None
head = ''
def body(self):
"""Return a summary of me for display under the heading.
        This default implementation simply returns a description of the
triggering requirement.
:param req: The InstallRequirement that provoked this error, with
populate_link() having already been called
"""
return ' %s' % self._requirement_name()
def __str__(self):
return '%s\n%s' % (self.head, self.body())
def _requirement_name(self):
"""Return a description of the requirement that triggered me.
        This default implementation returns the long description of the req,
        with line numbers
"""
return str(self.req) if self.req else 'unknown package'
class VcsHashUnsupported(HashError):
"""A hash was provided for a version-control-system-based requirement, but
we don't have a method for hashing those."""
order = 0
head = ("Can't verify hashes for these requirements because we don't "
"have a way to hash version control repositories:")
class DirectoryUrlHashUnsupported(HashError):
"""A hash was provided for a version-control-system-based requirement, but
we don't have a method for hashing those."""
order = 1
head = ("Can't verify hashes for these file:// requirements because they "
"point to directories:")
class HashMissing(HashError):
"""A hash was needed for a requirement but is absent."""
order = 2
head = ('Hashes are required in --require-hashes mode, but they are '
'missing from some requirements. Here is a list of those '
'requirements along with the hashes their downloaded archives '
'actually had. Add lines like these to your requirements files to '
'prevent tampering. (If you did not enable --require-hashes '
'manually, note that it turns on automatically when any package '
'has a hash.)')
def __init__(self, gotten_hash):
"""
:param gotten_hash: The hash of the (possibly malicious) archive we
just downloaded
"""
self.gotten_hash = gotten_hash
def body(self):
from pip.utils.hashes import FAVORITE_HASH # Dodge circular import.
package = None
if self.req:
# In the case of URL-based requirements, display the original URL
# seen in the requirements file rather than the package name,
# so the output can be directly copied into the requirements file.
package = (self.req.original_link if self.req.original_link
# In case someone feeds something downright stupid
# to InstallRequirement's constructor.
else getattr(self.req, 'req', None))
return ' %s --hash=%s:%s' % (package or 'unknown package',
FAVORITE_HASH,
self.gotten_hash)
class HashUnpinned(HashError):
"""A requirement had a hash specified but was not pinned to a specific
version."""
order = 3
head = ('In --require-hashes mode, all requirements must have their '
'versions pinned with ==. These do not:')
class HashMismatch(HashError):
"""
Distribution file hash values don't match.
:ivar package_name: The name of the package that triggered the hash
        mismatch. Feel free to write to this after the exception is raised to
improve its error message.
"""
order = 4
head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS '
'FILE. If you have updated the package versions, please update '
'the hashes. Otherwise, examine the package contents carefully; '
'someone may have tampered with them.')
def __init__(self, allowed, gots):
"""
:param allowed: A dict of algorithm names pointing to lists of allowed
hex digests
:param gots: A dict of algorithm names pointing to hashes we
actually got from the files under suspicion
"""
self.allowed = allowed
self.gots = gots
def body(self):
return ' %s:\n%s' % (self._requirement_name(),
self._hash_comparison())
def _hash_comparison(self):
"""
Return a comparison of actual and expected hash values.
Example::
Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
or 123451234512345123451234512345123451234512345
Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
"""
def hash_then_or(hash_name):
# For now, all the decent hashes have 6-char names, so we can get
# away with hard-coding space literals.
return chain([hash_name], repeat(' or'))
lines = []
for hash_name, expecteds in iteritems(self.allowed):
prefix = hash_then_or(hash_name)
lines.extend((' Expected %s %s' % (next(prefix), e))
for e in expecteds)
lines.append(' Got %s\n' %
self.gots[hash_name].hexdigest())
prefix = ' or'
return '\n'.join(lines)
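# Illustrative sketch (not part of the original module): what a HashMismatch
# report looks like when no InstallRequirement has been attached (``req``
# stays None, so the requirement is reported as 'unknown package').
def _demo_hash_mismatch():
    import hashlib
    err = HashMismatch({'sha256': ['0' * 64]},
                       {'sha256': hashlib.sha256(b'some archive bytes')})
    # Prints the 'Expected ... / Got ...' comparison rendered by body().
    print(err.body())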
class UnsupportedPythonVersion(InstallationError):
"""Unsupported python version according to Requires-Python package
metadata."""
|
fragforce/fragforce.org
|
refs/heads/dev
|
ffsite/views/__init__.py
|
3
|
# Import all subs
from .events import *
from .sites import *
from .static import *
|
smikes/node-gyp
|
refs/heads/master
|
gyp/gyptest.py
|
80
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner:
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
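

# Hedged usage sketch (assumption, not part of gyptest.py): CommandRunner
# substitutes %-style placeholders from its dictionary before running; with
# active set to False the command is only echoed, never executed.
def _demo_command_runner():
    cr = CommandRunner({'python': sys.executable})
    cr.active = False                      # dry run: display() only
    return cr.run('%(python)s --version')  # echoes e.g. /usr/bin/python --version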
class Unbuffered:
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files
if f.startswith('gyptest') and f.endswith('.py') ])
result.sort()
return result
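

# Hedged example (assumption, not part of gyptest.py): the walker above keeps
# only files named gyptest*.py; a throwaway directory demonstrates that.
def _demo_find_all_gyptest_files():
    import tempfile
    d = tempfile.mkdtemp()
    for name in ('gyptest-foo.py', 'helper.py'):
        open(os.path.join(d, name), 'w').close()
    return find_all_gyptest_files(d)  # -> ['<d>/gyptest-foo.py']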
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] += os.pathsep + extra_path
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
|
mikewiebe-ansible/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/vyos/facts/lag_interfaces/lag_interfaces.py
|
21
|
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The vyos lag_interfaces fact class
This file collects the configuration from the device for a given
resource, parses it, and populates the facts tree based on that
configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from re import findall, search, M
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.vyos.argspec.lag_interfaces. \
lag_interfaces import Lag_interfacesArgs
class Lag_interfacesFacts(object):
""" The vyos lag_interfaces fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = Lag_interfacesArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for lag_interfaces
:param module: the module instance
:param connection: the device connection
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if not data:
data = connection.get_config()
objs = []
lag_names = findall(r'^set interfaces bonding (\S+)', data, M)
if lag_names:
for lag in set(lag_names):
lag_regex = r' %s .+$' % lag
cfg = findall(lag_regex, data, M)
obj = self.render_config(cfg)
output = connection.run_commands(['show interfaces bonding ' + lag + ' slaves'])
lines = output[0].splitlines()
                members = []
                if len(lines) > 1:
                    for line in lines[2:]:
                        splitted_line = line.split()
                        if len(splitted_line) > 1:
                            # append a fresh dict per slave; reusing one dict
                            # would make every entry alias the last member
                            members.append({'member': splitted_line[0]})
obj['name'] = lag.strip("'")
if members:
obj['members'] = members
if obj:
objs.append(obj)
facts = {}
if objs:
facts['lag_interfaces'] = []
params = utils.validate_config(self.argument_spec, {'config': objs})
for cfg in params['config']:
facts['lag_interfaces'].append(utils.remove_empties(cfg))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
arp_monitor_conf = '\n'.join(filter(lambda x: ('arp-monitor' in x), conf))
hash_policy_conf = '\n'.join(filter(lambda x: ('hash-policy' in x), conf))
lag_conf = '\n'.join(filter(lambda x: ('bond' in x), conf))
config = self.parse_attribs(
['mode', 'primary'], lag_conf
)
config['arp_monitor'] = self.parse_arp_monitor(arp_monitor_conf)
config['hash_policy'] = self.parse_hash_policy(hash_policy_conf)
return utils.remove_empties(config)
def parse_attribs(self, attribs, conf):
config = {}
for item in attribs:
value = utils.parse_conf_arg(conf, item)
if value:
config[item] = value.strip("'")
else:
config[item] = None
return utils.remove_empties(config)
def parse_arp_monitor(self, conf):
arp_monitor = None
if conf:
arp_monitor = {}
target_list = []
interval = search(r'^.*arp-monitor interval (.+)', conf, M)
targets = findall(r"^.*arp-monitor target '(.+)'", conf, M)
if targets:
for target in targets:
target_list.append(target)
arp_monitor['target'] = target_list
if interval:
value = interval.group(1).strip("'")
arp_monitor['interval'] = int(value)
return arp_monitor
def parse_hash_policy(self, conf):
hash_policy = None
if conf:
hash_policy = search(r'^.*hash-policy (.+)', conf, M)
hash_policy = hash_policy.group(1).strip("'")
return hash_policy
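

# Hedged illustration (assumption, not part of the Ansible module): the
# arp-monitor parsing above boils down to these two regexes; the config
# sample below is made up.
def _demo_parse_arp_monitor():
    sample = ("set interfaces bonding bond0 arp-monitor interval '30'\n"
              "set interfaces bonding bond0 arp-monitor target '192.0.2.1'")
    interval = search(r'^.*arp-monitor interval (.+)', sample, M)
    targets = findall(r"^.*arp-monitor target '(.+)'", sample, M)
    return {'target': targets,
            'interval': int(interval.group(1).strip("'"))}
    # -> {'target': ['192.0.2.1'], 'interval': 30}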
|
colinligertwood/odoo
|
refs/heads/master
|
openerp/addons/base/tests/test_base.py
|
64
|
import unittest2
import openerp.tests.common as common
from openerp.osv.orm import except_orm
class test_base(common.TransactionCase):
def setUp(self):
super(test_base,self).setUp()
self.res_partner = self.registry('res.partner')
self.res_users = self.registry('res.users')
self.res_partner_title = self.registry('res.partner.title')
# samples use effective TLDs from the Mozilla public suffix
# list at http://publicsuffix.org
self.samples = [
('"Raoul Grosbedon" <raoul@chirurgiens-dentistes.fr> ', 'Raoul Grosbedon', 'raoul@chirurgiens-dentistes.fr'),
('ryu+giga-Sushi@aizubange.fukushima.jp', '', 'ryu+giga-Sushi@aizubange.fukushima.jp'),
('Raoul chirurgiens-dentistes.fr', 'Raoul chirurgiens-dentistes.fr', ''),
(" Raoul O'hara <!@historicalsociety.museum>", "Raoul O'hara", '!@historicalsociety.museum')
]
def test_00_res_partner_name_create(self):
cr, uid = self.cr, self.uid
parse = self.res_partner._parse_partner_name
for text, name, mail in self.samples:
self.assertEqual((name,mail), parse(text), 'Partner name parsing failed')
partner_id, dummy = self.res_partner.name_create(cr, uid, text)
partner = self.res_partner.browse(cr, uid, partner_id)
self.assertEqual(name or mail, partner.name, 'Partner name incorrect')
self.assertEqual(mail or False, partner.email, 'Partner email incorrect')
def test_10_res_partner_find_or_create(self):
cr,uid = self.cr, self.uid
email = self.samples[0][0]
partner_id, dummy = self.res_partner.name_create(cr, uid, email)
found_id = self.res_partner.find_or_create(cr, uid, email)
self.assertEqual(partner_id, found_id, 'find_or_create failed')
new_id = self.res_partner.find_or_create(cr, uid, self.samples[1][0])
self.assertTrue(new_id > partner_id, 'find_or_create failed - should have created new one')
new_id2 = self.res_partner.find_or_create(cr, uid, self.samples[2][0])
self.assertTrue(new_id2 > new_id, 'find_or_create failed - should have created new one again')
def test_15_res_partner_name_search(self):
cr,uid = self.cr, self.uid
for name, active in [
('"A Raoul Grosbedon" <raoul@chirurgiens-dentistes.fr>', False),
('B Raoul chirurgiens-dentistes.fr', True),
("C Raoul O'hara <!@historicalsociety.museum>", True),
('ryu+giga-Sushi@aizubange.fukushima.jp', True),
]:
partner_id, dummy = self.res_partner.name_create(cr, uid, name, context={'default_active': active})
partners = self.res_partner.name_search(cr, uid, 'Raoul')
self.assertEqual(len(partners), 2, 'Incorrect search number result for name_search')
partners = self.res_partner.name_search(cr, uid, 'Raoul', limit=1)
self.assertEqual(len(partners), 1, 'Incorrect search number result for name_search with a limit')
self.assertEqual(partners[0][1], 'B Raoul chirurgiens-dentistes.fr', 'Incorrect partner returned, should be the first active')
def test_20_res_partner_address_sync(self):
cr, uid = self.cr, self.uid
ghoststep = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'GhostStep',
'is_company': True,
'street': 'Main Street, 10',
'phone': '123456789',
'email': 'info@ghoststep.com',
'vat': 'BE0477472701',
'type': 'default'}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Denis Bladesmith <denis.bladesmith@ghoststep.com>')[0])
self.assertEqual(p1.type, 'contact', 'Default type must be "contact"')
p1phone = '123456789#34'
p1.write({'phone': p1phone,
'parent_id': ghoststep.id,
'use_parent_address': True})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
# turn off sync
p1street = 'Different street, 42'
p1.write({'street': p1street,
'use_parent_address': False})
p1.refresh(), ghoststep.refresh()
self.assertEqual(p1.street, p1street, 'Address fields must not be synced after turning sync off')
self.assertNotEqual(ghoststep.street, p1street, 'Parent address must never be touched')
# turn on sync again
p1.write({'use_parent_address': True})
p1.refresh()
self.assertEqual(p1.street, ghoststep.street, 'Address fields must be synced again')
self.assertEqual(p1.phone, p1phone, 'Phone should be preserved after address sync')
self.assertEqual(p1.type, 'contact', 'Type should be preserved after address sync')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
# Modify parent, sync to children
ghoststreet = 'South Street, 25'
ghoststep.write({'street': ghoststreet})
p1.refresh()
self.assertEqual(p1.street, ghoststreet, 'Address fields must be synced automatically')
self.assertEqual(p1.phone, p1phone, 'Phone should not be synced')
self.assertEqual(p1.email, 'denis.bladesmith@ghoststep.com', 'Email should be preserved after sync')
p1street = 'My Street, 11'
p1.write({'street': p1street})
ghoststep.refresh()
self.assertEqual(ghoststep.street, ghoststreet, 'Touching contact should never alter parent')
def test_30_res_partner_first_contact_sync(self):
""" Test initial creation of company/contact pair where contact address gets copied to
company """
cr, uid = self.cr, self.uid
ironshield = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'IronShield')[0])
self.assertFalse(ironshield.is_company, 'Partners are not companies by default')
self.assertFalse(ironshield.use_parent_address, 'use_parent_address defaults to False')
self.assertEqual(ironshield.type, 'contact', 'Default type must be "contact"')
ironshield.write({'type': 'default'}) # force default type to double-check sync
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Isen Hardearth',
'street': 'Strongarm Avenue, 12',
'parent_id': ironshield.id}))
self.assertEquals(p1.type, 'contact', 'Default type must be "contact", not the copied parent type')
ironshield.refresh()
self.assertEqual(ironshield.street, p1.street, 'Address fields should be copied to company')
self.assertTrue(ironshield.is_company, 'Company flag should be turned on after first contact creation')
def test_40_res_partner_address_getc(self):
""" Test address_get address resolution mechanism: it should first go down through descendants,
            stopping when encountering another is_company entity, then go up, stopping again at the first
is_company entity or the root ancestor and if nothing matches, it should use the provided partner
itself """
cr, uid = self.cr, self.uid
elmtree = self.res_partner.browse(cr, uid, self.res_partner.name_create(cr, uid, 'Elmtree')[0])
branch1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 1',
'parent_id': elmtree.id,
'is_company': True}))
leaf10 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 10',
'parent_id': branch1.id,
'type': 'invoice'}))
branch11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 11',
'parent_id': branch1.id,
'type': 'other'}))
leaf111 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 111',
'parent_id': branch11.id,
'type': 'delivery'}))
        branch11.write({'is_company': False}) # force is_company off after creating first child
branch2 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Branch 2',
'parent_id': elmtree.id,
'is_company': True}))
leaf21 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 21',
'parent_id': branch2.id,
'type': 'delivery'}))
leaf22 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 22',
'parent_id': branch2.id}))
leaf23 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid, {'name': 'Leaf 23',
'parent_id': branch2.id,
'type': 'default'}))
# go up, stop at branch1
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': leaf111.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch11.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': branch11.id}, 'Invalid address resolution')
        # go down, stop at all child companies
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': elmtree.id,
'invoice': elmtree.id,
'contact': elmtree.id,
'other': elmtree.id,
'default': elmtree.id}, 'Invalid address resolution')
# go down through children
self.assertEqual(self.res_partner.address_get(cr, uid, [branch1.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf111.id,
'invoice': leaf10.id,
'contact': branch1.id,
'other': branch11.id,
'default': branch1.id}, 'Invalid address resolution')
self.assertEqual(self.res_partner.address_get(cr, uid, [branch2.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution')
# go up then down through siblings
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf21.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id
}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf22.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': leaf22.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution, should scan commercial entity ancestor and its descendants')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf23.id], ['delivery', 'invoice', 'contact', 'other', 'default']),
{'delivery': leaf21.id,
'invoice': leaf23.id,
'contact': branch2.id,
'other': leaf23.id,
'default': leaf23.id}, 'Invalid address resolution, `default` should only override if no partner with specific type exists')
# empty adr_pref means only 'default'
self.assertEqual(self.res_partner.address_get(cr, uid, [elmtree.id], []),
{'default': elmtree.id}, 'Invalid address resolution, no default means commercial entity ancestor')
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'default': leaf111.id}, 'Invalid address resolution, no default means contact itself')
branch11.write({'type': 'default'})
self.assertEqual(self.res_partner.address_get(cr, uid, [leaf111.id], []),
{'default': branch11.id}, 'Invalid address resolution, branch11 should now be default')
def test_50_res_partner_commercial_sync(self):
cr, uid = self.cr, self.uid
p0 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sigurd Sunknife',
'email': 'ssunknife@gmail.com'}))
sunhelm = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Sunhelm',
'is_company': True,
'street': 'Rainbow Street, 13',
'phone': '1122334455',
'email': 'info@sunhelm.com',
'vat': 'BE0477472701',
'child_ids': [(4, p0.id),
(0, 0, {'name': 'Alrik Greenthorn',
'email': 'agr@sunhelm.com'})],
}))
p1 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Otto Blackwood',
'email': 'otto.blackwood@sunhelm.com',
'parent_id': sunhelm.id}))
p11 = self.res_partner.browse(cr, uid, self.res_partner.create(cr, uid,
{'name': 'Gini Graywool',
'email': 'ggr@sunhelm.com',
'parent_id': p1.id}))
p2 = self.res_partner.browse(cr, uid, self.res_partner.search(cr, uid,
[('email', '=', 'agr@sunhelm.com')])[0])
for p in (p0, p1, p11, p2):
p.refresh()
self.assertEquals(p.commercial_partner_id, sunhelm, 'Incorrect commercial entity resolution')
self.assertEquals(p.vat, sunhelm.vat, 'Commercial fields must be automatically synced')
sunhelmvat = 'BE0123456789'
sunhelm.write({'vat': sunhelmvat})
for p in (p0, p1, p11, p2):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Commercial fields must be automatically and recursively synced')
p1vat = 'BE0987654321'
p1.write({'vat': p1vat})
for p in (sunhelm, p0, p11, p2):
p.refresh()
self.assertEquals(p.vat, sunhelmvat, 'Sync to children should only work downstream and on commercial entities')
# promote p1 to commercial entity
vals = p1.onchange_type(is_company=True)['value']
p1.write(dict(vals, parent_id=sunhelm.id,
is_company=True,
name='Sunhelm Subsidiary'))
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
self.assertEquals(p1.commercial_partner_id, p1, 'Incorrect commercial entity resolution after setting is_company')
# writing on parent should not touch child commercial entities
sunhelmvat2 = 'BE0112233445'
sunhelm.write({'vat': sunhelmvat2})
p1.refresh()
self.assertEquals(p1.vat, p1vat, 'Setting is_company should stop auto-sync of commercial fields')
p0.refresh()
self.assertEquals(p0.vat, sunhelmvat2, 'Commercial fields must be automatically synced')
def test_60_read_group(self):
cr, uid = self.cr, self.uid
title_sir = self.res_partner_title.create(cr, uid, {'name': 'Sir', 'domain': 'contact'})
title_lady = self.res_partner_title.create(cr, uid, {'name': 'Lady', 'domain': 'contact'})
test_users = [
{'name': 'Alice', 'login': 'alice', 'color': 1, 'function': 'Friend', 'date': '2015-03-28', 'title': title_lady},
{'name': 'Alice', 'login': 'alice2', 'color': 0, 'function': 'Friend', 'date': '2015-01-28', 'title': title_lady},
{'name': 'Bob', 'login': 'bob', 'color': 2, 'function': 'Friend', 'date': '2015-03-02', 'title': title_sir},
{'name': 'Eve', 'login': 'eve', 'color': 3, 'function': 'Eavesdropper', 'date': '2015-03-20', 'title': title_lady},
{'name': 'Nab', 'login': 'nab', 'color': -3, 'function': '5$ Wrench', 'date': '2014-09-10', 'title': title_sir},
{'name': 'Nab', 'login': 'nab-she', 'color': 6, 'function': '5$ Wrench', 'date': '2014-01-02', 'title': title_lady},
]
ids = [self.res_users.create(cr, uid, u) for u in test_users]
domain = [('id', 'in', ids)]
# group on local char field without domain and without active_test (-> empty WHERE clause)
groups_data = self.res_users.read_group(cr, uid, [], fields=['login'], groupby=['login'], orderby='login DESC', context={'active_test': False})
self.assertGreater(len(groups_data), 6, "Incorrect number of results when grouping on a field")
# group on local char field with limit
groups_data = self.res_users.read_group(cr, uid, domain, fields=['login'], groupby=['login'], orderby='login DESC', limit=3, offset=3)
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field with limit")
self.assertEqual(['bob', 'alice2', 'alice'], [g['login'] for g in groups_data], 'Result mismatch')
# group on inherited char field, aggregate on int field (second groupby ignored on purpose)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color', 'function'], groupby=['function', 'login'])
self.assertEqual(len(groups_data), 3, "Incorrect number of results when grouping on a field")
self.assertEqual(['5$ Wrench', 'Eavesdropper', 'Friend'], [g['function'] for g in groups_data], 'incorrect read_group order')
for group_data in groups_data:
self.assertIn('color', group_data, "Aggregated data for the column 'color' is not present in read_group return values")
self.assertEqual(group_data['color'], 3, "Incorrect sum for aggregated data for the column 'color'")
# group on inherited char field, reverse order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
# group on int field, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['color'], groupby='color')
self.assertEqual([-3, 0, 1, 2, 3, 6], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# multi group, second level is int field, should still be summed in first level grouping
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby=['name', 'color'], orderby='name DESC')
self.assertEqual(['Nab', 'Eve', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 3, 2, 1], [g['color'] for g in groups_data], 'Incorrect ordering of the list')
# group on inherited char field, multiple orders with directions
groups_data = self.res_users.read_group(cr, uid, domain, fields=['name', 'color'], groupby='name', orderby='color DESC, name')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['Eve', 'Nab', 'Bob', 'Alice'], [g['name'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 2, 1, 2], [g['name_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, default ordering
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'])
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['January 2014', 'September 2014', 'January 2015', 'March 2015'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([1, 1, 1, 3], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited date column (res_partner.date) -> Year-Month, custom order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'date'], groupby=['date'], orderby='date DESC')
self.assertEqual(len(groups_data), 4, "Incorrect number of results when grouping on a field")
self.assertEqual(['March 2015', 'January 2015', 'September 2014', 'January 2014'], [g['date'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([3, 1, 1, 1], [g['date_count'] for g in groups_data], 'Incorrect number of results')
# group on inherited many2one (res_partner.title), default order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'])
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), reversed natural order
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), multiple orders with m2o in second position
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby="color desc, title desc")
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_lady, 'Lady'), (title_sir, 'Sir')], [g['title'] for g in groups_data], 'Incorrect ordering of the result')
self.assertEqual([4, 2], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([10, -1], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
# group on inherited many2one (res_partner.title), ordered by other inherited field (color)
groups_data = self.res_users.read_group(cr, uid, domain, fields=['function', 'color', 'title'], groupby=['title'], orderby='color')
self.assertEqual(len(groups_data), 2, "Incorrect number of results when grouping on a field")
# m2o is returned as a (id, label) pair
self.assertEqual([(title_sir, 'Sir'), (title_lady, 'Lady')], [g['title'] for g in groups_data], 'Incorrect ordering of the list')
self.assertEqual([2, 4], [g['title_count'] for g in groups_data], 'Incorrect number of results')
self.assertEqual([-1, 10], [g['color'] for g in groups_data], 'Incorrect aggregation of int column')
class test_partner_recursion(common.TransactionCase):
def setUp(self):
super(test_partner_recursion,self).setUp()
self.res_partner = self.registry('res.partner')
cr, uid = self.cr, self.uid
self.p1 = self.res_partner.name_create(cr, uid, 'Elmtree')[0]
self.p2 = self.res_partner.create(cr, uid, {'name': 'Elmtree Child 1', 'parent_id': self.p1})
self.p3 = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.1', 'parent_id': self.p2})
# split 101, 102, 103 tests to force SQL rollback between them
def test_101_res_partner_recursion(self):
cr, uid, p1, p3 = self.cr, self.uid, self.p1, self.p3
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p1], {'parent_id': p3})
def test_102_res_partner_recursion(self):
cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p2], {'parent_id': p3})
def test_103_res_partner_recursion(self):
cr, uid, p3 = self.cr, self.uid, self.p3
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p3], {'parent_id': p3})
def test_104_res_partner_recursion_indirect_cycle(self):
""" Indirect hacky write to create cycle in children """
cr, uid, p2, p3 = self.cr, self.uid, self.p2, self.p3
p3b = self.res_partner.create(cr, uid, {'name': 'Elmtree Grand-Child 1.2', 'parent_id': self.p2})
self.assertRaises(except_orm, self.res_partner.write, cr, uid, [p2],
{'child_ids': [(1, p3, {'parent_id': p3b}), (1, p3b, {'parent_id': p3})]})
def test_110_res_partner_recursion_multi_update(self):
""" multi-write on several partners in same hierarchy must not trigger a false cycle detection """
cr, uid, p1, p2, p3 = self.cr, self.uid, self.p1, self.p2, self.p3
self.assertTrue(self.res_partner.write(cr, uid, [p1,p2,p3], {'phone': '123456'}))
class test_translation(common.TransactionCase):
def setUp(self):
super(test_translation, self).setUp()
self.res_category = self.registry('res.partner.category')
self.ir_translation = self.registry('ir.translation')
cr, uid = self.cr, self.uid
self.registry('ir.translation').load_module_terms(cr, ['base'], ['fr_FR'])
self.cat_id = self.res_category.create(cr, uid, {'name': 'Customers'})
self.ir_translation.create(cr, uid, {'name': 'res.partner.category,name', 'module':'base',
'value': 'Clients', 'res_id': self.cat_id, 'lang':'fr_FR', 'state':'translated', 'type': 'model'})
def test_101_create_translated_record(self):
cr, uid = self.cr, self.uid
no_context_cat = self.res_category.browse(cr, uid, self.cat_id)
self.assertEqual(no_context_cat.name, 'Customers', "Error in basic name_get")
fr_context_cat = self.res_category.browse(cr, uid, self.cat_id, context={'lang':'fr_FR'})
self.assertEqual(fr_context_cat.name, 'Clients', "Translation not found")
def test_102_duplicate_record(self):
cr, uid = self.cr, self.uid
self.new_cat_id = self.res_category.copy(cr, uid, self.cat_id, context={'lang':'fr_FR'})
no_context_cat = self.res_category.browse(cr, uid, self.new_cat_id)
self.assertEqual(no_context_cat.name, 'Customers', "Duplication did not set untranslated value")
fr_context_cat = self.res_category.browse(cr, uid, self.new_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients', "Did not find translation for initial value")
def test_103_duplicate_record_fr(self):
cr, uid = self.cr, self.uid
self.new_fr_cat_id = self.res_category.copy(cr, uid, self.cat_id, default={'name': 'Clients (copie)'}, context={'lang':'fr_FR'})
no_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id)
self.assertEqual(no_context_cat.name, 'Customers', "Duplication erased original untranslated value")
fr_context_cat = self.res_category.browse(cr, uid, self.new_fr_cat_id, context={'lang':'fr_FR'})
        self.assertEqual(fr_context_cat.name, 'Clients (copie)', "Did not use default value for translated value")
test_state = None
#: Stores state information across multiple test classes
def setUpModule():
global test_state
test_state = {}
def tearDownModule():
global test_state
test_state = None
class TestPhaseInstall00(unittest2.TestCase):
"""
WARNING: Relies on tests being run in alphabetical order
"""
@classmethod
def setUpClass(cls):
cls.state = None
def test_00_setup(self):
type(self).state = 'init'
@common.at_install(False)
def test_01_no_install(self):
type(self).state = 'error'
def test_02_check(self):
self.assertEqual(
self.state, 'init',
"Testcase state should not have been transitioned from 00")
class TestPhaseInstall01(unittest2.TestCase):
at_install = False
def test_default_norun(self):
self.fail("An unmarket test in a non-at-install case should not run")
@common.at_install(True)
def test_set_run(self):
test_state['set_at_install'] = True
class TestPhaseInstall02(unittest2.TestCase):
"""
Can't put the check for test_set_run in the same class: if
@common.at_install does not work for test_set_run, it won't work for
    the other one either. Thus the check that test_set_run has indeed
    run correctly is moved to a separate class.
Warning: relies on *classes* being run in alphabetical order in test
modules
"""
def test_check_state(self):
self.assertTrue(
test_state.get('set_at_install'),
"The flag should be set if local overriding of runstate")
if __name__ == '__main__':
unittest2.main()
|
bayusantoso/final-assignment-web-ontology
|
refs/heads/master
|
IMPLEMENTATION/Application/SourceCode/GOApps/flask/Lib/site-packages/pip/_vendor/distlib/version.py
|
426
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-386,
distribute-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
        epoch = int(groups[0][:-1])  # drop the trailing '!'
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
_normalized_key = _pep_440_key
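

# Hedged illustration (assumption, not part of distlib): the key tuples built
# above order PEP 440 versions as dev < alpha < final < post.
def _demo_pep_440_ordering():
    versions = ['1.0.dev1', '1.0a1', '1.0', '1.0.post1']
    return sorted(versions, key=_pep_440_key) == versions  # True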
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
        1            # minimum of two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
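

# Hedged illustration (assumption, not part of distlib): _match_prefix treats
# y as a release-segment prefix of x, so '1.4.5' matches '1.4' while '1.40'
# does not.
def _demo_match_prefix():
    return (_match_prefix('1.4.5', '1.4'),   # True: next char is '.'
            _match_prefix('1.40', '1.4'))    # False: next char is '0'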
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),   # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
    - 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
    @returns A rational version string, or None if one couldn't be determined.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
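

# Hedged examples (assumption, not part of distlib) of the normalizations
# above; each input fails _normalized_key but yields a rational suggestion.
def _demo_suggest_normalized():
    return [_suggest_normalized_version(s)
            for s in ('1.0-alpha2', '2.1-rc2', '1.0-dev-r371')]
    # -> ['1.0a2', '2.1c2', '1.0.dev371']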
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
result.append(p)
return tuple(result)
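

# Hedged illustration (assumption, not part of distlib): _legacy_key zero-pads
# numeric parts, so '1.10' correctly sorts above '1.9'.
def _demo_legacy_key_order():
    return _legacy_key('1.9') < _legacy_key('1.10')  # True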
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
            logger.warning('Cannot compute compatible match for version %s '
                           'and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
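

# Hedged illustration (assumption, not part of distlib): a pre-release tuple
# sorts below the '|' marker that _semantic_key uses for final releases.
def _demo_semantic_order():
    return _semantic_key('1.0.0-alpha') < _semantic_key('1.0.0')  # True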
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
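

# Hedged usage sketch (assumption, not part of distlib): fetch a scheme and
# validate version strings with it.
def _demo_get_scheme():
    scheme = get_scheme('normalized')
    return (scheme.is_valid_version('1.0.post1'),  # True
            scheme.is_valid_version('1.0-foo'))    # False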
|
sampadsaha5/sympy
|
refs/heads/master
|
sympy/ntheory/continued_fraction.py
|
33
|
from sympy.core.numbers import Integer, Rational
def continued_fraction_periodic(p, q, d=0):
r"""
Find the periodic continued fraction expansion of a quadratic irrational.
Compute the continued fraction expansion of a rational or a
quadratic irrational number, i.e. `\frac{p + \sqrt{d}}{q}`, where
`p`, `q` and `d \ge 0` are integers.
Returns the continued fraction representation (canonical form) as
a list of integers, optionally ending (for quadratic irrationals)
with repeating block as the last term of this list.
Parameters
==========
p : int
the rational part of the number's numerator
q : int
the denominator of the number
d : int, optional
        the irrational part (discriminant) of the number's numerator
Examples
========
>>> from sympy.ntheory.continued_fraction import continued_fraction_periodic
>>> continued_fraction_periodic(3, 2, 7)
[2, [1, 4, 1, 1]]
Golden ratio has the simplest continued fraction expansion:
>>> continued_fraction_periodic(1, 2, 5)
[[1]]
    If the discriminant is zero or a perfect square then the number will be a
rational number:
>>> continued_fraction_periodic(4, 3, 0)
[1, 3]
>>> continued_fraction_periodic(4, 3, 49)
[3, 1, 2]
See Also
========
continued_fraction_iterator, continued_fraction_reduce
References
==========
.. [1] http://en.wikipedia.org/wiki/Periodic_continued_fraction
.. [2] K. Rosen. Elementary Number theory and its applications.
Addison-Wesley, 3 Sub edition, pages 379-381, January 1992.
"""
from sympy.core.compatibility import as_int
from sympy.functions import sqrt
p, q, d = list(map(as_int, [p, q, d]))
sd = sqrt(d)
if q == 0:
raise ValueError("The denominator is zero.")
if d < 0:
raise ValueError("Delta supposed to be a non-negative "
"integer, got %d" % d)
elif d == 0 or sd.is_integer:
# the number is a rational number
return list(continued_fraction_iterator(Rational(p + sd, q)))
if (d - p**2)%q:
d *= q**2
sd *= q
p *= abs(q)
q *= abs(q)
terms = []
pq = {}
while (p, q) not in pq:
pq[(p, q)] = len(terms)
terms.append(int((p + sd)/q))
p = terms[-1]*q - p
        q = (d - p**2)//q  # exact: q always divides d - p**2 here
i = pq[(p, q)]
return terms[:i] + [terms[i:]]
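

# Hedged sketch (assumption, not part of sympy): the loop above detects the
# period by revisiting a (p, q) state. A pure-integer rerun for sqrt(2),
# i.e. p=0, q=1, d=2, yields [1, [2]]. Uses math.isqrt (Python 3.8+), which
# floors exactly like int((p + sqrt(d))/q) when q > 0 and d is not square.
def _demo_sqrt2_expansion():
    from math import isqrt
    p, q, d = 0, 1, 2
    sd = isqrt(d)
    terms, pq = [], {}
    while (p, q) not in pq:
        pq[(p, q)] = len(terms)
        terms.append((p + sd) // q)
        p = terms[-1]*q - p
        q = (d - p*p) // q
    i = pq[(p, q)]
    return terms[:i] + [terms[i:]]  # -> [1, [2]]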
def continued_fraction_reduce(cf):
"""
Reduce a continued fraction to a rational or quadratic irrational.
Compute the rational or quadratic irrational number from its
terminating or periodic continued fraction expansion. The
continued fraction expansion (cf) should be supplied as a
terminating iterator supplying the terms of the expansion. For
terminating continued fractions, this is equivalent to
``list(continued_fraction_convergents(cf))[-1]``, only a little more
efficient. If the expansion has a repeating part, a list of the
repeating terms should be returned as the last element from the
iterator. This is the format returned by
continued_fraction_periodic.
For quadratic irrationals, returns the largest solution found,
which is generally the one sought, if the fraction is in canonical
form (all terms positive except possibly the first).
Examples
========
>>> from sympy.ntheory.continued_fraction import continued_fraction_reduce
>>> continued_fraction_reduce([1, 2, 3, 4, 5])
225/157
>>> continued_fraction_reduce([-2, 1, 9, 7, 1, 2])
-256/233
>>> continued_fraction_reduce([2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8]).n(10)
2.718281835
>>> continued_fraction_reduce([1, 4, 2, [3, 1]])
(sqrt(21) + 287)/238
>>> continued_fraction_reduce([[1]])
1/2 + sqrt(5)/2
>>> from sympy.ntheory.continued_fraction import continued_fraction_periodic
>>> continued_fraction_reduce(continued_fraction_periodic(8, 5, 13))
(sqrt(13) + 8)/5
See Also
========
continued_fraction_periodic
"""
from sympy.core.symbol import Dummy
from sympy.solvers import solve
period = []
x = Dummy('x')
def untillist(cf):
for nxt in cf:
if isinstance(nxt, list):
period.extend(nxt)
yield x
break
yield nxt
a = Integer(0)
for a in continued_fraction_convergents(untillist(cf)):
pass
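    # A periodic tail y satisfies y = [period; y]: reducing one pass of the
    # repeating terms with y appended must reproduce y, so solve that
    # fixed-point equation and keep the larger root.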
if period:
y = Dummy('y')
solns = solve(continued_fraction_reduce(period + [y]) - y, y)
solns.sort()
pure = solns[-1]
return a.subs(x, pure).radsimp()
else:
return a
def continued_fraction_iterator(x):
"""
Return continued fraction expansion of x as iterator.
Examples
========
>>> from sympy.core import Rational, pi
>>> from sympy.ntheory.continued_fraction import continued_fraction_iterator
>>> list(continued_fraction_iterator(Rational(3, 8)))
[0, 2, 1, 2]
>>> list(continued_fraction_iterator(Rational(-3, 8)))
[-1, 1, 1, 1, 2]
>>> for i, v in enumerate(continued_fraction_iterator(pi)):
... if i > 7:
... break
... print(v)
3
7
15
1
292
1
1
1
References
==========
.. [1] http://en.wikipedia.org/wiki/Continued_fraction
"""
from sympy.functions import floor
while True:
i = floor(x)
yield i
x -= i
if not x:
break
x = 1/x
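# For irrational x the remainder never reaches zero, so this generator is
# infinite (as with pi above); it terminates only for rational input.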
def continued_fraction_convergents(cf):
"""
Return an iterator over the convergents of a continued fraction (cf).
The parameter should be an iterable returning successive
partial quotients of the continued fraction, such as might be
returned by continued_fraction_iterator. In computing the
convergents, the continued fraction need not be strictly in
canonical form (all integers, all but the first positive).
Rational and negative elements may be present in the expansion.
Examples
========
>>> from sympy.core import Rational, pi
>>> from sympy import S
>>> from sympy.ntheory.continued_fraction import \
continued_fraction_convergents, continued_fraction_iterator
>>> list(continued_fraction_convergents([0, 2, 1, 2]))
[0, 1/2, 1/3, 3/8]
>>> list(continued_fraction_convergents([1, S('1/2'), -7, S('1/4')]))
[1, 3, 19/5, 7]
>>> it = continued_fraction_convergents(continued_fraction_iterator(pi))
>>> for n in range(7):
... print(next(it))
3
22/7
333/106
355/113
103993/33102
104348/33215
208341/66317
See Also
========
continued_fraction_iterator
"""
p_2, q_2 = Integer(0), Integer(1)
p_1, q_1 = Integer(1), Integer(0)
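    # Standard recurrence: p_n = a_n*p_{n-1} + p_{n-2} and
    # q_n = a_n*q_{n-1} + q_{n-2}, seeded so the first convergent is a_0/1.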
for a in cf:
p, q = a*p_1 + p_2, a*q_1 + q_2
p_2, q_2 = p_1, q_1
p_1, q_1 = p, q
yield p/q
|
Metaswitch/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/loadbalancers/tabs.py
|
30
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers import tables
class PoolsTab(tabs.TableTab):
table_classes = (tables.PoolsTable,)
name = _("Pools")
slug = "pools"
template_name = "horizon/common/_detail_table.html"
def get_poolstable_data(self):
pools = []
try:
request = self.tab_group.request
tenant_id = self.request.user.tenant_id
pools = api.lbaas.pool_list(request,
tenant_id=tenant_id)
fips = None
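            # Fetch the tenant's floating IPs at most once, and only when
            # some pool actually has a VIP to annotate.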
for pool in pools:
if hasattr(pool, "vip") and pool.vip:
if not fips:
fips = api.network.tenant_floating_ip_list(request)
vip_fip = [fip for fip in fips
if fip.port_id == pool.vip.port_id]
if vip_fip:
pool.vip.fip = vip_fip[0]
except Exception:
exceptions.handle(self.tab_group.request,
_('Unable to retrieve pools list.'))
return pools
class MembersTab(tabs.TableTab):
table_classes = (tables.MembersTable,)
name = _("Members")
slug = "members"
template_name = "horizon/common/_detail_table.html"
def get_memberstable_data(self):
try:
tenant_id = self.request.user.tenant_id
members = api.lbaas.member_list(self.tab_group.request,
tenant_id=tenant_id)
except Exception:
members = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve member list.'))
return members
class MonitorsTab(tabs.TableTab):
table_classes = (tables.MonitorsTable,)
name = _("Monitors")
slug = "monitors"
template_name = "horizon/common/_detail_table.html"
def get_monitorstable_data(self):
try:
tenant_id = self.request.user.tenant_id
monitors = api.lbaas.pool_health_monitor_list(
self.tab_group.request, tenant_id=tenant_id)
except Exception:
monitors = []
exceptions.handle(self.tab_group.request,
_('Unable to retrieve monitor list.'))
return monitors
class LoadBalancerTabs(tabs.TabGroup):
slug = "lbtabs"
tabs = (PoolsTab, MembersTab, MonitorsTab)
sticky = True
class PoolDetailsTab(tabs.Tab):
name = _("Pool Details")
slug = "pooldetails"
template_name = "project/loadbalancers/_pool_details.html"
def get_context_data(self, request):
pool = self.tab_group.kwargs['pool']
return {'pool': pool}
class VipDetailsTab(tabs.Tab):
name = _("VIP Details")
slug = "vipdetails"
template_name = "project/loadbalancers/_vip_details.html"
def get_context_data(self, request):
vid = self.tab_group.kwargs['vip_id']
vip = []
try:
vip = api.lbaas.vip_get(request, vid)
fips = api.network.tenant_floating_ip_list(self.tab_group.request)
vip_fip = [fip for fip in fips
if fip.port_id == vip.port.id]
if vip_fip:
vip.fip = vip_fip[0]
except Exception:
exceptions.handle(self.tab_group.request,
_('Unable to retrieve VIP details.'))
return {'vip': vip}
class MemberDetailsTab(tabs.Tab):
name = _("Member Details")
slug = "memberdetails"
template_name = "project/loadbalancers/_member_details.html"
def get_context_data(self, request):
member = self.tab_group.kwargs['member']
return {'member': member}
class MonitorDetailsTab(tabs.Tab):
name = _("Monitor Details")
slug = "monitordetails"
template_name = "project/loadbalancers/_monitor_details.html"
def get_context_data(self, request):
monitor = self.tab_group.kwargs['monitor']
return {'monitor': monitor}
class PoolDetailsTabs(tabs.TabGroup):
slug = "pooltabs"
tabs = (PoolDetailsTab,)
class VipDetailsTabs(tabs.TabGroup):
slug = "viptabs"
tabs = (VipDetailsTab,)
class MemberDetailsTabs(tabs.TabGroup):
slug = "membertabs"
tabs = (MemberDetailsTab,)
class MonitorDetailsTabs(tabs.TabGroup):
slug = "monitortabs"
tabs = (MonitorDetailsTab,)
|
mfellner/maximilianfellner.eu
|
refs/heads/master
|
app/shared/models/__init__.py
|
1
|
# -*- coding: utf-8 -*-
from abc import abstractmethod, ABCMeta
from flask.json import jsonify
from flask.ext.sqlalchemy import SQLAlchemy
import serialize
db = SQLAlchemy()
class JSendResponse(serialize.SerializableMixin):
"""Base class for restful JSON responses according to the JSend specification (http://labs.omniti.com/labs/jsend).
:param status: either 'success', 'fail' or 'error'.
:type status: str
"""
__metaclass__ = ABCMeta
def __init__(self, status):
self.status = status
@staticmethod
def new_success(data=None):
"""Create a new 'success' response.
:param data: optional data of the response.
:type data: object
:returns: DataResponse with status 'success'.
"""
return DataResponse('success', data)
@staticmethod
def new_fail(data):
"""Create a new 'fail' response.
:param data: object explaining the failure.
:type data: object
:returns: DataResponse with status 'fail'.
"""
return DataResponse('fail', data)
@staticmethod
def new_error(message):
"""Create a new 'error' response.
:param message: message explaining the error.
:type message: str
:returns: MessageResponse with status 'error'.
"""
return MessageResponse('error', message)
@abstractmethod
def serializable_fields(self, **kwargs):
return {}
def jsonify(self, **kwargs):
return jsonify(self.serialize(**kwargs))
class DataResponse(JSendResponse):
"""Response with a status and optional data.
:param status: either 'success' or 'fail'
:type status: str
:param data: optional data of the response. Data which needs to be formatted must implement Serializable.
"""
def __init__(self, status, data=None):
self.data = data
super(DataResponse, self).__init__(status)
def serializable_fields(self, **kwargs):
if isinstance(self.data, (serialize.SerializableMixin, list)):
return {'status': serialize.String,
'data': serialize.Nested}
else:
return {'status': serialize.String,
'data': serialize.Raw}
class MessageResponse(JSendResponse):
"""Response with a status and message.
:param status: usually 'error'
:type status: str
:param message: description of the error.
:type message: str
"""
def __init__(self, status, message):
self.message = message
super(MessageResponse, self).__init__(status)
def serializable_fields(self, **kwargs):
return {'status': serialize.String,
'message': serialize.String}
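# Usage sketch (illustrative only; assumes an active Flask request context
# and that SerializableMixin.serialize emits the declared fields):
#     JSendResponse.new_success({'id': 42}).jsonify()
#     # -> JSON body like {"status": "success", "data": {"id": 42}}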
|
Ferada/rope
|
refs/heads/master
|
ropetest/objectinfertest.py
|
4
|
import unittest
import rope.base.project
import rope.base.builtins
from rope.base import libutils
from ropetest import testutils
class ObjectInferTest(unittest.TestCase):
def setUp(self):
super(ObjectInferTest, self).setUp()
self.project = testutils.sample_project()
def tearDown(self):
testutils.remove_project(self.project)
super(ObjectInferTest, self).tearDown()
def test_simple_type_inferencing(self):
code = 'class Sample(object):\n pass\na_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_classes_defined_in_holding_scope(self):
code = 'class Sample(object):\n pass\n' \
'def a_func():\n a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
a_var = scope['a_func'].get_object().\
get_scope()['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_classes_in_class_methods(self):
code = 'class Sample(object):\n pass\n' \
'class Another(object):\n' \
' def a_method():\n a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
another_class = scope['Another'].get_object()
a_var = another_class['a_method'].\
get_object().get_scope()['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_class_attributes(self):
code = 'class Sample(object):\n pass\n' \
'class Another(object):\n' \
' def __init__(self):\n self.a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
another_class = scope['Another'].get_object()
a_var = another_class['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_for_in_class_assignments(self):
code = 'class Sample(object):\n pass\n' \
'class Another(object):\n an_attr = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
another_class = scope['Another'].get_object()
an_attr = another_class['an_attr'].get_object()
self.assertEquals(sample_class, an_attr.get_type())
def test_simple_type_inferencing_for_chained_assignments(self):
mod = 'class Sample(object):\n pass\n' \
'copied_sample = Sample'
mod_scope = libutils.get_string_scope(self.project, mod)
sample_class = mod_scope['Sample']
copied_sample = mod_scope['copied_sample']
self.assertEquals(sample_class.get_object(),
copied_sample.get_object())
def test_following_chained_assignments_avoiding_circles(self):
mod = 'class Sample(object):\n pass\n' \
'sample_class = Sample\n' \
'sample_class = sample_class\n'
mod_scope = libutils.get_string_scope(self.project, mod)
sample_class = mod_scope['Sample']
sample_class_var = mod_scope['sample_class']
self.assertEquals(sample_class.get_object(),
sample_class_var.get_object())
def test_function_returned_object_static_type_inference1(self):
src = 'class Sample(object):\n pass\n' \
'def a_func():\n return Sample\n' \
'a_var = a_func()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample']
a_var = scope['a_var']
self.assertEquals(sample_class.get_object(), a_var.get_object())
def test_function_returned_object_static_type_inference2(self):
src = 'class Sample(object):\n pass\n' \
'def a_func():\n return Sample()\n' \
'a_var = a_func()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_recursive_function_returned_object_static_type_inference(self):
src = 'class Sample(object):\n pass\n' \
'def a_func():\n' \
' if True:\n return Sample()\n' \
' else:\n return a_func()\n' \
'a_var = a_func()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_func_returned_obj_using_call_spec_func_static_type_infer(self):
src = 'class Sample(object):\n' \
' def __call__(self):\n return Sample\n' \
'sample = Sample()\na_var = sample()'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample']
a_var = scope['a_var']
self.assertEquals(sample_class.get_object(), a_var.get_object())
def test_list_type_inferencing(self):
src = 'class Sample(object):\n pass\na_var = [Sample()]\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertNotEquals(sample_class, a_var.get_type())
def test_attributed_object_inference(self):
src = 'class Sample(object):\n' \
' def __init__(self):\n self.a_var = None\n' \
' def set(self):\n self.a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = sample_class['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_getting_property_attributes(self):
src = 'class A(object):\n pass\n' \
'def f(*args):\n return A()\n' \
'class B(object):\n p = property(f)\n' \
'a_var = B().p\n'
pymod = libutils.get_string_module(self.project, src)
a_class = pymod['A'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(a_class, a_var.get_type())
def test_getting_property_attributes_with_method_getters(self):
src = 'class A(object):\n pass\n' \
'class B(object):\n def p_get(self):\n return A()\n' \
' p = property(p_get)\n' \
'a_var = B().p\n'
pymod = libutils.get_string_module(self.project, src)
a_class = pymod['A'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(a_class, a_var.get_type())
def test_lambda_functions(self):
code = 'class C(object):\n pass\n' \
'l = lambda: C()\na_var = l()'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_mixing_subscript_with_tuple_assigns(self):
code = 'class C(object):\n attr = 0\n' \
'd = {}\nd[0], b = (0, C())\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['b'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_mixing_ass_attr_with_tuple_assignment(self):
code = 'class C(object):\n attr = 0\n' \
'c = C()\nc.attr, b = (0, C())\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['b'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_mixing_slice_with_tuple_assigns(self):
mod = libutils.get_string_module(
self.project,
'class C(object):\n attr = 0\n'
'd = [None] * 3\nd[0:2], b = ((0,), C())\n')
c_class = mod['C'].get_object()
a_var = mod['b'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_nested_tuple_assignments(self):
mod = libutils.get_string_module(
self.project,
'class C1(object):\n pass\nclass C2(object):\n pass\n'
'a, (b, c) = (C1(), (C2(), C1()))\n')
c1_class = mod['C1'].get_object()
c2_class = mod['C2'].get_object()
a_var = mod['a'].get_object()
b_var = mod['b'].get_object()
c_var = mod['c'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
self.assertEquals(c1_class, c_var.get_type())
def test_empty_tuples(self):
mod = libutils.get_string_module(
self.project, 't = ()\na, b = t\n')
a = mod['a'].get_object() # noqa
def test_handling_generator_functions(self):
code = 'class C(object):\n pass\n' \
'def f():\n yield C()\n' \
'for c in f():\n a_var = c\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_handling_generator_functions_for_strs(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('def f():\n yield ""\n'
'for s in f():\n a_var = s\n')
pymod = self.project.get_pymodule(mod)
a_var = pymod['a_var'].get_object()
self.assertTrue(isinstance(a_var.get_type(), rope.base.builtins.Str))
def test_considering_nones_to_be_unknowns(self):
code = 'class C(object):\n pass\n' \
'a_var = None\na_var = C()\na_var = None\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_basic_list_comprehensions(self):
code = 'class C(object):\n pass\n' \
'l = [C() for i in range(1)]\na_var = l[0]\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_basic_generator_expressions(self):
code = 'class C(object):\n pass\n' \
'l = (C() for i in range(1))\na_var = list(l)[0]\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_list_comprehensions_and_loop_var(self):
code = 'class C(object):\n pass\n' \
'c_objects = [C(), C()]\n' \
'l = [c for c in c_objects]\na_var = l[0]\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_list_comprehensions_and_multiple_loop_var(self):
code = 'class C1(object):\n pass\n' \
'class C2(object):\n pass\n' \
'l = [(c1, c2) for c1 in [C1()] for c2 in [C2()]]\n' \
'a, b = l[0]\n'
mod = libutils.get_string_module(self.project, code)
c1_class = mod['C1'].get_object()
c2_class = mod['C2'].get_object()
a_var = mod['a'].get_object()
b_var = mod['b'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
def test_list_comprehensions_and_multiple_iters(self):
mod = libutils.get_string_module(
self.project,
'class C1(object):\n pass\nclass C2(object):\n pass\n'
'l = [(c1, c2) for c1, c2 in [(C1(), C2())]]\n'
'a, b = l[0]\n')
c1_class = mod['C1'].get_object()
c2_class = mod['C2'].get_object()
a_var = mod['a'].get_object()
b_var = mod['b'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
def test_we_know_the_type_of_catched_exceptions(self):
code = 'class MyError(Exception):\n pass\n' \
'try:\n raise MyError()\n' \
'except MyError, e:\n pass\n'
mod = libutils.get_string_module(self.project, code)
my_error = mod['MyError'].get_object()
e_var = mod['e'].get_object()
self.assertEquals(my_error, e_var.get_type())
def test_we_know_the_type_of_catched_multiple_excepts(self):
code = 'class MyError(Exception):\n pass\n' \
'try:\n raise MyError()\n' \
'except (MyError, Exception), e:\n pass\n'
mod = libutils.get_string_module(self.project, code)
my_error = mod['MyError'].get_object()
e_var = mod['e'].get_object()
self.assertEquals(my_error, e_var.get_type())
def test_using_property_as_decorators(self):
code = 'class A(object):\n pass\n' \
'class B(object):\n' \
' @property\n def f(self):\n return A()\n' \
'b = B()\nvar = b.f\n'
mod = libutils.get_string_module(self.project, code)
var = mod['var'].get_object()
a = mod['A'].get_object()
self.assertEquals(a, var.get_type())
def test_using_property_as_decorators_and_passing_parameter(self):
code = 'class B(object):\n' \
' @property\n def f(self):\n return self\n' \
'b = B()\nvar = b.f\n'
mod = libutils.get_string_module(self.project, code)
var = mod['var'].get_object()
a = mod['B'].get_object()
self.assertEquals(a, var.get_type())
def suite():
result = unittest.TestSuite()
result.addTests(unittest.makeSuite(ObjectInferTest))
return result
if __name__ == '__main__':
unittest.main()
|
andrewleech/SickRage
|
refs/heads/master
|
lib/sqlalchemy/engine/threadlocal.py
|
79
|
# engine/threadlocal.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides a thread-local transactional wrapper around the root Engine class.
The ``threadlocal`` module is invoked when using the
``strategy="threadlocal"`` flag with :func:`~sqlalchemy.engine.create_engine`.
This module is semi-private and is invoked automatically when the threadlocal
engine strategy is used.
"""
from .. import util
from . import base
import weakref
class TLConnection(base.Connection):
def __init__(self, *arg, **kw):
super(TLConnection, self).__init__(*arg, **kw)
self.__opencount = 0
def _increment_connect(self):
self.__opencount += 1
return self
def close(self):
if self.__opencount == 1:
base.Connection.close(self)
self.__opencount -= 1
def _force_close(self):
self.__opencount = 0
base.Connection.close(self)
class TLEngine(base.Engine):
"""An Engine that includes support for thread-local managed
transactions.
"""
_tl_connection_cls = TLConnection
def __init__(self, *args, **kwargs):
super(TLEngine, self).__init__(*args, **kwargs)
self._connections = util.threading.local()
def contextual_connect(self, **kw):
if not hasattr(self._connections, 'conn'):
connection = None
else:
connection = self._connections.conn()
if connection is None or connection.closed:
# guards against pool-level reapers, if desired.
# or not connection.connection.is_valid:
connection = self._tl_connection_cls(
self, self.pool.connect(), **kw)
self._connections.conn = weakref.ref(connection)
return connection._increment_connect()
def begin_twophase(self, xid=None):
if not hasattr(self._connections, 'trans'):
self._connections.trans = []
self._connections.trans.append(
self.contextual_connect().begin_twophase(xid=xid))
return self
def begin_nested(self):
if not hasattr(self._connections, 'trans'):
self._connections.trans = []
self._connections.trans.append(
self.contextual_connect().begin_nested())
return self
def begin(self):
if not hasattr(self._connections, 'trans'):
self._connections.trans = []
self._connections.trans.append(self.contextual_connect().begin())
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if type is None:
self.commit()
else:
self.rollback()
def prepare(self):
if not hasattr(self._connections, 'trans') or \
not self._connections.trans:
return
self._connections.trans[-1].prepare()
def commit(self):
if not hasattr(self._connections, 'trans') or \
not self._connections.trans:
return
trans = self._connections.trans.pop(-1)
trans.commit()
def rollback(self):
if not hasattr(self._connections, 'trans') or \
not self._connections.trans:
return
trans = self._connections.trans.pop(-1)
trans.rollback()
def dispose(self):
self._connections = util.threading.local()
super(TLEngine, self).dispose()
@property
def closed(self):
return not hasattr(self._connections, 'conn') or \
self._connections.conn() is None or \
self._connections.conn().closed
def close(self):
if not self.closed:
self.contextual_connect().close()
connection = self._connections.conn()
connection._force_close()
del self._connections.conn
self._connections.trans = []
def __repr__(self):
return 'TLEngine(%s)' % str(self.url)
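# Usage sketch (illustrative, not part of this module; assumes
# sqlalchemy.create_engine):
#     engine = create_engine(url, strategy='threadlocal')
#     engine.begin()
#     try:
#         engine.execute(stmt)
#         engine.commit()
#     except Exception:
#         engine.rollback()
#         raise
# Each thread sees its own connection and its own transaction stack.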
|
BeegorMif/HTPC-Manager
|
refs/heads/master
|
lib/hachoir_parser/image/ico.py
|
90
|
"""
Microsoft Windows icon and cursor file format parser.
Author: Victor Stinner
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum, RawBytes)
from lib.hachoir_parser.image.common import PaletteRGBA
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_parser.common.win32 import BitmapInfoHeader
class IconHeader(FieldSet):
def createFields(self):
yield UInt8(self, "width", "Width")
yield UInt8(self, "height", "Height")
yield UInt8(self, "nb_color", "Number of colors")
yield UInt8(self, "reserved", "(reserved)")
yield UInt16(self, "planes", "Color planes (=1)")
yield UInt16(self, "bpp", "Bits per pixel")
yield UInt32(self, "size", "Content size in bytes")
yield UInt32(self, "offset", "Data offset")
def createDescription(self):
return "Icon: %ux%u pixels, %u bits/pixel" % \
(self["width"].value, self["height"].value, self["bpp"].value)
def isValid(self):
if self["nb_color"].value == 0:
if self["bpp"].value in (8, 24, 32) and self["planes"].value == 1:
return True
if self["planes"].value == 4 and self["bpp"].value == 0:
return True
elif self["nb_color"].value == 16:
if self["bpp"].value in (4, 16) and self["planes"].value == 1:
return True
else:
return False
if self["bpp"].value == 0 and self["planes"].value == 0:
return True
return False
class IconData(FieldSet):
def __init__(self, parent, name, header):
FieldSet.__init__(self, parent, name, "Icon data")
self.header = header
def createFields(self):
yield BitmapInfoHeader(self, "header")
# Read palette if needed
nb_color = self.header["nb_color"].value
if self.header["bpp"].value == 8:
nb_color = 256
if nb_color != 0:
yield PaletteRGBA(self, "palette", nb_color)
# Read pixels
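        # header["size"] counts bytes, while hachoir's current_size is in
        # bits, hence the division by 8 before reading the raw pixel data.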
size = self.header["size"].value - self.current_size/8
yield RawBytes(self, "pixels", size, "Image pixels")
class IcoFile(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"id": "ico",
"category": "image",
"file_ext": ("ico", "cur"),
"mime": (u"image/x-ico",),
"min_size": (22 + 40)*8,
# "magic": (
# ("\0\0\1\0", 0), # Icon
# ("\0\0\2\0", 0), # Cursor
# ),
"magic_regex": ((
# signature=0, type=(1|2), count in 1..20,
"\0\0[\1\2]\0[\x01-\x14]."
# size=(16x16|32x32|48x48|64x64),
"(\x10\x10|\x20\x20|\x30\x30|\x40\x40)"
# nb_color=0 or 16; nb_plane=(0|1|4), bpp=(0|8|24|32)
"[\x00\x10]\0[\0\1\4][\0\x08\x18\x20]\0",
0),),
"description": "Microsoft Windows icon or cursor",
}
TYPE_NAME = {
1: "icon",
2: "cursor"
}
def validate(self):
# Check signature and type
if self["signature"].value != 0:
return "Wrong file signature"
if self["type"].value not in self.TYPE_NAME:
return "Unknown picture type"
# Check all icon headers
index = -1
for field in self:
if field.name.startswith("icon_header"):
index += 1
if not field.isValid():
return "Invalid header #%u" % index
elif 0 <= index:
break
return True
def createFields(self):
yield UInt16(self, "signature", "Signature (0x0000)")
yield Enum(UInt16(self, "type", "Resource type"), self.TYPE_NAME)
yield UInt16(self, "nb_items", "Number of items")
items = []
for index in xrange(self["nb_items"].value):
item = IconHeader(self, "icon_header[]")
yield item
items.append(item)
for header in items:
if header["offset"].value*8 != self.current_size:
raise ParserError("Icon: Problem with icon data offset.")
yield IconData(self, "icon_data[]", header)
def createDescription(self):
desc = "Microsoft Windows %s" % self["type"].display
size = []
for header in self.array("icon_header"):
size.append("%ux%ux%u" % (header["width"].value,
header["height"].value, header["bpp"].value))
if size:
return "%s: %s" % (desc, ", ".join(size))
else:
return desc
def createContentSize(self):
count = self["nb_items"].value
if not count:
return None
field = self["icon_data[%u]" % (count-1)]
return field.absolute_address + field.size
|
alqfahad/odoo
|
refs/heads/8.0
|
openerp/tests/addons/test_translation_import/__init__.py
|
2355
|
# -*- coding: utf-8 -*-
import models
|
caronc/nzbget-subliminal
|
refs/heads/master
|
Subliminal/apprise/plugins/NotifyTwitter/tweepy/models.py
|
1
|
# Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import absolute_import, print_function
from .utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
"""A list like object that holds results from a Twitter API query."""
def __init__(self, max_id=None, since_id=None):
super(ResultSet, self).__init__()
self._max_id = max_id
self._since_id = since_id
@property
def max_id(self):
if self._max_id:
return self._max_id
ids = self.ids()
# Max_id is always set to the *smallest* id, minus one, in the set
return (min(ids) - 1) if ids else None
@property
def since_id(self):
if self._since_id:
return self._since_id
ids = self.ids()
# Since_id is always set to the *greatest* id in the set
return max(ids) if ids else None
def ids(self):
return [item.id for item in self if hasattr(item, 'id')]
class Model(object):
def __init__(self, api=None):
self._api = api
def __getstate__(self):
# pickle
pickle = dict(self.__dict__)
try:
del pickle['_api'] # do not pickle the API reference
except KeyError:
pass
return pickle
@classmethod
def parse(cls, api, json):
"""Parse a JSON object into a model instance."""
raise NotImplementedError
@classmethod
def parse_list(cls, api, json_list):
"""
Parse a list of JSON objects into
a result set of model instances.
"""
results = ResultSet()
for obj in json_list:
if obj:
results.append(cls.parse(api, obj))
return results
def __repr__(self):
state = ['%s=%s' % (k, repr(v)) for (k, v) in vars(self).items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(state))
class Status(Model):
@classmethod
def parse(cls, api, json):
status = cls(api)
setattr(status, '_json', json)
for k, v in json.items():
if k == 'user':
user_model = getattr(api.parser.model_factory, 'user') if api else User
user = user_model.parse(api, v)
setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED
elif k == 'created_at':
setattr(status, k, parse_datetime(v))
elif k == 'source':
if '<' in v:
setattr(status, k, parse_html_value(v))
setattr(status, 'source_url', parse_a_href(v))
else:
setattr(status, k, v)
setattr(status, 'source_url', None)
elif k == 'retweeted_status':
setattr(status, k, Status.parse(api, v))
elif k == 'quoted_status':
setattr(status, k, Status.parse(api, v))
elif k == 'place':
if v is not None:
setattr(status, k, Place.parse(api, v))
else:
setattr(status, k, None)
else:
setattr(status, k, v)
return status
def destroy(self):
return self._api.destroy_status(self.id)
def retweet(self):
return self._api.retweet(self.id)
def retweets(self):
return self._api.retweets(self.id)
def favorite(self):
return self._api.create_favorite(self.id)
def __eq__(self, other):
if isinstance(other, Status):
return self.id == other.id
return NotImplemented
def __ne__(self, other):
result = self == other
if result is NotImplemented:
return result
return not result
class User(Model):
@classmethod
def parse(cls, api, json):
user = cls(api)
setattr(user, '_json', json)
for k, v in json.items():
if k == 'created_at':
setattr(user, k, parse_datetime(v))
elif k == 'status':
setattr(user, k, Status.parse(api, v))
elif k == 'following':
# twitter sets this to null if it is false
if v is True:
setattr(user, k, True)
else:
setattr(user, k, False)
else:
setattr(user, k, v)
return user
@classmethod
def parse_list(cls, api, json_list):
if isinstance(json_list, list):
item_list = json_list
else:
item_list = json_list['users']
results = ResultSet()
for obj in item_list:
results.append(cls.parse(api, obj))
return results
def timeline(self, **kargs):
return self._api.user_timeline(user_id=self.id, **kargs)
def friends(self, **kargs):
return self._api.friends(user_id=self.id, **kargs)
def followers(self, **kargs):
return self._api.followers(user_id=self.id, **kargs)
def follow(self):
self._api.create_friendship(user_id=self.id)
self.following = True
def unfollow(self):
self._api.destroy_friendship(user_id=self.id)
self.following = False
def lists_memberships(self, *args, **kargs):
return self._api.lists_memberships(user=self.screen_name,
*args,
**kargs)
def lists_subscriptions(self, *args, **kargs):
return self._api.lists_subscriptions(user=self.screen_name,
*args,
**kargs)
def lists(self, *args, **kargs):
return self._api.lists_all(user=self.screen_name,
*args,
**kargs)
def followers_ids(self, *args, **kargs):
return self._api.followers_ids(user_id=self.id,
*args,
**kargs)
class DirectMessage(Model):
@classmethod
def parse(cls, api, json):
dm = cls(api)
for k, v in json.items():
if k == 'sender' or k == 'recipient':
setattr(dm, k, User.parse(api, v))
elif k == 'created_at':
setattr(dm, k, parse_datetime(v))
else:
setattr(dm, k, v)
return dm
def destroy(self):
return self._api.destroy_direct_message(self.id)
class Friendship(Model):
@classmethod
def parse(cls, api, json):
relationship = json['relationship']
# parse source
source = cls(api)
for k, v in relationship['source'].items():
setattr(source, k, v)
# parse target
target = cls(api)
for k, v in relationship['target'].items():
setattr(target, k, v)
return source, target
class Category(Model):
@classmethod
def parse(cls, api, json):
category = cls(api)
for k, v in json.items():
setattr(category, k, v)
return category
class SavedSearch(Model):
@classmethod
def parse(cls, api, json):
ss = cls(api)
for k, v in json.items():
if k == 'created_at':
setattr(ss, k, parse_datetime(v))
else:
setattr(ss, k, v)
return ss
def destroy(self):
return self._api.destroy_saved_search(self.id)
class SearchResults(ResultSet):
@classmethod
def parse(cls, api, json):
metadata = json['search_metadata']
results = SearchResults()
results.refresh_url = metadata.get('refresh_url')
results.completed_in = metadata.get('completed_in')
results.query = metadata.get('query')
results.count = metadata.get('count')
results.next_results = metadata.get('next_results')
status_model = getattr(api.parser.model_factory, 'status') if api else Status
for status in json['statuses']:
results.append(status_model.parse(api, status))
return results
class List(Model):
@classmethod
def parse(cls, api, json):
lst = List(api)
for k, v in json.items():
if k == 'user':
setattr(lst, k, User.parse(api, v))
elif k == 'created_at':
setattr(lst, k, parse_datetime(v))
else:
setattr(lst, k, v)
return lst
@classmethod
def parse_list(cls, api, json_list, result_set=None):
results = ResultSet()
if isinstance(json_list, dict):
json_list = json_list['lists']
for obj in json_list:
results.append(cls.parse(api, obj))
return results
def update(self, **kargs):
return self._api.update_list(self.slug, **kargs)
def destroy(self):
return self._api.destroy_list(self.slug)
def timeline(self, **kargs):
return self._api.list_timeline(self.user.screen_name,
self.slug,
**kargs)
def add_member(self, id):
return self._api.add_list_member(self.slug, id)
def remove_member(self, id):
return self._api.remove_list_member(self.slug, id)
def members(self, **kargs):
return self._api.list_members(self.user.screen_name,
self.slug,
**kargs)
def is_member(self, id):
return self._api.is_list_member(self.user.screen_name,
self.slug,
id)
def subscribe(self):
return self._api.subscribe_list(self.user.screen_name, self.slug)
def unsubscribe(self):
return self._api.unsubscribe_list(self.user.screen_name, self.slug)
def subscribers(self, **kargs):
return self._api.list_subscribers(self.user.screen_name,
self.slug,
**kargs)
def is_subscribed(self, id):
return self._api.is_subscribed_list(self.user.screen_name,
self.slug,
id)
class Relation(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
for k, v in json.items():
if k == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']:
setattr(result, k, Status.parse(api, v))
elif k == 'results':
setattr(result, k, Relation.parse_list(api, v))
else:
setattr(result, k, v)
return result
class Relationship(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
for k, v in json.items():
if k == 'connections':
setattr(result, 'is_following', 'following' in v)
setattr(result, 'is_followed_by', 'followed_by' in v)
else:
setattr(result, k, v)
return result
class JSONModel(Model):
@classmethod
def parse(cls, api, json):
return json
class IDModel(Model):
@classmethod
def parse(cls, api, json):
if isinstance(json, list):
return json
else:
return json['ids']
class BoundingBox(Model):
@classmethod
def parse(cls, api, json):
result = cls(api)
if json is not None:
for k, v in json.items():
setattr(result, k, v)
return result
def origin(self):
"""
        Return the longitude and latitude of the southwest (bottom left)
        corner of the bounding box, as a tuple.
        This assumes that the bounding box is always a rectangle, which
        appears to be the case at present.
"""
return tuple(self.coordinates[0][0])
def corner(self):
"""
        Return the longitude and latitude of the northeast (top right)
        corner of the bounding box, as a tuple.
        This assumes that the bounding box is always a rectangle, which
        appears to be the case at present.
"""
return tuple(self.coordinates[0][2])
class Place(Model):
@classmethod
def parse(cls, api, json):
place = cls(api)
for k, v in json.items():
if k == 'bounding_box':
                # bounding_box value may be null (None).
# Example: "United States" (id=96683cc9126741d1)
if v is not None:
t = BoundingBox.parse(api, v)
else:
t = v
setattr(place, k, t)
elif k == 'contained_within':
# contained_within is a list of Places.
setattr(place, k, Place.parse_list(api, v))
else:
setattr(place, k, v)
return place
@classmethod
def parse_list(cls, api, json_list):
if isinstance(json_list, list):
item_list = json_list
else:
item_list = json_list['result']['places']
results = ResultSet()
for obj in item_list:
results.append(cls.parse(api, obj))
return results
class Media(Model):
@classmethod
def parse(cls, api, json):
media = cls(api)
for k, v in json.items():
setattr(media, k, v)
return media
class ModelFactory(object):
"""
Used by parsers for creating instances
of models. You may subclass this factory
to add your own extended models.
"""
status = Status
user = User
direct_message = DirectMessage
friendship = Friendship
saved_search = SavedSearch
search_results = SearchResults
category = Category
list = List
relation = Relation
relationship = Relationship
media = Media
json = JSONModel
ids = IDModel
place = Place
bounding_box = BoundingBox
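# Extension sketch (illustrative): swap in custom models by subclassing the
# factory, e.g.
#     class MyStatus(Status):
#         pass
#     class MyModelFactory(ModelFactory):
#         status = MyStatus
# A parser configured with MyModelFactory then yields MyStatus instances.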
|
wwj718/ANALYSE
|
refs/heads/master
|
lms/djangoapps/shoppingcart/migrations/0013_auto__add_field_invoice_is_valid.py
|
13
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Invoice.is_valid'
db.add_column('shoppingcart_invoice', 'is_valid',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Invoice.is_valid'
db.delete_column('shoppingcart_invoice', 'is_valid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'company_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'tax_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 8, 20, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
|
brandsoulmates/incubator-airflow
|
refs/heads/master
|
tests/www/api/experimental/test_endpoints.py
|
11
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime, timedelta
from airflow.models import DagBag
import json
class ApiExperimentalTests(unittest.TestCase):
def setUp(self):
from airflow import configuration
configuration.load_test_config()
from airflow.www import app as application
app = application.create_app(testing=True)
self.app = app.test_client()
def test_task_info(self):
url_template = '/api/experimental/dags/{}/tasks/{}'
response = self.app.get(url_template.format('example_bash_operator', 'runme_0'))
self.assertIn('"email"', response.data.decode('utf-8'))
self.assertNotIn('error', response.data.decode('utf-8'))
self.assertEqual(200, response.status_code)
response = self.app.get(url_template.format('example_bash_operator', 'DNE'))
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
response = self.app.get(url_template.format('DNE', 'DNE'))
self.assertIn('error', response.data.decode('utf-8'))
self.assertEqual(404, response.status_code)
def test_trigger_dag(self):
url_template = '/api/experimental/dags/{}/dag_runs'
response = self.app.post(
url_template.format('example_bash_operator'),
data=json.dumps(dict(run_id='my_run' + datetime.now().isoformat())),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
response = self.app.post(
url_template.format('does_not_exist_dag'),
data=json.dumps(dict()),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
def test_trigger_dag_for_date(self):
url_template = '/api/experimental/dags/{}/dag_runs'
dag_id = 'example_bash_operator'
hour_from_now = datetime.now() + timedelta(hours=1)
execution_date = datetime(hour_from_now.year,
hour_from_now.month,
hour_from_now.day,
hour_from_now.hour)
datetime_string = execution_date.isoformat()
# Test Correct execution
response = self.app.post(
url_template.format(dag_id),
data=json.dumps(dict(execution_date=datetime_string)),
content_type="application/json"
)
self.assertEqual(200, response.status_code)
dagbag = DagBag()
dag = dagbag.get_dag(dag_id)
dag_run = dag.get_dagrun(execution_date)
self.assertTrue(dag_run,
'Dag Run not found for execution date {}'
.format(execution_date))
# Test error for nonexistent dag
response = self.app.post(
url_template.format('does_not_exist_dag'),
data=json.dumps(dict(execution_date=execution_date.isoformat())),
content_type="application/json"
)
self.assertEqual(404, response.status_code)
# Test error for bad datetime format
response = self.app.post(
url_template.format(dag_id),
data=json.dumps(dict(execution_date='not_a_datetime')),
content_type="application/json"
)
self.assertEqual(400, response.status_code)
|
shaufi/odoo
|
refs/heads/8.0
|
addons/website_event_sale/__openerp__.py
|
307
|
# -*- coding: utf-8 -*-
{
'name': "Online Event's Tickets",
'category': 'Hidden',
'summary': "Sell Your Event's Tickets",
'website': 'https://www.odoo.com/page/events',
'version': '1.0',
'description': """
Online Event's Tickets
======================
""",
'author': 'OpenERP SA',
'depends': ['website_event', 'event_sale', 'website_sale'],
'data': [
'views/website_event_sale.xml',
'security/ir.model.access.csv',
'security/website_event_sale.xml',
],
'qweb': ['static/src/xml/*.xml'],
'installable': True,
'auto_install': True
}
|
unicefuganda/ureport
|
refs/heads/master
|
functional_test/scenarios/nose_examples_test.py
|
2
|
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from nose.tools import raises
class TestNoseExamples():
@classmethod
def setup_class(klass):
"""This method is run once for each class before any tests are run"""
@classmethod
def teardown_class(klass):
"""This method is run once for each class _after_ all tests are run"""
def setUp(self):
"""This method is run once before _each_ test method is executed"""
def teardown(self):
"""This method is run once after _each_ test method is executed"""
def test_equals(self):
assert_equal("Some Value", "Some Value")
assert_not_equal("something else", "Incorrect Value")
def test_boolean(self):
assert_equal(True, True)
assert_not_equal(True, False)
@raises(Exception)
def test_raise_exc_with_decorator(self):
raise(Exception("A message"))
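        # assert_raises (imported above) also works as a context manager on
        # Python 2.7+:
        #     with assert_raises(Exception):
        #         raise Exception("A message")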
|
fw1121/CheckM
|
refs/heads/master
|
scripts/createMarketSetPlots.py
|
3
|
import os
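# Drive markerSetTest.py once per (domain, threshold) pair, passing the same
# threshold value to both the -u and -s options.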
for r in ['Archaea', 'Bacteria']:
for t in ['1.0', '0.99', '0.98', '0.97', '0.96', '0.95']:
os.system('./markerSetTest.py -T ' + r + ' -u ' + t + ' -s ' + t)
|
throwable-one/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/views/decorators/vary.py
|
307
|
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
"""
A view decorator that adds the specified headers to the Vary header of the
response. Usage:
@vary_on_headers('Cookie', 'Accept-language')
def index(request):
...
Note that the header names are not case-sensitive.
"""
def decorator(func):
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, headers)
return response
return wraps(func, assigned=available_attrs(func))(inner_func)
return decorator
def vary_on_cookie(func):
"""
A view decorator that adds "Cookie" to the Vary header of a response. This
indicates that a page's contents depends on cookies. Usage:
@vary_on_cookie
def index(request):
...
"""
def inner_func(*args, **kwargs):
response = func(*args, **kwargs)
patch_vary_headers(response, ('Cookie',))
return response
return wraps(func, assigned=available_attrs(func))(inner_func)
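# Illustrative sketch (not part of Django): stacking both decorators on a
# view. `render_dashboard` is a hypothetical name, used only to show how the
# Vary header accumulates across decorators:
#
#     @vary_on_headers('User-Agent')
#     @vary_on_cookie
#     def render_dashboard(request):
#         ...  # the response will carry "Vary: Cookie, User-Agent"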
|
icewater246/django-rest-framework-jwt
|
refs/heads/master
|
tests/test_authentication.py
|
14
|
from django.http import HttpResponse
from django.test import TestCase
from django.utils import unittest
from django.conf.urls import patterns
from rest_framework import permissions, status
try:
from rest_framework_oauth.authentication import OAuth2Authentication
except ImportError:
try:
from rest_framework.authentication import OAuth2Authentication
except ImportError:
OAuth2Authentication = None
try:
    from rest_framework_oauth.compat import oauth2_provider
    from rest_framework_oauth.compat.oauth2_provider import oauth2  # NOQA
except ImportError:
    try:
        from rest_framework.compat import oauth2_provider
        from rest_framework.compat.oauth2_provider import oauth2  # NOQA
    except ImportError:
        # if the oauth2 module cannot be imported, skip those tests,
        # because the models have not been initialized.
        oauth2_provider = None
from rest_framework.test import APIRequestFactory, APIClient
from rest_framework.views import APIView
from rest_framework_jwt import utils
from rest_framework_jwt.compat import get_user_model
from rest_framework_jwt.settings import api_settings, DEFAULTS
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
User = get_user_model()
DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED = 'django-oauth2-provider not installed'
factory = APIRequestFactory()
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def post(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
urlpatterns = patterns(
'',
(r'^jwt/$', MockView.as_view(
authentication_classes=[JSONWebTokenAuthentication])),
(r'^jwt-oauth2/$', MockView.as_view(
authentication_classes=[
JSONWebTokenAuthentication, OAuth2Authentication])),
(r'^oauth2-jwt/$', MockView.as_view(
authentication_classes=[
OAuth2Authentication, JSONWebTokenAuthentication])),
)
class JSONWebTokenAuthenticationTests(TestCase):
"""JSON Web Token Authentication"""
urls = 'tests.test_authentication'
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'jpueblo'
self.email = 'jpueblo@example.com'
self.user = User.objects.create_user(self.username, self.email)
def test_post_form_passing_jwt_auth(self):
"""
Ensure POSTing form over JWT auth with correct credentials
passes and does not require CSRF
"""
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_json_passing_jwt_auth(self):
"""
Ensure POSTing JSON over JWT auth with correct credentials
passes and does not require CSRF
"""
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_form_failing_jwt_auth(self):
"""
Ensure POSTing form over JWT auth without correct credentials fails
"""
response = self.csrf_client.post('/jwt/', {'example': 'example'})
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post_json_failing_jwt_auth(self):
"""
Ensure POSTing json over JWT auth without correct credentials fails
"""
response = self.csrf_client.post('/jwt/', {'example': 'example'},
format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_no_jwt_header_failing_jwt_auth(self):
"""
Ensure POSTing over JWT auth without credentials fails
"""
auth = 'JWT'
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
msg = 'Invalid Authorization header. No credentials provided.'
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_invalid_jwt_header_failing_jwt_auth(self):
"""
Ensure POSTing over JWT auth without correct credentials fails
"""
auth = 'JWT abc abc'
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
msg = ('Invalid Authorization header. Credentials string '
'should not contain spaces.')
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_expired_token_failing_jwt_auth(self):
"""
Ensure POSTing over JWT auth with expired token fails
"""
payload = utils.jwt_payload_handler(self.user)
payload['exp'] = 1
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
msg = 'Signature has expired.'
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
def test_post_invalid_token_failing_jwt_auth(self):
"""
Ensure POSTing over JWT auth with invalid token fails
"""
auth = 'JWT abc123'
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
msg = 'Error decoding signature.'
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response['WWW-Authenticate'], 'JWT realm="api"')
@unittest.skipUnless(oauth2_provider, DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED)
def test_post_passing_jwt_auth_with_oauth2_priority(self):
"""
Ensure POSTing over JWT auth with correct credentials
passes and does not require CSRF when OAuth2Authentication
has priority on authentication_classes
"""
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/oauth2-jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, response)
@unittest.skipUnless(oauth2_provider, DJANGO_OAUTH2_PROVIDER_NOT_INSTALLED)
def test_post_passing_oauth2_with_jwt_auth_priority(self):
"""
Ensure POSTing over OAuth2 with correct credentials
passes and does not require CSRF when JSONWebTokenAuthentication
has priority on authentication_classes
"""
Client = oauth2_provider.oauth2.models.Client
AccessToken = oauth2_provider.oauth2.models.AccessToken
oauth2_client = Client.objects.create(
user=self.user,
client_type=0,
)
access_token = AccessToken.objects.create(
user=self.user,
client=oauth2_client,
)
auth = 'Bearer {0}'.format(access_token.token)
response = self.csrf_client.post(
'/jwt-oauth2/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, response)
def test_post_form_passing_jwt_invalid_payload(self):
"""
Ensure POSTing json over JWT auth with invalid payload fails
"""
payload = dict(email=None)
token = utils.jwt_encode_handler(payload)
auth = 'JWT {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
msg = 'Invalid payload.'
self.assertEqual(response.data['detail'], msg)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_different_auth_header_prefix(self):
"""
Ensure using a different setting for `JWT_AUTH_HEADER_PREFIX` and
with correct credentials passes.
"""
api_settings.JWT_AUTH_HEADER_PREFIX = 'Bearer'
payload = utils.jwt_payload_handler(self.user)
token = utils.jwt_encode_handler(payload)
auth = 'Bearer {0}'.format(token)
response = self.csrf_client.post(
'/jwt/', {'example': 'example'},
HTTP_AUTHORIZATION=auth, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Restore original settings
api_settings.JWT_AUTH_HEADER_PREFIX = DEFAULTS['JWT_AUTH_HEADER_PREFIX']
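        # Note: if the assertion above fails, the inline restore never runs
        # and the override leaks into later tests. A sketch of a safer
        # pattern using unittest's addCleanup (same objects as above):
        #
        #     api_settings.JWT_AUTH_HEADER_PREFIX = 'Bearer'
        #     self.addCleanup(
        #         setattr, api_settings, 'JWT_AUTH_HEADER_PREFIX',
        #         DEFAULTS['JWT_AUTH_HEADER_PREFIX'])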
|
Cakin-Kwong/three.js
|
refs/heads/master
|
utils/exporters/blender/addons/io_three/exporter/base_classes.py
|
125
|
from . import utilities
from .. import constants, exceptions
class BaseClass(constants.BASE_DICT):
"""Base class which inherits from a base dictionary object."""
_defaults = {}
def __init__(self, parent=None, type=None):
constants.BASE_DICT.__init__(self)
self._type = type
self._parent = parent
constants.BASE_DICT.update(self, self._defaults.copy())
def __setitem__(self, key, value):
if not isinstance(value, constants.VALID_DATA_TYPES):
msg = "Value is an invalid data type: %s" % type(value)
raise exceptions.ThreeValueError(msg)
constants.BASE_DICT.__setitem__(self, key, value)
@property
def count(self):
"""
:return: number of keys
:rtype: int
"""
return len(self.keys())
@property
def parent(self):
"""
:return: parent object
"""
return self._parent
@property
def type(self):
"""
:return: the type (if applicable)
"""
return self._type
def copy(self):
"""Copies the items to a standard dictionary object.
:rtype: dict
"""
data = {}
def _dict_copy(old, new):
"""Recursive function for processing all values
:param old:
:param new:
"""
for key, value in old.items():
if isinstance(value, (str, list)):
new[key] = value[:]
elif isinstance(value, tuple):
new[key] = value+tuple()
elif isinstance(value, dict):
new[key] = {}
_dict_copy(value, new[key])
else:
new[key] = value
_dict_copy(self, data)
return data
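    # Illustrative sketch (not part of the exporter): for an instance holding
    # {'a': [1, 2], 'b': {'c': 'x'}}, copy() returns a plain dict with the
    # list sliced and the nested dict rebuilt, so mutating the copy leaves
    # the original untouched:
    #
    #     data = instance.copy()
    #     data['a'].append(3)   # instance['a'] is still [1, 2]
    #     data['b']['c'] = 'y'  # instance['b']['c'] is still 'x'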
class BaseNode(BaseClass):
"""Base class for all nodes for the current platform."""
def __init__(self, node, parent, type):
BaseClass.__init__(self, parent=parent, type=type)
self._node = node
if node is None:
self[constants.UUID] = utilities.id()
else:
self[constants.NAME] = node
self[constants.UUID] = utilities.id_from_name(node)
if isinstance(parent, BaseScene):
scene = parent
elif parent is not None:
scene = parent.scene
else:
scene = None
self._scene = scene
@property
def node(self):
"""
:return: name of the node
"""
return self._node
@property
def scene(self):
"""
        :return: the scene this node belongs to
"""
return self._scene
@property
def options(self):
"""
:return: export options
:retype: dict
"""
return self.scene.options
class BaseScene(BaseClass):
"""Base class that scenes inherit from."""
def __init__(self, filepath, options):
BaseClass.__init__(self, type=constants.SCENE)
self._filepath = filepath
self._options = options.copy()
@property
def filepath(self):
return self._filepath
@property
def options(self):
return self._options
|
iivic/BoiseStateX
|
refs/heads/master
|
lms/djangoapps/licenses/migrations/0001_initial.py
|
188
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseSoftware'
db.create_table('licenses_coursesoftware', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('licenses', ['CourseSoftware'])
# Adding model 'UserLicense'
db.create_table('licenses_userlicense', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('software', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['licenses.CourseSoftware'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('serial', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('licenses', ['UserLicense'])
def backwards(self, orm):
# Deleting model 'CourseSoftware'
db.delete_table('licenses_coursesoftware')
# Deleting model 'UserLicense'
db.delete_table('licenses_userlicense')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'licenses.coursesoftware': {
'Meta': {'object_name': 'CourseSoftware'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'licenses.userlicense': {
'Meta': {'object_name': 'UserLicense'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'software': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['licenses.CourseSoftware']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['licenses']
|
skirsdeda/django
|
refs/heads/master
|
django/contrib/gis/gdal/tests/test_geom.py
|
94
|
import json
from binascii import b2a_hex
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.test_data import TestDataMixin
from django.utils.six.moves import xrange
if HAS_GDAL:
from django.contrib.gis.gdal import (OGRGeometry, OGRGeomType,
OGRException, OGRIndexError, SpatialReference, CoordTransform,
GDAL_VERSION)
@skipUnless(HAS_GDAL, "GDAL is required")
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
OGRGeomType(1)
OGRGeomType(7)
OGRGeomType('point')
OGRGeomType('GeometrycollectioN')
OGRGeomType('LINearrING')
OGRGeomType('Unknown')
        # Should raise OGRException on these invalid inputs
self.assertRaises(OGRException, OGRGeomType, 23)
self.assertRaises(OGRException, OGRGeomType, 'fooD')
self.assertRaises(OGRException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(OGRGeomType(1), OGRGeomType(1))
self.assertEqual(OGRGeomType(7), 'GeometryCollection')
self.assertEqual(OGRGeomType('point'), 'POINT')
self.assertNotEqual(OGRGeomType('point'), 2)
self.assertEqual(OGRGeomType('unknown'), 0)
self.assertEqual(OGRGeomType(6), 'MULtiPolyGON')
self.assertEqual(OGRGeomType(1), OGRGeomType('point'))
self.assertNotEqual(OGRGeomType('POINT'), OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Geometry').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertIsNone(OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertEqual(OGRGeomType(wkb25bit + 1), 'Point25D')
self.assertEqual(OGRGeomType('MultiLineString25D'), (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex.encode(), geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex.encode())
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
# Test input with some garbage content (but valid json) (#15529)
geom = OGRGeometry('{"type": "Point", "coordinates": [ 100.0, 0.0 ], "other": "<test>"}')
self.assertIsInstance(geom, OGRGeometry)
def test02_points(self):
"Testing Point objects."
OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
            self.assertEqual(mgeom1, mgeom2) # they should be equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(linestr, OGRGeometry(ls.wkt))
self.assertNotEqual(linestr, prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(mlinestr, OGRGeometry(mls.wkt))
self.assertNotEqual(mlinestr, prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
#self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(lr, OGRGeometry(rr.wkt))
self.assertNotEqual(lr, prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = OGRGeometry.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(poly, OGRGeometry(p.wkt))
self.assertNotEqual(poly, prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
        # Neither ring in this geometry is closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
with self.assertRaises(OGRException):
poly.centroid
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
def test08_multipolygons(self):
"Testing MultiPolygon objects."
OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
# Ensuring all children geometries (polygons and their rings) all
# return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
# Ensuring SRS propagate in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
# Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using an srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertTrue(a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(OGRException, mp.add, pnt)
        # GeometryCollection.add may take an OGRGeometry (if it is another
        # collection of the same type, all child geoms will be added
        # individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
for tmp in (mp1, mp2, mp3):
self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = pickle.loads(pickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertIsNotNone(OGRGeometry('POINT(0 0)'))
self.assertNotEqual(OGRGeometry('LINESTRING(0 0, 1 1)'), 3)
|
aeischeid/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/localpaths.py
|
40
|
import os
import sys
here = os.path.abspath(os.path.split(__file__)[0])
repo_root = os.path.abspath(os.path.join(here, os.pardir))
sys.path.insert(0, os.path.join(repo_root, "tools"))
sys.path.insert(0, os.path.join(repo_root, "tools", "six"))
sys.path.insert(0, os.path.join(repo_root, "tools", "html5lib"))
sys.path.insert(0, os.path.join(repo_root, "tools", "wptserve"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pywebsocket", "src"))
sys.path.insert(0, os.path.join(repo_root, "tools", "py"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pytest"))
sys.path.insert(0, os.path.join(repo_root, "tools", "webdriver"))
|
spcui/tp-qemu
|
refs/heads/master
|
qemu/tests/vhost_with_cgroup.py
|
3
|
import logging
from autotest.client.shared import error
from autotest.client import utils
from virttest.env_process import preprocess
try:
from virttest.staging.utils_cgroup import Cgroup, CgroupModules
except ImportError:
    # TODO: Obsoleted path used prior to autotest-0.15.2/virttest-2013.06.24
from autotest.client.shared.utils_cgroup import Cgroup, CgroupModules
@error.context_aware
def run(test, params, env):
"""
Test Step:
1. boot guest with vhost enabled
2. add vhost-%pid_qemu process to a cgroup
3. check the vhost process join to the cgroup successfully
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def assign_vm_into_cgroup(vm, cgroup, pwd=None):
"""
Assigns all threads of VM into cgroup
:param vm: desired VM
:param cgroup: cgroup handler
:param pwd: desired cgroup's pwd, cgroup index or None for root cgroup
"""
cgroup.set_cgroup(vm.get_shell_pid(), pwd)
for pid in utils.get_children_pids(vm.get_shell_pid()):
try:
cgroup.set_cgroup(int(pid), pwd)
            except Exception:  # Process might no longer exist
raise error.TestFail("Failed to move all VM threads to cgroup")
error.context("Test Setup: Cgroup initialize in host", logging.info)
modules = CgroupModules()
if (modules.init(['cpu']) != 1):
raise error.TestFail("Can't mount cpu cgroup modules")
cgroup = Cgroup('cpu', '')
cgroup.initialize(modules)
error.context("Boot guest and attach vhost to cgroup your setting(cpu)",
logging.info)
params["start_vm"] = "yes"
preprocess(test, params, env)
vm = env.get_vm(params["main_vm"])
timeout = int(params.get("login_timeout", 360))
vm.wait_for_login(timeout=timeout)
cgroup.mk_cgroup()
cgroup.set_property("cpu.cfs_period_us", 100000, 0)
assign_vm_into_cgroup(vm, cgroup, 0)
vhost_pid = utils.system_output("pidof vhost-%s" % vm.get_pid())
if not vhost_pid:
raise error.TestError("Vhost process not exise")
logging.info("Vhost have started with pid %s" % vhost_pid)
cgroup.set_cgroup(int(vhost_pid))
error.context("Check whether vhost attached to cgroup successfully",
logging.info)
if vhost_pid not in cgroup.get_property("tasks"):
raise error.TestError("Oops, vhost process attach to cgroup FAILED!")
logging.info("Vhost process attach to cgroup successfully")
|
drayanaindra/shoop
|
refs/heads/master
|
shoop/addons/__init__.py
|
7
|
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.apps import AppConfig
from .manager import add_enabled_addons
__all__ = ["add_enabled_addons"]
class ShoopAddonsAppConfig(AppConfig):
name = "shoop.addons"
verbose_name = "Shoop Addons"
label = "shoop_addons"
provides = {
"admin_module": [
"shoop.addons.admin_module:AddonModule",
]
}
default_app_config = "shoop.addons.ShoopAddonsAppConfig"
|
yosshy/nova
|
refs/heads/master
|
nova/tests/unit/objects/test_external_event.py
|
77
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.objects import external_event as external_event_obj
from nova.tests.unit.objects import test_objects
class _TestInstanceExternalEventObject(object):
def test_make_key(self):
key = external_event_obj.InstanceExternalEvent.make_key('foo', 'bar')
self.assertEqual('foo-bar', key)
def test_make_key_no_tag(self):
key = external_event_obj.InstanceExternalEvent.make_key('foo')
self.assertEqual('foo', key)
def test_key(self):
event = external_event_obj.InstanceExternalEvent(
name='network-changed',
tag='bar')
with mock.patch.object(event, 'make_key') as make_key:
make_key.return_value = 'key'
self.assertEqual('key', event.key)
make_key.assert_called_once_with('network-changed', 'bar')
def test_event_names(self):
for event in external_event_obj.EVENT_NAMES:
external_event_obj.InstanceExternalEvent(name=event, tag='bar')
self.assertRaises(ValueError,
external_event_obj.InstanceExternalEvent,
name='foo', tag='bar')
class TestInstanceExternalEventObject(test_objects._LocalTest,
_TestInstanceExternalEventObject):
pass
class TestRemoteInstanceExternalEventObject(test_objects._RemoteTest,
_TestInstanceExternalEventObject):
pass
|
FreeSchoolHackers/data_hacking
|
refs/heads/master
|
data_hacking/min_hash/__init__.py
|
6
|
'''Package for Banded Min Hash based Similarity Calculations'''
from min_hash import *
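# The idea in brief (sketch only; the helpers below are illustrative, not
# this package's API): each set is reduced to a signature of per-hash minima,
# the signature is cut into bands, and two sets become candidate pairs when
# any band matches exactly.
#
#     def minhash_signature(items, seeds):
#         return [min(hash((seed, it)) for it in items) for seed in seeds]
#
#     def bands(signature, rows_per_band):
#         return [tuple(signature[i:i + rows_per_band])
#                 for i in range(0, len(signature), rows_per_band)]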
|
EliasTouil/simpleBlog
|
refs/heads/master
|
simpleBlog/Lib/site-packages/wheel/pep425tags.py
|
233
|
"""Generate and work with PEP 425 Compatibility Tags."""
import sys
import warnings
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import distutils.util
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # pip Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def get_abbr_impl():
"""Return abbreviated implementation name."""
if hasattr(sys, 'pypy_version_info'):
pyimpl = 'pp'
elif sys.platform.startswith('java'):
pyimpl = 'jy'
elif sys.platform == 'cli':
pyimpl = 'ip'
else:
pyimpl = 'cp'
return pyimpl
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver or get_abbr_impl() == 'pp':
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
if get_abbr_impl() == 'pp':
# as per https://github.com/pypa/pip/issues/2882
return (sys.version_info[0], sys.pypy_version_info.major,
sys.pypy_version_info.minor)
else:
return sys.version_info[0], sys.version_info[1]
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback()
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = get_abbr_impl()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp')):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
def get_platform():
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
return distutils.util.get_platform().replace('.', '_').replace('-', '_')
def get_supported(versions=None, supplied_platform=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = get_abbr_impl()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
import imp
for suffix in imp.get_suffixes():
if suffix[0].startswith('.abi'):
abi3s.add(suffix[0].split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
platforms = []
if supplied_platform:
platforms.append(supplied_platform)
platforms.append(get_platform())
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in platforms:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# Major Python version + platform; e.g. binaries not using the Python API
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
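if __name__ == '__main__':
    # Quick demonstration (not part of the original module): print the tag
    # triples this interpreter would accept, most specific first, e.g.
    # ('cp27', 'cp27mu', 'linux_x86_64') down to ('py2', 'none', 'any').
    for tag in get_supported():
        print(tag)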
|
pouyana/teireader
|
refs/heads/master
|
webui/applications/admin/languages/it.py
|
8
|
# coding: utf8
{
'!langcode!': 'it',
'!langname!': 'Italiano',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". Non si può fare "update" o "delete" dei risultati di un JOIN ',
'%s %%{row} deleted': '%s righe ("record") cancellate',
'%s %%{row} updated': '%s righe ("record") modificate',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(requires internet access)': '(requires internet access)',
'(requires internet access, experimental)': '(requires internet access, experimental)',
'(something like "it-it")': '(qualcosa simile a "it-it")',
'@markmin\x01(file **gluon/contrib/plural_rules/%s.py** is not found)': '(file **gluon/contrib/plural_rules/%s.py** is not found)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Searching: **%s** %%{file}': 'Searching: **%s** files',
'A new version of web2py is available: %s': 'È disponibile una nuova versione di web2py: %s',
'About': 'informazioni',
'About application': "Informazioni sull'applicazione",
'additional code for your application': 'righe di codice aggiuntive per la tua applicazione',
'Additional code for your application': 'Additional code for your application',
'admin disabled because no admin password': 'amministrazione disabilitata per mancanza di password amministrativa',
'admin disabled because not supported on google app engine': 'amministrazione non supportata da Google Apps Engine',
'admin disabled because unable to access password file': 'amministrazione disabilitata per impossibilità di leggere il file delle password',
'Admin is disabled because insecure channel': 'amministrazione disabilitata: comunicazione non sicura',
'Admin language': 'Admin language',
'administrative interface': 'administrative interface',
'Administrator Password:': 'Password Amministratore:',
'An error occured, please %s the page': 'An error occured, please %s the page',
'and rename it (required):': 'e rinominala (obbligatorio):',
'and rename it:': 'e rinominala:',
'appadmin': 'appadmin ',
'appadmin is disabled because insecure channel': 'amministrazione app (appadmin) disabilitata: comunicazione non sicura',
'application "%s" uninstalled': 'applicazione "%s" disinstallata',
'application compiled': 'applicazione compilata',
'application is compiled and cannot be designed': "l'applicazione è compilata e non si può modificare",
'Application name:': 'Application name:',
'are not used': 'are not used',
'are not used yet': 'are not used yet',
'Are you sure you want to delete file "%s"?': 'Confermi di voler cancellare il file "%s"?',
'Are you sure you want to delete plugin "%s"?': 'Confermi di voler cancellare il plugin "%s"?',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Are you sure you want to uninstall application "%s"?': 'Confermi di voler disinstallare l\'applicazione "%s"?',
'Are you sure you want to upgrade web2py now?': 'Confermi di voler aggiornare web2py ora?',
'arguments': 'arguments',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': "ATTENZIONE: L'accesso richiede una connessione sicura (HTTPS) o l'esecuzione di web2py in locale (connessione su localhost)",
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATTENZIONE: NON ESEGUIRE PIÙ TEST IN PARALLELO (I TEST NON SONO "THREAD SAFE")',
'ATTENTION: you cannot edit the running application!': "ATTENZIONE: non puoi modificare l'applicazione correntemente in uso ",
'Autocomplete Python Code': 'Autocomplete Python Code',
'Available databases and tables': 'Database e tabelle disponibili',
'back': 'indietro',
'cache': 'cache',
'cache, errors and sessions cleaned': 'pulitura cache, errori e sessioni',
'can be a git repo': 'can be a git repo',
'Cannot be empty': 'Non può essere vuoto',
'Cannot compile: there are errors in your app:': "Compilazione fallita: ci sono errori nell'applicazione.",
'cannot create file': 'impossibile creare il file',
'cannot upload file "%(filename)s"': 'impossibile caricare il file "%(filename)s"',
'Change admin password': 'change admin password',
'change editor settings': 'change editor settings',
'change password': 'cambia password',
'check all': 'controlla tutto',
'Check for upgrades': 'check for upgrades',
'Check to delete': 'Seleziona per cancellare',
'Checking for upgrades...': 'Controllo aggiornamenti in corso...',
'Clean': 'pulisci',
'click here for online examples': 'clicca per vedere gli esempi',
'click here for the administrative interface': "clicca per l'interfaccia amministrativa",
'click to check for upgrades': 'clicca per controllare presenza di aggiornamenti',
'code': 'code',
'collapse/expand all': 'collapse/expand all',
'Compile': 'compila',
'compiled application removed': "rimosso il codice compilato dell'applicazione",
'Controller': 'Controller',
'Controllers': 'Controllers',
'controllers': 'controllers',
'Copyright': 'Copyright',
'Create': 'crea',
'create file with filename:': 'crea un file col nome:',
'create new application:': 'create new application:',
'Create new simple application': 'Crea nuova applicazione',
'Create/Upload': 'Create/Upload',
'created by': 'creato da',
'crontab': 'crontab',
'Current request': 'Richiesta (request) corrente',
'Current response': 'Risposta (response) corrente',
'Current session': 'Sessione (session) corrente',
'currently running': 'currently running',
'currently saved or': 'attualmente salvato o',
'customize me!': 'Personalizzami!',
'data uploaded': 'dati caricati',
'Database': 'Database',
'database': 'database',
'database %s select': 'database %s select',
'database administration': 'amministrazione database',
'Date and Time': 'Data e Ora',
'db': 'db',
'DB Model': 'Modello di DB',
'Debug': 'Debug',
'defines tables': 'defininisce le tabelle',
'Delete': 'Cancella',
'delete': 'Cancella',
'delete all checked': 'cancella tutti i selezionati',
'delete plugin': 'cancella plugin',
'Delete this file (you will be asked to confirm deletion)': 'Delete this file (you will be asked to confirm deletion)',
'Delete:': 'Cancella:',
'Deploy': 'deploy',
'Deploy on Google App Engine': 'Installa su Google App Engine',
'Deploy to OpenShift': 'Deploy to OpenShift',
'design': 'progetta',
'Detailed traceback description': 'Detailed traceback description',
'direction: ltr': 'direction: ltr',
'Disable': 'Disable',
'docs': 'docs',
'done!': 'fatto!',
'download layouts': 'download layouts',
'download plugins': 'download plugins',
'EDIT': 'MODIFICA',
'Edit': 'modifica',
'Edit application': 'Modifica applicazione',
'edit controller': 'modifica controller',
'edit controller:': 'edit controller:',
'Edit current record': 'Modifica record corrente',
'edit profile': 'modifica profilo',
'Edit This App': 'Modifica questa applicazione',
'edit views:': 'modifica viste (view):',
'Editing %s': 'Editing %s',
'Enterprise Web Framework': 'Enterprise Web Framework',
'Error logs for "%(app)s"': 'Log degli errori per "%(app)s"',
'Error snapshot': 'Error snapshot',
'Error ticket': 'Error ticket',
'Errors': 'errori',
'Exception instance attributes': 'Exception instance attributes',
'Exit Fullscreen': 'Exit Fullscreen',
'Expand Abbreviation': 'Expand Abbreviation',
'export as csv file': 'esporta come file CSV',
'exposes': 'espone',
'exposes:': 'exposes:',
'extends': 'estende',
'failed to reload module because:': 'ricaricamento modulo fallito perché:',
'file "%(filename)s" created': 'creato il file "%(filename)s"',
'file "%(filename)s" deleted': 'cancellato il file "%(filename)s"',
'file "%(filename)s" uploaded': 'caricato il file "%(filename)s"',
'file "%s" of %s restored': 'ripristinato "%(filename)s"',
'file changed on disk': 'il file ha subito una modifica su disco',
'file does not exist': 'file inesistente',
'file saved on %(time)s': "file salvato nell'istante %(time)s",
'file saved on %s': 'file salvato: %s',
'filter': 'filter',
'Find Next': 'Find Next',
'Find Previous': 'Find Previous',
'Frames': 'Frames',
'Functions with no doctests will result in [passed] tests.': 'I test delle funzioni senza "doctests" risulteranno sempre [passed].',
'Get from URL:': 'Get from URL:',
'Git Pull': 'Git Pull',
'Git Push': 'Git Push',
'graph model': 'graph model',
'Hello World': 'Salve Mondo',
'Help': 'aiuto',
'Hide/Show Translated strings': 'Hide/Show Translated strings',
'htmledit': 'modifica come html',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.',
'Import/Export': 'Importa/Esporta',
'includes': 'include',
'Index': 'Indice',
'insert new': 'inserisci nuovo',
'insert new %s': 'inserisci nuovo %s',
'inspect attributes': 'inspect attributes',
'Install': 'installa',
'Installed applications': 'Applicazioni installate',
'internal error': 'errore interno',
'Internal State': 'Stato interno',
'Invalid action': 'Azione non valida',
'invalid password': 'password non valida',
'Invalid Query': 'Richiesta (query) non valida',
'invalid request': 'richiesta non valida',
'invalid ticket': 'ticket non valido',
'Key bindings': 'Key bindings',
'Key bindings for ZenCoding Plugin': 'Key bindings for ZenCoding Plugin',
'language file "%(filename)s" created/updated': 'file linguaggio "%(filename)s" creato/aggiornato',
'Language files (static strings) updated': 'Linguaggi (documenti con stringhe statiche) aggiornati',
'languages': 'linguaggi',
'Languages': 'Linguaggi',
'Last saved on:': 'Ultimo salvataggio:',
'Layout': 'Layout',
'License for': 'Licenza relativa a',
'loading...': 'caricamento...',
'locals': 'locals',
'Login': 'Accesso',
'login': 'accesso',
'Login to the Administrative Interface': "Accesso all'interfaccia amministrativa",
'Logout': 'uscita',
'Main Menu': 'Menu principale',
'Manage': 'Manage',
'Menu Model': 'Menu Modelli',
'merge': 'unisci',
'Models': 'Modelli',
'models': 'modelli',
'Modules': 'Moduli',
'modules': 'moduli',
'new application "%s" created': 'creata la nuova applicazione "%s"',
'New application wizard': 'New application wizard',
'new plugin installed': 'installato nuovo plugin',
'New Record': 'Nuovo elemento (record)',
'new record inserted': 'nuovo record inserito',
'New simple application': 'New simple application',
'next 100 rows': 'prossime 100 righe',
'NO': 'NO',
'No databases in this application': 'Nessun database presente in questa applicazione',
'no match': 'nessuna corrispondenza',
'no package selected': 'no package selected',
'online designer': 'online designer',
'or alternatively': 'or alternatively',
'Or Get from URL:': 'Or Get from URL:',
'or import from csv file': 'oppure importa da file CSV',
'or provide app url:': "oppure fornisci url dell'applicazione:",
'Original/Translation': 'Originale/Traduzione',
'Overwrite installed app': 'sovrascrivi applicazione installata',
'Pack all': 'crea pacchetto',
'Pack compiled': 'crea pacchetto del codice compilato',
'Pack custom': 'Pack custom',
'pack plugin': 'crea pacchetto del plugin',
'PAM authenticated user, cannot change password here': 'utente autenticato tramite PAM, impossibile modificare password qui',
'password changed': 'password modificata',
'Peeking at file': 'Uno sguardo al file',
'plugin "%(plugin)s" deleted': 'plugin "%(plugin)s" cancellato',
'Plugin "%s" in application': 'Plugin "%s" nell\'applicazione',
'plugins': 'plugins',
'Plugins': 'I Plugins',
'Plural-Forms:': 'Plural-Forms:',
'Powered by': 'Powered by',
'previous 100 rows': '100 righe precedenti',
'Private files': 'Private files',
'private files': 'private files',
'Query:': 'Richiesta (query):',
'Rapid Search': 'Rapid Search',
'record': 'record',
'record does not exist': 'il record non esiste',
'record id': 'ID del record',
'register': 'registrazione',
'reload': 'reload',
'Reload routes': 'Reload routes',
'Remove compiled': 'rimozione codice compilato',
'Replace': 'Replace',
'Replace All': 'Replace All',
'request': 'request',
'Resolve Conflict file': 'File di risoluzione conflitto',
'response': 'response',
'restore': 'ripristino',
'revert': 'versione precedente',
'Rows in table': 'Righe nella tabella',
'Rows selected': 'Righe selezionate',
'rules are not defined': 'rules are not defined',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Run tests in this file (to run all files, you may also use the button labelled 'test')",
'Running on %s': 'Running on %s',
'Save': 'Save',
'Save file:': 'Save file:',
'Save file: %s': 'Save file: %s',
'Save via Ajax': 'Save via Ajax',
'Saved file hash:': 'Hash del file salvato:',
'selected': 'selezionato',
'session': 'session',
'session expired': 'sessione scaduta',
'Set Breakpoint on %s at line %s: %s': 'Set Breakpoint on %s at line %s: %s',
'shell': 'shell',
'Site': 'sito',
'some files could not be removed': 'non è stato possibile rimuovere alcuni files',
'Start searching': 'Start searching',
'Start wizard': 'start wizard',
'state': 'stato',
'static': 'statico',
'Static': 'Static',
'Static files': 'Files statici',
'Stylesheet': 'Foglio di stile (stylesheet)',
'Submit': 'Submit',
'submit': 'invia',
'successful': 'successful',
'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?',
'table': 'tabella',
'test': 'test',
'Testing application': 'Test applicazione in corso',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'logica dell\'applicazione, ogni percorso "URL" corrisponde ad una funzione esposta da un controller',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'the data representation, define database tables and sets': 'rappresentazione dei dati, definizione di tabelle di database e di "set" ',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'the presentations layer, views are also known as templates': 'Presentazione dell\'applicazione, viste (views, chiamate anche "templates")',
'There are no controllers': 'Non ci sono controller',
'There are no models': 'Non ci sono modelli',
'There are no modules': 'Non ci sono moduli',
'There are no plugins': 'There are no plugins',
'There are no private files': 'There are no private files',
'There are no static files': 'Non ci sono file statici',
'There are no translators, only default language is supported': 'Non ci sono traduzioni, viene solo supportato il linguaggio di base',
'There are no views': 'Non ci sono viste ("view")',
'These files are not served, they are only available from within your app': 'These files are not served, they are only available from within your app',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
'these files are served without processing, your images go here': 'questi files vengono serviti così come sono, le immagini vanno qui',
'This is the %(filename)s template': 'Questo è il template %(filename)s',
'Ticket': 'Ticket',
'Ticket ID': 'Ticket ID',
'TM': 'TM',
'to previous version.': 'torna a versione precedente',
'To create a plugin, name a file/folder plugin_[name]': 'Per creare un plugin, chiamare un file o cartella plugin_[nome]',
'toggle breakpoint': 'toggle breakpoint',
'Toggle Fullscreen': 'Toggle Fullscreen',
'Traceback': 'Traceback',
'translation strings for the application': "stringhe di traduzioni per l'applicazione",
'Translation strings for the application': 'Translation strings for the application',
'try': 'prova',
'try something like': 'prova qualcosa come',
'Try the mobile interface': 'Try the mobile interface',
'try view': 'try view',
'Unable to check for upgrades': 'Impossibile controllare presenza di aggiornamenti',
'unable to create application "%s"': 'impossibile creare applicazione "%s"',
'unable to delete file "%(filename)s"': 'impossibile rimuovere file "%(filename)s"',
'unable to delete file plugin "%(plugin)s"': 'impossibile rimuovere file di plugin "%(plugin)s"',
'Unable to download app because:': 'Impossibile scaricare applicazione perché:',
'Unable to download because': 'Impossibile scaricare perché',
'Unable to download because:': 'Unable to download because:',
'unable to parse csv file': 'non riesco a decodificare questo file CSV',
'unable to uninstall "%s"': 'impossibile disinstallare "%s"',
'unable to upgrade because "%s"': 'impossibile aggiornare perché "%s"',
'uncheck all': 'smarca tutti',
'Uninstall': 'disinstalla',
'update': 'aggiorna',
'update all languages': 'aggiorna tutti i linguaggi',
'Update:': 'Aggiorna:',
'upgrade web2py now': 'upgrade web2py now',
'upload': 'upload',
'Upload': 'Upload',
'Upload & install packed application': 'Carica ed installa pacchetto con applicazione',
'Upload a package:': 'Upload a package:',
'Upload and install packed application': 'Upload and install packed application',
'upload application:': 'carica applicazione:',
'upload file:': 'carica file:',
'upload plugin file:': 'carica file di plugin:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).',
'Use an url:': 'Use an url:',
'variables': 'variables',
'Version': 'Versione',
'Version %s.%s.%s %s (%s)': 'Version %s.%s.%s %s (%s)',
'Version %s.%s.%s (%s) %s': 'Version %s.%s.%s (%s) %s',
'versioning': 'sistema di versioni',
'Versioning': 'Versioning',
'View': 'Vista',
'view': 'vista',
'Views': 'viste',
'views': 'viste',
'Web Framework': 'Web Framework',
'web2py is up to date': 'web2py è aggiornato',
'web2py Recent Tweets': 'Tweets recenti per web2py',
'web2py upgraded; please restart it': 'web2py aggiornato; prego riavviarlo',
'Welcome %s': 'Benvenuto %s',
'Welcome to web2py': 'Benvenuto su web2py',
'YES': 'SÌ',
}
|
petjaui/petjaui-cards
|
refs/heads/master
|
node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/xcodeproj_file.py
|
505
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXFileReference
object as a "weak" reference: it does not "own" the PBXFileReference, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because an identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile('^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
return re.sub('\$\((.*?)\)', '${\\1}', input_string)
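# Illustrative usage of the two helpers above (a sketch, not part of the
# original module); both are plain functions, so they can be exercised
# directly:
#
#   SourceTreeAndPathFromPath('$(SDKROOT)/usr/lib')  # ('SDKROOT', 'usr/lib')
#   SourceTreeAndPathFromPath('$(SDKROOT)')          # ('SDKROOT', None)
#   SourceTreeAndPathFromPath('main.c')              # (None, 'main.c')
#   ConvertVariablesToShellSyntax('$(VAR)/x $(Y)')   # '${VAR}/x ${Y}'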
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, ComputeIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError, 'Strong dict for key ' + key + ' in ' + \
self.__class__.__name__
else:
that._properties[key] = value.copy()
else:
raise TypeError, 'Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all that do have
a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError, \
self.__class__.__name__ + ' must implement Name'
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's exceedingly less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
# Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
# 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
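# Illustrative sketch of the xor-fold above (not part of the original
# module): a 20-byte SHA-1 digest is unpacked into five big-endian 32-bit
# words w0..w4, which are folded round-robin into three words:
#
#   id_ints = [w0 ^ w3, w1 ^ w4, w2]
#
# and then formatted as 24 uppercase hexadecimal characters.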
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError, \
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name())
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
return '/* ' + comment.replace('*/', '(*)/') + ' */'
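# Illustrative behavior of _EncodeComment (not part of the original module):
#
#   self._EncodeComment('Frameworks')     # '/* Frameworks */'
#   self._EncodeComment('end */ marker')  # '/* end (*)/ marker */'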
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
# _escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), / (slash), and _ (underscore) is present.
# Also use quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\Unnnn", where nnnn is the Unicode code point
# in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError, "Can't make " + value.__class__.__name__ + ' printable'
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
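# Illustrative output shape (a sketch, not part of the original module): for
# a multi-line object such as a PBXGroup named 'Source', Print emits
# something like the following (tab indentation omitted; the id shown is
# hypothetical):
#
#   ABCDEF0123456789ABCDEF01 /* Source */ = {
#   isa = PBXGroup;
#   children = (
#   );
#   name = Source;
#   sourceTree = "<group>";
#   };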
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError, property + ' not in ' + self.__class__.__name__
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError, \
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError, \
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError, "Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError, key + ' not in ' + self.__class__.__name__
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError, key + ' of ' + self.__class__.__name__ + ' must be list'
if not isinstance(value, property_type):
raise TypeError, 'item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
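# Illustrative usage of the property accessors above (a sketch, not part of
# the original module; PBXGroup and PBXFileReference are defined later in
# this file, and PBXGroup callers would normally prefer AppendChild, which
# also maintains the group's lookup dicts):
#
#   group = PBXGroup({'name': 'Source'})
#   group.HasProperty('name')         # True
#   group.GetProperty('name')         # 'Source'
#   group.SetProperty('path', 'src')  # validated against the schema
#   group.AppendProperty('children', PBXFileReference({'path': 'main.c'}))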
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError, self.__class__.__name__ + ' requires ' + property
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
a/b/f1 and a/b is collapsed into a single group a/b, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. As it is permitted to have no name or
path, it is permitted to use the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
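# Illustrative hashables (a sketch, not part of the original module): a
# PBXFileReference created with path 'src/main.c' gets name 'main.c' in
# __init__, so Hashables returns:
#
#   ['PBXFileReference.name', 'main.c',
#    'PBXFileReference.path', 'src',
#    'PBXFileReference.path', 'main.c']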
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
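# Illustrative paths (a sketch, not part of the original module): a
# PBXFileReference({'path': 'sub/main.c'}) inside a PBXGroup({'path': 'src'})
# has PathFromSourceTreeAndPath() == 'sub/main.c' and
# FullPath() == 'src/sub/main.c'; a PBXFileReference({'path':
# '$(SDKROOT)/usr/lib'}) is rewritten by __init__ to sourceTree 'SDKROOT'
# with path 'usr/lib', so both methods return '$(SDKROOT)/usr/lib'.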
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
# It is not sufficient to rely on just name and parent to build a unique
# hashable: a node could have two child PBXGroups sharing a common name.
# To add entropy, the hashable is enhanced with the names of all of its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError, 'Found multiple children with path ' + child_path
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError, 'Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path)
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
# etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
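# Illustrative usage of AddOrGetFileByPath (a sketch, not part of the
# original module):
#
#   group = PBXGroup({'name': 'Source'})
#   ref = group.AddOrGetFileByPath('a/b/main.c', hierarchical=True)
#   # nested PBXGroups 'a' and 'b' are created; ref is the PBXFileReference
#   nib = group.AddOrGetFileByPath('res/en.lproj/MainMenu.nib', True)
#   # nib is a PBXVariantGroup named 'MainMenu.nib' holding the 'en' variant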
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
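  # Illustrative sketch (not part of gyp): collapsing single-child groups.
  #
  #   a = PBXGroup({'path': 'a'})
  #   b = PBXGroup({'path': 'b'})
  #   c = PBXGroup({'path': 'c'})
  #   a.AppendChild(b)
  #   b.AppendChild(c)
  #   a.TakeOverOnlyChild()
  #   # a._properties['path'] is now 'a/b/c', and a holds c's (empty)
  #   # children list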
def SortGroup(self):
self._properties['children'] = \
sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables, they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'ttf': 'file',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
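  # Illustrative sketch (not part of gyp): how the maps above are applied
  # when no file type is given explicitly.
  #
  #   PBXFileReference({'path': 'main.cc'})
  #   # gets lastKnownFileType 'sourcecode.cpp.cpp'
  #   PBXFileReference({'path': 'rules.gyp'})
  #   # gets explicitFileType 'sourcecode' (via prop_map)
  #   PBXFileReference({'path': 'Some.framework/'})
  #   # trailing slash marks a directory: lastKnownFileType 'folder'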
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError, name
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
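  # Illustrative sketch (not part of gyp): the tri-state return value.
  #
  #   lst = XCConfigurationList({'buildConfigurations': [
  #       XCBuildConfiguration({'name': 'Debug'}),
  #       XCBuildConfiguration({'name': 'Release'})]})
  #   lst.HasBuildSetting('FOO')  # 0: no child defines FOO
  #   lst.ConfigurationNamed('Debug').SetBuildSetting('FOO', '1')
  #   lst.HasBuildSetting('FOO')  # -1: only some children define FOO
  #   lst.ConfigurationNamed('Release').SetBuildSetting('FOO', '1')
  #   lst.HasBuildSetting('FOO')  # 1: all children agree on FOO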
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError, 'Variant values for ' + key
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
    _files_by_path: A dict mapping the path of each child in the files list
      (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError, \
self.__class__.__name__ + ' must implement FileGroup'
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError, 'Found multiple build files with path ' + path
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
    is already present in the phase, referenced by a different PBXBuildFile
    object, raises an exception.  This does not raise an exception when
    a PBXFileReference or PBXVariantGroup reappears and is referenced by the
    same PBXBuildFile that already introduced it, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError, 'Found multiple build files for ' + \
xcfilelikeelement.Name()
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
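  # Illustrative sketch (not part of gyp): assuming target is a
  # PBXNativeTarget attached to a PBXProject, AddFile creates and tracks
  # the PBXBuildFile children of a phase.
  #
  #   phase = target.SourcesPhase()
  #   phase.AddFile('main.cc')
  #   phase.AddFile('English.lproj/InfoPlist.strings')
  #   phase.AddFile('French.lproj/InfoPlist.strings')
  #   # both .strings variants share one PBXVariantGroup and one
  #   # PBXBuildFile; the second call only records the new path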
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
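  # Illustrative sketch (not part of gyp): for '$(DIR)/sub/path' the match
  # groups are ('DIR', '/sub/path', 'sub/path'); for '$(DIR)' alone they
  # are ('DIR', '', None).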
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
    # : 6,  # Executables
# : 7, # Resources
# : 15, # Java Resources
# : 10, # Frameworks
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
      # The path is relative to an Xcode variable like $(BUILT_PRODUCTS_DIR).
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError, 'Can\'t use path %s in a %s' % \
(path, self.__class__.__name__)
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
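  # Illustrative sketch (not part of gyp): how SetDestination fills in the
  # two properties.
  #
  #   phase = PBXCopyFilesBuildPhase({'name': 'Copy PlugIns'})
  #   phase.SetDestination('$(BUILT_PRODUCTS_DIR)/PlugIns')
  #   # dstSubfolderSpec 16, dstPath 'PlugIns'
  #   phase.SetDestination('/usr/local/lib')
  #   # dstSubfolderSpec 0, dstPath 'usr/local/lib'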
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
# An XCTarget is really just an XCObject, the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
  #  suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
}
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
# Extension override.
suffix = '.' + force_extension
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
# Some phases may be present in multiples in a well-formed project file,
# but phases like PBXSourcesBuildPhase may only be present singly, and
        # callers of this function rely on getting back a unique phase.  Loop
# over the entire list of phases and assert if more than one of the
# desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
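  # Illustrative sketch (not part of gyp): assuming SourceTreeAndPathFromPath
  # splits '$(INTERMEDIATE_DIR)/foo.cc' into ('INTERMEDIATE_DIR', 'foo.cc'),
  #
  #   project.RootGroupForPath('$(INTERMEDIATE_DIR)/foo.cc')
  #   # -> (project.IntermediatesGroup(), True)
  #   project.RootGroupForPath('src/foo.cc')
  #   # -> (project.SourceGroup(), True)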
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists.  Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
return [product_group, project_ref]
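  # Illustrative sketch (not part of gyp): linking two project files.
  #
  #   a = PBXProject(path='a.xcodeproj')
  #   b = PBXProject(path='b.xcodeproj')
  #   (product_group, project_ref) = a.AddOrGetProjectReference(b)
  #   # project_ref is a PBXFileReference to 'b.xcodeproj'; a second call
  #   # returns the same pair instead of creating a duplicate reference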
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
# PBXContainerItem to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y: CompareProducts(x, y, remote_products))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 45],
'rootObject': [0, PBXProject, 1, 1],
})
def SetXcodeVersion(self, version):
version_to_object_version = {
'2.4': 45,
'3.0': 45,
'3.1': 45,
'3.2': 46,
}
if not version in version_to_object_version:
supported_str = ', '.join(sorted(version_to_object_version.keys()))
raise Exception(
'Unsupported Xcode version %s (supported: %s)' %
( version, supported_str ) )
compatibility_version = 'Xcode %s' % version
self._properties['rootObject'].SetProperty('compatibilityVersion',
compatibility_version)
    self.SetProperty('objectVersion', version_to_object_version[version])
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
    # loop to do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
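  # Illustrative sketch (not part of gyp): writing a project file to disk.
  #
  #   project = PBXProject(path='sample.xcodeproj')
  #   project_file = XCProjectFile({'rootObject': project})
  #   project_file.ComputeIDs()
  #   output = open('sample.xcodeproj/project.pbxproj', 'w')
  #   project_file.Print(output)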
|
GunoH/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyUnresolvedReferencesInspection/StubWithGetAttr/complete.py
|
19
|
def a():
return 1
def b():
return ""
|
KiChjang/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/element_send_keys/conftest.py
|
42
|
import pytest
@pytest.fixture
def create_files(tmpdir_factory):
def inner(filenames):
filelist = []
tmpdir = tmpdir_factory.mktemp("tmp")
for filename in filenames:
fh = tmpdir.join(filename)
fh.write(filename)
filelist.append(fh)
return filelist
inner.__name__ = "create_files"
return inner
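# Illustrative usage (not part of this conftest): a test requests the
# fixture and calls it with the file names it needs.
#
#   def test_upload(create_files):
#       files = create_files(["foo.txt", "bar.txt"])
#       assert [f.basename for f in files] == ["foo.txt", "bar.txt"]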
|
calthecoder/zealous-fibula
|
refs/heads/master
|
enemies.py
|
1
|
import random, sys
from time import sleep
enemylist = ['Goblin', 'Ogre', 'Giant Spider', 'Dragon', 'Orc']
class Enemy:
'''A base class for all enemies'''
def __init__(self, name, hp, description, pview):
        '''Creates a new enemy
        :param name: the name of the enemy
        :param hp: the hit points of the enemy
        :param description: the description shown with each of the enemy's attacks
        :param pview: the description of the enemy shown to the player
        '''
self.name = name
self.hp = hp
self.description = description
self.pview = pview
def act(self, p_obj):
i = random.randint(1,3)
ret = p_obj # to return
if i != 3:
print("\nIt sees you!\n")
return self.attack(ret) #needs to be here. if 'return' is omitted, it returns None.
else:
print("\nThe "+self.name+" does not see you.")
m = input('Attack or move on? (Q, M) ')
if m == 'q' or m == 'Q':
return self.attack(ret)
else:
return ret.hp #needs to be ret.hp, not just ret. See changelog, 0.2.4
def attack(self, p_obj):
ret = p_obj.hp
        #BATTLE TIME!! for 5 secs
print("Battle starting...\n")
sleep(5)
rand = random.randint(1,p_obj.weapon.accuracy)
if self.hp>p_obj.weapon.dex*p_obj.weapon.damage and rand != p_obj.weapon.accuracy:
print("*****************The "+self.name+" attacks you!*****************\n*****************He wins!*****************\n\n")
ret = 0
sys.exit()
else:
print("*****************The "+self.name+" attacks you!*****************\n*****************He loses!*****************\n\n")
#include a function to turn the enemy into a bspace that says something dead is on the floor
ret -= (self.hp/3)
self.died()
return ret
def died(self):
self.pview = 'a room with a dead '+self.name+' lying on the ground in a pool of blood'
self.name = 'bspace'
self.hp = -1 #diff than bspace
self.description = 'An empty cave with nothing but a dead beast in it'
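#Illustrative example (not part of the original file): creating an enemy
#and letting it die.
#
#   gob = Goblin(2, 3)
#   print(gob.name, gob.hp)     # Goblin 60
#   gob.died()
#   print(gob.name, gob.pview)  # bspace, 'a room with a dead Goblin ...'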
#don't forget to change enemylist when adding new enemies
class Goblin(Enemy):
def __init__(self, y,x):
super().__init__(name='Goblin',
hp=60,
description='A normal evil minion which does the general evil bidding of its master.',
pview='a hunched over figure with an evil smirk on its face.')
self.x = x
self.y = y
class Orc(Enemy):
def __init__(self, y,x):
super().__init__(name='Orc',
hp=70,
description='A slightly eviler version of a Goblin.',
pview='an ugly orc with a huge sword.')
self.x = x
self.y = y
class Ogre(Enemy):
def __init__(self,y,x):
super().__init__(name='Ogre',
hp=85,
description='A fairly stupid bloke, all it does is smash anything that moves.',
pview='a giant, stupid, upright animal.')
self.x = x
self.y = y
class GiantSpider(Enemy):
def __init__(self, y,x):
super().__init__(name='Giant Spider',
hp=125,
description='This extremely dangerous beast will split you in half and then suck out your organs.',
pview='a three meter tall spider. Venom drips from its fangs and splatters on the ground.')
self.x = x
self.y = y
class Dragon(Enemy):
def __init__(self,y,x):
super().__init__(name='Dragon',
hp=500,
                         description='You definitely do not want to cross paths with this dude!!',
                         pview='a huge, flying, fire-breathing menace.')
self.x = x
self.y = y
#Blanks
class bspace(Enemy):
def __init__(self,y,x):
super().__init__(name='bspace',
hp=-1000,
description='An empty cave room with nothing in it.',
pview='an empty room with nothing in it at all.')
self.x = x
self.y = y
class bspace2(Enemy):
def __init__(self,y,x):
super().__init__(name='bspace',
hp=-1000,
description='An empty cave room with nothing in it.',
pview='a strange and musty smelling room with green, sticky mold on the walls.')
self.x = x
self.y = y
class bspace3(Enemy):
def __init__(self,y,x):
super().__init__(name='bspace',
hp=-1000,
description='An empty cave room with nothing in it.',
pview='a very peculiar looking room. Strange shadows dance on the walls and play tricks with your mind.')
self.x = x
self.y = y
class bspace4(Enemy):
def __init__(self,y,x):
super().__init__(name='bspace',
hp=-1000,
description='An empty cave room with nothing in it.',
pview="a dimmly lit room with a wet floor. Don't slip!")
self.x = x
self.y = y
class bspace5(Enemy): #for items that were picked up
def __init__(self,y,x):
super().__init__(name='bspace',
hp=-1000,
description='An empty cave room with nothing in it.',
pview="a room that has no meaning or value; just empty.")
self.x = x
self.y = y
class Road(Enemy):
def __init__(self,y,x):
super().__init__(name='bspace',
hp=-1000,
description='Part of a long road.',
pview="another segment of the road you have been travelling on.")
self.x = x
self.y = y
##Level gateways
class Level1(Enemy): # level gateway portal
def __init__(self,y,x):
super().__init__(name='level',
hp=-1000,
description='Portal to Level 1.',
pview="the gateway to Level 1.")
self.x = x
self.y = y
self.locked = False
self.num = 1
class Level2(Enemy): # level gateway portal
def __init__(self,y,x):
super().__init__(name='level',
hp=-1000,
description='Portal to Level 2.',
pview="the gateway to Level 2.")
self.x = x
self.y = y
self.locked = True
self.num = 2
class Level3(Enemy): # level gateway portal
def __init__(self,y,x):
super().__init__(name='level',
hp=-1000,
description='Portal to Level 3.',
pview="the gateway to Level 3.")
self.x = x
self.y = y
self.locked = True
self.num = 3
class Level4(Enemy): # level gateway portal
def __init__(self,y,x):
super().__init__(name='level',
hp=-1000,
description='Portal to Level 4.',
pview="the gateway to Level 4.")
self.x = x
self.y = y
self.locked = True
self.num = 4
class Level5(Enemy): # level gateway portal
def __init__(self,y,x):
super().__init__(name='level',
hp=-1000,
description='Portal to Level 5.',
pview="the gateway to Level 5.")
self.x = x
self.y = y
self.locked = True
self.num = 5
|
skyddv/neutron
|
refs/heads/master
|
neutron/db/migration/alembic_migrations/versions/35a0f3365720_add_port_security_in_ml2.py
|
47
|
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add port-security in ml2
Revision ID: 35a0f3365720
Revises: 341ee8a4ccb5
Create Date: 2014-09-30 09:41:14.146519
"""
# revision identifiers, used by Alembic.
revision = '35a0f3365720'
down_revision = '341ee8a4ccb5'
from alembic import op
def upgrade():
context = op.get_context()
if context.bind.dialect.name == 'ibm_db_sa':
# NOTE(junxie): DB2 stores booleans as 0 and 1.
op.execute('INSERT INTO networksecuritybindings (network_id, '
'port_security_enabled) SELECT id, 1 FROM networks '
'WHERE id NOT IN (SELECT network_id FROM '
'networksecuritybindings);')
op.execute('INSERT INTO portsecuritybindings (port_id, '
'port_security_enabled) SELECT id, 1 FROM ports '
'WHERE id NOT IN (SELECT port_id FROM '
'portsecuritybindings);')
else:
op.execute('INSERT INTO networksecuritybindings (network_id, '
'port_security_enabled) SELECT id, True FROM networks '
'WHERE id NOT IN (SELECT network_id FROM '
'networksecuritybindings);')
op.execute('INSERT INTO portsecuritybindings (port_id, '
'port_security_enabled) SELECT id, True FROM ports '
'WHERE id NOT IN (SELECT port_id FROM '
'portsecuritybindings);')
|
m-labs/linux-milkymist
|
refs/heads/ng
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
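# Hedged modernization note (not part of this perf script): the script is
# Python 2 only -- the thread module became _thread in Python 3 -- and the
# autodict/TypeError tally above could then be a collections.Counter:
#
# from collections import Counter
# syscalls = Counter()
# def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs,
#                             common_nsecs, common_pid, common_comm, id, args):
#     if for_comm is None or common_comm == for_comm:
#         syscalls[id] += 1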
|
JimCircadian/ansible
|
refs/heads/devel
|
test/units/plugins/strategy/test_strategy_linear.py
|
18
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.play_context import PlayContext
from ansible.plugins.strategy.linear import StrategyModule
from ansible.executor.task_queue_manager import TaskQueueManager
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestStrategyLinear(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_noop(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: no
tasks:
- block:
- block:
- name: task1
debug: msg='task1'
failed_when: inventory_hostname == 'host01'
- name: task2
debug: msg='task2'
rescue:
- name: rescue1
debug: msg='rescue1'
- name: rescue2
debug: msg='rescue2'
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 2):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
mock_var_manager._fact_cache['host00'] = dict()
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
mock_options = MagicMock()
mock_options.module_path = None
tqm = TaskQueueManager(
inventory=inventory,
variable_manager=mock_var_manager,
loader=fake_loader,
options=mock_options,
passwords=None,
)
tqm._initialize_processes(3)
strategy = StrategyModule(tqm)
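# The linear strategy keeps hosts in lockstep: each _get_next_task_lockstep
# call below returns one (host, task) pair per host, handing noop meta tasks
# to hosts that have diverged into a rescue block so that every host stays
# on the same step of the play.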
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# debug: task1, debug: task1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, 'task1')
self.assertEqual(host2_task.name, 'task1')
# mark the second host failed
itr.mark_host_failed(hosts[1])
# debug: task2, meta: noop
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'debug')
self.assertEqual(host2_task.action, 'meta')
self.assertEqual(host1_task.name, 'task2')
self.assertEqual(host2_task.name, '')
# meta: noop, debug: rescue1
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue1')
# meta: noop, debug: rescue2
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'debug')
self.assertEqual(host1_task.name, '')
self.assertEqual(host2_task.name, 'rescue2')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# implicit meta: flush_handlers
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNotNone(host1_task)
self.assertIsNotNone(host2_task)
self.assertEqual(host1_task.action, 'meta')
self.assertEqual(host2_task.action, 'meta')
# end of iteration
hosts_left = strategy.get_hosts_left(itr)
hosts_tasks = strategy._get_next_task_lockstep(hosts_left, itr)
host1_task = hosts_tasks[0][1]
host2_task = hosts_tasks[1][1]
self.assertIsNone(host1_task)
self.assertIsNone(host2_task)
|
pacoqueen/upy
|
refs/heads/master
|
formularios/albaranes_de_salida.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## albaranes.py -- Outgoing delivery notes for goods (sales).
###################################################################
## NOTES:
##
###################################################################
## Changelog:
## 14 October 2005 -> Started.
## 17 October 2005 -> 90% functional.
## 17 October 2005 -> 95% functional.
## 18 October 2005 -> 99% functional.
## 6 December 2005 -> Added "multi" when adding sale lines from the
## pending sales.
## Fixed a bug caused by a stray "articuloVenta"
## typo that had slipped in (instead of
## "articuloventa", which is the correct one).
## Added smart_add and by-lot buttons.
## 9 December 2005 -> Added default VAT.
## 11 January 2006 -> Added print button.
## 19 January 2006 -> Fork to v02.
## 23 January 2006 -> Encapsulated into a class.
## 27 January 2006 -> Swapped province for telephone, but ONLY in
## the window. In the DB the column keeps the
## same name.
## The remarks that get printed are now those of
## the shipment; no dialog asks for them anymore.
## 27 January 2006 -> Province completely replaced by telephone,
## both in the DB and in the form.
## 6 June 2006 -> Services are also added to the delivery note.
## 12 June 2006 -> Only ask whether to redistribute when the
## delivery note is new or has been modified.
###################################################################
## + DONE: The behaviour of drop_ldv still needs checking. I have
## kept it as close as possible to the use case. It may
## create too many empty orders, but at least it works as
## it should. In any case it needs deeper testing before
## going into production.
## + DONE: The delivery note still has to be locked after 24/48
## hours. How? Forget about it. CWT: it is locked as soon
## as it is printed.
## TODO: When creating the invoice due dates on printing, the
## IRPF withholding is not taken into account.
## TODO: It is ***EXTREMELY*** SLOW with bag delivery notes.
## TODO: When loose boxes are sold, the box must be the package
## instead of the pallet. But only in that case.
###################################################################
## NOTES:
## Watch out for return lines. Their articles are now counted
## even after being unlinked from the delivery note. Returning
## them again in another credit note is no problem, since one
## article can sit on several return lines at once.
###################################################################
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, sqlobject
import sys, os
try:
import pclases
except ImportError:
from os.path import join as pathjoin; sys.path.append(pathjoin("..", "framework"))
import pclases
try:
import geninformes
except ImportError:
sys.path.append('../informes')
import geninformes
from utils import ffloat, _float as float
import datetime
from postomatic import attach_menu_notas
from ventana_progreso import VentanaProgreso
from psycopg2 import ProgrammingError as psycopg_ProgrammingError
class AlbaranesDeSalida(Ventana):
def __init__(self, objeto = None, usuario = None):
"""
Constructor. objeto puede ser un objeto de pclases con el que
comenzar la ventana (en lugar del primero de la tabla, que es
el que se muestra por defecto).
"""
self.usuario = usuario
self.modificado = False # Para detectar si el albarán en pantalla se ha modificado
# en la sesión actual.
self.nuevo = False # Para detectar si un albarán es nuevo.
Ventana.__init__(self, 'albaranes_de_salida.glade', objeto,
usuario = self.usuario)
connections = {'b_salir/clicked': self.pre_salir,
'b_fecha/clicked': self.buscar_fecha,
'b_drop_ldv/clicked': self.drop_ldv,
'b_add_pedido/clicked': self.add_pedido,
'b_add_producto/clicked': self.pedir_rango,
'b_actualizar/clicked': self.actualizar_ventana,
'b_guardar/clicked': self.guardar,
'b_borrar/clicked': self.borrar_albaran,
'b_nuevo/clicked': self.crear_nuevo_albaran,
'b_buscar/clicked': self.buscar_albaran,
'b_imprimir/clicked': self.imprimir,
'b_guardar_transportista/clicked':
self.guardar_transportista,
'b_guardar_destino/clicked': self.guardar_destino,
'b_leyenda/clicked': self.ver_leyenda,
'b_packinglist/clicked': self.packinglist,
'b_add_srv/clicked': self.add_srv,
'b_drop_srv/clicked': self.drop_srv,
'ventana_leyenda/delete_event': self.ocultar_leyenda,
'b_drop_transporteACuenta/clicked':
self.drop_transporte_a_cuenta,
'b_add_transporteACuenta/clicked':
self.add_transporte_a_cuenta,
'expander1/activate': self.expandirme_solo_a_mi,
'expander2/activate': self.expandirme_solo_a_mi,
'expander3/activate': self.expandirme_solo_a_mi,
'expander4/activate': self.expandirme_solo_a_mi,
'b_phaser/clicked': self.descargar_de_terminal,
'cbe_almacenOrigenID/changed': self.check_almacenes,
'cbe_almacenDestinoID/changed': self.check_almacenes,
'ch_facturable/toggled': self.sombrear_entry_motivo,
}
self.add_connections(connections)
if pclases.DEBUG:
antes = time.time()
print "Voy a inicializar la ventana..."
self.inicializar_ventana()
if pclases.DEBUG:
print " ... ventana inicializada. ", time.time() - antes
if self.objeto == None:
self.ir_a_primero()
else:
self.ir_a(objeto)
gtk.main()
def sombrear_entry_motivo(self, ch):
"""
Si el albarán es facturable sombrea el entry donde se escribe el
motivo por el que no sería facturable, y viceversa.
"""
if self.objeto:
self.wids['e_motivo'].set_sensitive(not self.objeto.facturable)
else:
self.wids['e_motivo'].set_sensitive(not ch.get_active())
def check_almacenes(self, combo):
"""
Comprueba que no se haya seleccionado el mismo almacén en los dos
desplegables y que el almacén origen no sea None.
"""
ido = utils.combo_get_value(self.wids['cbe_almacenOrigenID'])
idd = utils.combo_get_value(self.wids['cbe_almacenDestinoID'])
# 1.- The source warehouse cannot be None.
if ido == None:
utils.dialogo_info(titulo = "ERROR ALMACÉN ORIGEN",
texto = "Debe seleccionar un almacén origen. Se usará el\n"
"almacén principal como origen de la mercancía.",
padre = self.wids['ventana'])
self.objeto.almacenOrigen = pclases.Almacen.get_almacen_principal()
self.objeto.syncUpdate()
utils.combo_set_from_db(self.wids['cbe_almacenOrigenID'],
self.objeto.almacenOrigenID)
self.wids['cbe_almacenOrigenID'].child.set_text(
self.objeto.almacenOrigen.nombre)
# 2.- If the source and destination warehouses are the same, show a
# warning dialog and set the destination to None.
elif ido == idd:
utils.dialogo_info(titulo = "ERROR ALMACENES",
texto =
"No puede asignar el mismo almacén como origen y destino.",
padre = self.wids['ventana'])
utils.combo_set_from_db(self.wids['cbe_almacenOrigenID'],
pclases.Almacen.get_almacen_principal_id_or_none())
utils.combo_set_from_db(self.wids['cbe_almacenDestinoID'], None)
def expandirme_solo_a_mi(self, expander):
"""
Oculta los otros 3 expanders de la página para no ocupar tanto sitio.
"""
expanders = (self.wids['expander1'], self.wids['expander2'], self.wids['expander3'], self.wids['expander4'])
for ex in expanders:
if ex != expander:
ex.set_expanded(False)
# --------------- Helper functions ----------------------------------
def inicializar_leyenda(self):
ws = []
ws.append(self.wids['dwg_amarillo'].window)
ws.append(self.wids['dwg_naranja'].window)
ws.append(self.wids['dwg_rojo'].window)
ws.append(self.wids['dwg_blanco'].window)
ws.append(self.wids['dwg_verde'].window)
ws.append(self.wids['dwg_azul'].window)
cs = ("yellow", "orange", "red", "white", "green", "blue")
for i in xrange(len(cs)): # There is surely a more "pythonic"
# way to do this.
color = ws[i].get_colormap().alloc_color(cs[i])
ws[i].set_background(color)
self.wids['ventana_leyenda'].hide()
def ocultar_leyenda(self, w, e):
self.wids['ventana_leyenda'].hide()
self.wids['b_leyenda'].set_active(False)
return True # Keep the window alive; do not let it be destroyed.
def ver_leyenda(self, w):
if w.get_active():
ws = []
ws.append(self.wids['dwg_amarillo'].window)
ws.append(self.wids['dwg_naranja'].window)
ws.append(self.wids['dwg_rojo'].window)
ws.append(self.wids['dwg_blanco'].window)
ws.append(self.wids['dwg_verde'].window)
ws.append(self.wids['dwg_azul'].window)
ws.append(self.wids['dwg_RosyBrown3'].window)
cs = ("yellow", "orange", "red", "white", "green", "blue",
"RosyBrown3")
for i in xrange(len(cs)): # There is surely a more
# "pythonic" way to do this.
color = ws[i].get_colormap().alloc_color(cs[i])
ws[i].set_background(color)
self.wids['ventana_leyenda'].show()
else:
self.wids['ventana_leyenda'].hide()
def actualizar_destino(self, iddest):
t = pclases.Destino.get(iddest)
t.nombre = self.wids['cbe_nom'].child.get_text()
t.direccion = self.wids['e_direccion'].get_text()
t.cp = self.wids['e_cp'].get_text()
t.ciudad = self.wids['e_ciudad'].get_text()
t.telefono = self.wids['e_telf'].get_text()
t.pais = self.wids['e_pais'].get_text()
def actualizar_transportista(self, idtransp):
t = pclases.Transportista.get(idtransp)
t.nombre = self.wids['e_nombre'].get_text()
t.dni = self.wids['cbe_dni'].child.get_text()
t.telefono = self.wids['e_telefono'].get_text()
t.agencia = self.wids['e_agencia'].get_text()
t.matricula = self.wids['e_matricula'].get_text()
def crear_nuevo_destino(self):
destinos = pclases.Destino.select(pclases.AND(
pclases.Destino.q.nombre == self.wids['cbe_nom'].child.get_text(),
pclases.Destino.q.direccion == self.wids['e_direccion'].get_text(),
pclases.Destino.q.cp == self.wids['e_cp'].get_text(),
pclases.Destino.q.ciudad == self.wids['e_ciudad'].get_text(),
pclases.Destino.q.telefono == self.wids['e_telf'].get_text(),
pclases.Destino.q.pais == self.wids['e_pais'].get_text()))
if destinos.count() > 0:
t = destinos[0]
else:
t = pclases.Destino(nombre = self.wids['cbe_nom'].child.get_text(),
direccion = self.wids['e_direccion'].get_text(),
cp = self.wids['e_cp'].get_text(),
ciudad = self.wids['e_ciudad'].get_text(),
telefono = self.wids['e_telf'].get_text(),
pais = self.wids['e_pais'].get_text())
self.wids['cbe_nom'].get_model().append((t.id, t.nombre))
utils.combo_set_from_db(self.wids['cbe_nom'], t.id)
return t
def crear_nuevo_transportista(self):
t = pclases.Transportista(nombre = self.wids['e_nombre'].get_text(),
dni = self.wids['cbe_dni'].child.get_text(),
telefono = self.wids['e_telefono'].get_text(),
agencia = self.wids['e_agencia'].get_text(),
matricula = self.wids['e_matricula'].get_text())
self.wids['cbe_dni'].get_model().append((t.id, t.dni))
utils.combo_set_from_db(self.wids['cbe_dni'], t.id)
def refinar_busqueda_productos(self, resultados):
filas_res = []
for r in resultados:
filas_res.append((r.id, r.codigo, r.nombre, r.descripcion, r.get_existencias(), r.get_stock()))
idproducto = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione producto',
cabeceras = ('ID Interno', 'Código', 'Nombre', 'Descripción', 'Existencias', 'Stock'),
padre = self.wids['ventana'])
if idproducto < 0:
return None
else:
return idproducto
def add_duplicado(self, codigo, albaran):
"""
Añade el rollo o bala con código "codigo" al albarán siempre y
cuando el producto se hubiese pedido.
Si codigo no es de la forma [rRbBcC]\d+[dD] da mensaje de error y
sale.
NO ACEPTA RANGOS.
Devuelve el objeto artículo añadido o None si no se pudo.
"""
articulo_annadido = None
import re
recodigo = re.compile("[rRbBcC]\d+[Dd]")
res = recodigo.findall(codigo)
if res == []:
utils.dialogo_info(titulo = "ERROR CÓDIGO",
texto = "El texto %s no es un código válido.\nSi está intentando introducir productos duplicados por motivos excepcionales\n(su código acaba en D) no pueden ser añadidos por lote.\n\nIntrodúzcalos uno a uno usando el código de trazabilidad completo,\nes decir, comenzando por R, B o C y acabando en D.\nPor ejemplo: R78042D.",
padre = self.wids['ventana'])
else:
codigo = res[0].upper()
try:
if codigo.startswith("R"):
articulo = pclases.Rollo.select(
pclases.Rollo.q.codigo == codigo)[0].articulos[0]
elif codigo.startswith("B"):
articulo = pclases.Bala.select(
pclases.Bala.q.codigo == codigo)[0].articulos[0]
elif codigo.startswith("C"):
articulo = pclases.Bigbag.select(
pclases.Bigbag.q.codigo == codigo)[0].articulos[0]
except (IndexError, AttributeError), msg:
utils.dialogo_info(titulo = "CÓDIGO NO ENCONTRADO",
texto = "Código %s no encontrado." % (codigo),
padre = self.wids['ventana'])
else:
self.crear_ldv([articulo])
self.actualizar_ventana()
articulo_annadido = articulo
return articulo_annadido
def pedir_rango(self, producto):
"""
Pide un rango de números de bala o rollo.
Devuelve una lista con los identificadores de
artículo pertenecientes al producto de venta
recibido que entran en el rango y no están
relacionados ya con otros albaranes.
"""
if (self.objeto.cliente
and self.objeto.cliente.calcular_credito_disponible() <= 0):
utils.dialogo_info(titulo = "CLIENTE SIN CRÉDITO",
texto = "El cliente ha sobrepasado el "
"crédito concedido.",
padre = self.wids['ventana'])
return
if (self.objeto.cliente
and self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()):
frasvenc = self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()
utils.dialogo_info(titulo = "CLIENTE DEUDOR",
texto = "El cliente tiene %d facturas "
"vencidas sin documento de pago." % (
len(frasvenc)),
padre = self.wids['ventana'])
return
# DONE: Ported the range-request window that accepts dashes,
# commas, etc. here.
strrango = utils.dialogo_entrada(titulo = 'INTRODUZCA RANGO',
texto = """
Rango de número de balas/rollos o el código individual.
Escriba el rango de códigos de la forma "xxxx-yyyy", ambos inclusive.
En caso de ambigüedad, introdúzcalos precedidos de R para geotextiles,
B para fibra, C para fibra de cemento, Z para balas de cable, X para
rollos de longitud insuficiente, Y para geotextiles «C», H para
palés, J para cajas de bolsas de fibra de cemento y K para bolsas
sueltas.
También puede introducir varios rangos separados por coma o espacio.
Por ejemplo:
123-145 Intentará añadir los rollos 123 a 145, ambos inclusive.
Si no los encuentra, los buscará entre las balas de fibra.
R123-145 Añadirá los rollos 123 a 145, ambos inclusive.
R123-R145 Hace lo mismo que el caso anterior.
B123-145 Añadirá, si se encuentran y están debidamente analizadas y
catalogadas, las balas de fibra 123 a 145, ambas inclusive.
B123-B145 Hace lo mismo que en el caso anterior.
C10-C15 BigBags de GEOCEM del número 10 al 15.
B100-B105, R4000 C101 Introduce el rango de balas de 100 a 105, ambas
inclusive; el rollo 4000 y el bigbag 101.
H31/40 Palé 31, de 14 cajas con 40 bolsas por caja.
""",
padre = self.wids['ventana'])
articulos = []
if strrango == '' or strrango == None:
return
self.logger.warning("%salbaranes_de_salida -> Starting pedir_rango"
" (manual dispatch of articles)"
% (self.usuario and self.usuario.usuario + ": " or ""))
## ----------------
tokens = []
for token in strrango.split():
tokens += token.split(",")
tokens = [i.strip() for i in tokens if i.strip() != ""]
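# e.g. "B100-B105, R4000 C101" -> ["B100-B105", "R4000", "C101"]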
for rango in tokens:
# Special cases: DUPLICATED ROLLS AND BALES. AAARGGGHHHH!!!
if (("R" in rango.upper() and "D" in rango.upper())
or ("B" in rango.upper() and "D" in rango.upper())):
articulo = self.add_duplicado(rango, self.objeto)
if articulo != None:
articulos.append(articulo)
continue
# -------------------------------------------------------------
if "B" in rango.upper():
tipocodigo = "B"
rango = rango.replace("B", "")
rango = rango.replace("b", "")
elif "R" in rango.upper():
tipocodigo = "R"
rango = rango.replace("R", "")
rango = rango.replace("r", "")
elif "C" in rango.upper():
tipocodigo = "C"
rango = rango.replace("C", "")
rango = rango.replace("c", "")
elif "Z" in rango.upper():
tipocodigo = "Z"
rango = rango.replace("Z", "")
rango = rango.replace("z", "")
elif "X" in rango.upper():
tipocodigo = "X"
rango = rango.replace("X", "")
rango = rango.replace("x", "")
elif "Y" in rango.upper():
tipocodigo = "Y"
rango = rango.replace("Y", "")
rango = rango.replace("y", "")
# Pallets. H00/00
elif "H" in rango.upper():
tipocodigo = "H"
rango = rango.replace("H", "")
rango = rango.replace("h", "")
# Boxes.
elif "J" in rango.upper():
tipocodigo = "J"
rango = rango.replace("J", "").replace("j", "")
# Loose bags.
#elif "K" in rango.upper():
# tipocodigo = "K"
# rango = rango.replace("K", "").replace("k", "")
else:
tipocodigo = ""
if '-' in rango:
if tipocodigo == "H": # Quito el /bolsas_por_caja antes de
# procesar.
_rango = []
for tokenpale in rango.split("-"):
if "/" in tokenpale:
tokenpale = tokenpale[:tokenpale.index("/")]
_rango.append(tokenpale)
rango = "-".join(_rango)
ini, fin = rango.split('-')
try:
ini = int(ini)
fin = int(fin)
except ValueError:
utils.dialogo_info(titulo = "RANGO NO VÁLIDO",
texto = "El texto introducido (%s) no corresponde a un rango válido." % (rango),
padre = self.wids['ventana'])
continue
if fin < ini:
ini, fin = fin, ini
rangocodigos = xrange(ini, fin+1)
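# e.g. "123-145" -> xrange(123, 146): both endpoints inclusive.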
total = len(rangocodigos)
actual = 0.0
vpro = VentanaProgreso(padre = self.wids['ventana'])
vpro.set_valor(0.0, "Añadiendo artículos...")
vpro.mostrar()
try:
for codigo in rangocodigos:
vpro.set_valor(actual/total, None)
actual += 1.0
len_antes = len(articulos)
articulos = self.add_producto(codigo,
articulos,
tipocodigo)
len_despues = len(articulos)
if len_antes == len_despues:
# No articles were added because the code was wrong.
# Ask whether to continue, because if the user typed in
# a range of 1000 codes and none of them exists, they
# would get fed up clicking accept over and over.
txt = """
El código %s%d no se encontró. ¿Desea continuar
e intentar añadir al albarán el resto de códigos?
""" % (tipocodigo, codigo)
if not utils.dialogo(titulo = "¿CONTINUAR?",
texto = txt,
padre = self.wids['ventana']):
break
finally:
vpro.ocultar()
else:
if tipocodigo == "H": # Quito el /bolsas_por_caja antes de
# procesar.
if "/" in rango:
rango = rango[:rango.index("/")]
try:
articulos = self.add_producto(int(rango),
articulos,
tipocodigo)
except ValueError:
self.logger.error("albaranes_de_salida.py: "
"pedir_rango. Error al convertir a entero: %s." % rango)
## ----------------
if articulos == []:
utils.dialogo_info(titulo = 'NO ENCONTRADO',
texto = 'Los códigos introducidos no se '
'encontraron o no estaban '
'debidamente catalogados.',
padre = self.wids['ventana'])
## ----------------
articulos_baja_calidad = []
for articulo in articulos:
if articulo.es_de_baja_calidad():
articulos_baja_calidad.append(articulo)
texto = """
Los siguientes artículos se han considerado que son
de baja calidad. ¿Continuar?
%s
""" % ("\n".join([a.codigo for a in articulos_baja_calidad]))
if (articulos_baja_calidad == []
or utils.dialogo(titulo = "ARTÍCULOS DE BAJA CALIDAD",
texto = texto,
padre = self.wids['ventana'])):
self.crear_ldv(articulos) # Does not really create anything; it
# links articles to the delivery note.
self.actualizar_ventana()
def add_producto(self, codigo, articulos, tipocodigo = ""):
"""
Codigo es un número de rollo o bala. Viene como entero.
articulos es una lista de objetos articulos al que añade
el artículo o los artículos encontrados andes de devolverla.
"""
# PLAN: WTF: Limpiar un poco y refactorizar esta función.
albaran = self.objeto
if tipocodigo == "":
articulo = pclases.Rollo.select(pclases.Rollo.q.numrollo == codigo)
if articulo.count() == 0:
# Not a roll code. Look for a bale.
articulo = pclases.Bala.select(pclases.Bala.q.numbala == codigo)
antes_de_chequear_analizadas = articulo.count()
articulo = [b for b in articulo if b.analizada()]
# WARNING: NOTE: The "sellable" bales are those whose lot has
# already been analysed and therefore has a code.
if len(articulo) == 0:
if antes_de_chequear_analizadas == 0:
# Not found; wrong code.
utils.dialogo_info(titulo = 'CÓDIGO INCORRECTO',
texto = 'Código %s incorrecto.' % (codigo),
padre = self.wids['ventana'])
else:
# Found, but it cannot be sold.
utils.dialogo_info(titulo = 'FIBRA NO ANALIZADA',
texto = """
La bala de fibra %s no ha sido analizada en el
laboratorio. No se puede vender fibra sin antes
determinar que cumple los criterios necesarios.
Asegúrese de que al menos se han determinado las
siguientes características: tenacidad, elongación,
rizo y encogimiento.
""" % (codigo),
padre = self.wids['ventana'])
return articulos # Return without adding anything.
elif tipocodigo == "R": # Sólo busco rollos.
articulo = pclases.Rollo.select(pclases.Rollo.q.numrollo == codigo)
if articulo.count() == 0:
return articulos # No lo encuentro, devuelvo sin añadir nada.
elif tipocodigo == "C": # Sólo busco fibra de cemento.
articulo = pclases.Bigbag.select(pclases.Bigbag.q.numbigbag == codigo)
if articulo.count() == 0:
return articulos # No lo encuentro, devuelvo sin añadir nada.
elif tipocodigo == "Z": # Sólo busco cable de fibra.
articulo = pclases.BalaCable.select(pclases.BalaCable.q.codigo == "Z%d" % codigo)
if articulo.count() == 0:
return articulos # No lo encuentro, devuelvo sin añadir nada.
elif tipocodigo == "X": # Sólo busco rollos defectuosos.
articulo = pclases.RolloDefectuoso.select(
pclases.RolloDefectuoso.q.codigo == "X%d" % codigo)
if articulo.count() == 0:
return articulos # No lo encuentro, devuelvo sin añadir nada.
elif tipocodigo == "Y": # Sólo busco rollos defectuosos.
articulo = pclases.RolloC.select(
pclases.RolloC.q.codigo == "Y%d" % codigo)
if articulo.count() == 0:
return articulos # No lo encuentro, devuelvo sin añadir nada.
elif tipocodigo == "B": # Sólo busco balas.
articulo = pclases.Bala.select(pclases.Bala.q.numbala == codigo)
antes_de_chequear_analizadas = articulo.count()
articulo = [b for b in articulo if b.analizada()]
# WARNING: NOTE: The "sellable" bales are those whose lot has
# already been analysed and therefore has a code.
if len(articulo) == 0:
if antes_de_chequear_analizadas == 0:
# Not found; wrong code.
utils.dialogo_info(titulo = 'CÓDIGO INCORRECTO',
texto = 'Código %s incorrecto.'%codigo,
padre = self.wids['ventana'])
else:
# Found, but it cannot be sold.
utils.dialogo_info(titulo = 'FIBRA NO ANALIZADA',
texto = """
La bala de fibra %s no ha sido analizada en el
laboratorio. No se puede vender fibra sin antes
determinar que cumple los criterios necesarios.
Asegúrese de que al menos se han determinado las
siguientes características: tenacidad, elongación,
rizo y encogimiento.
""" % (codigo),
padre = self.wids['ventana'])
return articulos # Return without adding anything.
elif tipocodigo == "H": # Palés (completos o resto) de fibra de cemento
pale = pclases.Pale.select(pclases.Pale.q.numpale == codigo)
articulo = []
for p in pale:
# for c in p.cajas:
# for b in c.bolsas:
# articulo.append(b)
# Optimizando, que es gerundio:
#articulo += p.get_bolsas_en_almacen(self.objeto.almacenOrigen)
articulo += p.get_cajas_en_almacen(self.objeto.almacenOrigen)
print len(articulo)
elif tipocodigo == "J": # Una caja suelta de fibra de cemento
cajas = pclases.Caja.select(pclases.Caja.q.codigo == "J%d"%codigo)
articulo = []
for c in cajas:
# for b in c.bolsas:
articulo.append(b)
#elif tipocodigo == "K": # Una única bolsa de fibra de cemento
# articulo = pclases.Bolsa.select(
# pclases.Bolsa.q.codigo == "K%d" % codigo)
# if articulo.count() == 0:
# return articulos # No lo encuentro, devuelvo sin añadir nada.
else:
self.logger.error("albaranes_de_salida.py. Se solicitó añadir artículos que no son balas, rollos, rollos defectuosos, balas de cable ni geocem. tipocodigo = %s" % (tipocodigo))
# At this point there is a valid result, whether the search was
# for rolls, bales or both:
# articulo is a bale or roll object. Both have an articulos list
# holding one related article.
#articulo = articulo[0].articulos[0]
avisado_error_pale = False # Do not warn for every single one of
# the 560 bags of a pallet.
listaarticulos = articulo
for _articulo in listaarticulos:
articulo = _articulo.articulo
# Check that it is not already linked to some delivery note; if
# it is, show an error message.
# XXX: Optimization:
if tipocodigo == "H":
articulos.append(articulo)
continue # Warehouse membership was already filtered above, in
# get_cajas_en_almacen, which only returns the boxes
# of the pallet held in the given warehouse.
# XXX: End of optimization.
if not articulo.en_almacen(almacen = self.objeto.almacenOrigen):
if ((tipocodigo=="H" or tipocodigo=="J" or tipocodigo=="K")
and avisado_error_pale):
continue
if articulo.albaranSalida != None:
motivo_salida = "Salió del mismo en el albarán %s." % (
articulo.albaranSalida.numalbaran)
elif (articulo.bala != None
and articulo.bala.partidaCarga != None):
motivo_salida = "Se empleó en producción, en la partida "\
"de carga %s." % (articulo.bala.partidaCarga.codigo)
else:
motivo_salida = ""
txt = """
El producto seleccionado con código %s no está en el almacén.
%s
Verifique si esto es correcto y elimine el producto del
albarán indicado si quiere añadirlo a este.
""" % (articulo.codigo_interno, motivo_salida)
utils.dialogo_info(titulo = "ERROR: PRODUCTO NO ENCONTRADO "\
"EN ALMACÉN",
texto = txt,
padre = self.wids['ventana'])
if (tipocodigo=="K" or tipocodigo=="J" or tipocodigo=="H"):
avisado_error_pale = True
else:
#articulo.albaranSalida = albaran
articulos.append(articulo)
return articulos
def es_diferente(self):
"""
Devuelve True si la información en pantalla es distinta a la
del objeto en memoria.
"""
# NOTA: No hay que preocuparse por el exceso de cómputo. Estas
# comparaciones son bastante rápidas al tener python -como los
# lenguajes de verdad y no los jueguetes tipo VB- las operaciones
# lógicas cortocircuitadas, de forma que si condición pasa a False
# no se evalúa lo que esté detrás del and en las instrucciones
# posteriores.
albaran = self.objeto
if albaran == None:
return False # Si no hay albaran activo, devuelvo que no hay
# cambio respecto a la ventana
condicion = albaran.numalbaran == self.wids['e_numalbaran'].get_text()
if pclases.DEBUG and not condicion: print "numalbaran", albaran.numalbaran
condicion = condicion and (utils.str_fecha(albaran.fecha) == self.wids['e_fecha'].get_text())
if pclases.DEBUG and not condicion: print "fecha", albaran.fecha
cliente = albaran.cliente
cbe_cliente = utils.combo_get_value(self.wids['cbe_cliente'])
if cliente == None:
mismocliente = cbe_cliente == None
else:
mismocliente = cliente.id == cbe_cliente
condicion = condicion and mismocliente
if pclases.DEBUG and not condicion: print "cliente", albaran.cliente
condicion = condicion and self.wids['ch_facturable'].get_active() == self.objeto.facturable
if pclases.DEBUG and not condicion: print "facturable", albaran.facturable
condicion = condicion and self.wids['e_motivo'].get_text() == self.objeto.motivo
if pclases.DEBUG and not condicion: print "motivo", albaran.motivo
condicion = condicion and self.wids['ch_bloqueado'].get_active() == albaran.bloqueado
if pclases.DEBUG and not condicion: print "bloqueado", albaran.bloqueado
condicion = condicion and self.wids['cbe_nom'].child.get_text() == albaran.nombre
if pclases.DEBUG and not condicion: print "nombre", albaran.nombre
condicion = condicion and self.wids['e_cp'].get_text() == albaran.cp
if pclases.DEBUG and not condicion: print "cp", albaran.cp
condicion = condicion and self.wids['e_ciudad'].get_text() == albaran.ciudad
if pclases.DEBUG and not condicion: print "ciudad", albaran.ciudad
condicion = condicion and self.wids['e_pais'].get_text() == albaran.pais
if pclases.DEBUG and not condicion: print "pais", albaran.pais
condicion = condicion and self.wids['e_telf'].get_text() == albaran.telefono
if pclases.DEBUG and not condicion: print "telefono", albaran.telefono
condicion = condicion and self.wids['e_direccion'].get_text() == albaran.direccion
if pclases.DEBUG and not condicion: print "direccion", albaran.direccion
buffer = self.wids['tv_observaciones'].get_buffer()
condicion = condicion and buffer.get_text(buffer.get_start_iter(), buffer.get_end_iter()) == albaran.observaciones
if pclases.DEBUG and not condicion: print "observaciones", albaran.observaciones
condicion = condicion and utils.combo_get_value(self.wids['cbe_dni']) == albaran.transportistaID
if pclases.DEBUG and not condicion: print "transportista", albaran.transportista
condicion = condicion and utils.combo_get_value(self.wids['cbe_nom']) == albaran.destinoID
if pclases.DEBUG and not condicion: print "destino", albaran.destino
condicion = (condicion and
utils.combo_get_value(self.wids['cbe_almacenOrigenID'])
== albaran.almacenOrigenID)
condicion = (condicion and
utils.combo_get_value(self.wids['cbe_almacenDestinoID'])
== albaran.almacenDestinoID)
return not condicion # "condicion" checks that everything is equal
def aviso_actualizacion(self):
"""
Muestra una ventana modal con el mensaje de objeto
actualizado.
"""
utils.dialogo_info('ACTUALIZAR',
'El albarán ha sido modificado remotamente.\nDebe '
'actualizar la información mostrada en pantalla.\n'
'Pulse el botón «Actualizar»',
padre = self.wids['ventana'])
self.wids['b_actualizar'].set_sensitive(True)
def inicializar_ventana(self):
"""
Inicializa los controles de la ventana, estableciendo sus
valores por defecto, deshabilitando los innecesarios,
rellenando los combos, formateando el TreeView -si lo hay-...
"""
# Inicialmente no se muestra NADA. Sólo se le deja al
# usuario la opción de buscar o crear nuevo.
self.wids['b_actualizar'].set_sensitive(False)
self.wids['b_guardar'].set_sensitive(False)
self.wids['b_nuevo'].set_sensitive(True)
self.wids['b_buscar'].set_sensitive(True)
self.inicializar_leyenda()
self.activar_widgets(False)
# Initialization of the remaining widgets:
cols = (('Cantidad', 'gobject.TYPE_FLOAT', True, True, False,
self.cambiar_cantidad_srv),
('Concepto', 'gobject.TYPE_STRING', True, True, True,
self.cambiar_concepto_srv),
('Precio', 'gobject.TYPE_FLOAT', True, True, False,
self.cambiar_precio_srv),
('Descuento', 'gobject.TYPE_FLOAT', True, True, False,
self.cambiar_descuento_srv),
('Total', 'gobject.TYPE_FLOAT', False, True, False, None),
('ID', 'gobject.TYPE_STRING', False, False, False, None))
utils.preparar_listview(self.wids['tv_servicios'], cols)
attach_menu_notas(self.wids['tv_servicios'], pclases.Servicio,
self.usuario, 1)
cols = (('Concepto', 'gobject.TYPE_STRING', True, True, True,
self.cambiar_concepto_tac),
('Precio', 'gobject.TYPE_STRING', True, True, False,
self.cambiar_precio_tac),
('Proveedor', 'gobject.TYPE_STRING', True, True, True,
self.cambiar_proveedor_tac),
('Observaciones', 'gobject.TYPE_STRING', True, True, True,
self.cambiar_observaciones_tac),
('Fecha', 'gobject.TYPE_STRING', True, True, True,
self.cambiar_fecha_tac),
('Factura compra', 'gobject.TYPE_STRING', False, True, False,
None),
('ID', 'gobject.TYPE_STRING', False, False, False, None))
utils.preparar_listview(self.wids['tv_transportesACuenta'], cols)
self.wids['tv_transportesACuenta'].get_selection().set_mode(
gtk.SELECTION_MULTIPLE)
utils.rellenar_lista(self.wids['cbe_cliente'],
[(c.id, "%s (%s, %s - %s)" % (
c.nombre,
c.cif,
c.ciudad,
c.provincia))
for c in
pclases.Cliente.select(orderBy='nombre')])
cols = (('Bultos añadidos al albarán', 'gobject.TYPE_STRING',
False, True, False, None),
('Código', 'gobject.TYPE_STRING', False, True, False, None),
('Descripción', 'gobject.TYPE_STRING',
False, True, True, None),
('Cantidad solicitada', 'gobject.TYPE_FLOAT',
True, True, False, self.cambiar_cantidad),
('Cantidad añadida', 'gobject.TYPE_FLOAT',
False, True, False, None),
('IDLDV', 'gobject.TYPE_STRING', False, False, False, None)
)
utils.preparar_treeview(self.wids['tv_ldvs'], cols)
self.wids['tv_ldvs'].get_selection().set_mode(gtk.SELECTION_MULTIPLE)
self.wids['tv_ldvs'].connect("row-activated", self.abrir_pedido)
attach_menu_notas(self.wids['tv_ldvs'], pclases.LineaDeVenta,
self.usuario, 2)
transportistas = [(t.id, t.dni) for t
in pclases.Transportista.select(orderBy = 'dni')]
utils.rellenar_lista(self.wids['cbe_dni'], transportistas)
self.wids['cbe_dni'].connect('changed',
self.combo_transportista_cambiado)
self.wids['cbe_dni'].child.connect('changed',
self.activar_guardar_transportista)
self.wids['e_nombre'].connect('changed',
self.activar_guardar_transportista)
self.wids['e_agencia'].connect('changed',
self.activar_guardar_transportista)
self.wids['e_matricula'].connect('changed',
self.activar_guardar_transportista)
self.wids['e_telefono'].connect('changed',
self.activar_guardar_transportista)
destinos = [(t.id, t.nombre)
for t in pclases.Destino.select(orderBy = 'nombre')]
utils.rellenar_lista(self.wids['cbe_nom'], destinos)
self.wids['cbe_nom'].connect('changed', self.combo_destino_cambiado)
self.wids['cbe_nom'].child.connect('changed',
self.activar_guardar_destino)
self.wids['e_cp'].connect('changed', self.activar_guardar_destino)
self.wids['e_ciudad'].connect('changed', self.activar_guardar_destino)
self.wids['e_pais'].connect('changed', self.activar_guardar_destino)
self.wids['e_telf'].connect('changed', self.activar_guardar_destino)
self.wids['e_direccion'].connect('changed',
self.activar_guardar_destino)
cols = (('Abono', 'gobject.TYPE_STRING', False, True, False, None),
('Fecha', 'gobject.TYPE_STRING', False, True, False, None),
('Código', 'gobject.TYPE_STRING', False, True, False, None),
('Descripción', 'gobject.TYPE_STRING', False,True,True,None),
('Código trazabilidad', 'gobject.TYPE_STRING',
False, True, False, None),
('IDLDD', 'gobject.TYPE_STRING', False, False, False, None)
)
utils.preparar_treeview(self.wids['tv_abonado'], cols)
utils.rellenar_lista(self.wids['cbe_almacenOrigenID'],
[(a.id, a.nombre)
for a in pclases.Almacen.select(orderBy = "nombre")])
utils.rellenar_lista(self.wids['cbe_almacenDestinoID'],
[(a.id, a.nombre)
for a in pclases.Almacen.select(orderBy = "nombre")])
# If the business does not sell individual articles, why show
# the add-ranges button at all?
#if (pclases.Rollo.select().count() +
# pclases.RolloC.select().count() +
# pclases.RolloDefectuoso.select().count() +
# pclases.Bigbag.select().count() +
# pclases.Bala.select().count() +
# pclases.BalaCable.select().count() == 0):
if True:
self.wids['b_add_producto'].set_property("visible", False)
self.wids['b_phaser'].set_property("visible", False)
def abrir_pedido(self, tv, path, vc):
model = tv.get_model()
if model[path].parent == None:
idldv = model[path][-1]
#ldv = pclases.LineaDeVenta.get(idldv)
ldv = pclases.getObjetoPUID(idldv)
if ldv.pedidoVenta != None:
import pedidos_de_venta
ventanapedido=pedidos_de_venta.PedidosDeVenta(ldv.pedidoVenta)
else:
idarticulo = model[path][-1]
#articulo = pclases.Articulo.get(idarticulo)
objeto = pclases.getObjetoPUID(idarticulo)
if isinstance(objeto, pclases.Articulo):
if objeto.bala != None:
objeto = objeto.bala
elif objeto.rollo != None:
objeto = objeto.rollo
#elif objeto.bolsa != None:
# objeto = objeto.bolsa
elif objeto.caja != None:
objeto = objeto.caja
elif isinstance(objeto, (pclases.Caja, pclases.Pale)):
pass # I don't need to be forgiven. Yeah, yeah, yeah, no, no!
else:
objeto = None
if objeto != None:
from trazabilidad_articulos import TrazabilidadArticulos
ventanatrazabilidad = TrazabilidadArticulos(objeto)
def cantidad_anadida_a_ldv(self, ldv):
"""
Devuelve la cantidad total de los artículos
pertenecientes al albarán que se correspondan
con el producto recibido y se hayan agregado
a la LDV recibida.
"""
cantidad = 0.0
albaran = self.objeto
articulos_anadidos = self.__ldvs[ldv.id]['articulos']
for a in articulos_anadidos:
if a.rolloID != None: # It is a roll
cantidad += (
a.productoVenta.camposEspecificosRollo.metrosLineales
* a.productoVenta.camposEspecificosRollo.ancho)
elif a.balaID != None: # It is a bale
cantidad += a.bala.pesobala
elif a.es_bigbag():
cantidad += a.bigbag.pesobigbag
elif a.es_bala_cable():
cantidad += a.peso
elif a.es_rollo_defectuoso():
cantidad += a.superficie
elif a.es_rolloC():
cantidad += a.peso
elif a.es_caja():
cantidad += a.peso
return cantidad
def cantidad_anadida(self, producto):
"""
Devuelve la cantidad total de los artículos
pertenecientes al albarán que se correspondan
con el producto recibido.
Solo funciona con productos de venta (para
productos de compra no se añaden artículos).
"""
cantidad = 0.0
if isinstance(producto, pclases.ProductoVenta):
albaran = self.objeto
if producto.es_caja():
#queryres_ids = pclases.Caja._connection.queryAll("""
# SELECT caja.id FROM caja, bolsa, articulo
# WHERE bolsa.id = articulo.bolsa_id
# AND caja.id = bolsa.caja_id
# AND articulo.albaran_salida_id = %d
# AND articulo.producto_venta_id = %d
# GROUP BY caja.id
# -- ORDER BY caja.id;""" % (albaran.id, producto.id))
queryres_ids = pclases.Caja._connection.queryAll("""
SELECT caja.id FROM caja, articulo
WHERE caja.id = articulo.caja_id
AND articulo.albaran_salida_id = %d
AND articulo.producto_venta_id = %d
-- GROUP BY caja.id
-- ORDER BY caja.id;""" % (albaran.id, producto.id))
cajas = [pclases.Caja.get(tupla[0]) for tupla in queryres_ids]
cantidad = sum([c.peso for c in cajas])
else:
articulos_anadidos = (
[a for a in albaran.articulos if a.productoVenta == producto]
+ [ldm.articulo for ldm in albaran.lineasDeMovimiento
if ldm.articulo.productoVenta == producto])
for a in utils.unificar(articulos_anadidos):
if a.es_rollo():
cantidad += a.productoVenta.camposEspecificosRollo.metrosLineales * a.productoVenta.camposEspecificosRollo.ancho
elif a.es_bala():
cantidad += a.bala.pesobala
elif a.es_bigbag():
cantidad += a.bigbag.pesobigbag
elif a.es_rollo_defectuoso():
cantidad += a.superficie
elif a.es_bala_cable() or a.es_rolloC(): # or a.es_caja():
cantidad += a.peso
return cantidad
def cambiar_cantidad(self, cell, path, nuevo_texto):
if self.wids['tv_ldvs'].get_model()[path].parent != None:
# It is an article, not a sale line. Do not let it be changed.
utils.dialogo_info(titulo = 'NO SE PUEDEN EDITAR LOS PRODUCTOS',
texto = 'La cantidad de un producto concreto no es editable.\nTal vez esté intentando eliminarlo del albarán,\nen ese caso selecciónelo y pulse el botón correspondiente.',
padre = self.wids['ventana'])
return
try:
cantidad = utils._float(nuevo_texto)
self.modificado = True
except ValueError:
utils.dialogo_info(titulo = 'NÚMERO INCORRECTO',
texto = 'Introduzca un número válido con . como separador decimal.')
return
idldv = self.wids['tv_ldvs'].get_model()[path][-1]
#ldv = pclases.LineaDeVenta.get(idldv)
ldv = pclases.getObjetoPUID(idldv)
if ((ldv.facturaVentaID != None and ldv.facturaVenta.bloqueada) or (ldv.prefacturaID != None and ldv.prefactura.bloqueada)):
utils.dialogo_info(titulo = "OPERACIÓN NO PERMITIDA",
texto = "La venta ya ha sido facturada y la factura verificada y bloqueada.\nNo puede cambiar la cantidad.",
padre = self.wids['ventana'])
else:
self.redistribuir_ldv(path, cantidad)
self.actualizar_ventana()
def redistribuir_ldv(self, path, cantidad = None):
"""
Si cantidad es None es porque se debe hacer el reajuste automático.
Para el ajuste automático, cantidad valdrá la cantidad servida.
Si la LDV está facturada o el albarán está bloqueado, no cambia la
cantidad.
"""
idldv = self.wids['tv_ldvs'].get_model()[path][-1]
#ldv = pclases.LineaDeVenta.get(idldv)
ldv = pclases.getObjetoPUID(idldv)
if (not self.objeto.bloqueado and
((ldv.facturaVenta == None or not ldv.facturaVenta.bloqueada)
or (ldv.prefactura == None or not ldv.prefactura.bloqueada))):
cantidad_anterior = ldv.cantidad
cantidad_anadida = self.cantidad_anadida(ldv.productoVenta)
if cantidad == None:
cantidad = cantidad_anadida
ldv.cantidad = cantidad
ajustar_existencias(ldv, cantidad_anterior)
def activar_guardar_transportista(self, w):
self.wids['b_guardar_transportista'].set_sensitive(True)
def activar_guardar_destino(self, w):
self.wids['b_guardar_destino'].set_sensitive(True)
def combo_transportista_cambiado(self, c):
idtransp = utils.combo_get_value(c)
if idtransp != None:
self.mostrar_transportista(pclases.Transportista.get(idtransp))
def combo_destino_cambiado(self, c):
iddest = utils.combo_get_value(c)
if iddest != None:
self.mostrar_destino(pclases.Destino.get(iddest))
def activar_widgets(self, s):
"""
Activa o desactiva (sensitive=True/False) todos
los widgets de la ventana que dependan del
objeto mostrado.
Entrada: s debe ser True o False. En todo caso
se evaluará como boolean.
"""
if (self.objeto and self.objeto.bloqueado and self.usuario
and self.usuario.nivel >= 2):
s = False
ws = ('b_add_producto', 'b_drop_ldv', 'b_borrar', 'e_numalbaran',
'ch_bloqueado', 'b_fecha', 'cbe_cliente', 'tv_ldvs', 'e_fecha',
'ch_facturable', 'e_motivo', 'b_add_pedido', 'frame1', 'frame2',
'hbox19', 'vbox2', # 'b_guardar',
'tv_transportesACuenta', 'b_add_transporteACuenta',
'b_drop_transporteACuenta', 'b_phaser')
for w in ws:
self.wids[w].set_sensitive(s)
for w in ("cbe_almacenOrigenID", "cbe_almacenDestinoID"):
self.wids[w].set_sensitive(
s and self.wids[w].get_property("sensitive"))
# CWT: Do not lock transports if the user has level <= 2 (this is
# for Rafa, specifically, though he is entirely right about it).
if self.usuario and self.usuario.nivel <= 2:
self.wids['expander3'].set_sensitive(True)
self.wids['tv_transportesACuenta'].set_sensitive(True)
self.wids['b_add_transporteACuenta'].set_sensitive(True)
self.wids['b_drop_transporteACuenta'].set_sensitive(True)
self.wids['e_motivo'].set_sensitive(
s and not self.wids['ch_facturable'].get_active())
if pclases.DEBUG:
print "e_motivo", s and not self.wids['ch_facturable'].get_active()
def ir_a_primero(self):
"""
Hace que el primer registro -si lo hay- de la tabla implicada
en el objeto del formulario sea el objeto activo.
"""
albaran = self.objeto
try:
# Cancel the update notification of the delivery note that stops being active.
if albaran != None: albaran.notificador.desactivar()
albaran = pclases.AlbaranSalida.select(orderBy = '-id')[0]
# Select all outgoing delivery notes and keep the first in the list.
self.modificado = False
self.nuevo = False
albaran.notificador.activar(self.aviso_actualizacion) # Turn notification on
except Exception, msg:
albaran = None
self.objeto = albaran
self.actualizar_ventana()
def refinar_resultados_busqueda(self, resultados):
"""
Muestra en una ventana de resultados todos los
registros de "resultados".
Devuelve el id (primera columna de la ventana
de resultados) de la fila seleccionada o None
si se canceló.
"""
filas_res = []
for r in resultados:
if r.almacenDestino:
destino = r.almacenDestino.nombre
elif r.destino != None:
destino = r.destino.get_info()
elif r.nombre:
destino = ", ".join((r.nombre, r.direccion, r.cp, r.ciudad,
r.pais))
else:
destino = ""
filas_res.append((r.id,
r.numalbaran,
r.fecha and r.fecha.strftime('%d/%m/%Y') or '',
r.almacenOrigen.nombre,
r.clienteID and r.cliente.nombre or "",
destino))
idalbaran = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione albarán',
cabeceras = ('ID Interno',
'Número de albarán',
'Fecha',
'Origen',
'Cliente',
'Destino'),
padre = self.wids['ventana'])
if idalbaran < 0:
return None
else:
return idalbaran
def colorear(self, ldvs):
"""
ldvs es el diccionario... bah, mirar abajo, en la invocadora.
"""
def cell_func(column, cell, model, itr, data):
i, ldvs = data
#if model[itr].parent != None:
try:
hijo = model[itr].iterchildren().next()
except StopIteration:
hijo = None
if model[itr].parent == None: # No parent: a sale line.
idldv = int(model[itr][-1].split(":")[-1])
if isinstance(ldvs[idldv]['ldv'].producto,
pclases.ProductoCompra):
color = "PaleGreen"
else:
cant_servida = round(ldvs[idldv]['ldv'].cantidad, 3)
cant_added = round(ldvs[idldv]['cantidad'], 3)
if cant_servida > cant_added:
color = "yellow"
if cant_servida == cant_added:
color = "green"
if cant_servida < cant_added:
color = "red"
if cant_added == 0:
color = "orange"
elif not hijo: # No children: it is an article.
idarticulo = model[itr][-1]
try:
#a = pclases.Articulo.get(idarticulo)
a = pclases.getObjetoPUID(idarticulo)
except pclases.SQLObjectNotFound:
color = None
else:
if a.albaranSalida != self.objeto:
# Returned or transferred-and-sold article. It is shown
# on screen so it can still be printed "with backwards
# compatibility".
color = "RosyBrown3"
else:
color = "white"
else: # Intermediate nodes: boxes or pallets. To tell whether they
# hold returned articles (pinkish) or normal ones (white row),
# the user must expand the node: a whole pallet cannot be
# marked when, say, it holds 4 pink boxes and the rest in
# white.
color = "light blue"
cell.set_property("cell-background", color)
# NOTA: Esto hay que hacerlo porque el nuevo cell_func machaca el predefinido por defecto
# en el utils.preparar_x
utils.redondear_flotante_en_cell_cuando_sea_posible(column,
cell, model, itr,
(i, 1))
cols = self.wids['tv_ldvs'].get_columns()
for i in xrange(len(cols)):
column = cols[i]
cells = column.get_cell_renderers()
for cell in cells:
if not isinstance(cell, gtk.CellRendererPixbuf): # Para no machacar el data_func del icono "notas".
column.set_cell_data_func(cell,cell_func, [i, ldvs])
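    # Colour legend applied by cell_func above (summary, following the
    # precedence of the code): PaleGreen = LDV of a purchase product;
    # yellow = pending quantity (LDV > añadida); green = exact; red = excess;
    # orange = nothing added yet; RosyBrown3 = article returned/transferred
    # (no longer of this albarán); white = normal article; light blue =
    # intermediate caja/palé nodes.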
def rellenar_ldvs(self, albaran):
if pclases.DEBUG:
print "Soy rellenar_ldvs. Hasta ahora mismo no he sido invocada."
model = self.wids['tv_ldvs'].get_model()
model.clear()
self.__ldvs = self.agrupar_articulos(albaran)
# Devuelve un diccionario {idldv: {...}, idldv: {...}} Ver __doc__
# de la función.
self.colorear(self.__ldvs)
cajas = {} # iters de cajas insertadas
pales = {} # iters de pales insertados
opales = {} # Registros palé insertados por producto de venta.
for idldv in self.__ldvs:
articulos = self.__ldvs[idldv]['articulos']
pv = self.__ldvs[idldv]['ldv'].productoVenta
if pv not in opales:
opales[pv] = []
if (pv and pv.es_caja()):
for a in articulos:
if a.caja.pale not in opales[pv]:
opales[pv].append(a.caja.pale)
bultos = len(opales[pv])
else:
bultos = len(articulos) # Bultos añadidos
ldv = self.__ldvs[idldv]['ldv']
cantidad = ldv.cantidad
# cantidad_servida = self.cantidad_anadida(ldv.productoVenta)
cantidad_servida = self.cantidad_anadida_a_ldv(ldv)
# cantidad_servida = self.cantidad_anadida(self.__ldvs[ldv.id][])
iterpadre = model.append(None, (bultos,
ldv.producto.codigo,
ldv.producto.descripcion,
cantidad,
cantidad_servida,
ldv.get_puid()))
iterldv = iterpadre
for a in self.__ldvs[idldv]['articulos']:
iterpadre, cantidad = self.insert_en_pale(a, pales, model,
iterldv, cajas, cantidad, iterpadre)
# Ahora inserto el artículo. Si es una bolsa, iterpadre ahora
# valdrá lo que el iter de la caja a la que pertenece, si no
# será la línea de producto para colgar el rollo/bala, etc.
cantidad_bultos_del_articulo = 1
if a.es_caja():
cantidad_bultos_del_articulo = a.caja.numbolsas
model.append(iterpadre,
(cantidad_bultos_del_articulo,
a.codigo_interno,
'',
cantidad,
cantidad,
a.get_puid()))
self.rellenar_lineas_de_transferencia(opales, cajas, pales)
def insert_en_pale(self,a,pales,model,iterldv,cajas,cantidad,iterpadre):
        # Inserta el nodo del palé y devuelve el iter para que después
# se pueda insertar el artículo (la caja en sí) como nodo hijo.
cantidad = a.get_cantidad()
if a.es_caja():
caja = a.caja
pale = caja.pale
try:
iterpale = pales[pale]
except KeyError:
iterpale = model.append(iterldv,
(1, # El palé es el bulto
pale.codigo,
"",
0.0,
0.0,
pale.get_puid()))
pales[pale] = iterpale
#try:
# itercaja = cajas[caja]
#except KeyError:
# itercaja = model.append(iterpale,
# (1,
# caja.codigo,
# "",
# 0.0,
# 0.0,
# caja.get_puid()))
# cajas[caja] = itercaja
# #model[pales[pale]][0] += 1 # CWT: Palé es el bulto.
#iterpadre = itercaja
iterpadre = iterpale
# Actualizo las cantidades de mi padre caja y abuelo palé.
#for iterpc in (cajas[caja], pales[pale]):
for iterpc in (pales[pale], ):
#model[iterpc][0] += 1
model[iterpc][3] += cantidad
model[iterpc][4] += cantidad
return iterpadre, cantidad
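    # Sketch (hypothetical codes) of the tree that insert_en_pale helps to
    # build for boxed products; non-boxed articles (rollos, balas) hang
    # directly from the LDV node instead:
    #
    #   LDV «GTX-100»              <- iterldv (línea de venta)
    #     └─ palé «PAL-0001»       <- iterpale (bulto = 1; acumula cantidades)
    #          └─ caja «CAJ-0001»  <- appended afterwards by the caller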
def rellenar_lineas_de_transferencia(self, opales, cajas, pales):
"""
        Añade al albarán los artículos relacionados con el mismo a través de
las líneas de transferencia.
Como hasta que salgan en otro albarán, el artículo estará relacionado
con el albarán actual por dos enlaces (relación artículo-albarán y
relación artículo-línea de transferencia-albarán), hay que chequear
que no lo voy a meter duplicado en la ventana.
«opales» es un diccionario de registros palé tratados por producto de
venta para llevar el control de bultos.
"""
model = self.wids['tv_ldvs'].get_model()
# ids_articulos_added = tuple([a.id for a in self.objeto.articulos])
# Esto de arriba ya no es así. Ahora los artículos por LDV ya incluyen
# los de transferencia además de los devueltos, así que construyo esta
# lista de otra forma:
larts = []
for k in self.__ldvs.keys():
for a in self.__ldvs[k]['articulos']:
                if a.id not in larts:   # Comparo ids; «larts» guarda ids.
                    larts.append(a.id)
ids_articulos_added = tuple(larts)
paths_productos = {}
for row in model:
ldv_id = row[-1]
#id = pclases.LineaDeVenta.get(ldv_id).productoVentaID
id = pclases.getObjetoPUID(ldv_id).productoVentaID
# Puede llegar a crear un paths_productos[None] -> [<path>]. Mejor.
path = row.path
try:
paths_productos[id].append(path)
except KeyError:
paths_productos[id] = [path]
#for ldt in self.objeto.lineasDeMovimiento:
# XXX: FIXME: Hay múltiples almacenes pero no líneas de movimiento. ¿?
for ldt in []:
a = articulo = ldt.articulo
if articulo.id not in ids_articulos_added:
producto_id = articulo.productoVentaID
path_producto = paths_productos[producto_id][0]
# MUST! Debe estar el producto. Es imposible relacionar
# un artículo con un albarán si el producto no está en
# una LDV (a no ser que hagas trampas directamente contra
# la BD).
cantidad = articulo.get_cantidad()
iterpadre = model.get_iter(path_producto)
iterldv = iterpadre
iterpadre, cantidad = self.insert_en_pale(a, pales, model,
iterldv, cajas,
cantidad, iterpadre)
# Ahora inserto el artículo. Si es una bolsa, iterpadre ahora
# valdrá lo que el iter de la caja a la que pertenece, si no
# será la línea de producto para colgar el rollo/bala, etc.
model.append(iterpadre,
(1,
articulo.codigo_interno,
'',
cantidad,
cantidad,
articulo.get_puid()))
if not articulo.es_caja():
model[iterpadre][0] += 1
else:
pv = articulo.productoVenta
if pv not in opales:
opales[pv] = []
pale = articulo.caja.pale
if pale not in opales[pv]:
opales[pv].append(pale)
model[iterpadre][0] += 1
            model[iterpadre][4] = utils._float(model[iterpadre][4]) + cantidad
def rellenar_widgets(self):
"""
Introduce la información del albaran actual
en los widgets.
No se chequea que sea != None, así que
hay que tener cuidado de no llamar a
esta función en ese caso.
"""
self.wids['ventana'].set_title(
self.objeto.numalbaran+" - Albaranes de venta (salida de material)")
self.wids['b_guardar'].set_sensitive(False) # Deshabilito el guardar
# antes de actualizar para evitar "falsos positivos".
albaran = self.objeto
if albaran == None:
return
self.wids['ch_facturable'].set_active(self.objeto.facturable)
self.wids['e_motivo'].set_text(self.objeto.motivo)
self.wids['e_numalbaran'].set_text(albaran.numalbaran)
self.wids['e_fecha'].set_text(utils.str_fecha(albaran.fecha))
self.wids['ch_bloqueado'].set_active(self.objeto.bloqueado)
cliente = albaran.cliente
if cliente == None:
self.wids['cbe_cliente'].set_active(-1)
self.wids['cbe_cliente'].child.set_text('')
else:
utils.combo_set_from_db(self.wids['cbe_cliente'], cliente.id)
self.rellenar_ldvs(albaran)
self.rellenar_servicios()
self.rellenar_transportes_a_cuenta()
if (albaran.destino == None
and albaran.nombre != None
and albaran.nombre.strip() != ""):
# Si ya tiene un destino asignado pero no está correctamente
# enlazado, lo creo.
nuevo_destino = self.crear_nuevo_destino()
albaran.destino = nuevo_destino
self.mostrar_destino(albaran.destino)
buffer = self.wids['tv_observaciones'].get_buffer()
buffer.set_text(albaran.observaciones)
self.mostrar_transportista(albaran.transportista)
self.wids['cbe_nom'].child.set_text(albaran.nombre)
self.wids['e_cp'].set_text(albaran.cp)
self.wids['e_ciudad'].set_text(albaran.ciudad)
self.wids['e_pais'].set_text(albaran.pais)
self.wids['e_telf'].set_text(albaran.telefono)
self.wids['e_direccion'].set_text(albaran.direccion)
self.wids['e_pedidos'].set_text(
self.get_nums_pedidos(albaran, cortar = False))
self.wids['e_facturas'].set_text(
', '.join([f.numfactura for f in albaran.get_facturas()]))
pedidos = self.get_pedidos(albaran)
gastos_envio = False
for p in pedidos: # Si al menos uno de los pedidos indica transporte
# a cargo, hay que marcar la casilla.
gastos_envio = gastos_envio or p.transporteACargo
self.wids['ch_debellevargastos'].set_active(gastos_envio)
self.wids['e_total_albaran'].set_text(
"%s €" % (utils.float2str(self.objeto.calcular_total())))
self.suspender(self.wids['cbe_almacenOrigenID'])
self.suspender(self.wids['cbe_almacenDestinoID'])
utils.combo_set_from_db(self.wids['cbe_almacenOrigenID'],
self.objeto.almacenOrigenID)
utils.combo_set_from_db(self.wids['cbe_almacenDestinoID'],
self.objeto.almacenDestinoID)
self.revivir(self.wids['cbe_almacenOrigenID'])
self.revivir(self.wids['cbe_almacenDestinoID'])
self.wids['b_guardar'].set_sensitive(False) # Deshabilito el guardar
# antes de actualizar para evitar "falsos positivos".
self.objeto.make_swap()
self.activar_packing_list()
# Tipo de albarán
self.wids['l_str_tipo'].set_text("<i>%s</i>"
% self.objeto.get_str_tipo())
self.wids['l_str_tipo'].set_use_markup(False) # AWKWARD GTK BUG!
self.wids['l_str_tipo'].set_use_markup(True)
# No dejo cambiar almacenes de origen ni destino si ya se han metido
# líneas de venta, porque al añadir las LDV es cuando se descuentan
# existencias de los almacenes. Si lo quiere cambiar, que elimine las
# LDVs, cambie el almacén y las vuelva a meter.
hay_ldvs = bool(self.objeto.lineasDeVenta)
self.wids['cbe_almacenOrigenID'].set_sensitive(not hay_ldvs)
self.wids['cbe_almacenDestinoID'].set_sensitive(not hay_ldvs)
self.sombrear_entry_motivo(self.wids['ch_facturable'])
def activar_packing_list(self):
"""
Muestra u oculta el botón de packing list dependiendo de si en las
líneas de venta hay productos de venta (susceptibles de llevar bultos
con código propio) o no (en cuyo caso no se puede imprimir packing
list).
"""
#mostrar = len([ldv.productoVenta
# for ldv in self.objeto.lineasDeVenta
# if ldv.productoVenta != None]) > 0
mostrar = False
self.wids['b_packinglist'].set_property("visible", mostrar)
def get_pedidos(self, albaran):
pedidos = []
# pedidos.extend([ldv.pedidoVenta.numpedido for ldv in albaran.lineasDeVenta if ldv.pedidoVenta and ldv.pedidoVenta.numpedido not in pedidos])
for ldv in albaran.lineasDeVenta:
if ldv.pedidoVenta != None and ldv.pedidoVenta not in pedidos:
pedidos.append(ldv.pedidoVenta)
return pedidos
def mostrar_transportista(self, transportista):
if transportista == None:
self.wids['cbe_dni'].set_active(-1)
self.wids['cbe_dni'].child.set_text('')
self.wids['e_nombre'].set_text('')
self.wids['e_agencia'].set_text('')
self.wids['e_matricula'].set_text('')
self.wids['e_telefono'].set_text('')
else:
utils.combo_set_from_db(self.wids['cbe_dni'], transportista.id)
self.wids['e_nombre'].set_text(transportista.nombre)
self.wids['e_agencia'].set_text(transportista.agencia)
self.wids['e_matricula'].set_text(transportista.matricula)
self.wids['e_telefono'].set_text(transportista.telefono)
self.wids['b_guardar_transportista'].set_sensitive(False)
def mostrar_destino(self, destino):
if destino == None:
self.wids['cbe_nom'].set_active(-1)
self.wids['cbe_nom'].child.set_text('')
self.wids['e_cp'].set_text('')
self.wids['e_ciudad'].set_text('')
self.wids['e_pais'].set_text('')
self.wids['e_telf'].set_text('')
self.wids['e_direccion'].set_text('')
else:
utils.combo_set_from_db(self.wids['cbe_nom'], destino.id)
self.wids['e_cp'].set_text(destino.cp)
self.wids['e_ciudad'].set_text(destino.ciudad)
self.wids['e_pais'].set_text(destino.pais)
self.wids['e_telf'].set_text(destino.telefono)
self.wids['e_direccion'].set_text(destino.direccion)
self.wids['b_guardar_destino'].set_sensitive(False)
def agrupar_articulos(self, albaran):
"""
        Crea un diccionario cuyas claves son IDs de línea de venta y
        cuyos valores son listas de artículos del producto de la LDV.
        No se permite que un mismo artículo se relacione con dos LDVs
        distintas.
        Si la cantidad de los artículos (en m2, kilos, etc.) supera la
        de la LDV donde se quiere añadir, se intentará añadir a otra
        LDV del mismo producto. Si no hay más LDVs, se añadirá a la
        LDV que haya.
NOTA: Marcado como DEPRECATED para próximas versiones.
"""
return {}
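    # Shape that this method historically returned and that the rest of the
    # window still assumes for self.__ldvs (keys are real field names used
    # below; the values shown are illustrative, not real records):
    #   {idldv: {'ldv': <LineaDeVenta>,
    #            'codigo': <str>,
    #            'cantidad': <float, cantidad ya añadida>,
    #            'articulos': [<Articulo>, ...],
    #            'idsarticulos': [<int>, ...]}}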
def buscar_ldv(self, d, codigo, cantidad):
"""
        Busca en el diccionario d la clave cuyo valor (que es otro
        diccionario) contiene el código «codigo» en el campo 'codigo' Y
        cuya cantidad de LDV sea superior a la cantidad ya añadida (otro
        campo del diccionario que hace de valor del primer diccionario)
        más la que se quiere añadir («cantidad»).
        Si no se encuentra una LDV donde la cantidad sea superior o
        igual, devolverá cualquiera de las LDVs donde coincida el
        código, aunque después al añadir se sobrepase la cantidad.
        Suena lioso... pero no lo es... ¿o sí? Qué va, viendo el
        código se ve muy claro.
        Devuelve la clave o None si no se encontró.
"""
res = None
for idldv in d:
# XXX
# if idldv == 0: return None
# XXX
if d[idldv]['codigo'] == codigo:
res = idldv
if d[idldv]['cantidad'] + cantidad <= d[idldv]['ldv'].cantidad:
res = idldv
break
return res
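    # Worked example with made-up data (assuming two LDVs of 100 units of
    # the same product, with 40 and 95 units already added):
    #   d = {1: {'codigo': 'GTX-100', 'cantidad': 40.0, 'ldv': ldv1},
    #        2: {'codigo': 'GTX-100', 'cantidad': 95.0, 'ldv': ldv2}}
    #   buscar_ldv(d, 'GTX-100', 10.0)  # -> 1 (40 + 10 <= 100)
    #   buscar_ldv(d, 'GTX-100', 70.0)  # -> 1 ó 2: ninguna cabe, así que
    #                                   # devuelve la última coincidencia
    #                                   # iterada del diccionario.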
# --------------- Manejadores de eventos ----------------------------
def crear_nuevo_albaran(self, widget):
"""
Función callback del botón b_nuevo.
Pide los datos básicos para crear un nuevo objeto.
Una vez insertado en la BD hay que hacerlo activo
en la ventana para que puedan ser editados el resto
de campos que no se hayan pedido aquí.
"""
albaran = self.objeto
#nuevo_numalbaran = pclases.AlbaranSalida.get_siguiente_numero_numalbaran()
nuevo_numalbaran = pclases.AlbaranSalida.get_siguiente_numero_numalbaran_str()
# Datos a pedir:
numalbaran = utils.dialogo_entrada(titulo = "NÚMERO DE ALBARÁN",
texto = 'Introduzca un número para el albarán.\nDeje el número '
'de albarán por defecto si no está seguro.',
valor_por_defecto = nuevo_numalbaran,
padre = self.wids['ventana'])
if numalbaran == None:
return
# numero_numalbaran = utils.parse_numero(numalbaran)
numero_numalbaran_usuario = utils.parse_numero(numalbaran,
invertir = True)
numero_numalbaran_sugerido = utils.parse_numero(nuevo_numalbaran,
invertir = True)
#if self.usuario != None and self.usuario.nivel > 1 and numero_numalbaran != None and numero_numalbaran > nuevo_numalbaran:
if (self.usuario
and self.usuario.nivel > 1
and numero_numalbaran_usuario != None
and numero_numalbaran_usuario > numero_numalbaran_sugerido):
utils.dialogo_info(titulo = "NÚMERO DE ALBARÁN INCORRECTO",
texto = "No es estrictamente necesario que todos los albaranes"
" sean consecutivos.\n\nSin embargo, no se aconseja "
"crear albaranes con número superior al sugerido.\n\n"
"Si considera que debe hacerlo, contacte con un "
"usuario con mayor nivel de privilegios.",
padre = self.wids['ventana'])
return
if albaran != None: albaran.notificador.desactivar()
# CWT: El programa debe pedir siempre el almacén origen porque se le
# va la pinza al usuario, se olvida de elegirlo y se queda con el
# principal por defecto, etc.
almacenes = [(a.id, a.nombre)
for a in pclases.Almacen.select(orderBy = "id")]
almacenppal = pclases.Almacen.get_almacen_principal_id_or_none()
almo = utils.dialogo_combo(titulo = "ALMACÉN ORIGEN",
texto = "Seleccione el almacén origen de la mercancía",
ops = almacenes,
padre = self.wids['ventana'],
valor_por_defecto = almacenppal)
if not almo: # Cancelar
return
#try:
# almo = pclases.Almacen.select(
# pclases.Almacen.q.principal == True,
# orderBy = "id")[0].id
#except IndexError:
# almo = None
try:
albaran = pclases.AlbaranSalida(
numalbaran = numalbaran,
transportista = None,
cliente = None,
bloqueado = False,
facturable = True,
destino = None,
fecha = datetime.date.today(),
almacenOrigenID = almo,
almacenDestinoID = None)
# OJO: Con la última modificación de SQLObject el valor por
            # defecto para los DateTime no es correcto. Mirar si en otros
# nuevo_* ocurre lo mismo.
utils.dialogo_info('ALBARÁN CREADO',
'El albarán %s ha sido creado.\n'
'No olvide asociar las salidas.' % albaran.numalbaran,
padre = self.wids['ventana'])
self.nuevo = True
self.modificado = False
except Exception, e:
#utils.dialogo_info('ERROR: ALBARÁN NO CREADO', 'El albarán %s no ha sido creado.\nCompruebe que el número no esté siendo usado y vuelva a intentarlo.\n\n\nError:\n%s' % (numalbaran, e), padre = self.wids['ventana'])
self.logger.error("%salbaranes_de_salida::crear_nuevo_albaran "
"-> Error al crear nuevo albarán. Excepción capturada: %s" % (
self.usuario and self.usuario.usuario+": " or "", e))
utils.dialogo_info('ERROR: ALBARÁN NO CREADO',
'El albarán %s no ha sido creado.\nCompruebe que el número '
'no esté siendo usado y vuelva a intentarlo.\n\n\n' % (
numalbaran),
padre = self.wids['ventana'])
albaran.notificador.activar(self.aviso_actualizacion)
self.objeto = albaran
self.actualizar_ventana()
def buscar_albaran(self, widget):
"""
Muestra una ventana de búsqueda y a continuación los
resultados. El objeto seleccionado se hará activo
en la ventana a no ser que se pulse en Cancelar en
la ventana de resultados.
"""
albaran = self.objeto
a_buscar = utils.dialogo_entrada(titulo = "BUSCAR ALBARÁN",
texto = "Introduzca número de albarán: ",
padre = self.wids['ventana'])
if a_buscar != None:
resultados = pclases.AlbaranSalida.select(
pclases.AlbaranSalida.q.numalbaran.contains(a_buscar))
if resultados.count() > 1:
## Refinar los resultados
idalbaran = self.refinar_resultados_busqueda(resultados)
if idalbaran == None:
return
resultados = [pclases.AlbaranSalida.get(idalbaran)]
# Se supone que la comprensión de listas es más rápida que hacer un nuevo get a SQLObject.
# Me quedo con una lista de resultados de un único objeto ocupando la primera posición.
# (Más abajo será cuando se cambie realmente el objeto actual por este resultado.)
elif resultados.count() < 1:
## Sin resultados de búsqueda
                utils.dialogo_info('SIN RESULTADOS',
                    'La búsqueda no produjo resultados.\nPruebe a cambiar el '
                    'texto buscado o déjelo en blanco para ver una lista '
                    'completa.\n(Atención: Ver la lista completa puede '
                    'resultar lento si el número de elementos es muy alto)',
                    padre = self.wids['ventana'])
return
## Un único resultado
# Primero anulo la función de actualización
self.preguntar_si_redistribuir()
if albaran != None:
albaran.notificador.desactivar()
# Pongo el objeto como actual
albaran = resultados[0]
self.nuevo = False
self.modificado = False
# Y activo la función de notificación:
albaran.notificador.activar(self.aviso_actualizacion)
self.objeto = albaran
self.actualizar_ventana()
def guardar_transportista(self, w):
idtransp = utils.combo_get_value(self.wids['cbe_dni'])
if idtransp == None:
self.crear_nuevo_transportista()
else:
self.actualizar_transportista(idtransp)
self.modificado = True
self.wids['b_guardar_transportista'].set_sensitive(False)
def guardar_destino(self, w):
iddest = utils.combo_get_value(self.wids['cbe_nom'])
if iddest == None:
self.crear_nuevo_destino()
else:
self.actualizar_destino(iddest)
self.modificado = True
self.wids['b_guardar_destino'].set_sensitive(False)
def guardar(self, widget, actualizar_ventana = True):
"""
Guarda el contenido de los entry y demás widgets de entrada
de datos en el objeto y lo sincroniza con la BD.
"""
albaran = self.objeto
# Campos del objeto que hay que guardar:
numalbaran = self.wids['e_numalbaran'].get_text()
fecha = self.wids['e_fecha'].get_text()
idcliente = utils.combo_get_value(self.wids['cbe_cliente'])
idalmo = utils.combo_get_value(self.wids['cbe_almacenOrigenID'])
idalmd = utils.combo_get_value(self.wids['cbe_almacenDestinoID'])
# Desactivo el notificador momentáneamente
albaran.notificador.desactivar()
# Actualizo los datos del objeto
albaran.almacenOrigenID = idalmo
albaran.almacenDestinoID = idalmd
albaran.numalbaran = numalbaran
albaran.bloqueado = self.wids['ch_bloqueado'].get_active()
albaran.facturable = self.wids['ch_facturable'].get_active()
albaran.motivo = self.wids['e_motivo'].get_text()
try:
albaran.fecha = utils.parse_fecha(fecha)
except:
albaran.fecha = time.localtime()
if idcliente != None:
albaran.cliente = pclases.Cliente.get(idcliente)
else:
albaran.cliente = None
albaran.nombre = self.wids['cbe_nom'].child.get_text()
albaran.cp = self.wids['e_cp'].get_text()
        albaran.ciudad = self.wids['e_ciudad'].get_text()
albaran.pais = self.wids['e_pais'].get_text()
albaran.telefono = self.wids['e_telf'].get_text()
albaran.direccion = self.wids['e_direccion'].get_text()
buffer = self.wids['tv_observaciones'].get_buffer()
        albaran.observaciones = buffer.get_text(buffer.get_start_iter(),
                                                buffer.get_end_iter())
self.guardar_transportista(None)
albaran.transportistaID = utils.combo_get_value(self.wids['cbe_dni'])
self.guardar_destino(None)
albaran.destinoID = utils.combo_get_value(self.wids['cbe_nom'])
# Fuerzo la actualización de la BD y no espero a que SQLObject lo
# haga por mí:
albaran.sync()
# Vuelvo a activar el notificador
albaran.notificador.activar(self.aviso_actualizacion)
self.objeto = albaran
self.modificado = True
if actualizar_ventana:
self.actualizar_ventana()
self.wids['b_guardar'].set_sensitive(False)
def buscar_fecha(self, boton):
        self.wids['e_fecha'].set_text(utils.str_fecha(
            utils.mostrar_calendario(
                fecha_defecto = self.objeto and self.objeto.fecha or None,
                padre = self.wids['ventana'])))
def refinar_busqueda_pedido(self, pedidos):
"""
Recibe un resultado de sqlobject con los pedidos
buscados.
Precondiciones: pedidos.count() > 1
Devuelve una lista con el pedido seleccionado en la
primera posición o None si se cancela el diálogo.
"""
peds = [(p.id, "%s: %s - %s" % (p.numpedido,
p.cliente and p.cliente.nombre or 'SIN CLIENTE',
p.fecha and p.fecha.strftime('%d/%m/%Y') or 'SIN FECHA'))
for p in pedidos]
resp = utils.dialogo_combo(texto = 'Se encontraron varios pedidos con'
' el mismo número.\nSeleccione uno'
' de ellos.',
ops = peds,
padre = self.wids['ventana'])
if resp == None:
return None
return [p for p in pedidos if p.id == resp]
def add_pedido(self, boton):
"""
Añade todas las líneas de venta de un pedido
al albarán.
"""
if (self.objeto.cliente
and self.objeto.cliente.calcular_credito_disponible() <= 0):
utils.dialogo_info(titulo = "CLIENTE SIN CRÉDITO",
texto = "El cliente ha sobrepasado el "
"crédito concedido.",
padre = self.wids['ventana'])
return
if (self.objeto.cliente
and self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()):
frasvenc = self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()
utils.dialogo_info(titulo = "CLIENTE DEUDOR",
texto = "El cliente tiene %d facturas "
"vencidas sin documento de pago." % (
len(frasvenc)),
padre = self.wids['ventana'])
return
numpedido = utils.dialogo_entrada(titulo = 'NÚMERO DE PEDIDO',
texto = 'Introduzca el número del pedido',
padre = self.wids['ventana'])
if numpedido == None:
return
if self.objeto.cliente != None:
pedidos = pclases.PedidoVenta.select(pclases.AND(
pclases.PedidoVenta.q.numpedido.contains(numpedido),
pclases.PedidoVenta.q.clienteID == self.objeto.cliente.id,
pclases.PedidoVenta.q.cerrado == False))
else:
pedidos = pclases.PedidoVenta.select(pclases.AND(
pclases.PedidoVenta.q.numpedido.contains(numpedido),
pclases.PedidoVenta.q.cerrado == False))
if pedidos.count() > 1:
pedidos = self.refinar_busqueda_pedido(pedidos)
if pedidos == None:
return
try:
pedido = pedidos[0]
except:
# No se encontró
nombrecliente = (self.objeto and self.objeto.cliente
and "«"+self.objeto.cliente.nombre+"» " or "")
utils.dialogo_info(titulo = 'NO ENCONTRADO',
texto = 'Pedido no encontrado, no es del cliente %s'
'o no admite más albaranes.' % nombrecliente,
padre = self.wids['ventana'])
return
pedido.sync() # Por si ha habido cambios y no
# ha saltado el fallo de caché.
if (pedido.cliente
and pedido.cliente.calcular_credito_disponible() <= 0):
utils.dialogo_info(titulo = "CLIENTE SIN CRÉDITO",
texto = "El cliente ha sobrepasado el "
"crédito concedido.",
padre = self.wids['ventana'])
return
if (self.objeto.cliente
and self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()):
frasvenc = self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()
utils.dialogo_info(titulo = "CLIENTE DEUDOR",
texto = "El cliente tiene %d facturas "
"vencidas sin documento de pago." % (
len(frasvenc)),
padre = self.wids['ventana'])
return
if pedido.cerrado:
utils.dialogo_info(titulo = "PEDIDO CERRADO",
texto = "El pedido está cerrado y no admite más albaranes.",
padre = self.wids['ventana'])
else:
albaran = self.objeto
if albaran.cliente == None:
albaran.cliente = pedido.cliente
if pedido.cliente != albaran.cliente:
txtdlg='El cliente del pedido y del albarán debe ser el mismo.'
utils.dialogo_info(titulo = 'PEDIDO INCORRECTO',
texto = txtdlg,
padre = self.wids['ventana'])
else:
not_included = []
for ldp in pedido.lineasDePedido[:]:
# DONE: No unificar si tiene precios de venta distintos.
# Arreglado directamente en pclases para que devuelva
# cantidades servidas y pedidas teniendo en cuenta también
# el precio.
# DONE: Problema: A veces quieren servir la mitad de un
# producto a un precio y la otra mitad a otro.
# ¿Cómo lo hago? Fácil: Pongo a mano en la ventana la
# cantidad de cada línea que quiero servir y el algoritmo
# irá completando las líneas de arriba a abajo.
if (not ldp.albaraneada # Queda algo por servir
or (ldp.cantidad < 0
and ldp.cantidadPedida - ldp.cantidadServida != 0)):
# CWT: O tiene cantidad negativa por un abono made
# in BP y no se ha añadido ya a un albarán.
ldv = pclases.LineaDeVenta(
pedidoVentaID = ldp.pedidoVentaID,
facturaVenta = None,
productoVentaID = ldp.productoVentaID,
productoCompraID = ldp.productoCompraID,
albaranSalidaID = self.objeto.id,
fechahora = datetime.date.today(),
cantidad=ldp.cantidadPedida-ldp.cantidadServida,
precio = ldp.precio,
descuento = ldp.descuento,
notas = ldp.notas)
ajustar_existencias(ldv)
else:
# Si no se ha albaraneado porque la cantidad del
# producto viene incluida en una LDP anterior del
# mismo producto, al mismo precio y descuento (a.k.a.
# "se han unificado"), no la considero como "no
# albaraneada".
albaranes_de_la_ldp = ldp.albaranesSalida
if (not (len(albaranes_de_la_ldp) == 1
and albaranes_de_la_ldp[0] == self.objeto)):
not_included.append(ldp)
if not_included:
def get_nombre_producto(ldp):
producto = ldp.get_producto()
if hasattr(producto, "nombre"):
nombreproducto = producto.nombre
elif hasattr(producto, "descripcion"):
nombreproducto = producto.descripcion
else:
nombreproducto = "?"
return nombreproducto
utils.dialogo_info(titulo = 'LÍNEAS NO ALBARANEADAS',
texto = """
Las siguientes líneas de venta no se agregaron al albarán
por estar ya relacionadas con otra salida de material.
Modifique el pedido original o los albaranes asociados
si quiere agregarlas al albarán actual:
""" + '\n - ' + '\n - '.join(["Producto %s. Cantidad %.2f. Albarán de salida número %s" \
% (get_nombre_producto(ldp), ldp.cantidad, ", ".join(utils.unificar([a.numalbaran for a in ldp.albaranesSalida if a != None]))) \
for ldp in not_included]),
padre = self.wids['ventana'])
for srv in pedido.servicios:
if srv.albaranSalida == None:
srv.albaranSalida = self.objeto
self.modificado = True
self.actualizar_ventana()
def crear_ldv(self, articulos):
"""
Verifica que los artículos pertenezcan a una LDV existente.
Recibe una lista de artículos que deben ser del mismo producto de venta.
Verifica también que el artículo no esté en otro albarán de salida y
que haya sido analizado.
"""
if len(articulos) == 0:
return
pv = articulos[0].productoVenta
albaran = self.objeto
#if articulos[0].productoVenta not in [ldv.productoVenta for ldv in albaran.lineasDeVenta]:
# utils.dialogo_info('ERROR', 'El cliente no solicitó el producto %s en el pedido.' % (articulos[0].productoVenta.nombre),
# padre = self.wids['ventana'])
# return
productos_malos = []
for articulo in articulos:
articuloproductoVenta = articulo.productoVenta
if (articuloproductoVenta not in
[ldv.productoVenta for ldv in albaran.lineasDeVenta]):
if articuloproductoVenta not in productos_malos: # Para
# evitar tratar el mismo producto y mismo error una y
# otra vez.
productos_malos.append(articulo.productoVenta)
res = utils.dialogo(titulo = 'ERROR',
texto = 'El cliente no solicitó el producto %s en'\
' el pedido.\n\n¿Desea continuar?' % (
articulo.productoVenta.descripcion),
padre = self.wids['ventana'],
icono = gtk.STOCK_DIALOG_ERROR)
if not res:
return
else:
if (articulo.albaranSalida == None
or articulo.albaranSalida == self.objeto
# Ahora la condición para ver si está en almacén es que
# tenga relación con un almacén (obvious comment is
# obvious).
or articulo.almacen):
# Los artículos D llegan aquí ya añadidos al albarán,
# por eso incluyo el caso de que tenga
# ya albarán pero sea justo al que estamos añadiendo
# artículos. Si no, mostrará el error
# de la rama "else".
if (articulo.es_bala_cable()
or articulo.es_rollo_defectuoso()
or articulo.es_rolloC()
or articulo.analizado):
# La fibra de cable y rollos defectuosos no se analizan.
articulo.albaranSalida = albaran
#articulo.almacen = None
#articulo.almacen = self.objeto.almacenDestino
# será None cuando no sea un albarán de transferencia.
if self.objeto.almacenDestino:
articulo.mover_entre_almacenes(
self.objeto.almacenOrigen,
self.objeto.almacenDestino,
self.objeto)
else:
articulo.almacen = None
else:
res = utils.dialogo(titulo = 'PRODUCTO NO ANALIZADO',
texto = """
El artículo %s no ha sido analizado en
el laboratorio. No puede vender un producto
cuyas características de lote o partida
                            no han sido verificadas.
¿Desea continuar?
""" % (articulo.codigo),
padre = self.wids['ventana'],
icono = gtk.STOCK_DIALOG_WARNING)
if not res:
return
else:
res = utils.dialogo(titulo = 'ERROR',
texto = """
El artículo %s salió en el albarán %s.
                    Si cree que es incorrecto, compruebe el
albarán, elimine de allí el artículo
y vuelva a intentarlo.
""" % (articulo.codigo, articulo.albaranSalida.numalbaran),
padre = self.wids['ventana'],
icono = gtk.STOCK_DIALOG_ERROR)
if not res:
return
self.modificado = True
def drop_ldv(self, boton):
"""
Pone a None el idalbaran de la
línea de venta seleccionada y
a False la confirmación (aumentando
la cantidad del artículo).
"""
if self.wids['tv_ldvs'].get_selection().count_selected_rows() == 0:
return
model, paths = self.wids['tv_ldvs'].get_selection().get_selected_rows()
for path in paths:
iter = model.get_iter(path)
if model[iter].parent == None: # Es una LDV
idldv = model[iter][-1]
try:
#ldv = pclases.LineaDeVenta.get(idldv)
ldv = pclases.getObjetoPUID(idldv)
except pclases.SQLObjectNotFound: # Ya se ha borrado.
pass
else:
self.desvincular_ldv_del_albaran(ldv)
else: # Es un artículo
idarticulo = model[iter][-1]
#articulo = pclases.Articulo.get(idarticulo)
objeto = pclases.getObjetoPUID(idarticulo)
if isinstance(objeto, pclases.Pale):
vpro = VentanaProgreso(padre = self.wids['ventana'])
vpro.mostrar()
vpro.set_valor(0.0, "Devolviendo palé al almacén %s..." %
self.objeto.almacenOrigen.nombre)
total = len(self.objeto.articulos)
actual = 0.0
try:
for a in self.objeto.articulos:
actual += 1
vpro.set_valor(actual/total, texto = None)
if a.cajaID and a.caja.pale == objeto:
self.desvincular_articulo(a)
finally:
vpro.ocultar()
elif isinstance(objeto, pclases.Caja):
vpro = VentanaProgreso(padre = self.wids['ventana'])
vpro.mostrar()
vpro.set_valor(0.0, "Devolviendo caja al almacén %s..." %
self.objeto.almacenOrigen.nombre)
total = len(self.objeto.articulos)
actual = 0.0
try:
for a in self.objeto.articulos:
actual += 1
vpro.set_valor(actual/total, texto = None)
if a.cajaID and a.caja == objeto:
self.desvincular_articulo(a)
finally:
vpro.ocultar()
else:
self.desvincular_articulo(objeto)
self.modificado = True
self.actualizar_ventana()
def desvincular_articulo(self, articulo):
"""
Devuelve un objeto artículo al almacén, desvinculándolo del albarán
de salida actual, sea cual sea el tipo del mismo y actuando en
consecuencia (sacándolo del almacén actual y devolviéndolo al original
si era un albarán de transferencia, etc.).
"""
articulo.albaranSalida = None
if self.objeto.es_de_movimiento():
fail = articulo.anular_movimiento(
self.objeto.almacenOrigen,
self.objeto.almacenDestino,
self.objeto)
if fail: # Algo ha fallado.
articulo.albaranSalida = self.objeto
utils.dialogo_info(
titulo = "ALBARANES DE SALIDA: ERROR",
texto = "Se produjo un error al anular un "
"producto en el albarán actual (%d: %s)"
"\n\nCódigo de error: %d" % (
self.objeto.id,
self.objeto.numalbaran,
fail),
padre = self.wids['ventana'])
# Códigos de error en un diálogo de error. Si Jacob
# Nielsen levantara la cabeza... si... si estuviera
# muerto, claro.
else:
articulo.almacen = self.objeto.almacenOrigen
articulo.syncUpdate()
def desvincular_ldv_del_albaran(self, ldv):
# Primero hay que desvincular los artículos de la LDV.
productoVenta = ldv.productoVenta
albaran = self.objeto
for articulo in albaran.articulos:
if articulo.productoVenta == productoVenta and \
len([ldv for ldv in albaran.lineasDeVenta
if ldv.productoVenta == productoVenta]) == 1:
# Si hay más líneas del mismo producto no elimino sus artículos.
self.desvincular_articulo(articulo)
ajustar_existencias(ldv, 2 * ldv.cantidad)
        # Le paso el doble como cantidad anterior para que al restar quede
        # en positivo e incremente la cantidad.
ldv.albaranSalida = None
if ldv.facturaVentaID == None and ldv.prefacturaID == None:
try:
ldv.destroySelf()
except:
txterror = "albarabes_de_salida::desvincular_ldv_del_albaran -> La LDV ID %d no tiene albarán ni factura(s) pero no se pudo eliminar." % (ldv.id)
print txterror
self.logger.error(txterror)
def desvincular_articulos_del_albaran(self, articulos):
for a in articulos:
            self.desvincular_articulo(a)
def borrar_albaran(self, boton):
"""
Elimina el albarán de la BD y anula la relación entre
él y sus LDVs.
"""
        if not utils.dialogo('Se eliminará el albarán actual y todas sus '
                             'relaciones con ventas, pedidos, etc.\n'
                             '¿Está seguro?', 'BORRAR ALBARÁN'):
            return
albaran = self.objeto
albaran.notificador.desactivar()
for ldv in albaran.lineasDeVenta:
self.desvincular_ldv_del_albaran(ldv)
self.desvincular_articulos_del_albaran(albaran.articulos)
try:
albaran.destroySelf()
except:
utils.dialogo_info('ERROR', 'No se pudo eliminar.\nIntente '
'eliminar primero los productos, servicios y '
'transportes del albarán.',
padre = self.wids['ventana'])
return
self.ir_a_primero()
def get_nums_pedidos(self, albaran, cortar = True):
"""
Devuelve una cadena con la lista de
pedidos asociados al albarán.
Si "cortar" es True, devuelve una cadena vacía
si hay más de dos pedidos (es para que al imprimir
no se vaya del espacio reservado a los números de pedido).
"""
pedidos = self.get_pedidos(albaran)
if len(pedidos) > 2 and cortar:
return ''
else:
return ', '.join([p.numpedido for p in pedidos])
def contar_bultos_de_ldvs(self, prods, p):
bultos = 0
for ldv in prods[p]:
try:
datos_rollo = ldv.productoVenta.camposEspecificosRollo
m2rollo = datos_rollo.metrosLineales * datos_rollo.ancho
try:
bultos += int(ldv.cantidad / m2rollo)
except ZeroDivisionError:
bultos += 0
except AttributeError:
# producto no tiene campos específicos. Es bala.
bultos += 0
return bultos
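    # Worked example (hypothetical figures): a rollo of 50 m (metrosLineales)
    # x 2 m (ancho) covers 100 m2, so an LDV of 350 m2 contributes
    # int(350 / 100) = 3 bultos; products without camposEspecificosRollo
    # (balas) raise AttributeError and contribute 0.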
def preparar_llamada_imprimir(self, albaran):
"""
Devuelve la lista de parámetros que se
pasarán a geninformes.
"""
cliente = albaran.cliente
if cliente == None:
utils.dialogo_info(titulo = 'SIN CLIENTE',
texto = 'Debe seleccionar un cliente y guardar el albarán'
' antes de imprimirlo.',
padre = self.wids['ventana'])
return None, None, None, None, None, None, None, None, None, None
# Se acabaron las relaciones con Composan, ya no hacen falta sus
# albaranes amarillos.
composan = False
#if 'COMPOSAN' in cliente.nombre.upper():
# composan = True
#else:
# composan = False
fiscal = None
client = { 'nombre': cliente.nombre or "",
#'direccion': cliente.direccion or "", # CWT: Después de...
'direccion': cliente.direccionfacturacion or "",
#'cp': cliente.cp or "", # 3 años más o menos, ahora...
'cp': cliente.cpfacturacion or "",
#'ciudad': cliente.ciudad or "", # resulta que la...
'ciudad': cliente.ciudadfacturacion or "",
#'provincia': cliente.provincia or "", # dirección del...
'provincia': cliente.provinciafacturacion or "",
#'pais':cliente.pais or "", # cliente en los albaranes...
'pais':cliente.paisfacturacion or "",
#'telf': cliente.telefono or "" # debe ser la fiscal.
'telf': cliente.telefono or ""
}
if self.wids['e_telf'].get_text() == '':
telefono = ''
else:
telefono = 'Teléfono: %s' % self.wids['e_telf'].get_text()
envio ={'nombre': self.wids['cbe_nom'].child.get_text(),
'direccion': self.wids['e_direccion'].get_text(),
'cp': self.wids['e_cp'].get_text() ,
'localidad': self.wids['e_ciudad'].get_text(),
'telefono': telefono,
'pais': self.wids['e_pais'].get_text()}
general = {'albnum': albaran.numalbaran,
'fecha': albaran.fecha.strftime('%d/%m/%Y'),
'exp': '',
'numcli': cliente.id,
'numped': self.get_nums_pedidos(albaran),
'numref': '',
'sref': ''
}
lineas = []
prods = self.ldvs_agrupadas_por_producto(albaran)
for producto in prods:
# OJO: CHANGE: Otro CWT. Si los artículos añadidos a la LDV
# son 0, tomo los del pedido, que se calculan en
# base al producto.
bultos = 0
if hasattr(producto, "es_caja") and producto.es_caja():
#bultospales = []
#for ldv in prods[producto]:
# bultospales += [a.bolsa.caja.pale
# for a in self.__ldvs[ldv.id]['articulos']]
#bultospales = utils.unificar(bultospales)
#bultos = len(bultospales)
# OPTIMIZACIÓN
try:
idsarticulos = []
for ldv in prods[producto]:
idsarticulos += [str(id)
for id in self.__ldvs[ldv.id]['idsarticulos']]
idsarticulos = ", ".join(idsarticulos)
sql = """
SELECT COUNT(*)
FROM pale, caja, articulo
WHERE pale.id = caja.pale_id
AND caja.id = articulo.caja_id
AND articulo.id IN (%s);
""" % ids_articulos
sqlpaleres = pclases.Pale._connection.queryOne(sql)
bultos = sqlpaleres[0][0]
                    # It MUST work. Si no, prefiero
# que pete, aunque temporalmente
# usaré el algoritmo lento.
except Exception, msg:
print "albaranes_de_salida.py::imprimir ->", msg
bultospales = []
for ldv in prods[producto]:
bultospales += [a.caja.pale
for a in self.__ldvs[ldv.id]['articulos']]
bultospales = utils.unificar(bultospales)
bultos = len(bultospales)
else:
try:
for ldv in prods[producto]:
articulos = self.__ldvs[ldv.id]['articulos']
bultos += len(articulos) # Bultos añadidos
except ZeroDivisionError, msg:
txterror="albaranes_de_salida::preparar_llamada_imprimir"\
" -> Excepción al contar bultos para imprimir el"\
" albarán: %s" % (msg)
print txterror
self.logger.error(txterror)
if bultos == 0:
bultos = self.contar_bultos_de_ldvs(prods, producto)
            # TODO: En bultos de LDT (al menos) no cuenta los kg. para
            # generar el albarán de salida.
try:
cantidad_anadida = self.cantidad_anadida(
prods[producto][0].producto)
except Exception, msg:
txterror = "albaranes_de_salida::preparar_llamada_imprimir"\
" -> Excepción al contar cantidad añadida al "\
"imprimir el albarán: %s" % (msg)
self.logger.error(txterror)
print txterror
cantidad_anadida = 0
# Si la cantidad añadida en artículos servidos es 0 y el
# producto es un producto especial o un producto de compra,
# la cantidad servida que aparecerá impresa es la de las LDVs
# del pedido.
if (cantidad_anadida == 0 and
(isinstance(producto, pclases.ProductoCompra)
or (hasattr(producto, "camposEspecificosEspecialID")
and producto.camposEspecificosEspecialID != None
)
)
):
for ldv in prods[producto]:
# prods es un diccionario que tiene como claves el producto
# y como valores una lista de LDVs del albarán
# pertenecientes a ese producto.
cantidad_anadida += ldv.cantidad
total = sum([ldv.get_subtotal(iva = True)
for ldv in prods[producto]])
# cantidad_total = cantidad_anadida # WTF? ¿No sobra esta línea?
try:
# Calculándolo así soluciono el problema de varias líneas a
# diferentes precios, y me aseguro de que siempre cuadra (a
                # no ser que haya precios muy pequeños, con muchos decimales y
# por cantidades muy altas, en cuyo caso *AL MOSTRAR EN
# IMPRESO* con dos decimales, se redondea. Internamente siguen
# siendo coherentes).
precio_unitario = total / cantidad_anadida
except ZeroDivisionError:
precio_unitario = 0
d = {'bulto': bultos,
'codigo': producto.codigo,
'descripcion': producto.descripcion,
# 'cantidad': self.calcular_cantidad_ldvs(prods[p]),
# 'cantidad': sum([ldv.cantidad for ldv in prods[p]]),
'cantidad': cantidad_anadida,
'numped': self.get_numpedidos_ldvs(prods[producto]),
'precio unitario': utils.float2str_autoprecision(
precio_unitario,
cantidad_anadida,
total),
'total': utils.float2str(total),
'unidad': producto.get_str_unidad_de_venta()
}
lineas.append(d)
## SERVICIOS: ##
dde = pclases.DatosDeLaEmpresa.select()
if dde.count() > 0:
dde = dde[0]
if not dde.esSociedad:
for srv in albaran.servicios:
total = srv.get_subtotal(iva = True)
try:
precio_unitario = total / srv.cantidad
except ZeroDivisionError:
precio_unitario = 0
d = {'bulto': 0,
'codigo': "",
'descripcion': srv.concepto,
'cantidad': srv.cantidad,
'numped': srv.pedidoVenta
and srv.pedidoVenta.numpedido
or "",
"precio unitario": utils.float2str_autoprecision(
precio_unitario,
srv.cantidad,
total),
"total": utils.float2str(total)
}
lineas.append(d)
## EOSERVICIOS ##
observaciones = self.objeto.observaciones
if (not cliente.provincia) and (not cliente.ciudad):
destino = ''
else:
destino = "%s (%s)" % (cliente.ciudad, cliente.provincia)
transporte = self.wids['e_agencia'].get_text()
conformeT = {'nombre': self.wids['e_nombre'].get_text(),
'dni': self.wids['cbe_dni'].get_child().get_text(),
'telf': self.wids['e_telefono'].get_text(),
'matricula': self.wids['e_matricula'].get_text()
}
conformeD = {'nombre': '',
'dni': '',
'telf': '',
'matricula': ''}
return composan, client, envio, general, lineas, observaciones, destino, transporte, conformeT, conformeD
def get_numpedidos_ldvs(self, ldvs):
"""
Devuelve los número de pedidos como cadena,
separados por coma, relacionados con las
LDVs de ldvs.
"""
nps = []
for ldv in ldvs:
if isinstance(ldv, pclases.LineaDeVenta):
if ldv.pedidoVenta != None and ldv.pedidoVenta.numpedido not in nps:
nps.append(ldv.pedidoVenta.numpedido)
return ','.join([str(numpedido) for numpedido in nps])
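    # e.g., for hypothetical LDVs whose pedidos are '2010/1', '2010/1' and
    # '2010/7', this returns '2010/1,2010/7': duplicates are skipped and
    # first-appearance order is preserved.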
def calcular_cantidad_ldvs(self, lineas):
#def calcular_cantidad_ldvs(self, albaran):
"""
Recorre la lista de los artículos asociados al
albarán y suma:
- Si es una LDV de balas: pesobala
- Si es un rollo: multiplicar longitud*ancho.
"""
        # OJO: La versión comentada arriba recibía un albarán; esta recibe
        # una lista de LDVs.
# Primero determino si son LDVs de balas o rollos.
try:
if lineas[0].productoVenta.camposEspecificosRollo != None:
return self.suma_cantidad_rollos(lineas)
else:
return self.suma_cantidad_balas(lineas)
except IndexError:
# No tiene artículos relacionados
return 0
def suma_cantidad_rollos(self, ars):
articulos = len(ars)
ancho = ars[0].productoVenta.camposEspecificosRollo.ancho
largo = ars[0].productoVenta.camposEspecificosRollo.metrosLineales
metros_cuadrados = largo * ancho
return articulos * metros_cuadrados
def suma_cantidad_balas(self, ars):
return sum([a.productoVenta.camposEspecificosBala.pesobala
for a in ars])
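    # Sketch with made-up numbers: 3 artículos of a rollo 2.0 m (ancho) x
    # 50.0 m (metrosLineales) -> suma_cantidad_rollos = 3 * 100.0 = 300.0 m2;
    # for balas, suma_cantidad_balas is simply the sum of pesobala (kg) of
    # each artículo's product.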
def ldvs_agrupadas_por_producto(self, albaran):
"""
Devuelve un diccionario de LDVs.
La clave es el código de producto y
el valor es una lista de LDVs pertenecientes
a ese producto.
CWT: Y DE DEVOLUCIONES.
"""
prods = {}
for ldv in albaran.lineasDeVenta:
producto = ldv.producto
if producto not in prods.keys():
prods[producto] = [ldv]
else:
prods[producto].append(ldv)
return prods
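    # Illustrative result for two hypothetical LDVs of product P and one of
    # product Q: {P: [ldv1, ldv2], Q: [ldv3]}. The membership test against
    # prods.keys() is a linear scan; an equivalent idiom (same behaviour)
    # would be:
    #   prods.setdefault(producto, []).append(ldv)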
def imprimir(self, w):
"""
Genera un albarán en PDF a partir de los datos
del albarán actual.
"""
if pclases.DEBUG:
import time
if pclases.DEBUG:
print "Llamando a self.preguntar_si_redistribuir..."
antes = time.time()
self.preguntar_si_redistribuir()
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "Llamando a self.guardar..."
antes = time.time()
self.guardar(None, actualizar_ventana = False) # Si se ha olvidado
# guardar, guardo yo.
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "Comprobando self.wids['ch_debellevargastos']..."
antes = time.time()
albaran = self.objeto
if (self.wids['ch_debellevargastos'].get_active()
and len(self.objeto.servicios) == 0):
utils.dialogo_info(titulo = "ALBARÁN INCOMPLETO",
texto = "Se le ha indicado que el albarán debe incluir el "
"transporte.\n Sin embargo no ha incluido ninguno."
"\n Incluya uno o modifique el pedido original.",
padre = self.wids['ventana'])
return
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "Iterando bucle de cantidades de ldv "\
"(self.cantidad_anadida_a_ldv(ldv)..."
antes = time.time()
for ldv in albaran.lineasDeVenta:
if ((ldv.productoVentaID != None
and ldv.productoVenta.articulos != [])
and (self.usuario != None
and self.usuario.nivel >= 2
and not self.objeto.bloqueado)):
# DONE: Así consigo que se imprima la cantidad del pedido en
# productos "especiales" (cables de fibra y cosas
# así) que no tienen artículos en la BD porque nunca
# se ha fabricado nada y no tienen existencias a
# las que se le asignen códigos de bala o rollo.
# CWT: Si el usuario tiene privilegios, que pueda imprimir
# los albaranes con la cantidad que quieran.
ldv.cantidad = self.cantidad_anadida_a_ldv(ldv)
ldv.sync()
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "Llamando a actualizar_ventana..."
antes = time.time()
self.actualizar_ventana()
# Para refrescar los cambios en cantidades autoajustadas y tal.
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "Llamando a preparar_llamada_imprimir..."
antes = time.time()
c, f, e, g, l, o, d, t, cT, cD=self.preparar_llamada_imprimir(albaran)
if c == f == e == g == None: # etc...
return
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "Comprobando configuración de albarán multipágina..."
antes = time.time()
if pclases.config.get_multipagina():
try:
import albaran_multipag
except ImportError:
import sys
sys.path.append(os.path.join("..", "informes"))
import albaran_multipag
from informes import abrir_pdf
alb_mp = albaran_multipag.go_from_albaranSalida(self.objeto)
abrir_pdf(alb_mp)
elif not pclases.config.get_valorar_albaranes():
if c:
nomarchivo = geninformes.albaran(False,f,e,g,l,o,d,t,cT,cD)
self.abrir_albaran_imprimido(c, nomarchivo)
nomarchivo_compo = geninformes.albaran(c,f,e,g,l,o,d,t,cT,cD)
self.abrir_albaran_imprimido(c, nomarchivo_compo)
else:
nomarchivo = geninformes.albaranValorado(f, e, g, l, o, d, t, cT,
pclases.config.get_valorar_albaranes_con_iva())
self.abrir_albaran_imprimido(c, nomarchivo)
nomarchivo_compo = nomarchivo
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "Llamando a... FUCK LIKE A BEAST, FIGHT LIKE AN ANIMAL!"
antes = time.time()
if pclases.DEBUG:
print " --> Sin hueso mi ansiedad:", time.time() - antes
print "A partir de aquí ya son movidas de carta de portes, CMR, "\
"generación de factura y demás. No me interesa."
antes = time.time()
if pclases.config.get_carta_portes():
try:
import albaran_porte
except ImportError:
import sys
sys.path.append(os.path.join("..", "informes"))
import albaran_porte
from informes import abrir_pdf
try:
kilos = sum([ldv.producto.calcular_kilos() * ldv.cantidad
for ldv in self.objeto.lineasDeVenta])
except (TypeError, ValueError):
kilos = utils.dialogo_entrada(
titulo = "INTRODUZCA PESO TOTAL",
texto = "Introduzca los kilogramos totales del albarán:",
padre = self.wids['ventana'])
recogida = utils.dialogo(titulo = "¿IMPRIMIR HOJA DE RECOGIDA?",
texto = "Responda «Sí» para generar una hoja adicional de recogida de envases vacíos.",
padre = self.wids['ventana'],
defecto = gtk.RESPONSE_NO,
tiempo = 10)
alb_cp, envases = albaran_porte.go_from_albaranSalida(self.objeto,
kilos,
recogida)
abrir_pdf(alb_cp)
if envases:
abrir_pdf(envases)
# OJO: No se adjunta al correo.
else:
self.imprimir_cmr()
if ((self.usuario == None or self.usuario.nivel <= 1)
and self.objeto.bloqueado):
utils.dialogo_info(titulo = "NO SE GENERARÁ FACTURA",
texto = """
El albarán se encuentra verificado y bloqueado.
No se generará factura.
Si cree que debe generar una factura del presente
albarán (o de la parte que quede pendiente de
facturar del mismo), desbloquee primero el albarán.
""",
padre = self.wids['ventana'])
if self.objeto and self.objeto.cliente:
self.objeto.cliente.sync()
if (self.objeto.facturable
and not self.objeto.bloqueado
and self.objeto.cliente and self.objeto.cliente.facturarConAlbaran
and utils.dialogo(titulo = "¿GENERAR FACTURA?",
texto = """
Compruebe minuciosamente el impreso generado.
Si está seguro de que es correcto, responda
sí para generar la factura.
""",
padre = self.wids['ventana'])):
ok, factura = self.generar_factura()
if ok:
self.objeto.bloqueado = factura.bloqueada = ok
nomarchivo_factura = imprimir_factura(factura, self.usuario)
for numcopia in range(self.objeto.cliente.copiasFactura):
nomarchivo_copia = imprimir_factura(factura, self.usuario,
es_copia = True)
self.actualizar_ventana()
from facturas_venta import debe_generar_recibo
from facturas_venta import generar_recibo
if debe_generar_recibo(factura, self.wids['ventana']):
generar_recibo(factura,
self.usuario,
self.logger,
self.wids['ventana'])
if self.objeto.cliente:
correoe_cliente = self.objeto.cliente.email
if self.objeto.cliente.cliente != None:
# Tiene intermediarios, añado sus correos a la lista de
# destinatarios; con los mismos adjuntos y demás que el
# cliente original, aunque no tenga marcadas las opciones de
# recibir copia en la ventana de clientes (eso se usaría para
# las ventas hechas directamente a él, no a sus clientes).
correoe_cliente += " %s" % self.objeto.cliente.cliente.email
correoe = utils.dialogo_entrada(
titulo = "¿ENVIAR ALBARÁN POR CORREO ELECTRÓNICO?",
texto = """
Introduzca a continuación el correo electrónico del
cliente si desea enviar una copia del albarán,
"packing list" y/o factura en PDF.
Cancele en caso contrario.
Por defecto aparecerán las direcciones de correo del
cliente, seguidas de las del comercial -si lo tuviera-.
""",
valor_por_defecto = correoe_cliente,
padre = self.wids['ventana'])
if correoe != None and correoe != '':
fichero_albaran = fichero_factura = ficheros_packing = None
if self.objeto.cliente.enviarCorreoAlbaran:
fichero_albaran = nomarchivo_compo
# Aunque parezca lo contrario, este no es el albarán
# de Composan.
if self.objeto.cliente.enviarCorreoFactura:
try:
fichero_factura = nomarchivo_factura
except NameError:
utils.dialogo_info(titulo = "FACTURA NO GENERADA",
texto = "La factura no se ha generado correctamente "
"o fue creada con anterioridad.\nNo se "
"enviará por correo.\nSi la factura "
"existe y quiere enviar una copia por correo "
"electrónico, hágalo desde la ventana de "
"facturas de venta.",
padre = self.wids['ventana'])
if self.objeto.cliente.enviarCorreoPacking:
ficheros_packing = self.packinglist(None, abrir_pdf = False)
self.enviar_por_correo(correoe, fichero_albaran, fichero_factura,
ficheros_packing)
def imprimir_cmr(self):
lugar_entrega = utils.dialogo_entrada(titulo = "CMR", texto = "Lugar de entrega:", padre = self.wids['ventana'], textview = True,
valor_por_defecto = self.objeto.nombre + "\n" + self.objeto.direccion + "\n" +
self.objeto.cp + "\n" + self.objeto.ciudad + " " + self.objeto.pais)
if lugar_entrega != None:
transportista = utils.dialogo_entrada(titulo = "CMR", texto = "Transportista:", padre = self.wids['ventana'], textview = True)
if transportista != None:
porteadores = utils.dialogo_entrada(titulo = "CMR", texto = "Porteadores:", padre = self.wids['ventana'], textview = True)
if porteadores != None:
from informes import abrir_pdf
abrir_pdf(geninformes.cmr(self.objeto, lugar_entrega, transportista, porteadores))
    def enviar_por_correo(self, email, fichero_albaran = None,
                          fichero_factura = None,
                          fichero_packing_list = None):
"""
Crea un correo electrónico con fichero adjunto y
lo envía a la dirección "email".
"""
texto_adjuntos = []
adjuntos = []
if fichero_albaran:
adjuntos.append(fichero_albaran)
texto_adjuntos.append("copia del albarán")
if fichero_factura:
adjuntos.append(fichero_factura)
texto_adjuntos.append("copia de la factura")
if fichero_packing_list:
for fpl in fichero_packing_list:
adjuntos.append(fpl)
texto_adjuntos.append("copia de %d packing list%s" % (len(fichero_packing_list), len(fichero_packing_list) > 1 and "s" or ""))
texto_adjuntos = ": " + ", ".join(texto_adjuntos) + "."
correos = email.replace(",", " ").replace(";", "").strip().split()
correos = utils.unificar([c.lower().strip() for c in correos])
remitente = smtpuser = smtppass = server = ""
if self.usuario != None:
remitente = self.usuario.email
smtpuser = self.usuario.smtpuser
smtppass = self.usuario.smtppassword
server = self.usuario.smtpserver
if self.usuario == None or not remitente:
remitente = utils.dialogo_entrada(titulo = "DATO DE USUARIO NO ENCONTRADO",
texto = "Introduzca remitente del correo electrónico:",
padre = self.wids['ventana'])
if remitente == None or remitente.strip() == '':
return
if self.usuario == None or not server:
server = utils.dialogo_entrada(titulo = "DATO DE USUARIO NO ENCONTRADO",
texto = "Introduzca servidor SMTP de correo saliente:",
padre = self.wids['ventana'])
if server == None or server.strip() == '':
return
if self.usuario == None or not smtpuser:
smtpuser = utils.dialogo_entrada(titulo = "DATO DE USUARIO NO ENCONTRADO",
texto = "Introduzca usuario para autentificación en servidor SMTP de salida:",
padre = self.wids['ventana'])
if smtpuser == None:
return
if smtpuser.strip() == "":
smtpuser = None
if self.usuario == None or not smtppass:
smtppass = utils.dialogo_entrada(titulo = "DATO DE USUARIO NO ENCONTRADO",
texto = "Introduzca contraseña para autentificación en servidor SMTP de salida:",
padre = self.wids['ventana'],
pwd = True)
if smtppass == None:
return
if smtppass.strip() == "":
smtppass = None
try:
dde = pclases.DatosDeLaEmpresa.select()[0]
empresa = " (%s)" % (dde.nombre)
except:
txt = "No hay empresa dada de alta en datos_de_la_empresa. Es necesario para que aparezca el nombre como remitenten en el asunto del correo electónico."
self.logger.error(txt)
print txt
empresa = ""
asunto = "Albarán %s%s" % (self.objeto.numalbaran, empresa)
asunto = utils.dialogo_entrada(titulo = "ASUNTO DEL CORREO ELECTRÓNICO",
texto = "Introduzca un texto para el asunto del correo electrónico:",
padre = self.wids['ventana'],
valor_por_defecto = asunto)
if asunto == None:
return
if adjuntos == []:
texto = """Albarán %s generado e imprimido. Fecha de salida de la mercancía del almacén: %s.""" % (self.objeto.numalbaran, utils.str_fecha(datetime.date.today()))
else:
texto = """Adjunto la siguiente documentación en formato PDF correspondiente al albarán %s%s""" % (self.objeto.numalbaran, texto_adjuntos)
texto = utils.dialogo_entrada(titulo = "TEXTO DEL CORREO ELECTRÓNICO",
texto = "Introduzca un texto como contenido del correo electrónico:",
padre = self.wids['ventana'],
valor_por_defecto = texto)
if texto == None:
return
try:
dde = pclases.DatosDeLaEmpresa.select()[0]
texto = texto + """
%s
%s
%s - %s, %s
%s
""" % (dde.nombre,
dde.direccion,
dde.cp,
dde.ciudad,
dde.provincia,
dde.pais)
except:
texto = texto + """
Universal Pilates Vitality Studio
Dirección
Cod. Postal - Marbella, Málaga.
España.
"""
correos.append(remitente)
try:
ok = utils.enviar_correoe(remitente, correos, asunto, texto, adjuntos, server, smtpuser, smtppass)
except Exception, msg:
self.logger.error("%salbaranes_de_salida::enviar_por_correo -> Error al enviar el albarán ID %d. Mensaje de la excepción: %s" % (self.usuario and self.usuario.usuario + ": " or "", self.objeto and self.objeto.id or 0, msg))
ok = False
if not ok:
utils.dialogo_info(titulo = "ERROR ENVÍO E-MAIL",
texto = "Ocurrió un error enviando el correo electrónico.\nGuarde los documentos e inténtelo más tarde desde su propio cliente de correo.",
padre = self.wids['ventana'])
else:
utils.dialogo_info(titulo = "CORREO ELECTRÓNICO ENVIADO",
texto = "Se envío el correo electrónico a los destinatarios y una copia al remitente.\nVerifique que lo recibe y vuelva a enviar la documentación en caso contrario.",
padre = self.wids['ventana'])
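    # Hypothetical call shape of the helper used above (mirrors the actual
    # invocation in this method; addresses and values are illustrative):
    #   utils.enviar_correoe('yo@miempresa.com', ['cliente@dominio.com'],
    #                        'Albarán A-1', 'Texto del correo...',
    #                        ['/tmp/albaran.pdf'], 'smtp.miempresa.com',
    #                        'smtpuser', 'smtppass')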
def abrir_albaran_imprimido(self, composan, nomarchivo):
"""
Muestra el albarán generado en PDF.
composan indica si hay que abrir también el de Composan.
NOTA: La variable "composan" actualmente se ignora.
"""
import informes
informes.abrir_pdf(nomarchivo)
def preguntar_si_redistribuir(self):
"""
Comprueba cada línea de venta para ver si alguna no coincide
con las del pedido del que procede.
Si es así, Y EL ALBARÁN ES NUEVO O
"""
# NOTA: No sé por qué no acabé la docstring, de todas formas me parece
# que este método, con la nueva forma de crear LDVs, no tiene ya mucho
# sentido. Tampoco creo que se cumpla la condición que hace saltar el
# dialogo y tal.
# DONE: Repasar y quitar el diálogo de confirmación si es que
# realmente sobra (que para mí que sí).
if self.nuevo or self.modificado:
for idldv in self.__ldvs:
if (round(self.__ldvs[idldv]['ldv'].cantidad, 3)
!= round(self.__ldvs[idldv]['cantidad'], 3)):
if self.__ldvs[idldv]['cantidad'] > 0:
                    # TODO: If the ADDED quantity is not zero, adjust the
                    # quantities. If it is 0 I prefer to keep the original
                    # LDV quantity, because it may be a "special" sale of
                    # fiber cable or waste with no related articulo
                    # objects in stock. This stays this way until I settle
                    # how this kind of sale product will be stored in the
                    # DB.
model = self.wids['tv_ldvs'].get_model()
iter = model.get_iter_first()
self.redistribuir_ldv(model.get_path(iter))
iter = model.iter_next(iter)
while iter != None:
self.redistribuir_ldv(model.get_path(iter))
iter = model.iter_next(iter)
self.actualizar_ventana()
break
def pre_salir(self, w):
"""
Bueno, se ejecuta antes de salir de la ventana, ¿qué nombre esperabas?
"""
self.preguntar_si_redistribuir()
self.salir(w)
def packinglist(self, boton, abrir_pdf = True):
"""
Prepara e imprime (genera un PDF) los datos del Packing List de los
artículos del albarán. Idealmente se usará solo para fibra, aunque
también soporta geotextiles, geocompuestos y fibra de cemento.
"""
        pl = []
        modelo_pl_balas = False # Initialized up here: it is referenced
        # after the loop even when no sale line qualifies.
        albaran = self.objeto
for ldv, linea_de_venta in [(self.__ldvs[id],
pclases.LineaDeVenta.get(id))
for id in self.__ldvs
if pclases.LineaDeVenta.get(id).productoVenta != None]:
            # Dictionary of the window's LDV and the LDV object itself.
if (not (linea_de_venta.productoVenta != None
and not linea_de_venta.productoVenta.es_especial())):
continue
producto = linea_de_venta.productoVenta.descripcion
codigoproducto = linea_de_venta.productoVenta.codigo
fecha = utils.str_fecha(albaran.fecha)
try:
datos_empresa = pclases.DatosDeLaEmpresa.select()[0]
linea0 = datos_empresa.nombre.upper()
linea1 = datos_empresa.direccion
linea2 = "%s %s (%s)" % (datos_empresa.cp,
datos_empresa.ciudad,
datos_empresa.provincia)
if datos_empresa.fax:
linea3 = "TEL %s - FAX %s" % (datos_empresa.telefono,
datos_empresa.fax)
else:
linea3 = "TEL %s" % (datos_empresa.telefono)
except Exception, msg:
utils.dialogo_info(titulo="ERROR BUSCANDO DATOS DE LA EMPRESA",
texto = "Los datos de la cabecera "
"(información de la propia empresa) "
"no se encontraron.\n\nContacte con "
"el administrador para solventar "
"este error.\n\n\nInformación de "
"depuración:\n%s" % msg,
padre = self.wids['ventana'])
return
nombre = albaran.nombre
direccion = albaran.direccion
ciudad = albaran.ciudad
cp = albaran.cp
pais = albaran.pais
lotes = self.get_lotes_o_partidas(ldv['articulos'])
tipo = self.get_tipo(ldv)
balas = self.get_balas_o_rollos(ldv['articulos'])
total = "%d" % len(balas)
peso = "%s" % utils.float2str(sum([b[2] for b in balas]))
            # balas is a tuple of 5-element lists: code, weight as a
            # string, weight, number and the related articulo object. A
            # traceability code (Domenech's one if it is a bala) gets
            # inserted by the loop below.
modelo_pl_balas = False
for i in range(len(balas)):
articulo = balas[i][-1]
if articulo.es_bala():
modelo_pl_balas = True
bala = articulo.bala
campos = articulo.productoVenta.camposEspecificosBala
baladic={'descripcion': articulo.productoVenta.descripcion,
'codigo':bala.codigo,
'color':str(campos.color),
'peso': utils.float2str(bala.pesobala),
'lote': bala.lote.codigo,
'tipo': campos.tipoMaterialBala and str(campos.tipoMaterialBala.descripcion) or "",
'longitud':str(campos.corte),
'nbala':str(bala.numbala),
'dtex':str(campos.dtex),
'dia':utils.str_fecha(bala.fechahora),
'acabado': campos.antiuv and 1 or 0,
'codigoBarra': articulo.productoVenta.codigo}
codigo_domher = geninformes._build_codigo_domenech(baladic)
codigo_trazabilidad = codigo_domher
else:
codigo_trazabilidad = articulo.codigo
                balas[i].insert(-1, codigo_trazabilidad) # The last
                # position always holds the articulo; the traceability
                # code goes second to last.
balas = list(balas)
            # The ordering coming from the helper is only valid for 4
            # columns; reorder so items come out left to right, top to
            # bottom.
balas.sort(lambda b1, b2: int(b1[3] - b2[3]))
balas = tuple(balas)
pl.append({'producto': producto,
'codigo_producto': codigoproducto,
'fecha': fecha,
'lote': lotes,
'tipo': tipo,
'balas': balas,
'total': total,
'peso': peso,
'envio': {'nombre': nombre,
'direccion': direccion,
'ciudad': ciudad,
'cp': cp,
'pais': pais},
'empresa': {'linea0': linea0,
'linea1': linea1,
'linea2': linea2,
'linea3': linea3}
})
return self.imprimir_packing_list(tuple(pl), abrir_pdf,
modelobalas = modelo_pl_balas)
def imprimir_packing_list(self, packing_lists, abrir_pdf = True,
modelobalas = True):
"""
«modelobalas» define el modelo de packing list a imprimir. El de
balas (códigos más altos, una columna) o el antiguo.
"""
        self.guardar(None, actualizar_ventana = False) # Save on the
        # user's behalf in case they forgot to.
packings_generados = []
if self.objeto.cliente.packingListConCodigo:
if modelobalas:
func_packinglist = geninformes.packingListBalas
else:
func_packinglist = geninformes._packingListBalas
else:
func_packinglist = geninformes.oldPackingListBalas
for i in xrange(len(packing_lists)):
titulopackinglist = "Packing list. Albarán %s" % (
self.objeto.numalbaran)
if len(packing_lists) > 1:
titulopackinglist += " (%d/%d)" % (i + 1, len(packing_lists))
nomarchivo = func_packinglist(packing_lists[i], i+1,
titulo = titulopackinglist)
if abrir_pdf:
self.abrir_albaran_imprimido(False, nomarchivo)
packings_generados.append(nomarchivo)
return packings_generados
def get_balas_o_rollos(self, articulos):
balas = []
pales_cemento_tratados = []
for a in articulos:
if a.es_bala():
balas.append([a.bala.codigo,
"%s kg" % utils.float2str(a.bala.pesobala),
a.bala.pesobala,
a.bala.numbala,
a])
elif a.es_caja():
if a.caja.pale not in pales_cemento_tratados:
pale = a.caja.pale
lista = [pale.codigo,
"%s kg" % utils.float2str(pale.calcular_peso()),
pale.calcular_peso(),
pale.numpale,
a]
balas.append(lista)
pales_cemento_tratados.append(pale)
elif a.es_bala_cable():
balas.append([a.balaCable.codigo,
"%s kg" % utils.float2str(a.balaCable.peso),
a.balaCable.peso,
a.balaCable.numbala,
a])
elif a.es_rollo():
try:
largo=a.productoVenta.camposEspecificosRollo.metrosLineales
ancho = a.productoVenta.camposEspecificosRollo.ancho
metros2 = largo * ancho
except:
self.logger.error("albaranes_de_salida.py: get_balas_o_rollos: (packing list). El producto de venta del artículo no tiene campos específicos o el artículo no tiene producto de venta relacionado.")
metros2 = 0
balas.append([a.rollo.codigo,
"%s m2" % utils.float2str(metros2),
metros2,
a.rollo.numrollo,
a])
elif a.es_bigbag():
balas.append([a.bigbag.codigo,
"%s kg." % utils.float2str(a.bigbag.pesobigbag),
a.bigbag.pesobigbag,
a.bigbag.numbigbag,
a])
elif a.es_rollo_defectuoso():
balas.append([a.codigo,
"%s m2" % (utils.float2str(a.superficie)),
a.superficie,
a.rolloDefectuoso.numrollo,
a])
elif a.es_rolloC():
balas.append([a.codigo,
"%s kg" % utils.float2str(a.peso),
a.peso,
a.rolloC.numrollo,
a])
else:
self.logger.error("El artículo ID %d no tiene asociado ni una bala, ni rollo [defectuoso] ni bigbag." % (a.id))
balas_aux = []
i = 0
fcmp = lambda x,y:[(x[-1]<y[-1] and -1) or (x[-1]>y[-1] and 1) or 0][0]
balas.sort(fcmp)
while balas:
numfilas = [(len(balas)/4.0) % 1 > 0 and int((len(balas)/4.0) + 1) or int(len(balas)/4.0)][0]
balas_aux.append(balas.pop(i))
i = [i+numfilas-1<len(balas) and i+numfilas-1 or 0][0]
        # So they go in descending order, top to bottom and left to
        # right across the 4 columns of the packing list.
return tuple(balas_aux)
def get_tipo(self, ldv):
tipo = ""
if len(ldv['articulos']):
pv = ldv['articulos'][0].productoVenta
if pv.es_rollo():
tipo = "%.2fx%d" % (pv.camposEspecificosRollo.ancho,
pv.camposEspecificosRollo.metrosLineales)
elif pv.es_bala() or pv.es_bigbag() or pv.es_caja():
tipo = pv.camposEspecificosBala.color
else:
self.logger.error("El artículo %d no es bala, ni rollo, "
"ni fibra de cemento en bigbag ni fibra "
"de cemento embolsada." % (
ldv['articulos'][0].id))
return tipo
def get_lotes_o_partidas(self, articulos):
lotes = []
for articulo in articulos:
if articulo.productoVenta.es_bala():
try:
#codlote = articulo.bala.lote.numlote
codlote = articulo.bala.lote.codigo
except AttributeError:
self.logger.error("El artículo ID %d es fibra pero no tiene lote." % (articulo.id), exc_info = True)
codlote = None
if codlote != None and codlote not in lotes:
lotes.append(codlote)
elif articulo.productoVenta.es_rollo():
try:
#codlote = articulo.rollo.partida.numpartida
codlote = articulo.rollo.partida.codigo
except AttributeError:
self.logger.error("El artículo ID %d es geotextil pero no tiene partida." % (articulo.id), exc_info = True)
codlote = None
if codlote != None and codlote not in lotes:
lotes.append(codlote)
elif articulo.productoVenta.es_bigbag():
try:
codlote = articulo.bigbag.loteCem.codigo
except AttributeError:
self.logger.error("El artículo ID %d es geocem pero no tiene lote." % (articulo.id), exc_info = True)
codlote = None
if codlote != None and codlote not in lotes:
lotes.append(codlote)
elif articulo.productoVenta.es_caja():
try:
codlote = articulo.caja.pale.partidaCem.codigo
except AttributeError:
self.logger.error("El artículo ID %d es fibra embolsada pero no tiene partida." % (articulo.id), exc_info = True)
codlote = None
if codlote != None and codlote not in lotes:
lotes.append(codlote)
else:
self.logger.error("El artículo ID %d no es ni fibra ni geotextil (ni geocompuesto)." % (articulo.id))
try:
res = ', '.join(["%d" % (lote) for lote in lotes])
except TypeError:
res = ', '.join([lote for lote in lotes])
return res
def cambiar_cantidad_srv(self, cell, path, texto):
model = self.wids['tv_servicios'].get_model()
idsrv = model[path][-1]
#srv = pclases.Servicio.get(idsrv)
srv = pclases.getObjetoPUID(idsrv)
try:
srv.cantidad = utils._float(texto)
srv.syncUpdate()
# self.rellenar_servicios()
model[path][0] = srv.cantidad
model[path][4] = srv.precio * (1.0 - srv.descuento) * srv.cantidad
self.modificado = True
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO",
texto = 'Formato numérico incorrecto',
padre = self.wids['ventana'])
def cambiar_precio_srv(self, cell, path, texto):
model = self.wids['tv_servicios'].get_model()
idsrv = model[path][-1]
#srv = pclases.Servicio.get(idsrv)
srv = pclases.getObjetoPUID(idsrv)
try:
srv.precio = utils._float(texto)
self.modificado = True
self.rellenar_servicios()
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO",
texto = 'Formato numérico incorrecto',
padre = self.wids['ventana'])
def cambiar_descuento_srv(self, cell, path, texto):
model = self.wids['tv_servicios'].get_model()
idsrv = model[path][-1]
#srv = pclases.Servicio.get(idsrv)
srv = pclases.getObjetoPUID(idsrv)
try:
srv.descuento = utils.parse_porcentaje(texto)
if srv.descuento > 1.0:
srv.descuento /= 100.0
self.rellenar_servicios()
self.modificado = True
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO",
texto = 'Formato numérico incorrecto',
padre = self.wids['ventana'])
def rellenar_servicios(self):
model = self.wids['tv_servicios'].get_model()
model.clear()
for servicio in self.objeto.servicios:
model.append((servicio.cantidad,
servicio.concepto,
servicio.precio,
servicio.descuento,
servicio.precio * (1.0 - servicio.descuento) * servicio.cantidad,
servicio.get_puid()))
def cambiar_concepto_srv(self, cell, path, texto):
model = self.wids['tv_servicios'].get_model()
idsrv = model[path][-1]
#srv = pclases.Servicio.get(idsrv)
srv = pclases.getObjetoPUID(idsrv)
srv.concepto = texto
self.modificado = True
self.rellenar_servicios()
def crear_servicio(self):
        # Data to ask for: concept, discount and price... Bah, let them
        # change the discount in the TreeView.
concepto = utils.dialogo_entrada(titulo = "CONCEPTO",
texto = 'Introduzca el concepto para el transporte:',
padre = self.wids['ventana'])
if concepto != None:
precio = utils.dialogo_entrada(titulo = "PRECIO",
texto = 'Introduzca el precio unitario sin IVA:',
padre = self.wids['ventana'])
if precio != None:
try:
precio = utils._float(precio)
servicio = pclases.Servicio(facturaVenta = None,
albaranSalida = self.objeto,
concepto = concepto,
precio = precio,
descuento = 0)
                    # cantidad defaults to 1.
self.modificado = True
except Exception, e:
utils.dialogo_info(titulo = "ERROR",
texto = """
Ocurrió un error al crear el servicio.
Asegúrese de haber introducido correctamente los datos,
especialmente el precio (que no debe incluir símbolos
monetarios), y vuelva a intentarlo.
DEBUG: %s
""" % (e),
padre = self.wids['ventana'])
raise e
return
self.rellenar_servicios()
def add_srv(self, boton):
if (self.objeto.cliente
and self.objeto.cliente.calcular_credito_disponible() <= 0):
utils.dialogo_info(titulo = "CLIENTE SIN CRÉDITO",
texto = "El cliente ha sobrepasado el "
"crédito concedido.",
padre = self.wids['ventana'])
return
if (self.objeto.cliente
and self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()):
frasvenc = self.objeto.cliente.get_facturas_vencidas_sin_documento_de_cobro()
utils.dialogo_info(titulo = "CLIENTE DEUDOR",
texto = "El cliente tiene %d facturas "
"vencidas sin documento de pago." % (
len(frasvenc)),
padre = self.wids['ventana'])
return
self.crear_servicio()
def drop_srv(self, boton):
if self.wids['tv_servicios'].get_selection().count_selected_rows() != 0:
model, iter = self.wids['tv_servicios'].get_selection().get_selected()
idservicio = model[iter][-1]
#servicio = pclases.Servicio.get(idservicio)
servicio = pclases.getObjetoPUID(idservicio)
servicio.albaranSalida = None
if (servicio.facturaVenta == None
and servicio.prefacturaID == None
and servicio.pedidoVenta == None):
                servicio.destroySelf() # No exception should be raised here.
self.rellenar_servicios()
self.modificado = True
def drop_transporte_a_cuenta(self, boton):
"""
Elimina el transporte a cuenta seleccionado en el treeview.
"""
if self.wids['tv_transportesACuenta'].get_selection().count_selected_rows() > 0:
model, paths = self.wids['tv_transportesACuenta'].get_selection().get_selected_rows()
for path in paths:
idtac = model[path][-1]
#tac = pclases.TransporteACuenta.get(idtac)
tac = pclases.getObjetoPUID(idtac)
tac.destroySelf()
self.rellenar_transportes_a_cuenta()
def add_transporte_a_cuenta(self, boton):
"""
Añade un nuevo transporte a cuenta al albarán actual.
"""
tac = pclases.TransporteACuenta(concepto = "Transporte pagado.",
precio = 0,
proveedor = None,
observaciones = "Introduzca el precio y empresa transportista.",
fecha = datetime.date.today(),
albaranSalidaID = self.objeto.id)
self.rellenar_transportes_a_cuenta()
def cambiar_concepto_tac(self, cell, path, texto):
model = self.wids['tv_transportesACuenta'].get_model()
idtac = model[path][-1]
#tac = pclases.TransporteACuenta.get(idtac)
tac = pclases.getObjetoPUID(idtac)
tac.concepto = texto
self.modificado = True
model[path][0] = tac.concepto
def cambiar_precio_tac(self, cell, path, texto):
try:
precio = utils._float(texto)
except ValueError:
utils.dialogo_info(titulo = "PRECIO INCORRECTO",
texto = "El texto introducido %s no es una cantidad correcta." % (texto),
padre = self.wids['ventana'])
else:
model = self.wids['tv_transportesACuenta'].get_model()
idtac = model[path][-1]
#tac = pclases.TransporteACuenta.get(idtac)
tac = pclases.getObjetoPUID(idtac)
tac.precio = precio
self.modificado = True
model[path][1] = utils.float2str(tac.precio)
def cambiar_observaciones_tac(self, cell, path, texto):
model = self.wids['tv_transportesACuenta'].get_model()
idtac = model[path][-1]
#tac = pclases.TransporteACuenta.get(idtac)
tac = pclases.getObjetoPUID(idtac)
tac.observaciones = texto
self.modificado = True
model[path][3] = tac.observaciones
def cambiar_fecha_tac(self, cell, path, texto):
model = self.wids['tv_transportesACuenta'].get_model()
idtac = model[path][-1]
#tac = pclases.TransporteACuenta.get(idtac)
tac = pclases.getObjetoPUID(idtac)
try:
fecha = utils.parse_fecha(texto)
except (ValueError, RangeError):
utils.dialogo_info(titulo = "FECHA INCORRECTA",
texto = "La fecha %s no es correcta." % (texto),
padre = self.wids['ventana'])
else:
tac.fecha = fecha
self.modificado = True
model[path][4] = utils.str_fecha(tac.fecha)
def cambiar_proveedor_tac(self, cell, path, texto):
model = self.wids['tv_transportesACuenta'].get_model()
idtac = model[path][-1]
#tac = pclases.TransporteACuenta.get(idtac)
tac = pclases.getObjetoPUID(idtac)
if texto.strip() == "":
tac.proveedor = None
model[path][2] = ""
else:
proveedor = buscar_proveedor(texto, self.wids['ventana'])
if proveedor != None:
tac.proveedor = proveedor
self.modificado = True
model[path][2] = tac.proveedor.nombre
# else:
# utils.dialogo_info(titulo = "PROVEEDOR NO ENCONTRADO",
# texto = "El proveedor del servicio o transporte debe estar dado de alta.\nCierre esta ventana, cree el proveedor y vuelva a intentarlo.",
# padre = self.wids['ventana'])
def rellenar_transportes_a_cuenta(self):
model = self.wids['tv_transportesACuenta'].get_model()
model.clear()
for tac in self.objeto.transportesACuenta:
precio = utils.float2str(tac.precio)
proveedor = tac.proveedor and tac.proveedor.nombre or ""
try:
numfactura = tac.serviciosTomados[0].facturaCompra.numfactura
except (IndexError, AttributeError):
numfactura = ""
model.append((tac.concepto,
precio,
proveedor,
tac.observaciones,
utils.str_fecha(tac.fecha),
numfactura,
tac.get_puid()))
    # XXX Automatic invoicing part:
def get_siguiente_numfactura(self, cliente):
"""
Consulta el registro contador del cliente.
Si no tiene, devuelve None.
En otro caso, devuelve el número compuesto
por el contador+1 más el prefijo y sufijo
que indique el registro.
"""
# NOTA: Calcado de facturas_venta.py. Si cambio algo aquí, cambiar allí y viceversa.
numfactura = None
if cliente.contador != None:
cliente.contador.sync()
numfactura = "%s%04d%s" % (cliente.contador.prefijo, cliente.contador.contador, cliente.contador.sufijo)
        # The number between prefix and suffix is padded to at least 4 digits.
return numfactura
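    # Added sketch with hypothetical values (not from the real DB): a
    # counter with prefijo "A", contador 37 and sufijo "/09" would yield
    # the following number (zero-padded to at least 4 digits):
    #     >>> "%s%04d%s" % ("A", 37, "/09")
    #     'A0037/09'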
def generar_factura(self):
"""
Crea una factura de venta con el contenido del albarán.
1.- Se comprueba que no se hayan facturado ya las LDVs.
2.- Las que no han sido facturadas, se facturan.
"""
factura = None
(fras, ldvs_facturadas, srvs_facturados, ldvs_facturables,
srvs_facturables) = self.init_structs_factura()
if fras != []:
ldvs_facturadas = "\n".join(["Venta de %s en factura %s." %
(ldv.producto.descripcion,
((ldv.facturaVenta and ldv.facturaVenta.numfactura) or
(ldv.prefactura and ldv.prefactura.numfactura) or "")
) for ldv in ldvs_facturadas])
srvs_facturados = "\n".join(["%s en factura %s." %
(srv.concepto,
((srv.facturaVenta and srv.facturaVenta.numfactura) or
(srv.prefactura and srv.prefactura.numfactura) or "")
) for srv in srvs_facturados])
utils.dialogo_info(titulo = "VENTAS FACTURADAS",
texto = "Algunas salidas ya han sido facturadas:\n%s\n%s" % (
ldvs_facturadas, srvs_facturados),
padre = self.wids['ventana'])
if ldvs_facturables == [] and srvs_facturables == []:
utils.dialogo_info(titulo = "NO SE PUEDE GENERAR FACTURA",
texto="Todas las salidas del albarán han sido ya facturadas.",
padre = self.wids['ventana'])
ok = False
else:
cliente = self.objeto.cliente
if cliente == None:
utils.dialogo_info(titulo = "ERROR",
texto = "Albarán sin cliente.",
padre = self.wids['ventana'])
ok = False
return ok, None
numfactura = self.get_siguiente_numfactura(cliente)
if numfactura == None:
utils.dialogo_info(titulo = "ERROR",
texto = "Cliente sin contador.",
padre = self.wids['ventana'])
ok = False
return ok, None
            #fecha = self.objeto.fecha # This started causing sequence
            # problems in the serial numbers: delivery notes are sometimes
            # left uninvoiced, or with later dates, to be completed in
            # Toledo. When invoicing them from the delivery notes window
            # days later, the invoices came out with the delivery note
            # date, earlier than that of the previous invoices. CWT: if we
            # always use the current date there should be no problems.
            fecha = datetime.date.today()
ultima_factura, ok = chequear_restricciones_nueva_factura(cliente,
numfactura,
fecha)
if ok:
contador = self.objeto.cliente.contador
numfactura2 = contador.get_and_commit_numfactura()
try:
assert numfactura == numfactura2, "Número de factura precalculado y obtenido al actualizar el contador difieren: %s != %s." % (numfactura, numfactura2)
except AssertionError, msg:
self.logger.error("%salbaranes_salida::generar_factura -> Error al actualizar contador (probablemente debido a concurrencia): %s" % (self.usuario and self.usuario.usuario or "", msg))
utils.dialogo_info(titulo = "ERROR",
texto = "Error al calcular el número de factura. Vuel"
"va a intentarlo o contacte con el administra"
"dor.",
padre = self.wids['ventana'])
return False, None
else:
try:
contador = self.objeto.cliente.contador
numfactura = probar_siguientes(contador, cliente, fecha)
except:
numfactura = None
if numfactura is None:
utils.dialogo_info(titulo = "ERROR",
texto = "Número y fecha no satisfacen restricciones "
"de secuencialidad o número de factura ya ex"
"iste. Compruebe contadores.",
padre = self.wids['ventana'])
ok = False
return ok, None
iva = cliente.get_iva_norm()
if len(ldvs_facturables) > 0:
try:
descuento = ldvs_facturables[0].pedidoVenta.descuento
                    # The invoice discount is the one from the pedido of
                    # the first sale line. If there are several, they are
                    # all expected to belong to the same pedido and share
                    # the same discount.
                except AttributeError: # pedidoVenta is None.
self.logger.warning("albaranes_de_salida.py: "
"Línea de venta con ID %d no tiene pedido de venta."
" ¿De dónde viene entonces?" % (
ldvs_facturables[0].id))
descuento = 0
else:
descuento = 0
try:
irpf = pclases.DatosDeLaEmpresa.select()[0].irpf
except (IndexError, AttributeError), msg:
self.logger.error("facturas_compra::crear_nueva_factura ->"
" No se encontraron los datos de la empresa."
" Excepción: %s" % (msg))
irpf = 0.0
factura = pclases.FacturaVenta(fecha = fecha,
numfactura = numfactura,
cliente = cliente,
iva = iva,
cargo = 0,
bloqueada = False,
descuento = descuento,
irpf = irpf)
for ldv in ldvs_facturables:
ldv.facturaVenta = factura
for srv in srvs_facturables:
srv.facturaVenta = factura
            # Why on earth does the client still keep the VAT as an
            # integer instead of a fraction of 1?!
utils.dialogo_info(titulo = "FACTURA GENERADA",
texto = "Factura %s generada correctamente.\n"
"A continuación se van a intentar "
"crear los vencimientos." % (
factura.numfactura),
padre = self.wids['ventana'])
ok = self.crear_vencimientos_por_defecto(factura)
ok = ok and factura.cliente.cif and factura.cliente.cif.strip()!=""
return ok, factura
def init_structs_factura(self):
fras = []
ldvs_facturables = [ldv for ldv in self.objeto.lineasDeVenta if ldv.facturaVentaID == None and ldv.prefacturaID == None]
srvs_facturables = [srv for srv in self.objeto.servicios if srv.facturaVentaID == None and srv.prefactura == None]
ldvs = [ldv for ldv in self.objeto.lineasDeVenta]
srvs = [srv for srv in self.objeto.servicios]
ldvs_facturadas = [ldv for ldv in self.objeto.lineasDeVenta if ldv.facturaVentaID != None or ldv.prefacturaID != None]
srvs_facturados = [srv for srv in self.objeto.servicios if srv.facturaVentaID != None or srv.prefacturaID != None]
for ldv in ldvs:
if ldv.facturaVentaID != None and ldv.facturaVenta not in fras:
fras.append(ldv.facturaVenta)
if ldv.prefacturaID != None and ldv.prefactura not in fras:
fras.append(ldv.prefactura)
for srv in srvs:
if srv.facturaVentaID != None and srv.facturaVenta not in fras:
fras.append(srv.facturaVenta)
if srv.prefacturaID != None and srv.prefactura not in fras:
fras.append(srv.prefactura)
return fras, ldvs_facturadas, srvs_facturados, ldvs_facturables, srvs_facturables
def borrar_vencimientos_y_estimaciones(self, factura):
for vto in factura.vencimientosCobro:
vto.factura = None
vto.destroySelf()
for est in factura.estimacionesCobro:
est.factura = None
est.destroySelf()
def rellenar_totales(self, factura):
"""
Calcula los totales de la factura a partir de
las LDVs, servicios, cargo, descuento y abonos.
"""
subtotal = self.total_ldvs(factura) + self.total_srvs(factura)
tot_dto = ffloat(-1 * (subtotal + factura.cargo) * factura.descuento)
abonos = sum([pa.importe for pa in factura.pagosDeAbono])
tot_iva = self.total_iva(factura.iva, subtotal, tot_dto, factura.cargo, abonos)
irpf = factura.calcular_total_irpf()
return self.total(subtotal, factura.cargo, tot_dto, tot_iva, abonos, irpf)
def total(self, subtotal, cargo, dto, iva, abonos, irpf):
return ffloat(subtotal + cargo + dto + iva + abonos - irpf)
def total_iva(self, iva, subtotal, tot_dto, cargo, abonos):
return ffloat(subtotal + tot_dto + cargo + abonos) * iva
def total_ldvs(self, factura):
"""
Total de las líneas de venta. Sin IVA.
"""
return sum([ffloat((l.cantidad * l.precio) * (1 - l.descuento)) for l in factura.lineasDeVenta])
def total_srvs(self, factura):
"""
Total de servicios. Sin IVA.
"""
return sum([ffloat((s.precio * s.cantidad) * (1 - s.descuento)) for s in factura.servicios])
def crear_vencimientos_por_defecto(self, factura):
"""
Crea e inserta los vencimientos por defecto
definidos por el cliente en la factura
actual y en función de las LDV que tenga
en ese momento (concretamente del valor
del total de la ventana calculado a partir
de las LDV.)
"""
ok = False
# NOTA: Casi-casi igual al de facturas_venta.py. Si cambia algo importante aquí, cambiar también allí y viceversa.
cliente = factura.cliente
if cliente.vencimientos != None and cliente.vencimientos != '':
try:
vtos = cliente.get_vencimientos(factura.fecha)
except:
utils.dialogo_info(titulo = 'ERROR VENCIMIENTOS POR DEFECTO',
texto = 'Los vencimientos por defecto del cliente no se pudieron procesar correctamente.\nVerifique que están bien escritos y el formato es correcto en la ventana de clientes.',
padre = self.wids['ventana'])
                return ok # Due dates invalid, or the client has none.
self.borrar_vencimientos_y_estimaciones(factura)
total = self.rellenar_totales(factura)
numvtos = len(vtos)
try:
cantidad = total/numvtos
except ZeroDivisionError:
cantidad = total
if factura.fecha == None:
factura.fecha = time.localtime()
if cliente.diadepago != None and cliente.diadepago != '':
diaest = cliente.get_dias_de_pago()
else:
diaest = False
for incr in vtos:
fechavto = factura.fecha + (incr * datetime.timedelta(days = 1))
vto = pclases.VencimientoCobro(fecha = fechavto,
importe = cantidad,
facturaVenta = factura,
observaciones = factura.cliente and factura.cliente.textoformacobro or "",
cuentaOrigen = factura.cliente and factura.cliente.cuentaOrigen or None)
if diaest:
                # XXX 24/05/06
                # This is trickier than it looks at first sight. When
                # inspiration runs low... BRUTE FORCE!
fechas_est = []
for dia_estimado in diaest:
while True:
try:
fechaest = datetime.date(day = dia_estimado, month = fechavto.month, year = fechavto.year)
break
except:
dia_estimado -= 1
if dia_estimado <= 0:
dia_estimado = 31
if fechaest < fechavto: # El día estimado cae ANTES del día del vencimiento.
# No es lógico, la estimación debe ser posterior.
# Cae en el mes siguiente, pues.
mes = fechaest.month + 1
anno = fechaest.year
if mes > 12:
mes = 1
anno += 1
try:
fechaest = datetime.date(day = dia_estimado, month = mes, year = anno)
except ValueError:
                            # Commerce law says to move to the last day of the month:
fechaest = utils.last_day_of(mes, anno)
fechas_est.append(fechaest)
fechas_est.sort(utils.cmp_DateTime)
fechaest = fechas_est[0]
vto.fecha = fechaest
ok = True
else:
utils.dialogo_info(titulo = "SIN DATOS",
texto = "El cliente no tiene datos suficientes para crear vencimientos por defecto.",
padre = self.wids['ventana'])
return ok
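    # --- Added sketch (hypothetical helper, not called anywhere) ---
    # The brute-force block above boils down to: clamp the client's
    # preferred pay day to a valid date in the due date's month and, if
    # that date would fall before the due date, roll it into the
    # following month (clamping again).
    def _fecha_estimada_sketch(self, dia_pago, fechavto):
        import calendar
        import datetime
        # Clamp the preferred day to the length of the due date's month.
        ultimo = calendar.monthrange(fechavto.year, fechavto.month)[1]
        fechaest = fechavto.replace(day = min(dia_pago, ultimo))
        if fechaest < fechavto:
            # Estimations must not precede the due date: next month.
            mes, anno = fechavto.month + 1, fechavto.year
            if mes > 12:
                mes, anno = 1, anno + 1
            ultimo = calendar.monthrange(anno, mes)[1]
            fechaest = datetime.date(anno, mes, min(dia_pago, ultimo))
        return fechaest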
def descargar_de_terminal(self, boton):
"""
Lee los códigos almacenados en el terminal de códigos de barras
y los introduce en el albarán actual (siempre que los datos del
lector no incluyan número de albarán o éste sea igual al del
albarán actual) de la misma forma que si se teclearan manualmente.
"""
self.logger.warning("%salbaranes_de_salida -> Iniciando descargar_de_terminal (salida de artículos automática)" % (self.usuario and self.usuario.usuario + ": " or ""))
        # TODO: A progress window or something is needed: unloading 130
        # rolls kept the window blank for at least a minute.
datos = None
cancelar = False
while datos == None and not cancelar:
datos = utils.descargar_phaser(logger = self.logger)
if datos == None:
cancelar = not utils.dialogo(titulo = "¿VOLVER A INTENTAR?",
texto = "Se ha superado el tiempo de espera.\n¿Desea continuar?\n\n(Pulse SÍ para volver a intentar o NO para cancelar la operación.)",
padre = self.wids['ventana'])
elif isinstance(datos, (type([]), type(()))):
self.descargar_y_meter_articulos_en_albaran_actual(datos)
elif isinstance(datos, type({})):
for albaran in datos:
if albaran == self.objeto:
self.descargar_y_meter_articulos_en_albaran_actual(datos[albaran])
else:
self.logger.warning("Albarán actual: %s. Albarán descargado: %s. IGNORO ALBARÁN." % (self.objeto.numalbaran, albaran.numalbaran))
utils.dialogo_info(titulo = "ALBARÁN INCORRECTO",
texto = "El albarán descargado (%s) no coincide con el albarán actual en ventana (%s).\nSe ignorará.\n\nNo borre la memoria del terminal y realice la descarga en el albarán correcto.\n\n\nSi ha leído más de un albarán en el terminal, a continuación se intentarán descargar también.",
padre = self.wids['ventana'])
def descargar_y_meter_articulos_en_albaran_actual(self, datos):
"""
Datos es una lista de objetos bala, bigbag o rollo.
Los lee e introduce en el albarán actual
"""
articulos = []
articulos_baja_calidad = []
for bala_o_rollo_o_bb in datos:
articulo = bala_o_rollo_o_bb.articulo
if articulo.es_de_baja_calidad():
articulos_baja_calidad.append(articulo)
articulos.append(articulo)
texto = """
Los siguientes artículos se han considerado que son
de baja calidad. ¿Continuar?
%s
""" % ("\n".join([a.codigo for a in articulos_baja_calidad]))
if articulos_baja_calidad == [] or utils.dialogo(titulo = "ARTÍCULOS DE BAJA CALIDAD", texto = texto, padre = self.wids['ventana']):
            self.crear_ldv(articulos) # Go figure: it does not actually
            # create anything, it links articulos to the delivery note.
self.actualizar_ventana()
def get_str_pedidos_albaranes(factura):
"""
Devuelve una cadena con los pedidos y albaranes entre paréntesis de las
LDV de la factura.
"""
peds = {'-': []}
for ldv in factura.lineasDeVenta:
if ldv.pedidoVenta == None and ldv.albaranSalida != None:
peds['-'].append(ldv.albaranSalida.numalbaran)
elif ldv.pedidoVenta != None:
if ldv.pedidoVenta.numpedido not in peds:
peds[ldv.pedidoVenta.numpedido] = []
if ldv.albaranSalida != None:
if not ldv.albaranSalida.numalbaran in peds[ldv.pedidoVenta.numpedido]:
peds[ldv.pedidoVenta.numpedido].append(ldv.albaranSalida.numalbaran)
pedsalbs = ""
for p in peds:
if p == '-' and peds[p] == []:
continue
pedsalbs += "%s(%s) " % (p, ','.join(peds[p]))
return pedsalbs
def imprimir_factura(factura, usuario = None, abrir = True, es_copia = False):
"""
Imprime una factura generando el PDF tal y como se hace
desde la ventana facturas_venta.py.
NOTA: usuario se pasaba con intención de abrir la ventana
de facturas desde aquí. Actualmente ya no se usa.
Si "abrir" es True, después de generar el PDF lo abre con
el visor predeterminado.
"""
cliente = {'numcli':str(factura.cliente.id),
'nombre':factura.cliente.nombre,
'nombref':factura.cliente.nombref,
'cif':factura.cliente.cif,
'direccion':factura.cliente.direccion,
'cp':factura.cliente.cp,
'localidad':factura.cliente.ciudad,
'provincia':factura.cliente.provincia,
'pais':factura.cliente.pais,
'telf':factura.cliente.telefono,
'fax':'',
'direccionf':factura.cliente.direccionfacturacion,
'cpf':factura.cliente.cpfacturacion,
'localidadf':factura.cliente.ciudadfacturacion,
'provinciaf':factura.cliente.provinciafacturacion,
'paisf':factura.cliente.paisfacturacion}
numpeds = get_str_pedidos_albaranes(factura)
facdata = {'facnum':factura.numfactura,
'fecha':utils.str_fecha(factura.fecha),
'pedido':numpeds,
'albaranes':'',
'observaciones': factura.observaciones}
lineas = []
lineasdeventa = [ldv for ldv in factura.lineasDeVenta]
lineasdeventa.sort(utils.f_sort_id)
for l in lineasdeventa:
linea = {'codigo':l.producto.codigo,
'cantidad':l.cantidad,
'descripcion':l.producto.descripcion,
'precio': l.precio,
'descuento':str(l.descuento),
'unidad': l.producto.get_str_unidad_de_venta()}
lineas.append(linea)
if factura.cliente.pais.upper().replace(' ','') != 'ESPAÑA':
#arancel_lista = [ldv.productoVenta.arancel
# for ldv in factura.lineasDeVenta
# if ldv.productoVenta
# and ldv.productoVenta.arancel != ""
# and ldv.productoVenta.arancel != None]
arancel_lista = []
        # WATCH OUT: NOTE: the tariff code is always the same. Show the
        # one of the first article found with arancel != "".
if arancel_lista != []:
arancel = arancel_lista[0]
else:
arancel = None
else:
arancel = None
for l in factura.servicios:
descripcion = l.concepto
linea = {'codigo': "",
'cantidad': l.cantidad,
'descripcion': descripcion,
'precio': l.precio,
'descuento': str(l.descuento),
"unidad": ""}
lineas.append(linea)
vtos = factura.vencimientosCobro[:]
vtos.sort(utils.cmp_fecha_id)
fechasVencimiento = []
documentosDePago = []
for vto in vtos:
fechasVencimiento.append(utils.str_fecha(vto.fecha))
if vto.cuentaOrigen:
cuenta = "a %s %s" % (vto.cuentaOrigen.banco, vto.cuentaOrigen.ccc)
else:
cuenta = ""
documentosDePago.append("%s %s" % (vto.observaciones, cuenta))
vencimiento = {'fecha': "; ".join(fechasVencimiento),
'pago': factura.cliente.vencimientos,
'documento': "; ".join(documentosDePago)}
import numerals
totalfra = factura.calcular_total()
totales = {}
totales['subtotal'] = "%s €" % (
utils.float2str(factura.calcular_subtotal(), 2))
cargo = factura.cargo
if not cargo: # Si es 0, 0.0, None o cualquier cosa de estas...
cargo = None
totales['cargo'] = cargo
descuento = factura.calcular_total_descuento()
if not descuento:
descuento = None
else:
descuento = "%s (%s %%)" % (utils.float2str(descuento),
utils.float2str(factura.descuento*100, 0))
totales['descuento'] = descuento
totales['iva'] = "%s %%" % (utils.float2str(factura.iva * 100, 0))
totales['totaliva'] = "%s €"%utils.float2str(factura.calcular_total_iva())
totales['total'] = "%s €" % (utils.float2str(totalfra, 2))
totales['irpf'] = "%s %%" % (utils.float2str(factura.irpf * 100, 0))
totales['totirpf'] = "%s €"%utils.float2str(factura.calcular_total_irpf())
texto = numerals.numerals(totalfra, moneda = "euros",
fraccion = "céntimos").upper()
if pclases.config.get_multipagina():
try:
import factura_multipag
except ImportError:
sys.path.append(os.path.join("..", "informes"))
import factura_multipag
nomarchivo = factura_multipag.go_from_facturaVenta(factura)
else:
nomarchivo = geninformes.factura(cliente,
facdata,
lineas,
arancel,
vencimiento,
texto,
totales,
es_copia = es_copia)
if abrir:
import informes
informes.abrir_pdf(nomarchivo)
return nomarchivo
def buscar_proveedor(nombre, ventana_padre = None):
"""
Busca un proveedor por su nombre. Si no lo encuentra solo con el
parámetro recibido o si encuentra más de uno, muestra una ventana
con los resultados para que el usuario elija uno de ellos.
Devuelve el proveedor seleccionado o None.
"""
proveedor = None
proveedores = pclases.Proveedor.select(pclases.Proveedor.q.nombre.contains(nombre))
numresultados = proveedores.count()
if numresultados == 0:
proveedores = pclases.Proveedor.select()
if numresultados != 1:
filas_res = [(p.id, p.nombre, p.cif) for p in proveedores]
idproveedor = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione proveedor',
cabeceras = ('ID', 'Nombre', 'C.I.F.'),
padre = ventana_padre)
if idproveedor > 0:
proveedor = pclases.Proveedor.get(idproveedor)
elif numresultados == 1:
proveedor = proveedores[0]
return proveedor
def buscar_cliente(nombre, ventana_padre = None):
"""
Busca un cliente por su nombre. Si no lo encuentra solo con el
parámetro recibido o si encuentra más de uno, muestra una ventana
con los resultados para que el usuario elija uno de ellos.
Devuelve el cliente seleccionado o None.
"""
cliente = None
clientes = pclases.Cliente.select(pclases.Cliente.q.nombre.contains(nombre))
numresultados = clientes.count()
if numresultados == 0:
clientes = pclases.Cliente.select()
if numresultados != 1:
filas_res = [(p.id, p.nombre, p.cif) for p in clientes]
idcliente = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione cliente',
cabeceras = ('ID', 'Nombre', 'C.I.F.'),
padre = ventana_padre)
if idcliente > 0:
cliente = pclases.Cliente.get(idcliente)
elif numresultados == 1:
cliente = clientes[0]
return cliente
def ajustar_existencias(ldv, cantidad_anterior = None):
"""
Si la LDV es de un producto de compra o de un producto especial,
ajusta las existencias del mismo y le resta la cantidad que
sale del almacén en la LDV.
Si el parámetro opcional cantidad_anterior es distinto de None (se
usa al cambiar la cantidad de la LDV con posterioridad) la cantidad
a restar a las existencias es la cantidad actual menos la cantidad_anterior.
Siempre ajusta las existencias aunque el campo controlExistencias esté
a False y el producto es un producto de compra, solo que en ese caso
se ignoran las existencias en el resto del programa; pero ajustarlas las
ajusta.
"""
producto = ldv.get_producto()
cantidad = ldv.cantidad
if cantidad_anterior != None:
cantidad = cantidad - cantidad_anterior
    if cantidad != 0: # Avoid useless traffic: if the product stock
                      # does not change, leave it untouched.
producto.sync()
if isinstance(producto, pclases.ProductoCompra):
producto.existencias -= cantidad
            # Also adjust the stock of the source warehouse.
try:
almacenorigen = ldv.albaranSalida.almacenOrigen
except AttributeError:
                # WATCH OUT: if the LDV comes from an invoice that
                # discounts stock without any delivery note involved,
                # the main warehouse is always used as source.
almacenorigen = pclases.Almacen.get_almacen_principal()
producto.add_existencias(-cantidad, almacenorigen)
if ldv.albaranSalida and ldv.albaranSalida.almacenDestino:
producto.add_existencias(cantidad,
ldv.albaranSalida.almacenDestino)
elif (isinstance(producto, pclases.ProductoVenta)
and producto.es_especial()):
producto.camposEspecificosEspecial.sync()
try:
cantidad_por_bulto = producto.stock / producto.existencias
bultos = cantidad / cantidad_por_bulto
except ZeroDivisionError:
bultos = 0
#print producto.camposEspecificosEspecial.stock, cantidad
#print producto.camposEspecificosEspecial.existencias, bultos
        # TODO: No trace of per-warehouse stock for special sale
        # products. FUUUUUUUUUUUUUUUU
producto.camposEspecificosEspecial.stock -= cantidad
producto.camposEspecificosEspecial.existencias -= int(bultos)
        # DONE: What about the bultos in the warehouses?
        # Nothing: stock is stored. Take the existencias/bultos ratio
        # and multiply it by the stock_especial existencias to get the
        # number of bultos corresponding to that stock.
producto.camposEspecificosEspecial.sync()
producto.sync()
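# Added sketch, hypothetical figures: for a special product with
# stock = 1000.0 (kg) spread over existencias = 40 bultos, each bulto
# holds 25 kg, so a sale of cantidad = 100.0 kg discounts 4 bultos:
#     >>> cantidad_por_bulto = 1000.0 / 40
#     >>> 100.0 / cantidad_por_bulto
#     4.0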
def chequear_restricciones_nueva_factura(cliente, numfactura, fecha):
"""
Devuelve la última factura de la serie del cliente recibido y un booleano
que valdrá True si pasa las restricciones o False si no las cumple.
Las restricciones son:
1.- El número de factura no puede ser inferior al de la última
factura existente de la serie (que debería ser el contador -1)
2.- El número de factura no puede estar repetido.
3.- La fecha no debe ser inferior a la de la última factura
existente de la serie.
"""
# NOTA: Calcado (casi) de facturas_venta.py. Si cambio
# algo *significativo* aquí, cambiar allí y viceversa.
ultima_factura = None
ok = False
FV = pclases.FacturaVenta
if (FV.select(FV.q.numfactura == numfactura).count() == 0
and cliente.contador != None):
clientes = [str(c.id) for c in cliente.contador.clientes]
clientes = ','.join(clientes)
facturas = pclases.FacturaVenta.select("cliente_id IN (%s)"%clientes)
ok = True
# Any better?
facturas = [f for f in facturas
if f.numfactura.startswith(cliente.contador.prefijo)
and f.numfactura.endswith(cliente.contador.sufijo)]
facturas.sort(lambda f1, f2: f1.get_numero_numfactura() \
- f2.get_numero_numfactura())
if facturas:
ultima_factura = facturas[-1]
try:
numero = int(numfactura.replace(cliente.contador.prefijo, \
'').replace(cliente.contador.sufijo, ''))
except:
ok = False
ok = ok and numero > ultima_factura.get_numero_numfactura()
numero_repetido = pclases.FacturaVenta.select(\
pclases.FacturaVenta.q.numfactura == numfactura).count()
ok = ok and not numero_repetido
ok = ok and datetime.date(fecha) >= ultima_factura.fecha
else:
ultima_factura = None
return ultima_factura, ok
def probar_siguientes(c, cliente, fecha, rango = 5):
"""
Recibe un contador y devuelve un número de factura válido
dentro de los siguientes 5 números (por defecto).
Si se encuentra, actualiza el contador y lo devuelve.
En caso de que no se encuentre, devuelve None.
"""
numfactura = None
for i in range(1, rango):
tmpnumfactura = c.get_next_numfactura(inc = i)
        # The helper returns (ultima_factura, ok), in that order.
        ult_factura, ok = chequear_restricciones_nueva_factura(cliente,
                                                               tmpnumfactura,
                                                               fecha)
if ok:
numfactura = c.get_next_numfactura(commit = True, inc = i)
break
return numfactura
if __name__=='__main__':
pclases.DEBUG = True
try:
raise ZeroDivisionError, "Que estoy probando ahora otra cosa, leñe."
a = AlbaranesDeSalida(
objeto = pclases.AlbaranSalida.select(
pclases.AlbaranSalida.q.numalbaran == "11590",
orderBy = "-id")[0])
except ZeroDivisionError:
a = AlbaranesDeSalida()
#a = AlbaranesDeSalida()
|
Enlik/entropy
|
refs/heads/master
|
lib/entropy/dep.py
|
5
|
# -*- coding: utf-8 -*-
# Entropy miscellaneous tools module
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy dependency functions module}.
This module contains Entropy package dependency manipulation functions.
"""
import re
from entropy.exceptions import InvalidAtom, EntropyException
from entropy.const import etpConst, const_cmp
# Imported from Gentoo portage_dep.py
# Copyright 1999-2010 Gentoo Foundation
# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_cat = r'[\w+][\w+.-]*'
# 2.1.2 A package name may contain any of the characters [A-Za-z0-9+_-].
# It must not begin with a hyphen,
# and must not end in a hyphen followed by one or more digits.
_pkg = r'[\w+][\w+-]*?'
_v = r'(cvs\.)?(\d+)((\.\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\d*)*)'
_rev = r'\d+'
_vr = _v + '(-r(' + _rev + '))?'
_cp = '(' + _cat + '/' + _pkg + '(-' + _vr + ')?)'
_cpv = '(' + _cp + '-' + _vr + ')'
_pv = '(?P<pn>' + _pkg + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?'
ver_regexp = re.compile("^" + _vr + "$")
suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
valid_category = re.compile("^\w[\w-]*")
invalid_atom_chars_regexp = re.compile("[()|@]")
def _ververify(myver):
if myver.endswith("*"):
m = ver_regexp.match(myver[:-1])
else:
m = ver_regexp.match(myver)
if m:
return True
return False
_pv_re = re.compile('^' + _pv + '$', re.VERBOSE)
def _pkgsplit(mypkg):
"""
@param mypkg: pv
@return:
1. None if input is invalid.
2. (pn, ver, rev) if input is pv
"""
m = _pv_re.match(mypkg)
if m is None:
return None
if m.group('pn_inval') is not None:
# package name appears to have a version-like suffix
return None
rev = m.group('rev')
if rev is None:
rev = '0'
rev = 'r' + rev
return (m.group('pn'), m.group('ver'), rev)
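# Added usage sketch (doctest-style, not in the original module):
#     >>> _pkgsplit('test-1.2.3_rc1-r2')
#     ('test', '1.2.3_rc1', 'r2')
#     >>> _pkgsplit('test') is None   # no version part
#     True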
def _generic_sorter(inputlist, cmp_func):
inputs = inputlist[:]
if len(inputs) < 2:
return inputs
max_idx = len(inputs)
while True:
changed = False
for idx in range(max_idx):
second_idx = idx+1
if second_idx == max_idx:
continue
str_a = inputs[idx]
str_b = inputs[second_idx]
if cmp_func(str_a, str_b) < 0:
inputs[idx] = str_b
inputs[second_idx] = str_a
changed = True
if not changed:
break
return inputs
def isjustname(mypkg):
"""
Checks to see if the depstring is only the package name (no version parts)
Example usage:
>>> isjustname('media-libs/test-3.0')
False
>>> isjustname('test')
True
>>> isjustname('media-libs/test')
True
@param mypkg: the package atom to check
    @type mypkg: string
    @rtype: bool
    @return: True if the package string is just the package name
"""
    # Force an explicit "-r0" so the version+revision tail below is
    # well-formed even for atoms without a revision part (e.g. "1.2.3"
    # vs "1.2.3-r1").
rev = dep_get_spm_revision(mypkg)
if rev == "r0":
mypkg += "-r0"
ver_rev = '-'.join(mypkg.split('-')[-2:])
return not _ververify(ver_rev)
def catpkgsplit(mydata):
"""
Takes a Category/Package-Version-Rev and returns a list of each.
@param mydata: data to split
@type mydata: string
    @rtype: tuple
@return:
1. If each exists, it returns (cat, pkgname, version, rev)
    2. If cat is not specified in mydata, cat will be "null"
3. if rev does not exist it will be '-r0'
"""
    # Categories may contain a-zA-Z0-9+_- but cannot start with -
mysplit = mydata.split("/")
p_split = None
if len(mysplit) == 1:
retval = ("null",)
p_split = _pkgsplit(mydata)
elif len(mysplit) == 2:
retval = (mysplit[0],)
p_split = _pkgsplit(mysplit[1])
if not p_split:
return None
retval += p_split
return retval
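# Added usage sketch (doctest-style):
#     >>> catpkgsplit('app-misc/foo-1.2.3-r1')
#     ('app-misc', 'foo', '1.2.3', 'r1')
#     >>> catpkgsplit('foo-1.2.3')   # no category -> "null"
#     ('null', 'foo', '1.2.3', 'r0')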
def dep_getkey(mydep):
"""
Return the category/package-name of a depstring.
Example usage:
>>> dep_getkey('media-libs/test-3.0')
'media-libs/test'
@param mydep: the depstring to retrieve the category/package-name of
@type mydep: string
@rtype: string
@return: the package category/package-version
"""
if not mydep:
return mydep
mydep = remove_tag(mydep)
mydep = remove_usedeps(mydep)
mydep = dep_getcpv(mydep)
if mydep and (not isjustname(mydep)):
mysplit = catpkgsplit(mydep)
if not mysplit:
return mydep
return mysplit[0] + "/" + mysplit[1]
return mydep
def dep_getcat(mydep):
"""
Extract package category from dependency.
"""
return dep_getkey(mydep).split("/")[0]
def remove_cat(mydep):
"""
Drop category part from dependency, if any.
"""
if "/" in mydep:
return mydep.split("/", 1)[1]
return mydep
def dep_getcpv(mydep):
"""
Return the category-package-version with any operators/slot specifications stripped off
Example usage:
>>> dep_getcpv('>=media-libs/test-3.0')
'media-libs/test-3.0'
@param mydep: the depstring
@type mydep: string
@rtype: string
@return: the depstring with the operator removed
"""
if mydep and mydep[0] == "*":
mydep = mydep[1:]
if mydep and mydep[-1] == "*":
mydep = mydep[:-1]
if mydep and mydep[0] == "!":
mydep = mydep[1:]
if mydep[:2] in [">=", "<="]:
mydep = mydep[2:]
elif mydep[:1] in "=<>~":
mydep = mydep[1:]
colon = mydep.rfind(":")
if colon != -1:
mydep = mydep[:colon]
return mydep
def dep_getslot(mydep):
"""
# Imported from portage.dep
# $Id: dep.py 11281 2008-07-30 06:12:19Z zmedico $
Retrieve the slot on a depend.
Example usage:
>>> dep_getslot('app-misc/test:3')
'3'
@param mydep: the depstring to retrieve the slot of
@type mydep: string
@rtype: string
@return: the slot
"""
mydep = remove_tag(mydep)
colon = mydep.find(":")
if colon != -1:
bracket = mydep.find("[", colon)
if bracket == -1:
return mydep[colon+1:]
else:
return mydep[colon+1:bracket]
return None
def dep_getusedeps(depend):
"""
# Imported from portage.dep
# $Id: dep.py 11281 2008-07-30 06:12:19Z zmedico $
Pull a listing of USE Dependencies out of a dep atom.
Example usage:
>>> dep_getusedeps('app-misc/test:3[foo,-bar]')
('foo', '-bar')
@param depend: The depstring to process
@type depend: String
@rtype: List
@return: List of use flags ( or [] if no flags exist )
"""
use_list = []
open_bracket = depend.find('[')
# -1 = failure (think c++ string::npos)
comma_separated = False
bracket_count = 0
while( open_bracket != -1 ):
bracket_count += 1
if bracket_count > 1:
InvalidAtom("USE Dependency with more " + \
"than one set of brackets: %s" % (depend,))
close_bracket = depend.find(']', open_bracket )
if close_bracket == -1:
InvalidAtom("USE Dependency with no closing bracket: %s" % depend )
use = depend[open_bracket + 1: close_bracket]
# foo[1:1] may return '' instead of None, we don't want '' in the result
if not use:
InvalidAtom("USE Dependency with " + \
"no use flag ([]): %s" % depend )
if not comma_separated:
comma_separated = "," in use
if comma_separated and bracket_count > 1:
InvalidAtom("USE Dependency contains a mixture of " + \
"comma and bracket separators: %s" % depend )
if comma_separated:
for x in use.split(","):
if x:
use_list.append(x)
else:
InvalidAtom("USE Dependency with no use " + \
"flag next to comma: %s" % depend )
else:
use_list.append(use)
# Find next use flag
open_bracket = depend.find( '[', open_bracket+1 )
return tuple(use_list)
def remove_usedeps(depend):
"""
docstring_title
@param depend:
@type depend:
@return:
@rtype:
"""
new_depend = ""
skip = 0
for char in depend:
if char == "[":
skip += 1
elif char == "]":
skip -= 1
continue
if skip == 0:
new_depend += char
return new_depend
def remove_slot(mydep):
"""
# Imported from portage.dep
# $Id: dep.py 11281 2008-07-30 06:12:19Z zmedico $
Removes dep components from the right side of an atom:
* slot
* use
* repo
"""
colon = mydep.find(":")
if colon != -1:
mydep = mydep[:colon]
else:
bracket = mydep.find("[")
if bracket != -1:
mydep = mydep[:bracket]
return mydep
def remove_tag_from_slot(slot):
"""
Remove, if present, the tag part from SLOT string.
Packages append the kernel tag to the slot, by comma separating it.
"""
return slot[::-1].split(",", 1)[-1][::-1]
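# Added usage sketch: only the part after the *last* comma is dropped,
# thanks to the reverse/split/reverse trick above:
#     >>> remove_tag_from_slot("0,2.6.30-sabayon")
#     '0'
#     >>> remove_tag_from_slot("0")   # no tag part, returned unchanged
#     '0'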
# input must be a valid package version or a full atom
def remove_revision(ver):
"""
docstring_title
@param ver:
@type ver:
@return:
@rtype:
"""
myver = ver.split("-")
if myver[-1][0] == "r":
return '-'.join(myver[:-1])
return ver
def remove_tag(mydep):
"""
docstring_title
@param mydep:
@type mydep:
@return:
@rtype:
"""
colon = mydep.rfind(etpConst['entropytagprefix'])
if colon == -1:
return mydep
return mydep[:colon]
def remove_entropy_revision(mydep):
"""
docstring_title
@param mydep:
@type mydep:
@return:
@rtype:
"""
dep = remove_package_operators(mydep)
operators = mydep[:-len(dep)]
colon = dep.rfind("~")
if colon == -1:
return mydep
return operators+dep[:colon]
def dep_get_entropy_revision(mydep):
"""
docstring_title
@param mydep:
@type mydep:
@return:
@rtype:
"""
#dep = remove_package_operators(mydep)
colon = mydep.rfind("~")
if colon != -1:
myrev = mydep[colon+1:]
try:
myrev = int(myrev)
except ValueError:
return None
return myrev
return None
def dep_split_or_deps(mydep):
"""
docstring_title
@param mydep:
@type mydep:
@return:
@rtype:
"""
dep = mydep.rstrip(etpConst['entropyordepquestion'])
return dep.split(etpConst['entropyordepsep'])
dep_revmatch = re.compile('^r[0-9]')
def dep_get_spm_revision(mydep):
"""
docstring_title
@param mydep:
@type mydep:
@return:
@rtype:
"""
myver = mydep.split("-")
myrev = myver[-1]
if dep_revmatch.match(myrev):
return myrev
else:
return "r0"
def dep_get_match_in_repos(mydep):
"""
docstring_title
@param mydep:
@type mydep:
@return:
@rtype:
"""
colon = mydep.rfind(etpConst['entropyrepoprefix'])
colon_offset = 1
if colon == -1:
# try with the alternate prefix
colon = mydep.rfind(etpConst['entropyrepoprefix_alt'])
colon_offset = 2
if colon != -1:
mydata = mydep[colon+colon_offset:]
mydata = mydata.split(",")
if not mydata:
mydata = None
return mydep[:colon], mydata
else:
return mydep, None
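# Added usage sketch, assuming etpConst['entropyrepoprefix'] is "@":
#     >>> dep_get_match_in_repos("app-misc/test@repo1,repo2")
#     ('app-misc/test', ['repo1', 'repo2'])
#     >>> dep_get_match_in_repos("app-misc/test")
#     ('app-misc/test', None)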
def dep_gettag(mydep):
"""
    Retrieve the Entropy tag of a dependency.
Example usage:
>>> dep_gettag('app-misc/test#2.6.23-sabayon-r1')
'2.6.23-sabayon-r1'
"""
dep = mydep[:]
dep = remove_entropy_revision(dep)
colon = dep.rfind(etpConst['entropytagprefix'])
if colon != -1:
mydep = dep[colon+1:]
rslt = remove_slot(mydep)
return rslt
return None
def remove_package_operators(atom):
"""
docstring_title
@param atom:
@type atom:
@return:
@rtype:
"""
return atom.lstrip("><=~")
def compare_versions(ver1, ver2):
"""
docstring_title
@param ver1:
@type ver1:
@param ver2:
@type ver2:
@return:
@rtype:
"""
if ver1 == ver2:
return 0
match1 = None
match2 = None
if ver1:
match1 = ver_regexp.match(ver1)
if ver2:
match2 = ver_regexp.match(ver2)
# checking that the versions are valid
invalid = False
invalid_rc = 0
if not match1:
invalid = True
elif not match1.groups():
invalid = True
elif not match2:
invalid_rc = 1
invalid = True
elif not match2.groups():
invalid_rc = 1
invalid = True
if invalid:
return invalid_rc
# building lists of the version parts before the suffix
# first part is simple
list1 = [int(match1.group(2))]
list2 = [int(match2.group(2))]
# this part would greatly benefit from a fixed-length version pattern
if len(match1.group(3)) or len(match2.group(3)):
vlist1 = match1.group(3)[1:].split(".")
vlist2 = match2.group(3)[1:].split(".")
for i in range(0, max(len(vlist1), len(vlist2))):
            # Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
# would be ambiguous if two versions that aren't literally equal
# are given the same value (in sorting, for example).
if len(vlist1) <= i or len(vlist1[i]) == 0:
list1.append(-1)
list2.append(int(vlist2[i]))
elif len(vlist2) <= i or len(vlist2[i]) == 0:
list1.append(int(vlist1[i]))
list2.append(-1)
# Let's make life easy and use integers unless we're forced to use floats
elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
list1.append(int(vlist1[i]))
list2.append(int(vlist2[i]))
# now we have to use floats so 1.02 compares correctly against 1.1
else:
list1.append(float("0."+vlist1[i]))
list2.append(float("0."+vlist2[i]))
# and now the final letter
if len(match1.group(5)):
list1.append(ord(match1.group(5)))
if len(match2.group(5)):
list2.append(ord(match2.group(5)))
for i in range(0, max(len(list1), len(list2))):
if len(list1) <= i:
return -1
elif len(list2) <= i:
return 1
elif list1[i] != list2[i]:
return list1[i] - list2[i]
# main version is equal, so now compare the _suffix part
list1 = match1.group(6).split("_")[1:]
list2 = match2.group(6).split("_")[1:]
for i in range(0, max(len(list1), len(list2))):
if len(list1) <= i:
s1 = ("p", "0")
else:
s1 = suffix_regexp.match(list1[i]).groups()
if len(list2) <= i:
s2 = ("p", "0")
else:
s2 = suffix_regexp.match(list2[i]).groups()
if s1[0] != s2[0]:
return suffix_value[s1[0]] - suffix_value[s2[0]]
if s1[1] != s2[1]:
# it's possible that the s(1|2)[1] == ''
# in such a case, fudge it.
try:
r1 = int(s1[1])
except ValueError:
r1 = 0
try:
r2 = int(s2[1])
except ValueError:
r2 = 0
return r1 - r2
    # the suffix part is equal, so finally check the revision
if match1.group(10):
r1 = int(match1.group(10))
else:
r1 = 0
if match2.group(10):
r2 = int(match2.group(10))
else:
r2 = 0
return r1 - r2
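# Added usage sketch: the sign of the result follows ver1 - ver2.
#     >>> compare_versions("1.0", "1.0") == 0
#     True
#     >>> compare_versions("1.0", "1.0.0") < 0   # implicit .0 sorts lower
#     True
#     >>> compare_versions("1.2_rc1", "1.2") < 0   # _rc suffix sorts lower
#     True
#     >>> compare_versions("1.2-r1", "1.2") > 0    # higher revision wins
#     True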
tag_regexp = re.compile("^([A-Za-z0-9+_.-]+)?$")
def is_valid_package_tag(tag):
"""
Return whether string is a valid package tag.
@param tag: package tag to test
@type tag: string
@return: True, if valid
@rtype: bool
"""
match = tag_regexp.match(tag)
if not match:
return False
if not match.groups():
return False
return True
def entropy_compare_package_tags(tag_a, tag_b):
"""
Compare two Entropy package tags using builtin cmp().
@param tag_a: Entropy package tag
@type tag_a: string
@param tag_b: Entropy package tag
@type tag_b: string
    @return: negative number if tag_a < tag_b, positive number if
        tag_a > tag_b, zero if tag_a == tag_b.
    @rtype: int
"""
return const_cmp(tag_a, tag_b)
def sort_entropy_package_tags(tags):
"""
Return a sorted list of Entropy package tags.
@param tags: list of Entropy package tags
@type tags: list
@return: sorted list of Entropy package tags
@rtype: list
"""
return sorted(tags)
def entropy_compare_versions(ver_data, ver_data2):
"""
@description: compare two lists composed by
[version,tag,revision] and [version,tag,revision]
if ver_data > ver_data2 --> positive number
if ver_data == ver_data2 --> 0
if ver_data < ver_data2 --> negative number
@input package: ver_data[version,tag,rev] and ver_data2[version,tag,rev]
@output: integer number
"""
a_ver, a_tag, a_rev = ver_data
b_ver, b_tag, b_rev = ver_data2
# if both are tagged, check tag first
rc = 0
if a_tag and b_tag:
rc = const_cmp(a_tag, b_tag)
if rc == 0:
rc = compare_versions(a_ver, b_ver)
if rc == 0:
# check tag
tag_cmp = entropy_compare_package_tags(a_tag, b_tag)
if tag_cmp < 0:
return -1
elif tag_cmp > 0:
return 1
else:
# check rev
if a_rev > b_rev:
return 1
elif a_rev < b_rev:
return -1
return 0
return rc
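# Added usage sketch with (version, tag, revision) triples:
#     >>> entropy_compare_versions(("1.2", "", 5), ("1.2", "", 4))
#     1
#     >>> entropy_compare_versions(("1.0", "", 0), ("1.2", "", 0)) < 0
#     True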
def get_newer_version(versions):
"""
    Return a sorted list of versions, newest first.
    @param versions: input version list
    @type versions: list
    @return: version list sorted in descending order
    @rtype: list
"""
return _generic_sorter(versions, compare_versions)
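# Added usage sketch: versions come back best-first (descending):
#     >>> get_newer_version(['1.0', '2.0', '1.5'])
#     ['2.0', '1.5', '1.0']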
def get_entropy_newer_version(versions):
"""
    Sort a list of Entropy package versions, newest first.
    @param versions: list of package versions
    @type versions: list
    @return: list sorted in descending order
@rtype: list
"""
return _generic_sorter(versions, entropy_compare_versions)
sha1_re = re.compile(r"(.*)\.([a-f\d]{40})(.*)")
def get_entropy_package_sha1(package_name):
"""
Extract the SHA1 checksum from a package file name.
@param package_name: package file name
@type package_name: string
@return: the package SHA1 checksum, if any, or None
@rtype: string or None
"""
match = sha1_re.match(package_name)
if match:
groups = match.groups()
if len(groups) != 3:
return
return groups[1]
def remove_entropy_package_sha1(package_name):
"""
Remove the SHA1 checksum from a package file name.
@param package_name: package file name
@type package_name: string
"""
match = sha1_re.match(package_name)
if match:
groups = match.groups()
if len(groups) != 3:
return package_name
return groups[0]
return package_name
def create_package_filename(category, name, version, package_tag,
ext = None, revision = None,
sha1 = None):
"""
Create package filename string.
@param category: package category
@type category: string
@param name: package name
@type name: string
@param version: package version
@type version: string
@param package_tag: package tag, if any, or None
@type package_tag: string or None
@keyword ext: alternative package file extension
    @type ext: string
    @keyword revision: package revision to embed in the file name, if any
    @type revision: int
@keyword sha1: a SHA1 checksum to add to the file name
@type sha1: string
@return: package file name string
@rtype: string
"""
if package_tag:
package_tag = "%s%s" % (etpConst['entropytagprefix'], package_tag,)
else:
package_tag = ''
package_name = "%s:%s-%s" % (category, name, version,)
package_name += package_tag
if ext is None:
ext = etpConst['packagesext']
if sha1 is not None:
package_name += ".%s" % (sha1,)
if revision is not None:
package_name += "~%d" % (revision,)
package_name += ext
return package_name
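# Illustrative sketch (not part of the original module), assuming
# etpConst['entropytagprefix'] == "#" and etpConst['packagesext'] ==
# ".tbz2" (assumed values): the parts are appended in the order
# tag, SHA1, revision, extension.
def _example_create_package_filename():
    name = create_package_filename(
        "app-foo", "foo", "1.2.3", "server",
        revision = 4, sha1 = "a" * 40)
    expected = "app-foo:foo-1.2.3#server." + ("a" * 40) + "~4.tbz2"
    assert name == expected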
def create_package_relative_path(category, name, version, package_tag,
ext = None, revision = None,
sha1 = None):
"""
Create package relative path, containing the filename string.
The relative path contains a sub-directory part that is used to
distribute files across different directories (to avoid hot spots).
@param category: package category
@type category: string
@param name: package name
@type name: string
@param version: package version
@type version: string
@param package_tag: package tag, if any, or None
@type package_tag: string or None
@keyword ext: alternative package file extension
    @type ext: string
    @keyword revision: package revision to embed in the file name, if any
    @type revision: int
@keyword sha1: a SHA1 checksum to add to the file name
@type sha1: string
@return: package file name string
@rtype: string
"""
return category + "/" + create_package_filename(
category, name, version, package_tag, ext = ext,
revision = revision, sha1 = sha1)
def strip_entropy_package_extension(pkg_path):
"""
Strip entropy package file extension from package path pkg_path.
@param pkg_path: package path
@type pkg_path: string
@return: stripped package path
@rtype: string
"""
    # str.rstrip() strips a set of characters, not a suffix: it would
    # also eat trailing "t", "b", "z", "2" or "." characters. Strip the
    # extension only when it is actually present.
    ext = etpConst['packagesext']
    if pkg_path.endswith(ext):
        pkg_path = pkg_path[:-len(ext)]
    return pkg_path
def exploit_package_filename(package_name):
"""
This is the inverse function of create_package_filename, and returns
a tuple composed by category, name, version, package_tag (None if not set),
SHA1 checksum (None if not set), and additional revision (as int).
package_name should be a string like this:
    <category>:<name>-<version>[#<tag>][.<sha1>][~<revision>][<ext>]
    (matching the append order used by create_package_filename)
@param package_name: package file name
@type package_name: string
    @return: tuple of strings/int composed by (category, name, version,
        package_tag, sha1, revision)
@rtype: tuple
@raise AttributeError: if package_name string passed is improperly formatted
"""
pkg_str = strip_entropy_package_extension(package_name)
pkg_str = pkg_str.replace(":", "/")
pkg_str = strip_entropy_package_extension(pkg_str)
etp_rev = dep_get_entropy_revision(pkg_str)
pkg_str = remove_entropy_revision(pkg_str)
etp_sha1 = get_entropy_package_sha1(pkg_str)
pkg_str = remove_entropy_package_sha1(pkg_str)
etp_tag = dep_gettag(pkg_str)
pkg_str = remove_tag(pkg_str)
split_data = catpkgsplit(pkg_str)
if split_data is None:
raise AttributeError("invalid package name passed: %s" % (
package_name,))
etp_cat, etp_name, ver, rev = split_data
if rev != "r0":
ver += "-" + rev
return etp_cat, etp_name, ver, etp_tag, etp_sha1, etp_rev
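# Illustrative sketch (not part of the original module): a round trip
# through create_package_filename() and back, under the same assumed
# etpConst values as in the create_package_filename example above.
def _example_exploit_package_filename():
    name = create_package_filename(
        "app-foo", "foo", "1.2.3", "server",
        revision = 4, sha1 = "a" * 40)
    cat, pname, ver, tag, sha1, rev = exploit_package_filename(name)
    assert (cat, pname, ver, tag) == ("app-foo", "foo", "1.2.3", "server")
    assert sha1 == "a" * 40
    # rev carries the trailing "~4" revision, as parsed by
    # dep_get_entropy_revision()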
def create_package_atom_string(category, name, version, package_tag):
"""
Create Entropy package atom string.
@param category: package category
@type category: string
@param name: package name
@type name: string
@param version: package version
@type version: string
@param package_tag: package tag, if any, or None
@type package_tag: string or None
@return: package atom string
@rtype: string
"""
if package_tag:
package_tag = "%s%s" % (etpConst['entropytagprefix'], package_tag,)
else:
package_tag = ''
package_name = "%s/%s-%s" % (category, name, version,)
package_name += package_tag
return package_name
class Dependency(object):
"""
Helper class used to evaluate dependency string containing boolean
expressions such as: (dep1 & dep2) | dep 3
"""
def __init__(self, entropy_dep, entropy_repository_list):
"""
Dependency constructor.
@param entropy_dep: entropy package dependency
@type entropy_dep: string
@param entropy_repository_list: ordered list of EntropyRepositoryBase
instances
@type entropy_repository_list: list
"""
self.__entropy_repository_list = entropy_repository_list
self.__dep = entropy_dep
def get(self):
"""
        Return the encapsulated dependency string.
@rtype: string
"""
return self.__dep
    def __bool__(self):
        """
        Try to match entropy_dep and return whether the dependency is
        satisfied by any of the given repositories.
        """
        for entropy_repository in self.__entropy_repository_list:
            pkg_id, res = entropy_repository.atomMatch(self.__dep)
            if res == 0:
                return True
        return False
    # Python 2.x truth-testing protocol, same semantics as __bool__
    __nonzero__ = __bool__
def evaluate(self):
"""
        Evaluate the dependency, trying to match entropy_dep across all
        the available repositories, and return the package matches.
"""
eval_data = set()
for entropy_repository in self.__entropy_repository_list:
repo_id = entropy_repository.repository_id()
pkg_deps, res = entropy_repository.atomMatch(self.__dep,
multiMatch = True)
if res == 0:
eval_data.update((x, repo_id) for x in pkg_deps)
return eval_data
class DependencyStringParser(object):
"""
Conditional dependency string parser. It is used by Entropy dependency
matching logic to evaluate dependency conditions containing boolean
operators. Example: "( app-foo/foo & foo-misc/foo ) | foo-misc/new-foo"
Example usage (self is an EntropyRepositoryBase instance):
>>> parser = DependencyStringParser("app-foo/foo & foo-misc/foo", self)
>>> matched, outcome = parser.parse()
>>> matched
True
>>> outcome
["app-foo/foo", "foo-misc/foo"]
"""
LOGIC_AND = "&"
LOGIC_OR = "|"
class MalformedDependency(EntropyException):
"""
Raised when dependency string is malformed.
"""
def __init__(self, entropy_dep, entropy_repository_list,
selected_matches = None):
"""
DependencyStringParser constructor.
@param entropy_dep: the dependency string to parse
@type entropy_dep: string
@param entropy_repository_list: ordered list of EntropyRepositoryBase
based instances
@type entropy_repository_list: list
@keyword selected_matches: if given, it will be used in the decisional
process of selecting conditional dependencies. Generally, a list
of selected matches comes directly from user packages selection.
@type selected_matches: set
"""
self.__dep = entropy_dep
self.__entropy_repository_list = entropy_repository_list
self.__selected_matches = None
if selected_matches:
self.__selected_matches = frozenset(selected_matches)
self.__dep_cache = {}
self.__eval_cache = {}
def __clear_cache(self):
self.__dep_cache.clear()
self.__eval_cache.clear()
def __dependency(self, dep):
"""
        Helper function to make instantiating Dependency objects less
        annoying.
"""
cached = self.__dep_cache.get(dep)
if cached is not None:
return cached
obj = Dependency(dep, self.__entropy_repository_list)
self.__dep_cache[dep] = obj
return obj
def __evaluate(self, dep):
"""
        Helper function to make instantiating Dependency objects and
        retrieving match results less annoying.
"""
cached = self.__eval_cache.get(dep)
if cached is not None:
return cached
obj = Dependency(dep, self.__entropy_repository_list).evaluate()
self.__eval_cache[dep] = obj
return obj
def __split_subs(self, substring):
deep_count = 0
cur_str = ""
subs = []
for char in substring:
if char == " ":
continue
elif char == "(" and deep_count == 0:
if cur_str.strip():
subs.append(cur_str.strip())
cur_str = char
deep_count += 1
elif char == "(":
cur_str += char
deep_count += 1
elif char == self.LOGIC_OR and deep_count == 0:
if cur_str.strip():
subs.append(cur_str.strip())
subs.append(char)
cur_str = ""
elif char == self.LOGIC_AND and deep_count == 0:
if cur_str.strip():
subs.append(cur_str.strip())
subs.append(char)
cur_str = ""
elif char == ")":
cur_str += char
deep_count -= 1
if deep_count == 0:
cur_str = cur_str.strip()
deps = self.__encode_sub(cur_str)
if len(deps) == 1:
subs.append(deps[0])
elif deps:
subs.append(deps)
else:
raise DependencyStringParser.MalformedDependency()
cur_str = ""
else:
cur_str += char
if cur_str:
subs.append(cur_str.strip())
return subs
def __evaluate_subs(self, iterable):
if self.LOGIC_AND in iterable and self.LOGIC_OR in iterable:
raise DependencyStringParser.MalformedDependency(
"more than one operator in domain, not yet supported")
if self.LOGIC_AND in iterable:
iterable = [x for x in iterable if x != self.LOGIC_AND]
outcomes = []
for and_el in iterable:
if isinstance(and_el, list):
outcome = self.__evaluate_subs(and_el)
if outcome:
outcomes.extend(outcome)
else:
return []
elif self.__dependency(and_el):
outcomes.append(and_el)
else:
return []
return outcomes
elif self.LOGIC_OR in iterable:
iterable = [x for x in iterable if x != self.LOGIC_OR]
if self.__selected_matches:
# if there is something to prioritize
for or_el in iterable:
if isinstance(or_el, list):
outcome = self.__evaluate_subs(or_el)
if outcome:
difference = set(outcome) - self.__selected_matches
if not difference:
# everything matched, so this should be taken
return outcome
else: # simple string
outcome = self.__evaluate(or_el)
if outcome:
difference = set(outcome) - self.__selected_matches
if len(outcome) != len(difference):
# ok cool, got it!
return [or_el]
# no match using selected_matches priority list, fallback to
# first available.
for or_el in iterable:
if isinstance(or_el, list):
outcome = self.__evaluate_subs(or_el)
if outcome:
return outcome
elif self.__dependency(or_el):
return [or_el]
return []
# don't know what to do at the moment with this malformation
return []
def __encode_sub(self, dep):
"""
Generate a list of lists and strings from a plain dependency match
condition.
"""
open_bracket = dep.find("(")
closed_bracket = dep.rfind(")")
try:
substring = dep[open_bracket + 1:closed_bracket]
except IndexError:
raise DependencyStringParser.MalformedDependency()
if not substring:
raise DependencyStringParser.MalformedDependency()
subs = self.__split_subs(substring)
if not subs:
raise DependencyStringParser.MalformedDependency()
return subs
def parse(self):
"""
Execute the actual parsing and return the result.
        @return: tuple composed by a boolean (whether the dependency
            matched) and a list of evaluated/matched dependencies.
@rtype: tuple
@raise MalformedDependency: if dependency string is malformed
"""
self.__clear_cache()
matched = False
try:
matched_deps = self.__evaluate_subs(
self.__encode_sub("(" + self.__dep + ")"))
if matched_deps:
matched = True
except DependencyStringParser.MalformedDependency:
matched_deps = []
return matched, matched_deps
def expand_dependencies(dependencies, entropy_repository_list,
selected_matches = None):
"""
Expand a list of dependencies resolving conditional ones.
NOTE: it automatically handles dependencies metadata extended format:
[(dep, type), ...]
@param dependencies: list of raw package dependencies, as
returned by EntropyRepositoryBase.retrieveDependencies{,List}()
@type dependencies: iterable
@param entropy_repository_list: ordered list of EntropyRepositoryBase instances
used to execute the actual resolution
@type entropy_repository_list: list
@keyword selected_matches: list of preferred package matches used to
evaluate or-dependencies.
@type selected_matches: set
@return: list (keeping the iterable order when possible) of expanded
dependencies
@rtype: list
    @raise AttributeError: if the dependencies structure is unsupported
        (this function supports a list of strings, or a list of 1- or
        2-tuples)
"""
pkg_deps = []
for dep in dependencies:
dep_type = None
if isinstance(dep, tuple):
if len(dep) == 2:
dep, dep_type = dep
elif len(dep) == 1:
dep = dep[0]
else:
raise AttributeError("malformed input dependencies")
if dep.startswith("("):
_matched = False
try:
_matched, deps = DependencyStringParser(dep,
entropy_repository_list,
selected_matches = selected_matches).parse()
except DependencyStringParser.MalformedDependency:
                # malformed dependency string: add as-is
if dep_type is None:
pkg_deps.append(dep)
else:
pkg_deps.append((dep, dep_type))
continue
if not _matched:
# if not matched, hold the original
# conditional dependency.
deps = [dep]
if dep_type is None:
pkg_deps.extend(deps)
else:
pkg_deps.extend([(x, dep_type) for x in deps])
elif dep_type is None:
pkg_deps.append(dep)
else:
pkg_deps.append((dep, dep_type))
return pkg_deps
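# Illustrative sketch (not part of the original module): resolving a
# conditional dependency with a minimal stand-in repository.
# _StubRepository is hypothetical and only mimics the two
# EntropyRepositoryBase methods used above (atomMatch, repository_id);
# real callers pass actual repository instances.
class _StubRepository(object):
    def __init__(self, available):
        self._available = frozenset(available)
    def repository_id(self):
        return "stub"
    def atomMatch(self, dep, multiMatch = False):
        # pretend package ids are the dependency strings themselves
        if dep in self._available:
            if multiMatch:
                return set([dep]), 0
            return dep, 0
        if multiMatch:
            return set(), 1
        return -1, 1
def _example_expand_dependencies():
    repo = _StubRepository(["foo-misc/new-foo"])
    deps = ["( app-foo/foo & foo-misc/foo ) | foo-misc/new-foo"]
    # app-foo/foo is unavailable, so the or-branch picks new-foo
    assert expand_dependencies(deps, [repo]) == ["foo-misc/new-foo"]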
|
blacklin/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_hash.py
|
83
|
# test the invariant that
# if a==b then hash(a)==hash(b)
# (the converse need not hold: unequal objects may share a hash)
#
# Also test that hash implementations are inherited as expected
import datetime
import os
import sys
import unittest
from test.script_helper import assert_python_ok
from collections import Hashable
IS_64BIT = sys.maxsize > 2**32
def lcg(x, length=16):
"""Linear congruential generator"""
if x == 0:
return bytes(length)
out = bytearray(length)
for i in range(length):
x = (214013 * x + 2531011) & 0x7fffffff
out[i] = (x >> 16) & 0xff
return bytes(out)
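# Illustrative sketch (not part of the original test module): lcg() is a
# deterministic pseudo-random byte generator helper.
def _lcg_example():
    assert lcg(0, 8) == bytes(8)   # zero seed yields zero bytes
    assert lcg(42) == lcg(42)      # same seed, same 16-byte output
    assert len(lcg(42, 4)) == 4    # length parameter is honored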
def pysiphash(uint64):
"""Convert SipHash24 output to Py_hash_t
"""
assert 0 <= uint64 < (1 << 64)
# simple unsigned to signed int64
if uint64 > (1 << 63) - 1:
int64 = uint64 - (1 << 64)
else:
int64 = uint64
# mangle uint64 to uint32
uint32 = (uint64 ^ uint64 >> 32) & 0xffffffff
# simple unsigned to signed int32
if uint32 > (1 << 31) - 1:
int32 = uint32 - (1 << 32)
else:
int32 = uint32
return int32, int64
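# Illustrative sketch (not part of the original test module): the helper
# above reduces SipHash24's unsigned 64-bit output to the signed 32- and
# 64-bit Py_hash_t ranges.
def _pysiphash_example():
    assert pysiphash(1) == (1, 1)                # small values unchanged
    assert pysiphash(2**63) == (-2**31, -2**63)  # top bit wraps negative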
def skip_unless_internalhash(test):
"""Skip decorator for tests that depend on SipHash24 or FNV"""
ok = sys.hash_info.algorithm in {"fnv", "siphash24"}
msg = "Requires SipHash24 or FNV"
return test if ok else unittest.skip(msg)(test)
class HashEqualityTestCase(unittest.TestCase):
def same_hash(self, *objlist):
# Hash each object given and fail if
# the hash values are not all the same.
hashed = list(map(hash, objlist))
for h in hashed[1:]:
if h != hashed[0]:
self.fail("hashed values differ: %r" % (objlist,))
def test_numeric_literals(self):
self.same_hash(1, 1, 1.0, 1.0+0.0j)
self.same_hash(0, 0.0, 0.0+0.0j)
self.same_hash(-1, -1.0, -1.0+0.0j)
self.same_hash(-2, -2.0, -2.0+0.0j)
def test_coerced_integers(self):
self.same_hash(int(1), int(1), float(1), complex(1),
int('1'), float('1.0'))
self.same_hash(int(-2**31), float(-2**31))
self.same_hash(int(1-2**31), float(1-2**31))
self.same_hash(int(2**31-1), float(2**31-1))
# for 64-bit platforms
self.same_hash(int(2**31), float(2**31))
self.same_hash(int(-2**63), float(-2**63))
self.same_hash(int(2**63), float(2**63))
def test_coerced_floats(self):
self.same_hash(int(1.23e300), float(1.23e300))
self.same_hash(float(0.5), complex(0.5, 0.0))
def test_unaligned_buffers(self):
# The hash function for bytes-like objects shouldn't have
# alignment-dependent results (example in issue #16427).
b = b"123456789abcdefghijklmnopqrstuvwxyz" * 128
for i in range(16):
for j in range(16):
aligned = b[i:128+j]
unaligned = memoryview(b)[i:128+j]
self.assertEqual(hash(aligned), hash(unaligned))
_default_hash = object.__hash__
class DefaultHash(object): pass
_FIXED_HASH_VALUE = 42
class FixedHash(object):
def __hash__(self):
return _FIXED_HASH_VALUE
class OnlyEquality(object):
def __eq__(self, other):
return self is other
class OnlyInequality(object):
def __ne__(self, other):
return self is not other
class InheritedHashWithEquality(FixedHash, OnlyEquality): pass
class InheritedHashWithInequality(FixedHash, OnlyInequality): pass
class NoHash(object):
__hash__ = None
class HashInheritanceTestCase(unittest.TestCase):
default_expected = [object(),
DefaultHash(),
OnlyInequality(),
]
fixed_expected = [FixedHash(),
InheritedHashWithEquality(),
InheritedHashWithInequality(),
]
error_expected = [NoHash(),
OnlyEquality(),
]
def test_default_hash(self):
for obj in self.default_expected:
self.assertEqual(hash(obj), _default_hash(obj))
def test_fixed_hash(self):
for obj in self.fixed_expected:
self.assertEqual(hash(obj), _FIXED_HASH_VALUE)
def test_error_hash(self):
for obj in self.error_expected:
self.assertRaises(TypeError, hash, obj)
def test_hashable(self):
objects = (self.default_expected +
self.fixed_expected)
for obj in objects:
self.assertIsInstance(obj, Hashable)
def test_not_hashable(self):
for obj in self.error_expected:
self.assertNotIsInstance(obj, Hashable)
# Issue #4701: Check that some builtin types are correctly hashable
class DefaultIterSeq(object):
seq = range(10)
def __len__(self):
return len(self.seq)
def __getitem__(self, index):
return self.seq[index]
class HashBuiltinsTestCase(unittest.TestCase):
hashes_to_check = [enumerate(range(10)),
iter(DefaultIterSeq()),
iter(lambda: 0, 0),
]
def test_hashes(self):
_default_hash = object.__hash__
for obj in self.hashes_to_check:
self.assertEqual(hash(obj), _default_hash(obj))
class HashRandomizationTests:
# Each subclass should define a field "repr_", containing the repr() of
# an object to be tested
def get_hash_command(self, repr_):
return 'print(hash(eval(%a)))' % repr_
def get_hash(self, repr_, seed=None):
env = os.environ.copy()
env['__cleanenv'] = True # signal to assert_python not to do a copy
# of os.environ on its own
if seed is not None:
env['PYTHONHASHSEED'] = str(seed)
else:
env.pop('PYTHONHASHSEED', None)
out = assert_python_ok(
'-c', self.get_hash_command(repr_),
**env)
stdout = out[1].strip()
return int(stdout)
def test_randomized_hash(self):
# two runs should return different hashes
run1 = self.get_hash(self.repr_, seed='random')
run2 = self.get_hash(self.repr_, seed='random')
self.assertNotEqual(run1, run2)
class StringlikeHashRandomizationTests(HashRandomizationTests):
repr_ = None
repr_long = None
# 32bit little, 64bit little, 32bit big, 64bit big
known_hashes = {
'djba33x': [ # only used for small strings
# seed 0, 'abc'
[193485960, 193485960, 193485960, 193485960],
# seed 42, 'abc'
[-678966196, 573763426263223372, -820489388, -4282905804826039665],
],
'siphash24': [
            # NOTE: PyUCS2 layout depends on endianness
# seed 0, 'abc'
[1198583518, 4596069200710135518, 1198583518, 4596069200710135518],
# seed 42, 'abc'
[273876886, -4501618152524544106, 273876886, -4501618152524544106],
# seed 42, 'abcdefghijk'
[-1745215313, 4436719588892876975, -1745215313, 4436719588892876975],
# seed 0, 'äú∑ℇ'
[493570806, 5749986484189612790, -1006381564, -5915111450199468540],
# seed 42, 'äú∑ℇ'
[-1677110816, -2947981342227738144, -1860207793, -4296699217652516017],
],
'fnv': [
# seed 0, 'abc'
[-1600925533, 1453079729188098211, -1600925533,
1453079729188098211],
# seed 42, 'abc'
[-206076799, -4410911502303878509, -1024014457,
-3570150969479994130],
# seed 42, 'abcdefghijk'
            [811136751, -5046230049376118746, -77208053,
-4779029615281019666],
# seed 0, 'äú∑ℇ'
[44402817, 8998297579845987431, -1956240331,
-782697888614047887],
# seed 42, 'äú∑ℇ'
[-283066365, -4576729883824601543, -271871407,
-3927695501187247084],
]
}
def get_expected_hash(self, position, length):
if length < sys.hash_info.cutoff:
algorithm = "djba33x"
else:
algorithm = sys.hash_info.algorithm
if sys.byteorder == 'little':
platform = 1 if IS_64BIT else 0
else:
assert(sys.byteorder == 'big')
platform = 3 if IS_64BIT else 2
return self.known_hashes[algorithm][position][platform]
def test_null_hash(self):
# PYTHONHASHSEED=0 disables the randomized hash
known_hash_of_obj = self.get_expected_hash(0, 3)
# Randomization is enabled by default:
self.assertNotEqual(self.get_hash(self.repr_), known_hash_of_obj)
# It can also be disabled by setting the seed to 0:
self.assertEqual(self.get_hash(self.repr_, seed=0), known_hash_of_obj)
@skip_unless_internalhash
def test_fixed_hash(self):
# test a fixed seed for the randomized hash
# Note that all types share the same values:
h = self.get_expected_hash(1, 3)
self.assertEqual(self.get_hash(self.repr_, seed=42), h)
@skip_unless_internalhash
def test_long_fixed_hash(self):
if self.repr_long is None:
return
h = self.get_expected_hash(2, 11)
self.assertEqual(self.get_hash(self.repr_long, seed=42), h)
class StrHashRandomizationTests(StringlikeHashRandomizationTests,
unittest.TestCase):
repr_ = repr('abc')
repr_long = repr('abcdefghijk')
repr_ucs2 = repr('äú∑ℇ')
@skip_unless_internalhash
def test_empty_string(self):
self.assertEqual(hash(""), 0)
@skip_unless_internalhash
def test_ucs2_string(self):
h = self.get_expected_hash(3, 6)
self.assertEqual(self.get_hash(self.repr_ucs2, seed=0), h)
h = self.get_expected_hash(4, 6)
self.assertEqual(self.get_hash(self.repr_ucs2, seed=42), h)
class BytesHashRandomizationTests(StringlikeHashRandomizationTests,
unittest.TestCase):
repr_ = repr(b'abc')
repr_long = repr(b'abcdefghijk')
@skip_unless_internalhash
def test_empty_string(self):
self.assertEqual(hash(b""), 0)
class MemoryviewHashRandomizationTests(StringlikeHashRandomizationTests,
unittest.TestCase):
repr_ = "memoryview(b'abc')"
repr_long = "memoryview(b'abcdefghijk')"
@skip_unless_internalhash
def test_empty_string(self):
self.assertEqual(hash(memoryview(b"")), 0)
class DatetimeTests(HashRandomizationTests):
def get_hash_command(self, repr_):
return 'import datetime; print(hash(%s))' % repr_
class DatetimeDateTests(DatetimeTests, unittest.TestCase):
repr_ = repr(datetime.date(1066, 10, 14))
class DatetimeDatetimeTests(DatetimeTests, unittest.TestCase):
repr_ = repr(datetime.datetime(1, 2, 3, 4, 5, 6, 7))
class DatetimeTimeTests(DatetimeTests, unittest.TestCase):
repr_ = repr(datetime.time(0))
class HashDistributionTestCase(unittest.TestCase):
def test_hash_distribution(self):
# check for hash collision
base = "abcdefghabcdefg"
for i in range(1, len(base)):
prefix = base[:i]
with self.subTest(prefix=prefix):
s15 = set()
s255 = set()
for c in range(256):
h = hash(prefix + chr(c))
s15.add(h & 0xf)
s255.add(h & 0xff)
# SipHash24 distribution depends on key, usually > 60%
self.assertGreater(len(s15), 8, prefix)
self.assertGreater(len(s255), 128, prefix)
if __name__ == "__main__":
unittest.main()
|