repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/gui.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# gui.py
# Purpose: control a continuously running LAMMPS simulation via a Tkinter GUI
# Syntax: gui.py in.lammps Nfreq
# in.lammps = LAMMPS input script
# Nfreq = query GUI every this many steps
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
import sys,time
# methods called by GUI
def go():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# parse command line
argv = sys.argv
if len(argv) != 3:
print "Syntax: gui.py in.lammps Nfreq"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# display GUI with go/stop/quit buttons and slider for temperature
# just proc 0 handles GUI
breakflag = 0
runflag = 0
temptarget = 1.0
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Go",command=go).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 2,805 | 23.831858 | 77 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/viz_atomeye.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# viz_atomeye.py
# Purpose: viz running LAMMPS simulation via AtomEye
# Syntax: viz_atomeye.py in.lammps Nfreq Nsteps
# in.lammps = LAMMPS input script
# Nfreq = dump and viz shapshot every this many steps
# Nsteps = run for this many steps
import sys,os
# set this to point to AtomEye version 3 executable
# first line if want AtomEye output to screen, 2nd line to file
#ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530"
ATOMEYE3 = "/home/sjplimp/tools/atomeye3/A3.i686-20060530 > atomeye.out"
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: viz_atomeye.py in.lammps Nfreq Nsteps"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in extended CFG format for AtomEye
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all cfg %d tmp.cfg.* id type xs ys zs" % nfreq)
# initial 0-step run to generate dump file and image
lmp.command("run 0 pre yes post no")
ntimestep = 0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
a = os.popen(ATOMEYE3,'w')
a.write("load_config tmp.cfg.0\n")
a.flush()
# run nfreq steps at a time w/out pre/post, read dump snapshot, display it
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0:
a.write("load_config tmp.cfg.%d\n" % ntimestep)
a.flush()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 1,913 | 25.219178 | 74 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/viz_vmd.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# viz_vmd.py
# Purpose: viz running LAMMPS simulation via VMD
# Syntax: viz_vmd.py in.lammps Nfreq Nsteps
# in.lammps = LAMMPS input script
# Nfreq = dump and viz shapshot every this many steps
# Nsteps = run for this many steps
import sys
sys.path.append("./pizza")
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: viz_vmd.py in.lammps Nfreq Nsteps"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate dump file and image
lmp.command("run 0 pre yes post no")
ntimestep = 0
# wrapper on VMD window via Pizza.py vmd tool
# just proc 0 handles reading of dump file and viz
if me == 0:
from vmd import vmd
v = vmd()
v('menu main off')
v.rep('VDW')
from dump import dump
from pdbfile import pdbfile
d = dump('tmp.dump',0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
v.new('tmp.pdb','pdb')
# run nfreq steps at a time w/out pre/post, read dump snapshot, display it
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0:
d.next()
d.unscale()
p.single(ntimestep)
# add frame to current data set
v.append('tmp.pdb','pdb')
# delete all frame and add new.
#v.update('tmp.dump')
lmp.command("run 0 pre no post yes")
if me == 0:
v.flush()
# uncomment the following, if you want to work with the viz some more.
#v('menu main on')
#print "type quit to terminate."
#v.enter()
#v.stop()
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 2,063 | 21.434783 | 74 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/vizplotgui_vmd.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# vizplotgui_vmd.py
# Purpose: viz running LAMMPS simulation via VMD with plot and GUI
# Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz shapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
d.next()
d.unscale()
p.single(ntimestep)
v.append('tmp.pdb','pdb')
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: vizplotgui_vmd.py in.lammps Nfreq compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on VMD window via Pizza.py vmd tool
# just proc 0 handles reading of dump file and viz
if me == 0:
from vmd import vmd
v = vmd()
v('menu main off')
v.rep('VDW')
from dump import dump
from pdbfile import pdbfile
d = dump('tmp.dump',0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
v.new('tmp.pdb','pdb')
# display GUI with run/stop buttons and slider for temperature
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 4,390 | 24.235632 | 75 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/viz_pymol.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# viz_pymol.py
# Purpose: viz running LAMMPS simulation via PyMol
# Syntax: viz_pymol.py in.lammps Nfreq Nsteps
# in.lammps = LAMMPS input script
# Nfreq = dump and viz shapshot every this many steps
# Nsteps = run for this many steps
import sys
sys.path.append("./pizza")
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: viz_pymol.py in.lammps Nfreq Nsteps"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate dump file and image
lmp.command("run 0 pre yes post no")
ntimestep = 0
# wrapper on PyMol
# just proc 0 handles reading of dump file and viz
if me == 0:
import pymol
pymol.finish_launching()
from dump import dump
from pdbfile import pdbfile
from pymol import cmd as pm
d = dump("tmp.dump",0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
pm.load("tmp.pdb")
pm.show("spheres","tmp")
# run nfreq steps at a time w/out pre/post, read dump snapshot, display it
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0:
d.next()
d.unscale()
p.single(ntimestep)
pm.load("tmp.pdb")
pm.forward()
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 1,874 | 21.590361 | 74 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/vizplotgui_pymol.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# vizplotgui_pymol.py
# Purpose: viz running LAMMPS simulation via PyMol with plot and GUI
# Syntax: vizplotgui_pymol.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz shapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
d.next()
d.unscale()
p.single(ntimestep)
pm.load("tmp.pdb")
pm.forward()
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: vizplotgui_pymol.py in.lammps Nfreq compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on PyMol
# just proc 0 handles reading of dump file and viz
if me == 0:
import pymol
pymol.finish_launching()
from dump import dump
from pdbfile import pdbfile
from pymol import cmd as pm
d = dump("tmp.dump",0)
p = pdbfile(d)
d.next()
d.unscale()
p.single(ntimestep)
pm.load("tmp.pdb")
pm.show("spheres","tmp")
# display GUI with run/stop buttons and slider for temperature
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 4,404 | 24.171429 | 75 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/demo.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# demo.py
# Purpose: illustrate use of many library interface commands
# Syntax: demo.py
# uses in.demo as LAMMPS input script
import sys
# parse command line
argv = sys.argv
if len(argv) != 1:
print "Syntax: demo.py"
sys.exit()
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# test out various library functions after running in.demo
lmp.file("in.demo")
if me == 0: print "\nPython output:"
natoms = lmp.extract_global("natoms",0)
mass = lmp.extract_atom("mass",2)
x = lmp.extract_atom("x",3)
print "Natoms, mass, x[0][0] coord =",natoms,mass[1],x[0][0]
temp = lmp.extract_compute("thermo_temp",0,0)
print "Temperature from compute =",temp
eng = lmp.extract_variable("eng",None,0)
print "Energy from equal-style variable =",eng
vy = lmp.extract_variable("vy","all",1)
print "Velocity component from atom-style variable =",vy[1]
natoms = lmp.get_natoms()
print "Natoms from get_natoms =",natoms
xc = lmp.gather_atoms("x",1,3)
print "Global coords from gather_atoms =",xc[0],xc[1],xc[31]
xc[0] = xc[0] + 1.0
lmp.scatter_atoms("x",1,3,xc)
print "Changed x[0][0] via scatter_atoms =",x[0][0]
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 1,430 | 22.080645 | 61 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/plot.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# plot.py
# Purpose: plot Temp of running LAMMPS simulation via GnuPlot in Pizza.py
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
import sys
sys.path.append("./pizza")
from gnu import gnu
# parse command line
argv = sys.argv
if len(argv) != 5:
print "Syntax: plot.py in.lammps Nfreq Nsteps compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# wrapper on GnuPlot via Pizza.py gnu tool
# just proc 0 handles plotting
if me == 0:
gn = gnu()
gn.plot(xaxis,yaxis)
gn.xrange(0,nsteps)
gn.title(compute,"Timestep","Temperature")
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0: gn.plot(xaxis,yaxis)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 1,869 | 23.605263 | 73 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/trivial.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# trivial.py
# Purpose: run a LAMMPS input script via Python
# Syntax: trivial.py in.lammps
# in.lammps = LAMMPS input script
import sys
# parse command line
argv = sys.argv
if len(argv) != 2:
print "Syntax: trivial.py in.lammps"
sys.exit()
infile = sys.argv[1]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
lmp.file(infile)
# run infile one line at a time
#lines = open(infile,'r').readlines()
#for line in lines: lmp.command(line)
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 793 | 18.365854 | 61 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/vizplotgui_gl.py | #!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# vizplotgui_gl.py
# Purpose: viz running LAMMPS simulation via GL tool with plot and GUI
# Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz shapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
global runflag
runflag = 1
def stop():
global runflag
runflag = 0
def settemp(value):
global temptarget
temptarget = slider.get()
def quit():
global breakflag
breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
d.next()
d.unscale()
g.show(ntimestep)
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
gn.plot(xaxis,yaxis)
# parse command line
argv = sys.argv
if len(argv) != 4:
print "Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
compute = sys.argv[3]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)
# initial 0-step run to generate initial 1-point plot, dump file, and image
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
breakflag = 0
runflag = 0
temptarget = 1.0
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
from dump import dump
from gl import gl
d = dump("tmp.dump",0)
g = gl(d)
d.next()
d.unscale()
g.zoom(1)
g.shift(0,0)
g.rotate(0,270)
g.q(10)
g.box(1)
g.show(ntimestep)
# display GUI with run/stop buttons and slider for temperature
if me == 0:
from Tkinter import *
tkroot = Tk()
tkroot.withdraw()
root = Toplevel(tkroot)
root.title("LAMMPS GUI")
frame = Frame(root)
Button(frame,text="Run",command=run).pack(side=LEFT)
Button(frame,text="Stop",command=stop).pack(side=LEFT)
slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
orient=HORIZONTAL,label="Temperature")
slider.bind('<ButtonRelease-1>',settemp)
slider.set(temptarget)
slider.pack(side=LEFT)
Button(frame,text="Quit",command=quit).pack(side=RIGHT)
frame.pack()
tkroot.update()
# wrapper on GnuPlot via Pizza.py gnu tool
if me == 0:
from gnu import gnu
gn = gnu()
gn.plot(xaxis,yaxis)
gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
if me == 0: tkroot.update()
if temp != temptarget:
temp = temptarget
seed += me+1
lmp.command("fix 2 all langevin %g %g 0.1 12345" % (temp,temp))
running = 0
if runflag and running:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif runflag and not running:
lmp.command("run %d pre yes post no" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
elif not runflag and running:
lmp.command("run %d pre no post yes" % nfreq)
ntimestep += nfreq
if me == 0: update(ntimestep)
if breakflag: break
if runflag: running = 1
else: running = 0
time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
| 4,373 | 23.852273 | 75 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/pizza/gnu.py | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# gnu tool
oneline = "Create plots via GnuPlot plotting program"
docstr = """
g = gnu() start up GnuPlot
g.stop() shut down GnuPlot process
g.plot(a) plot vector A against linear index
g.plot(a,b) plot B against A
g.plot(a,b,c,d,...) plot B against A, D against C, etc
g.mplot(M,N,S,"file",a,b,...) multiple plots saved to file0000.eps, etc
each plot argument can be a tuple, list, or Numeric/NumPy vector
mplot loops over range(M,N,S) and create one plot per iteration
last args are same as list of vectors for plot(), e.g. 1, 2, 4 vectors
each plot is made from a portion of the vectors, depending on loop index i
Ith plot is of b[0:i] vs a[0:i], etc
series of plots saved as file0000.eps, file0001.eps, etc
if use xrange(),yrange() then plot axes will be same for all plots
g("plot 'file.dat' using 2:3 with lines") execute string in GnuPlot
g.enter() enter GnuPlot shell
gnuplot> plot sin(x) with lines type commands directly to GnuPlot
gnuplot> exit, quit exit GnuPlot shell
g.export("data",range(100),a,...) create file with columns of numbers
all vectors must be of equal length
could plot from file with GnuPlot command: plot 'data' using 1:2 with lines
g.select(N) figure N becomes the current plot
subsequent commands apply to this plot
g.hide(N) delete window for figure N
g.save("file") save current plot as file.eps
Set attributes for current plot:
g.erase() reset all attributes to default values
g.aspect(1.3) aspect ratio
g.xtitle("Time") x axis text
g.ytitle("Energy") y axis text
g.title("My Plot") title text
g.title("title","x","y") title, x axis, y axis text
g.xrange(xmin,xmax) x axis range
g.xrange() default x axis range
g.yrange(ymin,ymax) y axis range
g.yrange() default y axis range
g.xlog() toggle x axis between linear and log
g.ylog() toggle y axis between linear and log
g.label(x,y,"text") place label at x,y coords
g.curve(N,'r') set color of curve N
colors: 'k' = black, 'r' = red, 'g' = green, 'b' = blue
'm' = magenta, 'c' = cyan, 'y' = yellow
"""
# History
# 8/05, Matt Jones (BYU): original version
# 9/05, Steve Plimpton: added mplot() method
# ToDo list
# allow choice of JPG or PNG or GIF when saving ?
# can this be done from GnuPlot or have to do via ImageMagick convert ?
# way to trim EPS plot that is created ?
# hide does not work on Mac aqua
# select does not pop window to front on Mac aqua
# Variables
# current = index of current figure (1-N)
# figures = list of figure objects with each plot's attributes
# so they aren't lost between replots
# Imports and external programs
import types, os
try: from DEFAULTS import PIZZA_GNUPLOT
except: PIZZA_GNUPLOT = "gnuplot"
try: from DEFAULTS import PIZZA_GNUTERM
except: PIZZA_GNUTERM = "x11"
# Class definition
class gnu:
# --------------------------------------------------------------------
def __init__(self):
self.GNUPLOT = os.popen(PIZZA_GNUPLOT,'w')
self.file = "tmp.gnu"
self.figures = []
self.select(1)
# --------------------------------------------------------------------
def stop(self):
self.__call__("quit")
del self.GNUPLOT
# --------------------------------------------------------------------
def __call__(self,command):
self.GNUPLOT.write(command + '\n')
self.GNUPLOT.flush()
# --------------------------------------------------------------------
def enter(self):
while 1:
command = raw_input("gnuplot> ")
if command == "quit" or command == "exit": return
self.__call__(command)
# --------------------------------------------------------------------
# write plot vectors to files and plot them
def plot(self,*vectors):
if len(vectors) == 1:
file = self.file + ".%d.1" % self.current
linear = range(len(vectors[0]))
self.export(file,linear,vectors[0])
self.figures[self.current-1].ncurves = 1
else:
if len(vectors) % 2: raise StandardError,"vectors must come in pairs"
for i in range(0,len(vectors),2):
file = self.file + ".%d.%d" % (self.current,i/2+1)
self.export(file,vectors[i],vectors[i+1])
self.figures[self.current-1].ncurves = len(vectors)/2
self.draw()
# --------------------------------------------------------------------
# create multiple plots from growing vectors, save to numbered files
# don't plot empty vector, create a [0] instead
def mplot(self,start,stop,skip,file,*vectors):
n = 0
for i in range(start,stop,skip):
partial_vecs = []
for vec in vectors:
if i: partial_vecs.append(vec[:i])
else: partial_vecs.append([0])
self.plot(*partial_vecs)
if n < 10: newfile = file + "000" + str(n)
elif n < 100: newfile = file + "00" + str(n)
elif n < 1000: newfile = file + "0" + str(n)
else: newfile = file + str(n)
self.save(newfile)
n += 1
# --------------------------------------------------------------------
# write list of equal-length vectors to filename
def export(self,filename,*vectors):
n = len(vectors[0])
for vector in vectors:
if len(vector) != n: raise StandardError,"vectors must be same length"
f = open(filename,'w')
nvec = len(vectors)
for i in xrange(n):
for j in xrange(nvec):
print >>f,vectors[j][i],
print >>f
f.close()
# --------------------------------------------------------------------
# select plot N as current plot
def select(self,n):
self.current = n
if len(self.figures) < n:
for i in range(n - len(self.figures)):
self.figures.append(figure())
cmd = "set term " + PIZZA_GNUTERM + ' ' + str(n)
self.__call__(cmd)
if self.figures[n-1].ncurves: self.draw()
# --------------------------------------------------------------------
# delete window for plot N
def hide(self,n):
cmd = "set term %s close %d" % (PIZZA_GNUTERM,n)
self.__call__(cmd)
# --------------------------------------------------------------------
# save plot to file.eps
# final re-select will reset terminal
# do not continue until plot file is written out
# else script could go forward and change data file
# use tmp.done as semaphore to indicate plot is finished
  def save(self,file):
    """Save the current plot to file.eps via the postscript terminal.

    Blocks until gnuplot has finished writing the file: tmp.done is used
    as a semaphore, created by a shell command gnuplot only executes after
    the plot command completes.  The trailing select() restores the
    interactive terminal for the current plot."""
    self.__call__("set terminal postscript enhanced solid lw 2 color portrait")
    cmd = "set output '%s.eps'" % file
    self.__call__(cmd)
    # remove stale semaphore, draw, then ask gnuplot to touch the semaphore
    if os.path.exists("tmp.done"): os.remove("tmp.done")
    self.draw()
    self.__call__("!touch tmp.done")
    # busy-wait until gnuplot reaches the touch, i.e. the plot is written
    while not os.path.exists("tmp.done"): continue
    self.__call__("set output")
    self.select(self.current)
# --------------------------------------------------------------------
# restore default attributes by creating a new fig object
def erase(self):
fig = figure()
fig.ncurves = self.figures[self.current-1].ncurves
self.figures[self.current-1] = fig
self.draw()
# --------------------------------------------------------------------
def aspect(self,value):
self.figures[self.current-1].aspect = value
self.draw()
# --------------------------------------------------------------------
def xrange(self,*values):
if len(values) == 0:
self.figures[self.current-1].xlimit = 0
else:
self.figures[self.current-1].xlimit = (values[0],values[1])
self.draw()
# --------------------------------------------------------------------
def yrange(self,*values):
if len(values) == 0:
self.figures[self.current-1].ylimit = 0
else:
self.figures[self.current-1].ylimit = (values[0],values[1])
self.draw()
# --------------------------------------------------------------------
def label(self,x,y,text):
self.figures[self.current-1].labels.append((x,y,text))
self.figures[self.current-1].nlabels += 1
self.draw()
# --------------------------------------------------------------------
def nolabels(self):
self.figures[self.current-1].nlabel = 0
self.figures[self.current-1].labels = []
self.draw()
# --------------------------------------------------------------------
def title(self,*strings):
if len(strings) == 1:
self.figures[self.current-1].title = strings[0]
else:
self.figures[self.current-1].title = strings[0]
self.figures[self.current-1].xtitle = strings[1]
self.figures[self.current-1].ytitle = strings[2]
self.draw()
# --------------------------------------------------------------------
def xtitle(self,label):
self.figures[self.current-1].xtitle = label
self.draw()
# --------------------------------------------------------------------
def ytitle(self,label):
self.figures[self.current-1].ytitle = label
self.draw()
# --------------------------------------------------------------------
def xlog(self):
if self.figures[self.current-1].xlog:
self.figures[self.current-1].xlog = 0
else:
self.figures[self.current-1].xlog = 1
self.draw()
# --------------------------------------------------------------------
def ylog(self):
if self.figures[self.current-1].ylog:
self.figures[self.current-1].ylog = 0
else:
self.figures[self.current-1].ylog = 1
self.draw()
# --------------------------------------------------------------------
def curve(self,num,color):
fig = self.figures[self.current-1]
while len(fig.colors) < num: fig.colors.append(0)
fig.colors[num-1] = colormap[color]
self.draw()
# --------------------------------------------------------------------
# draw a plot with all its settings
# just return if no files of vectors defined yet
  def draw(self):
    """Emit the full gnuplot command sequence for the current figure:
    size, titles, log scales, axis ranges, labels, then one 'plot' command
    with one exported data file per curve.  Returns immediately if the
    figure has no curves yet."""
    fig = self.figures[self.current-1]
    if not fig.ncurves: return
    cmd = 'set size ratio ' + str(1.0/float(fig.aspect))
    self.__call__(cmd)
    cmd = 'set title ' + '"' + fig.title + '"'
    self.__call__(cmd)
    cmd = 'set xlabel ' + '"' + fig.xtitle + '"'
    self.__call__(cmd)
    cmd = 'set ylabel ' + '"' + fig.ytitle + '"'
    self.__call__(cmd)
    if fig.xlog: self.__call__("set logscale x")
    else: self.__call__("unset logscale x")
    if fig.ylog: self.__call__("set logscale y")
    else: self.__call__("unset logscale y")
    # limit of 0 means autoscale; otherwise a (lo,hi) tuple
    if fig.xlimit:
      cmd = 'set xr [' + str(fig.xlimit[0]) + ':' + str(fig.xlimit[1]) + ']'
      self.__call__(cmd)
    else: self.__call__("set xr [*:*]")
    if fig.ylimit:
      cmd = 'set yr [' + str(fig.ylimit[0]) + ':' + str(fig.ylimit[1]) + ']'
      self.__call__(cmd)
    else: self.__call__("set yr [*:*]")
    # clear any old labels, then re-emit each stored (x,y,text) label
    self.__call__("set nolabel")
    for i in range(fig.nlabels):
      x = fig.labels[i][0]
      y = fig.labels[i][1]
      text = fig.labels[i][2]
      cmd = 'set label ' + '\"' + text + '\" at ' + str(x) + ',' + str(y)
      self.__call__(cmd)
    self.__call__("set key off")
    # one curve per data file: file.N.M for plot N, curve M
    cmd = 'plot '
    for i in range(fig.ncurves):
      file = self.file + ".%d.%d" % (self.current,i+1)
      if len(fig.colors) > i and fig.colors[i]:
        cmd += "'" + file + "' using 1:2 with line %d, " % fig.colors[i]
      else:
        cmd += "'" + file + "' using 1:2 with lines, "
    self.__call__(cmd[:-2])
# --------------------------------------------------------------------
# class to store settings for a single plot
class figure:
  """Attribute bundle holding the settings of one gnuplot plot."""
  def __init__(self):
    # curves and their colors (0 = gnuplot default color)
    self.ncurves = 0
    self.colors = []
    # plot and axis titles
    self.title = self.xtitle = self.ytitle = ""
    # geometry and axis settings; limit of 0 means autoscale
    self.aspect = 1.3
    self.xlimit = self.ylimit = 0
    self.xlog = self.ylog = 0     # 0/1 toggles for log axes
    # text labels: list of (x,y,text) tuples
    self.nlabels = 0
    self.labels = []
# --------------------------------------------------------------------
# line color settings: map single-letter color codes to gnuplot linetype
# numbers (-1 = black); looked up by curve()
colormap = {'k':-1, 'r':1, 'g':2, 'b':3, 'm':4, 'c':5, 'y':7}
| 12,601 | 31.817708 | 79 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/pizza/dump.py | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# dump tool
oneline = "Read, write, manipulate dump files and particle attributes"
docstr = """
d = dump("dump.one") read in one or more dump files
d = dump("dump.1 dump.2.gz") can be gzipped
d = dump("dump.*") wildcard expands to multiple files
d = dump("dump.*",0) two args = store filenames, but don't read
incomplete and duplicate snapshots are deleted
if atoms have 5 or 8 columns, assign id,type,x,y,z (ix,iy,iz)
atoms will be unscaled if stored in files as scaled
time = d.next() read next snapshot from dump files
used with 2-argument constructor to allow reading snapshots one-at-a-time
snapshot will be skipped only if another snapshot has same time stamp
return time stamp of snapshot read
return -1 if no snapshots left or last snapshot is incomplete
no column name assignment or unscaling is performed
d.map(1,"id",3,"x") assign names to atom columns (1-N)
not needed if dump file is self-describing
d.tselect.all() select all timesteps
d.tselect.one(N) select only timestep N
d.tselect.none() deselect all timesteps
d.tselect.skip(M) select every Mth step
d.tselect.test("$t >= 100 and $t < 10000") select matching timesteps
d.delete() delete non-selected timesteps
selecting a timestep also selects all atoms in the timestep
skip() and test() only select from currently selected timesteps
test() uses a Python Boolean expression with $t for timestep value
Python comparison syntax: == != < > <= >= and or
d.aselect.all() select all atoms in all steps
d.aselect.all(N) select all atoms in one step
d.aselect.test("$id > 100 and $type == 2") select match atoms in all steps
d.aselect.test("$id > 100 and $type == 2",N) select matching atoms in one step
all() with no args selects atoms from currently selected timesteps
test() with one arg selects atoms from currently selected timesteps
test() sub-selects from currently selected atoms
test() uses a Python Boolean expression with $ for atom attributes
Python comparison syntax: == != < > <= >= and or
$name must end with a space
d.write("file") write selected steps/atoms to dump file
d.write("file",head,app) write selected steps/atoms to dump file
d.scatter("tmp") write selected steps/atoms to multiple files
write() can be specified with 2 additional flags
headd = 0/1 for no/yes snapshot header, app = 0/1 for write vs append
scatter() files are given timestep suffix: e.g. tmp.0, tmp.100, etc
d.scale() scale x,y,z to 0-1 for all timesteps
d.scale(100) scale atom coords for timestep N
d.unscale() unscale x,y,z to box size to all timesteps
d.unscale(1000) unscale atom coords for timestep N
d.wrap() wrap x,y,z into periodic box via ix,iy,iz
d.unwrap() unwrap x,y,z out of box via ix,iy,iz
d.owrap("other") wrap x,y,z to same image as another atom
d.sort() sort atoms by atom ID in all selected steps
d.sort("x") sort atoms by column value in all steps
d.sort(1000) sort atoms in timestep N
scale(), unscale(), wrap(), unwrap(), owrap() operate on all steps and atoms
wrap(), unwrap(), owrap() require ix,iy,iz be defined
owrap() requires a column be defined which contains an atom ID
name of that column is the argument to owrap()
x,y,z for each atom is wrapped to same image as the associated atom ID
useful for wrapping all molecule's atoms the same so it is contiguous
m1,m2 = d.minmax("type") find min/max values for a column
d.set("$ke = $vx * $vx + $vy * $vy") set a column to a computed value
d.setv("type",vector) set a column to a vector of values
d.spread("ke",N,"color") 2nd col = N ints spread over 1st col
d.clone(1000,"color") clone timestep N values to other steps
minmax() operates on selected timesteps and atoms
set() operates on selected timesteps and atoms
left hand side column is created if necessary
left-hand side column is unset or unchanged for non-selected atoms
equation is in Python syntax
use $ for column names, $name must end with a space
setv() operates on selected timesteps and atoms
if column label does not exist, column is created
values in vector are assigned sequentially to atoms, so may want to sort()
length of vector must match # of selected atoms
spread() operates on selected timesteps and atoms
min and max are found for 1st specified column across all selected atoms
atom's value is linear mapping (1-N) between min and max
that is stored in 2nd column (created if needed)
useful for creating a color map
clone() operates on selected timesteps and atoms
values at every timestep are set to value at timestep N for that atom ID
useful for propagating a color map
t = d.time() return vector of selected timestep values
fx,fy,... = d.atom(100,"fx","fy",...) return vector(s) for atom ID N
fx,fy,... = d.vecs(1000,"fx","fy",...) return vector(s) for timestep N
atom() returns vectors with one value for each selected timestep
vecs() returns vectors with one value for each selected atom in the timestep
index,time,flag = d.iterator(0/1) loop over dump snapshots
time,box,atoms,bonds,tris = d.viz(index) return list of viz objects
d.atype = "color" set column returned as "type" by viz
d.extra("dump.bond") read bond list from dump file
d.extra(data) extract bond/tri/line list from data
iterator() loops over selected timesteps
iterator() called with arg = 0 first time, with arg = 1 on subsequent calls
index = index within dump object (0 to # of snapshots)
time = timestep value
flag = -1 when iteration is done, 1 otherwise
viz() returns info for selected atoms for specified timestep index
time = timestep value
box = [xlo,ylo,zlo,xhi,yhi,zhi]
atoms = id,type,x,y,z for each atom as 2d array
bonds = id,type,x1,y1,z1,x2,y2,z2,t1,t2 for each bond as 2d array
if bonds() was used to define bonds, else empty list
tris = id,type,x1,y1,z1,x2,y2,z2,x3,y3,z3,nx,ny,nz for each tri as 2d array
if extra() was used to define tris, else empty list
lines = id,type,x1,y1,z1,x2,y2,z2 for each line as 2d array
if extra() was used to define lines, else empty list
atype is column name viz() will return as atom type (def = "type")
extra() stores list of bonds/tris/lines to return each time viz() is called
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# 12/09, David Hart (SNL): allow use of NumPy or Numeric
# ToDo list
# try to optimize this line in read_snap: words += f.readline().split()
# allow $name in aselect.test() and set() to end with non-space
# should next() snapshot be auto-unscaled ?
# Variables
# flist = list of dump file names
# increment = 1 if reading snapshots one-at-a-time
# nextfile = which file to read from via next()
# eof = ptr into current file for where to read via next()
# nsnaps = # of snapshots
# nselect = # of selected snapshots
# snaps = list of snapshots
# names = dictionary of column names:
# key = "id", value = column # (0 to M-1)
# tselect = class for time selection
# aselect = class for atom selection
# atype = name of vector used as atom type by viz extract
# bondflag = 0 if no bonds, 1 if they are defined statically
# bondlist = static list of bonds to viz() return for all snapshots
# only a list of atom pairs, coords have to be created for each snapshot
# triflag = 0 if no tris, 1 if they are defined statically, 2 if dynamic
# trilist = static list of tris to return via viz() for all snapshots
# lineflag = 0 if no lines, 1 if they are defined statically
# linelist = static list of lines to return via viz() for all snapshots
# Snap = one snapshot
# time = time stamp
# tselect = 0/1 if this snapshot selected
# natoms = # of atoms
# nselect = # of selected atoms in this snapshot
# aselect[i] = 0/1 for each atom
# xlo,xhi,ylo,yhi,zlo,zhi = box bounds (float)
# atoms[i][j] = 2d array of floats, i = 0 to natoms-1, j = 0 to ncols-1
# Imports and external programs
import sys, commands, re, glob, types
from os import popen
from math import * # any function could be used by set()
try:
import numpy as np
oldnumeric = False
except:
import Numeric as np
oldnumeric = True
try: from DEFAULTS import PIZZA_GUNZIP
except: PIZZA_GUNZIP = "gunzip"
# Class definition
class dump:
# --------------------------------------------------------------------
def __init__(self,*list):
self.snaps = []
self.nsnaps = self.nselect = 0
self.names = {}
self.tselect = tselect(self)
self.aselect = aselect(self)
self.atype = "type"
self.bondflag = 0
self.bondlist = []
self.triflag = 0
self.trilist = []
self.triobj = 0
self.lineflag = 0
self.linelist = []
# flist = list of all dump file names
words = list[0].split()
self.flist = []
for word in words: self.flist += glob.glob(word)
if len(self.flist) == 0 and len(list) == 1:
raise StandardError,"no dump file specified"
if len(list) == 1:
self.increment = 0
self.read_all()
else:
self.increment = 1
self.nextfile = 0
self.eof = 0
# --------------------------------------------------------------------
  def read_all(self):
    """Read every snapshot from every file in self.flist, then sort by
    time, cull duplicate time stamps, select all timesteps/atoms, assign
    default column names for 5/8-column files, and unscale coords if the
    last snapshot appears scaled."""
    # read all snapshots from each file
    # test for gzipped files
    for file in self.flist:
      if file[-3:] == ".gz":
        f = popen("%s -c %s" % (PIZZA_GUNZIP,file),'r')
      else: f = open(file)
      # read_snapshot returns 0 at EOF, ending the inner loop
      snap = self.read_snapshot(f)
      while snap:
        self.snaps.append(snap)
        print snap.time,
        sys.stdout.flush()
        snap = self.read_snapshot(f)
      f.close()
    print
    # sort entries by timestep, cull duplicates
    self.snaps.sort(self.compare_time)
    self.cull()
    self.nsnaps = len(self.snaps)
    print "read %d snapshots" % self.nsnaps
    # select all timesteps and atoms
    self.tselect.all()
    # set default names for atom columns if file wasn't self-describing
    if len(self.snaps) == 0:
      print "no column assignments made"
    elif len(self.names):
      print "assigned columns:",self.names2str()
    elif self.snaps[0].atoms == None:
      print "no column assignments made"
    elif len(self.snaps[0].atoms[0]) == 5:
      self.map(1,"id",2,"type",3,"x",4,"y",5,"z")
      print "assigned columns:",self.names2str()
    elif len(self.snaps[0].atoms[0]) == 8:
      self.map(1,"id",2,"type",3,"x",4,"y",5,"z",6,"ix",7,"iy",8,"iz")
      print "assigned columns:",self.names2str()
    else:
      print "no column assignments made"
    # if snapshots are scaled, unscale them
    if (not self.names.has_key("x")) or \
       (not self.names.has_key("y")) or \
       (not self.names.has_key("z")):
      print "no unscaling could be performed"
    elif self.nsnaps > 0:
      if self.scaled(self.nsnaps-1): self.unscale()
      else: print "dump is already unscaled"
# --------------------------------------------------------------------
# read next snapshot from list of files
  def next(self):
    """Read the next new snapshot in incremental mode (2-arg constructor)
    and select it with all its atoms.

    Returns the snapshot's time stamp, or -1 when all files are exhausted
    or the last snapshot is incomplete.  Snapshots whose time stamp was
    already read are skipped.  No column naming or unscaling is done."""
    if not self.increment: raise StandardError,"cannot read incrementally"
    # read next snapshot in current file using eof as pointer
    # if fail, try next file
    # if new snapshot time stamp already exists, read next snapshot
    while 1:
      f = open(self.flist[self.nextfile],'rb')
      f.seek(self.eof)
      snap = self.read_snapshot(f)
      if not snap:
        self.nextfile += 1
        if self.nextfile == len(self.flist): return -1
        f.close()
        self.eof = 0
        continue
      self.eof = f.tell()
      f.close()
      try:
        # findtime raises when the stamp is NEW -> keep this snapshot
        self.findtime(snap.time)
        continue
      except: break
    # select the new snapshot with all its atoms
    self.snaps.append(snap)
    snap = self.snaps[self.nsnaps]
    snap.tselect = 1
    snap.nselect = snap.natoms
    for i in xrange(snap.natoms): snap.aselect[i] = 1
    self.nsnaps += 1
    self.nselect += 1
    return snap.time
# --------------------------------------------------------------------
# read a single snapshot from file f
# return snapshot or 0 if failed
# assign column names if not already done and file is self-describing
# convert xs,xu to x
  def read_snapshot(self,f):
    """Parse one snapshot from open file f.

    Returns a Snap, or 0 on any failure (EOF, truncated snapshot) -- the
    broad except is the deliberate end-of-data sentinel used by callers.
    Column names are learned once from a self-describing ITEM: ATOMS
    header line, with scaled/unwrapped coords xs/xu mapped to plain x
    (likewise for y,z)."""
    try:
      snap = Snap()
      item = f.readline()
      snap.time = int(f.readline().split()[0])  # just grab 1st field
      item = f.readline()
      snap.natoms = int(f.readline())
      snap.aselect = np.zeros(snap.natoms)
      item = f.readline()
      words = f.readline().split()
      snap.xlo,snap.xhi = float(words[0]),float(words[1])
      words = f.readline().split()
      snap.ylo,snap.yhi = float(words[0]),float(words[1])
      words = f.readline().split()
      snap.zlo,snap.zhi = float(words[0]),float(words[1])
      item = f.readline()
      if len(self.names) == 0:
        # first snapshot only: learn column names from the header
        words = item.split()[2:]
        if len(words):
          for i in range(len(words)):
            if words[i] == "xs" or words[i] == "xu":
              self.names["x"] = i
            elif words[i] == "ys" or words[i] == "yu":
              self.names["y"] = i
            elif words[i] == "zs" or words[i] == "zu":
              self.names["z"] = i
            else: self.names[words[i]] = i
      if snap.natoms:
        # read natoms lines of per-atom values into one flat word list,
        # then reshape into a (natoms,ncol) float array
        words = f.readline().split()
        ncol = len(words)
        for i in xrange(1,snap.natoms):
          words += f.readline().split()
        floats = map(float,words)
        if oldnumeric: atoms = np.zeros((snap.natoms,ncol),np.Float)
        else: atoms = np.zeros((snap.natoms,ncol),np.float)
        start = 0
        stop = ncol
        for i in xrange(snap.natoms):
          atoms[i] = floats[start:stop]
          start = stop
          stop += ncol
      else: atoms = None
      snap.atoms = atoms
      return snap
    except:
      return 0
# --------------------------------------------------------------------
# decide if snapshot i is scaled/unscaled from coords of first and last atom
def scaled(self,i):
ix = self.names["x"]
iy = self.names["y"]
iz = self.names["z"]
natoms = self.snaps[i].natoms
if natoms == 0: return 0
x1 = self.snaps[i].atoms[0][ix]
y1 = self.snaps[i].atoms[0][iy]
z1 = self.snaps[i].atoms[0][iz]
x2 = self.snaps[i].atoms[natoms-1][ix]
y2 = self.snaps[i].atoms[natoms-1][iy]
z2 = self.snaps[i].atoms[natoms-1][iz]
if x1 >= -0.1 and x1 <= 1.1 and y1 >= -0.1 and y1 <= 1.1 and \
z1 >= -0.1 and z1 <= 1.1 and x2 >= -0.1 and x2 <= 1.1 and \
y2 >= -0.1 and y2 <= 1.1 and z2 >= -0.1 and z2 <= 1.1:
return 1
else: return 0
# --------------------------------------------------------------------
# map atom column names
def map(self,*pairs):
if len(pairs) % 2 != 0:
raise StandardError, "dump map() requires pairs of mappings"
for i in range(0,len(pairs),2):
j = i + 1
self.names[pairs[j]] = pairs[i]-1
# delete unselected snapshots
# --------------------------------------------------------------------
  def delete(self):
    """Remove all non-selected snapshots, in place (self.snaps keeps its
    object identity), and update the snapshot count."""
    ndel = i = 0
    while i < self.nsnaps:
      if not self.snaps[i].tselect:
        del self.snaps[i]
        self.nsnaps -= 1
        ndel += 1
      else: i += 1
    print "%d snapshots deleted" % ndel
    print "%d snapshots remaining" % self.nsnaps
# --------------------------------------------------------------------
# scale coords to 0-1 for all snapshots or just one
def scale(self,*list):
if len(list) == 0:
print "Scaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.scale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.scale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def scale_one(self,snap,x,y,z):
xprdinv = 1.0 / (snap.xhi - snap.xlo)
yprdinv = 1.0 / (snap.yhi - snap.ylo)
zprdinv = 1.0 / (snap.zhi - snap.zlo)
atoms = snap.atoms
atoms[:,x] = (atoms[:,x] - snap.xlo) * xprdinv
atoms[:,y] = (atoms[:,y] - snap.ylo) * yprdinv
atoms[:,z] = (atoms[:,z] - snap.zlo) * zprdinv
# --------------------------------------------------------------------
# unscale coords from 0-1 to box size for all snapshots or just one
def unscale(self,*list):
if len(list) == 0:
print "Unscaling dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
for snap in self.snaps: self.unscale_one(snap,x,y,z)
else:
i = self.findtime(list[0])
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
self.unscale_one(self.snaps[i],x,y,z)
# --------------------------------------------------------------------
def unscale_one(self,snap,x,y,z):
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] = snap.xlo + atoms[:,x]*xprd
atoms[:,y] = snap.ylo + atoms[:,y]*yprd
atoms[:,z] = snap.zlo + atoms[:,z]*zprd
# --------------------------------------------------------------------
# wrap coords from outside box to inside
def wrap(self):
print "Wrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] -= atoms[:,ix]*xprd
atoms[:,y] -= atoms[:,iy]*yprd
atoms[:,z] -= atoms[:,iz]*zprd
# --------------------------------------------------------------------
# unwrap coords from inside box to outside
def unwrap(self):
print "Unwrapping dump ..."
x = self.names["x"]
y = self.names["y"]
z = self.names["z"]
ix = self.names["ix"]
iy = self.names["iy"]
iz = self.names["iz"]
for snap in self.snaps:
xprd = snap.xhi - snap.xlo
yprd = snap.yhi - snap.ylo
zprd = snap.zhi - snap.zlo
atoms = snap.atoms
atoms[:,x] += atoms[:,ix]*xprd
atoms[:,y] += atoms[:,iy]*yprd
atoms[:,z] += atoms[:,iz]*zprd
# --------------------------------------------------------------------
# wrap coords to same image as atom ID stored in "other" column
  def owrap(self,other):
    """Wrap each atom's x,y,z to the same periodic image as the atom whose
    ID is stored in column 'other' (e.g. to keep a molecule contiguous).
    Requires id, x,y,z and image flag ix,iy,iz columns."""
    print "Wrapping to other ..."
    id = self.names["id"]
    x = self.names["x"]
    y = self.names["y"]
    z = self.names["z"]
    ix = self.names["ix"]
    iy = self.names["iy"]
    iz = self.names["iz"]
    iother = self.names[other]
    for snap in self.snaps:
      xprd = snap.xhi - snap.xlo
      yprd = snap.yhi - snap.ylo
      zprd = snap.zhi - snap.zlo
      atoms = snap.atoms
      # map atom ID -> row index for this snapshot
      ids = {}
      for i in xrange(snap.natoms):
        ids[atoms[i][id]] = i
      for i in xrange(snap.natoms):
        # shift by the image-flag difference relative to the partner atom
        j = ids[atoms[i][iother]]
        atoms[i][x] += (atoms[i][ix]-atoms[j][ix])*xprd
        atoms[i][y] += (atoms[i][iy]-atoms[j][iy])*yprd
        atoms[i][z] += (atoms[i][iz]-atoms[j][iz])*zprd
# --------------------------------------------------------------------
# convert column names assignment to a string, in column order
def names2str(self):
ncol = len(self.snaps[0].atoms[0])
pairs = self.names.items()
values = self.names.values()
str = ""
for i in xrange(ncol):
if i in values: str += pairs[values.index(i)][0] + ' '
return str
# --------------------------------------------------------------------
# sort atoms by atom ID in all selected timesteps by default
# if arg = string, sort all steps by that column
# if arg = numeric, sort atoms in single step
  def sort(self,*list):
    """Sort atoms within snapshots.

    No args: sort all selected snapshots by atom ID.  A string arg: sort
    all selected snapshots by that column name.  A numeric arg: sort the
    single snapshot with that timestep by atom ID."""
    if len(list) == 0:
      print "Sorting selected snapshots ..."
      id = self.names["id"]
      for snap in self.snaps:
        if snap.tselect: self.sort_one(snap,id)
    elif type(list[0]) is types.StringType:
      print "Sorting selected snapshots by %s ..." % list[0]
      id = self.names[list[0]]
      for snap in self.snaps:
        if snap.tselect: self.sort_one(snap,id)
    else:
      # numeric arg = a single timestep value
      i = self.findtime(list[0])
      id = self.names["id"]
      self.sort_one(self.snaps[i],id)
# --------------------------------------------------------------------
# sort a single snapshot by ID column
def sort_one(self,snap,id):
atoms = snap.atoms
ids = atoms[:,id]
ordering = np.argsort(ids)
for i in xrange(len(atoms[0])):
atoms[:,i] = np.take(atoms[:,i],ordering)
# --------------------------------------------------------------------
# write a single dump file from current selection
  def write(self,file,header=1,append=0):
    """Write all selected snapshots/atoms to a single dump file.

    header=0 suppresses the ITEM: header lines; append=1 appends to an
    existing file instead of overwriting it."""
    if len(self.snaps): namestr = self.names2str()
    if not append: f = open(file,"w")
    else: f = open(file,"a")
    for snap in self.snaps:
      if not snap.tselect: continue
      print snap.time,
      sys.stdout.flush()
      if header:
        print >>f,"ITEM: TIMESTEP"
        print >>f,snap.time
        print >>f,"ITEM: NUMBER OF ATOMS"
        print >>f,snap.nselect
        print >>f,"ITEM: BOX BOUNDS"
        print >>f,snap.xlo,snap.xhi
        print >>f,snap.ylo,snap.yhi
        print >>f,snap.zlo,snap.zhi
        print >>f,"ITEM: ATOMS",namestr
      atoms = snap.atoms
      nvalues = len(atoms[0])
      for i in xrange(snap.natoms):
        if not snap.aselect[i]: continue
        line = ""
        for j in xrange(nvalues):
          # columns 0,1 (id,type under the default mapping) written as ints
          if (j < 2):
            line += str(int(atoms[i][j])) + " "
          else:
            line += str(atoms[i][j]) + " "
        print >>f,line
    f.close()
    print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# write one dump file per snapshot from current selection
  def scatter(self,root):
    """Write one dump file per selected snapshot, named root.<timestep>,
    each containing that snapshot's selected atoms."""
    if len(self.snaps): namestr = self.names2str()
    for snap in self.snaps:
      if not snap.tselect: continue
      print snap.time,
      sys.stdout.flush()
      file = root + "." + str(snap.time)
      f = open(file,"w")
      print >>f,"ITEM: TIMESTEP"
      print >>f,snap.time
      print >>f,"ITEM: NUMBER OF ATOMS"
      print >>f,snap.nselect
      print >>f,"ITEM: BOX BOUNDS"
      print >>f,snap.xlo,snap.xhi
      print >>f,snap.ylo,snap.yhi
      print >>f,snap.zlo,snap.zhi
      print >>f,"ITEM: ATOMS",namestr
      atoms = snap.atoms
      nvalues = len(atoms[0])
      for i in xrange(snap.natoms):
        if not snap.aselect[i]: continue
        line = ""
        for j in xrange(nvalues):
          # columns 0,1 (id,type under the default mapping) written as ints
          if (j < 2):
            line += str(int(atoms[i][j])) + " "
          else:
            line += str(atoms[i][j]) + " "
        print >>f,line
      f.close()
    print "\n%d snapshots" % self.nselect
# --------------------------------------------------------------------
# find min/max across all selected snapshots/atoms for a particular column
def minmax(self,colname):
icol = self.names[colname]
min = 1.0e20
max = -min
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] < min: min = atoms[i][icol]
if atoms[i][icol] > max: max = atoms[i][icol]
return (min,max)
# --------------------------------------------------------------------
# set a column value via an equation for all selected snapshots
  def set(self,eq):
    """Evaluate an equation like "$ke = $vx * $vx" for every selected atom
    of every selected snapshot.  $names are column references; the
    left-hand column must already exist or is created here.

    NOTE(review): the equation is compiled and exec'd as arbitrary Python
    code, so only trusted input should ever be passed in."""
    print "Setting ..."
    pattern = "\$\w*"
    list = re.findall(pattern,eq)
    # first $name is the left-hand side; create its column if needed
    lhs = list[0][1:]
    if not self.names.has_key(lhs):
      self.newcolumn(lhs)
    # rewrite each $name as an indexed array reference
    for item in list:
      name = item[1:]
      column = self.names[name]
      insert = "snap.atoms[i][%d]" % (column)
      eq = eq.replace(item,insert)
    ceq = compile(eq,'','single')
    for snap in self.snaps:
      if not snap.tselect: continue
      for i in xrange(snap.natoms):
        if snap.aselect[i]: exec ceq
# --------------------------------------------------------------------
# set a column value via an input vec for all selected snapshots/atoms
def setv(self,colname,vec):
print "Setting ..."
if not self.names.has_key(colname):
self.newcolumn(colname)
icol = self.names[colname]
for snap in self.snaps:
if not snap.tselect: continue
if snap.nselect != len(vec):
raise StandardError,"vec length does not match # of selected atoms"
atoms = snap.atoms
m = 0
for i in xrange(snap.natoms):
if snap.aselect[i]:
atoms[i][icol] = vec[m]
m += 1
# --------------------------------------------------------------------
# clone value in col across selected timesteps for atoms with same ID
def clone(self,nstep,col):
istep = self.findtime(nstep)
icol = self.names[col]
id = self.names["id"]
ids = {}
for i in xrange(self.snaps[istep].natoms):
ids[self.snaps[istep].atoms[i][id]] = i
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
j = ids[atoms[i][id]]
atoms[i][icol] = self.snaps[istep].atoms[j][icol]
# --------------------------------------------------------------------
# values in old column are spread as ints from 1-N and assigned to new column
def spread(self,old,n,new):
iold = self.names[old]
if not self.names.has_key(new): self.newcolumn(new)
inew = self.names[new]
min,max = self.minmax(old)
print "min/max = ",min,max
gap = max - min
invdelta = n/gap
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
ivalue = int((atoms[i][iold] - min) * invdelta) + 1
if ivalue > n: ivalue = n
if ivalue < 1: ivalue = 1
atoms[i][inew] = ivalue
# --------------------------------------------------------------------
# return vector of selected snapshot time stamps
def time(self):
vec = self.nselect * [0]
i = 0
for snap in self.snaps:
if not snap.tselect: continue
vec[i] = snap.time
i += 1
return vec
# --------------------------------------------------------------------
# extract vector(s) of values for atom ID n at each selected timestep
  def atom(self,n,*list):
    """Return value vector(s) for atom ID n across selected snapshots.

    One requested column name returns a flat list; several return a list
    of lists.  Raises StandardError if no columns are given or the atom
    ID is absent from a selected snapshot."""
    if len(list) == 0:
      raise StandardError, "no columns specified"
    columns = []
    values = []
    for name in list:
      columns.append(self.names[name])
      values.append(self.nselect * [0])
    ncol = len(columns)
    id = self.names["id"]
    m = 0
    for snap in self.snaps:
      if not snap.tselect: continue
      atoms = snap.atoms
      # linear scan for the row holding atom ID n
      for i in xrange(snap.natoms):
        if atoms[i][id] == n: break
      if atoms[i][id] != n:
        raise StandardError, "could not find atom ID in snapshot"
      for j in xrange(ncol):
        values[j][m] = atoms[i][columns[j]]
      m += 1
    if len(list) == 1: return values[0]
    else: return values
# --------------------------------------------------------------------
# extract vector(s) of values for selected atoms at chosen timestep
  def vecs(self,n,*list):
    """Return value vector(s) over all selected atoms in the snapshot with
    timestep n, one list per requested column name.

    One requested column returns a flat list; several return a list of
    lists.  Raises StandardError if no columns are given."""
    snap = self.snaps[self.findtime(n)]
    if len(list) == 0:
      raise StandardError, "no columns specified"
    columns = []
    values = []
    for name in list:
      columns.append(self.names[name])
      values.append(snap.nselect * [0])
    ncol = len(columns)
    m = 0
    for i in xrange(snap.natoms):
      if not snap.aselect[i]: continue
      for j in xrange(ncol):
        values[j][m] = snap.atoms[i][columns[j]]
      m += 1
    if len(list) == 1: return values[0]
    else: return values
# --------------------------------------------------------------------
# add a new column to every snapshot and set value to 0
# set the name of the column to str
  def newcolumn(self,str):
    """Append a zero-filled column to every snapshot and register its name.
    (The parameter named 'str' shadows the builtin; kept unchanged for
    interface compatibility.)"""
    ncol = len(self.snaps[0].atoms[0])
    self.map(ncol+1,str)
    for snap in self.snaps:
      atoms = snap.atoms
      # rebuild each atoms array one column wider, old data preserved
      if oldnumeric: newatoms = np.zeros((snap.natoms,ncol+1),np.Float)
      else: newatoms = np.zeros((snap.natoms,ncol+1),np.float)
      newatoms[:,0:ncol] = snap.atoms
      snap.atoms = newatoms
# --------------------------------------------------------------------
# sort snapshots on time stamp
def compare_time(self,a,b):
if a.time < b.time:
return -1
elif a.time > b.time:
return 1
else:
return 0
# --------------------------------------------------------------------
# delete successive snapshots with duplicate time stamp
def cull(self):
i = 1
while i < len(self.snaps):
if self.snaps[i].time == self.snaps[i-1].time:
del self.snaps[i]
else:
i += 1
# --------------------------------------------------------------------
# iterate over selected snapshots
def iterator(self,flag):
start = 0
if flag: start = self.iterate + 1
for i in xrange(start,self.nsnaps):
if self.snaps[i].tselect:
self.iterate = i
return i,self.snaps[i].time,1
return 0,0,-1
# --------------------------------------------------------------------
# return list of atoms to viz for snapshot isnap
# augment with bonds, tris, lines if extra() was invoked
  def viz(self,isnap):
    """Return (time,box,atoms,bonds,tris,lines) for snapshot index isnap
    in the form expected by the viz tools.

    atoms rows are [id,type,x,y,z] for each selected atom, where 'type' is
    read from the column named by self.atype.  bonds/tris/lines come from
    extra()-loaded data and are empty lists otherwise."""
    snap = self.snaps[isnap]
    time = snap.time
    box = [snap.xlo,snap.ylo,snap.zlo,snap.xhi,snap.yhi,snap.zhi]
    id = self.names["id"]
    type = self.names[self.atype]
    x = self.names["x"]
    y = self.names["y"]
    z = self.names["z"]
    # create atom list needed by viz from id,type,x,y,z
    # need Numeric/Numpy mode here
    atoms = []
    for i in xrange(snap.natoms):
      if not snap.aselect[i]: continue
      atom = snap.atoms[i]
      atoms.append([atom[id],atom[type],atom[x],atom[y],atom[z]])
    # create list of current bond coords from static bondlist
    # alist = dictionary of atom IDs for atoms list
    # lookup bond atom IDs in alist and grab their coords
    # try is used since some atoms may be unselected
    # any bond with unselected atom is not returned to viz caller
    # need Numeric/Numpy mode here
    bonds = []
    if self.bondflag:
      alist = {}
      for i in xrange(len(atoms)): alist[int(atoms[i][0])] = i
      for bond in self.bondlist:
        try:
          i = alist[bond[2]]
          j = alist[bond[3]]
          atom1 = atoms[i]
          atom2 = atoms[j]
          bonds.append([bond[0],bond[1],atom1[2],atom1[3],atom1[4],
                        atom2[2],atom2[3],atom2[4],atom1[1],atom2[1]])
        except: continue
    tris = []
    if self.triflag:
      # static tris are stored directly; dynamic ones come from triobj
      if self.triflag == 1: tris = self.trilist
      elif self.triflag == 2:
        timetmp,boxtmp,atomstmp,bondstmp, \
        tris,linestmp = self.triobj.viz(time,1)
    lines = []
    if self.lineflag: lines = self.linelist
    return time,box,atoms,bonds,tris,lines
# --------------------------------------------------------------------
  def findtime(self,n):
    """Return the snapshot index whose time stamp equals n.

    Raises StandardError if no snapshot has that timestep.
    """
    for i in xrange(self.nsnaps):
      if self.snaps[i].time == n: return i
    raise StandardError, "no step %d exists" % n
# --------------------------------------------------------------------
# return maximum box size across all selected snapshots
def maxbox(self):
xlo = ylo = zlo = None
xhi = yhi = zhi = None
for snap in self.snaps:
if not snap.tselect: continue
if xlo == None or snap.xlo < xlo: xlo = snap.xlo
if xhi == None or snap.xhi > xhi: xhi = snap.xhi
if ylo == None or snap.ylo < ylo: ylo = snap.ylo
if yhi == None or snap.yhi > yhi: yhi = snap.yhi
if zlo == None or snap.zlo < zlo: zlo = snap.zlo
if zhi == None or snap.zhi > zhi: zhi = snap.zhi
return [xlo,ylo,zlo,xhi,yhi,zhi]
# --------------------------------------------------------------------
# return maximum atom type across all selected snapshots and atoms
def maxtype(self):
icol = self.names["type"]
max = 0
for snap in self.snaps:
if not snap.tselect: continue
atoms = snap.atoms
for i in xrange(snap.natoms):
if not snap.aselect[i]: continue
if atoms[i][icol] > max: max = atoms[i][icol]
return int(max)
# --------------------------------------------------------------------
# grab bonds/tris/lines from another object
  def extra(self,arg):
    """Attach bonds/tris/lines from a file name or another Pizza.py object.

    arg = bond dump filename, or a data/cdata/mdump instance (recognized
    by its class module name).  Sets bondflag/bondlist, triflag/trilist
    or triobj, lineflag/linelist accordingly.
    Raises StandardError on any read/extract failure.
    """
    # read bonds from bond dump file
    if type(arg) is types.StringType:
      try:
        f = open(arg,'r')
        # header: ITEM lines alternate with values; only nbonds is kept
        item = f.readline()
        time = int(f.readline())
        item = f.readline()
        nbonds = int(f.readline())
        item = f.readline()
        if not re.search("BONDS",item):
          raise StandardError, "could not read bonds from dump file"
        # slurp all bond lines into one flat word list
        words = f.readline().split()
        ncol = len(words)
        for i in xrange(1,nbonds):
          words += f.readline().split()
        f.close()
        # convert values to int and absolute value since can be negative types
        if oldnumeric: bondlist = np.zeros((nbonds,4),np.Int)
        else: bondlist = np.zeros((nbonds,4),np.int)
        ints = [abs(int(value)) for value in words]
        # take the first 4 of every ncol values: type? id? atom1 atom2
        start = 0
        stop = 4
        for i in xrange(nbonds):
          bondlist[i] = ints[start:stop]
          start += ncol
          stop += ncol
        # NOTE(review): truth-testing a multi-row numpy array here is
        # ambiguous and raises, which the bare except below then reports
        # as a read failure -- confirm with a real bond dump file;
        # len(bondlist) is likely what was intended
        if bondlist:
          self.bondflag = 1
          self.bondlist = bondlist
      except:
        raise StandardError,"could not read from bond dump file"
    # request bonds from data object
    elif type(arg) is types.InstanceType and ".data" in str(arg.__class__):
      try:
        bondlist = []
        bondlines = arg.sections["Bonds"]
        for line in bondlines:
          words = line.split()
          bondlist.append([int(words[0]),int(words[1]),
                           int(words[2]),int(words[3])])
        if bondlist:
          self.bondflag = 1
          self.bondlist = bondlist
      except:
        raise StandardError,"could not extract bonds from data object"
    # request tris/lines from cdata object
    elif type(arg) is types.InstanceType and ".cdata" in str(arg.__class__):
      try:
        tmp,tmp,tmp,tmp,tris,lines = arg.viz(0)
        if tris:
          self.triflag = 1
          self.trilist = tris
        if lines:
          self.lineflag = 1
          self.linelist = lines
      except:
        raise StandardError,"could not extract tris/lines from cdata object"
    # request tris from mdump object
    elif type(arg) is types.InstanceType and ".mdump" in str(arg.__class__):
      try:
        # deferred: viz() will query triobj at render time (triflag == 2)
        self.triflag = 2
        self.triobj = arg
      except:
        raise StandardError,"could not extract tris from mdump object"
    else:
      raise StandardError,"unrecognized argument to dump.extra()"
# --------------------------------------------------------------------
def compare_atom(self,a,b):
if a[0] < b[0]:
return -1
elif a[0] > b[0]:
return 1
else:
return 0
# --------------------------------------------------------------------
# one snapshot
class Snap:
  """Empty container for one snapshot; attributes are attached dynamically
  (e.g. time, natoms, box bounds xlo..zhi, atoms, tselect/aselect/nselect)."""
  pass
# --------------------------------------------------------------------
# time selection class
class tselect:
  """Timestep selection helper, owned by a dump-style object.

  Mutates the owner's snapshots' tselect flags and its nselect count;
  most operations re-select all atoms in the surviving snapshots via
  the owner's aselect.all().
  """
  def __init__(self,data):
    self.data = data
  # --------------------------------------------------------------------
  # select every snapshot (and all atoms in each)
  def all(self):
    data = self.data
    for snap in data.snaps:
      snap.tselect = 1
    data.nselect = len(data.snaps)
    data.aselect.all()
    print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
  # --------------------------------------------------------------------
  # select only the snapshot with timestep n
  def one(self,n):
    data = self.data
    for snap in data.snaps:
      snap.tselect = 0
    i = data.findtime(n)
    data.snaps[i].tselect = 1
    data.nselect = 1
    data.aselect.all()
    print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
  # --------------------------------------------------------------------
  # deselect every snapshot
  def none(self):
    data = self.data
    for snap in data.snaps:
      snap.tselect = 0
    data.nselect = 0
    print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
  # --------------------------------------------------------------------
  # keep every Nth currently-selected snapshot, deselect the rest
  # count starts at n-1 so the first selected snapshot is always kept
  def skip(self,n):
    data = self.data
    count = n-1
    for snap in data.snaps:
      if not snap.tselect: continue
      count += 1
      if count == n:
        count = 0
        continue
      snap.tselect = 0
      data.nselect -= 1
    data.aselect.all()
    print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
  # --------------------------------------------------------------------
  # deselect snapshots for which the Python expression teststr is false;
  # "$t" in teststr stands for the snapshot's timestep
  def test(self,teststr):
    data = self.data
    snaps = data.snaps
    # compiled code assigns into local 'flag' when exec'd below
    cmd = "flag = " + teststr.replace("$t","snaps[i].time")
    ccmd = compile(cmd,'','single')
    for i in xrange(data.nsnaps):
      if not snaps[i].tselect: continue
      exec ccmd
      if not flag:
        snaps[i].tselect = 0
        data.nselect -= 1
    data.aselect.all()
    print "%d snapshots selected out of %d" % (data.nselect,data.nsnaps)
# --------------------------------------------------------------------
# atom selection class
class aselect:
  """Per-atom selection helper, owned by a dump-style object.

  Mutates snap.aselect flags and snap.nselect counts, either across all
  time-selected snapshots or for one timestep.
  """
  def __init__(self,data):
    self.data = data
  # --------------------------------------------------------------------
  # select all atoms: in every time-selected snapshot (no args),
  # or in the single snapshot with timestep args[0]
  def all(self,*args):
    data = self.data
    if len(args) == 0: # all selected timesteps
      for snap in data.snaps:
        if not snap.tselect: continue
        for i in xrange(snap.natoms): snap.aselect[i] = 1
        snap.nselect = snap.natoms
    else: # one timestep
      n = data.findtime(args[0])
      snap = data.snaps[n]
      for i in xrange(snap.natoms): snap.aselect[i] = 1
      snap.nselect = snap.natoms
  # --------------------------------------------------------------------
  # deselect atoms for which the Python expression teststr is false;
  # "$name" in teststr stands for that atom's value in column 'name'
  def test(self,teststr,*args):
    data = self.data
    # replace all $var with snap.atoms references and compile test string
    pattern = "\$\w*"
    list = re.findall(pattern,teststr)
    for item in list:
      name = item[1:]
      column = data.names[name]
      insert = "snap.atoms[i][%d]" % column
      teststr = teststr.replace(item,insert)
    # compiled code assigns into local 'flag' when exec'd below
    cmd = "flag = " + teststr
    ccmd = compile(cmd,'','single')
    if len(args) == 0: # all selected timesteps
      for snap in data.snaps:
        if not snap.tselect: continue
        for i in xrange(snap.natoms):
          if not snap.aselect[i]: continue
          exec ccmd
          if not flag:
            snap.aselect[i] = 0
            snap.nselect -= 1
      # report counts for the first and last time-selected snapshots
      for i in xrange(data.nsnaps):
        if data.snaps[i].tselect:
          print "%d atoms of %d selected in first step %d" % \
              (data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
          break
      for i in xrange(data.nsnaps-1,-1,-1):
        if data.snaps[i].tselect:
          print "%d atoms of %d selected in last step %d" % \
              (data.snaps[i].nselect,data.snaps[i].natoms,data.snaps[i].time)
          break
    else: # one timestep
      n = data.findtime(args[0])
      snap = data.snaps[n]
      for i in xrange(snap.natoms):
        if not snap.aselect[i]: continue
        exec ccmd
        if not flag:
          snap.aselect[i] = 0
          snap.nselect -= 1
| 40,308 | 31.771545 | 79 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/pizza/pdbfile.py | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# pdb tool
oneline = "Read, write PDB files in combo with LAMMPS snapshots"
docstr = """
p = pdbfile("3CRO") create pdb object from PDB file or WWW
p = pdbfile("pep1 pep2") read in multiple PDB files
p = pdbfile("pep*") can use wildcards
p = pdbfile(d) read in snapshot data with no PDB file
p = pdbfile("3CRO",d) read in single PDB file with snapshot data
string arg contains one or more PDB files
don't need .pdb suffix except wildcard must expand to file.pdb
if only one 4-char file specified and it is not found,
it will be downloaded from http://www.rcsb.org as 3CRO.pdb
d arg is object with atom coordinates (dump, data)
p.one() write all output as one big PDB file to tmp.pdb
p.one("mine") write to mine.pdb
p.many() write one PDB file per snapshot: tmp0000.pdb, ...
p.many("mine") write as mine0000.pdb, mine0001.pdb, ...
p.single(N) write timestamp N as tmp.pdb
p.single(N,"new") write as new.pdb
how new PDB files are created depends on constructor inputs:
if no d: one new PDB file for each file in string arg (just a copy)
if only d specified: one new PDB file per snapshot in generic format
if one file in str arg and d: one new PDB file per snapshot
using input PDB file as template
multiple input PDB files with a d is not allowed
index,time,flag = p.iterator(0)
index,time,flag = p.iterator(1)
iterator = loop over number of PDB files
call first time with arg = 0, thereafter with arg = 1
N = length = # of snapshots or # of input PDB files
index = index of snapshot or input PDB file (0 to N-1)
time = timestep value (time stamp for snapshot, index for multiple PDB)
flag = -1 when iteration is done, 1 otherwise
typically call p.single(time) in iterated loop to write out one PDB file
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# ToDo list
# for generic PDB file (no template) from a LJ unit system,
# the atoms in PDB file are too close together
# Variables
# files = list of input PDB files
# data = data object (ccell,data,dump) to read snapshots from
# atomlines = dict of ATOM lines in original PDB file
# key = atom id, value = tuple of (beginning,end) of line
# Imports and external programs
import sys, types, glob, urllib
# Class definition
class pdbfile:
# --------------------------------------------------------------------
def __init__(self,*args):
if len(args) == 1:
if type(args[0]) is types.StringType:
filestr = args[0]
self.data = None
else:
filestr = None
self.data = args[0]
elif len(args) == 2:
filestr = args[0]
self.data = args[1]
else: raise StandardError, "invalid args for pdb()"
# flist = full list of all PDB input file names
# append .pdb if needed
if filestr:
list = filestr.split()
flist = []
for file in list:
if '*' in file: flist += glob.glob(file)
else: flist.append(file)
for i in xrange(len(flist)):
if flist[i][-4:] != ".pdb": flist[i] += ".pdb"
if len(flist) == 0:
raise StandardError,"no PDB file specified"
self.files = flist
else: self.files = []
if len(self.files) > 1 and self.data:
raise StandardError, "cannot use multiple PDB files with data object"
if len(self.files) == 0 and not self.data:
raise StandardError, "no input PDB file(s)"
# grab PDB file from http://rcsb.org if not a local file
if len(self.files) == 1 and len(self.files[0]) == 8:
try:
open(self.files[0],'r').close()
except:
print "downloading %s from http://rcsb.org" % self.files[0]
fetchstr = "http://www.rcsb.org/pdb/cgi/export.cgi/%s?format=PDB&pdbId=2cpk&compression=None" % self.files[0]
urllib.urlretrieve(fetchstr,self.files[0])
if self.data and len(self.files): self.read_template(self.files[0])
# --------------------------------------------------------------------
# write a single large PDB file for concatenating all input data or files
# if data exists:
# only selected atoms returned by extract
# atoms written in order they appear in snapshot
# atom only written if its tag is in PDB template file
# if no data:
# concatenate all input files to one output file
def one(self,*args):
if len(args) == 0: file = "tmp.pdb"
elif args[0][-4:] == ".pdb": file = args[0]
else: file = args[0] + ".pdb"
f = open(file,'w')
# use template PDB file with each snapshot
if self.data:
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
self.convert(f,which)
print >>f,"END"
print time,
sys.stdout.flush()
n += 1
else:
for file in self.files:
f.write(open(file,'r').read())
print >>f,"END"
print file,
sys.stdout.flush()
f.close()
print "\nwrote %d datasets to %s in PDB format" % (n,file)
# --------------------------------------------------------------------
# write series of numbered PDB files
# if data exists:
# only selected atoms returned by extract
# atoms written in order they appear in snapshot
# atom only written if its tag is in PDB template file
# if no data:
# just copy all input files to output files
def many(self,*args):
if len(args) == 0: root = "tmp"
else: root = args[0]
if self.data:
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
if n < 10:
file = root + "000" + str(n)
elif n < 100:
file = root + "00" + str(n)
elif n < 1000:
file = root + "0" + str(n)
else:
file = root + str(n)
file += ".pdb"
f = open(file,'w')
self.convert(f,which)
f.close()
print time,
sys.stdout.flush()
n += 1
else:
n = 0
for infile in self.files:
if n < 10:
file = root + "000" + str(n)
elif n < 100:
file = root + "00" + str(n)
elif n < 1000:
file = root + "0" + str(n)
else:
file = root + str(n)
file += ".pdb"
f = open(file,'w')
f.write(open(infile,'r').read())
f.close()
print file,
sys.stdout.flush()
n += 1
print "\nwrote %d datasets to %s*.pdb in PDB format" % (n,root)
# --------------------------------------------------------------------
# write a single PDB file
# if data exists:
# time is timestamp in snapshot
# only selected atoms returned by extract
# atoms written in order they appear in snapshot
# atom only written if its tag is in PDB template file
# if no data:
# time is index into list of input PDB files
# just copy one input file to output file
def single(self,time,*args):
if len(args) == 0: file = "tmp.pdb"
elif args[0][-4:] == ".pdb": file = args[0]
else: file = args[0] + ".pdb"
f = open(file,'w')
if self.data:
which = self.data.findtime(time)
self.convert(f,which)
else:
f.write(open(self.files[time],'r').read())
f.close()
# --------------------------------------------------------------------
# iterate over list of input files or selected snapshots
# latter is done via data objects iterator
def iterator(self,flag):
if not self.data:
if not flag: self.iterate = 0
else:
self.iterate += 1
if self.iterate > len(self.files): return 0,0,-1
return self.iterate,self.iterate,1
return self.data.iterator(flag)
# --------------------------------------------------------------------
# read a PDB file and store ATOM lines
def read_template(self,file):
lines = open(file,'r').readlines()
self.atomlines = {}
for line in lines:
if line.find("ATOM") == 0:
tag = int(line[4:11])
begin = line[:30]
end = line[54:]
self.atomlines[tag] = (begin,end)
# --------------------------------------------------------------------
# convert one set of atoms to PDB format and write to f
def convert(self,f,which):
time,box,atoms,bonds,tris,lines = self.data.viz(which)
if len(self.files):
for atom in atoms:
id = atom[0]
if self.atomlines.has_key(id):
(begin,end) = self.atomlines[id]
line = "%s%8.3f%8.3f%8.3f%s" % (begin,atom[2],atom[3],atom[4],end)
print >>f,line,
else:
for atom in atoms:
begin = "ATOM %6d %2d R00 1 " % (atom[0],atom[1])
middle = "%8.3f%8.3f%8.3f" % (atom[2],atom[3],atom[4])
end = " 1.00 0.00 NONE"
print >>f,begin+middle+end
| 9,342 | 31.217241 | 117 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/pizza/vmd.py | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# vmd tool
# Minimalistic VMD embedding for Pizza.py
# (c) 2010 Axel Kohlmeyer <akohlmey@gmail.com>
# This class will replace the VMD startup script,
# open a pipe to the executable,
# and feed it Tcl command lines one at a time
oneline = "Control VMD from python"
docstr = """
v = vmd() start up VMD
v.stop() shut down VMD instance
v.clear() delete all visualizations
v.rep(style) set default representation style. One of
(Lines|VDW|Licorice|DynamicBonds|Points|CPK)
v.new(file[,type]) load new file (default file type 'lammpstrj')
v.data(file[,atomstyle]) load new data file (default atom style 'full')
v.replace(file[,type]) replace current frames with new file
v.append(file[,type]) append file to current frame(s)
v.set(snap,x,y,z,(True|False)) set coordinates from a pizza.py snapshot to new or current frame
v.frame(frame) set current frame
v.flush() flush pending input to VMD and update GUI
v.read(file) read Tcl script file (e.g. saved state)
v.enter() enter interactive shell
v.debug([True|False]) display generated VMD script commands?
"""
# History
# 11/10, Axel Kohlmeyer (Temple U): original version
# Imports and external programs
import types, os
import numpy
try: from DEFAULTS import PIZZA_VMDNAME
except: PIZZA_VMDNAME = "vmd"
try: from DEFAULTS import PIZZA_VMDDIR
except: PIZZA_VMDDIR = "/usr/local/lib/vmd"
try: from DEFAULTS import PIZZA_VMDDEV
except: PIZZA_VMDDEV = "win"
try: from DEFAULTS import PIZZA_VMDARCH
except: PIZZA_VMDARCH = "LINUX"
# try these settings for a Mac
#PIZZA_VMDNAME = "vmd"
#PIZZA_VMDDIR = "/Applications/VMD\ 1.8.7.app/Contents/vmd"
#PIZZA_VMDDEV = "win"
#PIZZA_VMDARCH = "MACOSXX86"
try: import pexpect
except:
print "pexpect from http://pypi.python.org/pypi/pexpect", \
"is required for vmd tool"
raise
# Class definition
class vmd:
  """Remote-control a VMD process over a pexpect pipe.

  Replicates the environment the stock vmd launcher script would set,
  spawns the VMD binary directly, then sends Tcl commands one line at a
  time and waits for the 'vmd >' prompt after each.
  """
  # --------------------------------------------------------------------
  # set up launcher-script environment, then spawn VMD
  def __init__(self):
    self.vmddir = PIZZA_VMDDIR
    self.vmdexe = PIZZA_VMDDIR + '/' + PIZZA_VMDNAME + '_' + PIZZA_VMDARCH
    # these are all defaults copied from the vmd launch script
    os.environ['VMDDIR'] = PIZZA_VMDDIR
    os.environ['VMDDISPLAYDEVICE'] = PIZZA_VMDDEV
    os.environ['VMDSCRPOS'] = "596 190"
    os.environ['VMDSCRSIZE'] = "669 834"
    os.environ['VMDSCRHEIGHT'] = "6.0"
    os.environ['VMDSCRDIST'] = "-2.0"
    os.environ['VMDTITLE'] = "on"
    os.environ['TCL_LIBRARY'] = PIZZA_VMDDIR + "/scripts/tcl"
    os.environ['STRIDE_BIN'] = PIZZA_VMDDIR + "/stride_" + PIZZA_VMDARCH
    os.environ['SURF_BIN'] = PIZZA_VMDDIR + "/surf_" + PIZZA_VMDARCH
    os.environ['TACHYON_BIN'] = PIZZA_VMDDIR + "/tachyon_" + PIZZA_VMDARCH
    ldpath = os.environ.get('LD_LIBRARY_PATH','')
    if ldpath == '':
      os.environ['LD_LIBRARY_PATH'] = PIZZA_VMDDIR
    else:
      os.environ['LD_LIBRARY_PATH'] = ldpath + ':' + PIZZA_VMDDIR
    # NOTE(review): this re-reads LD_LIBRARY_PATH (just set above, so it
    # is never empty and the else branch always runs) -- presumably
    # PYTHONPATH was meant to be inspected here; verify against the
    # stock vmd launcher script
    ldpath = os.environ.get('LD_LIBRARY_PATH','')
    if ldpath == '':
      os.environ['PYTHONPATH'] = PIZZA_VMDDIR
    else:
      os.environ['PYTHONPATH'] = PIZZA_VMDDIR + "/scripts/python"
    self.debugme = False
    # open pipe to vmd and wait until we have a prompt
    self.VMD = pexpect.spawn(self.vmdexe)
    self.VMD.expect('vmd >')
  # --------------------------------------------------------------------
  # post command to vmd and wait until the prompt returns.
  def __call__(self,command):
    if self.VMD.isalive():
      self.VMD.sendline(command)
      self.VMD.expect('vmd >')
      if self.debugme:
        print "call+result:"+self.VMD.before
    return
  # --------------------------------------------------------------------
  # exit VMD
  def stop(self):
    self.__call__("quit")
    del self.VMD
  # --------------------------------------------------------------------
  # force VMD display and GUI update.
  def flush(self):
    self.__call__('display update ui')
  # --------------------------------------------------------------------
  # turn on debugging info
  def debug(self,status=True):
    if status and not self.debugme:
      print 'Turning vmd.py debugging ON.'
    if not status and self.debugme:
      print 'Turning vmd.py debugging OFF.'
    self.debugme = status
  # --------------------------------------------------------------------
  # emulate a regular tcl command prompt
  def enter(self,mode='tcl'):
    self.__call__('menu main off')
    self.__call__('menu main on')
    while 1:
      try:
        command = raw_input("vmd > ")
      except EOFError:
        print "(EOF)"
        self.__call__('menu main off')
        return
      if command == "quit" or command == "exit":
        self.__call__('menu main off')
        return
      if command == "gopython":
        print "gopython not supported here"
        continue
      self.__call__(command)
  # --------------------------------------------------------------------
  # read and execute tcl script file (e.g. a saved state)
  def read(self,filename):
    self.__call__('play ' + filename)
    self.flush()
  # --------------------------------------------------------------------
  # remove all molecules, data and visualizations
  def clear(self):
    self.__call__("mol delete all")
  # --------------------------------------------------------------------
  # set the default representation style (one of the six listed)
  def rep(self,style='Lines'):
    if style == 'Lines' or style == 'VDW' or style == 'Licorice' \
          or style == 'DynamicBonds' or style == 'Points' or style == 'CPK':
      self.__call__('mol default style ' + style)
  # --------------------------------------------------------------------
  # navigate to a given frame
  def frame(self,framespec):
    self.__call__('animate goto ' + str(framespec))
  # --------------------------------------------------------------------
  # load a new molecule from a file supported by a molfile plugin
  def new(self,filename,filetype='lammpstrj'):
    self.__call__('mol new ' + filename + ' type ' + filetype + ' waitfor all')
    self.flush()
  # --------------------------------------------------------------------
  # load a new molecule from a data file via the topotools plugin
  def data(self,filename,atomstyle='full'):
    self.__call__('package require topotools 1.0')
    self.__call__('topo readlammpsdata ' + filename + ' ' + atomstyle)
    self.flush()
  # --------------------------------------------------------------------
  # append all frames from a given file to the current molecule,
  # preserving each molecule's current viewpoint
  def append(self,filename,filetype='lammpstrj'):
    self.__call__('set tmol [molinfo top]')
    self.__call__('array set viewpoints {}')
    self.__call__('foreach mol [molinfo list] { set viewpoints($mol) [molinfo $mol get { center_matrix rotate_matrix scale_matrix global_matrix}]}')
    self.__call__('mol addfile ' + filename + ' mol $tmol type ' + filetype + ' waitfor all')
    self.__call__('foreach mol [molinfo list] { molinfo $mol set {center_matrix rotate_matrix scale_matrix global_matrix} $viewpoints($mol)}')
    self.flush()
  # --------------------------------------------------------------------
  # replace all frames of a molecule with those from a given file,
  # preserving viewpoints (advertised as replace() in the module docstr)
  def update(self,filename,filetype='lammpstrj'):
    self.__call__('set tmol [molinfo top]')
    self.__call__('array set viewpoints {}')
    self.__call__('foreach mol [molinfo list] {set viewpoints($mol) [molinfo $mol get { center_matrix rotate_matrix scale_matrix global_matrix}]}')
    self.__call__('animate delete all $tmol')
    self.__call__('mol addfile ' + filename + ' mol $tmol type ' + filetype + ' waitfor all')
    self.__call__('foreach mol [molinfo list] {molinfo $mol set {center_matrix rotate_matrix scale_matrix global_matrix} $viewpoints($mol)}')
    self.flush()
  # --------------------------------------------------------------------
  # add or overwrite coordinates with coordinates in a snapshot
  # snap must be indexable as snap[i,col]; append=True duplicates the
  # current frame first so the original is kept
  def set(self,snap,x,y,z,append=True):
    self.__call__('set vmdsel [atomselect top all]')
    if append:
      self.__call__('animate dup [molinfo top]')
    cmd = '$vmdsel set {x y z} {'
    for idx in range(0,snap.natoms):
      cmd += ' {'+str(snap[idx,x])+' '+str(snap[idx,y])+' '+str(snap[idx,z])+'}'
    cmd += '}'
    self.__call__(cmd)
    self.__call__('$vmdsel delete ; unset vmdsel')
    self.flush()
| 8,758 | 38.102679 | 148 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/python/examples/pizza/vizinfo.py | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# vizinfo class, not a top-level Pizza.py tool
# History
# 8/05, Matt Jones (BYU): original version
# 9/05, Steve Plimpton: added 140-color table
# ToDo list
# Variables
# Imports and external programs
import types
# Class definition
class vizinfo:
  """
  Information holder for Pizza.py visualization tools
  acolor,bcolor,tcolor,lcolor = RGB values for each atom/bond/tri/line type
  arad = radius of each atom type
  brad,lrad = thickness of each bond/line type
  tfill = fill flag for each triangle type
  all of these arrays are indexed by object type which runs 1-Ntype
  nacolor,nbcolor,ntcolor,nlcolor,narad,nbrad,nlrad,ntfill
    are # of types each array holds
    actual length is nacolor+1 so that array can be indexed by 1-Ntype
  setcolors() = set atom/bond/tri/line colors
  setradii() = set atom/bond/line radii/thickness
  setfills() = set triangle fill factor
  extend() = grow an array
  """
  # --------------------------------------------------------------------
  def __init__(self):
    self.acolor = []
    self.arad = []
    self.bcolor = []
    self.brad = []
    self.tcolor = []
    self.tfill = []
    self.lcolor = []
    self.lrad = []
    self.nacolor = self.narad = 0
    self.nbcolor = self.nbrad = 0
    self.ntcolor = self.ntfill = 0
    self.nlcolor = self.nlrad = 0
  # --------------------------------------------------------------------
  # set color RGB for which = atoms, bonds, triangles
  # ids = type(s) to set (0 = all types); rgbs = color name(s) from the
  # module-level colors table, or "loop" to cycle through the table
  def setcolors(self,which,ids,rgbs):
    # convert args into lists if single values
    # if arg = 0, convert to full-range list
    if type(ids) is types.IntType and ids == 0:
      if which == "atom": ids = range(self.nacolor)
      if which == "bond": ids = range(self.nbcolor)
      if which == "tri": ids = range(self.ntcolor)
      if which == "line": ids = range(self.nlcolor)
    if type(ids) is not types.ListType and type(ids) is not types.TupleType:
      ids = [ids]
    if type(rgbs) is not types.ListType and type(rgbs) is not types.TupleType:
      rgbs = [rgbs]
    # if list of types has a 0, increment each type value
    if 0 in ids:
      for i in xrange(len(ids)): ids[i] += 1
    # extend storage list if necessary
    # extend other arrays for same "which" so that gl::make_atom_calllist
    # has valid arrays to work with
    if which == "atom":
      if max(ids) > self.nacolor:
        self.nacolor = self.extend(self.acolor,max(ids))
        self.nacolor = self.extend(self.arad,max(ids))
    if which == "bond":
      if max(ids) > self.nbcolor:
        self.nbcolor = self.extend(self.bcolor,max(ids))
        self.nbcolor = self.extend(self.brad,max(ids))
    if which == "tri":
      if max(ids) > self.ntcolor:
        self.ntcolor = self.extend(self.tcolor,max(ids))
        self.ntcolor = self.extend(self.tfill,max(ids))
    if which == "line":
      if max(ids) > self.nlcolor:
        self.nlcolor = self.extend(self.lcolor,max(ids))
        self.nlcolor = self.extend(self.lrad,max(ids))
    # set color for each type
    # if list lengths match, set directly, else interpolate
    # convert final color from 0-255 to 0.0-1.0
    ntypes = len(ids)
    nrgbs = len(rgbs)
    for i in xrange(ntypes):
      id = ids[i]
      if rgbs[0] == "loop":
        # cycle through the color table in its (arbitrary) key order
        list = colors.keys()
        red,green,blue = colors[list[i % len(colors)]]
      elif ntypes == nrgbs:
        red,green,blue = colors[rgbs[i]]
      else:
        # linear interpolation between neighboring named colors
        r = i/float(ntypes-1) * float(nrgbs-1)
        jlo = int(r)
        jhi = jlo + 1
        if jhi == nrgbs: jhi = nrgbs - 1
        clo = colors[rgbs[jlo]]
        chi = colors[rgbs[jhi]]
        delta = r - jlo
        red = clo[0] + delta*(chi[0]-clo[0])
        green = clo[1] + delta*(chi[1]-clo[1])
        blue = clo[2] + delta*(chi[2]-clo[2])
      color = [red/255.0,green/255.0,blue/255.0]
      if which == "atom": self.acolor[id] = color
      if which == "bond": self.bcolor[id] = color
      if which == "tri": self.tcolor[id] = color
      if which == "line": self.lcolor[id] = color
  # --------------------------------------------------------------------
  # set radii for which = atoms, bonds, lines
  # ids = type(s) to set (0 = all types); radii = value(s), interpolated
  # across the id range when list lengths differ
  def setradii(self,which,ids,radii):
    # convert args into lists if single values
    # if arg = 0, convert to full-range list
    if type(ids) is types.IntType and ids == 0:
      if which == "atom": ids = range(self.narad)
      if which == "bond": ids = range(self.nbrad)
      if which == "line": ids = range(self.nlrad)
    if type(ids) is not types.ListType and type(ids) is not types.TupleType:
      ids = [ids]
    if type(radii) is not types.ListType and \
           type(radii) is not types.TupleType:
      radii = [radii]
    # if list of types has a 0, increment each type value
    if 0 in ids:
      for i in xrange(len(ids)): ids[i] += 1
    # extend storage list if necessary
    # extend other arrays for same "which" so that gl::make_atom_calllist
    # has valid arrays to work with
    if which == "atom":
      if max(ids) > self.narad:
        self.narad = self.extend(self.arad,max(ids))
        self.narad = self.extend(self.acolor,max(ids))
    if which == "bond":
      if max(ids) > self.nbrad:
        self.nbrad = self.extend(self.brad,max(ids))
        self.nbrad = self.extend(self.bcolor,max(ids))
    if which == "line":
      if max(ids) > self.nlrad:
        self.nlrad = self.extend(self.lrad,max(ids))
        self.nlrad = self.extend(self.lcolor,max(ids))
    # set radius for each type
    # if list lengths match, set directly, else interpolate
    ntypes = len(ids)
    nradii = len(radii)
    for i in range(ntypes):
      id = ids[i]
      if ntypes == nradii: rad = radii[i]
      else:
        r = i/float(ntypes-1) * float(nradii-1)
        jlo = int(r)
        jhi = jlo + 1
        if jhi == nradii: jhi = nradii - 1
        rlo = radii[jlo]
        rhi = radii[jhi]
        delta = r - jlo
        rad = rlo + delta*(rhi-rlo)
      if which == "atom": self.arad[id] = rad
      if which == "bond": self.brad[id] = rad
      if which == "line": self.lrad[id] = rad
  # --------------------------------------------------------------------
  # set triangle fill style
  # 0 = fill only, 1 = line only, 2 = fill and line
  def setfills(self,which,ids,fills):
    # convert args into lists if single values
    # if arg = 0, convert to full-range list
    if type(ids) is types.IntType and ids == 0:
      ids = range(self.ntfill)
    if type(ids) is not types.ListType and type(ids) is not types.TupleType:
      ids = [ids]
    if type(fills) is not types.ListType and \
           type(fills) is not types.TupleType:
      fills = [fills]
    # if list of types has a 0, increment each type value
    if 0 in ids:
      for i in xrange(len(ids)): ids[i] += 1
    # extend storage list if necessary
    # extend other arrays for same "which" so that gl::make_atom_calllist
    # has valid arrays to work with
    if max(ids) > self.ntfill:
      self.ntfill = self.extend(self.tfill,max(ids))
      self.ntfill = self.extend(self.tcolor,max(ids))
    # set fill flag for each type
    # if list lengths match, set directly, else set types to 1st fill value
    if len(fills) == len(ids):
      for i in xrange(len(ids)): self.tfill[ids[i]] = int(fills[i])
    else:
      for id in ids: self.tfill[id] = int(fills[0])
  # --------------------------------------------------------------------
  # zero-pad array so it can be indexed up to n inclusive; returns n
  def extend(self,array,n):
    for i in range(n-len(array)+1): array.append(0)
    return n
# --------------------------------------------------------------------
# dictionary of 140 color names and associated RGB values
# dictionary of the 140 standard X11/CSS color names and their RGB values
# (0-255 per channel); users may add nicknames or retune values in place
# NOTE: four entries were corrected to the standard values:
#   blanchedalmond, ivory, peachpuff (was a copy of papayawhip), tomato
colors = {}
colors["aliceblue"] = [240, 248, 255]
colors["antiquewhite"] = [250, 235, 215]
colors["aqua"] = [0, 255, 255]
colors["aquamarine"] = [127, 255, 212]
colors["azure"] = [240, 255, 255]
colors["beige"] = [245, 245, 220]
colors["bisque"] = [255, 228, 196]
colors["black"] = [0, 0, 0]
colors["blanchedalmond"] = [255, 235, 205]
colors["blue"] = [0, 0, 255]
colors["blueviolet"] = [138, 43, 226]
colors["brown"] = [165, 42, 42]
colors["burlywood"] = [222, 184, 135]
colors["cadetblue"] = [95, 158, 160]
colors["chartreuse"] = [127, 255, 0]
colors["chocolate"] = [210, 105, 30]
colors["coral"] = [255, 127, 80]
colors["cornflowerblue"] = [100, 149, 237]
colors["cornsilk"] = [255, 248, 220]
colors["crimson"] = [220, 20, 60]
colors["cyan"] = [0, 255, 255]
colors["darkblue"] = [0, 0, 139]
colors["darkcyan"] = [0, 139, 139]
colors["darkgoldenrod"] = [184, 134, 11]
colors["darkgray"] = [169, 169, 169]
colors["darkgreen"] = [0, 100, 0]
colors["darkkhaki"] = [189, 183, 107]
colors["darkmagenta"] = [139, 0, 139]
colors["darkolivegreen"] = [85, 107, 47]
colors["darkorange"] = [255, 140, 0]
colors["darkorchid"] = [153, 50, 204]
colors["darkred"] = [139, 0, 0]
colors["darksalmon"] = [233, 150, 122]
colors["darkseagreen"] = [143, 188, 143]
colors["darkslateblue"] = [72, 61, 139]
colors["darkslategray"] = [47, 79, 79]
colors["darkturquoise"] = [0, 206, 209]
colors["darkviolet"] = [148, 0, 211]
colors["deeppink"] = [255, 20, 147]
colors["deepskyblue"] = [0, 191, 255]
colors["dimgray"] = [105, 105, 105]
colors["dodgerblue"] = [30, 144, 255]
colors["firebrick"] = [178, 34, 34]
colors["floralwhite"] = [255, 250, 240]
colors["forestgreen"] = [34, 139, 34]
colors["fuchsia"] = [255, 0, 255]
colors["gainsboro"] = [220, 220, 220]
colors["ghostwhite"] = [248, 248, 255]
colors["gold"] = [255, 215, 0]
colors["goldenrod"] = [218, 165, 32]
colors["gray"] = [128, 128, 128]
colors["green"] = [0, 128, 0]
colors["greenyellow"] = [173, 255, 47]
colors["honeydew"] = [240, 255, 240]
colors["hotpink"] = [255, 105, 180]
colors["indianred"] = [205, 92, 92]
colors["indigo"] = [75, 0, 130]
colors["ivory"] = [255, 255, 240]
colors["khaki"] = [240, 230, 140]
colors["lavender"] = [230, 230, 250]
colors["lavenderblush"] = [255, 240, 245]
colors["lawngreen"] = [124, 252, 0]
colors["lemonchiffon"] = [255, 250, 205]
colors["lightblue"] = [173, 216, 230]
colors["lightcoral"] = [240, 128, 128]
colors["lightcyan"] = [224, 255, 255]
colors["lightgoldenrodyellow"] = [250, 250, 210]
colors["lightgreen"] = [144, 238, 144]
colors["lightgrey"] = [211, 211, 211]
colors["lightpink"] = [255, 182, 193]
colors["lightsalmon"] = [255, 160, 122]
colors["lightseagreen"] = [32, 178, 170]
colors["lightskyblue"] = [135, 206, 250]
colors["lightslategray"] = [119, 136, 153]
colors["lightsteelblue"] = [176, 196, 222]
colors["lightyellow"] = [255, 255, 224]
colors["lime"] = [0, 255, 0]
colors["limegreen"] = [50, 205, 50]
colors["linen"] = [250, 240, 230]
colors["magenta"] = [255, 0, 255]
colors["maroon"] = [128, 0, 0]
colors["mediumaquamarine"] = [102, 205, 170]
colors["mediumblue"] = [0, 0, 205]
colors["mediumorchid"] = [186, 85, 211]
colors["mediumpurple"] = [147, 112, 219]
colors["mediumseagreen"] = [60, 179, 113]
colors["mediumslateblue"] = [123, 104, 238]
colors["mediumspringgreen"] = [0, 250, 154]
colors["mediumturquoise"] = [72, 209, 204]
colors["mediumvioletred"] = [199, 21, 133]
colors["midnightblue"] = [25, 25, 112]
colors["mintcream"] = [245, 255, 250]
colors["mistyrose"] = [255, 228, 225]
colors["moccasin"] = [255, 228, 181]
colors["navajowhite"] = [255, 222, 173]
colors["navy"] = [0, 0, 128]
colors["oldlace"] = [253, 245, 230]
colors["olive"] = [128, 128, 0]
colors["olivedrab"] = [107, 142, 35]
colors["orange"] = [255, 165, 0]
colors["orangered"] = [255, 69, 0]
colors["orchid"] = [218, 112, 214]
colors["palegoldenrod"] = [238, 232, 170]
colors["palegreen"] = [152, 251, 152]
colors["paleturquoise"] = [175, 238, 238]
colors["palevioletred"] = [219, 112, 147]
colors["papayawhip"] = [255, 239, 213]
colors["peachpuff"] = [255, 218, 185]
colors["peru"] = [205, 133, 63]
colors["pink"] = [255, 192, 203]
colors["plum"] = [221, 160, 221]
colors["powderblue"] = [176, 224, 230]
colors["purple"] = [128, 0, 128]
colors["red"] = [255, 0, 0]
colors["rosybrown"] = [188, 143, 143]
colors["royalblue"] = [65, 105, 225]
colors["saddlebrown"] = [139, 69, 19]
colors["salmon"] = [250, 128, 114]
colors["sandybrown"] = [244, 164, 96]
colors["seagreen"] = [46, 139, 87]
colors["seashell"] = [255, 245, 238]
colors["sienna"] = [160, 82, 45]
colors["silver"] = [192, 192, 192]
colors["skyblue"] = [135, 206, 235]
colors["slateblue"] = [106, 90, 205]
colors["slategray"] = [112, 128, 144]
colors["snow"] = [255, 250, 250]
colors["springgreen"] = [0, 255, 127]
colors["steelblue"] = [70, 130, 180]
colors["tan"] = [210, 180, 140]
colors["teal"] = [0, 128, 128]
colors["thistle"] = [216, 191, 216]
colors["tomato"] = [255, 99, 71]
colors["turquoise"] = [64, 224, 208]
colors["violet"] = [238, 130, 238]
colors["wheat"] = [245, 222, 179]
colors["white"] = [255, 255, 255]
colors["whitesmoke"] = [245, 245, 245]
colors["yellow"] = [255, 255, 0]
colors["yellowgreen"] = [154, 205, 50]
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# gl tool
oneline = "3d interactive visualization via OpenGL"
docstr = """
g = gl(d) create OpenGL display for data in d
d = atom snapshot object (dump, data)
g.bg("black") set background color (def = "black")
g.size(N) set image size to NxN
g.size(N,M) set image size to NxM
g.rotate(60,135) view from z theta and azimuthal phi (def = 60,30)
g.shift(x,y) translate by x,y pixels in view window (def = 0,0)
g.zoom(0.5) scale image by factor (def = 1)
g.box(0/1/2) 0/1/2 = none/variable/fixed box
g.box(0/1/2,"green") set box color
g.box(0/1/2,"red",4) set box edge thickness
g.file = "image" file prefix for created images (def = "image")
g.show(N) show image of snapshot at timestep N
g.all() make images of all selected snapshots
g.all(P) images of all, start file label at P
g.all(N,M,P) make M images of snapshot N, start label at P
g.pan(60,135,1.0,40,135,1.5) pan during all() operation
g.pan() no pan during all() (default)
args = z theta, azimuthal phi, zoom factor at beginning and end
values at each step are interpolated between beginning and end values
g.select = "$x > %g*3.0" string to pass to d.aselect.test() during all()
g.select = "" no extra aselect (default)
%g varies from 0.0 to 1.0 from beginning to end of all()
g.acol(2,"green") set atom colors by atom type (1-N)
g.acol([2,4],["red","blue"]) 1st arg = one type or list of types
g.acol(0,"blue") 2nd arg = one color or list of colors
g.acol(range(20),["red","blue"]) if list lengths unequal, interpolate
g.acol(range(10),"loop") assign colors in loop, randomly ordered
if 1st arg is 0, set all types to 2nd arg
if list of types has a 0 (e.g. range(10)), +1 is added to each value
interpolate means colors blend smoothly from one value to the next
g.arad([1,2],[0.5,0.3]) set atom radii, same rules as acol()
g.bcol() set bond color, same args as acol()
g.brad() set bond thickness, same args as arad()
g.tcol() set triangle color, same args as acol()
g.tfill() set triangle fill, 0 fill, 1 line, 2 both
g.lcol() set line color, same args as acol()
g.lrad() set line thickness, same args as arad()
g.adef() set atom/bond/tri/line properties to default
g.bdef() default = "loop" for colors, 0.45 for radii
g.tdef() default = 0.25 for bond/line thickness
g.ldef() default = 0 fill
by default 100 types are assigned
if atom/bond/tri/line has type > # defined properties, is an error
from vizinfo import colors access color list
print colors list defined color names and RGB values
colors["nickname"] = [R,G,B] set new RGB values from 0 to 255
140 pre-defined colors: red, green, blue, purple, yellow, black, white, etc
Settings specific to gl tool:
g.q(10) set quality of image (def = 5)
g.axis(0/1) turn xyz axes off/on
g.ortho(0/1) perspective (0) vs orthographic (1) view
g.clip('xlo',0.25) clip in xyz from lo/hi at box fraction (0-1)
g.reload() force all data to be reloaded
g.cache = 0/1 turn off/on GL cache lists (def = on)
theta,phi,x,y,scale,up = g.gview() grab all current view parameters
g.sview(theta,phi,x,y,scale,up) set all view parameters
data reload is necessary if dump selection is used to change the data
cache lists usually improve graphics performance
gview returns values to use in other commands:
theta,phi are args to rotate()
x,y are args to shift()
scale is arg to zoom()
up is a 3-vector arg to sview()
"""
# History
# 9/05, Steve Plimpton (SNL): original version
# ToDo list
# when do aselect with select str while looping N times on same timestep
# would not let you grow # of atoms selected
# Variables
# ztheta = vertical angle from z-azis of viewpoint
# azphi = azimuthal angle of viewpoint
# xshift,yshift = xy translation of scene (in pixels)
# distance = size of simulation box (largest dim)
# eye = viewpoint distance from center of scene
# file = filename prefix to use for images produced
# boxflag = 0/1/2 for drawing simulation box: none/variable/fixed
# bxcol = color of box
# bxthick = thickness of box lines
# bgcol = color of background
# vizinfo = scene attributes
# center[3] = center point of simulation box
# view[3] = direction towards eye in simulation box (unit vector)
# up[3] = screen up direction in simulation box (unit vector)
# right[3] = screen right direction in simulation box (unit vector)
# Imports and external programs
from math import sin,cos,sqrt,pi,acos
from OpenGL.Tk import *
from OpenGL.GLUT import *
import Image
from vizinfo import vizinfo
# Class definition
class gl:
# --------------------------------------------------------------------
  def __init__(self,data):
    """Create an interactive GL viewer for snapshot object data (dump, data)."""
    self.data = data
    self.root = None
    # window size in pixels
    self.xpixels = 512
    self.ypixels = 512
    # viewpoint: angle from +z axis and azimuthal angle (degrees)
    self.ztheta = 60
    self.azphi = 30
    self.scale = 1.0
    self.xshift = self.yshift = 0
    # image-file prefix and box/background appearance
    self.file = "image"
    self.boxflag = 0
    self.bxcol = [1,1,0]
    self.bxthick = 0.3
    self.bgcol = [0,0,0]
    self.labels = []
    self.panflag = 0
    self.select = ""
    # rendering options: axes, projection, sphere/cylinder tessellation
    self.axisflag = 0
    self.orthoflag = 1
    self.nslices = 5
    self.nstacks = 5
    self.nsides = 10
    self.theta_amplify = 2
    self.shiny = 2
    # clip planes as box fractions (0-1) in each dimension
    self.clipflag = 0
    self.clipxlo = self.clipylo = self.clipzlo = 0.0
    self.clipxhi = self.clipyhi = self.clipzhi = 1.0
    self.nclist = 0
    self.calllist = [0] # indexed by 1-Ntype, so start with 0 index
    self.cache = 1
    self.cachelist = 0
    # per-snapshot geometry currently being drawn
    self.boxdraw = []
    self.atomdraw = []
    self.bonddraw = []
    self.tridraw = []
    self.linedraw = []
    # ready stays 0 until a scene is loaded, so the redraw triggered by
    # create_window() below is a no-op
    self.ready = 0
    self.create_window()
    # per-type color/radius/fill attributes with defaults
    self.vizinfo = vizinfo()
    self.adef()
    self.bdef()
    self.tdef()
    self.ldef()
    # camera basis vectors, filled in by viewupright()
    self.center = 3*[0]
    self.view = 3*[0]
    self.up = 3*[0]
    self.right = 3*[0]
    self.viewupright()
# --------------------------------------------------------------------
def bg(self,color):
from vizinfo import colors
self.bgcol = [colors[color][0]/255.0,colors[color][1]/255.0,
colors[color][2]/255.0]
self.w.tkRedraw()
# --------------------------------------------------------------------
def size(self,xnew,ynew=None):
self.xpixels = xnew
if not ynew: self.ypixels = self.xpixels
else: self.ypixels = ynew
self.create_window()
# --------------------------------------------------------------------
def axis(self,value):
self.axisflag = value
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
  def create_window(self):
    """(Re)create the Tk toplevel and its OpenGL widget at current size."""
    # destroy any previous window so only one viewer exists
    if self.root: self.root.destroy()
    # tkroot is created by the Pizza.py main program
    from __main__ import tkroot
    self.root = Toplevel(tkroot)
    self.root.title('Pizza.py gl tool')
    self.w = MyOpengl(self.root,width=self.xpixels,height=self.ypixels,
                      double=1,depth=1)
    self.w.pack(expand=YES)
#    self.w.pack(expand=YES,fill=BOTH)
    # one-time GL state: lighting, z-buffer, two-sided lighting, filled polys
    glViewport(0,0,self.xpixels,self.ypixels)
    glEnable(GL_LIGHTING);
    glEnable(GL_LIGHT0);
    glEnable(GL_DEPTH_TEST);
    glLightModeli(GL_LIGHT_MODEL_TWO_SIDE,GL_TRUE);
    glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)
    # trackball radius = larger window dimension
    self.rtrack = self.xpixels
    if self.ypixels > self.xpixels: self.rtrack = self.ypixels
    self.w.redraw = self.redraw
    self.w.parent = self
    self.w.tkRedraw()
    tkroot.update_idletasks() # force window to appear
# --------------------------------------------------------------------
def clip(self,which,value):
if which == "xlo":
self.clipxlo = value
if value > self.clipxhi: self.clipxlo = self.clipxhi
elif which == "xhi":
self.clipxhi = value
if value < self.clipxlo: self.clipxhi = self.clipxlo
elif which == "ylo":
self.clipylo = value
if value > self.clipyhi: self.clipylo = self.clipyhi
elif which == "yhi":
self.clipyhi = value
if value < self.clipylo: self.clipyhi = self.clipylo
elif which == "zlo":
self.clipzlo = value
if value > self.clipzhi: self.clipzlo = self.clipzhi
elif which == "zhi":
self.clipzhi = value
if value < self.clipzlo: self.clipzhi = self.clipzlo
oldflag = self.clipflag
if self.clipxlo > 0 or self.clipylo > 0 or self.clipzlo > 0 or \
self.clipxhi < 1 or self.clipyhi < 1 or self.clipzhi < 1:
self.clipflag = 1
else: self.clipflag = 0
if oldflag == 0 and self.clipflag == 0: return
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def q(self,value):
self.nslices = value
self.nstacks = value
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def ortho(self,value):
self.orthoflag = value
self.w.tkRedraw()
# --------------------------------------------------------------------
# set unit vectors for view,up,right from ztheta,azphi
# assume +z in scene should be up on screen (unless looking down z-axis)
# right = up x view
def viewupright(self):
self.view[0] = cos(pi*self.azphi/180) * sin(pi*self.ztheta/180)
self.view[1] = sin(pi*self.azphi/180) * sin(pi*self.ztheta/180)
self.view[2] = cos(pi*self.ztheta/180)
if self.ztheta == 0.0:
self.up[0] = cos(pi*self.azphi/180)
self.up[1] = -sin(pi*self.azphi/180)
self.up[2] = 0.0
elif self.ztheta == 180.0:
self.up[0] = cos(pi*self.azphi/180)
self.up[1] = sin(pi*self.azphi/180)
self.up[2] = 0.0
else:
dot = self.view[2] # dot = (0,0,1) . view
self.up[0] = -dot*self.view[0] # up projected onto v = dot * v
self.up[1] = -dot*self.view[1] # up perp to v = up - dot * v
self.up[2] = 1.0 - dot*self.view[2]
self.up = vecnorm(self.up)
self.right = veccross(self.up,self.view)
# --------------------------------------------------------------------
# reset ztheta,azphi and thus view,up.right
# called as function from Pizza.py
def rotate(self,ztheta,azphi):
self.ztheta = ztheta
self.azphi = azphi
self.viewupright()
self.setview()
self.w.tkRedraw()
# --------------------------------------------------------------------
# return all view params to reproduce current display via sview()
def gview(self):
return self.ztheta,self.azphi,self.xshift,self.yshift,self.scale,self.up
# --------------------------------------------------------------------
# set current view, called by user with full set of view params
# up is not settable via any other call, all other params are
def sview(self,ztheta,azphi,xshift,yshift,scale,up):
self.ztheta = ztheta
self.azphi = azphi
self.xshift = xshift
self.yshift = yshift
self.scale = scale
self.up[0] = up[0]
self.up[1] = up[1]
self.up[2] = up[2]
self.up = vecnorm(self.up)
self.view[0] = cos(pi*self.azphi/180) * sin(pi*self.ztheta/180)
self.view[1] = sin(pi*self.azphi/180) * sin(pi*self.ztheta/180)
self.view[2] = cos(pi*self.ztheta/180)
self.right = veccross(self.up,self.view)
self.setview()
self.w.tkRedraw()
# --------------------------------------------------------------------
# rotation triggered by mouse trackball
# project old,new onto unit trackball surf
# rotate view,up around axis of rotation = old x new
# right = up x view
# reset ztheta,azphi from view
  def mouse_rotate(self,xnew,ynew,xold,yold):
    """Trackball rotation from a mouse drag (xold,yold) -> (xnew,ynew).

    Pixel coords are projected onto a virtual sphere of radius rtrack;
    the rotation axis is the cross product of the two projections.
    Updates view/up/right and re-derives ztheta/azphi.
    """
    # change y pixels to measure from bottom of window instead of top
    yold = self.ypixels - yold
    ynew = self.ypixels - ynew
    # vold = unit vector to (xold,yold) projected onto trackball
    # vnew = unit vector to (xnew,ynew) projected onto trackball
    # return (no rotation) if either projection point is outside rtrack
    vold = [0,0,0]
    vold[0] = xold - (0.5*self.xpixels + self.xshift)
    vold[1] = yold - (0.5*self.ypixels + self.yshift)
    vold[2] = self.rtrack*self.rtrack - vold[0]*vold[0] - vold[1]*vold[1]
    if vold[2] < 0: return
    vold[2] = sqrt(vold[2])
    vold = vecnorm(vold)
    vnew = [0,0,0]
    vnew[0] = xnew - (0.5*self.xpixels + self.xshift)
    vnew[1] = ynew - (0.5*self.ypixels + self.yshift)
    vnew[2] = self.rtrack*self.rtrack - vnew[0]*vnew[0] - vnew[1]*vnew[1]
    if vnew[2] < 0: return
    vnew[2] = sqrt(vnew[2])
    vnew = vecnorm(vnew)
    # rot = trackball rotation axis in screen ref frame = vold x vnew
    # theta = angle of rotation = sin(theta) for small theta
    # axis = rotation axis in body ref frame described by right,up,view
    rot = veccross(vold,vnew)
    theta = sqrt(rot[0]*rot[0] + rot[1]*rot[1] + rot[2]*rot[2])
    theta *= self.theta_amplify
    axis = [0,0,0]
    axis[0] = rot[0]*self.right[0] + rot[1]*self.up[0] + rot[2]*self.view[0]
    axis[1] = rot[0]*self.right[1] + rot[1]*self.up[1] + rot[2]*self.view[1]
    axis[2] = rot[0]*self.right[2] + rot[1]*self.up[2] + rot[2]*self.view[2]
    axis = vecnorm(axis)
    # view is changed by (axis x view) scaled by theta
    # up is changed by (axis x up) scaled by theta
    # force up to be perp to view via up_perp = up - (up . view) view
    # right = up x view
    delta = veccross(axis,self.view)
    self.view[0] -= theta*delta[0]
    self.view[1] -= theta*delta[1]
    self.view[2] -= theta*delta[2]
    self.view = vecnorm(self.view)
    delta = veccross(axis,self.up)
    self.up[0] -= theta*delta[0]
    self.up[1] -= theta*delta[1]
    self.up[2] -= theta*delta[2]
    dot = vecdot(self.up,self.view)
    self.up[0] -= dot*self.view[0]
    self.up[1] -= dot*self.view[1]
    self.up[2] -= dot*self.view[2]
    self.up = vecnorm(self.up)
    self.right = veccross(self.up,self.view)
    # convert new view to ztheta,azphi
    self.ztheta = acos(self.view[2])/pi * 180.0
    if (self.ztheta == 0.0): self.azphi = 0.0
    else: self.azphi = acos(self.view[0]/sin(pi*self.ztheta/180.0))/pi * 180.0
    if self.view[1] < 0: self.azphi = 360.0 - self.azphi
    self.setview()
    self.w.tkRedraw()
# --------------------------------------------------------------------
def shift(self,x,y):
self.xshift = x;
self.yshift = y;
self.setview()
self.w.tkRedraw()
# --------------------------------------------------------------------
def zoom(self,scale):
self.scale = scale
self.setview()
self.w.tkRedraw()
# --------------------------------------------------------------------
# set view params needed by redraw
# input: center = center of box
# distance = size of scene (longest box length)
# scale = zoom factor (1.0 = no zoom)
# xshift,yshift = translation factor in pixels
# view = unit vector from center to viewpoint
# up = unit vector in up direction in scene
# right = unit vector in right direction in scene
# output: eye = distance to view scene from
# xto,yto,zto = point to look to
# xfrom,yfrom,zfrom = point to look from
def setview(self):
if not self.ready: return # no distance since no scene yet
self.eye = 3 * self.distance / self.scale
xfactor = 0.5*self.eye*self.xshift/self.xpixels
yfactor = 0.5*self.eye*self.yshift/self.ypixels
self.xto = self.center[0] - xfactor*self.right[0] - yfactor*self.up[0]
self.yto = self.center[1] - xfactor*self.right[1] - yfactor*self.up[1]
self.zto = self.center[2] - xfactor*self.right[2] - yfactor*self.up[2]
self.xfrom = self.xto + self.eye*self.view[0]
self.yfrom = self.yto + self.eye*self.view[1]
self.zfrom = self.zto + self.eye*self.view[2]
# --------------------------------------------------------------------
# box attributes, also used for triangle lines
def box(self,*args):
self.boxflag = args[0]
if len(args) > 1:
from vizinfo import colors
self.bxcol = [colors[args[1]][0]/255.0,colors[args[1]][1]/255.0,
colors[args[1]][2]/255.0]
if len(args) > 2: self.bxthick = args[2]
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
# grab all selected snapshots from data object
# add GL-specific info to each bond
  def reload(self):
    """Re-read every selected snapshot from the data object into the
    per-frame lists; call after changing the data tool's selection."""
    print "Loading data into gl tool ..."
    data = self.data
    self.timeframes = []
    self.boxframes = []
    self.atomframes = []
    self.bondframes = []
    self.triframes = []
    self.lineframes = []
    # boxflag == 2 means one fixed box spanning all selected steps
    box = []
    if self.boxflag == 2: box = data.maxbox()
    flag = 0
    while 1:
      which,time,flag = data.iterator(flag)
      if flag == -1: break
      time,boxone,atoms,bonds,tris,lines = data.viz(which)
      if self.boxflag < 2: box = boxone
      # add GL-specific orientation info to each bond
      if bonds: self.bonds_augment(bonds)
      self.timeframes.append(time)
      self.boxframes.append(box)
      self.atomframes.append(atoms)
      self.bondframes.append(bonds)
      self.triframes.append(tris)
      self.lineframes.append(lines)
      print time,
      sys.stdout.flush()
    print
    self.nframes = len(self.timeframes)
    # scene size/center come from the 1st stored box
    self.distance = compute_distance(self.boxframes[0])
    self.center = compute_center(self.boxframes[0])
    self.ready = 1
    self.setview()
# --------------------------------------------------------------------
def nolabel(self):
self.cachelist = -self.cachelist
self.labels = []
# --------------------------------------------------------------------
# show a single snapshot
# distance from snapshot box or max box for all selected steps
def show(self,ntime):
data = self.data
which = data.findtime(ntime)
time,box,atoms,bonds,tris,lines = data.viz(which)
if self.boxflag == 2: box = data.maxbox()
self.distance = compute_distance(box)
self.center = compute_center(box)
if bonds: self.bonds_augment(bonds)
self.boxdraw = box
self.atomdraw = atoms
self.bonddraw = bonds
self.tridraw = tris
self.linedraw = lines
self.ready = 1
self.setview()
self.cachelist = -self.cachelist
self.w.tkRedraw()
self.save()
# --------------------------------------------------------------------
def pan(self,*list):
if len(list) == 0: self.panflag = 0
else:
self.panflag = 1
self.ztheta_start = list[0]
self.azphi_start = list[1]
self.scale_start = list[2]
self.ztheta_stop = list[3]
self.azphi_stop = list[4]
self.scale_stop = list[5]
# --------------------------------------------------------------------
  def all(self,*list):
    """Make images of snapshots and save them as numbered files.

    g.all() = every selected snapshot, labels from 0
    g.all(P) = every selected snapshot, labels from P
    g.all(N,M,P) = M images of snapshot N, labels from P
    Honors pan() settings and the per-step select string.
    """
    data = self.data
    if len(list) == 0:
      nstart = 0
      ncount = data.nselect
    elif len(list) == 1:
      nstart = list[0]
      ncount = data.nselect
    else:
      ntime = list[0]
      nstart = list[2]
      ncount = list[1]
    if self.boxflag == 2: box = data.maxbox()
    # loop over all selected steps
    # distance from 1st snapshot box or max box for all selected steps
    # recompute box center on 1st step or if panning
    if len(list) <= 1:
      n = nstart
      i = flag = 0
      while 1:
        which,time,flag = data.iterator(flag)
        if flag == -1: break
        fraction = float(i) / (ncount-1)
        if self.select != "":
          newstr = self.select % fraction
          data.aselect.test(newstr,time)
        time,boxone,atoms,bonds,tris,lines = data.viz(which)
        if self.boxflag < 2: box = boxone
        if n == nstart: self.distance = compute_distance(box)
        # zero-pad the file label to 4 digits
        if n < 10: file = self.file + "000" + str(n)
        elif n < 100: file = self.file + "00" + str(n)
        elif n < 1000: file = self.file + "0" + str(n)
        else: file = self.file + str(n)
        # interpolate view angles/zoom between pan start and stop values
        if self.panflag:
          self.ztheta = self.ztheta_start + \
                        fraction*(self.ztheta_stop - self.ztheta_start)
          self.azphi = self.azphi_start + \
                       fraction*(self.azphi_stop - self.azphi_start)
          self.scale = self.scale_start + \
                       fraction*(self.scale_stop - self.scale_start)
          self.viewupright()
        if n == nstart or self.panflag: self.center = compute_center(box)
        if bonds: self.bonds_augment(bonds)
        self.boxdraw = box
        self.atomdraw = atoms
        self.bonddraw = bonds
        self.tridraw = tris
        self.linedraw = lines
        self.ready = 1
        self.setview()
        self.cachelist = -self.cachelist
        self.w.tkRedraw()
        self.save(file)
        print time,
        sys.stdout.flush()
        i += 1
        n += 1
    # loop ncount times on same step
    # distance from 1st snapshot box or max box for all selected steps
    # recompute box center on 1st step or if panning
    else:
      which = data.findtime(ntime)
      n = nstart
      for i in range(ncount):
        fraction = float(i) / (ncount-1)
        if self.select != "":
          newstr = self.select % fraction
          data.aselect.test(newstr,ntime)
        time,boxone,atoms,bonds,tris,lines = data.viz(which)
        if self.boxflag < 2: box = boxone
        if n == nstart: self.distance = compute_distance(box)
        if n < 10: file = self.file + "000" + str(n)
        elif n < 100: file = self.file + "00" + str(n)
        elif n < 1000: file = self.file + "0" + str(n)
        else: file = self.file + str(n)
        if self.panflag:
          self.ztheta = self.ztheta_start + \
                        fraction*(self.ztheta_stop - self.ztheta_start)
          self.azphi = self.azphi_start + \
                       fraction*(self.azphi_stop - self.azphi_start)
          self.scale = self.scale_start + \
                       fraction*(self.scale_stop - self.scale_start)
          self.viewupright()
        if n == nstart or self.panflag: self.center = compute_center(box)
        if bonds: self.bonds_augment(bonds)
        self.boxdraw = box
        self.atomdraw = atoms
        self.bonddraw = bonds
        self.tridraw = tris
        self.linedraw = lines
        self.ready = 1
        self.setview()
        self.cachelist = -self.cachelist
        self.w.tkRedraw()
        self.save(file)
        print n,
        sys.stdout.flush()
        n += 1
    print "\n%d images" % ncount
# --------------------------------------------------------------------
def display(self,index):
self.boxdraw = self.boxframes[index]
self.atomdraw = self.atomframes[index]
self.bonddraw = self.bondframes[index]
self.tridraw = self.triframes[index]
self.linedraw = self.lineframes[index]
self.ready = 1
self.cachelist = -self.cachelist
self.w.tkRedraw()
return (self.timeframes[index],len(self.atomdraw))
# --------------------------------------------------------------------
# draw the GL scene
  def redraw(self,o):
    """Render the current scene into the GL widget (o is the widget,
    unused here).

    Order: clear, set projection/camera, then either replay the cached
    display list or re-emit box/axes/lines/atoms/bonds/triangles,
    recording them into a fresh display list when caching is on.  A
    negative cachelist marks the cached list as stale.
    """
    # clear window to background color
    glClearColor(self.bgcol[0],self.bgcol[1],self.bgcol[2],0)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    # not ready if no scene yet
    if not self.ready: return
    # set view from eye, distance, 3 lookat vectors (from,to,up)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    if self.orthoflag:
      glOrtho(-0.25*self.eye,0.25*self.eye,-0.25*self.eye,0.25*self.eye,
              self.eye-2*self.distance,self.eye+2*self.distance)
    else:
      gluPerspective(30.0,1.0,0.01,10000.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    gluLookAt(self.xfrom,self.yfrom,self.zfrom,self.xto,self.yto,self.zto,
              self.up[0],self.up[1],self.up[2])
    # draw scene from display list if caching allowed and list hasn't changed
    # else redraw and store as new display list if caching allowed
    if self.cache and self.cachelist > 0: glCallList(self.cachelist);
    else:
      if self.cache:
        if self.cachelist < 0: glDeleteLists(-self.cachelist,1)
        self.cachelist = glGenLists(1)
        glNewList(self.cachelist,GL_COMPILE_AND_EXECUTE)
      # draw box, clip-box, xyz axes, lines
      glDisable(GL_LIGHTING)
      if self.boxflag:
        self.draw_box(0)
        if self.clipflag: self.draw_box(1)
      if self.axisflag: self.draw_axes()
      ncolor = self.vizinfo.nlcolor
      for line in self.linedraw:
        itype = int(line[1])
        if itype > ncolor: raise StandardError,"line type too big"
        red,green,blue = self.vizinfo.lcolor[itype]
        glColor3f(red,green,blue)
        thick = self.vizinfo.lrad[itype]
        glLineWidth(thick)
        glBegin(GL_LINES)
        glVertex3f(line[2],line[3],line[4])
        glVertex3f(line[5],line[6],line[7])
        glEnd()
      glEnable(GL_LIGHTING)
      # draw non-clipped scene = atoms, bonds, triangles
      # draw atoms as collection of points
      #   cannot put PointSize inside glBegin
      #   so probably need to group atoms by type for best performance
      #   or just allow one radius
      #   need to scale radius appropriately with box size
      #   or could leave it at absolute value
      #   use POINT_SMOOTH to enable anti-aliasing and round points
      #   multiple timesteps via vcr::play() is still not fast
      #   caching makes it fast for single frame, but multiple frames is slow
      #   need to enable clipping
      #    if not self.clipflag:
      #      glDisable(GL_LIGHTING)
      #      glEnable(GL_POINT_SMOOTH)
      #      glPointSize(self.vizinfo.arad[int(self.atomdraw[0][1])])
      #      glBegin(GL_POINTS)
      #      for atom in self.atomdraw:
      #        red,green,blue = self.vizinfo.acolor[int(atom[1])]
      #        glColor(red,green,blue)
      #        glVertex3d(atom[2],atom[3],atom[4])
      #      glEnd()
      #      glEnable(GL_LIGHTING)
      if not self.clipflag:
        # atoms: translate to position, replay the per-type sphere list
        for atom in self.atomdraw:
          glTranslatef(atom[2],atom[3],atom[4]);
          glCallList(self.calllist[int(atom[1])]);
          glTranslatef(-atom[2],-atom[3],-atom[4]);
        if self.bonddraw:
          # skip bonds longer than 1/4 of the scene (periodic wraparound)
          bound = 0.25 * self.distance
          ncolor = self.vizinfo.nbcolor
          for bond in self.bonddraw:
            if bond[10] > bound: continue
            itype = int(bond[1])
            if itype > ncolor: raise StandardError,"bond type too big"
            red,green,blue = self.vizinfo.bcolor[itype]
            rad = self.vizinfo.brad[itype]
            glPushMatrix()
            glTranslatef(bond[2],bond[3],bond[4])
            glRotatef(bond[11],bond[12],bond[13],0.0)
            glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,[red,green,blue,1.0]);
            glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
            obj = gluNewQuadric()
            gluCylinder(obj,rad,rad,bond[10],self.nsides,self.nsides)
            glPopMatrix()
        if self.tridraw:
          # fill style of 1st triangle applies to all: 0 fill, 1 line, 2 both
          fillflag = self.vizinfo.tfill[int(self.tridraw[0][1])]
          if fillflag != 1:
            if fillflag:
              glEnable(GL_POLYGON_OFFSET_FILL)
              glPolygonOffset(1.0,1.0)
            glBegin(GL_TRIANGLES)
            ncolor = self.vizinfo.ntcolor
            for tri in self.tridraw:
              itype = int(tri[1])
              if itype > ncolor: raise StandardError,"tri type too big"
              red,green,blue = self.vizinfo.tcolor[itype]
              glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,[red,green,blue,1.0]);
              glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
              glNormal3f(tri[11],tri[12],tri[13])
              glVertex3f(tri[2],tri[3],tri[4])
              glVertex3f(tri[5],tri[6],tri[7])
              glVertex3f(tri[8],tri[9],tri[10])
            glEnd()
            if fillflag: glDisable(GL_POLYGON_OFFSET_FILL)
          if fillflag:
            # triangle outlines drawn in the box color/thickness
            glDisable(GL_LIGHTING)
            glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
            glLineWidth(self.bxthick)
            glColor3f(self.bxcol[0],self.bxcol[1],self.bxcol[2])
            glBegin(GL_TRIANGLES)
            for tri in self.tridraw:
              glVertex3f(tri[2],tri[3],tri[4])
              glVertex3f(tri[5],tri[6],tri[7])
              glVertex3f(tri[8],tri[9],tri[10])
            glEnd()
            glEnable(GL_LIGHTING)
            glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)
      # draw clipped scene = atoms, bonds, triangles
      else:
        # clip bounds as absolute coords from box fractions
        box = self.boxdraw
        xlo = box[0] + self.clipxlo*(box[3] - box[0])
        xhi = box[0] + self.clipxhi*(box[3] - box[0])
        ylo = box[1] + self.clipylo*(box[4] - box[1])
        yhi = box[1] + self.clipyhi*(box[4] - box[1])
        zlo = box[2] + self.clipzlo*(box[5] - box[2])
        zhi = box[2] + self.clipzhi*(box[5] - box[2])
        for atom in self.atomdraw:
          x,y,z = atom[2],atom[3],atom[4]
          if x >= xlo and x <= xhi and y >= ylo and y <= yhi and \
             z >= zlo and z <= zhi:
            glTranslatef(x,y,z);
            glCallList(self.calllist[int(atom[1])]);
            glTranslatef(-x,-y,-z);
        if self.bonddraw:
          bound = 0.25 * self.distance
          ncolor = self.vizinfo.nbcolor
          for bond in self.bonddraw:
            # draw only bonds whose bounding box is fully inside the clip box
            xmin = min2(bond[2],bond[5])
            xmax = max2(bond[2],bond[5])
            ymin = min2(bond[3],bond[6])
            ymax = max2(bond[3],bond[6])
            zmin = min2(bond[4],bond[7])
            zmax = max2(bond[4],bond[7])
            if xmin >= xlo and xmax <= xhi and \
               ymin >= ylo and ymax <= yhi and zmin >= zlo and zmax <= zhi:
              if bond[10] > bound: continue
              itype = int(bond[1])
              if itype > ncolor: raise StandardError,"bond type too big"
              red,green,blue = self.vizinfo.bcolor[itype]
              rad = self.vizinfo.brad[itype]
              glPushMatrix()
              glTranslatef(bond[2],bond[3],bond[4])
              glRotatef(bond[11],bond[12],bond[13],0.0)
              glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,[red,green,blue,1.0]);
              glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
              obj = gluNewQuadric()
              gluCylinder(obj,rad,rad,bond[10],self.nsides,self.nsides)
              glPopMatrix()
        if self.tridraw:
          fillflag = self.vizinfo.tfill[int(self.tridraw[0][1])]
          if fillflag != 1:
            if fillflag:
              glEnable(GL_POLYGON_OFFSET_FILL)
              glPolygonOffset(1.0,1.0)
            glBegin(GL_TRIANGLES)
            ncolor = self.vizinfo.ntcolor
            for tri in self.tridraw:
              xmin = min3(tri[2],tri[5],tri[8])
              xmax = max3(tri[2],tri[5],tri[8])
              ymin = min3(tri[3],tri[6],tri[9])
              ymax = max3(tri[3],tri[6],tri[9])
              zmin = min3(tri[4],tri[7],tri[10])
              zmax = max3(tri[4],tri[7],tri[10])
              if xmin >= xlo and xmax <= xhi and \
                 ymin >= ylo and ymax <= yhi and \
                 zmin >= zlo and zmax <= zhi:
                itype = int(tri[1])
                if itype > ncolor: raise StandardError,"tri type too big"
                red,green,blue = self.vizinfo.tcolor[itype]
                glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,
                             [red,green,blue,1.0]);
                glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
                glNormal3f(tri[11],tri[12],tri[13])
                glVertex3f(tri[2],tri[3],tri[4])
                glVertex3f(tri[5],tri[6],tri[7])
                glVertex3f(tri[8],tri[9],tri[10])
            glEnd()
            if fillflag: glDisable(GL_POLYGON_OFFSET_FILL)
          if fillflag:
            glDisable(GL_LIGHTING)
            glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
            glLineWidth(self.bxthick)
            glColor3f(self.bxcol[0],self.bxcol[1],self.bxcol[2])
            glBegin(GL_TRIANGLES)
            for tri in self.tridraw:
              xmin = min3(tri[2],tri[5],tri[8])
              xmax = max3(tri[2],tri[5],tri[8])
              ymin = min3(tri[3],tri[6],tri[9])
              ymax = max3(tri[3],tri[6],tri[9])
              zmin = min3(tri[4],tri[7],tri[10])
              zmax = max3(tri[4],tri[7],tri[10])
              if xmin >= xlo and xmax <= xhi and \
                 ymin >= ylo and ymax <= yhi and \
                 zmin >= zlo and zmax <= zhi:
                glVertex3f(tri[2],tri[3],tri[4])
                glVertex3f(tri[5],tri[6],tri[7])
                glVertex3f(tri[8],tri[9],tri[10])
            glEnd()
            glEnable(GL_LIGHTING)
            glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)
      if self.cache: glEndList()
    glFlush()
# --------------------------------------------------------------------
# make new call list for each atom type
# called when atom color/rad/quality is changed
  def make_atom_calllist(self):
    """Rebuild the per-atom-type GL display lists.

    Called whenever an atom color, radius, or sphere quality changes.
    self.calllist[itype] holds the GL list id for atom type itype
    (entry 0 is unused; types are 1-based).
    """
    # extend calllist array if necessary
    if self.vizinfo.nacolor > self.nclist:
      for i in range(self.vizinfo.nacolor-self.nclist): self.calllist.append(0)
      self.nclist = self.vizinfo.nacolor
    # create new calllist for each atom type:
    # free the old GL list (id 0 means "never allocated"), allocate a new
    # one, and compile a colored solid sphere of the type's radius into it
    for itype in xrange(1,self.vizinfo.nacolor+1):
      if self.calllist[itype]: glDeleteLists(self.calllist[itype],1)
      ilist = glGenLists(1)
      self.calllist[itype] = ilist
      glNewList(ilist,GL_COMPILE)
      red,green,blue = self.vizinfo.acolor[itype]
      rad = self.vizinfo.arad[itype]
      glColor3f(red,green,blue);
#      glPointSize(10.0*rad)
#      glBegin(GL_POINTS)
#      glVertex3f(0.0,0.0,0.0)
#      glEnd()
      # emissive material so atoms keep their color regardless of lighting
      glMaterialfv(GL_FRONT,GL_EMISSION,[red,green,blue,1.0]);
      glMaterialf(GL_FRONT,GL_SHININESS,self.shiny);
      glutSolidSphere(rad,self.nslices,self.nstacks)
      glEndList()
# --------------------------------------------------------------------
# augment bond info returned by viz() with info needed for GL draw
# info = length, theta, -dy, dx for bond orientation
def bonds_augment(self,bonds):
for bond in bonds:
dx = bond[5] - bond[2]
dy = bond[6] - bond[3]
dz = bond[7] - bond[4]
length = sqrt(dx*dx + dy*dy + dz*dz)
dx /= length
dy /= length
dz /= length
theta = acos(dz)*180.0/pi
bond += [length,theta,-dy,dx]
# --------------------------------------------------------------------
def draw_box(self,flag):
xlo,ylo,zlo,xhi,yhi,zhi = self.boxdraw
if flag:
tmp = xlo + self.clipxlo*(xhi - xlo)
xhi = xlo + self.clipxhi*(xhi - xlo)
xlo = tmp
tmp = ylo + self.clipylo*(yhi - ylo)
yhi = ylo + self.clipyhi*(yhi - ylo)
ylo = tmp
tmp = zlo + self.clipzlo*(zhi - zlo)
zhi = zlo + self.clipzhi*(zhi - zlo)
zlo = tmp
glLineWidth(self.bxthick)
glColor3f(self.bxcol[0],self.bxcol[1],self.bxcol[2])
glBegin(GL_LINE_LOOP)
glVertex3f(xlo,ylo,zlo)
glVertex3f(xhi,ylo,zlo)
glVertex3f(xhi,yhi,zlo)
glVertex3f(xlo,yhi,zlo)
glEnd()
glBegin(GL_LINE_LOOP)
glVertex3f(xlo,ylo,zhi)
glVertex3f(xhi,ylo,zhi)
glVertex3f(xhi,yhi,zhi)
glVertex3f(xlo,yhi,zhi)
glEnd()
glBegin(GL_LINES)
glVertex3f(xlo,ylo,zlo)
glVertex3f(xlo,ylo,zhi)
glVertex3f(xhi,ylo,zlo)
glVertex3f(xhi,ylo,zhi)
glVertex3f(xhi,yhi,zlo)
glVertex3f(xhi,yhi,zhi)
glVertex3f(xlo,yhi,zlo)
glVertex3f(xlo,yhi,zhi)
glEnd()
# --------------------------------------------------------------------
def draw_axes(self):
xlo,ylo,zlo,xhi,yhi,zhi = self.boxdraw
delta = xhi-xlo
if yhi-ylo > delta: delta = yhi-ylo
if zhi-zlo > delta: delta = zhi-zlo
delta *= 0.1
glLineWidth(self.bxthick)
glBegin(GL_LINES)
glColor3f(1,0,0)
glVertex3f(xlo-delta,ylo-delta,zlo-delta)
glVertex3f(xhi-delta,ylo-delta,zlo-delta)
glColor3f(0,1,0)
glVertex3f(xlo-delta,ylo-delta,zlo-delta)
glVertex3f(xlo-delta,yhi-delta,zlo-delta)
glColor3f(0,0,1)
glVertex3f(xlo-delta,ylo-delta,zlo-delta)
glVertex3f(xlo-delta,ylo-delta,zhi-delta)
glEnd()
# --------------------------------------------------------------------
  def save(self,file=None):
    """Save the current GL frame buffer to <file>.png (default: self.file)."""
    self.w.update()    # force image on screen to be current before saving it
    pstring = glReadPixels(0,0,self.xpixels,self.ypixels,
                           GL_RGBA,GL_UNSIGNED_BYTE)
    # NOTE(review): Image.fromstring() was removed in Pillow 3.0 in favor of
    # Image.frombytes(); this assumes the legacy PIL API -- confirm.
    snapshot = Image.fromstring("RGBA",(self.xpixels,self.ypixels),pstring)
    # GL rows are stored bottom-up, image rows top-down
    snapshot = snapshot.transpose(Image.FLIP_TOP_BOTTOM)
    if not file: file = self.file
    snapshot.save(file + ".png")
# --------------------------------------------------------------------
def adef(self):
self.vizinfo.setcolors("atom",range(100),"loop")
self.vizinfo.setradii("atom",range(100),0.45)
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def bdef(self):
self.vizinfo.setcolors("bond",range(100),"loop")
self.vizinfo.setradii("bond",range(100),0.25)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def tdef(self):
self.vizinfo.setcolors("tri",range(100),"loop")
self.vizinfo.setfills("tri",range(100),0)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def ldef(self):
self.vizinfo.setcolors("line",range(100),"loop")
self.vizinfo.setradii("line",range(100),0.25)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def acol(self,atypes,colors):
self.vizinfo.setcolors("atom",atypes,colors)
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def arad(self,atypes,radii):
self.vizinfo.setradii("atom",atypes,radii)
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def bcol(self,btypes,colors):
self.vizinfo.setcolors("bond",btypes,colors)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def brad(self,btypes,radii):
self.vizinfo.setradii("bond",btypes,radii)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def tcol(self,ttypes,colors):
self.vizinfo.setcolors("tri",ttypes,colors)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def tfill(self,ttypes,flags):
self.vizinfo.setfills("tri",ttypes,flags)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def lcol(self,ltypes,colors):
self.vizinfo.setcolors("line",ltypes,colors)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def lrad(self,ltypes,radii):
self.vizinfo.setradii("line",ltypes,radii)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
# derived class from Togl's Opengl
# overwrite redraw, translate, rotate, scale methods
# latter 3 are mouse-motion methods
class MyOpengl(Opengl):
  """Togl Opengl widget that forwards drawing and mouse motion to a parent viewer."""

  def __init__(self, master, cnf={}, **kw):
    Opengl.__init__(self,master,cnf,**kw)
    # NOTE(review): this assigns a class attribute on Opengl itself, not a
    # per-instance attribute -- presumably intentional, but confirm
    Opengl.autospin_allowed = 0

  # redraw Opengl scene via the parent-supplied redraw() callback
  def tkRedraw(self,*dummy):
    """Make this context current, redraw the scene, and swap buffers."""
    if not self.initialised: return
    self.tk.call(self._w,'makecurrent')
    self.redraw(self)
    self.tk.call(self._w,'swapbuffers')

  # left button: translate
  def tkTranslate(self,event):
    """Shift the view by the mouse displacement since the last event."""
    newx = self.parent.xshift + (event.x - self.xmouse)
    newy = self.parent.yshift - (event.y - self.ymouse)
    self.parent.shift(newx,newy)
    self.tkRedraw()
    self.tkRecordMouse(event)

  # middle button: trackball rotation
  def tkRotate(self,event):
    """Rotate the view via the parent's trackball handler."""
    self.parent.mouse_rotate(event.x,event.y,self.xmouse,self.ymouse)
    self.tkRedraw()
    self.tkRecordMouse(event)

  # right button: zoom
  def tkScale(self,event):
    """Zoom: 1% scale change per pixel of vertical mouse motion, clamped."""
    factor = 1 - 0.01 * (event.y - self.ymouse)
    factor = min(max(factor,0.001),1000)
    self.parent.zoom(factor * self.parent.scale)
    self.tkRedraw()
    self.tkRecordMouse(event)
# --------------------------------------------------------------------
# draw a line segment
def segment(p1,p2):
  """Emit both endpoints of segment p1-p2 as GL vertices (caller owns glBegin/glEnd)."""
  for pt in (p1,p2):
    glVertex3f(pt[0],pt[1],pt[2])
# --------------------------------------------------------------------
# normalize a 3-vector to unit length
def vecnorm(v):
  """Return a new 3-vector: v scaled to unit length."""
  x,y,z = v[0],v[1],v[2]
  length = sqrt(x*x + y*y + z*z)
  return [x/length,y/length,z/length]
# --------------------------------------------------------------------
# dot product of two 3-vectors
def vecdot(v1,v2):
  """Return the scalar (dot) product of 3-vectors v1 and v2."""
  return sum(v1[i]*v2[i] for i in range(3))
# --------------------------------------------------------------------
# cross product of two 3-vectors
def veccross(v1,v2):
  """Return the cross product v1 x v2 of two 3-vectors as a new list."""
  return [v1[1]*v2[2] - v1[2]*v2[1],
          v1[2]*v2[0] - v1[0]*v2[2],
          v1[0]*v2[1] - v1[1]*v2[0]]
# --------------------------------------------------------------------
# return characteristic distance of simulation domain = max dimension
def compute_distance(box):
  """Return the characteristic distance of the simulation domain.

  box is (xlo,ylo,zlo,xhi,yhi,zhi); the result is the largest of the three
  dimensions (hand-rolled comparison chain replaced by builtin max).
  """
  return max(box[3]-box[0],box[4]-box[1],box[5]-box[2])
# --------------------------------------------------------------------
# return center of box as 3 vector
def compute_center(box):
  """Return the center of box (xlo,ylo,zlo,xhi,yhi,zhi) as a 3-element list."""
  return [0.5 * (box[i] + box[i+3]) for i in range(3)]
# --------------------------------------------------------------------
# return min of 2 values
def min2(a,b):
  """Return the smaller of a and b (a on ties) -- builtin min does exactly this."""
  return min(a,b)
# --------------------------------------------------------------------
# return max of 2 values
def max2(a,b):
  """Return the larger of a and b (a on ties) -- builtin max does exactly this."""
  return max(a,b)
# --------------------------------------------------------------------
# return min of 3 values
def min3(a,b,c):
  """Return the smallest of a, b, c (leftmost on ties) via builtin min."""
  return min(a,b,c)
# --------------------------------------------------------------------
# return max of 3 values
def max3(a,b,c):
  """Return the largest of a, b, c (leftmost on ties) via builtin max."""
  return max(a,b,c)
| 43,711 | 31.866165 | 79 | py |
LIGGGHTS-WITH-BONDS | LIGGGHTS-WITH-BONDS-master/doc/Scripts/correlate.py | #!/usr/bin/env python
"""
function:
parse the block of thermo data in a lammps logfile and perform auto- and
cross correlation of the specified column data. The total sum of the
correlation is also computed which can be converted to an integral by
multiplying by the timestep.
output:
standard output contains column data for the auto- & cross correlations
plus the total sum of each. Note, only the upper triangle of the
correlation matrix is computed.
usage:
correlate.py [-c col] <-c col2> <-s max_correlation_time> [logfile]
"""
import sys
import re
import array
# parse command line
# -c col (repeatable) selects 1-based data columns; -s caps the correlation
# window; any other argument is taken as the logfile name
maxCorrelationTime = 0
cols = array.array("I")
nCols = 0
args = sys.argv[1:]
index = 0
while index < len(args):
  arg = args[index]
  index += 1
  if (arg == "-c"):
    # store as 0-based column index
    cols.append(int(args[index])-1)
    nCols += 1
    index += 1
  elif (arg == "-s"):
    maxCorrelationTime = int(args[index])
    index += 1
  else :
    filename = arg
if (nCols < 1): raise RuntimeError, 'no data columns requested'
# one double array per requested column
data = [array.array("d")]
for s in range(1,nCols) : data.append( array.array("d") )
# read data block from log file: keep only the contiguous run of lines whose
# first token starts with a digit (the thermo block); stop at its first end
# NOTE(review): "input" shadows the builtin of the same name, and the file
# handle is never closed (script-lifetime only)
start = False
input = open(filename)
nSamples = 0
pattern = re.compile('\d')
line = input.readline()
while line :
  columns = line.split()
  if (columns and pattern.match(columns[0])) :
    for i in range(nCols):
      data[i].append( float(columns[cols[i]]) )
    nSamples += 1
    start = True
  else :
    if (start) : break
  line = input.readline()
print "# read :",nSamples," samples of ", nCols," data"
# default window: half the sample count
if( maxCorrelationTime < 1): maxCorrelationTime = int(nSamples/2);
# correlate and integrate: only the upper triangle (i<=j) of the correlation
# matrix is computed; "sum" columns are running sums (multiply by the
# timestep to get integrals)
correlationPairs = []
for i in range(0,nCols):
  for j in range(i,nCols): # note only upper triangle of the correlation matrix
    correlationPairs.append([i,j])
header = "# "
for k in range(len(correlationPairs)):
  i = str(correlationPairs[k][0]+1)
  j = str(correlationPairs[k][1]+1)
  header += " C"+i+j+" sum_C"+i+j
print header
nCorrelationPairs = len(correlationPairs)
# NOTE(review): "sum" shadows the builtin of the same name
sum = [0.0] * nCorrelationPairs
for s in range(maxCorrelationTime) :
  # C_ij(s) = < x_i(t) * x_j(t+s) > averaged over the nt valid time origins
  correlation = [0.0] * nCorrelationPairs
  nt = nSamples-s
  for t in range(0,nt) :
    for p in range(nCorrelationPairs):
      i = correlationPairs[p][0]
      j = correlationPairs[p][1]
      correlation[p] += data[i][t]*data[j][s+t]
  output = ""
  for p in range(0,nCorrelationPairs):
    correlation[p] /= nt
    sum[p] += correlation[p]
    output += str(correlation[p]) + " " + str(sum[p]) + " "
  print output
| 2,537 | 27.2 | 79 | py |
verrou | verrou-master/generateBackendInterOperator.py | #!/usr/bin/env python3
# This file is part of Verrou, a FPU instrumentation tool.
# Copyright (C) 2014-2021 EDF
# F. Févotte <francois.fevotte@edf.fr>
# B. Lathuilière <bruno.lathuiliere@edf.fr>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307, USA.
# The GNU Lesser General Public License is contained in the file COPYING.
import sys
import re
def generateNargs(fileOut, fileNameTemplate, listOfBackend, listOfOp, nargs, post="", roundingTab=[None]):
    """Instantiate the wrapper template once per (backend, op, rounding) triple.

    The template in fileNameTemplate contains FCTNAME(...) and BACKENDFUNC(...)
    placeholders rewritten by applyTemplate.  nargs selects the template arity;
    for 3-argument fused ops a sign is supplied ("-" for msub variants).
    Fixes: the template file handle was leaked, and the regex patterns used
    non-raw strings with invalid "\\(" escapes.
    """
    with open(fileNameTemplate, "r") as template:
        templateStr = template.readlines()
    FctNameRegExp = re.compile(r"(.*)FCTNAME\(([^,]*),([^)]*)\)(.*)")
    BckNameRegExp = re.compile(r"(.*)BACKENDFUNC\(([^)]*)\)(.*)")
    for backend in listOfBackend:
        for op in listOfOp:
            for rounding in roundingTab:
                if nargs in (1, 2):
                    applyTemplate(fileOut, templateStr, FctNameRegExp, BckNameRegExp,
                                  backend, op, post, sign=None, rounding=rounding)
                elif nargs == 3:
                    # fused multiply-subtract reuses the add template with a "-" sign
                    sign = "-" if "msub" in op else ""
                    applyTemplate(fileOut, templateStr, FctNameRegExp, BckNameRegExp,
                                  backend, op, post, sign, rounding=rounding)
def applyTemplate(fileOut, templateStr, FctRegExp, BckRegExp, backend, op, post, sign=None, rounding=None):
    """Write one instantiation of the template to fileOut.

    Each template line is rewritten: CONTEXT is replaced by the backend context
    symbol, SIGN by the given sign (an unset sign on a SIGN line aborts the
    generation), FCTNAME(...) by the generated wrapper name, and
    BACKENDFUNC(...) by the interflop backend entry point (duplicated for the
    optional "post" backend).
    """
    fileOut.write("// generation of operation %s backend %s\n"%(op,backend))
    backendFunc=backend
    if rounding!=None:
        backendFunc=backend+"_"+rounding
    # wrapper symbol, e.g. vr_<backend>_<rounding><post><op><type><opt>
    def fctName(typeVal,opt):
        return "vr_"+backendFunc+post+op+typeVal+opt
    # backend entry point; a "-" sign means the op maps onto its "add" twin
    def bckName(typeVal):
        if sign!="-":
            if rounding!=None:
                return "interflop_"+backend+"_"+op+"_"+typeVal+"_"+rounding
            return "interflop_"+backend+"_"+op+"_"+typeVal
        else:
            if rounding!=None:
                return "interflop_"+backend+"_"+op.replace("sub","add")+"_"+typeVal+"_"+rounding
            return "interflop_"+backend+"_"+op.replace("sub","add")+"_"+typeVal
    # entry point of the extra "post" backend (e.g. checkcancellation)
    def bckNamePost(typeVal):
        if sign!="-":
            return "interflop_"+post+"_"+op+"_"+typeVal
        else:
            return "interflop_"+post+"_"+op.replace("sub","add")+"_"+typeVal
    contextName="backend_"+backend+"_context"
    contextNamePost="backend_"+post+"_context"
    for line in templateStr:
        if "CONTEXT" in line:
            line=line.replace("CONTEXT", contextName)
        if "SIGN" in line:
            if sign!=None:
                line=line.replace("SIGN", sign)
            else:
                # a SIGN placeholder without a sign is a template/arity mismatch
                print("Generation failed")
                sys.exit()
        result=FctRegExp.match(line)
        if result!=None:
            res=result.group(1) + fctName(result.group(2), result.group(3)) + result.group(4)
            fileOut.write(res+"\n")
            continue
        result=BckRegExp.match(line)
        if result!=None:
            res=result.group(1) + bckName(result.group(2)) + result.group(3)
            fileOut.write(res+"\n")
            if post!="":
                # emit the same call routed to the "post" backend and context
                res=result.group(1) + bckNamePost(result.group(2)) + result.group(3)
                res=res.replace(contextName, contextNamePost)
                fileOut.write(res+"\n")
            continue
        fileOut.write(line)
if __name__=="__main__":
    # generate the header aggregating every backend/operation wrapper
    fileNameOutput="vr_generated_from_templates.h"
    fileOut=open(fileNameOutput,"w")
    fileOut.write("//Generated by %s\n"%(str(sys.argv)[1:-1]))
    # every rounding-mode variant specialized for the "verrou" backend
    roundingTab=["NEAREST", "UPWARD", "DOWNWARD", "FARTHEST", "ZERO", "AWAY_ZERO"]+[rnd + det for rnd in ["RANDOM", "AVERAGE", "PRANDOM"] for det in ["","_DET","_COMDET" ]]
    roundingTab+=[rnd + det for rnd in ["RANDOM", "AVERAGE"] for det in ["_SCOMDET" ]]
    roundingTab+=["SR_MONOTONIC"]
    # 1-argument cast operator
    template1Args="vr_interp_operator_template_cast.h"
    listOfOp1Args=["cast"]
    generateNargs(fileOut,template1Args, ["verrou","mcaquad","checkdenorm"], listOfOp1Args, 1)
    generateNargs(fileOut,template1Args, ["verrou"], listOfOp1Args, 1, post="check_float_max")
    generateNargs(fileOut,template1Args, ["verrou"], listOfOp1Args, 1, roundingTab=roundingTab)
    # 1-argument sqrt operator, only when compiled with USE_VERROU_SQRT
    template1Args="vr_interp_operator_template_1args.h"
    listOfOp1Args=["sqrt"]
    fileOut.write("#ifdef USE_VERROU_SQRT\n")
    generateNargs(fileOut,template1Args, ["verrou","checkdenorm"], listOfOp1Args, 1)
    generateNargs(fileOut,template1Args, ["verrou"], listOfOp1Args, 1, post="check_float_max")
    generateNargs(fileOut,template1Args, ["verrou"], listOfOp1Args, 1, roundingTab=roundingTab)
    fileOut.write("#endif\n")
    # 2-argument arithmetic operators
    template2Args="vr_interp_operator_template_2args.h"
    listOfOp2Args=["add","sub","mul","div"]
    generateNargs(fileOut,template2Args, ["verrou","mcaquad","checkdenorm"], listOfOp2Args, 2)
    generateNargs(fileOut,template2Args, ["verrou"], listOfOp2Args, 2, post="check_float_max")
    generateNargs(fileOut,template2Args, ["verrou"], listOfOp2Args, 2, roundingTab=roundingTab)
    # cancellation checking only makes sense for add/sub
    listOfOp2Args=["add","sub"]
    generateNargs(fileOut,template2Args, ["verrou","mcaquad","checkdenorm"], listOfOp2Args, 2, post="checkcancellation")
    # 3-argument fused multiply-add/subtract operators
    template3Args="vr_interp_operator_template_3args.h"
    listOfOp3Args=["madd","msub"]
    generateNargs(fileOut,template3Args, ["verrou","mcaquad","checkdenorm"], listOfOp3Args, 3)
    generateNargs(fileOut,template3Args, ["verrou","mcaquad","checkdenorm"], listOfOp3Args, 3, post="checkcancellation")
    generateNargs(fileOut,template3Args, ["verrou"], listOfOp3Args, 3, post="check_float_max")
    generateNargs(fileOut,template3Args, ["verrou"], listOfOp3Args, 3, roundingTab=roundingTab)
    fileOut.close()
| 6,229 | 40.258278 | 172 | py |
verrou | verrou-master/pyTools/DD_stoch.py |
# This file is part of Verrou, a FPU instrumentation tool.
# Copyright (C) 2014-2021 EDF
# F. Févotte <francois.fevotte@edf.fr>
# B. Lathuilière <bruno.lathuiliere@edf.fr>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307, USA.
# The GNU Lesser General Public License is contained in the file COPYING.
import sys
import os
import subprocess
import shutil
import hashlib
import copy
import glob
import datetime
import math
from valgrind import convNumLineTool
from valgrind import DD
def runCmdAsync(cmd, fname, envvars=None):
    """Start CMD asynchronously, adding ENVVARS to the current environment,
    and redirecting standard and error outputs to FNAME.out and FNAME.err.

    Returns the subprocess.Popen object (pass it to getResult() to obtain
    the exit code).  The previous docstring wrongly claimed an exit code
    was returned.
    """
    if envvars is None:
        envvars = {}
    env = copy.deepcopy(os.environ)
    env.update(envvars)
    # the child inherits duplicated descriptors, so the parent-side file
    # objects may be closed as soon as Popen has started
    with open("%s.out"%fname, "w") as fout:
        with open("%s.err"%fname, "w") as ferr:
            return subprocess.Popen(cmd, env=env, stdout=fout, stderr=ferr)
def getResult(subProcess):
    """Wait for SUBPROCESS to terminate and return its exit code."""
    # Popen.wait() both blocks and returns returncode
    return subProcess.wait()
def runCmd(cmd, fname, envvars=None):
    """Run CMD synchronously, adding ENVVARS to the current environment, and
    redirecting standard and error outputs to FNAME.out and FNAME.err.

    Returns CMD's exit code."""
    proc = runCmdAsync(cmd, fname, envvars)
    return getResult(proc)
class verrouTask:
    """One delta-debugging configuration: a directory of up to nbRun sample
    runs, each executed via runCmd and compared against a reference directory.

    Results are cached on disk: dd.run<i>/dd.run.out marks a completed run,
    dd.run<i>/dd.return.value stores the comparison status (0 = PASS).
    """

    def __init__(self, dirname, refDir,runCmd, cmpCmd,nbRun, maxNbPROC, runEnv , verbose=True, seedTab=None):
        self.dirname=dirname
        self.refDir=refDir
        self.runCmd=runCmd
        self.cmpCmd=cmpCmd
        self.nbRun=nbRun
        # reuse DD's PASS/FAIL sentinels so outcomes compare directly
        self.FAIL=DD.DD.FAIL
        self.PASS=DD.DD.PASS
        self.subProcessRun={}
        self.maxNbPROC= maxNbPROC
        self.runEnv=runEnv
        self.verbose=verbose
        self.alreadyFail=False
        self.pathToPrint=os.path.relpath(self.dirname, os.getcwd())
        self.preRunLambda=None
        self.postRunLambda=None
        self.seedTab=seedTab

    def setPostRun(self, postLambda):
        """Register a hook called with the run directory after each sample run."""
        self.postRunLambda=postLambda

    def setPreRun(self, preLambda):
        """Register a hook called with (rundir, env) before each sample run."""
        self.preRunLambda=preLambda

    def printDir(self):
        # progress prefix (no newline: status is appended by run())
        print(self.pathToPrint,end="")

    def nameDir(self,i):
        """Return the directory path of sample i."""
        return os.path.join(self.dirname,"dd.run%i" % (i))

    def mkdir(self,i):
        os.mkdir(self.nameDir(i))

    def rmdir(self,i):
        shutil.rmtree(self.nameDir(i))

    def runOneSample(self,i):
        """Launch sample i asynchronously; the Popen handle is kept in subProcessRun."""
        rundir= self.nameDir(i)
        env={key:self.runEnv[key] for key in self.runEnv}
        if self.seedTab!=None:
            env["VERROU_SEED"]=str(self.seedTab[i])
        if self.preRunLambda!=None:
            self.preRunLambda(rundir, env)
        self.subProcessRun[i]=runCmdAsync([self.runCmd, rundir],
                                          os.path.join(rundir,"dd.run"),
                                          env)

    def cmpOneSample(self,i, assertRun=True):
        """Compare sample i against the reference; returns PASS or FAIL.

        With assertRun, first waits for the sample's asynchronous run to
        finish.  The comparison status is persisted in dd.return.value.
        """
        rundir= self.nameDir(i)
        if assertRun:
            if self.subProcessRun[i]!=None:
                getResult(self.subProcessRun[i])
        if self.postRunLambda!=None:
            self.postRunLambda(rundir)
        if self.refDir==None: #if there are no reference provided cmp is ignored
            return self.PASS
        retval = runCmd([self.cmpCmd, self.refDir, rundir],
                        os.path.join(rundir,"dd.compare"))
        with open(os.path.join(rundir, "dd.return.value"),"w") as f:
            f.write(str(retval))
        if retval != 0:
            self.alreadyFail=True
#            if self.verbose:
#                print("FAIL(%d)" % i)
            return self.FAIL
        else:
#            if self.alreadyFail:
#                print("PASS(%d)" % i)
            return self.PASS

    def sampleToCompute(self, nbRun, earlyExit):
        """Inspect the on-disk cache and return the remaining work for nbRun runs.

        Returns (runDone, workToRun, cmpDone, failureIndex): samples already
        run but not compared, samples still to run, samples fully compared,
        and the indices of cached failures.  With earlyExit, returns None as
        soon as a cached failure is found.
        """
        listOfDirString=[runDir for runDir in os.listdir(self.dirname) if runDir.startswith("dd.run")]
        listOfDirIndex=[ int(x.replace("dd.run","")) for x in listOfDirString ]
        cmpDone=[]
        runDone=[]
        workToCmpOnly=[]
        failureIndex=[]
        for runDir in listOfDirString:
            returnValuePath=os.path.join(self.dirname, runDir, "dd.return.value")
            ddRunIndex=int(runDir.replace("dd.run",""))
            if os.path.exists(returnValuePath):
                # comparison already done: 0 means PASS
                statusCmp=int((open(returnValuePath).readline()))
                if statusCmp!=0:
                    if earlyExit:
                        return None
                    else:
                        failureIndex+=[ddRunIndex]
                cmpDone+=[ddRunIndex]
            else:
                # run finished (dd.run.out present) but not yet compared
                runPath=os.path.join(self.dirname, runDir, "dd.run.out")
                if os.path.exists(runPath):
                    runDone+=[ddRunIndex]
        # still to run: indices below nbRun neither run nor compared
        workToRun= [x for x in range(nbRun) if (((not x in runDone+cmpDone) and (x in listOfDirIndex )) or (not (x in listOfDirIndex))) ]
        return (runDone, workToRun, cmpDone, failureIndex)

    def getEstimatedFailProbability(self):
        """Return the fraction of cached comparisons that failed for this configuration."""
        listOfDirString=[runDir for runDir in os.listdir(self.dirname) if runDir.startswith("dd.run")]
        listOfDirIndex=[ int(x.replace("dd.run","")) for x in listOfDirString ]
        cacheCounter=0.
        cacheFail=0.
        for runDir in listOfDirString:
            returnValuePath=os.path.join(self.dirname, runDir, "dd.return.value")
            if os.path.exists(returnValuePath):
                cacheCounter+=1.
                statusCmp=int((open(returnValuePath).readline()))
                if statusCmp!=0:
                    cacheFail+=1.
        return cacheFail / cacheCounter

    def run(self, earlyExit=True):
        """Bring this configuration up to nbRun samples and return PASS/FAIL.

        Pending comparisons are done first, then missing runs (sequentially
        or in parallel depending on maxNbPROC).  Progress is printed with
        cache / cmp / run annotations.
        """
        if self.verbose:
            self.printDir()
        workToDo=self.sampleToCompute(self.nbRun, earlyExit)
        if workToDo==None:
            # a cached failure short-circuits everything
            print(" --(cache) -> FAIL")
            return self.FAIL
        cmpOnlyToDo=workToDo[0]
        runToDo=workToDo[1]
        cmpDone=workToDo[2]
        failureIndex=workToDo[3]
        if len(cmpOnlyToDo)==0 and len(runToDo)==0:
            # everything already cached
            if(len(failureIndex)==0):
                print(" --(cache) -> PASS("+str(self.nbRun)+")")
                return self.PASS
            else:
                print(" --(cache) -> FAIL(%s)"%((str(failureIndex)[1:-1]).replace(" ","")))
                return self.FAIL
        if len(cmpOnlyToDo)!=0:
            # first compare the samples that were run but never compared
            print(" --( cmp ) -> ",end="",flush=True)
            returnVal=self.cmpSeq(cmpOnlyToDo, earlyExit)
            if returnVal==self.FAIL:
                if earlyExit:
                    print("FAIL", end="\n",flush=True)
                    return self.FAIL
                else:
                    print("FAIL", end="",flush= True)
            else:
                print("PASS(+" + str(len(cmpOnlyToDo))+"->"+str(len(cmpDone) +len(cmpOnlyToDo))+")" , end="", flush=True)
        if len(runToDo)!=0:
            if self.maxNbPROC==None:
                returnVal=self.runSeq(runToDo, earlyExit, self.verbose)
            else:
                returnVal=self.runPar(runToDo)
            if(returnVal==self.PASS):
                print("PASS(+" + str(len(runToDo))+"->"+str( len(cmpOnlyToDo) +len(cmpDone) +len(runToDo) )+")" )
            return returnVal
        else:
            print("")
        return self.PASS

    def cmpSeq(self,workToDo, earlyExit):
        """Compare the listed samples sequentially; FAIL as soon as one fails
        when earlyExit is set."""
        res=self.PASS
        for run in workToDo:
            retVal=self.cmpOneSample(run,assertRun=False)
            if retVal=="FAIL":
                res=self.FAIL
                if earlyExit:
                    return res
        return res

    def runSeq(self,workToDo, earlyExit,printStatus=False):
        """Run and compare the listed samples one after the other."""
        if printStatus:
            print(" --( run ) -> ",end="",flush=True)
        res=self.PASS
        for run in workToDo:
            if not os.path.exists(self.nameDir(run)):
                self.mkdir(run)
            else:
                # directory exists without cached results: user touched the cache
                print("Manual cache modification detected (runSeq)")
                if self.alreadyFail:
                    if printStatus:
                        # realign the status line under the directory prefix
                        print(" "*len(self.pathToPrint)+" --( run ) -> ", end="", flush=True)
            self.runOneSample(run)
            retVal=self.cmpOneSample(run)
            if retVal=="FAIL":
                res=self.FAIL
                if earlyExit:
                    if printStatus:
                        print("FAIL(%i)"%(run))
                    return res
                else:
                    if printStatus:
                        print("FAIL(%i)"%(run))
                    self.alreadyFail=True
        return res

    def runPar(self,workToDo):
        """Run the listed samples through a thread pool of maxNbPROC workers."""
        print(" --(/run ) -> ",end="",flush=True)
        import concurrent.futures
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.maxNbPROC) as executor:
            # one single-sample runSeq per worker, no early exit inside a worker
            futures=[executor.submit(self.runSeq, [work],False, False) for work in workToDo]
            concurrent.futures.wait(futures)
            results=[futur.result() for futur in futures]
            if self.FAIL in results:
                indices=[i for i in range(len(futures)) if futures[i].result()==self.FAIL]
                failIndices=[workToDo[indice] for indice in indices ]
                print("FAIL(%s)"%((str(failIndices)[1:-1])).replace(" ",""))
                return self.FAIL
        return self.PASS
def md5Name(deltas):
    """Return a deterministic cache-directory name for a set of deltas.

    The name is the md5 hex digest of the sorted concatenation, so it is
    insensitive to the order of DELTAS; the input list is left untouched.
    """
    canonical = "".join(sorted(deltas))
    return hashlib.md5(canonical.encode('utf-8')).hexdigest()
def prepareOutput(dirname):
    """Recreate DIRNAME as a fresh empty directory (any prior content is wiped)."""
    shutil.rmtree(dirname, ignore_errors=True)
    os.makedirs(dirname)
def failure():
    """Abort the whole delta-debugging session with the conventional exit code 42."""
    raise SystemExit(42)
class DDStoch(DD.DD):
    def __init__(self, config, prefix,
                 selectBlocAndNumLine=lambda x: (x,0), joinBlocAndNumLine= lambda x,y: x ):
        """Set up the stochastic delta-debugging session.

        config supplies all options (scripts, cache policy, run counts);
        prefix is the cache directory.  The two lambdas split/join a delta
        into (bloc, line) for converting old ddmin line numbers.  Order
        matters: cache preparation, reference run, search-space merge, then
        heuristics loading (which needs the search space).
        """
        DD.DD.__init__(self)
        self.config_=config
        if not self.config_.get_quiet():
            print("delta debug options :")
            print(self.config_.optionToStr())
        self.run_ = self.config_.get_runScript()
        self.compare_ = self.config_.get_cmpScript()
        self.cache_outcomes = False # the cache of DD.DD is ignored
        self.index=0
        self.prefix_ = os.path.join(os.getcwd(),prefix)
        self.relPrefix_=prefix
        self.ref_ = os.path.join(self.prefix_, "ref")
        self.prepareCache()
        prepareOutput(self.ref_)
        self.reference() #generate the reference computation
        self.mergeList() #generate the search space
        self.rddminHeuristicLoadRep(selectBlocAndNumLine, joinBlocAndNumLine) # at the end because need the search space
    def symlink(self,src, dst):
        """Create a relative symlink dst -> src inside the cache.

        Any existing link/file at dst is removed first.  Both the link
        target and the link location are expressed relative to the cache
        prefix so the cache directory stays relocatable.
        """
        if os.path.lexists(dst):
            os.remove(dst)
        relSrc=os.path.relpath(src, self.prefix_ )
        relDist=os.path.relpath(dst, self.prefix_)
        relPrefix=os.path.relpath(self.prefix_, os.getcwd())
        # NOTE(review): assumes the current working directory is the one the
        # session started in -- confirm no caller chdirs in between
        os.symlink(relSrc, os.path.join(relPrefix, relDist))
def cleanSymLink(self):
"""Delete all symlink in the cache"""
self.saveCleabSymLink=[]
symLinkTab=self.searchSymLink()
for symLink in symLinkTab:
if os.path.lexists(symLink):
os.remove(symLink)
if self.config_.get_cache=="continue":
self.saveCleanSymLink+=[symLink]
def searchSymLink(self):
"""Return the list of symlink (created by DD_stoch) in the cache"""
res =glob.glob(os.path.join(self.prefix_, "ddmin*"))
res+=glob.glob(os.path.join(self.prefix_, "ddmax"))
res+=glob.glob(os.path.join(self.prefix_, "rddmin-cmp"))
res+=glob.glob(os.path.join(self.prefix_, "FullPerturbation"))
res+=glob.glob(os.path.join(self.prefix_, "NoPerturbation"))
return res
def rddminHeuristicLoadRep(self, selectBlocAndNumLine, joinBlocAndNumLine):
""" Load the results of previous ddmin. Need to be called after prepareCache"""
self.useRddminHeuristic=False
if self.config_.get_rddminHeuristicsCache() !="none" or len(self.config_.get_rddminHeuristicsRep_Tab())!=0:
self.useRddminHeuristic=True
if self.useRddminHeuristic==False:
return
rddmin_heuristic_rep=[]
if "cache" in self.config_.get_rddminHeuristicsCache():
if self.config_.get_cache()=="rename":
if self.oldCacheName!=None:
cacheRep=self.oldCacheName
if os.path.isdir(cacheRep):
rddmin_heuristic_rep+=[cacheRep]
else:
cacheRep=self.prefix_
if os.path.isdir(cacheRep):
rddmin_heuristic_rep+=[cacheRep]
if self.config_.get_rddminHeuristicsCache()=="all_cache":
rddmin_heuristic_rep+=glob.glob(self.prefix_+"-*-*-*_*h*m*s")
for rep in self.config_.get_rddminHeuristicsRep_Tab():
if rep not in rddmin_heuristic_rep:
rddmin_heuristic_rep+=[rep]
self.ddminHeuristic=[]
if self.config_.get_cache=="continue":
self.ddminHeuristic+=[ self.loadDeltaFile(rep) for rep in self.saveCleanSymLink if "ddmin" in rep]
if self.config_.get_rddminHeuristicsLineConv():
for rep in rddmin_heuristic_rep:
deltaOld=self.loadDeltaFile(os.path.join(rep,"ref"), True)
if deltaOld==None:
continue
cvTool=convNumLineTool.convNumLineTool(deltaOld, self.getDelta0(), selectBlocAndNumLine, joinBlocAndNumLine)
repTab=glob.glob(os.path.join(rep, "ddmin*"))
for repDDmin in repTab:
deltas=self.loadDeltaFile(repDDmin)
if deltas==None:
continue
deltasNew=[]
for delta in deltas:
deltasNew+= cvTool.getNewLines(delta)
self.ddminHeuristic+=[deltasNew]
else:
for rep in rddmin_heuristic_rep:
repTab=glob.glob(os.path.join(rep, "ddmin*"))
for repDDmin in repTab:
deltas=self.loadDeltaFile(repDDmin)
if deltas==None:
continue
self.ddminHeuristic+=[deltas]
def loadDeltaFile(self,rep, ref=False):
fileName=os.path.join(rep, self.getDeltaFileName()+".include")
if ref:
fileName=os.path.join(rep, self.getDeltaFileName())
if os.path.exists(fileName):
deltasTab=[ x.rstrip() for x in (open(fileName)).readlines()]
return deltasTab
else:
print(fileName + " do not exist")
return None
    def prepareCache(self):
        """Prepare the cache directory according to the configured policy.

        Policies: "continue" reuses the cache as is (minus symlinks);
        "clean" wipes it; "rename"/"rename_keep_result" archives the old
        cache under a timestamped name; "keep_run" keeps run outputs but
        drops all comparison results so they are redone.
        """
        cache=self.config_.get_cache()
        if cache=="continue":
            if not os.path.exists(self.prefix_):
                os.mkdir(self.prefix_)
            self.cleanSymLink()
            return
        if cache=="clean":
            shutil.rmtree(self.prefix_, ignore_errors=True)
            os.mkdir(self.prefix_)
            return
        if cache=="rename_keep_result":
            #delete unusefull rep : rename treated later
            symLinkTab=self.searchSymLink()
            repToKeep=[os.readlink(x) for x in symLinkTab]
            # NOTE(review): debug leftover -- prints the kept directories
            print(repToKeep)
            for item in os.listdir(self.prefix_):
                # md5 directory names: 32 lowercase hex characters
                if len(item)==32 and all(i in ['a', 'b', 'c', 'd', 'e', 'f']+[str(x) for x in range(10)] for i in item) :
                    if not item in repToKeep:
                        shutil.rmtree(os.path.join(self.prefix_, item))
        if cache.startswith("rename"):
            if os.path.exists(self.prefix_):
                symLinkTab=self.searchSymLink()
                if symLinkTab==[]: #find alternative file to get time stamp
                    refPath=os.path.join(self.prefix_, "ref")
                    if os.path.exists(refPath):
                        symLinkTab=[refPath]
                    else:
                        symLinkTab=[self.prefix_]
                # archive name carries the newest mtime among the markers
                timeStr=datetime.datetime.fromtimestamp(max([os.path.getmtime(x) for x in symLinkTab])).strftime("%m-%d-%Y_%Hh%Mm%Ss")
                self.oldCacheName=self.prefix_+"-"+timeStr
                os.rename(self.prefix_,self.oldCacheName )
            else:
                self.oldCacheName=None
            os.mkdir(self.prefix_)
        if cache=="keep_run":
            if not os.path.exists(self.prefix_):
                os.mkdir(self.prefix_)
            else:
                self.cleanSymLink()
                # drop comparison artifacts so every sample is re-compared
                filesToDelete =glob.glob(os.path.join(self.prefix_, "*/dd.run[0-9]*/dd.compare.*"))
                filesToDelete +=glob.glob(os.path.join(self.prefix_, "*/dd.run[0-9]*/dd.return.value"))
                for fileToDelete in filesToDelete:
                    os.remove(fileToDelete)
    def reference(self):
        """Run the reference computation and check its self-comparison.

        Executes the run script in the ref directory with the reference
        environment, then verifies that the compare script accepts the
        reference against itself.  Any failure aborts the session.
        """
        print(os.path.relpath(self.ref_, os.getcwd()),end="")
        print(" -- (run) -> ",end="",flush=True)
        retval = runCmd([self.run_, self.ref_],
                        os.path.join(self.ref_,"dd"),
                        self.referenceRunEnv())
        if retval!=0:
            print("")
            self.referenceRunFailure()
        else:
            # sanity check: comparing the reference to itself must PASS
            """Check the comparison between the reference and refrence is valid"""
            retval = runCmd([self.compare_,self.ref_, self.ref_],
                            os.path.join(self.ref_,"checkRef"))
            if retval != 0:
                print("FAIL")
                self.referenceFailsFailure()
            else:
                print("PASS")
def mergeList(self):
"""merge the file name.$PID into a uniq file called name """
dirname=self.ref_
name=self.getDeltaFileName()
listOfExcludeFile=[ x for x in os.listdir(dirname) if self.isFileValidToMerge(x) ]
if len(listOfExcludeFile)<1:
self.searchSpaceGenerationFailure()
# with open(os.path.join(dirname,listOfExcludeFile[0]), "r") as f:
# excludeMerged=f.readlines()
# for excludeFile in listOfExcludeFile[1:]:
excludeMerged=[]
for excludeFile in listOfExcludeFile:
with open(os.path.join(dirname,excludeFile), "r") as f:
for line in f.readlines():
rsline=line.rstrip()
if rsline not in excludeMerged:
excludeMerged+=[rsline]
with open(os.path.join(dirname, name), "w" )as f:
for line in excludeMerged:
f.write(line+"\n")
def testWithLink(self, deltas, linkname, earlyExit=True):
testResult=self._test(deltas, self.config_.get_nbRUN() , earlyExit)
dirname = os.path.join(self.prefix_, md5Name(deltas))
self.symlink(dirname, os.path.join(self.prefix_,linkname))
return testResult
def report_progress(self, c, title):
if not self.config_.get_quiet:
super().report_progress(c,title)
def configuration_found(self, kind_str, delta_config,verbose=True):
if verbose:
print("%s (%s):"%(kind_str,self.coerce(delta_config)))
earlyExit=True
if self.config_.resWithAllSamples:
earlyExit=False
self.testWithLink(delta_config, kind_str, earlyExit)
def run(self, deltas=None):
# get the search space
if deltas==None:
deltas=self.getDelta0()
if(len(deltas)==0):
emptySearchSpaceFailure()
#basic verification
testResult=self._test(deltas)
self.configuration_found("FullPerturbation",deltas)
if testResult!=self.FAIL:
self.fullPerturbationSucceedsFailure()
testResult=self._test([])
self.configuration_found("NoPerturbation",[])
if testResult!=self.PASS:
self.noPerturbationFailsFailure()
#select the right variant of algo and apply it
algo=self.config_.get_ddAlgo()
resConf=None
def rddminAlgo(localDeltas):
if algo=="rddmin":
localConf = self.RDDMin(localDeltas, self.config_.get_nbRUN())
if algo.startswith("srddmin"):
localConf= self.SRDDMin(localDeltas, self.config_.get_rddMinTab())
if algo.startswith("drddmin"):
localConf = self.DRDDMin(localDeltas,
self.config_.get_rddMinTab(),
self.config_.get_splitTab(),
self.config_.get_splitGranularity())
return localConf
if self.useRddminHeuristic and "rddmin" in algo:
resConf=self.applyRddminWithHeuristics(deltas,rddminAlgo)
else:
resConf=rddminAlgo(deltas)
if algo=="ddmax":
resConf= self.DDMax(deltas)
else:
if resConf!=None:
flatRes=[c for conf in resConf for c in conf]
cmp= [delta for delta in deltas if delta not in flatRes ]
self.configuration_found("rddmin-cmp", cmp)
return resConf
    def applyRddminWithHeuristics(self,deltas, algo):
        """Use ddmin configurations from a previous DD_stoch run as a filter
        before running the rddmin algorithm *algo*.

        Each remembered configuration still included in *deltas* is retested:
        if it still fails it is kept (after a 1-minimality check for sets of
        size > 1) and removed from the search space.  The remaining search
        space is then handed to *algo* only if it still fails.

        Returns the list of minimal failing sets found.
        """
        res=[]
        for heuristicsDelta in self.ddminHeuristic:
            # only applicable if the remembered set is still inside the
            # current search space
            if all(x in deltas for x in heuristicsDelta): #check inclusion
                testResult=self._test(heuristicsDelta, self.config_.get_nbRUN())
                if testResult!=self.FAIL:
                    # the old ddmin no longer fails: discard it
                    if not self.config_.get_quiet():
                        print("Bad rddmin heuristic : %s"%self.coerce(heuristicsDelta))
                else:
                    if not self.config_.get_quiet():
                        print("Good rddmin heuristics : %s"%self.coerce(heuristicsDelta))
                    if len(heuristicsDelta)==1:
                        # a failing singleton is minimal by construction
                        res+=[heuristicsDelta]
                        self.configuration_found("ddmin%d"%(self.index), heuristicsDelta)
                        self.index+=1
                        deltas=[delta for delta in deltas if delta not in heuristicsDelta]
                    else:
                        # verify 1-minimality (falls back to RDDMin if not)
                        resTab= self.check1Min(heuristicsDelta, self.config_.get_nbRUN())
                        for resMin in resTab:
                            res+=[resMin] #add to res
                            deltas=[delta for delta in deltas if delta not in resMin] #reduce search space
        print("Heuristics applied")
        # after the heuristic filter a classic (s)rddmin is applied on what
        # remains, but only if the reduced search space still fails
        testResult=self._test(deltas, self.config_.get_nbRUN())
        if testResult!=self.FAIL:
            return res
        else:
            return res+algo(deltas)
def DDMax(self, deltas):
res=self.verrou_dd_max(deltas)
cmp=[delta for delta in deltas if delta not in res]
self.configuration_found("ddmax", cmp)
self.configuration_found("ddmax-cmp", res)
return cmp
def RDDMin(self, deltas,nbRun):
ddminTab=[]
testResult=self._test(deltas)
if testResult!=self.FAIL:
self.internalError("RDDMIN", md5Name(deltas)+" should fail")
while testResult==self.FAIL:
conf = self.verrou_dd_min(deltas,nbRun)
ddminTab += [conf]
self.configuration_found("ddmin%d"%(self.index), conf)
#print("ddmin%d (%s):"%(self.index,self.coerce(conf)))
#update deltas
deltas=[delta for delta in deltas if delta not in conf]
testResult=self._test(deltas,nbRun)
self.index+=1
return ddminTab
def check1Min(self, deltas,nbRun):
ddminTab=[]
testResult=self._test(deltas)
if testResult!=self.FAIL:
self.internalError("Check1-MIN", md5Name(deltas)+" should fail")
for deltaMin1 in deltas:
newDelta=[delta for delta in deltas if delta!=deltaMin1]
testResult=self._test(newDelta)
if testResult==self.FAIL:
if not self.config_.get_quiet():
print("Heuristics not 1-Minimal")
return self.RDDMin(deltas, nbRun)
self.configuration_found("ddmin%d"%(self.index), deltas)
self.index+=1
return [deltas]
def splitDeltas(self, deltas,nbRun,granularity):
nbProc=self.config_.get_maxNbPROC()
if nbProc in [None,1]:
return self.splitDeltasSeq(deltas, nbRun, granularity)
return self.splitDeltasPar(deltas, nbRun, granularity,nbProc)
    def splitDeltasPar(self, deltas,nbRun,granularity, nbProc):
        """Parallel recursive split of *deltas* into smallest failing subsets
        (with respect to *granularity*), testing several candidate splits
        concurrently through _testTab.

        Returns the list of smallest failing subsets found.
        """
        if self._test(deltas, self.config_.get_nbRUN())==self.PASS:
            return [] #short exit
        res=[] #result : set of smallest failing subsets (with respect to granularity)
        toTreat=[deltas]
        #name for progression
        algo_name="splitDeltasPara"
        # number of candidates whose subsets can be tested at the same time
        nbPara=math.ceil( nbProc/granularity)
        while len(toTreat)>0:
            toTreatNow=toTreat[0:nbPara]
            toTreatLater=toTreat[nbPara:]
            # split each candidate into at most `granularity` subsets
            ciTab=[self.split(candidat, min(granularity, len(candidat))) for candidat in toTreatNow]
            # flatten to test all subsets in one _testTab call
            flatciTab=sum(ciTab,[])
            flatResTab=self._testTab(flatciTab, [nbRun]* len(flatciTab))
            resTab=[]
            lBegin=0
            for i in range(len(ciTab)): #unflatten flatResTab back to per-candidate slices
                lEnd=lBegin+len(ciTab[i])
                resTab+=[flatResTab[lBegin: lEnd]]
                lBegin=lEnd
            remainToTreat=[]
            for i in range(len(ciTab)):
                ci=ciTab[i]
                splitFailed=False
                for j in range(len(ci)):
                    conf=ci[j]
                    if resTab[i][j]==self.FAIL:
                        splitFailed=True
                        if len(conf)==1:
                            # a failing singleton is a valid ddmin
                            self.configuration_found("ddmin%d"%(self.index), conf)
                            self.index+=1
                            res.append(conf)
                        else:
                            # still failing but splittable: recurse on it
                            remainToTreat+=[conf]
                if not splitFailed:
                    # no subset failed: keep the whole candidate as a
                    # smallest failing set at this granularity
                    res+=[toTreatNow[i]]
            toTreat=remainToTreat+toTreatLater
        return res
    def splitDeltasSeq(self, deltas,nbRun,granularity):
        """Sequential recursive split of *deltas* into smallest failing
        subsets (with respect to *granularity*).

        Maintains two worklists: configurations known to fail and
        configurations of unknown status; at each step the smaller candidate
        is treated (preferring known failures on ties).  Returns the list of
        smallest failing subsets found.
        """
        if self._test(deltas, self.config_.get_nbRUN())==self.PASS:
            return [] #short exit
        res=[] #result : set of smallest failing subsets (with respect to granularity)
        #two lists which contain tasks
        # -the fail status is known
        toTreatFailed=[deltas]
        # -the status is not yet known
        toTreatUnknown=[]
        #name for progression
        algo_name="splitDeltas"
        def treatFailedCandidat(candidat):
            #treat a configuration known to fail
            self.report_progress(candidat, algo_name)
            # create subsets
            cutSize=min(granularity, len(candidat))
            ciTab=self.split(candidat, cutSize)
            cutAbleStatus=False
            for i in range(len(ciTab)):
                ci=ciTab[i]
                #test each subset
                status=self._test(ci ,nbRun)
                if status==self.FAIL:
                    if len(ci)==1:
                        #if the subset size is one the subset is a valid ddmin : treat as such
                        self.configuration_found("ddmin%d"%(self.index), ci)
                        self.index+=1
                        res.append(ci)
                    else:
                        #insert the subset at the front of the failed task list
                        toTreatFailed.insert(0,ci)
                        #insert the remaining subsets to the unknown task list
                        tail= ciTab[i+1:]
                        tail.reverse() # to keep the same order
                        for cip in tail:
                            toTreatUnknown.insert(0,cip)
                        return
                    cutAbleStatus=True
            # no failing subset could be recursed into: keep the candidate
            # itself as a smallest failing set at this granularity
            if cutAbleStatus==False:
                res.append(candidat)
        def treatUnknownStatusCandidat(candidat):
            #test the configuration : do nothing in case of success and add
            #to the failed task list in case of failure
            self.report_progress(candidat, algo_name+ "(unknownStatus)")
            status=self._test(candidat, nbRun)
            if status==self.FAIL:
                toTreatFailed.insert(0,candidat)
            else:
                pass
        # loop over tasks
        while len(toTreatFailed)!=0 or len(toTreatUnknown)!=0:
            unknownStatusSize=len(deltas) #to get a max
            if len(toTreatUnknown)!=0:
                unknownStatusSize=len(toTreatUnknown[0])
            if len(toTreatFailed)==0:
                treatUnknownStatusCandidat(toTreatUnknown[0])
                toTreatUnknown=toTreatUnknown[1:]
                continue
            #select the smallest candidat : in case of equality select a fail
            toTreatCandidat=toTreatFailed[0]
            if len(toTreatCandidat) <= unknownStatusSize:
                cutCandidat=toTreatCandidat
                toTreatFailed=toTreatFailed[1:]
                treatFailedCandidat(cutCandidat)
            else:
                treatUnknownStatusCandidat(toTreatUnknown[0])
                toTreatUnknown=toTreatUnknown[1:]
        return res
    def SsplitDeltas(self, deltas, runTab, granularity):#runTab=splitTab ,granularity=2):
        """Apply splitDeltas recursively with an increasing sample number
        (one pass per entry of *runTab*).

        Note: the treatment of the remaining deltas does not respect the
        binary split structure.  Returns the final list of subsets.
        """
        #name for progression
        algo_name="ssplitDelta"
        currentSplit=[deltas]
        for run in runTab:
            nextCurrent=[]
            for candidat in currentSplit:
                if len(candidat)==1:
                    # singletons cannot be split further
                    nextCurrent.append(candidat)
                    continue
                self.report_progress(candidat,algo_name)
                res=self.splitDeltas(candidat,run, granularity)
                nextCurrent.extend(res)
            # remainDeltas is recomputed from the whole list (indeed the set
            # can grow again as the sample number increases)
            flatNextCurrent=[flatItem for nextCurrentItem in nextCurrent for flatItem in nextCurrentItem]
            remainDeltas=[delta for delta in deltas if delta not in flatNextCurrent ]
            #apply split to remainDeltas
            self.report_progress(remainDeltas,algo_name)
            nextCurrent.extend(self.splitDeltas(remainDeltas, run, granularity))
            currentSplit=nextCurrent
        return currentSplit
    def DRDDMin(self, deltas, SrunTab, dicRunTab, granularity):#SrunTab=rddMinTab, dicRunTab=splitTab, granularity=2):
        """Dichotomic rddmin: first narrow the search space with recursive
        splits (SsplitDeltas, driven by *dicRunTab*), then run SRDDMin
        (driven by *SrunTab*) on each non-trivial candidate and on whatever
        remains.  Returns the list of minimal failing sets.
        """
        #name for progression
        algo_name="DRDDMin"
        #assert with the right nbRun number (the largest of the ramp)
        nbRun=SrunTab[-1]
        testResult=self._test(deltas,nbRun)
        if testResult!=self.FAIL:
            self.internalError("DRDDMIN", md5Name(deltas)+" should fail")
        #apply dichotomy
        candidats=self.SsplitDeltas(deltas,dicRunTab, granularity)
        print("Dichotomy split done: " + str([len(candidat) for candidat in candidats if len(candidat)!=1] ))
        res=[]
        for candidat in candidats:
            if len(candidat)==1: #is a valid ddmin
                res+=[candidat]
                deltas=[delta for delta in deltas if delta not in candidat]
            else:
                self.report_progress(candidat, algo_name)
                # we do not know if candidat is a valid ddmin (in case of
                # sparse pattern), so minimize it
                resTab=self.SRDDMin(candidat,SrunTab)
                for resMin in resTab:
                    res+=[resMin] #add to res
                    deltas=[delta for delta in deltas if delta not in resMin] #reduce search space
        print("Dichotomy split analyze done")
        # after the split filter a classic (s)rddmin is applied on what
        # remains, but only if it still fails
        testResult=self._test(deltas,nbRun)
        if testResult!=self.FAIL:
            return res
        else:
            return res+self.SRDDMin(deltas, SrunTab)
    def SRDDMin(self, deltas,runTab):#runTab=rddMinTab):
        """Stochastic rddmin: like RDDMin but with a ramp of sample numbers
        (*runTab*, increasing), starting from an estimate of how many samples
        are needed to observe a failure.  Returns the list of minimal
        failing sets.
        """
        #name for progression
        algo_name="SRDDMin"
        #assert with the right nbRun number (the largest of the ramp)
        nbRun=runTab[-1]
        testResult=self._test(deltas,nbRun)
        if testResult!=self.FAIL:
            self.internalError("SRDDMIN", md5Name(deltas)+" should fail")
        ddminTab=[]
        # skip ramp entries too small to expect a failure at all
        nbMin=self._getSampleNumberToExpectFail(deltas)
        filteredRunTab=[x for x in runTab if x>=nbMin]
        if len(filteredRunTab)==0:
            filteredRunTab=[nbRun]
        #increasing number of runs
        for run in filteredRunTab:
            testResult=self._test(deltas,run)
            #rddmin loop
            while testResult==self.FAIL:
                self.report_progress(deltas, algo_name)
                conf = self.verrou_dd_min(deltas,run)
                if len(conf)!=1:
                    # may not be minimal due to the (low) number of runs:
                    # re-minimize with the larger entries of the ramp
                    for runIncValue in [x for x in runTab if x>run ]:
                        conf = self.verrou_dd_min(conf,runIncValue)
                        if len(conf)==1:
                            break
                ddminTab += [conf]
                self.configuration_found("ddmin%d"%(self.index), conf)
                self.index+=1
                #update search space
                deltas=[delta for delta in deltas if delta not in conf]
                # end-of-loop test uses the maximal sample number of the ramp
                testResult=self._test(deltas,nbRun)
        return ddminTab
    # Error messages: each helper below prints a diagnostic with suggestions
    # and aborts through failure().
def emptySearchSpaceFailure(self):
print("FAILURE : delta-debug search space is empty")
failure()
def searchSpaceGenerationFailure(self):
print("The generation of exclusion/source files failed")
failure()
def fullPerturbationSucceedsFailure(self):
print("FAILURE: nothing to debug (the run with all symbols activated succeed)")
print("Suggestions:")
print("\t1) check the correctness of the %s script : the failure criteria may be too large"%self.compare_)
print("\t2) check if the number of samples (option --nruns=) is sufficient ")
print("Directory to analyze: FullPerturbation")
failure()
def noPerturbationFailsFailure(self):
print("FAILURE: the comparison between the reference (code instrumented with nearest mode) andthe code without instrumentation failed")
print("Suggestions:")
print("\t1) check if reproducibilty discrepancies are larger than the failure criteria of the script %s"%self.compare_)
print("\t2) check the libm library is not instrumented")
print("Directory to analyze: NoPerturbation")
failure()
def referenceFailsFailure(self):
print("FAILURE: the reference is not valid ")
print("Suggestions:")
print("\t1) check the correctness of the %s script"%self.compare_)
print("Files to analyze:")
print("\t run output: " + os.path.join(self.ref_,"dd.out") + " " + os.path.join(self.ref_,"dd.err"))
print("\t cmp output: " + os.path.join(self.ref_,"checkRef.out") + " "+ os.path.join(self.ref_,"checkRef.err"))
failure()
def referenceRunFailure(self):
print("FAILURE: the reference run fails ")
print("Suggestions:")
print("\t1) check the correctness of the %s script"%self.run_)
print("Files to analyze:")
print("\t run output: " + os.path.join(self.ref_,"dd.out") + " " + os.path.join(self.ref_,"dd.err"))
failure()
def getDelta0(self):
return self.loadDeltaFile(self.ref_, True)
# with open(os.path.join(self.ref_ ,self.getDeltaFileName()), "r") as f:
# return f.readlines()
def genExcludeIncludeFile(self, dirname, deltas, include=False, exclude=False):
"""Generate the *.exclude and *.include file in dirname rep from deltas"""
excludes=self.getDelta0()
dd=self.getDeltaFileName()
if include:
with open(os.path.join(dirname,dd+".include"), "w") as f:
for d in deltas:
f.write(d+"\n")
if exclude:
with open(os.path.join(dirname,dd+".exclude"), "w") as f:
for d in deltas:
excludes.remove(d)
for line in excludes:
f.write(line+"\n")
def _test(self, deltas,nbRun=None, earlyExit=True):
if nbRun==None:
nbRun=self.config_.get_nbRUN()
# return self._testTab([deltas],[nbRun])[0]
dirname=os.path.join(self.prefix_, md5Name(deltas))
if not os.path.exists(dirname):
os.makedirs(dirname)
self.genExcludeIncludeFile(dirname, deltas, include=True, exclude=True)
vT=verrouTask(dirname, self.ref_, self.run_, self.compare_ ,nbRun, self.config_.get_maxNbPROC() , self.sampleRunEnv(dirname))
return vT.run(earlyExit=earlyExit)
def _getSampleNumberToExpectFail(self, deltas):
nbRun=self.config_.get_nbRUN()
dirname=os.path.join(self.prefix_, md5Name(deltas))
if not os.path.exists(dirname):
self.internalError("_getSampleNumberToExpectFail:", dirname+" should exist")
vT=verrouTask(dirname,None, None, None ,None, None, None)
p=vT.getEstimatedFailProbability()
if p==1.:
return 1
else:
alpha=0.85
return int(min( math.ceil(math.log(1-alpha) / math.log(1-p)), nbRun))
    def _testTab(self, deltasTab,nbRunTab=None):
        """Test several delta configurations concurrently.

        For each configuration, cached results are reused when possible;
        otherwise the pending comparisons and runs are submitted to a thread
        pool.  Returns the list of PASS/FAIL outcomes, in the order of
        *deltasTab*.
        """
        nbDelta=len(deltasTab)
        if nbRunTab==None:
            nbRunTab=[self.config_.get_nbRUN()]*nbDelta
        import concurrent.futures
        executor=concurrent.futures.ThreadPoolExecutor(max_workers=self.config_.get_maxNbPROC())
        # per-configuration bookkeeping, indexed like deltasTab
        resTab=[None] *nbDelta
        taskTab=[None] *nbDelta
        indexCmp=[]
        futureCmpTab=[None] *nbDelta
        doCmpTab=[None] *nbDelta
        indexRun=[]
        futureRunTab=[None] *nbDelta
        workToDoTab=[None]*nbDelta
        for i in range(nbDelta):
            deltas=deltasTab[i]
            dirname=os.path.join(self.prefix_, md5Name(deltas))
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            self.genExcludeIncludeFile(dirname, deltas, include=True, exclude=True)
            # maxNbPROC is None here: the outer executor already provides the
            # parallelism, so inner parallelism is disabled
            taskTab[i]=verrouTask(dirname, self.ref_, self.run_, self.compare_ ,nbRunTab[i], None , self.sampleRunEnv(dirname),verbose=False)
            workToDo=taskTab[i].sampleToCompute(nbRunTab[i], earlyExit=True)
            workToDoTab[i]=workToDo
            if workToDo==None:
                # a cached failure short-circuits any work
                resTab[i]=(taskTab[i].FAIL,"cache")
                taskTab[i].printDir()
                print(" --(/cache) -> FAIL")
                continue
            # workToDo = (comparisons to redo, runs to do, comparisons done)
            cmpOnlyToDo=workToDo[0]
            runToDo=workToDo[1]
            cmpDone=workToDo[2]
            if len(cmpOnlyToDo)==0 and len(runToDo)==0: #everything in cache
                resTab[i]=(taskTab[i].PASS,"cache")
                taskTab[i].printDir()
                print(" --(/cache) -> PASS("+ str(nbRunTab[i])+")")
                continue
            if len(cmpOnlyToDo)!=0: #launch cmp asynchronously
                indexCmp+=[i]
                futureCmpTab[i]=[executor.submit(taskTab[i].cmpSeq, [cmpConf],False) for cmpConf in cmpOnlyToDo]
                continue
            if len(runToDo)!=0: #launch run asynchronously
                indexRun+=[i]
                futureRunTab[i]=[ executor.submit(taskTab[i].runSeq, [run],False) for run in runToDo ]
                continue
            print("error parallel")
            failure()
        for i in indexCmp: #wait cmp result
            workToDo=workToDoTab[i]
            cmpOnlyToDo, runToDo, cmpDone =workToDo[0],workToDo[1],workToDo[2]
            concurrent.futures.wait(futureCmpTab[i])
            cmpResult=[future.result() for future in futureCmpTab[i]]
            if taskTab[i].FAIL in cmpResult:
                failIndex=cmpResult.index(taskTab[i].FAIL)
                resTab[i]=(taskTab[i].FAIL, "cmp")
                taskTab[i].printDir()
                print(" --(/cmp/) -> FAIL(%i)"%(cmpOnlyToDo[failIndex]))
            else: #launch run asynchronously (depending on cmp result)
                runToDo=workToDoTab[i][1]
                if len(runToDo)==0:
                    resTab[i]=(taskTab[i].PASS,"cmp")
                    taskTab[i].printDir()
                    print(" --(/cmp/) -> PASS(+" + str(len(cmpOnlyToDo))+"->"+str(len(cmpDone) +len(cmpOnlyToDo))+")" )
                    continue
                else:
                    futureRunTab[i]=[ executor.submit(taskTab[i].runSeq, [run], False) for run in runToDo]
                    indexRun+=[i]
                    continue
        for i in indexRun: #wait run result
            workToDo=workToDoTab[i]
            cmpOnlyToDo, runToDo, cmpDone =workToDo[0],workToDo[1],workToDo[2]
            concurrent.futures.wait(futureRunTab[i])
            runResult=[future.result() for future in futureRunTab[i]]
            taskTab[i].printDir()
            if taskTab[i].FAIL in runResult:
                # NOTE(review): this rebinds the name `indexRun` (the list
                # being iterated) to an int.  Iteration is unaffected because
                # the for-loop holds the iterator, but the shadowing is
                # fragile and worth renaming in a later change.
                indexRun=runResult.index(taskTab[i].FAIL)
                resTab[i]=(taskTab[i].FAIL, "index//")
                print(" --(/run/) -> FAIL(%i)"%(runToDo[indexRun]))
            else:
                resTab[i]=(taskTab[i].PASS, "index//")
                print(" --(/run/) -> PASS(+" + str(len(runToDo))+"->"+str( len(cmpOnlyToDo) +len(cmpDone) +len(runToDo) )+")" )
        # return only the PASS/FAIL status, dropping the origin tag
        return [res[0] for res in resTab]
| 43,084 | 37.128319 | 171 | py |
verrou | verrou-master/pyTools/paraview_script.py | ##### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
import os
DATAPATH=os.environ["VERROU_PARAVIEW_DATA_PATH"]
try:
DATAPATH
except NameError:
print("Please set the DATAPATH variable with the full path to the directory that contains this script and the CSV files. Type: DATAPATH='/path/to/the_cvs_datasets/'")
RemoveViewsAndLayouts()
layout1 = CreateLayout()
pathTime=DATAPATH+'/paraviewTime.csv'
pathParam=DATAPATH+'/paraviewParam.csv'
# create a new 'CSV Reader'
timeCsv = CSVReader(FileName=[pathTime], FieldDelimiterCharacters="\t")
paramCsv = CSVReader(FileName=[pathParam], FieldDelimiterCharacters="\t")
keyInput=((open(pathParam)).readline().strip()).split("\t")
#rangeInput=[ line.split("\t")[0] for line in open(pathParam).readlines()]
timeIndex=((open(pathTime)).readline().strip()).split("\t")[1:]
keyIndex=[(line.split("\t"))[0] for line in ((open(pathTime)).readlines())][1:]
transposeTable1 = TransposeTable(Input=timeCsv)
transposeTable1.VariablesofInterest=timeIndex
transposeTable1.Addacolumnwithoriginalcolumnsname = 0
lineChartView1=CreateView("XYFunctionalBagChartView")
lineDisplay= Show(transposeTable1, lineChartView1)
lineDisplay.AttributeType= 'Row Data'
rangeIn=[x for x in lineDisplay.SeriesLabel ]
nbSample=len(keyIndex)
if len(rangeIn)!=2*nbSample:
print("Incoherent size")
sys.exit()
for i in range(nbSample):
rangeIn[2*i+1]=keyIndex[i]
lineDisplay.SeriesLabel= rangeIn
lineDisplay.SeriesVisibility= rangeIn
SetActiveSource(transposeTable1)
layout1.SplitVertical(0, 0.5)
# Create a new 'Parallel Coordinates View'
parallelCoordinatesView1 = CreateView('ParallelCoordinatesChartView')
input_parameterscsvDisplay = Show(paramCsv, parallelCoordinatesView1)
# trace defaults for the display properties.
input_parameterscsvDisplay.CompositeDataSetIndex = 0
input_parameterscsvDisplay.FieldAssociation = 'Row Data'
input_parameterscsvDisplay.SeriesVisibility = keyInput
# Properties modified on campbell_1d_input_parameterscsvDisplay
input_parameterscsvDisplay.SeriesVisibility = keyInput
#### uncomment the following to render all views
RenderAllViews()
| 2,245 | 26.728395 | 168 | py |
verrou | verrou-master/pyTools/rounding_tool.py | #!/usr/bin/python3
import sys
roundingDetTab=["nearest","upward","downward", "toward_zero", "away_zero","farthest","float","ftz"]
roundingNonDetTab=[x+y for x in ["random", "average", "prandom"] for y in ["","_det","_comdet"]] +[x+y for x in ["random", "average"] for y in ["_scomdet"]]+["sr_monotonic"]
allRoundingTab=roundingDetTab+roundingNonDetTab
def filterNonDetTab(roundingTab):
validTab=list(filter(isValidRounding, roundingTab ))
return list(filter(lambda x: not (x in roundingDetTab), validTab ))
def filterDetRoundingTab(roundingTab):
return list(filter(lambda x: x in roundingDetTab, roundingTab ))
def isValidRounding(rounding):
return roundingToEnvVar(rounding,failure=False)!=None
def isStrFloat(strFloat):
try:
value=float(strFloat)
except:
return False
return True
def isValidRef(strRounding):
if isValidRounding(strRounding):
return True
return isStrFloat(strRounding)
def roundingToEnvVar(rounding, res={}, failure=True):
for prand in ["prandom_det", "prandom_comdet", "prandom"]:
if rounding.startswith(prand):
if rounding in ["prandom_det", "prandom_comdet", "prandom"]:
res.update({"VERROU_ROUNDING_MODE":rounding,"VERROU_PRANDOM_UPDATE":"none"})
return res
rest=rounding.replace(prand,"")
if rest.startswith("_"):
rest=rest[1:]
if rest=="func":
res.update({"VERROU_ROUNDING_MODE":prand, "VERROU_PRANDOM_UPDATE":"func"})
return res
try:
value=float(rest)
res.update({"VERROU_ROUNDING_MODE":prand, "VERROU_PRANDOM_PVALUE":rest})
return res
except:
if failure:
print("No valid prandom rounding specification : ", rounding )
sys.exit(42)
else:
return None
else:
if failure:
print("No valid prandom rounding : ",rounding)
sys.exit(42)
else:
return None
if rounding.startswith("mca"):
mcaConfig=rounding.split("-")[1:]
mode=mcaConfig[0]
doublePrec=mcaConfig[1]
floatPrec=mcaConfig[2]
envvars={"VERROU_BACKEND":"mcaquad",
"VERROU_MCA_MODE":mode,
"VERROU_MCA_PRECISION_DOUBLE": doublePrec,
"VERROU_MCA_PRECISION_FLOAT": floatPrec}
res.update(envvars)
return res
if rounding in roundingDetTab + roundingNonDetTab:
res.update({"VERROU_ROUNDING_MODE":rounding })
return res
if failure:
print("No valid rounding : ",rounding)
sys.exit(42)
else:
return None
| 2,865 | 33.119048 | 173 | py |
verrou | verrou-master/pyTools/DD.py | #!/usr/bin/env python3
# $Id: DD.py,v 1.2 2001/11/05 19:53:33 zeller Exp $
# Enhanced Delta Debugging class
# Copyright (c) 1999, 2000, 2001 Andreas Zeller.
# This module (written in Python) implements the base delta debugging
# algorithms and is at the core of all our experiments. This should
# easily run on any platform and any Python version since 1.6.
#
# To plug this into your system, all you have to do is to create a
# subclass with a dedicated `test()' method. Basically, you would
# invoke the DD test case minimization algorithm (= the `ddmin()'
# method) with a list of characters; the `test()' method would combine
# them to a document and run the test. This should be easy to realize
# and give you some good starting results; the file includes a simple
# sample application.
#
# This file is in the public domain; feel free to copy, modify, use
# and distribute this software as you wish - with one exception.
# Passau University has filed a patent for the use of delta debugging
# on program states (A. Zeller: `Isolating cause-effect chains',
# Saarland University, 2001). The fact that this file is publicly
# available does not imply that I or anyone else grants you any rights
# related to this patent.
#
# The use of Delta Debugging to isolate failure-inducing code changes
# (A. Zeller: `Yesterday, my program worked', ESEC/FSE 1999) or to
# simplify failure-inducing input (R. Hildebrandt, A. Zeller:
# `Simplifying failure-inducing input', ISSTA 2000) is, as far as I
# know, not covered by any patent, nor will it ever be. If you use
# this software in any way, I'd appreciate if you include a citation
# such as `This software uses the delta debugging algorithm as
# described in (insert one of the papers above)'.
#
# All about Delta Debugging is found at the delta debugging web site,
#
# http://www.st.cs.uni-sb.de/dd/
#
# Happy debugging,
#
# Andreas Zeller
import sys
import os
# Start with some helpers.
class OutcomeCache:
# This class holds test outcomes for configurations. This avoids
# running the same test twice.
# The outcome cache is implemented as a tree. Each node points
# to the outcome of the remaining list.
#
# Example: ([1, 2, 3], PASS), ([1, 2], FAIL), ([1, 4, 5], FAIL):
#
# (2, FAIL)--(3, PASS)
# /
# (1, None)
# \
# (4, None)--(5, FAIL)
def __init__(self):
self.tail = {} # Points to outcome of tail
self.result = None # Result so far
def add(self, c, result):
"""Add (C, RESULT) to the cache. C must be a list of scalars."""
cs = c[:]
cs.sort()
p = self
for start in range(len(c)):
if not c[start] in p.tail:
p.tail[c[start]] = OutcomeCache()
p = p.tail[c[start]]
p.result = result
def lookup(self, c):
"""Return RESULT if (C, RESULT) is in the cache; None, otherwise."""
p = self
for start in range(len(c)):
if not c[start] in p.tail:
return None
p = p.tail[c[start]]
return p.result
def lookup_superset(self, c, start = 0):
"""Return RESULT if there is some (C', RESULT) in the cache with
C' being a superset of C or equal to C. Otherwise, return None."""
# FIXME: Make this non-recursive!
if start >= len(c):
if self.result:
return self.result
elif self.tail != {}:
# Select some superset
superset = self.tail[self.tail.keys()[0]]
return superset.lookup_superset(c, start + 1)
else:
return None
if c[start] in self.tail:
return self.tail[c[start]].lookup_superset(c, start + 1)
# Let K0 be the largest element in TAIL such that K0 <= C[START]
k0 = None
for k in self.tail.keys():
if (k0 == None or k > k0) and k <= c[start]:
k0 = k
if k0 != None:
return self.tail[k0].lookup_superset(c, start)
return None
def lookup_subset(self, c):
"""Return RESULT if there is some (C', RESULT) in the cache with
C' being a subset of C or equal to C. Otherwise, return None."""
p = self
for start in range(len(c)):
if c[start] in p.tail:
p = p.tail[c[start]]
return p.result
# Test the outcome cache
def oc_test():
oc = OutcomeCache()
assert oc.lookup([1, 2, 3]) == None
oc.add([1, 2, 3], 4)
assert oc.lookup([1, 2, 3]) == 4
assert oc.lookup([1, 2, 3, 4]) == None
assert oc.lookup([5, 6, 7]) == None
oc.add([5, 6, 7], 8)
assert oc.lookup([5, 6, 7]) == 8
assert oc.lookup([]) == None
oc.add([], 0)
assert oc.lookup([]) == 0
assert oc.lookup([1, 2]) == None
oc.add([1, 2], 3)
assert oc.lookup([1, 2]) == 3
assert oc.lookup([1, 2, 3]) == 4
assert oc.lookup_superset([1]) == 3 or oc.lookup_superset([1]) == 4
assert oc.lookup_superset([1, 2]) == 3 or oc.lookup_superset([1, 2]) == 4
assert oc.lookup_superset([5]) == 8
assert oc.lookup_superset([5, 6]) == 8
assert oc.lookup_superset([6, 7]) == 8
assert oc.lookup_superset([7]) == 8
assert oc.lookup_superset([]) != None
assert oc.lookup_superset([9]) == None
assert oc.lookup_superset([7, 9]) == None
assert oc.lookup_superset([-5, 1]) == None
assert oc.lookup_superset([1, 2, 3, 9]) == None
assert oc.lookup_superset([4, 5, 6, 7]) == None
assert oc.lookup_subset([]) == 0
assert oc.lookup_subset([1, 2, 3]) == 4
assert oc.lookup_subset([1, 2, 3, 4]) == 4
assert oc.lookup_subset([1, 3]) == None
assert oc.lookup_subset([1, 2]) == 3
assert oc.lookup_subset([-5, 1]) == None
assert oc.lookup_subset([-5, 1, 2]) == 3
assert oc.lookup_subset([-5]) == 0
# Main Delta Debugging algorithm.
class DD:
# Delta debugging base class. To use this class for a particular
# setting, create a subclass with an overloaded `test()' method.
#
# Main entry points are:
# - `ddmin()' which computes a minimal failure-inducing configuration, and
# - `dd()' which computes a minimal failure-inducing difference.
#
# See also the usage sample at the end of this file.
#
# For further fine-tuning, you can implement an own `resolve()'
# method (tries to add or remove configuration elements in case of
# inconsistencies), or implement an own `split()' method, which
# allows you to split configurations according to your own
# criteria.
#
# The class includes other previous delta debugging alorithms,
# which are obsolete now; they are only included for comparison
# purposes.
# Test outcomes.
PASS = "PASS"
FAIL = "FAIL"
UNRESOLVED = "UNRESOLVED"
# Resolving directions.
ADD = "ADD" # Add deltas to resolve
REMOVE = "REMOVE" # Remove deltas to resolve
# Debugging output (set to 1 to enable)
debug_test = 0
debug_dd = 0
debug_split = 0
debug_resolve = 0
def __init__(self):
self.__resolving = 0
self.__last_reported_length = 0
self.monotony = 0
self.outcome_cache = OutcomeCache()
self.cache_outcomes = 1
self.minimize = 1
self.maximize = 1
self.assume_axioms_hold = 1
# Helpers
def __listminus(self, c1, c2):
"""Return a list of all elements of C1 that are not in C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
c = []
for delta in c1:
if not delta in s2:
c.append(delta)
return c
def __listintersect(self, c1, c2):
"""Return the common elements of C1 and C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
c = []
for delta in c1:
if delta in s2:
c.append(delta)
return c
def __listunion(self, c1, c2):
"""Return the union of C1 and C2."""
s1 = {}
for delta in c1:
s1[delta] = 1
c = c1[:]
for delta in c2:
if not delta in s1:
c.append(delta)
return c
def __listsubseteq(self, c1, c2):
"""Return 1 if C1 is a subset or equal to C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
for delta in c1:
if not delta in s2:
return 0
return 1
# Output
def coerce(self, c):
"""Return the configuration C as a compact string"""
# Default: use printable representation
return repr(c)
def pretty(self, c):
"""Like coerce(), but sort beforehand"""
sorted_c = c[:]
sorted_c.sort()
return self.coerce(sorted_c)
# Testing
def test(self, c):
"""Test the configuration C. Return PASS, FAIL, or UNRESOLVED"""
#c.sort()
# If we had this test before, return its result
if self.cache_outcomes:
cached_result = self.outcome_cache.lookup(c)
if cached_result != None:
return cached_result
if self.monotony:
# Check whether we had a passing superset of this test before
cached_result = self.outcome_cache.lookup_superset(c)
if cached_result == self.PASS:
return self.PASS
cached_result = self.outcome_cache.lookup_subset(c)
if cached_result == self.FAIL:
return self.FAIL
if self.debug_test:
print()
print("test(" + self.coerce(c) + ")...")
outcome = self._test(c)
if self.debug_test:
print("test(" + self.coerce(c) + ") = " + repr(outcome))
if self.cache_outcomes:
self.outcome_cache.add(c, outcome)
return outcome
def _test(self, c):
"""Stub to overload in subclasses"""
return self.UNRESOLVED # Placeholder
# Splitting
def split(self, c, n):
"""Split C into [C_1, C_2, ..., C_n]."""
if self.debug_split:
print("split(" + self.coerce(c) + ", " + repr(n) + ")...")
outcome = self._split(c, n)
if self.debug_split:
print( "split(" + self.coerce(c) + ", " + repr(n) + ") = " + repr(outcome))
return outcome
def _split(self, c, n):
"""Stub to overload in subclasses"""
subsets = []
start = 0
for i in range(n):
subset = c[start:start + (len(c) - start) // (n - i)]
subsets.append(subset)
start = start + len(subset)
return subsets
# Resolving
def resolve(self, csub, c, direction):
"""If direction == ADD, resolve inconsistency by adding deltas
to CSUB. Otherwise, resolve by removing deltas from CSUB."""
if self.debug_resolve:
print("resolve(" + repr(csub) + ", " + self.coerce(c) + ", " + \
repr(direction) + ")...")
outcome = self._resolve(csub, c, direction)
if self.debug_resolve:
print("resolve(" + repr(csub) + ", " + self.coerce(c) + ", " + \
repr(direction) + ") = " + repr(outcome))
return outcome
def _resolve(self, csub, c, direction):
"""Stub to overload in subclasses."""
# By default, no way to resolve
return None
# Test with fixes
def test_and_resolve(self, csub, r, c, direction):
"""Repeat testing CSUB + R while unresolved."""
initial_csub = csub[:]
c2 = self.__listunion(r, c)
csubr = self.__listunion(csub, r)
t = self.test(csubr)
# necessary to use more resolving mechanisms which can reverse each
# other, can (but needn't) be used in subclasses
self._resolve_type = 0
while t == self.UNRESOLVED:
self.__resolving = 1
csubr = self.resolve(csubr, c, direction)
if csubr == None:
# Nothing left to resolve
break
if len(csubr) >= len(c2):
# Added everything: csub == c2. ("Upper" Baseline)
# This has already been tested.
csubr = None
break
if len(csubr) <= len(r):
# Removed everything: csub == r. (Baseline)
# This has already been tested.
csubr = None
break
t = self.test(csubr)
self.__resolving = 0
if csubr == None:
return self.UNRESOLVED, initial_csub
# assert t == self.PASS or t == self.FAIL
csub = self.__listminus(csubr, r)
return t, csub
# Inquiries
def resolving(self):
"""Return 1 while resolving."""
return self.__resolving
# Logging
def report_progress(self, c, title):
if len(c) != self.__last_reported_length:
print()
print(title + ": " + repr(len(c)) + " deltas left:", self.coerce(c))
self.__last_reported_length = len(c)
# Delta Debugging (old ESEC/FSE version)
def old_dd(self, c, r = [], n = 2):
"""Return the failure-inducing subset of C"""
assert self.test([]) == dd.PASS
assert self.test(c) == dd.FAIL
if self.debug_dd:
print ("dd(" + self.pretty(c) + ", " + repr(r) + ", " + repr(n) + ")...")
outcome = self._old_dd(c, r, n)
if self.debug_dd:
print ("dd(" + self.pretty(c) + ", " + repr(r) + ", " + repr(n) +
") = " + repr(outcome))
return outcome
def test_mix(self, csub, c, direction):
if self.minimize:
(t, csub) = self.test_and_resolve(csub, [], c, direction)
if t == self.FAIL:
return (t, csub)
if self.maximize:
csubbar = self.__listminus(self.CC, csub)
cbar = self.__listminus(self.CC, c)
if direction == self.ADD:
directionbar = self.REMOVE
else:
directionbar = self.ADD
(tbar, csubbar) = self.test_and_resolve(csubbar, [], cbar,
directionbar)
csub = self.__listminus(self.CC, csubbar)
if tbar == self.PASS:
t = self.FAIL
elif tbar == self.FAIL:
t = self.PASS
else:
t = self.UNRESOLVED
return (t, csub)
# Delta Debugging (new ISSTA version)
def ddgen(self, c, minimize, maximize):
"""Return a 1-minimal failing subset of C"""
self.minimize = minimize
self.maximize = maximize
n = 2
self.CC = c
if self.debug_dd:
print ("dd(" + self.pretty(c) + ", " + repr(n) + ")...")
outcome = self._dd(c, n)
if self.debug_dd:
print ("dd(" + self.pretty(c) + ", " + repr(n) + ") = " + repr(outcome))
return outcome
def _dd(self, c, n):
"""Stub to overload in subclasses"""
testNoDelta=self.test([])
if testNoDelta!=self.PASS:
self.internalError("_dd", "ERROR: test([]) == FAILED")
# assert self.test([]) == self.PASS
run = 1
cbar_offset = 0
# We replace the tail recursion from the paper by a loop
while 1:
tc = self._test(c)
if tc != self.FAIL and tc != self.UNRESOLVED:
self.internalError("_dd","test([all deltas]) == PASS")
if n > len(c):
# No further minimizing
print ("dd: done")
return c
self.report_progress(c, "dd")
cs = self.split(c, n)
print ()
print ("dd (run #" + repr(run) + "): trying", "+".join([repr(len(cs[i])) for i in range(n)] ) )
c_failed = 0
cbar_failed = 0
next_c = c[:]
next_n = n
# Check subsets
for i in range(n):
if self.debug_dd:
print ("dd: trying", self.pretty(cs[i]))
(t, cs[i]) = self.test_mix(cs[i], c, self.REMOVE)
if t == self.FAIL:
# Found
if self.debug_dd:
print ("dd: found", len(cs[i]), "deltas:",)
print (self.pretty(cs[i]))
c_failed = 1
next_c = cs[i]
next_n = 2
cbar_offset = 0
self.report_progress(next_c, "dd")
break
if not c_failed:
# Check complements
cbars = n * [self.UNRESOLVED]
# print "cbar_offset =", cbar_offset
for j in range(n):
i = (j + cbar_offset) % n
cbars[i] = self.__listminus(c, cs[i])
t, cbars[i] = self.test_mix(cbars[i], c, self.ADD)
doubled = self.__listintersect(cbars[i], cs[i])
if doubled != []:
cs[i] = self.__listminus(cs[i], doubled)
if t == self.FAIL:
if self.debug_dd:
print ("dd: reduced to", len(cbars[i]),)
print ("deltas:", end="")
print (self.pretty(cbars[i]))
cbar_failed = 1
next_c = self.__listintersect(next_c, cbars[i])
next_n = next_n - 1
self.report_progress(next_c, "dd")
# In next run, start removing the following subset
cbar_offset = i
break
if not c_failed and not cbar_failed:
if n >= len(c):
# No further minimizing
print ("dd: done")
return c
next_n = min(len(c), n * 2)
print ("dd: increase granularity to", next_n)
cbar_offset = (cbar_offset * next_n) // n
c = next_c
n = next_n
run = run + 1
def verrou_dd_max(self, c):
"""Stub to overload in subclasses"""
self.maximize=1
self.minimize=0
n = 2
self.CC = c
algo_name="dd_max"
testNoDelta=self.test([])
if testNoDelta!=self.PASS:
self.internalError("verrou_dd_max","ERROR: test([]) == FAILED")
run = 1
cbar_offset = 0
# We replace the tail recursion from the paper by a loop
while 1:
tc = self.test(c)
if tc != self.FAIL and tc != self.UNRESOLVED:
self.internalError("verrou_dd_max","test([all deltas]) == PASS")
if n > len(c):
# No further minimizing
print (algo_name+": done")
return c
self.report_progress(c, algo_name)
cs = self.split(c, n)
print ()
print (algo_name+" (run #" + repr(run) + "): trying", "+".join([repr(len(cs[i])) for i in range(n)] ) )
c_failed = 0
cbar_failed = 0
next_c = c[:]
next_n = n
if not c_failed:
# Check complements
cbars = n * [self.UNRESOLVED]
# print "cbar_offset =", cbar_offset
for j in range(n):
i = (j + cbar_offset) % n
cbars[i] = self.__listminus(c, cs[i])
t, cbars[i] = self.test_mix(cbars[i], c, self.ADD)
doubled = self.__listintersect(cbars[i], cs[i])
if doubled != []:
cs[i] = self.__listminus(cs[i], doubled)
if t == self.FAIL:
if self.debug_dd:
print (algo_name+": reduced to", len(cbars[i]),)
print ("deltas:", end="")
print (self.pretty(cbars[i]))
cbar_failed = 1
next_c = self.__listintersect(next_c, cbars[i])
next_n = next_n - 1
self.report_progress(next_c, algo_name)
# In next run, start removing the following subset
cbar_offset = i
break
if not c_failed and not cbar_failed:
if n >= len(c):
# No further minimizing
print (algo_name+": done")
return c
next_n = min(len(c), n * 2)
print (algo_name+": increase granularity to", next_n)
cbar_offset = (cbar_offset * next_n) // n
c = next_c
n = next_n
run = run + 1
def verrou_dd_min(self, c , nbRun):
"""Stub to overload in subclasses"""
n = 2
algo_name="ddmin"
testNoDelta=self._test([],nbRun)
if testNoDelta!=self.PASS:
self.internalError("verrou_dd_min","ERROR: test([]) == FAILED")
run = 1
cbar_offset = 0
# We replace the tail recursion from the paper by a loop
while 1:
tc = self._test(c ,nbRun)
if tc != self.FAIL and tc != self.UNRESOLVED:
self.internalError("verrou_dd_min","ERROR: test([all deltas]) == PASS")
if n > len(c):
# No further minimizing
print (algo_name+": done")
return c
self.report_progress(c, algo_name)
cs = self.split(c, n)
print ()
print (algo_name+" (run #" + repr(run) + "): trying", "+".join([repr(len(cs[i])) for i in range(n)] ) )
c_failed = False
cbar_failed = False
next_c = c[:]
next_n = n
# Check subsets
for i in range(n):
if self.debug_dd:
print (algo_name+": trying", self.pretty(cs[i]))
t = self._test(cs[i],nbRun)
if t == self.FAIL:
# Found
if self.debug_dd:
print (algo_name+": found", len(cs[i]), "deltas:",)
print (self.pretty(cs[i]))
c_failed = True
next_c = cs[i]
next_n = 2
cbar_offset = 0
self.report_progress(next_c, algo_name)
break
if not c_failed:
# Check complements
cbars = n * [self.UNRESOLVED]
# print "cbar_offset =", cbar_offset
for j in range(n):
i = (j + cbar_offset) % n
cbars[i] = self.__listminus(c, cs[i])
t = self._test(cbars[i],nbRun)
if t == self.FAIL:
if self.debug_dd:
print (algo_name+": reduced to", len(cbars[i]),)
print ("deltas:", end="")
print (self.pretty(cbars[i]))
cbar_failed = True
next_c = cbars[i]
next_n = next_n - 1
self.report_progress(next_c, algo_name)
# In next run, start removing the following subset
cbar_offset = i
break
if not c_failed and not cbar_failed:
if n >= len(c):
# No further minimizing
print (algo_name+": done")
return c
next_n = min(len(c), n * 2)
print (algo_name+": increase granularity to", next_n)
cbar_offset = (cbar_offset * next_n) // n
c = next_c
n = next_n
run = run + 1
def ddmin(self, c):
return self.ddgen(c, 1, 0)
def ddmax(self, c):
return self.ddgen(c, 0, 1)
def ddmix(self, c):
return self.ddgen(c, 1, 1)
def internalError(self, func, msg):
raise AssertionError(func +"\t"+ msg)
# General delta debugging (new TSE version)
def dddiff(self, c):
n = 2
if self.debug_dd:
print ("dddiff(" + self.pretty(c) + ", " + repr(n) + ")...")
outcome = self._dddiff([], c, n)
if self.debug_dd:
print ("dddiff(" + self.pretty(c) + ", " + repr(n) + ") = " +
repr(outcome))
return outcome
def _dddiff(self, c1, c2, n):
run = 1
cbar_offset = 0
# We replace the tail recursion from the paper by a loop
while 1:
if self.debug_dd:
print ("dd: c1 =", self.pretty(c1))
print ("dd: c2 =", self.pretty(c2))
if self.assume_axioms_hold:
t1 = self.PASS
t2 = self.FAIL
else:
t1 = self.test(c1)
t2 = self.test(c2)
assert t1 == self.PASS
assert t2 == self.FAIL
assert self.__listsubseteq(c1, c2)
c = self.__listminus(c2, c1)
if self.debug_dd:
print ("dd: c2 - c1 =", self.pretty(c))
if n > len(c):
# No further minimizing
print ("dd: done")
return (c, c1, c2)
self.report_progress(c, "dd")
cs = self.split(c, n)
print ()
print ("dd (run #" + repr(run) + "): trying",)
for i in range(n):
if i > 0:
print ("+",)
print (len(cs[i]),)
print ()
progress = 0
next_c1 = c1[:]
next_c2 = c2[:]
next_n = n
# Check subsets
for j in range(n):
i = (j + cbar_offset) % n
if self.debug_dd:
print ("dd: trying", self.pretty(cs[i]))
(t, csub) = self.test_and_resolve(cs[i], c1, c, self.REMOVE)
csub = self.__listunion(c1, csub)
if t == self.FAIL and t1 == self.PASS:
# Found
progress = 1
next_c2 = csub
next_n = 2
cbar_offset = 0
if self.debug_dd:
print ("dd: reduce c2 to", len(next_c2), "deltas:",)
print (self.pretty(next_c2))
break
if t == self.PASS and t2 == self.FAIL:
# Reduce to complement
progress = 1
next_c1 = csub
next_n = max(next_n - 1, 2)
cbar_offset = i
if self.debug_dd:
print ("dd: increase c1 to", len(next_c1), "deltas:",)
print (self.pretty(next_c1))
break
csub = self.__listminus(c, cs[i])
(t, csub) = self.test_and_resolve(csub, c1, c, self.ADD)
csub = self.__listunion(c1, csub)
if t == self.PASS and t2 == self.FAIL:
# Found
progress = 1
next_c1 = csub
next_n = 2
cbar_offset = 0
if self.debug_dd:
print ("dd: increase c1 to", len(next_c1), "deltas:",)
print (self.pretty(next_c1))
break
if t == self.FAIL and t1 == self.PASS:
# Increase
progress = 1
next_c2 = csub
next_n = max(next_n - 1, 2)
cbar_offset = i
if self.debug_dd:
print ("dd: reduce c2 to", len(next_c2), "deltas:",)
print (self.pretty(next_c2))
break
if progress:
self.report_progress(self.__listminus(next_c2, next_c1), "dd")
else:
if n >= len(c):
# No further minimizing
print ("dd: done")
return (c, c1, c2)
next_n = min(len(c), n * 2)
print ("dd: increase granularity to", next_n)
cbar_offset = (cbar_offset * next_n) // n
c1 = next_c1
c2 = next_c2
n = next_n
run = run + 1
def dd(self, c):
return self.dddiff(c) # Backwards compatibility
if __name__ == '__main__':
# Test the outcome cache
oc_test()
# Define our own DD class, with its own test method
class MyDD(DD):
def _test_a(self, c):
"Test the configuration C. Return PASS, FAIL, or UNRESOLVED."
# Just a sample
# if 2 in c and not 3 in c:
# return self.UNRESOLVED
# if 3 in c and not 7 in c:
# return self.UNRESOLVED
if 7 in c and not 2 in c:
return self.UNRESOLVED
if 5 in c and 8 in c:
return self.FAIL
return self.PASS
def _test_b(self, c):
if c == []:
return self.PASS
if 1 in c and 2 in c and 3 in c and 4 in c and \
5 in c and 6 in c and 7 in c and 8 in c:
return self.FAIL
return self.UNRESOLVED
def _test_c(self, c):
if 1 in c and 2 in c and 3 in c and 4 in c and \
6 in c and 8 in c:
if 5 in c and 7 in c:
return self.UNRESOLVED
else:
return self.FAIL
if 1 in c or 2 in c or 3 in c or 4 in c or \
6 in c or 8 in c:
return self.UNRESOLVED
return self.PASS
def __init__(self):
self._test = self._test_c
DD.__init__(self)
print ("WYNOT - a tool for delta debugging.")
mydd = MyDD()
# mydd.debug_test = 1 # Enable debugging output
# mydd.debug_dd = 1 # Enable debugging output
# mydd.debug_split = 1 # Enable debugging output
# mydd.debug_resolve = 1 # Enable debugging output
# mydd.cache_outcomes = 0
# mydd.monotony = 0
print ("Minimizing failure-inducing input...")
c = mydd.ddmin([1, 2, 3, 4, 5, 6, 7, 8]) # Invoke DDMIN
print ("The 1-minimal failure-inducing input is", c)
print ("Removing any element will make the failure go away.")
print ()
print ("Computing the failure-inducing difference...")
(c, c1, c2) = mydd.dd([1, 2, 3, 4, 5, 6, 7, 8]) # Invoke DD
print ("The 1-minimal failure-inducing difference is", c)
print (c1, "passes,", c2, "fails")
# Local Variables:
# mode: python
# End:
| 31,416 | 29.710655 | 115 | py |
verrou | verrou-master/pyTools/post_config.py | import getopt
import os
import sys
import re
import copy
from . import gen_config
from . import rounding_tool
class postConfig(gen_config.gen_config):
def __init__(self, argv, environ,config_keys=["POST"]):
super().__init__(argv,environ, config_keys)
self.normalize()
self.check_instr_tab()
self.check_trace_file()
def registerOptions(self):
self.addRegistry("nbRUN", "int", "NRUNS", ["--nruns="], 5)
self.addRegistry("maxNbPROC", "int", "NUM_THREADS", ["--num-threads="], None)
self.addRegistry("ddQuiet", "bool", "QUIET", ["--quiet"], False)
self.addRegistry("rep", "string", "REP", ["--rep="], "dd.line", "rep_exists")
self.addRegistry("sub_rep", "string", "CONFIGURATION",["--sub-rep=","--configuration="], [] , "rep_exists", additive=True)
self.addRegistry("instr" , "string", "INSTR", ["--instr="], [] , additive=True)
self.addRegistry("rounding", "string", "ROUNDING_LIST", ["--rounding-list=","--rounding=","--rounding-mode"] , [], additive=True,
docStr="rounding mode list (coma separated) [default rounding in run.sh]")
self.addRegistry("trace_bin", "bool", "TRACE_BIN", ["--trace-bin"], False)
self.addRegistry("trace_pattern","string", "TRACE_PATTERN", ["--trace-pattern="], [], additive=True)
self.addRegistry("trace_file", "string", "TRACE_FILE", ["--trace-file="], None)
def usageCmd(self):
print("Usage: "+ os.path.basename(sys.argv[0]) + " [options] runScript cmpScript")
print(self.get_EnvDoc(self.config_keys[-1]))
print("Valid rounding modes are:")
print("\t", ",".join(rounding_tool.roundingDetTab ))
print("\t", ",".join(rounding_tool.roundingNonDetTab ))
print("\t", ",".join(["mca-rr-53-24", "mca-pb-53-24", "mca-mca-53-24"]) , "(53 and 24 can be modified)")
print("\t det is an alias to "+",".join([x for x in rounding_tool.roundingDetTab if x not in ["float","ftz"]]))
print("\t no_det is an alias to "+",".join(["random","average", "prandom"]))
def normalize(self):
self.rep=os.path.abspath(self.rep)
if self.trace_file!=None:
self.trace_file=os.path.abspath(self.trace_file)
for r in self.rounding:
if "," in r:
splitR=r.split(",")
self.rounding.remove(r)
self.rounding+=splitR
if "det" in self.rounding:
self.rounding.remove("det")
self.rounding+=[x for x in rounding_tool.roundingDetTab if x !="float" and x!="ftz" ]
if "no_det" in self.rounding:
self.rounding.remove("no_det")
self.rounding+=["random","average", "prandom"]
#check valid rounding
for r in self.rounding:
runEnv=rounding_tool.roundingToEnvVar(r,{})
self.runScript=self.exec_arg[0]
self.cmpScript=self.exec_arg[1]
def check_instr_tab(self):
for instrConfig in self.instr:
for instr in instrConfig.split(","):
validInstrTab=["add","sub", "mul","div", "mAdd", "mSub", "sqrt","conv"]
if instr not in validInstrTab:
print("%s is not a valid instr configuration."%(instr))
print("%s should be a coma separated list of element of %s"%(instrConfig, str(validInstrTab)))
self.usageCmd()
self.failure()
def check_trace_file(self):
if self.trace_file !=None and (self.trace_pattern!=[] or self.trace_bin):
print("--trace_file is incompatible with trace_pattern and trace_bin option")
self.failure()
"""Basic check : not valid file could sucessed"""
if self.trace_file!=None:
numLine=0
for line in (open(self.trace_file)).readlines():
numLine+=1
spline=line.strip()
if spline.startswith("#") or spline=="":
continue
if not (" " in spline) and not("\t" in spline):
print("%s is not a valid trace file"%(self.trace_file))
print("%s line %i is not valid"%(spline, numLine))
self.usageCmd()
self.failure()
#accessors
def get_maxNbPROC(self):
return self.maxNbPROC
def get_nbRUN(self):
return self.nbRUN
def get_quiet(self):
return self.ddQuiet
def get_runScript(self):
return self.runScript
def get_cmpScript(self):
return self.cmpScript
def get_rep(self):
return self.rep
def get_rep_sub_rep(self):
if self.sub_rep==[]:
return {self.rep:self.findDDmin(self.rep)}
else:
res={}
for sub_rep in self.sub_rep:
sub_rep=os.path.abspath(sub_rep)
rep=os.path.dirname(sub_rep)
if rep in res:
res[rep]+=[sub_rep]
else:
if os.path.isdir(os.path.join(rep,"ref")):
res[rep]=[sub_rep]
else:
print("sub_rep %s is not a valid"%(sub_rep))
self.usageCmd()
self.failure()
return res
def get_instr(self):
if self.instr==[]:
return [""]
else:
return self.instr
def getNonDetTab(self):
if self.rounding==[]:
return [""]
return rounding_tool.filterNonDetTab(self.rounding)
def getDetTab(self):
return rounding_tool.filterDetRoundingTab(self.rounding)
def get_trace_bin(self):
return self.trace_bin
def get_trace_pattern(self):
return self.trace_pattern
def get_trace_file(self):
return self.trace_file
def get_trace(self):
if self.trace_bin==True:
return True
if self.trace_pattern!=[]:
return True
if self.trace_file!=None:
return True
return False
def findDDmin(self, rep):
ddminList=[os.path.abspath(os.path.join(rep,x)) for x in os.listdir(rep) if (re.match("^ddmin[0-9]+$",x) or x=="rddmin-cmp") or x=="FullPerturbation" or x=="NoPerturbation"]
return ddminList
| 6,419 | 36.54386 | 181 | py |
verrou | verrou-master/pyTools/dd_config.py | # This file is part of Verrou, a FPU instrumentation tool.
# Copyright (C) 2014-2021 EDF
# F. Févotte <francois.fevotte@edf.fr>
# B. Lathuilière <bruno.lathuiliere@edf.fr>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307, USA.
# The GNU Lesser General Public License is contained in the file COPYING.
import os
import math
import sys
import getopt
from . import gen_config
def exponentialRange(nbRun):
tab=[int(nbRun / (2**i)) for i in range(1+int(math.floor(math.log(nbRun,2)))) ]
tab.reverse()
return tab
class ddConfig(gen_config.gen_config):
def __init__(self, argv, environ,config_keys=["INTERFLOP"]):
super().__init__(argv, environ, config_keys)
self.normalizeOptions()
self.runScript=self.exec_arg[0]
self.cmpScript=self.exec_arg[1]
def registerOptions(self):
self.addRegistry("nbRUN", "int", "DD_NRUNS", ["--nruns="], 5, None)
self.addRegistry("maxNbPROC", "int", "DD_NUM_THREADS", ["--num-threads="], None, None)
self.addRegistry("ddAlgo", "string", "DD_ALGO", ["--algo="], "rddmin", ["ddmax", "rddmin"])
self.addRegistry("rddminVariant", "string", "DD_RDDMIN", ["--rddmin="], "d", ["s", "stoch", "d", "dicho", "", "strict"])
self.addRegistry("param_rddmin_tab", "string", "DD_RDDMIN_TAB", ["--rddmin-tab="], "exp", ["exp", "all", "single"])
self.addRegistry("param_dicho_tab", "int/string", "DD_DICHO_TAB" , ["--dicho-tab="], "half", ["exp", "all", "half", "single"])
self.addRegistry("splitGranularity", "int", "DD_DICHO_GRANULARITY", ["--dicho-granularity="], 2, None)
self.addRegistry("ddQuiet", "bool", "DD_QUIET", ["--quiet"], False, None)
self.addRegistry("cache", "string", "DD_CACHE" , ["--cache="] , "continue",["clean", "rename", "rename_keep_result","keep_run", "continue"])
self.addRegistry("rddminHeuristicsCache", "string", "DD_RDDMIN_HEURISTICS_CACHE", ["--rddmin-heuristics-cache="], "none", ["none", "cache", "all_cache"])
self.addRegistry("rddminHeuristicsRep" , "string", "DD_RDDMIN_HEURISTICS_REP", ["--rddmin-heuristics-rep="], [] , "rep_exists", True)
self.addRegistry("rddminHeuristicsLineConv" , "bool", "DD_RDDMIN_HEURISTICS_LINE_CONV", ["--rddmin-heuristics-line-conv"], False, None)
self.addRegistry("resWithAllSamples" , "bool", "DD_RES_WITH_ALL_SAMPLES", ["--res-with-all-samples"], False, None)
def normalizeOptions(self):
if self.rddminVariant=="stoch":
self.rddminVariant="s"
if self.rddminVariant=="dicho":
self.rddminVariant="d"
if self.rddminVariant=="strict":
self.rddminVariant=""
## Accessors
def get_splitGranularity(self):
return self.splitGranularity
def get_ddAlgo(self):
if self.ddAlgo.endswith("rddmin"):
return self.rddminVariant+self.ddAlgo
return self.ddAlgo
def get_maxNbPROC(self):
return self.maxNbPROC
def get_nbRUN(self):
return self.nbRUN
def get_quiet(self):
return self.ddQuiet
def get_resWithAllsamples(self):
return self.resWithAllSamples
def get_rddMinTab(self):
rddMinTab=None
nbProc=1
if self.maxNbPROC!=None:
nbProc=self.maxNbPROC
if self.param_rddmin_tab=="exp":
if nbProc >self.nbRUN:
return [self.nbRUN]
else:
return [x for x in exponentialRange(self.nbRUN) if x>=nbProc]
if self.param_rddmin_tab=="all":
if nbProc>self.nbRUN:
return range(1,self.nbRUN+1)
else:
return range(nbProc, self.nbRUN+1)
if self.param_rddmin_tab=="single":
rddMinTab=[self.nbRUN]
return rddMinTab
def get_splitTab(self):
splitTab=None
if self.param_dicho_tab=="exp":
splitTab=exponentialRange(self.nbRUN)
if self.param_dicho_tab=="all":
splitTab=range(self.nbRUN)
if self.param_dicho_tab=="single":
splitTab=[self.nbRUN]
if self.param_dicho_tab=="half":
splitTab=[ int(math.ceil(self.nbRUN / 2.))]
if self.param_dicho_tab in [str(i) for i in range(1, self.nbRUN+1) ]:
splitTab=[self.param_dicho_tab]
return splitTab
def get_runScript(self):
return self.runScript
def get_cmpScript(self):
return self.cmpScript
def get_cache(self):
return self.cache
def get_rddminHeuristicsCache(self):
return self.rddminHeuristicsCache
def get_rddminHeuristicsRep_Tab(self):
return self.rddminHeuristicsRep
def get_rddminHeuristicsLineConv(self):
return self.rddminHeuristicsLineConv
| 5,918 | 40.391608 | 202 | py |
verrou | verrou-master/pyTools/DD_exec_stat.py | # This file is part of Verrou, a FPU instrumentation tool.
# Copyright (C) 2014-2021 EDF
# F. Févotte <francois.fevotte@edf.fr>
# B. Lathuilière <bruno.lathuiliere@edf.fr>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307, USA.
# The GNU Lesser General Public License is contained in the file COPYING.
import sys
import os
import time
class exec_stat:
def __init__(self,repName):
self.repName=repName
self.timeInit()
def terminate(self):
self.timeEnd()
self.printElapsed(int(self.end- self.start))
self.printNbRun()
def timeInit(self):
self.start = time.time()
def timeEnd(self):
self.end = int(time.time())
def printElapsed(self,duration):
s= duration % 60
rm= duration //60
m=rm%60
rh=rm//60
h=rh%24
rd=rh//24
print ("\nElapsed Time: %id %ih %imin %is "%(rd,h,m,s) )
def isNew(self, filename):
return ((os.stat(filename).st_mtime) > self.start)
def printNbRun(self,dirName="."):
import glob
runTab=glob.glob(dirName+"/"+self.repName+"/*/dd.run*/dd.run.out")
runFilter=[filename for filename in runTab if self.isNew(filename)]
print(self.repName+" search : %i run (with cache included: %i)"%(len(runFilter),len(runTab)) )
| 1,983 | 30.492063 | 103 | py |
verrou | verrou-master/pyTools/gen_config.py | import getopt
import os
import sys
import re
import copy
class gen_config:
def __init__(self, argv, environ,config_keys=["INTERFLOP"], lengthValidTab=[2]):
self.config_keys=config_keys
self.registryTab =[]
self.registerOptions()
self.readDefaultValueFromRegister()
self.parseArgv(argv, lengthValidTab)
for config_key in self.config_keys:
self.read_environ(environ, config_key)
def addRegistry(self,attribut, optionType, ENV, tabOption, default, checkParam=None, additive=False, docStr=None ):
registry={"attribut":attribut, "type": optionType, "ENV":ENV, "tabOption":tabOption,
"default":default, "checkParam":checkParam,
"additive":additive,
"docStr":docStr}
self.registryTab+=[registry]
def readDefaultValueFromRegister(self):
for registry in self.registryTab:
attribut=registry["attribut"]
default=registry["default"]
exec("self."+attribut+"= copy.deepcopy(default)")
def optionToStr(self):
strOption=""
for registry in self.registryTab:
attribut=registry["attribut"]
strOption+="\t%s : %s\n"%(attribut,eval("str(self."+attribut+")"))
return strOption
def parseArgv(self,argv, lengthValidTab):
shortOptionsForGetOpt="h" + "".join([y[1:] for x in self.registryTab for y in x["tabOption"] if y.startswith("-") and y[1]!="-"])
longOptionsForGetOpt=["help"] + [y[2:] for x in self.registryTab for y in x["tabOption"] if y.startswith("--")]
try:
opts,args=getopt.getopt(argv[1:], shortOptionsForGetOpt, longOptionsForGetOpt)
except getopt.GetoptError:
self.usageCmd()
self.failure()
for opt, arg in opts:
if opt in ["-h","--help"]:
self.usageCmd()
self.failure()
for registry in self.registryTab:
for registryName in registry["tabOption"]:
fromRegistryName=registryName.replace("=","")
fromRegistryName=fromRegistryName.replace(":","")
if opt==fromRegistryName:
self.readOneOption(arg,registry["attribut"], registry["type"], registry["ENV"],registryName,registry["checkParam"], registry["additive"], parse="parse")
break
if len(args) in lengthValidTab:
self.exec_arg=[self.checkScriptPath(arg) for arg in args]
else:
self.usageCmd()
self.failure()
def read_environ(self,environ, PREFIX):
self.environ=environ #configuration to prepare the call to readOneOption
self.PREFIX=PREFIX
for registry in self.registryTab:
try:
strValue=self.environ[self.PREFIX+"_"+registry["ENV"]]
param=[registry["attribut"], registry["type"], registry["ENV"], registry["tabOption"][0],registry["checkParam"], registry["additive"]]
self.readOneOption(strValue,*param, parse="environ")
except KeyError:
pass
def usageCmd(self):
print("Usage: "+ os.path.basename(sys.argv[0]) + " [options] runScript cmpScript")
print(self.get_EnvDoc(self.config_keys[-1]))
def failure(self):
sys.exit(42)
def checkScriptPath(self,fpath):
if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
return os.path.abspath(fpath)
else:
print("Invalid Cmd:"+str(sys.argv))
if os.path.isfile(fpath) and not os.access(fpath, os.X_OK):
print(fpath + " should be executable")
if not os.path.isfile(fpath):
print(fpath + " is not a file")
self.usageCmd()
self.failure()
def readOneOption(self,strOption, attribut,conv_type ,key_name, argv_name, acceptedValue, addAttributTab, parse):
value=False
if conv_type=="int":
value = int(strOption)
else:
value = strOption
if conv_type=="bool":
value=True
cmd="self."+attribut+"= copy.deepcopy(value)"
if addAttributTab:
cmd="self."+attribut+"+= [copy.deepcopy(value)]"
if acceptedValue==None :
exec(cmd)
return
elif acceptedValue=="rep_exists":
if os.path.isdir(value):
exec(cmd)
return
else:
if parse=="environ":
print("Error : "+ self.PREFIX+"_"+key_name+ " should be a directory")
else:
print("Error : "+ argv_name+" : " + strOption+" should be a directory")
self.failure()
else:
if value in acceptedValue or None in acceptedValue:
exec(cmd)
return
elif conv_type=="string/int":
try:
value=int(value)
exec(cmd)
return
except:
if parse=="environ":
print("Error : "+ self.PREFIX+"_"+key_name+ " should be in "+str(acceptedValue) +" or be a int value")
else:
print("Error : "+ argv_name+" : " + strOption+" should be in "+str(acceptedValue) +" or be a int value")
self.failure()
else:
if parse=="environ":
print("Error : "+ self.PREFIX+"_"+key_name+ " should be in "+str(acceptedValue))
else:
print("Error : "+ argv_name+" : " + strOption+" should be in "+str(acceptedValue))
self.failure()
def get_EnvDoc(self,PREFIX="INTERFLOP"):
doc="""List of env variables and options :\n"""
for registry in self.registryTab:
# (attribut, attributType, envVar, option, default, expectedValue, add)=registry
optionTab=registry["tabOption"]
if len(optionTab)==1:
optionStr=str(optionTab[0])
else:
optionStr=" or ".join(optionTab)
optionNameStr="%s or %s"%(PREFIX+"_"+registry["ENV"], optionStr)
expectedValue=registry["checkParam"]
expectedValueStr=""
if expectedValue!=None:
if str(type(expectedValue))=="<class 'list'>":
v=copy.deepcopy(expectedValue)
if None in v:
v.remove(None)
v+=["..."]
expectedValueStr="in "+str(v)
else:
expectedValueStr="in "+str(expectedValue)
attributType=registry["type"]
typeStr=""
if attributType== "int":
typeStr="int"
if attributType== "int/string":
typeStr="or int"
if attributType=="bool":
typeStr="set or not"
default=registry["default"]
defaultStr='(default "%s")'%(default)
if default==None:
defaultStr='(default none)'
if default==False:
defaultStr='(default not)'
if default==True:
defaultStr='(default set)'
if registry["docStr"]==None:
doc+="\t%s : %s %s %s \n"%(optionNameStr,expectedValueStr,typeStr, defaultStr)
else:
doc+="\t%s : %s\n"%(optionNameStr,registry["docStr"])
return doc
| 7,593 | 38.14433 | 176 | py |
verrou | verrou-master/pyTools/convNumLineTool.py | #!/usr/bin/python3
import sys
import difflib
class convNumLineTool:
    """Map delta-debug "line" deltas from one search space onto another.

    When two reference computations produce two different delta-debug
    search spaces, this tool converts a delta line expressed in the
    original space into its most plausible counterpart(s) in the new one.

    The caller supplies:
      * selectBlocAndNumLineFunctor(delta) -> (bloc, numLine): splits a
        delta line into a hashable bloc key and an integer line number;
      * joinBlocAndNumLine(bloc, numLine) -> delta: the inverse operation.

    Basic idea of the algorithm: for each bloc, the sorted line numbers of
    both spaces are converted into sequences of successive differences and
    aligned with difflib.SequenceMatcher.  Can be used only as heuristic.
    """

    def __init__(self, deltasOrg, deltasNew, selectBlocAndNumLineFunctor, joinBlocAndNumLine):
        self.selectBlocAndNumLineFunctor = selectBlocAndNumLineFunctor
        self.joinBlocAndNumLine = joinBlocAndNumLine
        self.pOrg = self._parseDeltas(deltasOrg, selectBlocAndNumLineFunctor)
        self.pNew = self._parseDeltas(deltasNew, selectBlocAndNumLineFunctor)
        # cacheRes: bloc -> {orgLineNum: [newLineNum, ...]}
        self.cacheRes = {}

    def _parseDeltas(self, deltas, selectBlocAndNumLineFunctor):
        """Group the line numbers of the given search space by bloc."""
        res = {}
        for delta in deltas:
            (bloc, numLine) = selectBlocAndNumLineFunctor(delta)
            res.setdefault(bloc, []).append(numLine)
        return res

    def getNewLines(self, oldLine):
        """Convert the old delta line into the list of new delta lines."""
        oldBloc, oldLineNum = self.selectBlocAndNumLineFunctor(oldLine)
        if oldBloc not in self.cacheRes:
            dic = {}
            for (key, value) in self._convBloc(oldBloc):
                dic.setdefault(key, []).append(value)
            self.cacheRes[oldBloc] = dic
        convDic = self.cacheRes[oldBloc]
        if oldLineNum not in convDic:
            raise AssertionError("Internal error oldLineNum should be in blocDic")
        return [self.joinBlocAndNumLine(oldBloc, newLineNum)
                for newLineNum in convDic[oldLineNum]]

    def _convBloc(self, bloc):
        """Return [(orgLineNum, newLineNum), ...] for one bloc.

        newLineNum is None when the bloc disappeared from the new space.
        """
        if bloc not in self.pOrg:
            raise AssertionError("Internal error : bloc should be in pOrg")
        # The conversion to successive numline difference format requires
        # sorted tabs.
        lineNumTabOrgSorted = sorted(self.pOrg[bloc])
        lineNumTabNewSorted = sorted(self.pNew.get(bloc, []))
        if len(lineNumTabNewSorted) == 0:
            return [(x, None) for x in lineNumTabOrgSorted]
        if lineNumTabOrgSorted == lineNumTabNewSorted:
            return [(x, x) for x in lineNumTabOrgSorted]
        diffTabNew = self._convertLineTabToLineDiffTab(lineNumTabNewSorted)
        diffTabOrg = self._convertLineTabToLineDiffTab(lineNumTabOrgSorted)
        matcher = difflib.SequenceMatcher(None, diffTabOrg, diffTabNew)
        res = []
        accOrg = 0
        accNew = 0
        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
            minSize = min(i2 - i1, j2 - j1)
            # common/substituted prefix: advance on both sides in lockstep
            for i in range(minSize):
                accOrg += diffTabOrg[i1 + i]
                accNew += diffTabNew[j1 + i]
                res += [(accOrg, accNew)]
            # lines present only in the original space: keep last new value
            for i in range(i1 + minSize, i2):
                accOrg += diffTabOrg[i]
                res += [(accOrg, accNew)]
            # lines present only in the new space
            # (bug fix: this loop previously indexed diffTabNew[j] with an
            #  undefined name `j`, raising NameError on any insert opcode)
            for j in range(j1 + minSize, j2):
                accNew += diffTabNew[j]
                res += [(accOrg, accNew)]
        return res

    def _convertLineTabToLineDiffTab(self, tab):
        """Convert a sorted line-number list to successive differences."""
        if len(tab) == 0:
            return []
        return [tab[0]] + [tab[i] - tab[i - 1] for i in range(1, len(tab))]
if __name__=="__main__":
    """Use case exemple :
    sys.argv[1] and sys.argv[2] are two files dd.line/ref/dd.line generated by verrou_dd_line
    linesTest contains two lines included in sys.argv[1]
    """
    # A delta line is "fileName\tlineNumber\tsymbolName"; the bloc key is
    # (fileName, symbolName) and the variable part is the line number.
    def selectBlocAndNumLine(line):
        [v1, v2,v3] =(line.strip()).split("\t")
        return ((v1,v3),int(v2))
    def joinBlocAndNumLine(bloc, numLine):
        return bloc[0]+"\t"+str(numLine)+"\t"+bloc[1]
    # NOTE(review): file handles are never closed (fine for a short script).
    convTool= convNumLineTool((open(sys.argv[1])).readlines(),
                              (open(sys.argv[2])).readlines(),
                              selectBlocAndNumLine, joinBlocAndNumLine)
    # Hard-coded sample lines expected to exist in sys.argv[1].
    linesTest= ["rc32spa.F90\t425\trc32spa_", "rc32spa.F90\t458\trc32spa_"]
    for lineOrg in linesTest:
        linesNew=convTool.getNewLines(lineOrg)
        print("lineOrg:", lineOrg)
        print("linesNew:", linesNew)
        print("")
| 5,044 | 36.649254 | 108 | py |
verrou | verrou-master/Interlibmath/testCos.py | #!/usr/bin/env python3
import math
import sys
import numpy

# x: the value under test (from the command line);
# y: the next representable double strictly above x (one ulp away).
x=float(sys.argv[1])
y=numpy.nextafter(x,math.inf)
for i in range(4):
    # cos(x)-cos(x) is exactly 0 in plain IEEE arithmetic; cos(x)-cos(y)
    # exposes the one-ulp sensitivity of cos.  Presumably repeated 4 times
    # to observe variability when run under verrou's perturbed libm —
    # TODO confirm against the Interlibmath test driver.
    print("cos(x)-cos(x): ", math.cos(x)-math.cos(x))
    print("cos(x)-cos(y): ", math.cos(x)-math.cos(y))
| 242 | 15.2 | 53 | py |
verrou | verrou-master/interflop_backends/interflop_verrou/gen_interflop_verrou_flop_impl.py | #!/usr/bin/python3
import sys
# Operations grouped by arity; one C wrapper is generated per (op, type) pair.
listOfUnaryOp=["sqrt"]
listOfBinaryOp=["add","sub","mul","div"]
listOfTernaryOp=["madd"]
listOfType=["double","float"]

# Templates for dynamically-dispatched wrappers.  Placeholders /OP/, /TYPE/
# and /OPCLASS/ are substituted by specializePatternDyn().
protoTypeUnary="""
IFV_INLINE void IFV_FCTNAME(/OP/_/TYPE/) (/TYPE/ a, /TYPE/* res,void* context) {
typedef OpWithDynSelectedRoundingMode</OPCLASS/ </TYPE/> > Op;
Op::apply(Op::PackArgs(a),res,context);
}
"""

protoTypeBinary="""
IFV_INLINE void IFV_FCTNAME(/OP/_/TYPE/) (/TYPE/ a, /TYPE/ b, /TYPE/* res,void* context) {
typedef OpWithDynSelectedRoundingMode</OPCLASS/ </TYPE/> > Op;
Op::apply(Op::PackArgs(a,b),res,context);
}
"""

protoTypeTernary="""
IFV_INLINE void IFV_FCTNAME(/OP/_/TYPE/) (/TYPE/ a, /TYPE/ b, /TYPE/ c, /TYPE/* res,void* context) {
typedef OpWithDynSelectedRoundingMode</OPCLASS/ </TYPE/> > Op;
Op::apply(Op::PackArgs(a,b,c),res,context);
}
"""

protoTypeCast="""
IFV_INLINE void IFV_FCTNAME(/OP/_double_to_float) (double a, float* res, void* context){
typedef OpWithDynSelectedRoundingMode</OPCLASS/</TYPE/> > Op;
Op::apply(Op::PackArgs(a),res,context);
}
"""

# C snippet appended to static wrappers: optional NaN/Inf detection hooks.
post_treatmement_code="""
#ifndef VERROU_IGNORE_NANINF_CHECK
if (isNanInf(*res)) {
if(isNan(*res)){
vr_nanHandler();
}
if(isinf(*res)){
vr_infHandler();
}
}
#endif
"""

# Templates for statically specialized wrappers (one per rounding mode).
# Extra placeholders: /ROUNDING_NAME/, /ROUNDING_CLASS/, /RANDCLASS/ and
# /POST_TREATMEMENT_CODE/, substituted by specializePatternStatic().
protoTypeUnaryStatic="""
IFV_INLINE void IFV_FCTNAME(/OP/_/TYPE/_/ROUNDING_NAME/) (/TYPE/ a, /TYPE/* res,void* context) {
typedef /ROUNDING_CLASS/</OPCLASS/ </TYPE/> /RANDCLASS/> Op;
*res=Op::apply(Op::PackArgs(a));
/POST_TREATMEMENT_CODE/;
}
"""

protoTypeBinaryStatic="""
IFV_INLINE void IFV_FCTNAME(/OP/_/TYPE/_/ROUNDING_NAME/) (/TYPE/ a, /TYPE/ b, /TYPE/* res,void* context) {
typedef /ROUNDING_CLASS/</OPCLASS/ </TYPE/> /RANDCLASS/> Op;
*res=Op::apply(Op::PackArgs(a,b));
/POST_TREATMEMENT_CODE/;
}
"""

protoTypeTernaryStatic="""
IFV_INLINE void IFV_FCTNAME(/OP/_/TYPE/_/ROUNDING_NAME/) (/TYPE/ a, /TYPE/ b, /TYPE/ c, /TYPE/* res,void* context) {
typedef /ROUNDING_CLASS/</OPCLASS/ </TYPE/> /RANDCLASS/> Op;
*res=Op::apply(Op::PackArgs(a,b,c));
/POST_TREATMEMENT_CODE/;
}
"""

protoTypeCastStatic="""
IFV_INLINE void IFV_FCTNAME(/OP/_double_to_float_/ROUNDING_NAME/) (double a, float* res, void* context){
typedef /ROUNDING_CLASS/</OPCLASS/</TYPE/> /RANDCLASS/> Op;
*res=Op::apply(Op::PackArgs(a));
/POST_TREATMEMENT_CODE/;
}
"""

# Declarations written once per rounding mode in the generated header; the
# IFV_FCTNAME macro provides the per-rounding name mangling.
header_interflop="""
void IFV_FCTNAME(add_double) (double a, double b, double* res, void* context);
void IFV_FCTNAME(add_float) (float a, float b, float* res, void* context);
void IFV_FCTNAME(sub_double) (double a, double b, double* res, void* context);
void IFV_FCTNAME(sub_float) (float a, float b, float* res, void* context);
void IFV_FCTNAME(mul_double) (double a, double b, double* res, void* context);
void IFV_FCTNAME(mul_float) (float a, float b, float* res, void* context);
void IFV_FCTNAME(div_double) (double a, double b, double* res, void* context);
void IFV_FCTNAME(div_float) (float a, float b, float* res, void* context);
void IFV_FCTNAME(sqrt_double) (double a, double* res, void* context);
void IFV_FCTNAME(sqrt_float) (float a, float* res, void* context);
void IFV_FCTNAME(cast_double_to_float) (double a, float* b, void* context);
void IFV_FCTNAME(madd_double)(double a, double b, double c, double* res, void* context);
void IFV_FCTNAME(madd_float) (float a, float b, float c, float* res, void* context);
"""
def getOpClass(op):
    """Map an operation short name to its C++ operation class name.

    Returns None for unknown operation names.
    """
    table = {
        "add": "AddOp",
        "sub": "SubOp",
        "mul": "MulOp",
        "div": "DivOp",
        "sqrt": "SqrtOp",
        "madd": "MAddOp",
        "cast": "CastOp",
    }
    return table.get(op)
def getRoundingClass(rounding):
    """Map a rounding-mode name to its C++ rounding class name.

    Returns None for unknown rounding-mode names.
    """
    fixed = {
        "NEAREST": "RoundingNearest",
        "UPWARD": "RoundingUpward",
        "DOWNWARD": "RoundingDownward",
        "ZERO": "RoundingZero",
        "AWAY_ZERO": "RoundingAwayZero",
        "FARTHEST": "RoundingFarthest",
        "RANDOM_SCOMDET": "RoundingPRandom",
        "AVERAGE_SCOMDET": "RoundingSAverage",
        "SR_MONOTONIC": "RoundingSRMonotonic",
    }
    if rounding in fixed:
        return fixed[rounding]
    if rounding in ("RANDOM", "RANDOM_DET", "RANDOM_COMDET"):
        return "RoundingRandom"
    if rounding in ("AVERAGE", "AVERAGE_DET", "AVERAGE_COMDET"):
        return "RoundingAverage"
    if rounding in ("PRANDOM", "PRANDOM_DET", "PRANDOM_COMDET"):
        return "RoundingPRandom"
    return None
def getRandClass(rounding):
    """Map a rounding-mode name to the C++ random-generator template argument.

    Deterministic IEEE modes (and unknown names) yield None, which means
    "no extra template argument".
    """
    table = {
        "RANDOM": "vr_rand_prng</OPCLASS/ </TYPE/> >",
        "AVERAGE": "vr_rand_prng</OPCLASS/ </TYPE/> >",
        "RANDOM_DET": "vr_rand_det</OPCLASS/ </TYPE/> >",
        "AVERAGE_DET": "vr_rand_det</OPCLASS/ </TYPE/> >",
        "SR_MONOTONIC": "vr_rand_det</OPCLASS/ </TYPE/> >",
        "RANDOM_COMDET": "vr_rand_comdet</OPCLASS/ </TYPE/> >",
        "AVERAGE_COMDET": "vr_rand_comdet</OPCLASS/ </TYPE/> >",
        "RANDOM_SCOMDET": "vr_rand_scomdet</OPCLASS/ </TYPE/> >",
        "AVERAGE_SCOMDET": "vr_rand_scomdet</OPCLASS/ </TYPE/> >",
        "PRANDOM": "vr_rand_p</OPCLASS/ </TYPE/>,vr_rand_prng>",
        "PRANDOM_DET": "vr_rand_p</OPCLASS/ </TYPE/>,vr_rand_det>",
        "PRANDOM_COMDET": "vr_rand_p</OPCLASS/ </TYPE/>,vr_rand_comdet>",
    }
    return table.get(rounding)
def specializePatternDyn(template, op, typeFp):
    """Instantiate a dynamic-rounding template for one (op, type) pair.

    Substitutes /OP/, /TYPE/ and /OPCLASS/ in `template`.
    """
    partial = template.replace("/OP/", op).replace("/TYPE/", typeFp)
    return partial.replace("/OPCLASS/", getOpClass(op))
def specializePatternStatic(template, op, typeFp, roundingName, roundingClass, randClass):
    """Instantiate a static-rounding template for one (op, type, rounding).

    randClass may be None when the rounding mode needs no generator.
    A None roundingClass produces a C `#error` marker in the output.
    """
    if roundingClass is None:
        return "#error \"in code generation\""
    randStr = "" if randClass is None else "," + randClass + " "
    res = template.replace("/RANDCLASS/", randStr)
    res = res.replace("/ROUNDING_CLASS/", roundingClass)
    res = res.replace("/OP/", op).replace("/TYPE/", typeFp)
    if op != "":
        res = res.replace("/OPCLASS/", getOpClass(op))
    res = res.replace("/ROUNDING_NAME/", roundingName)
    return res.replace("/POST_TREATMEMENT_CODE/", post_treatmement_code)
def genFlopImpl(handler, roundingmode="dyn"):
    """Write every generated operation wrapper to `handler`.

    roundingmode == "dyn" emits the dynamically-dispatched wrappers; any
    other value emits wrappers statically specialized for that mode.
    """
    dyn = (roundingmode == "dyn")
    groups = [
        (listOfUnaryOp, protoTypeUnary, protoTypeUnaryStatic),
        (listOfBinaryOp, protoTypeBinary, protoTypeBinaryStatic),
        (listOfTernaryOp, protoTypeTernary, protoTypeTernaryStatic),
    ]
    for opList, dynTemplate, staticTemplate in groups:
        for op in opList:
            for fpType in listOfType:
                if dyn:
                    handler.write(specializePatternDyn(dynTemplate, op, fpType) + "\n")
                else:
                    handler.write(specializePatternStatic(staticTemplate, op, fpType, roundingmode, getRoundingClass(roundingmode), getRandClass(roundingmode)) + "\n")
    # the double->float cast has a single fixed type signature
    if dyn:
        handler.write(specializePatternDyn(protoTypeCast, "cast", "double,float") + "\n")
    else:
        handler.write(specializePatternStatic(protoTypeCastStatic, "cast", "double,float", roundingmode, getRoundingClass(roundingmode), getRandClass(roundingmode)) + "\n")
if __name__=="__main__":
    # Every supported rounding mode gets a statically specialized code path.
    roundingTab =["NEAREST", "UPWARD", "DOWNWARD", "FARTHEST", "ZERO", "AWAY_ZERO"]
    roundingTab+=[rnd + det for rnd in ["RANDOM", "AVERAGE"] for det in ["","_DET","_COMDET","_SCOMDET" ]]
    roundingTab+=[rnd + det for rnd in ["PRANDOM"] for det in ["","_DET","_COMDET" ]]
    roundingTab+=["SR_MONOTONIC"]
    # Implementation file: one dynamic block plus one block per mode.
    handler=open("interflop_verrou_flop_impl.hxx","w")
    handler.write("// generated by : "+str(sys.argv)+"\n")
    handler.write("#define IFV_INLINE\n")
    genFlopImpl(handler, roundingmode="dyn")
    for rounding in roundingTab:
        genFlopImpl(handler, roundingmode=rounding)
    handler.close()
    # Header file: the same declarations once per mode, renamed through the
    # IFV_FCTNAME token-pasting macro, then reset to the dynamic name.
    handler=open("interflop_verrou_rounding.h","w")
    handler.write("#undef IFV_FCTNAME\n")
    for rounding in roundingTab:
        handler.write("#define IFV_FCTNAME(FCT) interflop_verrou_##FCT##_"+rounding+"\n")
        handler.write(header_interflop)
    handler.write("#undef IFV_FCTNAME\n")
    handler.write("#define IFV_FCTNAME(FCT) interflop_verrou_##FCT\n")
    # NOTE(review): this second handler is never explicitly closed.
| 8,887 | 35.727273 | 170 | py |
verrou | verrou-master/interflop_backends/interflop_verrou/prng/genXoshiro.py | #!/usr/bin/python3
import os
import sys
#Script use to generate all xoshiro function with a specific name to be able to use different one in the same binary. If you accept modern C++ (>17) you should consider
# https://github.com/Reputeless/Xoshiro-cpp instead.
# Directory containing the upstream (original) xoshiro C implementations.
repOrgImpl="org"

def extractFun(fileName, name):
    """Extract from `fileName` the lines of the first function containing `name`.

    Collects lines starting at the first line whose text contains `name`
    and returns them once the '{'/'}' brace count balances back to zero.
    Returns None (implicitly) when `name` is never found or the braces
    never rebalance.  NOTE(review): the file handle is never closed, and a
    matching line without any brace returns immediately as a one-line result.
    """
    res=[]
    numberOfAcc=None   # None until `name` is seen, then current brace depth
    for line in open(fileName,"r").readlines():
        if name in line: #should implement regexp
            numberOfAcc=0
        if numberOfAcc!=None:
            res+=[line]
            numberOfAcc+=line.count("{")
            numberOfAcc-=line.count("}")
            if numberOfAcc==0:
                return res
def replaceFunName(extractFun, oldName, newName):
    """Return a copy of the extracted lines with every occurrence of
    oldName replaced by newName."""
    renamed = []
    for line in extractFun:
        renamed.append(line.replace(oldName, newName))
    return renamed
def addParamState(extractedFun, paramState, paramStateName="s"):
    """Rewrite the signature line so the function takes an explicit
    `paramState&` reference argument instead of an empty argument list.

    Aborts the whole script (exit code 42) when the signature has neither
    "(void)" nor "()".
    """
    signature = extractedFun[0]
    newArgList = "(" + paramState + "& " + paramStateName + ")"
    for emptyArgs in ("(void)", "()"):
        if emptyArgs in signature:
            return [signature.replace(emptyArgs, newArgList)] + extractedFun[1:]
    print("error : impossible to add param")
    sys.exit(42)
def addHeader(extractedFun):
    """Prepend a forward declaration derived from the definition's first
    line (its opening '{' turned into ';')."""
    declaration = extractedFun[0].replace("{", ";")
    return [declaration] + extractedFun
def addInline(extractedFun):
    """Mark the extracted definition `inline` by prefixing its first line."""
    first, rest = extractedFun[0], extractedFun[1:]
    return ["inline " + first] + rest
def writeFun(handler, extractedFun):
    """Emit the extracted lines: write them to `handler`, or print them
    (dropping each line's last character, the newline) when handler is None."""
    if handler is None:
        for line in extractedFun:
            print(line[:-1])
        return
    for line in extractedFun:
        handler.write(line)
def genNextAndRotl(impl, size):
    """Extract `rotl` and `next` from the upstream implementation `impl`
    (a .c file under repOrgImpl), rename them with the impl prefix and give
    `next` an explicit state parameter, so several generators can coexist
    in the same binary.  Returns the concatenated source lines.
    """
    assert(size in [128,256])
    assert(str(size) in impl)
    pathName=os.path.join(repOrgImpl, impl+".c")
    # rewrite next(): add the state argument, prefix names, mark inline
    fun_next=extractFun(pathName, 'next')
    fun_next=addParamState(fun_next , "xoshiro"+str(size)+"_state_t")
    fun_next=replaceFunName(fun_next, "next", impl+"_next")
    fun_next=replaceFunName(fun_next, "rotl", impl+"_rotl")
    fun_next=addInline(fun_next);
    # rotl() only needs the name prefix (upstream already declares it inline)
    fun_rotl=extractFun(pathName, 'rotl')
    fun_rotl=replaceFunName(fun_rotl, "rotl", impl+"_rotl")
    return fun_rotl +["\n"] +fun_next
if __name__=="__main__":
    # Upstream implementations to specialize, grouped by state size.
    implemList128 =["xoshiro128plus","xoshiro128plusplus","xoshiro128starstar"]
    implemList128+=["xoroshiro128plus","xoroshiro128plusplus","xoroshiro128starstar"]
    implemList256 =["xoshiro256plus", "xoshiro256plusplus", "xoshiro256starstar"]
    implMix="splitmix64"
    # Generated-file preamble.
    res=["//generated by %s\n"%(str(sys.argv))]
    res+=["//generated from %s\n"%(str([os.path.join(repOrgImpl, impl+".c") for impl in implemList128 + implemList256 ]+[implMix]))]
    res+=["// cf. copyright\n"]
    res+=["\n"]
    res+=["#include<stdint.h>\n"]
    res+=["#include<cfloat>\n"]
    res+=["\n"]
    res+=["typedef uint64_t xoshiro128_state_t[2];\n"]
    res+=["typedef uint64_t xoshiro256_state_t[4];\n"]
    res+=["\n"]
    # One renamed (rotl, next) pair per upstream implementation.
    for impl in implemList128:
        res+=genNextAndRotl(impl,128)
    for impl in implemList256:
        res+=genNextAndRotl(impl,256)
    res+=["\n"]
    # splitmix64 is used only for seeding; its state is a plain uint64_t.
    pathName=os.path.join(repOrgImpl, implMix+".c")
    fun_next=extractFun(pathName, "next");
    fun_next=addParamState(fun_next , "uint64_t ", "x" );
    fun_next=replaceFunName(fun_next, "next", implMix+"_next")
    fun_next=addInline(fun_next);
    res+=fun_next
    # State initialization helpers (seed expansion through splitmix64).
    init_code="""
inline void init_xoshiro256_state(xoshiro256_state_t& state, uint64_t seed){
uint64_t splitMixState=seed;
state[0]= splitmix64_next(splitMixState);
state[1]= splitmix64_next(splitMixState);
state[2]= splitmix64_next(splitMixState);
state[3]= splitmix64_next(splitMixState);
}
inline void init_xoshiro128_state(xoshiro128_state_t& state,uint64_t seed){
uint64_t splitMixState=seed;
state[0]= splitmix64_next(splitMixState);
state[1]= splitmix64_next(splitMixState);
}
"""
    res+=[line+"\n" for line in init_code.split("\n")]
    # Integer-to-[0,1) float conversions (standard xoshiro recipes).
    convFloatCode="""
inline float xoshiro_uint32_to_float(const uint32_t i){
constexpr float factor(0.5*FLT_EPSILON);//0x1.0p-24
return (i >> 8) * factor;
};
inline double xoshiro_uint64_to_double(const uint64_t i){
constexpr double factor(0.5*DBL_EPSILON);//0x1.0p-53
return (i >> 11) * factor;
};
"""
    res+=[line+"\n" for line in convFloatCode.split("\n")]
    writeFun(open("xoshiro.cxx","w"), res)
| 4,265 | 30.367647 | 168 | py |
verrou | verrou-master/check_perf_tools/gen_stat.py | #!/usr/bin/python3
import os
import re
import sys
import subprocess
import math
from tabular import *
# Rounding modes with deterministic (hash-based) variants.
detRounding=["random_det","average_det", "random_comdet","average_comdet", "random_scomdet","average_scomdet", "sr_monotonic"]
# Non-deterministic / IEEE modes exercised by the numerical check.
roundingListNum=["random", "average", "nearest", "upward", "downward"]
# Verrou build configurations (see gen_build.py) whose stats are compared.
buildConfList=[ "current","dietzfelbinger","multiply_shift","double_tabulation", "xxhash","mersenne_twister"]
#buildConfList=["double_tabulation"]#,"mersenne_twister"]
buildConfListXoshiro=[]#"xoshiro","xoshiro-2","xoshiro-8"]
# Numerical benchmark (run/extract scripts used by verrou_plot_stat).
pathNumBin="../unitTest/checkStatRounding"
runNum="run.sh"
extractNum="extract.py"
# Cartesian product of benchmark environment variables: 4 code variants.
numEnvConfigTab=[{"ALGO":algo, "ALGO_TYPE":realtype} for realtype in ["double", "float"] for algo in ["Seq", "Rec"]]
def runCmd(cmd):
    # Fire-and-forget shell command; the exit status is deliberately ignored.
    # shell=True is acceptable here: cmd strings are built internally.
    subprocess.call(cmd, shell=True)
def runCmdToLines(cmd):
    """Run `cmd` in a shell and return its captured stdout split on newlines
    (the trailing newline yields a final empty element)."""
    stdout = subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout
    return stdout.split("\n")
def parsePlotStat(lineTab):
    """Parse the textual output of verrou_plot_stat.

    Returns a dict keyed by rounding-mode name with, per mode,
    {"absEst","relEst","bit"} from the estimator section and
    {"mca","mca_bit"} from the "[mca]" section; the special key "nearest"
    holds the reference value string.  The format is inferred from the
    parsing below — confirm against verrou_plot_stat's actual output.
    """
    toParse=False   # inside an estimator table (ends at the "all" row)
    res={}
    for line in lineTab:
        if line.startswith("[mca] estimator"):
            continue
        if line.startswith("[mca] all:"):
            toParse=True
        if toParse:
            if not line.startswith("[mca]"):
                # plain estimator row: "rounding:\tabs\trel\tNNbit"
                spline=line.strip().split("\t")
                rounding=spline[0][0:-1]   # drop trailing ':'
                absEst=spline[1]
                relEst=spline[2]
                bit=spline[3].replace("bit","")
                res[rounding]={"absEst":absEst, "relEst": relEst , "bit":bit }
                if rounding=="all":
                    toParse=False
            else:
                # mca row: "[mca] rounding:\trel\tNNbit" — merged into the
                # entry created by the estimator section above.
                line=line.replace("[mca] ","")
                spline=line.strip().split("\t")
                rounding=spline[0][0:-1]
                relEst=spline[1]
                bit=spline[2].replace("bit","")
                res[rounding]["mca"]=relEst
                res[rounding]["mca_bit"]=bit
                if rounding=="all":
                    toParse=False
        if line.startswith("refValue[nearest]"):
            res["nearest"]=(line.split(":")[1]).strip()
        if line.startswith("estimator"):
            toParse=True
    return res
def extractStat():
    """Run verrou_plot_stat (no plot) for every build configuration and
    benchmark variant, and return the parsed statistics as
    {buildName: {envConcatKey: parsePlotStat(...)}}.
    """
    res={}
    for name in buildConfList+buildConfListXoshiro:
        resName={}
        repNum="buildRep-%s/num"%(name)
        roundingTab=roundingListNum + detRounding
        roundingStr=",".join(roundingTab)
        for envConfig in numEnvConfigTab:
            # envStr: "KEY=value ..." prefix; concatStr: dict key e.g. "Seqdouble"
            envStr=""
            concatStr=""
            for key in envConfig:
                envStr+= " "+key+"="+envConfig[key]
                concatStr+=envConfig[key]
            cmd="verrou_plot_stat --rep=%s --seed=42 --rounding-list=%s --no-plot --mca-estimator %s %s "%( repNum, roundingStr, pathNumBin+"/"+runNum, pathNumBin+"/"+extractNum)
            print(envStr, cmd)
            # source the per-build environment so the right verrou is used
            lineTab=runCmdToLines(". ./buildRep-%s/install/env.sh ; %s %s "%(name,envStr,cmd))
            resName[concatStr]=parsePlotStat(lineTab)
        res[name]=resName
    return res
def checkCoherence(stat):
    """Check that all build configurations agree on the measured precision
    ("bit", within 1%) and on the nearest reference value.

    Returns False on the first incoherence.  NOTE(review): the bare except
    only prints debug output; if the lookup failed, `resTab` keeps the value
    from the previous iteration (or is unbound on the first one).
    """
    for code in ["Seqdouble", "Seqfloat", "Recdouble", "Recfloat"]:
        for rounding in ["random","average","all"]:
            try:
                resTab =[stat[conf][code][rounding]["bit"] for conf in buildConfList]
            except:
                print("debug error stat")
                for conf in buildConfList:
                    print("stat["+conf+"]",stat[conf])
            if len(set(resTab))>1:
                # tolerate up to 1% relative spread between configurations
                bitTab=[float(x) for x in set(resTab)]
                maxError=max([abs(x-bitTab[0])/ bitTab[0] for x in bitTab ])
                if maxError > 0.01:
                    print(code + " "+rounding+ " " +str(resTab))
                    print("maxError:",maxError)
                    return False
        # the nearest reference value must match exactly
        resTab =[stat[conf][code]["nearest"] for conf in buildConfList]
        if len(set(resTab))>1:
            print("resTab Neaerest", resTab)
            return False
    return True
def feedTab(stat, rndList=["random","average","sr_monotonic" ] ,detTab=["_det","_comdet"], extraRounding=[], ref=None, precisionVar="bit", buildConfList=buildConfList):
    """Fill the global table writer `tab` with precision statistics.

    stat: output of extractStat(); ref: exact reference value used to
    compute error(nearest); precisionVar: which parsed field to report
    ("bit" or "mca_bit").  NOTE(review): relies on the module-level `tab`
    being (re)bound to a tabular/tabularLatex instance before each call;
    the mutable default arguments are read-only here.
    """
    refName="current"
    codeTab=["Seqfloat","Seqdouble", "Recfloat","Recdouble"]
    codeTabName=[x.replace("float","<float>").replace("double","<double>")for x in codeTab]
    # header: Seq/Rec x float/double
    tab.begin()
    tab.lineMultiple([(1,""), (2,"Seq"),(2,"Rec")])
    tab.endLine()
    tab.line(["","float","double", "float","double"])
    tab.endLine()
    tab.lineSep()
    # number of correct bits of the nearest result w.r.t. the exact ref
    tab.line(["error(nearest)"]+ [ "%.2f"%( -math.log2(abs(float(stat[refName][code]["nearest"])-float(ref)) / float(ref))) for code in codeTab ])
    tab.endLine()
    # build the list of (rowLabel, statKey, buildName) rows to display
    roundingTab=[("all", "all", "current"),"SEPARATOR"]
    for rd in rndList:
        if rd!= "sr_monotonic":
            roundingTab+=[(rd, rd,refName)]
        if rd=="average":
            for gen in buildConfListXoshiro:
                roundingTab+=[(rd+ "("+gen+")" ,rd ,gen )]
        if rd=="random":
            for gen in buildConfListXoshiro:
                roundingTab+=[(rd+ "("+gen+")" ,rd ,gen )]
        for gen in buildConfList:
            if gen=="current":
                continue
            if rd in ["random","average"]:
                for detType in detTab:
                    roundingTab+=[(rd+detType+"("+gen+")",rd+detType,gen)]
            else:
                roundingTab+=[(rd+"("+gen+")",rd,gen)]
        roundingTab+=["SEPARATOR"]
    # emit the rows (the final trailing SEPARATOR is dropped)
    for confLine in roundingTab[0:-1]:
        if confLine=="SEPARATOR":
            tab.lineSep()
            continue
        head= [confLine[0]]
        content=[stat[confLine[2]][code][confLine[1]][precisionVar] for code in codeTab ]
        tab.line(head+content)
        tab.endLine()
    tab.end()
def plotNumConfig():
    """Generate histogram PNGs (one per build/benchmark variant) with
    verrou_plot_stat; results go into histPng/."""
    histRep="histPng"
    if not os.path.exists(histRep):
        os.mkdir(histRep)
    for name in buildConfList:
        # per-build working directory reused by verrou_plot_stat
        repNum="buildRep-%s/num"%(name)
        if not os.path.exists(repNum):
            os.mkdir(repNum)
        roundingTab=detRounding+ roundingListNum
        roundingStr=",".join(roundingTab)
        for envConfig in numEnvConfigTab:
            envStr=""
            pngStr=os.path.join(histRep,name+"-")
            for key in envConfig:
                envStr+= " "+key+"="+envConfig[key]
                pngStr+=envConfig[key]
            cmd="verrou_plot_stat --rep=%s --num-threads=5 --seed=42 --relative=104857.6 --rounding-list=%s --png=%s.png %s %s "%( repNum, roundingStr, pngStr, pathNumBin+"/"+runNum, pathNumBin+"/"+extractNum)
            print(envStr, cmd)
            # "local" uses the ambient verrou; otherwise source the build env
            if name!="local":
                runCmd(". ./buildRep-%s/install/env.sh ; %s %s "%(name,envStr,cmd))
            else:
                runCmd("%s %s "%(envStr,cmd))
if __name__=="__main__":
    # plotNumConfig()
    statRes=extractStat()
    if checkCoherence(statRes):
        print("checkCoherence OK")
    else:
        print("checkCoherence FAILURE")
    # One LaTeX table per determinism flavour; feedTab reads the global `tab`.
    tab=tabularLatex("lcccc", output="tabDet.tex")
    feedTab(statRes,rndList=["random","average"],detTab=["_det"], ref=2**20*0.1)
    tab=tabularLatex("lcccc", output="tabComDet.tex")
    feedTab(statRes,rndList=["random","average"],detTab=["_comdet"], ref=2**20*0.1)
    tab=tabularLatex("lcccc", output="tabScomDet.tex")
    feedTab(statRes,rndList=["random","average"],detTab=["_scomdet"], ref=2**20*0.1)
    tab=tabularLatex("lcccc", output="tabMono.tex")
    feedTab(statRes,rndList=["average","sr_monotonic"],detTab=["_scomdet"], ref=2**20*0.1)
    tab=tabularLatex("lcccc", output="tabMCA.tex")
    feedTab(statRes,rndList=["random","average","sr_monotonic"],detTab=["_det","_scomdet"], ref=2**20*0.1, precisionVar="mca_bit", buildConfList=[ "current","double_tabulation", "xxhash","mersenne_twister"])
    # Reference histogram plots for the mersenne_twister build.
    cmd="ALGO=Rec ALGO_TYPE=float verrou_plot_stat --rep=buildRep-mersenne_twister/num --seed=42 --relative=104857.6 --rounding-list=random,average,nearest,upward,downward,random_det,average_det --png=Recfloatmersenne_twisterDet.png ../unitTest/checkStatRounding/run.sh ../unitTest/checkStatRounding/extract.py"
    print(cmd)
    runCmd(cmd)
    cmd="ALGO=Seq ALGO_TYPE=float verrou_plot_stat --nb-bin=200 --rep=buildRep-mersenne_twister/num --seed=42 --relative=104857.6 --rounding-list=average,random,random_det,average_det --png=SeqFloatmersenne_twisterDetZoom.png ../unitTest/checkStatRounding/run.sh ../unitTest/checkStatRounding/extract.py"
    print(cmd)
    runCmd(cmd)
    cmd="ALGO=Seq ALGO_TYPE=float verrou_plot_stat --rep=buildRep-mersenne_twister/num --seed=42 --relative=104857.6 --rounding-list=average,random,random_det,average_det,nearest,downward,upward --png=SeqFloatmersenne_twisterDet.png ../unitTest/checkStatRounding/run.sh ../unitTest/checkStatRounding/extract.py"
    print(cmd)
    runCmd(cmd)
| 8,753 | 37.734513 | 313 | py |
verrou | verrou-master/check_perf_tools/tabular.py |
class tabular:
    """Plain-text table writer: cells are tab-joined in `currentStr` and a
    row is printed to stdout when endLine() is called."""

    def __init__(self):
        self.currentStr = ""

    def begin(self):
        """No-op: a plain-text table has no preamble."""
        pass

    def end(self):
        """No-op: a plain-text table has no closing markup."""
        pass

    def lineSep(self):
        """Emit an empty line as a row separator."""
        print("")

    def endLine(self):
        """Flush the accumulated row to stdout and reset the buffer."""
        print(self.currentStr)
        self.currentStr = ""

    def line(self, tab):
        """Append a row of cells, tab-separated."""
        self.currentStr += "\t".join(tab)

    def lineMultiple(self, tab):
        """Append a row given as (span, value) pairs; each value is simply
        repeated `span` times (plain text has no multi-column cells)."""
        cells = []
        for span, value in tab:
            cells.extend([value] * span)
        self.currentStr += "\t".join(cells)
class tabularLatex:
    """LaTeX (booktabs) table writer with the same interface as `tabular`.

    Rows accumulate in `currentStr`; end() prints the table when `output`
    is None, otherwise writes it to the `output` file.
    """

    def __init__(self, keyStr="c", output=None):
        # keyStr: LaTeX column specification (e.g. "lcccc")
        # output: target file name, or None to print on stdout
        self.currentStr = ""
        self.keyStr = keyStr
        self.output = output

    def begin(self):
        """Open the tabular environment with a top rule."""
        self.currentStr += "\\begin{tabular}{%s}\\toprule\n" % self.keyStr

    def end(self):
        """Close the tabular environment and flush it to stdout or `output`."""
        self.currentStr += "\\bottomrule\n"
        self.currentStr += "\\end{tabular}\n"
        if self.output is None:
            print(self.currentStr)
        else:
            # bug fix: the file handle was previously opened and never closed
            with open(self.output, "w") as handler:
                handler.write(self.currentStr)

    def lineSep(self):
        """Emit a booktabs mid-rule as row separator."""
        self.currentStr += "\\midrule\n"

    def endLine(self):
        """Terminate the current row (LaTeX line break)."""
        self.currentStr += "\\\\\n"

    def line(self, tab):
        """Append a row of '&'-separated cells; underscores are escaped."""
        lineStr = "\t&\t".join(tab)
        self.currentStr += lineStr.replace("_", r"\_")

    def lineMultiple(self, tab):
        """Append a row of (span, value) pairs, using \\multicolumn for
        cells spanning more than one column; underscores are escaped."""
        cells = []
        for (nb, value) in tab:
            escaped = value.replace("_", r"\_")
            if nb > 1:
                cells += ["\\multicolumn{%s}{c}{%s}" % (str(nb), escaped)]
            if nb == 1:
                cells += [escaped]
        self.currentStr += "\t&\t".join(cells)
| 1,646 | 26.45 | 88 | py |
verrou | verrou-master/check_perf_tools/gen_build.py | #!/usr/bin/python3
import os
import re
import sys
import subprocess
# Git remote prefix for verrou branches (emptied: use local branches).
gitRepositoty="origin/"
gitRepositoty=""
branch="master"
valgrind_version="valgrind-3.21.0"
# Build configurations: either a released "tag" archive, or a "valgrind"
# archive plus a verrou branch, with extra configure "flags".
verrouConfigList={
    "stable": { "tag":"v2.4.0" ,"flags":"--enable-verrou-fma"},
    "current": { "valgrind":valgrind_version, "branch_verrou":branch ,"flags":""},
    "current_fast": { "valgrind":valgrind_version, "branch_verrou":branch ,"flags":"--enable-verrou-check-naninf=no --with-verrou-denorm-hack=none"},
    "dietzfelbinger": { "valgrind":valgrind_version, "branch_verrou":branch ,"flags":"--with-verrou-det-hash=dietzfelbinger --enable-verrou-check-naninf=no --with-verrou-denorm-hack=none"},
    "multiply_shift": { "valgrind":valgrind_version, "branch_verrou":branch ,"flags":"--with-verrou-det-hash=multiply_shift --enable-verrou-check-naninf=no --with-verrou-denorm-hack=none"},
    "double_tabulation":{ "valgrind":valgrind_version, "branch_verrou":branch ,"flags":"--with-verrou-det-hash=double_tabulation --enable-verrou-check-naninf=no --with-verrou-denorm-hack=none"},
    "mersenne_twister": { "valgrind":valgrind_version, "branch_verrou":branch ,"flags":"--with-verrou-det-hash=mersenne_twister --enable-verrou-check-naninf=no --with-verrou-denorm-hack=none"},
    "xxhash": { "valgrind":valgrind_version, "branch_verrou":branch ,"flags":"--with-verrou-det-hash=xxhash --enable-verrou-check-naninf=no --with-verrou-denorm-hack=none"},
#}
#verrouConfigList={
#    "current": { "valgrind":"valgrind-3.20.0", "branch_verrou":"master" ,"flags":""},
#    "current-upgrade": { "valgrind":"valgrind-3.21.0", "branch_verrou":"bl/val3.21" ,"flags":""},
}
# Download sources for the valgrind / released-verrou archives.
valgrindConfigList={
    "valgrind-3.17.0": {"file": "valgrind-3.17.0.tar.bz2", "url":"https://sourceware.org/pub/valgrind/valgrind-3.17.0.tar.bz2"},
    "valgrind-3.19.0": {"file": "valgrind-3.19.0.tar.bz2", "url":"https://sourceware.org/pub/valgrind/valgrind-3.19.0.tar.bz2"},
    "valgrind-3.20.0": {"file": "valgrind-3.20.0.tar.bz2", "url":"https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2"},
    "valgrind-3.21.0": {"file": "valgrind-3.21.0.tar.bz2", "url":"https://sourceware.org/pub/valgrind/valgrind-3.21.0.tar.bz2"},
    "v2.3.1": {"file": "v2.3.1.tar.gz","url":"https://github.com/edf-hpc/verrou/releases/download/v2.3.1/valgrind-3.17.0_verrou-2.3.1.tar.gz"},
    "v2.4.0": {"file": "v2.4.0.tar.gz","url":"https://github.com/edf-hpc/verrou/releases/download/v2.4.0/valgrind-3.20.0_verrou-2.4.0.tar.gz"},
}
def runCmd(cmd):
    # Fire-and-forget shell command; the exit status is deliberately ignored.
    # shell=True is acceptable here: cmd strings are built internally.
    subprocess.call(cmd, shell=True)
def buildConfig(name):
    """Build one verrou configuration into buildRep-<name>/.

    Downloads the required valgrind (or released-verrou) archive when
    missing, then delegates to ./buildConfig.sh (branch build) or
    ./buildTag.sh (release build).  "local" only creates the directory.
    Existing build directories are left untouched.
    """
    buildRep="buildRep-"+name
    if name=="local":
        if not os.path.exists(buildRep):
            os.mkdir(buildRep)
        return
    verrouConfigParam=verrouConfigList[name]
    # "tag" (released archive) takes precedence over "valgrind"+branch
    valgrindKey=None
    if "valgrind" in verrouConfigParam:
        valgrindKey=verrouConfigParam["valgrind"]
    if "tag" in verrouConfigParam:
        valgrindKey=verrouConfigParam["tag"]
    if valgrindKey==None:
        print("Error valgrind key needed")
        sys.exit(42)
    # fetch the source archive once
    valgrindArchive=valgrindConfigList[valgrindKey]["file"]
    if not os.path.exists(valgrindArchive):
        valgrindUrl=valgrindConfigList[valgrindKey]["url"]
        runCmd("wget --output-document=%s %s"%(valgrindArchive,valgrindUrl))
    if not os.path.exists(buildRep):
        if "valgrind" in verrouConfigParam:
            branch=verrouConfigParam["branch_verrou"]
            # optional per-config git remote, else the module-level default
            if "gitRepositoty" in verrouConfigParam:
                branch=verrouConfigParam["gitRepositoty"]+branch
            else:
                branch=gitRepositoty+branch
            runCmd("./buildConfig.sh %s %s %s \"%s\""%(
                buildRep,
                valgrindConfigList[verrouConfigParam["valgrind"]]["file"],
                branch,
                verrouConfigParam["flags"])
            )
        if "tag" in verrouConfigParam:
            runCmd("./buildTag.sh %s %s \"%s\""%(
                buildRep,
                valgrindConfigList[verrouConfigParam["tag"]]["file"],
                verrouConfigParam["flags"])
            )
if __name__=="__main__":
    # Build every configured verrou/valgrind combination (no-op when the
    # build directory already exists).
    for name in verrouConfigList:
        buildConfig(name)
| 4,201 | 45.175824 | 195 | py |
verrou | verrou-master/check_perf_tools/gen_perf.py | #!/usr/bin/python3
import os
import re
import sys
import subprocess
from tabular import *
# Rounding modes measured on every standard build.
roundingListPerf=["random", "average","nearest"]
# Deterministic (hash-based) rounding modes.
detRounding=["random_det","average_det", "random_comdet","average_comdet","random_scomdet","average_scomdet", "sr_monotonic"]
# Standard builds vs. special hash-generator builds (see gen_build.py).
buildConfigList=["stable","current", "current_fast"]
buildSpecialConfigList=["dietzfelbinger", "multiply_shift","double_tabulation", "xxhash","mersenne_twister"]
nbRunTuple=(5,5) #inner outer
ref_name="current_fast"
slowDown=True
# buildConfigList=["current", "current-upgrade"]
# ref_name="current"
# buildSpecialConfigList=[]
# detRounding=[]
# nbRunTuple=(5,20) #inner outer
# slowDown=False
# Extra verrou command-line options measured: [(option string, value)].
verrouOptionsList=[("","")]
# Benchmark binary variants: optimization level x float type (FMA builds).
postFixTab=["O0-DOUBLE-FMA", "O3-DOUBLE-FMA", "O0-FLOAT-FMA", "O3-FLOAT-FMA"]
#postFixTab=["O3-DOUBLE-FMA"]
pathPerfBin="../unitTest/testPerf"
perfBinNameList=["stencil-"+i for i in postFixTab]
#perfBinNameList=["stencil-"+i for i in ["O3-DOUBLE"] ]
perfCmdParam= "--scale=1 "+str(nbRunTuple[0])
def get_rounding_tab(name):
    """Rounding modes measured for one build configuration.

    "current"-like builds exercise every mode, other standard builds only
    the non-deterministic ones, special hash builds only the deterministic
    ones.  Returns None for unknown configuration names.
    """
    if name in ("current", "current_fast", "current-upgrade"):
        return roundingListPerf + detRounding
    if name in buildConfigList:
        return roundingListPerf
    if name in buildSpecialConfigList:
        return detRounding
    return None
def runCmd(cmd):
    # Fire-and-forget shell command; the exit status is deliberately ignored.
    # shell=True is acceptable here: cmd strings are built internally.
    subprocess.call(cmd, shell=True)
def runPerfConfig(name):
    """Run (and cache) the instrumented benchmark measurements for one build.

    For each binary/option/rounding combination, nbRunTuple[1] outer runs
    are executed under `valgrind --tool=verrou`; each run's output goes to
    buildRep-<name>/measure/ and existing output files are skipped.
    """
    repMeasure="buildRep-%s/measure"%(name)
    print("working in %s"%(repMeasure))
    if not os.path.exists(repMeasure):
        os.mkdir(repMeasure)
    for binName in perfBinNameList:
        for (optName, opt) in verrouOptionsList:
            roundingTab=get_rounding_tab(name)
            for rounding in roundingTab:
                cmd="valgrind --tool=verrou --rounding-mode=%s %s %s %s "%(rounding, optName, pathPerfBin+"/"+binName,perfCmdParam)
                toPrint=True   # print each cmd once, only if work remains
                for i in range(nbRunTuple[1]):
                    outputName="buildRep-%s/measure/%s_%s_%s.%i"%(name, binName, optName, rounding, i)
                    if not os.path.exists(outputName):
                        if toPrint:
                            print(cmd)
                            toPrint=False
                        # source the per-build environment unless "local"
                        if name!="local":
                            runCmd(". ./buildRep-%s/install/env.sh ; %s > %s 2> %s"%(name,cmd,outputName, outputName+".err"))
                        else:
                            runCmd("%s > %s 2> %s"%(cmd,outputName, outputName+".err"))
def runPerfRef():
    """Run (and cache) the uninstrumented reference measurements.

    Each benchmark binary is run natively nbRunTuple[1] times; outputs go
    to measureRef/ and existing output files are skipped.
    """
    repMeasure="measureRef"
    if not os.path.exists(repMeasure):
        os.mkdir(repMeasure)
    for binName in perfBinNameList:
        cmd="%s %s "%(pathPerfBin+"/"+binName,perfCmdParam)
        toPrint=True   # print each cmd once, only if work remains
        for i in range(nbRunTuple[1]):
            outputName="measureRef/%s.%i"%(binName, i)
            if not os.path.exists(outputName):
                if toPrint:
                    print(cmd)
                    toPrint=False
                runCmd("%s > %s 2> %s"%(cmd,outputName, outputName+".err"))
# One regexp per timer line emitted by the stencil benchmark binaries.
# (raw strings: the previous plain strings relied on "\s"/"\[" being passed
# through despite being invalid string escapes)
timeRegExp = re.compile(r"@time of serial run:\s*\[(.+)\] secondes\s*")
minTimeRegExp = re.compile(r"@mintime of serial run:\s*\[(.+)\] secondes\s*")

def extractPerfMeasure(fileName):
    """Parse one benchmark log file.

    Collects every '@time' value and the single '@mintime' value.
    Returns {"min": minimal time (None when absent), "tab": [run times]}.
    Prints a warning when the '@mintime' marker is missing.
    """
    resTab = []
    resMin = None
    # bug fix: the file handle was previously opened and never closed
    with open(fileName) as handler:
        for line in handler:
            m = timeRegExp.match(line)
            if m is not None:
                resTab.append(float(m.group(1)))
                continue
            m = minTimeRegExp.match(line)
            if m is not None:
                resMin = float(m.group(1))
    if resMin is None:
        print("No timer in file %s " % (fileName))
    return {"min": resMin, "tab": resTab}
def joinMeasure(m1, m2):
    """Merge two measurement dicts: keep the smaller minimum and
    concatenate the individual run-time lists."""
    merged = {
        "min": min(m1["min"], m2["min"]),
        "tab": m1["tab"] + m2["tab"],
    }
    return merged
def extractPerf(name):
    """Aggregate the cached measurements of one build configuration.

    Returns {binName: {optName: {rounding: {"min":..., "tab":[...]}}}},
    merging the nbRunTuple[1] per-run log files with joinMeasure().
    """
    res={}
    for binName in perfBinNameList:
        res[binName]={}
        for (optName, opt) in verrouOptionsList:
            res[binName][optName]={}
            for rounding in get_rounding_tab(name):
                resPerf=None
                for i in range(nbRunTuple[1]):
                    outputName="buildRep-%s/measure/%s_%s_%s.%i"%(name, binName, optName, rounding, i)
                    if resPerf==None:
                        resPerf=extractPerfMeasure(outputName)
                    else:
                        resPerf=joinMeasure(resPerf,extractPerfMeasure(outputName))
                res[binName][optName][rounding]=resPerf
    return res
def extractPerfRef():
    """Aggregate the cached uninstrumented reference measurements.

    Returns {binName: {"min":..., "tab":[...]}} merged over the
    nbRunTuple[1] per-run log files in measureRef/.
    """
    res={}
    for binName in perfBinNameList:
        res[binName]={}
        resPerf=None
        for i in range(nbRunTuple[1]):
            outputName="measureRef/%s.%i"%( binName, i)
            if resPerf==None:
                resPerf=extractPerfMeasure(outputName)
            else:
                resPerf=joinMeasure(resPerf,extractPerfMeasure(outputName))
        res[binName]=resPerf
    return res
def nonPerfRegressionAnalyze(data, refName, refOption=""):
    """Print, for each non-reference verrou version, the runtime ratio
    new/reference of every (option, rounding, binary) combination.

    data: {versionName: extractPerf(...)}; refName: reference version key.
    """
    newVersionTab=[x for x in data.keys() if not x==refName]
    dataRef=data[refName]
    print("reference verrou version : %s"%(refName))
    for newVersion in newVersionTab:
        print("verrou version : %s"%(newVersion))
        for (optionStr, optionVal) in verrouOptionsList:
            print("\truntime verrou option : ", optionStr)
            dataNew=data[newVersion]
            # only rounding modes measured for the new version are compared;
            # the reference is assumed to cover at least that set
            roundingTab=get_rounding_tab(newVersion)# roundingListPerf +list(set(special_rounding(refName)).intersection(set(special_rounding(newVersion))))
            for rounding in roundingTab:
                print("\t\trounding : %s "%(rounding))
                for binName in perfBinNameList:
                    minTimeRef=dataRef[binName][refOption][rounding]["min"]
                    minTimeNew=dataNew[binName][optionStr][rounding]["min"]
                    print("\t\t\t%s ratio: %.4f "%(binName, minTimeNew/minTimeRef))
def slowDownAnalyze(data):
    """Print, for every build/option/rounding/binary combination, the
    slow-down factor of the verrou-instrumented run versus the native
    (uninstrumented) reference run.

    data: {versionName: extractPerf(...)}.
    """
    versionTab=[x for x in data.keys()]
    refData=extractPerfRef()
    for version in versionTab:
        print("verrou version : %s"%(version))
        for (optionStr, optionVal) in verrouOptionsList:
            print("\t runtime verrou option : ", optionStr)
            dataNew=data[version]
            # bug fix: this previously called get_rounding_tab(name); `name`
            # is not defined in this function and only resolved through a
            # leftover module-level loop variable — the intended argument
            # is `version` (as the original inline comment already noted).
            roundingTab=get_rounding_tab(version)
            for rounding in roundingTab:
                print("\t\trounding : %s "%(rounding))
                for binName in perfBinNameList:
                    minTimeNew=dataNew[binName][optionStr][rounding]["min"]
                    refTime=refData[binName]["min"]
                    print("\t\t\t%s slowDown: x%.1f "%(binName, minTimeNew/refTime))
def feedPerfTab(data, buildList, detTab=None, extraRounding=None, optionStr=""):
    """Feed the module-level 'tab' object with slow-down factors.

    data: dict configureName -> binName -> optionStr -> rounding -> measure.
    buildList: verrou build configurations expanded for det roundings.
    detTab: deterministic suffixes ("_det", "_comdet", ...); defaults to
        ["_det", "_comdet"].
    extraRounding: extra rounding modes appended at the end of the table.
    optionStr: runtime verrou option key used to index 'data'.

    NOTE(review): writes to the module-level 'tab' and reads 'postFixTab';
    extractPerfRef() provides the reference timings.
    """
    # Bug fix: avoid the mutable-default-argument pitfall by building the
    # default lists per call (behavior unchanged for existing callers).
    if detTab is None:
        detTab = ["_det", "_comdet"]
    if extraRounding is None:
        extraRounding = []
    tab.begin()
    # Header: the stencil binaries come in 1 or 4 type/optimization variants.
    if len(postFixTab) == 4:
        tab.lineMultiple([(1, "type"), (2, "double"), (2, "float")])
    if len(postFixTab) == 1:
        tab.lineMultiple([(1, "type"), (1, "double")])
    tab.endLine()
    if len(postFixTab) == 4:
        tab.line(["compilation option", "O0", "O3", "O0", "O3"])
    if len(postFixTab) == 1:
        tab.line(["compilation option", "O3"])
    tab.endLine()
    tab.lineSep()
    # Each row descriptor: (row label, rounding mode, build configuration).
    roundingTab = [("nearest", "nearest", "current"), "SEPARATOR"]
    for rd in ["random", "average"]:
        roundingTab += [(rd, rd, "current")]
        for gen in buildList:
            for detType in detTab:
                roundingTab += [(rd + detType + "(" + gen + ")", rd + detType, gen)]
        roundingTab += ["SEPARATOR"]
    roundingTab = roundingTab[0:-1]  # drop the trailing separator
    if extraRounding != []:
        roundingTab += ["SEPARATOR"]
        for rd in extraRounding:
            for gen in buildList:
                roundingTab += [(rd + "(" + gen + ")", rd, gen)]
    refData = extractPerfRef()
    for confLine in roundingTab:
        if confLine == "SEPARATOR":
            tab.lineSep()
            continue
        head = [confLine[0]]

        def content(post, rounding, configure):
            # Slow-down of one binary: instrumented min time / reference min.
            binName = "stencil-" + post
            minTimeNew = data[configure][binName][optionStr][rounding]["min"]
            refTime = refData[binName]["min"]
            slowDown = "x%.1f " % (minTimeNew / refTime)
            return slowDown

        contentTab = [content(post, confLine[1], confLine[2]) for post in postFixTab]
        tab.line(head + contentTab)
        tab.endLine()
    tab.end()
if __name__=="__main__":
    # Build the perf binaries, then run every verrou build configuration.
    runCmd("make -C ../unitTest/testPerf/")
    for name in buildConfigList+buildSpecialConfigList:
        runPerfConfig(name)
    if slowDown:
        runPerfRef()  # reference (non-instrumented) timings
    resAll={}
    for name in buildConfigList+buildSpecialConfigList:
        resAll[name]=extractPerf(name)
    print(resAll)
    print("ref_name:",ref_name)
    nonPerfRegressionAnalyze(resAll, ref_name)
    print("")
    if slowDown:
        # One LaTeX table per deterministic-rounding family.
        tab=tabularLatex("lcccc", output="slowDown_det.tex")
        feedPerfTab(resAll,buildSpecialConfigList, detTab=["_det"])
        tab=tabularLatex("lcccc", output="slowDown_comdet.tex")
        feedPerfTab(resAll,buildSpecialConfigList, detTab=["_comdet"])
        tab=tabularLatex("lcccc", output="slowDown_scomdet.tex")
        feedPerfTab(resAll,buildSpecialConfigList, detTab=["_scomdet"])
#        tab=tabularLatex("lcccc", output="slowDown_doubleTab.tex")
#        feedPerfTab(resAll,["double_tabulation"], detTab=["_det","_comdet","_scomdet"])
        tab=tabularLatex("lcccc", output="slowDown_xxhash.tex")
        feedPerfTab(resAll,["xxhash"], detTab=["_det","_comdet","_scomdet"], extraRounding=["sr_monotonic"])
        # NOTE(review): exits here in the slowDown case; the plain-text
        # table below is only produced when slowDown is false.
        sys.exit()
    tab=tabular()
    feedPerfTab(resAll,buildSpecialConfigList, detTab=["_det","_comdet"])
| 9,679 | 34.2 | 157 | py |
verrou | verrou-master/synchroLib/trace_verrou_synchro.py | #! /usr/bin/python3.5
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,],
trace=0, count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
__all__ = ['Trace', 'CoverageResults']
import linecache
import os
import re
import sys
import token
import tokenize
import inspect
import gc
import dis
import pickle
from warnings import warn as _warn
from time import monotonic as _time
from verrouPyBinding import bindingSynchroLib
try:
    import threading
except ImportError:
    # No threading support: install/remove the trace function for the
    # main interpreter thread only.
    _settrace = sys.settrace
    def _unsettrace():
        sys.settrace(None)
else:
    # With threading, the trace function must be installed both for the
    # current thread and as the default for threads started afterwards.
    def _settrace(func):
        threading.settrace(func)
        sys.settrace(func)
    def _unsettrace():
        sys.settrace(None)
        threading.settrace(None)
def _usage(outfile):
    # Write the command-line help text to 'outfile'.
    # NOTE(review): the column alignment below follows stdlib trace.py,
    # from which this file is derived — confirm against the original file.
    outfile.write("""Usage: %s [OPTIONS] <file> [ARGS]
Meta-options:
--help                Display this help then exit.
--version             Output version information then exit.
Otherwise, exactly one of the following three options must be given:
-t, --trace           Print each line to sys.stdout before it is executed.
-c, --count           Count the number of times each line is executed
                      and write the counts to <module>.cover for each
                      module executed, in the module's directory.
                      See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs       Keep track of which functions are executed at least
                      once and write the results to sys.stdout after the
                      program exits.
-T, --trackcalls      Keep track of caller/called pairs and write the
                      results to sys.stdout after the program exits.
-r, --report          Generate a report from a counts file; do not execute
                      any code.  `--file' must specify the results file to
                      read, which must have been created in a previous run
                      with `--count --file=FILE'.
Modifiers:
-f, --file=<file>     File to accumulate counts over several runs.
-R, --no-report       Do not generate the coverage report files.
                      Useful if you want to accumulate over several runs.
-C, --coverdir=<dir>  Directory where the report files.  The coverage
                      report for <package>.<module> is written to file
                      <dir>/<package>/<module>.cover.
-m, --missing         Annotate executable lines that were not executed
                      with '>>>>>> '.
-s, --summary         Write a brief summary on stdout for each file.
                      (Can only be used with --count or --report.)
-g, --timing          Prefix each line with the time since the program started.
                      Only used while tracing.
Filters, may be repeated multiple times:
--ignore-module=<mod> Ignore the given module(s) and its submodules
                      (if it is a package).  Accepts comma separated
                      list of module names
--ignore-dir=<dir>    Ignore files in the given directory (multiple
                      directories can be joined by os.pathsep).
""" % sys.argv[0])
# Lines containing this marker are never flagged as "not covered".
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code (blank or comment-only).
rx_blank = re.compile(r'^\s*(#.*)?$')
class _Ignore:
def __init__(self, modules=None, dirs=None):
self._mods = set() if not modules else set(modules)
self._dirs = [] if not dirs else [os.path.normpath(d)
for d in dirs]
self._ignore = { '<string>': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list.
if modulename in self._mods: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
for mod in self._mods:
# Need to take some care since ignoring
# "cmp" mustn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
if modulename.startswith(mod + '.'):
self._ignore[modulename] = 1
return 1
# Now check that filename isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
def _modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
def _fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
# the drive letter is never part of the module name
drive, base = os.path.splitdrive(base)
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename.lstrip(".")
class synchro_lib:
    """Thin wrapper around the verrou synchronization shared library."""

    def __init__(self):
        # Load the default verrouSynchroLib.so and open a synchro session.
        print("Debug trace_verrou_init")
        print("os.environ: ", os.environ)
        self.lib = bindingSynchroLib()
        self.lib.verrou_synchro_init()

    def finalyze(self):
        # Close the synchro session (sic: historical spelling kept for callers).
        print("Debug trace_verrou_finalize")
        self.lib.verrou_synchro_finalyze()

    def synchro(self, bname, lineno):
        # Emit one synchronization point identified by (bname, lineno).
        print("Debug trace_verrou_synchro %s %i" % (bname, lineno))
        self.lib.verrou_synchro(bname, lineno)
class CoverageResults:
    """Accumulator for line counts, called functions and caller/callee
    pairs; can merge a previously pickled run and write .cover reports.
    (Derived from the standard-library trace.CoverageResults.)
    """
    def __init__(self, counts=None, calledfuncs=None, infile=None,
                 callers=None, outfile=None):
        # counts maps (filename, lineno) -> hit count.
        self.counts = counts
        if self.counts is None:
            self.counts = {}
        self.counter = self.counts.copy() # map (filename, lineno) to count
        self.calledfuncs = calledfuncs
        if self.calledfuncs is None:
            self.calledfuncs = {}
        self.calledfuncs = self.calledfuncs.copy()
        self.callers = callers
        if self.callers is None:
            self.callers = {}
        self.callers = self.callers.copy()
        self.infile = infile
        self.outfile = outfile
        if self.infile:
            # Try to merge existing counts file.
            try:
                with open(self.infile, 'rb') as f:
                    counts, calledfuncs, callers = pickle.load(f)
                self.update(self.__class__(counts, calledfuncs, callers))
            except (OSError, EOFError, ValueError) as err:
                print(("Skipping counts file %r: %s"
                       % (self.infile, err)), file=sys.stderr)

    def is_ignored_filename(self, filename):
        """Return True if the filename does not refer to a file
        we want to have reported.
        """
        # Pseudo-files such as "<string>" or "<frozen ...>".
        return filename.startswith('<') and filename.endswith('>')

    def update(self, other):
        """Merge in the data from another CoverageResults"""
        counts = self.counts
        calledfuncs = self.calledfuncs
        callers = self.callers
        other_counts = other.counts
        other_calledfuncs = other.calledfuncs
        other_callers = other.callers
        # Sum line counts; called/caller entries are presence flags.
        for key in other_counts:
            counts[key] = counts.get(key, 0) + other_counts[key]
        for key in other_calledfuncs:
            calledfuncs[key] = 1
        for key in other_callers:
            callers[key] = 1

    def write_results(self, show_missing=True, summary=False, coverdir=None):
        """Write the collected results.

        show_missing: annotate executable lines that were never hit.
        summary: print a per-module coverage summary on stdout.
        coverdir: directory for the .cover files; defaults to each
            source file's own directory.
        """
        if self.calledfuncs:
            print()
            print("functions called:")
            calls = self.calledfuncs
            for filename, modulename, funcname in sorted(calls):
                print(("filename: %s, modulename: %s, funcname: %s"
                       % (filename, modulename, funcname)))
        if self.callers:
            print()
            print("calling relationships:")
            lastfile = lastcfile = ""
            for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) \
                    in sorted(self.callers):
                if pfile != lastfile:
                    print()
                    print("***", pfile, "***")
                    lastfile = pfile
                    lastcfile = ""
                if cfile != pfile and lastcfile != cfile:
                    print("  -->", cfile)
                    lastcfile = cfile
                print("    %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc))
        # turn the counts data ("(filename, lineno) = count") into something
        # accessible on a per-file basis
        per_file = {}
        for filename, lineno in self.counts:
            lines_hit = per_file[filename] = per_file.get(filename, {})
            lines_hit[lineno] = self.counts[(filename, lineno)]
        # accumulate summary info, if needed
        sums = {}
        for filename, count in per_file.items():
            if self.is_ignored_filename(filename):
                continue
            if filename.endswith(".pyc"):
                filename = filename[:-1]
            if coverdir is None:
                dir = os.path.dirname(os.path.abspath(filename))
                modulename = _modname(filename)
            else:
                dir = coverdir
                if not os.path.exists(dir):
                    os.makedirs(dir)
                modulename = _fullmodname(filename)
            # If desired, get a list of the line numbers which represent
            # executable content (returned as a dict for better lookup speed)
            if show_missing:
                lnotab = _find_executable_linenos(filename)
            else:
                lnotab = {}
            if lnotab:
                source = linecache.getlines(filename)
                coverpath = os.path.join(dir, modulename + ".cover")
                with open(filename, 'rb') as fp:
                    encoding, _ = tokenize.detect_encoding(fp.readline)
                n_hits, n_lines = self.write_results_file(coverpath, source,
                                                          lnotab, count, encoding)
                if summary and n_lines:
                    percent = int(100 * n_hits / n_lines)
                    sums[modulename] = n_lines, percent, modulename, filename
        if summary and sums:
            print("lines   cov%   module   (path)")
            for m in sorted(sums):
                n_lines, percent, modulename, filename = sums[m]
                print("%5d   %3d%%   %s   (%s)" % sums[m])
        if self.outfile:
            # try and store counts and module info into self.outfile
            try:
                pickle.dump((self.counts, self.calledfuncs, self.callers),
                            open(self.outfile, 'wb'), 1)
            except OSError as err:
                print("Can't save counts files because %s" % err, file=sys.stderr)

    def write_results_file(self, path, lines, lnotab, lines_hit, encoding=None):
        """Return a coverage results file in path."""
        # Returns (n_hits, n_lines): executed lines and executable lines.
        try:
            outfile = open(path, "w", encoding=encoding)
        except OSError as err:
            print(("trace: Could not open %r for writing: %s"
                   "- skipping" % (path, err)), file=sys.stderr)
            return 0, 0
        n_lines = 0
        n_hits = 0
        with outfile:
            for lineno, line in enumerate(lines, 1):
                # do the blank/comment match to try to mark more lines
                # (help the reader find stuff that hasn't been covered)
                if lineno in lines_hit:
                    outfile.write("%5d: " % lines_hit[lineno])
                    n_hits += 1
                    n_lines += 1
                elif rx_blank.match(line):
                    outfile.write("       ")
                else:
                    # lines preceded by no marks weren't hit
                    # Highlight them if so indicated, unless the line contains
                    # #pragma: NO COVER
                    if lineno in lnotab and not PRAGMA_NOCOVER in line:
                        outfile.write(">>>>>> ")
                        n_lines += 1
                    else:
                        outfile.write("       ")
                outfile.write(line.expandtabs(8))
        return n_hits, n_lines
def _find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos
def _find_lines(code, strs):
    """Return a lineno dict for all code objects reachable from 'code'."""
    # Lines of this scope level...
    linenos = _find_lines_from_code(code, strs)
    # ...plus those of every nested code object stored in the constants.
    for const in code.co_consts:
        if inspect.iscode(const):
            linenos.update(_find_lines(const, strs))
    return linenos
def _find_strings(filename, encoding=None):
"""Return a dict of possible docstring positions.
The dict maps line numbers to strings. There is an entry for
line that contains only a string or a part of a triple-quoted
string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
with open(filename, encoding=encoding) as f:
tok = tokenize.generate_tokens(f.readline)
for ttype, tstr, start, end, line in tok:
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
return d
def _find_executable_linenos(filename):
    """Return a dict whose keys are the executable line numbers of a file."""
    try:
        with tokenize.open(filename) as stream:
            source = stream.read()
            encoding = stream.encoding
    except OSError as err:
        # Unreadable file: report it and pretend it has no executable lines.
        print(("Not printing coverage data for %r: %s"
               % (filename, err)), file=sys.stderr)
        return {}
    docstring_lines = _find_strings(filename, encoding)
    return _find_lines(compile(source, filename, "exec"), docstring_lines)
class Trace:
    """Tracing driver: installs a trace function and dispatches call/line
    events to counting, printing and verrou-synchronization handlers.

    This is a modified copy of the standard-library trace.Trace: each
    handler additionally emits a verrou synchronization point through
    self.synchroLib (see synchro_lib), and when neither counting nor
    tracing is requested a verrou-only local trace is installed.
    """
    def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
                 ignoremods=(), ignoredirs=(), infile=None, outfile=None,
                 timing=False):
        """
        @param count true iff it should count number of times each
                     line is executed
        @param trace true iff it should print out each line that is
                     being counted
        @param countfuncs true iff it should just output a list of
                     (filename, modulename, funcname,) for functions
                     that were called at least once;  This overrides
                     `count' and `trace'
        @param ignoremods a list of the names of modules to ignore
        @param ignoredirs a list of the names of directories to ignore
                     all of the (recursive) contents of
        @param infile file from which to read stored counts to be
                     added into the results
        @param outfile file in which to write the results
        @param timing true iff timing information be displayed
        """
        self.infile = infile
        self.outfile = outfile
        self.ignore = _Ignore(ignoremods, ignoredirs)
        self.counts = {}   # keys are (filename, linenumber)
        self.pathtobasename = {} # for memoizing os.path.basename
        self.donothing = 0
        self.trace = trace
        self._calledfuncs = {}
        self._callers = {}
        self._caller_cache = {}
        self.start_time = None
        # verrou addition: handle used to emit synchronization points.
        self.synchroLib= synchro_lib()
        if timing:
            self.start_time = _time()
        # Select the global/local trace handlers from the requested mode.
        if countcallers:
            self.globaltrace = self.globaltrace_trackcallers
        elif countfuncs:
            self.globaltrace = self.globaltrace_countfuncs
        elif trace and count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace_and_count
        elif trace:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_trace
        elif count:
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_count
        else:
            # verrou addition: synchronization-only tracing.
            self.globaltrace = self.globaltrace_lt
            self.localtrace = self.localtrace_verrou
            # Ahem -- do nothing?  Okay.
            #self.donothing = 1

    def run(self, cmd):
        # Trace 'cmd' executed in the __main__ namespace.
        import __main__
        dict = __main__.__dict__
        self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals=None, locals=None):
        # Trace 'cmd' in the given namespaces; the verrou synchro session
        # is marked at start and finalized even if 'cmd' raises.
        if globals is None: globals = {}
        if locals is None: locals = {}
        if not self.donothing:
            _settrace(self.globaltrace)
        try:
            self.synchroLib.synchro("runctx",0)
            exec(cmd, globals, locals)
        finally:
            if not self.donothing:
                _unsettrace()
            self.synchroLib.finalyze()

    def runfunc(self, func, *args, **kw):
        # Trace a single function call and return its result.
        result = None
        if not self.donothing:
            sys.settrace(self.globaltrace)
        try:
            result = func(*args, **kw)
        finally:
            if not self.donothing:
                sys.settrace(None)
        return result

    def file_module_function_of(self, frame):
        # Return (filename, modulename, funcname) for 'frame'; when the
        # function is a method, funcname is qualified with its class name
        # (recovered via gc.get_referrers and memoized in _caller_cache).
        code = frame.f_code
        filename = code.co_filename
        if filename:
            modulename = _modname(filename)
        else:
            modulename = None
        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            ## use of gc.get_referrers() was suggested by Michael Hudson
            # all functions which refer to this code object
            funcs = [f for f in gc.get_referrers(code)
                     if inspect.isfunction(f)]
            # require len(func) == 1 to avoid ambiguity caused by calls to
            # new.function(): "In the face of ambiguity, refuse the
            # temptation to guess."
            if len(funcs) == 1:
                dicts = [d for d in gc.get_referrers(funcs[0])
                         if isinstance(d, dict)]
                if len(dicts) == 1:
                    classes = [c for c in gc.get_referrers(dicts[0])
                               if hasattr(c, "__bases__")]
                    if len(classes) == 1:
                        # ditto for new.classobj()
                        clsname = classes[0].__name__
                        # cache the result - assumption is that new.* is
                        # not called later to disturb this relationship
                        # _caller_cache could be flushed if functions in
                        # the new module get called.
                        self._caller_cache[code] = clsname
        if clsname is not None:
            funcname = "%s.%s" % (clsname, funcname)
        return filename, modulename, funcname

    def globaltrace_trackcallers(self, frame, why, arg):
        """Handler for call events.
        Adds information about who called who to the self._callers dict.
        """
        if why == 'call':
            # XXX Should do a better job of identifying methods
            this_func = self.file_module_function_of(frame)
            parent_func = self.file_module_function_of(frame.f_back)
            self._callers[(parent_func, this_func)] = 1

    def globaltrace_countfuncs(self, frame, why, arg):
        """Handler for call events.
        Adds (filename, modulename, funcname) to the self._calledfuncs dict.
        """
        if why == 'call':
            this_func = self.file_module_function_of(frame)
            self._calledfuncs[this_func] = 1

    def globaltrace_lt(self, frame, why, arg):
        """Handler for call events.
        If the code block being entered is to be ignored, returns `None',
        else returns self.localtrace.
        """
        if why == 'call':
            # verrou addition: one synchronization point per call event.
            self.synchroLib.synchro("globaltrace_lt",0)
            code = frame.f_code
            filename = frame.f_globals.get('__file__', None)
            if filename:
                # XXX _modname() doesn't work right for packages, so
                # the ignore support won't work right for packages
                modulename = _modname(filename)
                if modulename is not None:
                    ignore_it = self.ignore.names(filename, modulename)
                    if not ignore_it:
                        if self.trace:
                            print((" --- modulename: %s, funcname: %s"
                                   % (modulename, code.co_name)))
                        return self.localtrace
            else:
                return None

    def localtrace_trace_and_count(self, frame, why, arg):
        # Line handler: count the hit, print the line, emit a synchro point.
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
            if self.start_time:
                print('%.2f' % (_time() - self.start_time), end=' ')
            bname = os.path.basename(filename)
            print("%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)), end='')
            self.synchroLib.synchro(bname,lineno)
        return self.localtrace

    def localtrace_trace(self, frame, why, arg):
        # Line handler: print the line and emit a synchro point.
        if why == "line":
            # record the file name and line number of every trace
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            if self.start_time:
                print('%.2f' % (_time() - self.start_time), end=' ')
            bname = os.path.basename(filename)
            print("%s(%d): %s" % (bname, lineno,
                                  linecache.getline(filename, lineno)), end='')
            self.synchroLib.synchro(bname,lineno)
        return self.localtrace

    def localtrace_count(self, frame, why, arg):
        # Line handler: count the hit and emit a synchro point.
        if why == "line":
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            key = filename, lineno
            self.counts[key] = self.counts.get(key, 0) + 1
            self.synchroLib.synchro(filename,lineno)
        return self.localtrace

    def localtrace_verrou(self, frame, why, arg):
        # verrou addition: line handler that only emits synchro points.
        if why == "line":
            filename = frame.f_code.co_filename
            lineno = frame.f_lineno
            self.synchroLib.synchro(filename,lineno)
        return self.localtrace

    def results(self):
        # Package the collected data into a CoverageResults object.
        return CoverageResults(self.counts, infile=self.infile,
                               outfile=self.outfile,
                               calledfuncs=self._calledfuncs,
                               callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
    """Command-line driver: parse the trace-style options, then either
    print a report from a counts file or run the target script under the
    verrou-synchronizing Trace object."""
    import getopt
    if argv is None:
        argv = sys.argv
    try:
        opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
                                        ["help", "version", "trace", "count",
                                         "report", "no-report", "summary",
                                         "file=", "missing",
                                         "ignore-module=", "ignore-dir=",
                                         "coverdir=", "listfuncs",
                                         "trackcalls", "timing"])
    except getopt.error as msg:
        sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
        sys.stderr.write("Try `%s --help' for more information\n"
                         % sys.argv[0])
        sys.exit(1)
    trace = 0
    count = 0
    report = 0
    no_report = 0
    # verrou addition: always on, so running with no mode option is valid
    # and selects the synchronization-only localtrace (see Trace.__init__).
    verrou = 1
    counts_file = None
    missing = 0
    ignore_modules = []
    ignore_dirs = []
    coverdir = None
    summary = 0
    listfuncs = False
    countcallers = False
    timing = False
    for opt, val in opts:
        if opt == "--help":
            _usage(sys.stdout)
            sys.exit(0)
        if opt == "--version":
            sys.stdout.write("trace 2.0\n")
            sys.exit(0)
        if opt == "-T" or opt == "--trackcalls":
            countcallers = True
            continue
        if opt == "-l" or opt == "--listfuncs":
            listfuncs = True
            continue
        if opt == "-g" or opt == "--timing":
            timing = True
            continue
        if opt == "-t" or opt == "--trace":
            trace = 1
            continue
        if opt == "-c" or opt == "--count":
            count = 1
            continue
        if opt == "-r" or opt == "--report":
            report = 1
            continue
        if opt == "-R" or opt == "--no-report":
            no_report = 1
            continue
        if opt == "-f" or opt == "--file":
            counts_file = val
            continue
        if opt == "-m" or opt == "--missing":
            missing = 1
            continue
        if opt == "-C" or opt == "--coverdir":
            coverdir = val
            continue
        if opt == "-s" or opt == "--summary":
            summary = 1
            continue
        if opt == "--ignore-module":
            for mod in val.split(","):
                ignore_modules.append(mod.strip())
            continue
        if opt == "--ignore-dir":
            for s in val.split(os.pathsep):
                s = os.path.expandvars(s)
                # should I also call expanduser? (after all, could use $HOME)
                s = s.replace("$prefix",
                              os.path.join(sys.base_prefix, "lib",
                                           "python" + sys.version[:3]))
                s = s.replace("$exec_prefix",
                              os.path.join(sys.base_exec_prefix, "lib",
                                           "python" + sys.version[:3]))
                s = os.path.normpath(s)
                ignore_dirs.append(s)
            continue
        assert 0, "Should never get here"
    # Option consistency checks.
    if listfuncs and (count or trace):
        _err_exit("cannot specify both --listfuncs and (--trace or --count)")
    if not (count or trace or report or listfuncs or countcallers or verrou):
        _err_exit("must specify one of --trace, --count, --report, "
                  "--listfuncs, or --trackcalls")
    if report and no_report:
        _err_exit("cannot specify both --report and --no-report")
    if report and not counts_file:
        _err_exit("--report requires a --file")
    if no_report and len(prog_argv) == 0:
        _err_exit("missing name of file to run")
    # everything is ready
    if report:
        # Report-only mode: no code is executed.
        results = CoverageResults(infile=counts_file, outfile=counts_file)
        results.write_results(missing, summary=summary, coverdir=coverdir)
    else:
        # Run mode: execute prog_argv[0] under the Trace object.
        sys.argv = prog_argv
        progname = prog_argv[0]
        sys.path[0] = os.path.split(progname)[0]
        t = Trace(count, trace, countfuncs=listfuncs,
                  countcallers=countcallers, ignoremods=ignore_modules,
                  ignoredirs=ignore_dirs, infile=counts_file,
                  outfile=counts_file, timing=timing)
        try:
            # verrou addition: synchro point before compiling the target.
            t.synchroLib.synchro("compile",0)
            with open(progname) as fp:
                code = compile(fp.read(), progname, 'exec')
            # try to emulate __main__ namespace as much as possible
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
                '__cached__': None,
            }
            t.runctx(code, globs, globs)
        except OSError as err:
            _err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
        except SystemExit:
            pass
        results = t.results()
        if not no_report:
            results.write_results(missing, summary=summary, coverdir=coverdir)
# Deprecated API
def usage(outfile):
    """Deprecated alias for _usage(); emits a DeprecationWarning."""
    _warn("The trace.usage() function is deprecated", DeprecationWarning, 2)
    _usage(outfile)
class Ignore(_Ignore):
    """Deprecated alias for _Ignore; warns on instantiation."""

    def __init__(self, modules=None, dirs=None):
        _warn("The class trace.Ignore is deprecated", DeprecationWarning, 2)
        super().__init__(modules, dirs)
def modname(path):
    """Deprecated alias for _modname(); emits a DeprecationWarning."""
    _warn("The trace.modname() function is deprecated", DeprecationWarning, 2)
    return _modname(path)
def fullmodname(path):
    """Deprecated alias for _fullmodname(); emits a DeprecationWarning."""
    _warn("The trace.fullmodname() function is deprecated", DeprecationWarning, 2)
    return _fullmodname(path)
def find_lines_from_code(code, strs):
    """Deprecated alias for _find_lines_from_code(); emits a DeprecationWarning."""
    _warn("The trace.find_lines_from_code() function is deprecated",
          DeprecationWarning, 2)
    return _find_lines_from_code(code, strs)
def find_lines(code, strs):
    """Deprecated alias for _find_lines(); emits a DeprecationWarning."""
    _warn("The trace.find_lines() function is deprecated", DeprecationWarning, 2)
    return _find_lines(code, strs)
def find_strings(filename, encoding=None):
    """Deprecated alias for _find_strings(); emits a DeprecationWarning."""
    _warn("The trace.find_strings() function is deprecated",
          DeprecationWarning, 2)
    # Bug fix: forward the caller's encoding. The original hard-coded
    # encoding=None, silently discarding the argument (stdlib trace.py
    # forwards it).
    return _find_strings(filename, encoding=encoding)
def find_executable_linenos(filename):
    """Deprecated alias for _find_executable_linenos(); emits a DeprecationWarning."""
    _warn("The trace.find_executable_linenos() function is deprecated",
          DeprecationWarning, 2)
    return _find_executable_linenos(filename)
if __name__=='__main__':
    # Run the command-line driver when invoked as a script.
    main()
| 32,905 | 35.643653 | 82 | py |
verrou | verrou-master/synchroLib/verrouPyBinding.py | #!/usr/bin/python3
import sys, platform
import ctypes, ctypes.util
import os
import os.path
class bindingSynchroLib:
    """ctypes wrapper around verrouSynchroLib.so (synchronization points)."""

    def __init__(self, pathLib=None):
        # Load the given shared object, or look it up in the default places.
        if pathLib is not None:
            libPath = pathLib
        else:
            libPath = searchDefaultPath("verrouSynchroLib.so")
        self.lib = ctypes.CDLL(libPath)
        self.lib.verrou_synchro.argtypes = [ctypes.c_char_p, ctypes.c_int]

    def verrou_synchro_init(self):
        """Open the synchronization session."""
        print("bindingSynchroLib : verrou_synchro_init")
        self.lib.verrou_synchro_init()

    def verrou_synchro_finalyze(self):
        """Close the synchronization session (sic: library spelling)."""
        print("bindingSynchroLib : verrou_synchro_finalyze")
        self.lib.verrou_synchro_finalyze()

    def verrou_synchro(self, string, index):
        """Declare one synchronization point named 'string' at 'index'."""
        print("bindingSynchroLib : verrou_synchro", string, index)
        self.lib.verrou_synchro(string.encode('utf-8'), index)
class bindingVerrouCLib:
    """ctypes wrapper around verrouCBindingLib.so (verrou client requests)."""

    def __init__(self, pathLib):
        """Load the client-request library and declare its C prototypes.

        pathLib: path to verrouCBindingLib.so, or None to search the
        default locations via searchDefaultPath().
        """
        # Bug fix: the original unconditionally re-ran
        # ctypes.CDLL(pathLib) after this conditional, discarding the
        # searchDefaultPath() result (and loading the main program when
        # pathLib was None).
        if pathLib is not None:
            self.lib = ctypes.CDLL(pathLib)
        else:
            self.lib = ctypes.CDLL(searchDefaultPath("verrouCBindingLib.so"))
        self.lib.c_verrou_start_instrumentation.argtypes = []
        self.lib.c_verrou_stop_instrumentation.argtypes = []
        self.lib.c_verrou_start_determinitic.argtypes = [ctypes.c_int]
        self.lib.c_verrou_stop_determinitic.argtypes = [ctypes.c_int]
        self.lib.c_verrou_display_counters.argtypes = []
        self.lib.c_verrou_dump_cover.argtypes = []
        self.lib.c_verrou_dump_cover.restype = ctypes.c_uint
        self.lib.c_verrou_count_fp_instrumented.argtypes = []
        self.lib.c_verrou_count_fp_not_instrumented.argtypes = []
        self.lib.c_verrou_count_fp_instrumented.restype = ctypes.c_uint
        self.lib.c_verrou_count_fp_not_instrumented.restype = ctypes.c_uint

    def verrou_start_instrumentation(self):
        """Enable verrou FP instrumentation."""
        self.lib.c_verrou_start_instrumentation()

    def verrou_stop_instrumentation(self):
        """Disable verrou FP instrumentation."""
        self.lib.c_verrou_stop_instrumentation()

    def verrou_start_determinitic(self, level):
        """Enter a deterministic section (sic: library spelling)."""
        self.lib.c_verrou_start_determinitic(level)

    def verrou_stop_determinitic(self, level):
        """Leave a deterministic section (sic: library spelling)."""
        self.lib.c_verrou_stop_determinitic(level)

    def verrou_dump_cover(self):
        """Dump coverage data; returns the library's unsigned status."""
        return self.lib.c_verrou_dump_cover()

    def verrou_display_counters(self):
        """Print the verrou operation counters."""
        self.lib.c_verrou_display_counters()

    def verrou_count_fp_instrumented(self):
        """Return the number of instrumented FP operations."""
        return self.lib.c_verrou_count_fp_instrumented()

    def verrou_count_fp_not_instrumented(self):
        """Return the number of non-instrumented FP operations."""
        return self.lib.c_verrou_count_fp_not_instrumented()
def searchDefaultPath(fileName):
    """Look for 'fileName' in ./, this script's directory and ../lib.

    Returns the first existing candidate path; prints an error to stderr
    and exits with status 42 when the file is nowhere to be found.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    prefixes = ["./", here, os.path.join(here, "..", "lib")]
    print(prefixes, file=sys.stderr)
    for prefix in prefixes:
        candidate = os.path.join(prefix, fileName)
        if os.path.exists(candidate):
            print("absPath: ", candidate, file=sys.stderr)
            return candidate
    print("FileName %s not found" % (fileName), file=sys.stderr)
    sys.exit(42)
if __name__=="__main__":
    # Smoke test: load both shared libraries from the current directory
    # and exercise the client requests around a few float operations.
    verrouSynchroLib="./verrouSynchroLib.so"
    verrouCBindingLib="./verrouCBindingLib.so"
    bindVerrou=bindingVerrouCLib(verrouCBindingLib)
    bindVerrou.verrou_stop_instrumentation()
    bindVerrou.verrou_display_counters()
#    print("dumpCover : ",bindVerrou.verrou_dump_cover())
#    print("dumpCover : ",bindVerrou.verrou_dump_cover())
#    print("dumpCover : ",bindVerrou.verrou_dump_cover())
    # Counters before / after an uninstrumented float multiply.
    print("avt a binding fp: ", bindVerrou.verrou_count_fp_instrumented())
    print("avt a binding not fp: ", bindVerrou.verrou_count_fp_not_instrumented())
    a=3.*4.
    print(a)
    print("apres a binding fp: ", bindVerrou.verrou_count_fp_instrumented())
    print("apres a binding not fp: ", bindVerrou.verrou_count_fp_not_instrumented())
    # Synchro points interleaved with instrumented sections.
    bindSynchro=bindingSynchroLib(verrouSynchroLib)
    bindSynchro.verrou_synchro_init()
    bindSynchro.verrou_synchro("toto",10)
    bindVerrou.verrou_display_counters()
    bindVerrou.verrou_start_instrumentation()
    a=3.*4
    bindVerrou.verrou_stop_instrumentation()
    bindSynchro.verrou_synchro("toto",10)
    bindVerrou.verrou_start_instrumentation()
    a*=5
    bindVerrou.verrou_stop_instrumentation()
    print(a)
    bindSynchro.verrou_synchro("toto",10)
    bindSynchro.verrou_synchro_finalyze()
| 4,470 | 29.209459 | 84 | py |
verrou | verrou-master/synchroLib/tstDDPython/Muller.py | #!/usr/bin/python3
def muller(nt, verbose=False):
    """Run nt steps of Muller's pathological recurrence and print the result.

    The sequence u(n+1) = 111 - (1130 - 3000/u(n-1)) / u(n), started from
    u0 = 11/2 and u1 = 61/11, is numerically unstable: rounding drives it
    away from the exact limit. The floating-point operation order of the
    original implementation is preserved exactly.
    """
    prev, cur = 11. / 2., 61. / 11.
    for it in range(nt):
        temp0 = 3000. / prev
        temp1 = 1130. - temp0
        temp2 = temp1 / cur
        nxt = 111. - temp2
        if verbose:
            print("it: %i\tx2: %f\ttemp0: %f\ttemp1: %f\ttemp2: %f" % (it, nxt, temp0, temp1, temp2))
        prev, cur = cur, nxt
    print("x[%i]=%f" % (nt, cur))
if __name__=="__main__":
    # 12 iterations: enough for rounding to visibly perturb the sequence.
    muller(12)
| 472 | 15.310345 | 94 | py |
verrou | verrou-master/synchroLib/tstDDPython/extractOrCmp.py | #!/usr/bin/python3
import sys
import os
def extractValue(rep):
    """Return the float following "x[12]=" in rep/res.dat, or None when absent."""
    resultFile = os.path.join(rep, "res.dat")
    with open(resultFile) as handle:
        for currentLine in handle:
            if currentLine.startswith("x[12]="):
                return float(currentLine.partition("=")[2])
    return None
if __name__=="__main__":
    # One directory argument: print the extracted value.
    if len(sys.argv)==2:
        print(extractValue(sys.argv[1]))
    # Two directories: compare them; exit 0 when within 1% relative
    # difference of the reference (first argument), 1 otherwise.
    if len(sys.argv)==3:
        valueRef=extractValue(sys.argv[1])
        value=extractValue(sys.argv[2])
        relDiff=abs((value-valueRef)/valueRef)
        if relDiff < 1.e-2:
            sys.exit(0)
        else:
            sys.exit(1)
| 603 | 22.230769 | 57 | py |
verrou | verrou-master/unitTest/check-verrou-dd-synchro/cmp.py | #!/usr/bin/python3
import sys
import os
def extract(rep):
    """Read rep/res.out and return the float following ':' on its first line."""
    with open(os.path.join(rep, "res.out")) as handle:
        firstLine = handle.readline()
    return float(firstLine.split(":")[1])
def cmpRep(repRef, rep):
    """Compare the values extracted from two run directories.

    Returns True when the value from *rep* lies within 5% (relative)
    of the reference value extracted from *repRef*.
    """
    referenceValue = extract(repRef)
    candidateValue = extract(rep)
    print("valueRef: ", referenceValue)
    print("value: ", candidateValue)
    tolerance = 0.05 * abs(referenceValue)
    return abs(candidateValue - referenceValue) < tolerance
if __name__=="__main__":
    # Two directory arguments: compare them (exit 0 on agreement, 1 otherwise).
    if len(sys.argv)==3:
        ok=cmpRep(sys.argv[1],sys.argv[2])
        if ok:
            sys.exit(0)
        else:
            sys.exit(1)
    # One directory argument: just print the extracted value.
    if len(sys.argv)==2:
        print(extract(sys.argv[1]))
        sys.exit(0)
    # Wrong argument count.
    print("Use one or two args")
    sys.exit(1)
| 690 | 19.323529 | 56 | py |
verrou | verrou-master/unitTest/check-libM/genTab.py | #!/usr/bin/env python3
import sys
import math
def readFile(fileName):
    """Parse a whitespace-separated table with a header line.

    The first line of *fileName* holds the column names; the remaining
    lines hold float values.  Returns {columnName: (min, max)} over the
    data rows.

    Fixes over the original: removed the dead local ``fileNameKey``
    (recomputed on every iteration and never used) and the file handle
    is now closed via a context manager.
    """
    with open(fileName) as handle:
        data = handle.readlines()
    keyData = data[0].split()
    brutData = [line.split() for line in data[1:]]
    res = {}
    for index, key in enumerate(keyData):
        columnValues = [float(row[index]) for row in brutData]
        res[key] = (min(columnValues), max(columnValues))
    return res
def computeEvalError(dataNative, data):
    """Estimate significant bits of each entry of *data* w.r.t. the native result.

    For every key, the worst absolute deviation of the (min, max) interval
    found in *data* from the native (IEEE nearest) value is converted to a
    bit count via -log2 of the relative error.
    """
    res = {}
    for key, nativeInterval in dataNative.items():
        resIEEE = float(nativeInterval[0])
        low, high = data[key]
        worstDeviation = max(abs(high - resIEEE), abs(low - resIEEE))
        res[key] = -math.log2(worstDeviation / resIEEE)
    return res
def loadRef(fileName, num=2):
    """Load reference significant-bit counts from *fileName*.

    Each line looks like ``<Type> <Correction> : <bits>`` where <bits> is
    either a plain mantissa size ("24"/"53") or an mpfi interval
    "[low,up]" whose two bounds must agree exactly (otherwise the script
    aborts, asking for more mpfi precision).

    Returns {(type, correction): bits}.  *num* is kept for interface
    compatibility and is unused.
    """
    res = {}
    for rawLine in open(fileName):
        parts = rawLine.split(":")
        words = parts[0].split()
        typeRealtype = words[0]
        correction = words[1]
        nbBitStr = parts[1].strip()
        if nbBitStr in ("24", "53"):
            res[(typeRealtype, correction)] = float(nbBitStr)
            continue
        valueLow, valueUp = nbBitStr[1:-1].split(",")
        if float(valueUp) != float(valueLow):
            print("Please Increase the mpfi precision")
            sys.exit()
        res[(typeRealtype, correction)] = float(valueUp)
    return res
def main(reference=None):
    """Build tabAster.tex (LaTeX table) and testReg (regression dump) from res<key>.dat files.

    *reference* optionally maps (type, correction) -> IEEE bit count (see
    loadRef); when given, an extra "IEEE Error" line is emitted in the table.
    """
    output=open("tabAster.tex","w")
    outputReg=open("testReg","w")
    # One res<key>.dat file per implementation; "Native" is the reference.
    keys=["Native", "Randominterlibm", "Randomverrou", "Randomverrou+interlibm"]
    data={}
    strLatex=""  # NOTE(review): unused leftover
    for i in range(len(keys)):
        key=keys[i]
        data[key]=readFile("res"+key+".dat")
    # for key in sorted(keys[1:]):
    # Regression dump: significant bits of each non-native implementation.
    for i in range(1,len(keys)):
        key=keys[i]
        outputReg.write(key+"\n")
        evalError=computeEvalError(data["Native"], data[key])
        for keyCase in sorted(evalError.keys()):
            outputReg.write(keyCase +" "+str(evalError[keyCase])+"\n")
    # LaTeX table header.
    output.write(r"\begin{table}" +" \n")
    output.write(r"\begin{center}" +" \n")
    output.write(r"\begin{tabular}{l@{~}lccccc}\toprule" +" \n")
    output.write(r"& & \multicolumn{2}{c}{single precision}& \multicolumn{2}{c}{double precision}\\"+"\n"+
                 r"&& first & second & first & second \\ \midrule"+"\n")
    # Optional IEEE reference line (from interval arithmetic).
    if reference!=None:
        output.write("&IEEE Error & %.2f & %.2f & %.2f & %.2f"%(
            reference[("Float","Before")],reference[("Float","After")],
            reference[("Double","Before")], reference[("Double","After")])
                     + r"\\\midrule"+"\n")
    # One table line per assessed technique.
    for i in range(1,len(keys)):
        key=keys[i]
        evalError=computeEvalError(data["Native"], data[key])
        keyConvert={"Randominterlibm": r"\textit{(i)}&interlibm",
                    "Randomverrou": r"\textit{(ii)}&verrou",
                    "Randomverrou+interlibm":r"\textit{(iii)}&verrou+interlib"}
        lineStr=keyConvert[key]+ " "
        for typeFP in ["Float","Double"]:
            lineStr+=r"&%.2f & %.2f "%(evalError["BeforeCorrection_"+typeFP], evalError["AfterCorrection_"+typeFP])
        lineStr+=r"\\"+"\n"
        output.write(lineStr)
    # LaTeX table footer.
    output.write(r"\bottomrule"+"\n")
    output.write(r"\end{tabular}"+"\n")
    output.write(r"\end{center}" +" \n")
    output.write(r"\caption{Number of significant bits for 4~implementations of function $f(a, a+6.ulp(a))$, as assessed by 3~techniques.}"+"\n")
    output.write(r"\label{sdAster}"+"\n")
    output.write(r"\end{table}"+"\n")
if __name__=="__main__":
    # The reference file must provide all 4 (type, correction) entries;
    # otherwise fall back to a table without the IEEE line.
    reference=loadRef("reference.dat")
    if len(reference)!=4:
        reference=None
    main(reference)
| 3,645 | 33.396226 | 145 | py |
verrou | verrou-master/unitTest/checkRounding/runCheck.py | #!/usr/bin/env python3
# This file is part of Verrou, a FPU instrumentation tool.
# Copyright (C) 2014-2021 EDF
# F. Févotte <francois.fevotte@edf.fr>
# B. Lathuilière <bruno.lathuiliere@edf.fr>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307, USA.
# The GNU Lesser General Public License is contained in the file COPYING.
import os
import sys
import subprocess as sp
import shlex
import re
stdRounding=["nearest", "toward_zero", "downward", "upward" ]
valgrindRounding=stdRounding + ["random", "random_det", "random_comdet", "random_scomdet", "sr_monotonic",
"average", "average_det", "average_comdet","average_scomdet",
"float", "farthest", "memcheck" ,"ftz", "away_zero", "prandom", "prandom_0.5", "prandom_det", "prandom_comdet" ]
def printRes(res):
    """Pretty-print a (stdout lines, stderr lines) pair, dropping each line's last character."""
    stdoutLines, stderrLines = res[0], res[1]
    print("stdout:")
    for outLine in stdoutLines:
        print(outLine[:-1])
    print("cerr :")
    for errLine in stderrLines:
        print(errLine[:-1])
def runCmd(cmd, expectedResult=0, printCmd=True, printCwd=True):
    """Run *cmd* and return its (stdout_lines, stderr_lines).

    The command line is tokenised with shlex and executed with both output
    streams captured.  When the exit status differs from *expectedResult*,
    a diagnostic is printed and the whole script aborts with status 42.
    """
    if printCmd:
        print("Cmd:", cmd)
    if printCwd:
        print("Cwd:", os.getcwd())
    # Launch the command with both output streams captured.
    child = sp.Popen(args=shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
    rawOut, rawErr = child.communicate()
    outLines = rawOut.decode('utf8').splitlines()
    errLines = rawErr.decode('utf8').splitlines()
    status = child.wait()
    # Abort on an unexpected exit status.
    if status != expectedResult:
        print("Error with the execution of : " + cmd + "\n"
              + "\t error is " + str(status) + "\n"
              + "\t expectedResult is " + str(expectedResult))
        printRes((outLines, errLines))
        sys.exit(42)
    return (outLines, errLines)
class cmdPrepare:
    """Builds and runs the test executable either natively ("fenv") or under valgrind.

    The valgrind binary is taken from $INSTALLPATH/bin/valgrind.
    """
    def __init__(self, arg):
        # arg: path of the checkRounding test executable.
        self.valgrindPath=os.path.join(os.environ["INSTALLPATH"], "bin", "valgrind")
        self.execPath=arg
    def run(self,env="fenv", rounding="nearest"):
        """Run the executable in *env* ("fenv"/"valgrind") with *rounding* and return runCmd's output."""
        self.checkRounding(env, rounding)
        cmd=None
        if env=="fenv":
            # Native run: the executable switches the FPU rounding mode itself.
            cmd=self.execPath + " fenv "+ rounding
        if env=="valgrind":
            if rounding=="memcheck":
                # Plain memcheck run: valgrind without verrou instrumentation.
                cmd=self.valgrindPath + " --tool=memcheck " +self.execPath +" valgrind"
            else:
                # prandom_<p>, prandom_det_<p>, prandom_comdet_<p> carry the
                # pvalue in their name: translate it into --prandom-pvalue.
                roundingStr=rounding
                for prand in ["prandom_det_", "prandom_comdet_", "prandom_"]:
                    if rounding in ["prandom_det", "prandom_comdet", "prandom"]:
                        break
                    if rounding.startswith(prand):
                        end=rounding.replace(prand,"")
                        value=float(end)  # validates that the suffix is a number
                        # NOTE(review): replace("_","") strips ALL underscores, so
                        # "prandom_det_" becomes "prandomdet" — looks wrong for the
                        # det/comdet variants (unused by the current rounding list);
                        # confirm against verrou's --rounding-mode spelling.
                        roundingStr=prand.replace("_","")+" --prandom-pvalue="+end
                        break
                cmd=self.valgrindPath + " --tool=verrou --vr-verbose=no --check-inf=no --rounding-mode=" + roundingStr+ " " +self.execPath +" valgrind"
        return runCmd(cmd)
#        print cmd
    def checkRounding(self, env, rounding):
        """Abort (exit -1) unless *rounding* is valid for *env*."""
        if env=="fenv" and rounding in stdRounding:
            return True
        if env=="valgrind" and rounding in valgrindRounding:
            return True
        print("Failure in checkRounding")
        sys.exit(-1)
def generatePairOfAvailableComputation():
    """Return every (environment, rounding) pair to run: native fenv modes first, then valgrind modes."""
    fenvPairs = [("fenv", mode) for mode in stdRounding]
    valgrindPairs = [("valgrind", mode) for mode in valgrindRounding]
    return fenvPairs + valgrindPairs
def verrouCerrFilter(res):
    """Strip valgrind pid markers and run-dependent verrou banner lines from stderr.

    The pid prefix (e.g. "==2958==", and its "--2958--" variant) is taken
    from the first line; lines reporting the simulated rounding mode, the
    RNG seed or the prandom pvalue are dropped because they vary from run
    to run.
    """
    pidMarker = res[0].split()[0]
    altMarker = pidMarker.replace("==", "--")
    filtered = []
    for rawLine in res:
        cleaned = rawLine.replace(pidMarker, "").replace(altMarker, "")
        isBanner = (cleaned.startswith(" Backend verrou simulating ")
                    and cleaned.endswith(" rounding mode"))
        isSeed = cleaned.startswith(" First seed : ")
        isPvalue = cleaned.strip().startswith("PRANDOM: pvalue=")
        if not (isBanner or isSeed or isPvalue):
            filtered.append(cleaned)
    return filtered
def getDiff(outPut, testName):
    """Return the trailing float of the stdout line starting with "testName:", or None if absent."""
    prefix = testName + ":"
    stdoutLines = outPut[0]
    for candidate in stdoutLines:
        if candidate.startswith(prefix):
            lastField = candidate.split()[-1]
            return float(lastField)
    print("unknown testName: ", testName)
    return None
class errorCounter:
    """Accumulator for test outcome counts (ok / ko / warning)."""

    def __init__(self, ok=0, ko=0, warn=0):
        self.ok = ok
        self.ko = ko
        self.warn = warn

    def incOK(self, v):
        """Increment the OK count by *v*."""
        self.ok += v

    def incKO(self, v):
        """Increment the KO count by *v*."""
        self.ko += v

    def incWarn(self, v):
        """Increment the warning count by *v*."""
        self.warn += v

    def add(self, tupleV):
        """Accumulate an (ok, ko, warn) tuple into the counters.

        BUGFIX: the original *assigned* the tuple values instead of adding
        them, contradicting the method name and the inc*/__add__ methods.
        """
        self.ok += tupleV[0]
        self.ko += tupleV[1]
        self.warn += tupleV[2]

    def __add__(self, v):
        # NOTE: mutates self and returns it, so "a + b" updates a in place;
        # kept as-is because the driver relies on "eCount += ..." semantics.
        self.ok += v.ok
        self.ko += v.ko
        self.warn += v.warn
        return self

    def printSummary(self):
        """Print the three counters."""
        print("error summary")
        print("\tOK : "+str(self.ok))
        print("\tKO : "+str(self.ko))
        print("\tWarning : "+str(self.warn))
def checkVerrouInvariant(allResult):
    """Check that the filtered verrou stderr (operation counters) is identical for every verrou rounding mode.

    Changing the rounding mode must not change how many operations are
    instrumented; the "nearest" run serves as reference.  Returns an
    errorCounter with one ok/ko per compared mode.
    """
    ref=allResult[("valgrind", "nearest")][1]
    ko=0
    ok=0
    for rounding in valgrindRounding:
        if rounding in ["nearest", "memcheck"]:
            #nearest : because it is the ref
            #memcheck : because it is not verrou
            continue
        (cout, cerr)=allResult[("valgrind", rounding)]
        if cerr!=ref:
            # Print every diverging line pair to ease debugging.
            for i in range(len(ref)):
                if cerr[i]!=ref[i]:
                    print("cerr:", cerr[i])
                    print("ref: ", ref[i],"\n")
            print("KO : incoherent number of operation ("+rounding+")")
            ko+=1
        else:
            print("OK : coherent number of operation ("+rounding+")")
            ok+=1
    return errorCounter(ok, ko, 0)
def diffRes(res1, res2):
    """Count differing lines between two equal-length line lists.

    Each mismatching pair is printed.  A length mismatch is fatal and
    aborts the script with exit status -1 (same as the original).
    """
    if len(res1) != len(res2):
        print("Wrong number of line")
        print("fenv", res1)
        print("val", res2)
        sys.exit(-1)
    mismatchCount = 0
    for lineA, lineB in zip(res1, res2):
        if lineA == lineB:
            continue
        print("\tfenv: " + lineA.strip())
        print("\tfval: " + lineB.strip() + "\n")
        mismatchCount += 1
    return mismatchCount
def checkRoundingInvariant(allResult):
    """Check that, for each standard rounding mode, verrou reproduces the native fenv stdout exactly."""
    okCount = 0
    koCount = 0
    for mode in stdRounding:
        nativeOut = allResult[("fenv", mode)][0]
        verrouOut = allResult[("valgrind", mode)][0]
        if nativeOut == verrouOut:
            okCount += 1
            print("OK : coherent comparison between fenv and valgrind ("+mode+")")
        else:
            print("KO : incoherent comparison between fenv and valgrind ("+mode+")")
            koCount += diffRes(nativeOut, verrouOut)
    return errorCounter(okCount, koCount, 0)
# def checkOrder(testName, *args):
# tabValue=[x[0] for x in args]
# nameValue=[x[0] for x in args]
# for i in range(len(tabValue)-1):
class assertRounding:
    """Gathers, for one test case, the measured deviation under every rounding mode and offers assert helpers.

    NOTE(review): reads the module-level ``allResult`` dict filled by the
    __main__ driver instead of receiving it as a parameter.  Failures are
    reported as "Warning" unless the native nearest result matches the
    memcheck (plain valgrind) result — i.e. valgrind faithfully reproduces
    IEEE nearest on this platform — in which case they count as "KO".
    """
    def __init__(self, testName):
        self.testName=testName
        # Deviations measured outside verrou (native fenv and plain memcheck).
        self.diff_nearestMemcheck=getDiff(allResult[("valgrind", "memcheck")], testName)
        self.diff_nearestNative=getDiff(allResult[("fenv", "nearest")], testName)
        self.diff_toward_zeroNative =getDiff(allResult[("fenv", "toward_zero")], testName)
        self.diff_downwardNative =getDiff(allResult[("fenv", "downward")], testName)
        self.diff_upwardNative =getDiff(allResult[("fenv", "upward")], testName)
        # Deviations under verrou, one attribute per rounding mode; the
        # assert helpers look these up by name via eval("self.diff_"+name).
        self.diff_nearest =getDiff(allResult[("valgrind", "nearest")], testName)
        self.diff_toward_zero =getDiff(allResult[("valgrind", "toward_zero")], testName)
        self.diff_away_zero =getDiff(allResult[("valgrind", "away_zero")], testName)
        self.diff_downward =getDiff(allResult[("valgrind", "downward")], testName)
        self.diff_upward =getDiff(allResult[("valgrind", "upward")], testName)
        self.diff_float =getDiff(allResult[("valgrind", "float")], testName)
        self.diff_farthest =getDiff(allResult[("valgrind", "farthest")], testName)
        self.diff_ftz =getDiff(allResult[("valgrind", "ftz")], testName)
        self.diff_random =getDiff(allResult[("valgrind", "random")], testName)
        self.diff_random_det =getDiff(allResult[("valgrind", "random_det")], testName)
        self.diff_random_comdet =getDiff(allResult[("valgrind", "random_comdet")], testName)
        self.diff_random_scomdet=getDiff(allResult[("valgrind", "random_scomdet")], testName)
        self.diff_prandom =getDiff(allResult[("valgrind", "prandom")], testName)
        self.diff_prandom_half =getDiff(allResult[("valgrind", "prandom_0.5")], testName)
        self.diff_prandom_det =getDiff(allResult[("valgrind", "prandom_det")], testName)
        self.diff_prandom_comdet =getDiff(allResult[("valgrind", "prandom_comdet")], testName)
        self.diff_average =getDiff(allResult[("valgrind", "average")], testName)
        self.diff_average_det =getDiff(allResult[("valgrind", "average_det")], testName)
        self.diff_average_comdet =getDiff(allResult[("valgrind", "average_comdet")], testName)
        self.diff_average_scomdet =getDiff(allResult[("valgrind", "average_scomdet")], testName)
        self.diff_sr_monotonic =getDiff(allResult[("valgrind", "sr_monotonic")], testName)
        # Calibration: failures become "KO" (instead of "Warning") only when
        # the platform reproduces IEEE nearest under valgrind.
        self.KoStr="Warning"
        self.warnBool=True
        self.ok=0
        self.warn=0
        self.ko=0
        self.assertEqual("nearestNative", "nearestMemcheck")
        if self.ok!=0:
            self.KoStr="KO"
            self.warnBool=False
    def getValue(self, str1):
        # eval-based lookup of a diff_<mode> attribute; str1 is always an
        # internal, trusted mode name (never user input).
        return eval("self.diff_"+str1)
    def printKo(self, str):
        """Report a failed assertion (as KO or Warning, see __init__) and count it."""
        print(self.KoStr+" : "+self.testName+ " "+str)
        if self.warnBool:
            self.warn+=1
        else:
            self.ko+=1
    def printOk(self, str):
        """Report a passed assertion and count it."""
        print("OK : "+self.testName+ " "+str)
        self.ok+=1
    def assertEqValue(self, str1, value):
        """Assert diff_<str1> equals the literal *value*."""
        value1=eval("self.diff_"+str1)
        value2=value
        if value1!= value2:
            self.printKo(str1+ "!=" +str(value2) + " "+str(value1))
        else:
            self.printOk(str1+ "="+str(value))
    def assertEqual(self, str1, str2):
        """Assert diff_<str1> == diff_<str2>."""
        value1= eval("self.diff_"+str1)
        value2= eval("self.diff_"+str2)
        if value1!= value2:
            self.printKo(str1+ "!="+str2 + " "+str(value1) + " " +str(value2))
        else:
            self.printOk(str1+ "="+str2)
    def assertLeq(self, str1, str2):
        """Assert diff_<str1> <= diff_<str2>."""
        value1= eval("self.diff_"+str1)
        value2= eval("self.diff_"+str2)
        if value1 <= value2:
            self.printOk(str1+ "<="+str2)
        else:
            self.printKo(str1+ ">"+str2 + " "+str(value1) + " " +str(value2))
    def assertDiff(self, str1, str2):
        """Assert diff_<str1> != diff_<str2>."""
        value1= eval("self.diff_"+str1)
        value2= eval("self.diff_"+str2)
        if value1 != value2:
            self.printOk(str1+ "!="+str2)
        else:
            self.printKo(str1+ "=="+str2 + " "+str(value1) + " " +str(value2))
    def assertLess(self, str1, str2):
        """Assert diff_<str1> < diff_<str2> (strict)."""
        value1= eval("self.diff_"+str1)
        value2= eval("self.diff_"+str2)
        if value1 < value2:
            self.printOk(str1+ "<"+str2)
        else:
            self.printKo(str1+ ">="+str2 + " "+str(value1) + " " +str(value2))
    def assertAbsLess(self, str1, str2):
        """Assert |diff_<str1>| < |diff_<str2>| (strict)."""
        value1= abs(eval("self.diff_"+str1))
        value2= abs(eval("self.diff_"+str2))
        if value1 < value2:
            self.printOk("|"+str1+ "| < |"+str2+"|")
        else:
            self.printKo("|"+str1+ "| >= |"+str2+"|" + " "+str(value1) + " " +str(value2))
    def assertNative(self):
        """Assert that each standard verrou mode matches its native fenv counterpart."""
        for rd in ["nearest", "toward_zero", "downward", "upward"]:
            self.assertEqual(rd, rd+"Native")
def checkTestPositiveAndOptimistRandomVerrou(allResult,testList,typeTab=["<double>", "<float>"]):
    """Ordering checks for tests whose rounding error is positive and grows monotonically.

    Verifies the deterministic-mode ordering (downward <= nearest/farthest <= upward),
    that stochastic modes fall strictly between the directed bounds, and the
    "optimistic" property that averaging modes beat pure random ones in
    absolute deviation.  Returns the cumulated errorCounter.
    NOTE: typeTab default is a (read-only) mutable default argument.
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # Positive values: toward_zero truncates downward, away_zero rounds up.
            testCheck.assertEqual("toward_zero", "downward")
            testCheck.assertEqual("away_zero", "upward")
            testCheck.assertLeq("downward", "nearest")
            testCheck.assertLeq("downward", "farthest")
            testCheck.assertLeq("farthest", "upward")
            testCheck.assertLeq("nearest", "upward")
            # Stochastic modes must lie strictly inside the directed bounds.
            for rnd in [ "upward", "prandom_half", "sr_monotonic"] + [ x+y for x in ["random", "average", "prandom"] for y in ["","_det","_comdet"]] + [ x+y for x in ["random", "average"] for y in ["_scomdet"]]:
                testCheck.assertLess("downward", rnd)
            for rnd in [ "prandom_half", "sr_monotonic"] + [ x+y for x in ["random", "average", "prandom"] for y in ["","_det","_comdet"] ] + [ x+y for x in ["random", "average"] for y in ["_scomdet"]]:
                testCheck.assertLess(rnd, "upward")
            # Averaging modes should outperform the others in absolute error.
            for avg in ["average","average_det","average_comdet","average_scomdet","sr_monotonic"]:
                testCheck.assertAbsLess(avg, "random")
                testCheck.assertAbsLess(avg, "prandom_half")
                testCheck.assertAbsLess(avg, "random_det")
                testCheck.assertAbsLess(avg, "upward")
                testCheck.assertAbsLess(avg, "downward")
                testCheck.assertAbsLess(avg, "nearest")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def checkFloat(allResult, testList):
    """Check the "float" rounding mode: a double computation rounded to float must match the native float run.

    For each test, the float-variant nearest result must equal its own
    "float"-mode result, and the double variant run in "float" mode must
    reproduce the float variant's nearest value.
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        testCheckFloat=assertRounding(test+"<float>")
        testCheckDouble=assertRounding(test+"<double>")
        testCheckFloat.assertEqual("nearest", "float")
        testCheckDouble.assertEqValue("float", testCheckFloat.getValue("nearest"))
        ok+=testCheckFloat.ok
        ko+=testCheckFloat.ko
        warn+=testCheckFloat.warn
        ok+=testCheckDouble.ok
        ko+=testCheckDouble.ko
        warn+=testCheckDouble.warn
    return errorCounter(ok, ko, warn)
def checkTestNegativeAndOptimistRandomVerrou(allResult,testList,typeTab=["<double>", "<float>"]):
    """Negative-value counterpart of checkTestPositiveAndOptimistRandomVerrou.

    For negative results toward_zero coincides with upward; stochastic
    modes must lie inside the directed bounds (prandom variants only
    non-strictly) and averaging modes must win in absolute deviation.
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # Negative values: truncation rounds towards zero, i.e. upward.
            testCheck.assertEqual("toward_zero", "upward")
            testCheck.assertLeq("downward", "nearest")
            testCheck.assertLeq("nearest", "upward")
            testCheck.assertLeq("downward", "farthest")
            testCheck.assertLeq("farthest", "upward")
            for rnd in [ "upward", "prandom_half","sr_monotonic"] + [ x+y for x in ["random", "average", "prandom"] for y in ["","_det","_comdet"] ] + [ x+y for x in ["random", "average"] for y in ["_scomdet"] ]:
                testCheck.assertLess("downward", rnd)
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq("downward", rnd)
            for rnd in [ "prandom_half","sr_monotonic"] + [ x+y for x in ["random", "average", "prandom"] for y in ["","_det","_comdet"] ]+ [ x+y for x in ["random", "average"] for y in ["_scomdet"] ]:
                testCheck.assertLess(rnd, "upward")
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq(rnd, "upward")
            # Averaging modes should outperform the others in absolute error.
            for avg in ["average","average_det","average_comdet", "average_scomdet", "sr_monotonic"]:
                testCheck.assertAbsLess(avg, "random")
                testCheck.assertAbsLess(avg, "prandom_half")
                testCheck.assertAbsLess(avg, "random_det")
                testCheck.assertAbsLess(avg, "upward")
                testCheck.assertAbsLess(avg, "downward")
                testCheck.assertAbsLess(avg, "nearest")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def checkTestPositive(allResult,testList, typeTab=["<double>", "<float>"]):
    """Ordering checks for positive-error tests, without the averaging-optimism assertions.

    Same directed-mode ordering as checkTestPositiveAndOptimistRandomVerrou,
    but random/average modes are only required to lie inside the bounds.
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # Positive values: toward_zero truncates downward, away_zero rounds up.
            testCheck.assertEqual("toward_zero", "downward")
            testCheck.assertEqual("away_zero", "upward")
            testCheck.assertLeq("downward", "nearest")
            testCheck.assertLeq("nearest", "upward")
            testCheck.assertLeq("downward", "farthest")
            testCheck.assertLeq("farthest", "upward")
            for rnd in [ "upward", "prandom_half", "sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet","_scomdet"] ]:
                testCheck.assertLess("downward", rnd)
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq("downward", rnd)
            for rnd in [ "prandom_half", "sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet","_scomdet"] ]:
                testCheck.assertLess(rnd, "upward")
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq(rnd,"upward")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def checkTestNegative(allResult,testList,typeTab=["<double>", "<float>"]):
    """Negative-value counterpart of checkTestPositive.

    For negative results toward_zero coincides with upward and away_zero
    with downward; stochastic modes must lie inside the directed bounds.
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # Negative values: truncation rounds towards zero, i.e. upward.
            testCheck.assertEqual("toward_zero", "upward")
            testCheck.assertEqual("away_zero", "downward")
            testCheck.assertLeq("downward", "nearest")
            testCheck.assertLeq("nearest", "upward")
            testCheck.assertLeq("downward", "farthest")
            testCheck.assertLeq("farthest", "upward")
            for rnd in [ "upward", "prandom_half","sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet","_scomdet"] ]:
                testCheck.assertLess("downward", rnd)
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq("downward", rnd)
            for rnd in [ "prandom_half","sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet","_scomdet"] ]:
                testCheck.assertLess(rnd, "upward")
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq(rnd,"upward")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def checkTestPositiveBetweenTwoValues(allResult,testList, typeTab=["<double>", "<float>"]):
    """Checks for positive results that sit strictly between two representable values.

    Here nearest and farthest must differ, and every mode is only required
    to stay within [downward, upward] (non-strict), since the exact result
    lies between the two neighbours.
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # Positive values: toward_zero truncates downward, away_zero rounds up.
            testCheck.assertEqual("toward_zero", "downward")
            testCheck.assertEqual("away_zero", "upward")
            testCheck.assertDiff("nearest", "farthest")
            testCheck.assertLess("downward", "upward")
            for rnd in [ "prandom_half", "farthest", "nearest","sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet", "_scomdet"] ]:
                testCheck.assertLeq("downward", rnd)
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq("downward", rnd)
            for rnd in [ "prandom_half", "farthest", "nearest","sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet", "_scomdet"] ]:
                testCheck.assertLeq(rnd, "upward")
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq(rnd,"upward")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def checkTestNegativeBetweenTwoValues(allResult,testList, typeTab=["<double>", "<float>"]):
    """Negative-value counterpart of checkTestPositiveBetweenTwoValues.

    toward_zero coincides with upward and away_zero with downward; nearest
    and farthest must differ and all modes stay within [downward, upward].
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # Negative values: truncation rounds towards zero, i.e. upward.
            testCheck.assertEqual("toward_zero", "upward")
            testCheck.assertEqual("away_zero", "downward")
            testCheck.assertLess("downward", "upward")
            testCheck.assertDiff("nearest", "farthest")
            for rnd in [ "prandom_half", "farthest", "nearest","sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet", "_scomdet"] ]:
                testCheck.assertLeq("downward", rnd)
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq("downward", rnd)
            for rnd in [ "prandom_half", "farthest", "nearest","sr_monotonic"] + [ x+y for x in ["random", "average"] for y in ["","_det","_comdet", "_scomdet"] ]:
                testCheck.assertLeq(rnd, "upward")
            for rnd in [ "prandom"+y for y in ["","_det","_comdet"] ]:
                testCheck.assertLeq(rnd,"upward")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def checkExact(allResult,testList,typeTab=["<double>", "<float>"]):
    """Checks for tests whose result is exactly representable: every rounding mode must agree."""
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # An exact result leaves nothing to round: all modes equal upward.
            for rnd in ["downward", "prandom_half", "farthest", "nearest", "toward_zero", "away_zero","sr_monotonic"] + [ x+y for x in ["random", "average", "prandom"] for y in ["","_det","_comdet"] ]+[ x+y for x in ["random", "average"] for y in ["_scomdet"] ]:
                testCheck.assertEqual(rnd,"upward")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def checkExactDetAndOptimistAverage(allResult,testList,typeTab=["<double>", "<float>"]):
    """Checks for tests exact under deterministic modes but noisy under pure random ones.

    All deterministic/det/comdet modes must agree with upward, while
    "random" and "prandom_half" are expected to deviate; "average" must
    deviate less than both.
    """
    ok=0
    warn=0
    ko=0
    for test in testList:
        for RealType in typeTab:
            testName=test+RealType
            testCheck=assertRounding(testName)
            testCheck.assertNative()
            testCheck.assertEqual("nearest","ftz") #hypothesis : no denorm
            # Deterministic and *_det/*_comdet modes see an exact result.
            for rnd in ["downward","farthest", "nearest", "toward_zero", "away_zero","sr_monotonic"] + [ x+y for x in ["random", "average", "prandom"] for y in ["_det","_comdet"] ]+[ x+y for x in ["random", "average"] for y in ["_scomdet"] ]:
                testCheck.assertEqual(rnd,"upward")
            # Non-deterministic modes are expected to wander, average the least.
            for rnd in ["random", "prandom_half"]:
                testCheck.assertLess("downward",rnd)
                testCheck.assertLess("average",rnd)
            testCheck.assertLess("downward","average")
            ok+=testCheck.ok
            ko+=testCheck.ko
            warn+=testCheck.warn
    return errorCounter(ok, ko, warn)
def assertCmpTest(testName1, rounding1, testName2, rounding2, opposite=False):
    """Compare the deviations of two (test, rounding) runs.

    Returns True when they are equal (opposite=False) or exactly opposite
    in sign (opposite=True); prints an OK/KO line either way.
    NOTE(review): reads the module-level ``allResult`` dict.
    """
    diff1=getDiff(allResult[("valgrind", rounding1)], testName1)
    diff2=getDiff(allResult[("valgrind", rounding2)], testName2)
    if diff1==diff2 and opposite==False:
        print("OK "+testName1+"("+rounding1+") / " +testName2 +"("+rounding2+")")
        return True
    if diff1==-diff2 and opposite:
        print("OK - "+testName1+"("+rounding1+") / " +testName2 +"("+rounding2+")")
        return True
    print("KO %s(%s) / %s(%s) %.17f %.17f"%(testName1,rounding1, testName2,rounding2, diff1,diff2))
    return False
def checkScomdet(allResult, testPairList, typeTab=["<double>", "<float>"]):
    """Check the symmetric-comdet property: paired tests must produce equal (or opposite-sign) deviations.

    testPairList entries are (test1, test2, oppositeSign); for each of the
    scomdet/sr_monotonic modes the two tests' deviations are compared via
    assertCmpTest.  Returns an errorCounter (no warnings).
    """
    ok=0
    ko=0
    roundingList=["random_scomdet", "average_scomdet", "sr_monotonic"]
    for (code1,code2, oppositeSign) in testPairList:
        for RealType in typeTab:
            testName1=code1+RealType
            testName2=code2+RealType
            for rounding in roundingList:
                if assertCmpTest(testName1, rounding, testName2, rounding, oppositeSign):
                    ok+=1
                else:
                    ko+=1
    return errorCounter(ok,ko,0)
if __name__=='__main__':
    # argv[1]: path of the checkRounding test executable.
    cmdHandler=cmdPrepare(os.path.join(os.curdir,sys.argv[1]))
    # Run the executable once per (environment, rounding) pair; verrou
    # stderr is filtered to remove run-dependent lines before comparison.
    allResult={}
    for (env, rounding) in generatePairOfAvailableComputation():
        (cout, cerr)=cmdHandler.run(env, rounding)
        if env=="valgrind":
            allResult[(env, rounding)]=(cout, verrouCerrFilter(cerr))
        else:
            allResult[(env, rounding)]=(cout, cerr)
#    printRes(allResult[("fenv" ,"toward_zero")])
#    printRes(allResult[("valgrind" ,"toward_zero")])
    typeTab=["<double>", "<float>"]#,"<long double>"]
    # Accumulate all check families; the process exit code is the KO count.
    eCount=errorCounter()
    eCount+=checkVerrouInvariant(allResult)
    eCount+=checkTestPositiveAndOptimistRandomVerrou(allResult, testList=["testInc0d1", "testIncSquare0d1", "testIncDiv10"], typeTab=typeTab)
    eCount+=checkTestNegativeAndOptimistRandomVerrou(allResult, testList=["testInc0d1m", "testIncSquare0d1m", "testIncDiv10m"], typeTab=typeTab)
    eCount+=checkTestPositive(allResult, testList=["testInvariantProdDiv"], typeTab=typeTab)
    eCount+=checkTestNegative(allResult, testList=["testInvariantProdDivm"], typeTab=typeTab)
    eCount+=checkTestPositiveAndOptimistRandomVerrou(allResult, testList=["testFma"], typeTab=["<double>", "<float>"])
    eCount+=checkTestNegativeAndOptimistRandomVerrou(allResult, testList=["testFmam"], typeTab=["<double>", "<float>"])
    eCount+=checkExact(allResult, testList=["testMixSseLlo"], typeTab=["<double>", "<float>"])
    eCount+=checkExact(allResult, testList=["testCast", "testCastm"], typeTab=["<double>"])
    eCount+=checkTestPositiveBetweenTwoValues(allResult, testList=["testCast"], typeTab=["<float>"])
    eCount+=checkTestNegativeBetweenTwoValues(allResult, testList=["testCastm"], typeTab=["<float>"])
    eCount+=checkExactDetAndOptimistAverage(allResult, testList=["testDiffSqrt"],typeTab=["<double>", "<float>"])
    eCount+=checkFloat(allResult, ["testInc0d1", "testIncSquare0d1", "testIncDiv10", "testInc0d1m", "testIncSquare0d1m", "testIncDiv10m", "testFma", "testFmam", "testMixSseLlo"])
    eCount+=checkScomdet(allResult,[("testInc0d1","testInc0d1m", True),( "testIncSquare0d1", "testIncSquare0d1m",True),("testIncDiv10", "testIncDiv10m",True),("testFma", "testFmam",True)])
    eCount.printSummary()
    sys.exit(eCount.ko)
| 27,482 | 37.330544 | 262 | py |
verrou | verrou-master/unitTest/testPlotStat/dot.py | #!/usr/bin/env python3
def computeDot(v1, v2, blockSize):
    """Dot product of v1 and v2, accumulated in blocks of *blockSize*.

    The block structure changes the rounding-error pattern of the sum,
    which is the point of this test.  *blockSize* must divide len(v1) and
    both vectors must have the same length; otherwise the program aborts
    with status 42.

    BUGFIX: the original called sys.exit(42) although this file never
    imports sys, so the error paths raised NameError instead of exiting;
    raising SystemExit(42) directly is the documented equivalent.
    """
    totalSize = len(v1)
    if totalSize != len(v2):
        print("incoherent size")
        raise SystemExit(42)
    if totalSize % blockSize != 0:
        print("block size should divide total size")
        raise SystemExit(42)
    res = 0.
    for iExtern in range(int(totalSize / blockSize)):
        resLocal = 0.
        for iIntern in range(blockSize):
            i = iExtern * blockSize + iIntern
            resLocal += v1[i] * v2[i]
        res += resLocal
    return res
if __name__=="__main__":
    # 0.1 is not exactly representable in binary, so the 128-term dot
    # product of 0.1*0.1 accumulates rounding error.  Print the deviation
    # from the exact value 1.28 for several block sizes; each dot product
    # is computed twice to expose non-deterministic rounding backends.
    v1=[0.1 for i in range(128)]
    v2=[0.1 for i in range(128)]
    for b in [1,2,4,8,16,32]:
        dot1=computeDot(v1,v2, b)
        dot2=computeDot(v1,v2, b)
        print("1-dot("+str(b)+") %.17g %.17g"%(dot1, dot1-1.28))
        print("2-dot("+str(b)+") %.17g %.17g"%(dot2, dot2-1.28))
| 841 | 25.3125 | 64 | py |
verrou | verrou-master/unitTest/testPlotStat/extract.py | #!/usr/bin/env python3
import sys

# Extract, from <argv[1]>/out, the value reported for block size 32
# (line "1-dot(32) <value> <delta>"): print the third field and exit 0.
# Exit 42 when no such line exists.
blockSize=32
for line in open(sys.argv[1]+"/out"):
    if line.startswith("1-dot("+str(blockSize)+")"):
        print(line.split()[2])
        sys.exit(0)
sys.exit(42)
| 208 | 18 | 52 | py |
verrou | verrou-master/unitTest/checkStatRounding/extract.py | #!/usr/bin/python3
import os
import sys
defaultType="double"
defaultAlgo="Seq"
def extract(fileName, typeFloat, algo):
    """Extract one algorithm's result string from a result file.

    Lines look like "<double>\tresSeq:...\tresRec:..."; the entry
    "res<algo>" of the line matching *typeFloat* is returned, stripped.
    Invalid arguments abort with status 42; a file without the wanted
    entry prints a message and yields None.
    """
    if typeFloat not in ("double", "float"):
        print("invalid ALGO_TYPE")
        sys.exit(42)
    if algo not in ("Seq", "Rec", "SeqRatio", "RecRatio"):
        print("invalid ALGO")
        sys.exit(42)
    wantedPrefix = "<%s>"%(typeFloat)
    wantedKey = "res" + algo
    for line in open(fileName):
        if not line.startswith(wantedPrefix):
            continue
        for entry in line.split("\t")[1:]:
            entryKey, entryValue = entry.split(":")
            if entryKey == wantedKey:
                return entryValue.strip()
    print("invalid file :", fileName)
    return None
if __name__=="__main__":
    # Algorithm type and variant are selected through the environment:
    # ALGO_TYPE in {double,float}, ALGO in {Seq,Rec,SeqRatio,RecRatio};
    # module-level defaults apply when unset.
    if "ALGO_TYPE" in os.environ:
        algo_type=os.environ["ALGO_TYPE"]
    else:
        algo_type=defaultType
    if "ALGO" in os.environ:
        algo=os.environ["ALGO"]
    else:
        algo=defaultAlgo
    # One directory argument: print the value.
    if len(sys.argv)==2:
        print(extract(sys.argv[1]+"/res.out", algo_type, algo))
    else:
        # Two directories: exit 0 when the values agree within 1e-17
        # relative difference, 42 otherwise.
        v1=float(extract(sys.argv[1]+"/res.out", algo_type, algo))
        v2=float(extract(sys.argv[2]+"/res.out", algo_type, algo))
        if abs(v2 - v1)/abs(v1) < 1e-17:
            sys.exit(0)
        else:
            sys.exit(42)
| 1,253 | 26.26087 | 66 | py |
verrou | verrou-master/unitTest/ddTest/ddCmpTrue.py | #!/usr/bin/env python3
import sys

# Trivial delta-debugging comparison script: always report success (exit 0).
sys.exit(0)
| 48 | 7.166667 | 22 | py |
verrou | verrou-master/unitTest/ddTest/ddCmp.py | #!/usr/bin/env python3
import sys
import ddRun
#DDConfig
import os
import pickle
def cmpNorm(ref, toCmp, ddCase):
    """Rate the configuration stored in *toCmp* against the simulated case *ddCase*.

    The reference path name tells which delta-debugging level is running:
    symbol level ("dd.sym") or source-line level ("dd.line").  Returns the
    status computed by *ddCase* (implicitly None for an unrecognised ref).
    """
    print("norm")
    symbolLevel = "dd.sym" in ref and "dd.line" not in ref
    if symbolLevel:
        excludePath = os.path.join(toCmp, "path_exclude")
        return ddCase.statusOfSymConfig(open(excludePath).readline())
    if "dd.line" in ref:
        sourcePath = os.path.join(toCmp, "path_source")
        return ddCase.statusOfSourceConfig(open(sourcePath).readline())
if __name__=="__main__":
    # argv[1]: reference directory, argv[2]: directory to compare.
    # Identical paths trivially compare equal.
    if sys.argv[1]== sys.argv[2]:
        sys.exit(0)
    else:
        # Replay the serialized ddConfig of the reference run and use it
        # to rate the candidate configuration; its status is the exit code.
        ddCase=ddRun.ddConfig()
        ref=sys.argv[1]
        ddCase.unpickle(os.path.join(ref,"dd.pickle"))
        toCmp=sys.argv[2]
        sys.exit(cmpNorm(ref, toCmp, ddCase))
| 679 | 20.935484 | 94 | py |
verrou | verrou-master/unitTest/ddTest/ddCmpFalse.py | #!/usr/bin/env python3
import sys

# Trivial delta-debugging comparison script: always report failure (exit 1).
sys.exit(1)
| 48 | 7.166667 | 22 | py |
verrou | verrou-master/unitTest/ddTest/ddRun.py | #!/usr/bin/env python3
import sys
import os
import pickle
import random
# Probability that a failing configuration actually reports its failure.
# Overridable through the DD_TEST_PROBA environment variable; the default
# 1.0 makes simulated failures deterministic.
proba = 1.
try:
    proba = float(os.environ["DD_TEST_PROBA"])
except (KeyError, ValueError):
    # Unset or unparsable DD_TEST_PROBA: keep the deterministic default.
    # (The original used a bare except, which would also hide real errors.)
    pass
def simulateRandom(fail):
    """Return *fail* with probability ``proba`` when fail is non-zero; otherwise 0."""
    if fail == 0:
        return 0
    drawnBelowThreshold = random.random() < proba
    return fail if drawnBelowThreshold else 0
class ddConfig:
def __init__(self,listOf1Failure=[], listOf2Failures=[]):
self.nbSym=len(listOf1Failure)
self.listOf1Failure=listOf1Failure
self.listOf2Failures=listOf2Failures
self.check2Failure()
    def check2Failure(self):
        """Validate pairwise-failure entries: both symbol indices must exist.

        Exits the process on an out-of-range index (fatal configuration error).
        """
        for x in self.listOf2Failures:
            ((sym0,sym1), fail, tab)=x
            if sym0>= self.nbSym or sym1 >= self.nbSym:
                print("failure")
                sys.exit()
        #todo check tab
        # for (s1, l1, s2, l2) in tab:
def pickle(self, fileName):
"""To serialize the ddConfig object in the file fileName"""
fileHandler= open(fileName, "wb")
pickle.dump(self.listOf1Failure,fileHandler)
pickle.dump(self.listOf2Failures,fileHandler)
def unpickle(self, fileName):
"""To deserialize the ddConfig object from the file fileName"""
fileHandler=open(fileName, "rb")
self.listOf1Failure=pickle.load(fileHandler)
self.listOf2Failures=pickle.load(fileHandler)
self.nbSym=len(self.listOf1Failure)
    def listOfIntSym(self):
        """Return the symbol indices as a range object (0 .. nbSym-1)."""
        return range(self.nbSym)
def listOfTxtSym(self):
"""Return a fake list of symbol"""
return [("sym-"+str(i), "fake.so") for i in self.listOfIntSym()]
def getExcludeIntSymFromExclusionFile(self, excludeFile):
""" Return the Int Symbol list excluded with excludeFile """
if excludeFile==None:
return []
return [int((line.split()[0]).replace("sym-", "")) for line in ((open(excludeFile.strip(), "r")).readlines()) ]
    def getIncludeIntSymFromExclusionFile(self,excludeFile):
        """Return the symbol indices NOT excluded by *excludeFile* (complement of the exclusion list)."""
        return [i for i in self.listOfIntSym() if i not in self.getExcludeIntSymFromExclusionFile(excludeFile)]
def listOfTxtLine(self, excludeFile):
"""Generate a fake list of line : it takes into account the excludeFile"""
listOfSymIncluded=self.getIncludeIntSymFromExclusionFile(excludeFile)
res=[]
for (symFailureIndex, failure, listOfLine) in self.listOf1Failure:
if symFailureIndex in listOfSymIncluded:
for (lineIndex, failureLine) in listOfLine:
res+=[("sym"+str(symFailureIndex)+".c", lineIndex, "sym-"+str(symFailureIndex))]
print("print listOfLine", res)
return res
def getIncludedLines(self, sourceFile):
includedLines=[line.split() for line in (open(sourceFile.strip(), "r")).readlines()]
return includedLines
def statusOfSymConfig(self, config):
"""Return the status of the config"""
print(config)
listOfConfigSym=self.getExcludeIntSymFromExclusionFile(config)
#test single sym
for sym in self.listOfIntSym():
if sym not in listOfConfigSym and self.listOf1Failure[sym][1]!=0:
res=simulateRandom(1)
if res==1:
return 1
#test couple sym
for ((sym1,sym2), failure, tab) in self.listOf2Failures:
if failure==0:
continue
if not sym1 in listOfConfigSym and not sym2 in listOfConfigSym:
res=simulateRandom(1)
if res==1:
return 1
return 0
def statusOfSourceConfig(self, configLine):
print("configLine:", configLine)
listOfSym=[]
configLineLines=self.getIncludedLines(configLine)
print("configLineLines:", configLineLines)
for sym in range(self.nbSym):
if sym not in listOfSym and self.listOf1Failure[sym][1]!=0:
print("sym:", sym)
print("listofLineFailure :", self.listOf1Failure[sym][2])
selectedConfigLines=[int(line[1]) for line in configLineLines if line[2]=="sym-"+str(sym) ]
print("selectedConfigLines:", selectedConfigLines)
for (lineFailure, failure) in self.listOf1Failure[sym][2]:
if lineFailure in selectedConfigLines and failure :
print("line return : ", lineFailure)
return 1
#test couple sym
for ((sym1,sym2), failure, tab) in self.listOf2Failures:
print ("sym1 sym2 tab", sym1, sym2, tab)
if failure==0:
continue
if not sym1 in listOfSym and not sym2 in listOfSym:
selectedConfigLines1=[int(line[1]) for line in configLineLines if line[2]=="sym-"+str(sym1) ]
selectedConfigLines2=[int(line[1]) for line in configLineLines if line[2]=="sym-"+str(sym2) ]
print("selectedConfigLines1:", selectedConfigLines1)
print("selectedConfigLines2:", selectedConfigLines2)
for (s1, l1, s2,l2) in tab:
if s1==sym1 and s2==sym2:
if l1 in selectedConfigLines1 and l2 in selectedConfigLines2:
return 1
return 0
def checkRddminSymResult(self,loadRes):
fullList=[int(line.split()[0].replace("sym-","")) for line in loadRes["full"]]
fullList.sort()
if fullList != [x for x in range(self.nbSym)]:
print("invalid full perturbation number")
print("fullList", fullList)
print("fullList expected", [x for x in range(self.nbSym)])
return False
if len(loadRes["noperturbation"])!=0:
print("invalid no empty noperturbation")
return False
ddminList=[ [int(line.split()[0].replace("sym-",""))
for line in ddmin
]
for ddmin in loadRes["ddmin"]]
ddminList1=[x[0] for x in ddminList if len(x)==1]
ddminList1.sort()
ddminList2=[[min(x), max(x)] for x in ddminList if len(x)==2]
ddminList2.sort()
ddminListRes=[x for x in ddminList if not len(x) in [1,2]]
if len(ddminListRes)!=0:
print("unexpected ddmin size")
print("ddminListRes" , ddminListRes)
return False
ddminList1Expected=[sym for sym in range(self.nbSym) if self.listOf1Failure[sym][1]!=0]
ddminList1Expected.sort()
if ddminList1 != ddminList1Expected:
print("unexpected ddmin of size 1")
print("ddminList1Expected", ddminList1Expected)
print("ddminList1", ddminList1)
return False
ddminList2Expected=[[tup[0][0], tup[0][1]]for tup in self.listOf2Failures if tup[1]!=0]
ddmin2error=False
for x in ddminList2Expected:
if not x in ddminList2:
ddmin2error=True
for x in ddminList2:
if not x in ddminList2Expected:
ddmin2error=True
if ddmin2error:
print("unexpected ddminList2")
print("ddminList2Expected", ddminList2Expected)
print("ddminList2",ddminList2)
return False
rddmincmpExpected=[x for x in range(self.nbSym) if not x in ddminList1Expected and not x in [t[0] for t in ddminList2Expected] and not x in [t[1] for t in ddminList2Expected]]
rddmincmp=[int(line.split()[0].replace("sym-","")) for line in loadRes["rddmincmp"]]
rddmincmp.sort()
if rddmincmp != rddmincmpExpected:
print("unexpected rddmincmp")
print("rddmincmpExpected", rddmincmpExpected)
print("rddmincmp", rddmincmp)
return False
return True
def checkRddminLineResult(self,loadRes):
#check fulllist
fullList=[(int(line.split("\t")[2].replace("sym-","")),
int(line.split("\t")[1]))
for line in loadRes["full"]]
fullList.sort()
fullListExpected=[(symFailureIndex, line[0]) for (symFailureIndex, failure, listOfLine) in self.listOf1Failure for line in listOfLine]
fullListExpected.sort()
if fullListExpected!= fullList:
print("invalid full perturbation number")
print("fullList", fullList)
print("fullList expected", fullListExpected)
return False
#check empty list
if len(loadRes["noperturbation"])!=0:
print("invalid no empty noperturbation")
return False
#extract result form loadRes
ddminList=[ [(int(line.split("\t")[2].replace("sym-","")),
int(line.split("\t")[1]))
for line in ddmin]
for ddmin in loadRes["ddmin"]]
ddminList1=[x[0] for x in ddminList if len(x)==1]
ddminList1.sort()
ddminList2=[x for x in ddminList if len(x)==2]
ddminList2.sort()
ddminListRes=[x for x in ddminList if not len(x) in [1,2]]
#check there are only result with length 1 or 2
if len(ddminListRes)!=0:
print("unexpected ddmin size")
print("ddminListRes" , ddminListRes)
return False
#check ddmin with length 1
ddminList1Expected=[(sym, lineTuple[0]) for sym in range(self.nbSym) if self.listOf1Failure[sym][1]!=0 for lineTuple in self.listOf1Failure[sym][2] if lineTuple[1]!=0]
ddminList1Expected.sort()
if ddminList1 != ddminList1Expected:
print("unexpected ddmin of size 1")
print("ddminList1Expected", ddminList1Expected)
print("ddminList1", ddminList1)
return False
#check ddmin with length 2
ddminList2ExpectedSym=[[tup[0][0], tup[0][1]]for tup in self.listOf2Failures if tup[1]!=0]
ddminList2Sym=set([(x[0][0],x[1][0]) for x in ddminList2 ])
for (x,y) in ddminList2Sym:
if not [min(x,y), max(x,y)] in ddminList2ExpectedSym:
print("to much ddmin2 found")
return False
for ddmin2ExpectedSym in ddminList2ExpectedSym:
line0=[[y[1] for y in x[2]] for x in self.listOf2Failures if x[1]!=0 and x[0]==(min(ddmin2ExpectedSym), max(ddmin2ExpectedSym)) ][0]
line1=[[y[3] for y in x[2]] for x in self.listOf2Failures if x[1]!=0 and x[0]==(min(ddmin2ExpectedSym), max(ddmin2ExpectedSym)) ][0]
line0=list(set(line0))
line1=list(set(line1))
lineTupleRes=[(ddmin[0][1], ddmin[1][1])
for ddmin in ddminList2 if ddmin[0][0]==min(ddmin2ExpectedSym) and ddmin[1][0]==max(ddmin2ExpectedSym)]
errorLine=False
if len(lineTupleRes) != min( len(line0),len(line1)) :
errorLine=True
line0Res=[line[0] for line in lineTupleRes]
line1Res=[line[1] for line in lineTupleRes]
for i in line0Res:
if not i in line0:
errorLine=True
for i in line1Res:
if not i in line1:
errorLine=True
if errorLine:
print("ddmin2 Error for ", ddmin2ExpectedSym)
print("lineTupleRes" , lineTupleRes)
print("line0, line1", line0, line1)
return False
#check rddmincmp
rddmincmpExpected=[x for x in fullListExpected if not x in ddminList1Expected and not x in sum(ddminList2,[])]
rddmincmp=[(int(line.split("\t")[2].replace("sym-","")),
int(line.split("\t")[1]))
for line in loadRes["rddmincmp"]]
rddmincmp.sort()
if rddmincmp != rddmincmpExpected:
print("unexpected rddmincmp")
print("rddmincmpExpected", rddmincmpExpected)
print("rddmincmp", rddmincmp)
return False
return True
def generateFakeExclusion(ddCase):
    """Write the fake symbol list to the file announced via the
    VERROU_GEN_EXCLUDE environment variable (its %p pid placeholder is
    replaced by the fixed fake pid 4242)."""
    genExcludeFile=os.environ["VERROU_GEN_EXCLUDE"]
    genExcludeFile=genExcludeFile.replace("%p", "4242")
    f=open(genExcludeFile, "w")
    dataToWrite=ddCase.listOfTxtSym()
    import random
    # Shuffled on purpose: the dd driver must not depend on symbol order.
    random.shuffle(dataToWrite)
    for (sym, name,) in dataToWrite:
        f.write(sym +"\t" + name+"\n")
    f.close()
def generateFakeSource(ddCase):
    """Write the fake (source, line, symbol) list to the file announced via
    VERROU_GEN_SOURCE (%p replaced by the fake pid 4242), honouring an
    optional VERROU_EXCLUDE symbol-exclusion file."""
    genSourceFile=os.environ["VERROU_GEN_SOURCE"]
    genSourceFile=genSourceFile.replace("%p", "4242")
    excludeFile=None
    try:
        excludeFile= os.environ["VERROU_EXCLUDE"]
    except:
        # Missing variable simply means "no exclusions".
        excludeFile=None
    print('excludeFile:',excludeFile)
    f=open(genSourceFile, "w")
    for (source, line, symName) in ddCase.listOfTxtLine(excludeFile):
        f.write(source +"\t" + str(line)+"\t"+symName+"\n")
    f.close()
def runRef(dir_path, ddCase):
    """Simulate the reference run: emit the fake exclusion/source lists and
    pickle the ddConfig into the result directory.

    Returns 0 for dd.sym / dd.line directories; for any other directory name
    it falls through and returns None (sys.exit(None) also means success).
    """
    print("ref")
    # Symbol granularity (a dd.line path also contains "dd.sym", hence the
    # extra guard).
    if "dd.sym" in dir_path and not "dd.line" in dir_path:
        generateFakeExclusion(ddCase)
        ddCase.pickle(os.path.join(dir_path,"dd.pickle"))
        return 0
    if "dd.line" in dir_path:
        generateFakeSource(ddCase)
        ddCase.pickle(os.path.join(dir_path,"dd.pickle"))
        return 0
def runNorm(dir_path, ddCase):
    """Simulate a normal (instrumented) run: record the exclusion/source
    file path communicated by the dd driver through the environment so the
    test harness can inspect it later.

    `ddCase` is unused but kept for signature symmetry with runRef.
    Returns 0 on success (the dd.line branch previously returned an implicit
    None; sys.exit(None) happened to mean success, but the explicit 0 makes
    the contract match runRef).
    """
    print("norm")
    # Symbol granularity (a dd.line path also contains "dd.sym").
    if "dd.sym" in dir_path and "dd.line" not in dir_path:
        with open(os.path.join(dir_path, "path_exclude"), "w") as out:
            out.write(os.environ["VERROU_EXCLUDE"] + "\n")
        return 0
    if "dd.line" in dir_path:
        with open(os.path.join(dir_path, "path_source"), "w") as out:
            out.write(os.environ["VERROU_SOURCE"] + "\n")
        return 0
if __name__=="__main__":
    # Synthetic case with 20 symbols: symbol s fails iff s > 16 (max(0, s-16)
    # is non-zero), each of its 11 lines fails iff line > 8, plus one pair
    # failure between symbols 0 and 1 on four line couples.
    ddCase=ddConfig([(sym, max(0, sym-16), [(line, max(0, line-8)) for line in range(11) ] ) for sym in range(20)],
                    [((0,1), 1, [(0,line, 1,max(0,line-1)) for line in range(4)]) ]
    )
    # ddCase=ddConfig([(0, 0, []),
    #                  (1, 1, [(0, 0),(1,1)] )])
    # argv[1] is the dd result directory; a "ref" directory triggers the
    # reference behaviour, anything else the normal run.
    if "ref" in sys.argv[1]:
        sys.exit(runRef(sys.argv[1], ddCase))
    else:
        sys.exit(runNorm(sys.argv[1], ddCase))
| 14,135 | 37.102426 | 185 | py |
verrou | verrou-master/unitTest/ddTest/ddCheck.py | #!/usr/bin/env python3
import sys
import ddRun
import os
import pickle
def loadResult(rep):
    """Load a delta-debug result directory into a dict.

    `rep` is a result directory whose *name* contains "dd.sym" or "dd.line";
    that marker selects the prefix of the include files to read ("dd.sym"
    wins when both markers are present, matching the original precedence).

    Returns a dict with keys:
      "ddmin"          : list of line-lists, one per ddmin* sub-directory
      "full"           : lines of FullPerturbation/<pref>.include
      "rddmincmp"      : lines of rddmin-cmp/<pref>.include
      "noperturbation" : lines of NoPerturbation/<pref>.include

    Raises ValueError when `rep` contains neither marker (the original code
    crashed with a NameError on the unset `pref` in that case).
    """
    if "dd.sym" in rep:
        pref = "dd.sym"
    elif "dd.line" in rep:
        pref = "dd.line"
    else:
        raise ValueError(
            "result directory %r must contain 'dd.sym' or 'dd.line'" % rep)

    def _include_lines(subdir):
        # One <pref>.include file per sub-directory; strip trailing newlines.
        # `with` closes the handles the original code leaked.
        path = os.path.join(rep, subdir, pref + ".include")
        with open(path) as handle:
            return [line.strip() for line in handle]

    listOfrddim = [_include_lines(x)
                   for x in os.listdir(rep) if x.startswith("ddmin")]
    return {"ddmin": listOfrddim,
            "full": _include_lines("FullPerturbation"),
            "rddmincmp": _include_lines("rddmin-cmp"),
            "noperturbation": _include_lines("NoPerturbation")}
if __name__=="__main__":
    resRep=sys.argv[1]   # dd result directory (its name encodes dd.sym/dd.line)
    resOut=sys.argv[2]   # NOTE(review): unused; kept for CLI compatibility?
    ddCase=ddRun.ddConfig()
    # The reference sub-run pickled the injected-failure description there.
    ref=os.path.join(resRep,"ref")
    ddCase.unpickle(os.path.join(ref,"dd.pickle"))
    loadedRes=loadResult(resRep)
    res=False
    if "dd.sym" in resRep:
        res=ddCase.checkRddminSymResult(loadedRes)
    if "dd.line" in resRep:
        res=ddCase.checkRddminLineResult(loadedRes)
    if res:
        print("valid rddmin result")
    else:
        # Non-zero exit signals the failure to the calling test harness.
        print("invalid rddmin result")
        sys.exit(42)
| 1,394 | 25.826923 | 97 | py |
tet-vs-hex | tet-vs-hex-master/bending/run_forces.py | import os
import json
import glob
import subprocess
import tempfile
import numpy as np
if __name__ == '__main__':
    # Sweep PolyFEM over every mesh and a range of forces; one JSON result
    # file per (mesh, discretization order, force) combination.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")

    # out_folder = "results"
    # folder_path = "meshes"
    # j_file = "bar.json"
    out_folder = "ar_res"
    folder_path = "ar"
    j_file = "ar.json"

    discr_orders = [1, 2]
    fs = -np.arange(0.1, 2.1, 0.1)   # downward forces, 0.1 steps
    exts = [".mesh", ".HYBRID"]      # .mesh = tets (P), .HYBRID = hexes (Q)
    # Node id to sample the solution at, per element/order combination.
    nodes = {"P1": 3962, "P2": 13688, "Q1": 6077, "Q2": 43097}

    current_folder = cwd = os.getcwd()

    with open(j_file, 'r') as f:
        json_data = json.load(f)

    for ext in exts:
        for mesh in glob.glob(os.path.join(folder_path, "*" + ext)):
            print(mesh)
            basename = os.path.splitext(os.path.basename(mesh))[0]
            bc = os.path.join(current_folder, folder_path, basename + ".txt")
            mesh = os.path.join(current_folder, mesh)
            json_data["mesh"] = mesh
            key = "Q" if "HYBRID" in ext else "P"
            for discr_order in discr_orders:
                node_id = nodes[key + str(discr_order)]
                # Only the rail meshes are sampled at a specific node;
                # otherwise the solver reports the solution minimum.
                if "rail" in basename:
                    json_data["export"]["sol_at_node"] = node_id
                else:
                    json_data["export"]["sol_at_node"] = -1
                for f in fs:
                    json_data["discr_order"] = discr_order
                    # "nice" meshes are oriented the other way round.
                    if "nice" in basename:
                        f = -f
                    json_data["problem_params"]["neumann_boundary"][0]["value"][1] = f
                    json_data["output"] = os.path.join(current_folder, out_folder, basename + "_k" + str(discr_order) + "_f" + str(abs(f)) + ".json")
                    # json_data["output"] = os.path.join(current_folder, out_folder, "out_" + basename + "_k" + str(discr_order) + ".json")
                    # NOTE(review): the inner `as f` shadows the force loop
                    # variable f; harmless but confusing.
                    with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                        with open(tmp_json.name, 'w') as f:
                            f.write(json.dumps(json_data, indent=4))
                        args = [polyfem_exe,
                                '--json', tmp_json.name,
                                '--cmd']
                        subprocess.run(args)
tet-vs-hex | tet-vs-hex-master/bending/run_times.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Timing benchmark: run PolyFEM n_runs times per (mesh, order) with a
    # fixed unit force, producing one JSON timing file per repetition.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")

    # out_folder = "times"
    # folder_path = "meshes"
    # j_file = "bar.json"
    out_folder = "ar_times"
    folder_path = "ar"
    j_file = "ar.json"

    discr_orders = [1, 2]
    force = -1
    exts = ["mesh", "HYBRID"]   # mesh = tets, HYBRID = hexes
    n_runs = 10                 # repetitions for averaging the timings

    current_folder = cwd = os.getcwd()

    with open(j_file, 'r') as f:
        json_data = json.load(f)
    json_data["problem_params"]["neumann_boundary"][0]["value"][1] = force

    for ext in exts:
        for mesh in glob.glob(os.path.join(folder_path, "*." + ext)):
            basename = os.path.splitext(os.path.basename(mesh))[0]
            f = force
            # "nice" tet meshes are oriented the other way round.
            if "nice" in basename and "_h" not in basename:
                f = -f
            json_data["problem_params"]["neumann_boundary"][0]["value"][1] = f
            # bc = os.path.join(current_folder, folder_path, basename + ".txt")
            mesh = os.path.join(current_folder, mesh)
            json_data["mesh"] = mesh
            # json_data["bc_tag"] = bc
            for discr_order in discr_orders:
                json_data["discr_order"] = discr_order
                for i in range(0, n_runs):
                    json_data["output"] = os.path.join(current_folder, out_folder, basename + "_k" + str(discr_order) + "_r" + str(i + 1) + ".json")
                    with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                        with open(tmp_json.name, 'w') as f:
                            f.write(json.dumps(json_data, indent=4))
                        args = [polyfem_exe,
                                '--json', tmp_json.name,
                                '--cmd']
                        subprocess.run(args)
tet-vs-hex | tet-vs-hex-master/bending/plot_forces.py | import os
import glob
import numpy as np
import json
import plotly.graph_objs as go
import plotly.offline as plotly
# Per-series plot styling: P* = tetrahedral meshes, Q* = hexahedral meshes.
colors = {'P1': 'rgb(9, 132, 227)', 'P2': 'rgb(108, 92, 231)', 'Q1': 'rgb(225, 112, 85)', 'Q2': 'rgb(214, 48, 49)'}
marker_shapes = {'P1': 'circle', 'P2': 'circle', 'Q1': 'star', 'Q2': 'star'}
marker_sizes = {'P1': 6, 'P2': 6, 'Q1': 10, 'Q2': 10}
def plot(f, k, slope, name):
    """Error curve |k - slope*f| for one series, as a one-element trace list.

    The fitted slope of the error (np.polyfit degree 1) is appended to the
    legend label.  Returns [] when either input list is empty.
    """
    if len(f) == 0 or len(k) == 0:
        return []
    # Sort samples by force so the line is drawn left to right.
    pairs = sorted(zip(f, k))
    x = tuple(p[0] for p in pairs)
    y = tuple(p[1] for p in pairs)
    # Deviation from the exact linear response y = slope * x.
    y = np.absolute(y - np.asarray(x) * slope)
    fit = np.polyfit(x, y, 1)
    return [go.Scatter(
        x=x,
        y=y,
        mode='lines+markers',
        name="{} {:.2e}".format(name, fit[0]),
        line=dict(color=(colors[name])),
        marker=dict(symbol=marker_shapes[name], size=marker_sizes[name]),
    )]
def plot_exact(f, kk):
    """Dashed grey reference line y = kk * x spanning the force range of f."""
    lo, hi = np.amin(f), np.amax(f)
    endpoints_x = [lo, hi]
    endpoints_y = [lo * kk, hi * kk]
    return [go.Scatter(
        x=endpoints_x,
        y=endpoints_y,
        mode='lines',
        showlegend=False,
        line=dict(color='rgb(178, 190, 195)', dash='dash'),
    )]
def load(mesh_name):
    """Collect (force, displacement) samples for one mesh from result JSONs.

    Scans `out_folder` (module global, set in __main__) for files named
    "<mesh_name>_k*.json".  Returns four lists: forces and displacements for
    discretization order 1 (f1, k1) and order 2 (f2, k2).

    Fix: the original reused `f` for the file handle, the force value AND the
    comprehension variable, and `k` for both the order and the displacement
    lists; distinct names remove the shadowing.
    """
    k1, f1, k2, f2 = [], [], [], []
    for json_file in glob.glob(os.path.join(out_folder, mesh_name + "_k*.json")):
        with open(json_file, 'r') as handle:
            json_data = json.load(handle)
        order = json_data["discr_order"]
        # Rail meshes were sampled at a fixed node; others use the minimum.
        if "rail" in mesh_name:
            disp = json_data["sol_at_node"][1]
        else:
            disp = json_data["sol_min"][1]
        force = abs(json_data["args"]["problem_params"]["neumann_boundary"][0]["value"][1])
        if order == 1:
            k1.append(disp)
            f1.append(force)
        else:
            k2.append(disp)
            f2.append(force)
    return f1, k1, f2, k2
if __name__ == '__main__':
    # Plot force-vs-error curves for one tet/hex mesh pair.  The commented
    # groups below select alternative meshes with their reference slopes kk.
    # out_folder = "results"
    output = None            # None -> interactive plot; a name -> SVG export
    out_folder = "ar_res"
    # output = "plot20_nice"

    # tri_name = "square_beam"
    # hex_name = "square_beam_h"
    # kk = -0.09694505138106606 / 2

    # tri_name = "square_beam05"
    # hex_name = "square_beam_h05"
    kk = -2.345675457072445 / 2

    # tri_name = "square_beam10"
    # hex_name = "square_beam_h10"
    # kk = -9.35041275633748 / 2

    # tri_name = "square_beam20"
    # hex_name = "square_beam_h20"
    # kk = -37.36296341373767 / 2

    tri_name = "square_beam20_nice"
    hex_name = "square_beam_h20"
    kk = -37.36296341373767 / 2   # overrides the kk above: active selection

    # tri_name = "circle_beam"
    # hex_name = "circle_beam_h"
    # kk = -0.130740975373922 / 2

    # tri_name = "rail"
    # hex_name = "rail_h"
    # kk = -0.14057837735277648/2

    tf1, tk1, tf2, tk2 = load(tri_name)
    hf1, hk1, hf2, hk2 = load(hex_name)

    layout = go.Layout(
        legend=dict(x=0.01, y=0.81),
        xaxis=dict(
            title="Force",
            exponentformat='power',
            showticksuffix='all',
            showtickprefix='all',
            showexponent='all',
            # autotick=True,
            nticks=5,
            tickfont=dict(
                size=16
            )
        ),
        yaxis=dict(
            title="Error",
            # tickformat='.1e',
            exponentformat='power',
            ticks='',
            # tick0=0,
            # dtick=1,
            # tickangle=-45,
            tickfont=dict(
                size=16
            ),
            autorange=True
        ),
        font=dict(
            size=24
        ),
        hovermode='closest'
    )

    data = []
    data.extend(plot(hf1, hk1, kk, "Q1"))
    data.extend(plot(hf2, hk2, kk, "Q2"))
    data.extend(plot(tf1, tk1, kk, "P1"))
    data.extend(plot(tf2, tk2, kk, "P2"))
    # data.extend(plot_exact(tf1, kk))

    fig = go.Figure(data=data, layout=layout)
    if output is not None:
        plotly.plot(fig, image="svg", image_filename=output)
    else:
        plotly.plot(fig)
| 3,879 | 21.55814 | 115 | py |
tet-vs-hex | tet-vs-hex-master/bending/print_times.py | import os
import json
import glob
import numpy as np
def get_data(p, q):
    """Average the first two runs-lists of each series.

    p and q are lists whose elements [0] and [1] hold the per-run values for
    order 1 and order 2 respectively.  Returns the four means as
    (p1, p2, q1, q2).
    """
    selected = (p[0], p[1], q[0], q[1])
    return tuple(np.mean(series) for series in selected)
def print_line(title, tb, ta, ts, t, e):
    """Print one LaTeX table row (title, basis/assembly/solve/total times,
    force error), compacting zero-padded exponents ("1.00e+00" -> "1.00")."""
    cells = "&\t".join("{:.2e}".format(v) for v in (tb, ta, ts, t, e))
    row = title + "&\t" + cells + "\\\\"
    # Same replacement order as the original: e-0 -> e-, e+0 -> e, e0 -> "".
    for old, new in (("e-0", "e-"), ("e+0", "e"), ("e0", "")):
        row = row.replace(old, new)
    print(row)
if __name__ == '__main__':
    # Aggregate the timing JSONs produced by run_times.py and print LaTeX
    # tables comparing each tet mesh (even index) with its hex twin (odd).
    # path = "times"
    # prefixes = ["square_beam", "square_beam_h", "circle_beam", "circle_beam_h", "rail", "rail_h"]
    path = "ar_times"
    # Pairs (tet, hex); NOTE(review): some hex names repeat, so their dict
    # entries accumulate results from every pass over the duplicate.
    prefixes = [
        "square_beam05", "square_beam_h05",
        "square_beam10", "square_beam_h10",
        "square_beam20", "square_beam_h20",
        "square_beam20_split", "square_beam_h20",
        "square_beam20_nice", "square_beam_h20_nice",
        "square_beam20_split8", "square_beam_h20_nice"]
    # Reference displacements used to compute the force error e_f.
    exacts = {
        "square_beam": -0.09694505138106606 / 2,
        "square_beam05": -2.345675457072445 / 2,
        "square_beam10": -9.35041275633748 / 2,
        "square_beam20": -37.36296341373767 / 2,
        "circle_beam": -0.130740975373922 / 2,
        "rail": -0.14057837735277648 / 2
    }
    discrs = [1, 2]

    basis = {}
    assembly = {}
    solve = {}
    total = {}
    errors = {}
    for prefix in prefixes:
        basis[prefix] = []
        assembly[prefix] = []
        solve[prefix] = []
        total[prefix] = []
        errors[prefix] = []

    for prefix in prefixes:
        for discr in discrs:
            bb = []
            aa = []
            ss = []
            tt = []
            ee = []
            for data in glob.glob(os.path.join(path, prefix + "_k" + str(discr) + "*.json")):
                with open(data, 'r') as f:
                    json_data = json.load(f)
                if "rail" in prefix:
                    disp = json_data["sol_at_node"][1]
                else:
                    disp = json_data["sol_min"][1]
                # Strip the mesh-variant suffixes to look up the exact value.
                exact = exacts[
                    prefix.replace("_h", "").
                    replace("_nice", "").
                    replace("_split8", "").
                    replace("_split16", "").
                    replace("_split30", "").
                    replace("_split", "")]
                bb.append(json_data["time_building_basis"])
                aa.append(json_data["time_assembling_stiffness_mat"])
                ss.append(json_data["time_solving"])
                tt.append(json_data["time_building_basis"]+json_data["time_assembling_stiffness_mat"]+json_data["time_solving"])
                ee.append(abs(exact-disp))
            # Index 0 holds order-1 runs, index 1 order-2 runs.
            basis[prefix].append(bb)
            assembly[prefix].append(aa)
            solve[prefix].append(ss)
            total[prefix].append(tt)
            errors[prefix].append(ee)

    # Emit one table per tet/hex pair.
    for i in range(0, len(prefixes), 2):
        pp = prefixes[i]
        qq = prefixes[i+1]
        print("\n\n" + pp + " " + qq + "\n")
        print("&$t_b$&\t$t_a$&\t$t_s$&\t$t$&\t$e_f$\\\\")
        print("\\hline")
        [p1b, p2b, q1b, q2b] = get_data(basis[pp], basis[qq])
        [p1a, p2a, q1a, q2a] = get_data(assembly[pp], assembly[qq])
        [p1s, p2s, q1s, q2s] = get_data(solve[pp], solve[qq])
        [p1t, p2t, q1t, q2t] = get_data(total[pp], total[qq])
        [p1e, p2e, q1e, q2e] = get_data(errors[pp], errors[qq])
        print_line("$P_1$", p1b, p1a, p1s, p1t, p1e)
        print_line("$P_2$", p2b, p2a, p2s, p2t, p2e)
        print_line("$Q_1$", q1b, q1a, q1s, q1t, q1e)
        print_line("$Q_2$", q2b, q2a, q2s, q2t, q2e)
| 3,749 | 29.737705 | 132 | py |
tet-vs-hex | tet-vs-hex-master/bending/mesh.py | import numpy as np
import igl
import meshplot as mp
import wildmeshing as wm
mp.offline()

# Bar outline: a 20 x 1 x 100 box centred at the origin (8 corner vertices).
v=np.array([
    [-10., -0.5, -50],
    [-10., 0.5, -50],
    [10., 0.5, -50],
    [10., -0.5, -50],
    #
    [-10., -0.5, 50],
    [-10., 0.5, 50],
    [10., 0.5, 50],
    [10., -0.5, 50]
])

# Box surface as triangles, two per quad face.
f = np.array([
    [0, 1, 2],
    [2, 3, 0],

    [4, 6, 5],
    [6, 4, 7],

    [2, 6, 7],
    [3, 2, 7],

    [1, 5, 6],
    [1, 6, 2],

    [1, 4, 5],
    [4, 1, 0],

    [0, 7, 4],
    [7, 0, 3]
])

# One-off steps, kept for reference: export the surface and tet-mesh it.
# igl.write_triangle_mesh("bbb.obj", v, f)
# p = mp.plot(v, f, shading={"wireframe": True, "point_size": 5}, return_plot=True, filename="plot.html")
# wm.tetrahedralize("bbb.obj", "test.mesh", edge_length_r=0.0263)

# Post-process test.mesh: snap every vertex lying within 1e-1 of a box face
# exactly onto it, so boundary conditions can select clean planes.
# Sentinels: n_v == -1 before the Vertices section, -2 right after the
# "Vertices" keyword (next line holds the count); `index` counts vertex lines.
n_v = -1
index = -1
with open("test.mesh", "r") as in_file:
    with open("test_snap.mesh", "w") as out_file:
        for line in in_file:
            if n_v == -2:
                # Line following the "Vertices" keyword: the vertex count.
                n_v = int(line)
                print(n_v)
            if "Vertices" in line:
                n_v = -2
            if index < n_v and index >= 0:
                # A vertex line: x y z ref
                nbrs = line.split(' ')
                assert(len(nbrs) == 4)
                x = float(nbrs[0])
                y = float(nbrs[1])
                z = float(nbrs[2])

                if abs(x-10) < 1e-1:
                    x = 10
                if abs(x+10) < 1e-1:
                    x = -10

                if abs(y-0.5) < 1e-1:
                    y = 0.5
                if abs(y+0.5) < 1e-1:
                    y = -0.5

                if abs(z-50) < 1e-1:
                    z = 50
                if abs(z+50) < 1e-1:
                    z = -50

                # nbrs[3] still carries its original trailing newline.
                line = "{} {} {} {}".format(x, y, z, nbrs[3])

            out_file.write(line)
            if n_v > 0:
                index += 1
| 1,766 | 19.546512 | 105 | py |
tet-vs-hex | tet-vs-hex-master/plate_hole/paraview.py | import re
from paraview.simple import *
# ParaView macro: arrange the plate-with-hole solutions in a 2x2 layout by
# reflecting/offsetting each quarter-plate dataset, grouped by mesh index.
group0 = GroupDatasets()
group0.Input.Clear()
group1 = GroupDatasets()
group1.Input.Clear()
group2 = GroupDatasets()
group2.Input.Clear()
group3 = GroupDatasets()
group3.Input.Clear()

# Collect all loaded readers, skipping the groups themselves.
sources = []
ii = 0
for k in GetSources():
    s = GetSources()[k]
    if s == group0 or s == group1 or s == group2 or s == group3:
        continue
    sources.append(s)

for s in sources:
    name = s.FileName[0]
    print(name)
    # Route each dataset to its group by the mesh index in the file name.
    current_group = None
    if "_0_" in name:
        current_group = group0
        print("group " + str(0))
    elif "_1_" in name:
        current_group = group1
        print("group " + str(1))
    elif "_2_" in name:
        current_group = group2
        print("group " + str(2))
    elif "_3_" in name:
        current_group = group3
        print("group " + str(3))
    Hide(s)
    # Quadrant placement: a Calculator warps by a constant offset and
    # Reflect filters mirror the quarter plate into the other quadrants.
    if re.search('hole_q_\\d_k2', name):
        calc = Calculator(s)
        calc.Function='jHat*0'
        warped = WarpByVector(calc)
        current_group.Input.append(warped)
    elif re.search('hole_q_\\d_k1', name):
        calc = Calculator(s)
        calc.Function='-iHat*5'
        warped = WarpByVector(calc)
        r = Reflect(warped)
        r.CopyInput = False
        current_group.Input.append(r)
    elif re.search('hole_\\d_k2', name):
        calc = Calculator(s)
        calc.Function='jHat*5'
        warped = WarpByVector(calc)
        r = Reflect(warped)
        r.CopyInput = False
        r.Plane = 'Y Max'
        current_group.Input.append(r)
    elif re.search('hole_\\d_k1', name):
        calc = Calculator(s)
        calc.Function='-iHat*5 + jHat*5'
        warped = WarpByVector(calc)
        r = Reflect(warped)
        r.CopyInput = False
        r.Plane = 'Y Max'
        r1 = Reflect(r)
        r1.CopyInput = False
        current_group.Input.append(r1)
    # Drop pipeline references before the next iteration.
    calc = None
    warped = None
    r = None
    r1 = None
    s = None
| 1,653 | 18.232558 | 61 | py |
tet-vs-hex | tet-vs-hex-master/plate_hole/run.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Run PolyFEM on every plate-with-hole OBJ mesh for orders 1 and 2,
    # writing one result JSON and one VTU visualization per combination.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    # out_folder = "results"
    out_folder = "results_inc"
    folder_path = "meshes"
    current_folder = cwd = os.getcwd()
    discr_orders = [1, 2]

    with open("plane_hole.json", 'r') as f:
        json_data = json.load(f)

    for mesh in glob.glob(os.path.join(folder_path, "*.obj")):
        basename = os.path.splitext(os.path.basename(mesh))[0]
        mesh = os.path.join(current_folder, mesh)
        json_data["mesh"] = mesh
        for discr_order in discr_orders:
            json_data["discr_order"] = discr_order
            json_data["output"] = os.path.join(current_folder, out_folder, "out_" + basename + "_k" + str(discr_order) + ".json")
            json_data["export"]["vis_mesh"] = os.path.join(current_folder, out_folder, "sol_" + basename + "_k" + str(discr_order) + ".vtu")
            # Write the patched config to a temp file and run headless.
            with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                with open(tmp_json.name, 'w') as f:
                    f.write(json.dumps(json_data, indent=4))
                args = [polyfem_exe,
                        '--json', tmp_json.name,
                        '--cmd']
                subprocess.run(args)
| 1,326 | 32.175 | 140 | py |
tet-vs-hex | tet-vs-hex-master/incompressible/paraview.py | from paraview.simple import *
import os
# ParaView macro: warp every loaded solution by its displacement field and
# save one screenshot per dataset with a shared color scale.
sources = []
for k in GetSources():
    s = GetSources()[k]
    sources.append(s)

warpeds = []
names = []
for s in sources:
    name = s.FileName[0]
    name = os.path.splitext(os.path.basename(name))[0]
    print(name)
    warped = WarpByVector(Input=s)
    warped.Vectors = ['POINTS', 'solution']
    Hide(s)
    warpedDisplay = Show(warped)
    ColorBy(warpedDisplay, ('POINTS', 'solution', 'Magnitude'))
    warpeds.append(warped)
    names.append(name)

HideAll()
RenderAllViews()

# Fixed 2D camera so every screenshot has an identical framing.
renderView = GetActiveViewOrCreate('RenderView')
renderView.InteractionMode = '2D'
renderView.CameraPosition = [0.4999999925494194, 0.5000000074505806, 3.0191335456195794]
renderView.CameraFocalPoint = [0.4999999925494194, 0.5000000074505806, 0.0]
renderView.CameraParallelScale = 0.9455052061902393

for i in range(len(warpeds)):
    w = warpeds[i]
    n = names[i]
    Show(w)
    # Shared transfer function: 12 bands over [0, 0.3] for comparability.
    solutionLUT = GetColorTransferFunction('solution')
    solutionLUT.ApplyPreset('Rainbow Uniform', True)
    solutionLUT.NumberOfTableValues = 12
    solutionLUT.RescaleTransferFunction(0.0, 0.3)
    # solutionLUTColorBar.Enabled = True
    # solutionLUTColorBar = GetScalarBar(solutionLUT, renderView)
    RenderAllViews()
    SaveScreenshot('./images/' + n + '.png', renderView, ImageResolution=[2*2204, 2*960])
    Hide(w)
| 1,273 | 23.037736 | 88 | py |
tet-vs-hex | tet-vs-hex-master/incompressible/print_times.py | import os
import json
import glob
import numpy as np
def get_data(p, q):
    """Average the first two runs-lists of each series.

    p and q are lists whose elements [0] and [1] hold the per-run values for
    order 1 and order 2 respectively.  Returns the four means as
    (p1, p2, q1, q2).
    """
    selected = (p[0], p[1], q[0], q[1])
    return tuple(np.mean(series) for series in selected)
def print_line(title, tb, ta, ts, t):
    """Print one LaTeX table row (title + 4 timings), compacting zero-padded
    exponents ("1.00e+00" -> "1.00")."""
    cells = "&\t".join("{:.2e}".format(v) for v in (tb, ta, ts, t))
    row = title + "&\t" + cells + "\\\\"
    # Same replacement order as the original: e-0 -> e-, e+0 -> e, e0 -> "".
    for old, new in (("e-0", "e-"), ("e+0", "e"), ("e0", "")):
        row = row.replace(old, new)
    print(row)
if __name__ == '__main__':
    # Print the mean total solver time (basis + assembly + solve) per run
    # for each formulation prefix found in the output folder.
    path = "out"
    prefixes = ["P1", "P2", "PM", "Q1", "Q2", "QM"]

    # NOTE(review): these dicts are initialized but never filled or read in
    # this script; get_data/print_line are also unused here (leftovers from
    # the bending variant of this file).
    basis = {}
    assembly = {}
    solve = {}
    total = {}
    for prefix in prefixes:
        basis[prefix] = []
        assembly[prefix] = []
        solve[prefix] = []
        total[prefix] = []

    for prefix in prefixes:
        total_time = 0
        count = 0
        for data in glob.glob(os.path.join(path, prefix + "*.json")):
            with open(data, 'r') as f:
                json_data = json.load(f)
            time = json_data["time_building_basis"] + json_data["time_assembling_stiffness_mat"] + json_data["time_solving"]
            total_time += time
            count += 1
        print(prefix)
        # NOTE(review): divides by zero if no JSON matches the prefix.
        print(round(total_time / count*100)/100)
| 1,295 | 20.966102 | 128 | py |
tet-vs-hex | tet-vs-hex-master/incompressible/run.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Near-incompressible sweep: for each mesh and Poisson ratio, run linear
    # elasticity at orders 1/2 and the mixed incompressible formulation.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    vtu_folder = "vtu"
    json_folder = "out"
    discr_orders = [1, 2]
    exts = ["obj"]
    folder_path = "meshes"
    current_folder = cwd = os.getcwd()

    E = 0.1
    # Poisson ratios approaching the incompressible limit 0.5... scaled: ~1.
    nus = [0.9, 0.99, 0.999, 0.9999]

    with open("run.json", 'r') as f:
        json_data = json.load(f)

    for ext in exts:
        for mesh in glob.glob(os.path.join(folder_path, "*." + ext)):
            basename = os.path.splitext(os.path.basename(mesh))[0]
            # "quad" meshes are Q elements and get 6 uniform refinements.
            title = "P" if basename.find("quad") == -1 else "Q"
            json_data["n_refs"] = 0 if basename.find("quad") == -1 else 6
            # print(json_data["n_refs"])
            mesh = os.path.join(current_folder, mesh)
            json_data["mesh"] = mesh
            for nu in nus:
                json_data["params"]["E"] = E
                json_data["params"]["nu"] = nu
                json_data["tensor_formulation"] = "LinearElasticity"
                for discr_order in discr_orders:
                    json_data["discr_order"] = discr_order
                    json_data["output"] = os.path.join(current_folder, json_folder, title + str(discr_order) + "_nu" + str(nu) + ".json")
                    json_data["export"]["vis_mesh"] = os.path.join(current_folder, vtu_folder, title + str(discr_order) + "_nu" + str(nu) + ".vtu")
                    with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                        with open(tmp_json.name, 'w') as f:
                            f.write(json.dumps(json_data, indent=4))
                        args = [polyfem_exe,
                                '--json', tmp_json.name,
                                '--cmd',
                                '--log_level', '1']
                        subprocess.run(args)
                # Mixed formulation ("PM"/"QM"): fixed order 2.
                json_data["tensor_formulation"] = "IncompressibleLinearElasticity"
                json_data["discr_order"] = 2
                json_data["output"] = os.path.join(current_folder, json_folder, title + "M_nu" + str(nu) + ".json")
                json_data["export"]["vis_mesh"] = os.path.join(current_folder, vtu_folder, title + "M_nu" + str(nu) + ".vtu")
                with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                    with open(tmp_json.name, 'w') as f:
                        f.write(json.dumps(json_data, indent=4))
                    args = [polyfem_exe,
                            '--json', tmp_json.name,
                            '--cmd',
                            '--log_level', '1']
                    subprocess.run(args)
| 2,715 | 37.8 | 147 | py |
tet-vs-hex | tet-vs-hex-master/twisted_bar/paraview.py | from paraview.simple import *
# ParaView macro: for every loaded twisted-bar solution, compute the twist
# angle along the bar axis with PlotOverLine and export it to CSV.
colors = {'P1': [9, 132, 227], 'P2': [108, 92, 231], 'Q1': [225, 112, 85], 'Q2': [214, 48, 49]}

centre = 9.5   # (x, y) of the sampling line, near the bar corner
ii = 0
sources = []
for k in GetSources():
    s = GetSources()[k]
    sources.append(s)

# Add a chart view next to the 3D view for the line plots.
layout = GetLayout()
layout.SplitHorizontal(0, 0.5)
SetActiveView(None)

lineChartView = CreateView('XYChartView')
layout.AssignView(2, lineChartView)
SetActiveView(lineChartView)

for source in sources:
    s = None
    filename = source.FileName[0]
    # Series title from the file name: Q* for hex ("_h") meshes, else P*.
    title = filename[-6:-4].title()
    hexes = filename.find('_h') > 0
    if hexes:
        title = "Q" + title[-1]
    else:
        title = "P" + title[-1]
    # if hexes:
    #     calc = Calculator(source)
    #     calc.CoordinateResults = True
    #     calc.Function = 'coordsX*iHat+coordsY*jHat+(coordsZ-50)*kHat'
    #     calc.ResultArrayName='disp_' + title
    #     s = calc
    # else:
    #     s = source
    s = source

    # Twist angle of the displaced section.
    calc = Calculator(s)
    calc.Function='atan(solution_X/solution_Y)'
    calc.ResultArrayName='angle_' + title
    # calc_res = Show(calc)

    # Sample along the bar axis from z=-50 to z=50.
    line = PlotOverLine(calc)
    line.Source.Point1=[centre, centre, -50]
    line.Source.Point2=[centre, centre, 50]
    line_res = Show(line)
    line_res.SeriesVisibility = ['angle_' + title]
    line_res.SeriesLabel[1] = title + ' ' + str(centre)
    line_res.SeriesColor[1] = str(colors[title][0]/255.)
    line_res.SeriesColor[2] = str(colors[title][1]/255.)
    line_res.SeriesColor[3] = str(colors[title][2]/255.)

    SaveData('./data/' + title + '.csv', proxy=line, Precision=30)
| 1,438 | 22.983333 | 95 | py |
tet-vs-hex | tet-vs-hex-master/twisted_bar/plot.py | import numpy as np
import math
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as plotly
# Per-series plot styling: P* = tetrahedral meshes, Q* = hexahedral meshes.
colors = {'P1': 'rgb(9, 132, 227)', 'P2': 'rgb(108, 92, 231)', 'Q1': 'rgb(225, 112, 85)', 'Q2': 'rgb(214, 48, 49)', 'Spline': 'rgb(45, 52, 54)'}
marker_shapes = {'P1': 'circle', 'P2': 'circle', 'Q1': 'star', 'Q2': 'star'}
marker_sizes = {'P1': 6, 'P2': 6, 'Q1': 10, 'Q2': 10}
def plot(data, name):
    """Angle-deviation curve for one sampled line.

    `data` is the ParaView CSV (pandas DataFrame) with columns arc_length and
    solution:0/1.  Returns a one-element trace list, or [] for empty data.
    """
    if data.empty:
        return []
    arc = data["arc_length"].values
    sol_x = data["solution:0"].values
    sol_y = data["solution:1"].values
    # Twist angle in degrees, measured from the initial 45-degree orientation.
    angle = -np.arctan2(sol_y, sol_x) / math.pi * 180 - 45.
    # y[0] += 45
    # Deviation from the linear twist 90deg over the 100-long bar.
    deviation = np.abs(angle - arc / 100 * 90)
    # Sort by arc length and drop the first 20 samples.
    samples = sorted(zip(arc, deviation))[20:]
    xs, ys = zip(*samples)
    return [go.Scatter(
        x=xs,
        y=ys,
        mode='lines',
        name=name,
        line=dict(color=(colors[name])),
        # marker=dict(symbol=marker_shapes[name], size=marker_sizes[name])
    )]
def plot_exact():
    """Dashed grey reference line y = m*z + b over z in [0, 100]."""
    slope = -0.015677840125715936
    intercept = -0.788410994193744
    z_range = [0, 100]
    values = [intercept + z * slope for z in z_range]
    return [go.Scatter(
        x=z_range,
        y=values,
        mode='lines',
        showlegend=False,
        line=dict(color='rgb(178, 190, 195)', dash='dash'),
    )]
if __name__ == '__main__':
    # Plot the angle deviation of the four series exported by paraview.py.
    out_folder = "data"
    output = "plot_fine"   # SVG export name; set to None for interactive

    p1 = pd.read_csv(out_folder + "/P1.csv")
    p2 = pd.read_csv(out_folder + "/P2.csv")
    q1 = pd.read_csv(out_folder + "/Q1.csv")
    q2 = pd.read_csv(out_folder + "/Q2.csv")

    layout = go.Layout(
        legend=dict(x=0.9, y=0.9),
        xaxis=dict(
            title="z",
            exponentformat='power',
            showticksuffix='all',
            showtickprefix='all',
            showexponent='all',
            # autotick=True,
            # type='log',
            nticks=5,
            tickfont=dict(
                size=16
            ),
            # autorange='reversed'
        ),
        yaxis=dict(
            title="Angle deviation from linear in degrees",
            # tickformat='.1e',
            exponentformat='power',
            ticks='',
            # tick0=0,
            # dtick=1,
            # tickangle=-45,
            # type='log',
            tickfont=dict(
                size=16
            ),
            range=[0, 3],
        ),
        font=dict(
            size=24
        ),
        hovermode='closest'
    )

    data = []
    data.extend(plot(p1, "P1"))
    data.extend(plot(p2, "P2"))
    data.extend(plot(q1, "Q1"))
    data.extend(plot(q2, "Q2"))
    # data.extend(plot(hs, ratio, "Spline"))

    fig = go.Figure(data=data, layout=layout)
    if output is not None:
        plotly.plot(fig, image="svg", image_filename=output)
    else:
        plotly.plot(fig)
| 2,794 | 21.540323 | 144 | py |
tet-vs-hex | tet-vs-hex-master/twisted_bar/save_screen.py | from paraview.simple import *
import os
# ParaView batch script: warp every loaded dataset by its "solution"
# vector field, frame the camera, and save one PNG per dataset.
sources = []
for k in GetSources():
    s = GetSources()[k]
    sources.append(s)
warpeds = []
names = []
for source in sources:
    # Dataset name derived from the file name (without directory/extension).
    name = source.FileName[0]
    name = os.path.splitext(os.path.basename(name))[0]
    print(name)
    # Hex meshes (name contains "_h") store absolute positions; subtract the
    # rest configuration via a Calculator so warping matches the tet runs.
    hexes = name.find('_h') > 0
    if hexes:
        calc = Calculator(source)
        calc.CoordinateResults = True
        calc.Function = 'coordsX*iHat+coordsY*jHat+(coordsZ-50)*kHat'
        calc.ResultArrayName='disp_' + name
        s = calc
    else:
        s = source
    warped = WarpByVector(Input=s)
    warped.Vectors = ['POINTS', 'solution']
    Hide(s)
    warpedDisplay = Show(warped)
    ColorBy(warpedDisplay, ('POINTS', 'solution', 'Magnitude'))
    warpeds.append(warped)
    names.append(name)
HideAll()
RenderAllViews()
# Fixed camera so every screenshot uses an identical viewpoint.
renderView = GetActiveViewOrCreate('RenderView')
renderView.OrientationAxesVisibility = 0
renderView.CameraPosition = [-0.0009298324584960938, -207.86145358251065, 0.0]
renderView.CameraFocalPoint = [-0.0009298324584960938, 0.00030040740966796875, 0.0]
renderView.CameraViewUp = [0.0, 0.0, 1.0]
renderView.CameraParallelScale = 53.79858068100626
for i in range(len(warpeds)):
    w = warpeds[i]
    n = names[i]
    Show(w)
    # Shared color map with a fixed range so images are comparable.
    solutionLUT = GetColorTransferFunction('solution')
    solutionLUT.ApplyPreset('Rainbow Uniform', True)
    solutionLUT.NumberOfTableValues = 12
    solutionLUT.RescaleTransferFunction(0.0, 30)
    # solutionLUTColorBar.Enabled = True
    # solutionLUTColorBar = GetScalarBar(solutionLUT, renderView)
    RenderAllViews()
    SaveScreenshot('./images/' + n + '.png', renderView, ImageResolution=[2*2204, 2*960])
    Hide(w)
| 1,562 | 23.421875 | 86 | py |
tet-vs-hex | tet-vs-hex-master/orthotropic/run_forces.py | import os
import json
import glob
import subprocess
import tempfile
import numpy as np
if __name__ == '__main__':
    # Batch-run PolyFEM over every mesh in ``folder_path`` for a sweep of
    # Neumann boundary forces, one result JSON per (mesh, order, force).
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    out_folder = "results"
    folder_path = "meshes"
    j_file = "ortho.json"
    discr_orders = [1, 2]
    # Downward y-forces: -1e-6 .. -2e-5 in 1e-6 steps.
    fs = -np.arange(0.000001, 0.000021, 0.000001)
    exts = [".mesh", ".HYBRID"]
    current_folder = cwd = os.getcwd()
    with open(j_file, 'r') as f:
        json_data = json.load(f)
    for ext in exts:
        for mesh in glob.glob(os.path.join(folder_path, "*" + ext)):
            print(mesh)
            basename = os.path.splitext(os.path.basename(mesh))[0]
            # NOTE(review): ``bc`` and ``key`` are computed but never used below.
            bc = os.path.join(current_folder, folder_path, basename + ".txt")
            mesh = os.path.join(current_folder, mesh)
            json_data["mesh"] = mesh
            key = "Q" if "HYBRID" in ext else "P"
            for discr_order in discr_orders:
                json_data["export"]["sol_at_node"] = -1
                # NOTE(review): the ``with ... as f`` below rebinds this loop
                # variable; harmless since the force is consumed first.
                for f in fs:
                    json_data["discr_order"] = discr_order
                    json_data["problem_params"]["neumann_boundary"][0]["value"][1] = f
                    json_data["output"] = os.path.join(current_folder, out_folder, basename + "_k" + str(discr_order) + "_f" + str(abs(f)) + ".json")
                    # json_data["output"] = os.path.join(current_folder, out_folder, "out_" + basename + "_k" + str(discr_order) + ".json")
                    # Write the patched config to a temp file and run PolyFEM on it.
                    with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                        with open(tmp_json.name, 'w') as f:
                            f.write(json.dumps(json_data, indent=4))
                        args = [polyfem_exe,
                                '--json', tmp_json.name,
                                '--cmd']
                        subprocess.run(args)
| 1,863 | 33.518519 | 149 | py |
tet-vs-hex | tet-vs-hex-master/orthotropic/run_times.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Timing benchmark: run PolyFEM ``n_runs`` times per (mesh, order) with a
    # fixed force, writing one timing JSON per repetition.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    out_folder = "times"
    folder_path = "meshes"
    j_file = "ortho.json"
    discr_orders = [1, 2]
    force = -0.00001
    exts = ["mesh", "HYBRID"]
    n_runs = 10
    current_folder = cwd = os.getcwd()
    with open(j_file, 'r') as f:
        json_data = json.load(f)
    json_data["problem_params"]["neumann_boundary"][0]["value"][1] = force
    for ext in exts:
        for mesh in glob.glob(os.path.join(folder_path, "*." + ext)):
            basename = os.path.splitext(os.path.basename(mesh))[0]
            # bc = os.path.join(current_folder, folder_path, basename + ".txt")
            mesh = os.path.join(current_folder, mesh)
            json_data["mesh"] = mesh
            # json_data["bc_tag"] = bc
            for discr_order in discr_orders:
                json_data["discr_order"] = discr_order
                # Repeat identical runs; output is suffixed _r1.._r10.
                for i in range(0, n_runs):
                    json_data["output"] = os.path.join(current_folder, out_folder, basename + "_k" + str(discr_order) + "_r" + str(i + 1) + ".json")
                    with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                        with open(tmp_json.name, 'w') as f:
                            f.write(json.dumps(json_data, indent=4))
                        args = [polyfem_exe,
                                '--json', tmp_json.name,
                                '--cmd']
                        subprocess.run(args)
| 1,598 | 29.169811 | 148 | py |
tet-vs-hex | tet-vs-hex-master/orthotropic/plot_forces.py | import os
import glob
import numpy as np
import json
import plotly.graph_objs as go
import plotly.offline as plotly
# Trace styling shared by all series, keyed by basis/discretization name.
colors = {'P1': 'rgb(9, 132, 227)', 'P2': 'rgb(108, 92, 231)', 'Q1': 'rgb(225, 112, 85)', 'Q2': 'rgb(214, 48, 49)'}
marker_shapes = {'P1': 'circle', 'P2': 'circle', 'Q1': 'star', 'Q2': 'star'}
marker_sizes = {'P1': 6, 'P2': 6, 'Q1': 10, 'Q2': 10}
def plot(f, k, slope, name):
    """Build a one-element list with a trace of |displacement - slope*force|
    against force; the legend shows the slope of a linear fit of that error.

    ``f``/``k`` are parallel lists of forces and displacements; returns []
    when either is empty.
    """
    if len(f) <= 0 or len(k) <= 0:
        return []
    x, y = zip(*sorted(zip(f, k))[:])
    # Deviation from the reference linear response y = slope * x.
    y = np.absolute(y - np.asarray(x)*slope)
    # Linear fit of the deviation; w[0] (its slope) goes in the legend label.
    w = np.polyfit(x[:], y[:], 1)
    trace = go.Scatter(
        x=x,
        y=y,
        mode='lines+markers',
        name="{} {:.2e}".format(name, w[0]),
        line=dict(color=(colors[name])),
        marker=dict(symbol=marker_shapes[name], size=marker_sizes[name])
    )
    return [trace]
def plot_exact(f, kk):
    """Return a one-element list with a dashed grey trace of the line
    y = kk * x spanning the range of the forces ``f``."""
    lo = np.amin(f)
    hi = np.amax(f)
    endpoints = [lo, hi]
    return [
        go.Scatter(
            x=endpoints,
            y=[kk * v for v in endpoints],
            mode='lines',
            showlegend=False,
            line=dict(color='rgb(178, 190, 195)', dash='dash')
        )
    ]
def load(mesh_name):
    """Collect (force, displacement) samples for ``mesh_name`` from the
    result JSONs in ``<script dir>/<out_folder>``.

    Returns ``(f1, k1, f2, k2)`` where ``f*`` are force magnitudes and
    ``k*`` the min y-displacements, split by discretization order
    (order 1 vs. order > 1).
    """
    k1, f1 = [], []
    k2, f2 = [], []
    pattern = os.path.join(os.path.dirname(__file__), out_folder, mesh_name + "_k*.json")
    for json_file in glob.glob(pattern):
        # Fix: the original rebound the open file handle ``f`` to a float
        # inside the ``with`` block; use distinct names instead.
        with open(json_file, 'r') as fh:
            json_data = json.load(fh)
        order = json_data["discr_order"]
        disp = json_data["sol_min"][1]
        # Sign flip: the input stores a downward (negative) y-force.
        force = -json_data["args"]["problem_params"]["neumann_boundary"][0]["value"][1]
        if force == 0:
            # A zero force likely means a malformed/incomplete run; flag it.
            print(json_file)
        if order == 1:
            k1.append(disp)
            f1.append(force)
        else:
            k2.append(disp)
            f2.append(force)
    return f1, k1, f2, k2
if __name__ == '__main__':
    # Plot |displacement - kk*force| vs force for the tet and hex beams.
    out_folder = "results"
    output = "ortho"
    tri_name = "square_beam"
    hex_name = "square_beam_h"
    # Reference stiffness: displacement/force from a reference run.
    kk = -10.601655711409355 / 0.00002
    tf1, tk1, tf2, tk2 = load(tri_name)
    hf1, hk1, hf2, hk2 = load(hex_name)
    layout = go.Layout(
        legend=dict(x=0.01, y=0.81),
        xaxis=dict(
            title="Force",
            exponentformat='power',
            showticksuffix='all',
            showtickprefix='all',
            showexponent='all',
            # autotick=True,
            nticks=5,
            tickfont=dict(
                size=16
            )
        ),
        yaxis=dict(
            title="Error",
            # tickformat='.1e',
            exponentformat='power',
            ticks='',
            # tick0=0,
            # dtick=1,
            # tickangle=-45,
            tickfont=dict(
                size=16
            ),
            autorange=True
        ),
        font=dict(
            size=24
        ),
        hovermode='closest'
    )
    data = []
    data.extend(plot(hf1, hk1, kk, "Q1"))
    data.extend(plot(hf2, hk2, kk, "Q2"))
    data.extend(plot(tf1, tk1, kk, "P1"))
    data.extend(plot(tf2, tk2, kk, "P2"))
    # data.extend(plot_exact(tf1, kk))
    fig = go.Figure(data=data, layout=layout)
    if output is not None:
        plotly.plot(fig, image="svg", image_filename=output)
    else:
        plotly.plot(fig)
| 3,201 | 21.871429 | 115 | py |
tet-vs-hex | tet-vs-hex-master/orthotropic/print_times.py | import os
import json
import glob
import numpy as np
def get_data(p, q):
    """Return the means of four series: (mean(p[0]), mean(p[1]),
    mean(q[0]), mean(q[1]))."""
    p1, p2 = np.mean(p[0]), np.mean(p[1])
    q1, q2 = np.mean(q[0]), np.mean(q[1])
    return p1, p2, q1, q2
def print_line(title, tb, ta, ts, t, e):
    """Print one LaTeX table row: ``title`` plus four timings and an error.

    Values are rendered in scientific notation and the exponents compacted
    for LaTeX (``e+00`` collapses away, ``e-03`` becomes ``e-3``).
    """
    cells = "&\t".join("{:.2e}".format(v) for v in (tb, ta, ts, t, e))
    row = "{}&\t{}\\\\".format(title, cells)
    for old, new in (("e-0", "e-"), ("e+0", "e"), ("e0", "")):
        row = row.replace(old, new)
    print(row)
if __name__ == '__main__':
    # Aggregate the timing JSONs produced by run_times.py and print LaTeX
    # table rows comparing P1/P2 (tet) against Q1/Q2 (hex).
    path = "times"
    prefixes = ["square_beam", "square_beam_h"]
    # Reference displacement per base mesh (force-normalized).
    exacts = {
        "square_beam": -10.601655711409355 / 2
    }
    discrs = [1, 2]
    basis = {}
    assembly = {}
    solve = {}
    total = {}
    errors = {}
    for prefix in prefixes:
        basis[prefix] = []
        assembly[prefix] = []
        solve[prefix] = []
        total[prefix] = []
        errors[prefix] = []
    for prefix in prefixes:
        for discr in discrs:
            bb = []
            aa = []
            ss = []
            tt = []
            ee = []
            for data in glob.glob(os.path.join(path, prefix + "_k" + str(discr) + "*.json")):
                with open(data, 'r') as f:
                    json_data = json.load(f)
                    # "rail" meshes report the probe displacement at a node,
                    # the others the global minimum.
                    if "rail" in prefix:
                        disp = json_data["sol_at_node"][1]
                    else:
                        disp = json_data["sol_min"][1]
                    # Strip the hex/nice suffixes to find the reference key.
                    exact = exacts[prefix.replace("_h", "").replace("_nice", "")]
                    bb.append(json_data["time_building_basis"])
                    aa.append(json_data["time_assembling_stiffness_mat"])
                    ss.append(json_data["time_solving"])
                    tt.append(json_data["time_building_basis"]+json_data["time_assembling_stiffness_mat"]+json_data["time_solving"])
                    ee.append(abs(exact-disp))
            basis[prefix].append(bb)
            assembly[prefix].append(aa)
            solve[prefix].append(ss)
            total[prefix].append(tt)
            errors[prefix].append(ee)
    # Prefixes come in (tet, hex) pairs; emit one table section per pair.
    for i in range(0, len(prefixes), 2):
        pp = prefixes[i]
        qq = prefixes[i+1]
        print("\n\n" + pp + " " + qq + "\n")
        print("&$t_b$&\t$t_a$&\t$t_s$&\t$t$&\t$e_f$\\\\")
        print("\\hline")
        [p1b, p2b, q1b, q2b] = get_data(basis[pp], basis[qq])
        [p1a, p2a, q1a, q2a] = get_data(assembly[pp], assembly[qq])
        [p1s, p2s, q1s, q2s] = get_data(solve[pp], solve[qq])
        [p1t, p2t, q1t, q2t] = get_data(total[pp], total[qq])
        [p1e, p2e, q1e, q2e] = get_data(errors[pp], errors[qq])
        print_line("$P_1$", p1b, p1a, p1s, p1t, p1e)
        print_line("$P_2$", p2b, p2a, p2s, p2t, p2e)
        print_line("$Q_1$", q1b, q1a, q1s, q1t, q1e)
        print_line("$Q_2$", q2b, q2a, q2s, q2t, q2e)
| 2,885 | 27.019417 | 132 | py |
tet-vs-hex | tet-vs-hex-master/ars/hists.py | import os
import glob
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as plotly
# Trace colors; tet/hex aliases share the P1/Q1 colors.
colors = {
    'tet': 'rgb(9, 132, 227)', 'P1': 'rgb(9, 132, 227)', 'P2': 'rgb(108, 92, 231)',
    'hex': 'rgb(225, 112, 85)', 'Q1': 'rgb(225, 112, 85)', 'Q2': 'rgb(214, 48, 49)'}
# Per-statistic (min, max, avg) settings, indexed by ``key`` below:
# whether to use a log10 axis, the clamp range, and the bin width.
use_logs = [
    True, False, False
]
ranges = [
    (3, 500),
    (3, 50),
    (0.6, 4)
]
steps = [
    (0.05, 10),
    (0.05, 1),
    (0.01, 0.05),
]
def plot(data, key):
    """Build percentage histograms of sqrt of the ``key``-th statistic for
    every series in ``data`` (a dict of (min, max, avg) array triples).

    Values are optionally log10-scaled and clamped to the configured max so
    outliers land in the last bin. All series must have equal length.
    """
    trace = []
    n = -1
    use_log = use_logs[key]
    # Column 0 of ranges/steps is the log setting, column 1 the linear one.
    index = 0 if use_log else 1
    mmax = ranges[key][index]
    step = steps[key][index]
    for k in data:
        v = data[k][key]
        if n == -1:
            n = len(v)
        else:
            assert(n == len(v))
        v = np.sqrt(v)
        if use_log:
            v = np.log10(v)
        # Clamp outliers into the last bin.
        v[v >= mmax] = mmax
        hist = go.Histogram(
            x = v,
            histnorm='percent',
            name = k,
            xbins=dict( # bins used for histogram
                start=0,
                end=mmax,
                size=step
            ),
            marker=dict(color=colors[k])
        )
        trace.append(hist)
    return trace
def load(prefix):
    """Load per-element quality statistics for both meshings of ``prefix``.

    Reads ``<prefix>_tet.csv`` (comma separated) and ``<prefix>_hex.txt``
    (whitespace separated) from the current directory and returns a dict
    mapping each suffix to a (min, max, avg) triple of arrays.
    """
    result = {}
    for suffix, ext in (("tet", "csv"), ("hex", "txt")):
        table = pd.read_csv(
            "{}_{}.{}".format(prefix, suffix, ext),
            delim_whitespace=(suffix == "hex"),
        )
        result[suffix] = (table["min"].values, table["max"].values, table["avg"].values)
    return result
if __name__ == '__main__':
    # Plot tet-vs-hex histograms for one quality statistic of one dataset.
    keys = {"min": 0, "max": 1, "avg": 2}
    # prefix = "10k"
    prefix = "hexalab"
    name = "avg"
    key = keys[name]
    output = "{}_{}".format(prefix, name)
    data = load(prefix)
    layout = go.Layout(
        legend=dict(x=0.81, y=0.81),
        xaxis=dict(
            title="Aspect ratio",
            exponentformat='power',
            showticksuffix='all',
            showtickprefix='all',
            showexponent='all',
            # autotick=True,
            nticks=5,
            tickfont=dict(
                size=16
            ),
            tickmode='array',
            # tickvals=[0, 0.5, 1, 1.5, 2, 2.5],
            # ticktext=['0', '', '1', '', '1e2', '']
        ),
        yaxis=dict(
            title="Percentage",
            # tickformat='.1e',
            exponentformat='power',
            ticks='',
            # tick0=0,
            # dtick=1,
            # tickangle=-45,
            tickfont=dict(
                size=16
            ),
            autorange=True
        ),
        font=dict(
            size=24
        ),
        bargap=0.1,
        bargroupgap=0,
        hovermode='closest'
    )
    plot_data = plot(data, key)
    fig = go.Figure(data=plot_data, layout=layout)
    if output is not None:
        plotly.plot(fig, image="svg", image_filename=output)
    else:
        plotly.plot(fig)
| 2,981 | 19.013423 | 98 | py |
tet-vs-hex | tet-vs-hex-master/fixed/print_table.py | import os
import json
def get_filed(data, other, field):
    """Extract the comparison quantity ``field`` from two result dicts.

    Supported fields: "time" (sum of basis/assembly/solve timings),
    "memory" (solver peak memory), "DOF" (dofs / 3, rounded), "error"
    (|min y-displacement - reference|); any other name is looked up
    directly. Returns the pair (value_from_data, value_from_other).
    """
    reference = -0.09694505138106606  # reference min y-displacement

    def extract(run):
        if field == "time":
            return (run["time_building_basis"]
                    + run["time_assembling_stiffness_mat"]
                    + run["time_solving"])
        if field == "memory":
            return run["solver_info"]["mem_total_peak"]
        if field == "DOF":
            return round(run["num_dofs"] / 3)
        if field == "error":
            return abs(run["sol_min"][1] - reference)
        return run[field]

    return extract(data), extract(other)
def print_line(data, other, field, is_diag, format):
    """Format one LaTeX cell comparing ``field`` between two runs.

    The smaller value is wrapped in ``\\goodcol`` unless ``is_diag`` is set,
    in which case the pair is wrapped in ``\\diagcol``. Returns the LaTeX
    snippet; the ``format`` argument is unused (kept for the callers).
    """
    vbase, vother = get_filed(data, other, field)

    def render(value):
        if field == "time":
            return "{:.2f}".format(value)
        if field == "memory":
            # presumably converts to MB for the table header -- units of
            # mem_total_peak are not visible here.
            return "{:,}".format(round(value / 1024))
        if field == "DOF":
            return "{:,}".format(value)
        if field == "error":
            return "{:.2e}".format(value)
        return "{}".format(value)

    base_s = render(vbase)
    other_s = render(vother)
    if is_diag:
        return "\\diagcol{" + base_s + "~/~" + other_s + "}"
    if vbase < vother:
        return "\\goodcol{" + base_s + "}~/~" + other_s
    return base_s + "~/~\\goodcol{" + other_s + "}"
if __name__ == '__main__':
    # Emit a LaTeX comparison table: baseline tet run vs the hex run matched
    # on each metric (time/memory/DOF/error), for each discretization pair.
    path = "results"
    discrs = [1, 2, 3]
    keys = ["time", "memory", "DOF", "error"]
    print("\\begin{tabular}{lr|cccc}")
    print("&&time (s)&\tmemory (MB)&\tDOF&\terror\\\\")
    for kk in discrs:
        # kk == 3 denotes the mixed P2/Q1 pairing.
        p = 2 if kk == 3 else kk
        q = 1 if kk == 3 else kk
        with open(os.path.join(path, "out_p" + str(p) + ".json"), 'r') as f:
            data = json.load(f)
        with open(os.path.join(path, "out_same_dof_p" + str(kk) + ".json"), 'r') as f:
            same_dof = json.load(f)
        with open(os.path.join(path, "out_same_err_p" + str(kk) + ".json"), 'r') as f:
            same_err = json.load(f)
        with open(os.path.join(path, "out_same_mem_p" + str(kk) + ".json"), 'r') as f:
            same_mem = json.load(f)
        with open(os.path.join(path, "out_same_time_p" + str(kk) + ".json"), 'r') as f:
            same_time = json.load(f)
        print("\\hline")
        print("\\multirow{" + ("4" if p == 1 else "3") + "}{*}{\\rotatebox{90}{$P_"+str(p)+"$/$Q_"+str(q)+"$}}")
        # One row per matched quantity; the diagonal cell is the one where
        # the matching criterion equals the reported metric.
        for k1 in keys:
            print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
            print("&" + k1 + "&")
            if k1 == "time":
                other = same_time
            elif k1 == "memory":
                other = same_mem
            elif k1 == "DOF":
                other = same_dof
            else:
                other = same_err
            for k2 in keys:
                print(print_line(data, other, k2, k1 == k2, ":") + ("\\\\" if k2 == keys[-1] else "&"))
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
        print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    print("\\end{tabular}")
| 3,494 | 32.285714 | 112 | py |
tet-vs-hex | tet-vs-hex-master/fixed/run.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Run PolyFEM on every hex mesh matching *_p<order>.HYBRID; the direct
    # tet run on mesh.mesh is kept commented out below.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    out_folder = "results"
    discr_orders = [2]
    folder_path = "meshes"
    current_folder = cwd = os.getcwd()
    with open("beam.json", 'r') as f:
        json_data = json.load(f)
    for discr_order in discr_orders:
        mesh = os.path.join(current_folder, folder_path + "/mesh.mesh")
        json_data["mesh"] = mesh
        # discr_order == 3 encodes the mixed P2/Q1 case -> base order 1.
        json_data["discr_order"] = 1 if discr_order == 3 else discr_order
        json_data["output"] = os.path.join(current_folder, out_folder, "out_p" + str(discr_order) + ".json")
        # with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
        #     with open(tmp_json.name, 'w') as f:
        #         f.write(json.dumps(json_data, indent=4))
        #     args = [polyfem_exe,
        #             '--json', tmp_json.name,
        #             '--cmd']
        #     subprocess.run(args)
        for mesh in glob.glob(os.path.join(folder_path, "*_p" + str(discr_order) + ".HYBRID")):
            basename = os.path.splitext(os.path.basename(mesh))[0]
            mesh = os.path.join(current_folder, mesh)
            print("\n\n----------------------------")
            print(basename)
            print("----------------------------")
            json_data["mesh"] = mesh
            json_data["output"] = os.path.join(current_folder, out_folder, "out_" + basename + ".json")
            with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                with open(tmp_json.name, 'w') as f:
                    f.write(json.dumps(json_data, indent=4))
                args = [polyfem_exe,
                        '--json', tmp_json.name,
                        '--cmd']
                subprocess.run(args)
| 1,862 | 31.684211 | 108 | py |
tet-vs-hex | tet-vs-hex-master/matching_error/run.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Repeated error/timing benchmark: for each refinement level run PolyFEM
    # over the P2 (tet), Q1/Q2 (hex) and spline mesh lists, ``n_runs`` times.
    # Most mesh lists are disabled (empty) with the originals kept as comments.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    print(polyfem_exe)
    out_folder = "err"
    refs = [0, 1]
    p2s = [[], [] #["square_beam.off_20.mesh"]
           # ["square_beam.off_1.mesh", "square_beam.off_10.mesh", "square_beam.off_20.mesh", "square_beam.off_5.mesh", "square_beam.off_50.mesh", "square_beam.off_6.mesh", "square_beam.off_7.mesh", "square_beam.off_8.mesh", "square_beam.off_9.mesh"],
           # ["square_beam.off_1.mesh", "square_beam.off_5.mesh", "square_beam.off_6.mesh", "square_beam.off_7.mesh", "square_beam.off_8.mesh", "square_beam.off_9.mesh", "square_beam.off_10.mesh", "square_beam.off_20.mesh"]
           ]
    q1s = [[], []
           # ["square_beam.off_1.HYBRID", "square_beam.off_10.HYBRID", "square_beam.off_20.HYBRID", "square_beam.off_5.HYBRID", "square_beam.off_50.HYBRID", "square_beam.off_6.HYBRID", "square_beam.off_7.HYBRID", "square_beam.off_8.HYBRID", "square_beam.off_9.HYBRID"],
           # ["square_beam.off_1.HYBRID", "square_beam.off_5.HYBRID", "square_beam.off_6.HYBRID", "square_beam.off_7.HYBRID", "square_beam.off_8.HYBRID", "square_beam.off_9.HYBRID", "square_beam.off_10.HYBRID"]
           ]
    use_reduced = True
    if use_reduced:
        q2s = [
            ["spline_square_beam.off_10.HYBRID", "spline_square_beam.off_20.HYBRID", "spline_square_beam.off_50.HYBRID", "spline_square_beam.off_7.HYBRID", "spline_square_beam.off_8.HYBRID", "spline_square_beam.off_9.HYBRID"],
            ["spline_square_beam.off_6.HYBRID", "spline_square_beam.off_7.HYBRID", "spline_square_beam.off_8.HYBRID", "spline_square_beam.off_9.HYBRID", "spline_square_beam.off_10.HYBRID", "spline_square_beam.off_20.HYBRID"]
        ]
    else:
        q2s = [
            ["spline_square_beam.off_1.HYBRID", "spline_square_beam.off_10.HYBRID", "spline_square_beam.off_20.HYBRID", "spline_square_beam.off_5.HYBRID",
             "spline_square_beam.off_50.HYBRID", "spline_square_beam.off_6.HYBRID", "spline_square_beam.off_7.HYBRID", "spline_square_beam.off_8.HYBRID", "spline_square_beam.off_9.HYBRID"],
            ["spline_square_beam.off_1.HYBRID", "spline_square_beam.off_5.HYBRID", "spline_square_beam.off_6.HYBRID", "spline_square_beam.off_7.HYBRID",
             "spline_square_beam.off_8.HYBRID", "spline_square_beam.off_9.HYBRID", "spline_square_beam.off_10.HYBRID", "spline_square_beam.off_20.HYBRID"]
        ]
    splines = [[], [] #["spline_square_beam.off_20.HYBRID"]
               # ["spline_square_beam.off_1.HYBRID", "spline_square_beam.off_10.HYBRID", "spline_square_beam.off_20.HYBRID", "spline_square_beam.off_5.HYBRID", "spline_square_beam.off_50.HYBRID", "spline_square_beam.off_6.HYBRID", "spline_square_beam.off_7.HYBRID", "spline_square_beam.off_8.HYBRID", "spline_square_beam.off_9.HYBRID"],
               # ["spline_square_beam.off_1.HYBRID", "spline_square_beam.off_5.HYBRID", "spline_square_beam.off_6.HYBRID", "spline_square_beam.off_7.HYBRID", "spline_square_beam.off_8.HYBRID", "spline_square_beam.off_9.HYBRID", "spline_square_beam.off_10.HYBRID", "spline_square_beam.off_20.HYBRID"]
               ]
    n_runs = 10
    folder_path = "meshes"
    current_folder = os.getcwd()
    with open("error.json", 'r') as f:
        json_data = json.load(f)
    for run in range(n_runs):
        run_f = "run_{}".format(run)
        # os.mkdir(os.path.join(current_folder, out_folder, run_f))
        for n_refs in refs:
            p2 = p2s[n_refs]
            q1 = q1s[n_refs]
            q2 = q2s[n_refs]
            spline = splines[n_refs]
            json_data["n_refs"] = n_refs
            json_data["quadrature_order"] = 4
            json_data["serendipity"] = False
            print("------------------")
            print("P2")
            print("------------------")
            for mesh_name in p2:
                mesh = os.path.join("meshes", mesh_name)
                basename = os.path.splitext(os.path.basename(mesh))[0].replace("square_beam.off_", "")
                mesh = os.path.join(current_folder, mesh)
                json_data["mesh"] = mesh
                json_data["discr_order"] = 2
                json_data["use_spline"] = False
                json_data["output"] = os.path.join(current_folder, out_folder, run_f, "P2_" + basename + "_" + str(n_refs) + ".json")
                with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                    with open(tmp_json.name, 'w') as f:
                        f.write(json.dumps(json_data, indent=4))
                    args = [polyfem_exe,
                            '--json', tmp_json.name,
                            '--cmd']
                    subprocess.run(args)
            #######################################################################
            print("------------------")
            print("Q1")
            print("------------------")
            # NOTE(review): the hex/spline loops look in "matching_error" while
            # the P2 loop uses "meshes" (= folder_path) -- confirm this is the
            # intended directory layout.
            for mesh_name in q1:
                mesh = os.path.join("matching_error", mesh_name)
                basename = os.path.splitext(os.path.basename(mesh))[0].replace("square_beam.off_", "")
                mesh = os.path.join(current_folder, mesh)
                json_data["mesh"] = mesh
                json_data["discr_order"] = 1
                json_data["use_spline"] = False
                json_data["output"] = os.path.join(current_folder, out_folder, run_f, "Q1_" + basename + "_" + str(n_refs) + ".json")
                with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                    with open(tmp_json.name, 'w') as f:
                        f.write(json.dumps(json_data, indent=4))
                    args = [polyfem_exe,
                            '--json', tmp_json.name,
                            '--cmd']
                    subprocess.run(args)
            #######################################################################
            print("------------------")
            print("Q2")
            print("------------------")
            for mesh_name in q2:
                mesh = os.path.join("matching_error", mesh_name)
                basename = os.path.splitext(os.path.basename(mesh))[0].replace("spline_square_beam.off_", "")
                mesh = os.path.join(current_folder, mesh)
                json_data["mesh"] = mesh
                json_data["discr_order"] = 2
                json_data["serendipity"] = True
                # Reduced integration pairs with the serendipity elements.
                json_data["quadrature_order"] = 2 if use_reduced else 4
                json_data["use_spline"] = False
                json_data["output"] = os.path.join(
                    current_folder, out_folder, run_f, "S" + ("R" if use_reduced else "") + "_" + basename + "_" + str(n_refs) + ".json")
                with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                    with open(tmp_json.name, 'w') as f:
                        f.write(json.dumps(json_data, indent=4))
                    args = [polyfem_exe,
                            '--json', tmp_json.name,
                            '--cmd']
                    subprocess.run(args)
            # Restore defaults changed by the Q2 branch.
            json_data["serendipity"] = False
            json_data["quadrature_order"] = 4
            # #######################################################################
            print("------------------")
            print("spline")
            print("------------------")
            for mesh_name in spline:
                mesh = os.path.join("matching_error", mesh_name)
                basename = os.path.splitext(os.path.basename(mesh))[0].replace("spline_square_beam.off_", "")
                mesh = os.path.join(current_folder, mesh)
                json_data["mesh"] = mesh
                json_data["discr_order"] = 1
                json_data["use_spline"] = True
                json_data["output"] = os.path.join(current_folder, out_folder, run_f, "spline_" + basename + "_" + str(n_refs) + ".json")
                with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                    with open(tmp_json.name, 'w') as f:
                        f.write(json.dumps(json_data, indent=4))
                    args = [polyfem_exe,
                            '--json', tmp_json.name,
                            '--cmd']
                    subprocess.run(args)
| 8,306 | 50.277778 | 329 | py |
tet-vs-hex | tet-vs-hex-master/matching_error/plot.py | import os
import json
import glob
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import pickle
import plotly.graph_objs as go
import plotly.offline as plotly
# Trace styling shared by all series, keyed by basis/discretization name.
colors = {'P1': 'rgb(9, 132, 227)', 'P2': 'rgb(108, 92, 231)', 'Q1': 'rgb(225, 112, 85)', 'Q2': 'rgb(214, 48, 49)', 'SR': 'rgb(253, 203, 110)', 'Q2R': 'rgb(255, 234, 167)', 'S': 'rgb(250, 177, 160)', 'Spline': 'rgb(45, 52, 54)'}
marker_shapes = {'P1': 'circle', 'P2': 'circle', 'Q1': 'star', 'Q2': 'star','Q2R': 'star','SR': 'star','S': 'star', 'Spline': 'square'}
marker_sizes = {'P1': 6, 'P2': 6, 'Q1': 10, 'Q2': 10, 'Q2R': 10, 'SR': 10, 'S': 10, 'Spline': 6}
def plot(k, ratio, name):
    """Build a one-element list with an error-vs-time trace.

    ``k`` is a list of per-run pandas tables (one per repetition); the error
    is |sol_min_y - 2*ratio| from the first run, the time is the mean over
    runs of basis + assembly + solve. Returns [] when ``k`` is empty.
    """
    if len(k) <=0:
        return []
    x = []
    # Error against the reference displacement (ratio is reference / 2).
    for v in k[0]["sol_min"].values:
        x.append(abs(v[1] - ratio * 2))
    y = np.zeros(k[0]["time_solving"].values.shape)
    # Average the total time over the repeated runs.
    for e in k:
        y += e["time_building_basis"].values + e["time_assembling_stiffness_mat"].values + e["time_solving"].values
        # y += e["time_solving"].values
        # y += e["solver_info.mem_total_peak"].values
    y /= len(k)
    # Sort by error and trim boundary samples (SR also drops the last two).
    if name == "SR":
        x, y = zip(*sorted(zip(x, y))[2:-2])
        # # x = x[2:]
        # # y = y[2:]
    else:
        x, y = zip(*sorted(zip(x, y))[2:])
    trace = go.Scatter(
        x=x,
        y=y,
        mode='lines+markers',
        name=name,
        line=dict(color=(colors[name])),
        marker=dict(symbol=marker_shapes[name], size=marker_sizes[name])
    )
    return [trace]
def load(mesh_name, first, name, data_folder):
    """Load the per-run result tables for ``mesh_name``, with pickle caching.

    Scans ``<out_folder>/run_0..9`` (module global set in __main__) for JSONs
    whose name starts with ``mesh_name``, groups them into one DataFrame per
    run split by discr_order (1 vs. >1), and returns the order-1 list when
    ``first`` is true, otherwise the higher-order list. The chosen list is
    cached at ``<data_folder>/<name>`` and returned from there when present.
    """
    k1 = []
    k2 = []
    pickle_path = os.path.join(data_folder, name)
    # Cache hit: return the previously pickled list directly.
    if os.path.isfile(pickle_path):
        with open(pickle_path, "rb") as fp:
            return pickle.load(fp)
    for r in range(10):
        k1t = pd.DataFrame()
        k2t = pd.DataFrame()
        for json_file in glob.glob(os.path.join(out_folder, "run_{}".format(r), mesh_name + "*.json")):
            with open(json_file, 'r') as f:
                json_data = json.load(f)
                if json_data is None:
                    print(json_file)
                k = json_data["discr_order"]
                # Flatten the nested JSON into a single-row DataFrame.
                tmp = json_normalize(json_data)
                if k == 1:
                    if k1t.empty:
                        k1t = tmp
                    else:
                        k1t = pd.concat([k1t, tmp])
                else:
                    if k2t.empty:
                        k2t = tmp
                    else:
                        k2t = pd.concat([k2t, tmp])
        k1.append(k1t)
        k2.append(k2t)
    tmp = k1 if first else k2
    with open(pickle_path, "wb") as fp:
        pickle.dump(tmp, fp)
    return tmp
if __name__ == '__main__':
    # Plot error-vs-time (log-log) for the different element families.
    out_folder = "err"
    data_folder = "data"
    tri_name = "P2_"
    hex_name = "Q1_"
    reduced_name = "Q2R_"
    q2_name = "Q2_"
    spline_name = "spline_"
    serendipity_r = "SR_"
    serendipity = "S_"
    # using dense P4 solution
    ratio = -0.09694505138106606 / 2
    output = None #"P2_Q1_S"
    tk2 = load(tri_name, False, "tk2.pkl", data_folder)
    hk1 = load(hex_name, True, "hk1.pkl", data_folder)
    hs = load(spline_name, True, "hs.pkl", data_folder)
    reduced = load(reduced_name, False, "reduced.pkl", data_folder)
    q2 = load(q2_name, False, "q2.pkl", data_folder)
    sr = load(serendipity_r, False, "sr.pkl", data_folder)
    S = load(serendipity, False, "S.pkl", data_folder)
    layout = go.Layout(
        legend=dict(x=0.9, y=0.9),
        xaxis=dict(
            title="Error",
            exponentformat='power',
            showticksuffix='all',
            showtickprefix='all',
            showexponent='all',
            # autotick=True,
            type='log',
            nticks=5,
            tickfont=dict(
                size=16
            ),
            # autorange='reversed'
        ),
        yaxis=dict(
            title="Time",
            # tickformat='.1e',
            exponentformat='power',
            ticks='',
            # tick0=0,
            # dtick=1,
            # tickangle=-45,
            type='log',
            tickfont=dict(
                size=16
            ),
            autorange=True
        ),
        font=dict(
            size=24
        ),
        hovermode='closest'
    )
    data = []
    data.extend(plot(hk1, ratio, "Q1"))
    data.extend(plot(tk2, ratio, "P2"))
    data.extend(plot(hs, ratio, "Spline"))
    # data.extend(plot(reduced, ratio, "Q2R"))
    data.extend(plot(q2, ratio, "Q2"))
    data.extend(plot(sr, ratio, "SR"))
    data.extend(plot(S, ratio, "S"))
    fig = go.Figure(data=data, layout=layout)
    if output is not None:
        plotly.plot(fig, image="svg", image_filename=output)
    else:
        plotly.plot(fig)
| 4,716 | 25.351955 | 228 | py |
tet-vs-hex | tet-vs-hex-master/stokes/paraview.py | from paraview.simple import *
import os
# ParaView batch script: color every loaded dataset by the magnitude of its
# "solution" field with a fixed 2D camera, saving one PNG per dataset.
sources = []
for k in GetSources():
    s = GetSources()[k]
    sources.append(s)
models = []
names = []
for s in sources:
    # Dataset name derived from the file name (without directory/extension).
    name = s.FileName[0]
    name = os.path.splitext(os.path.basename(name))[0]
    print(name)
    Hide(s)
    modelDisplay = Show(s)
    ColorBy(modelDisplay, ('POINTS', 'solution', 'Magnitude'))
    models.append(s)
    names.append(name)
HideAll()
RenderAllViews()
# Fixed top-down 2D camera over the unit square.
renderView = GetActiveViewOrCreate('RenderView')
renderView.InteractionMode = '2D'
renderView.CameraPosition = [0.5, 0.5, 10000.0]
renderView.CameraFocalPoint = [0.5, 0.5, 0.0]
renderView.CameraParallelScale = 0.5843857695756589
for i in range(len(models)):
    m = models[i]
    n = names[i]
    Show(m)
    # Shared color map with a fixed range so images are comparable.
    solutionLUT = GetColorTransferFunction('solution')
    solutionLUT.ApplyPreset('Rainbow Uniform', True)
    solutionLUT.NumberOfTableValues = 12
    solutionLUT.RescaleTransferFunction(0.0, 0.25)
    # solutionLUTColorBar.Enabled = True
    # solutionLUTColorBar = GetScalarBar(solutionLUT, renderView)
    RenderAllViews()
    SaveScreenshot('./images/' + n + '.png', renderView, ImageResolution=[2*2204, 2*960])
    Hide(m)
| 1,113 | 21.28 | 86 | py |
tet-vs-hex | tet-vs-hex-master/stokes/run.py | import os
import json
import glob
# import polyfempy
import subprocess
import tempfile
if __name__ == '__main__':
    # Run PolyFEM (CLI) on every .obj mesh; the in-process polyfempy
    # alternative is kept commented out.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    out_folder = "results"
    discr_orders = [2]
    folder_path = "meshes"
    current_folder = cwd = os.getcwd()
    with open("experiment.json", 'r') as f:
        json_data = json.load(f)
    # solver = polyfempy.Solver()
    # solver.set_log_level(0)
    for mesh in glob.glob(os.path.join(folder_path, "*.obj")):
        basename = os.path.splitext(os.path.basename(mesh))[0]
        mesh = os.path.join(current_folder, mesh)
        json_data["mesh"] = mesh
        # Quad meshes get 6 uniform refinements, tri meshes none.
        json_data["n_refs"] = 0 if basename.find("quad") == -1 else 6
        for discr_order in discr_orders:
            json_data["discr_order"] = discr_order
            json_data["output"] = os.path.join(current_folder, out_folder, basename + "_k" + str(discr_order) + ".json")
            json_data["export"]["vis_mesh"] = os.path.join(current_folder, out_folder, basename + "_k" + str(discr_order) + ".vtu")
            # solver.load_parameters(json.dumps(json_data))
            # solver.load_mesh()
            # solver.solve()
            # solver.export_data()
            with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                with open(tmp_json.name, 'w') as f:
                    f.write(json.dumps(json_data, indent=4))
                args = [polyfem_exe,
                        '--json', tmp_json.name,
                        '--cmd',
                        '--log_level', '1']
                subprocess.run(args)
| 1,628 | 31.58 | 131 | py |
tet-vs-hex | tet-vs-hex-master/stokes/plot.py | import os
import glob
import numpy as np
import json
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as plotly
# Trace styling shared by all series, keyed by basis/discretization name.
colors = {'P1': 'rgb(9, 132, 227)', 'P2': 'rgb(108, 92, 231)', 'Q1': 'rgb(225, 112, 85)', 'Q2': 'rgb(214, 48, 49)'}
marker_shapes = {'P1': 'circle', 'P2': 'circle', 'Q1': 'star', 'Q2': 'star'}
marker_sizes = {'P1': 6, 'P2': 6, 'Q1': 10, 'Q2': 10}
def plot(xx, yy, name, pos):
    """Build a line trace of the first third of (xx, yy), sorted by x.

    ``name`` picks the color (solid for Q2, dashed otherwise) and, together
    with ``pos``, forms the legend label. Returns a single trace.
    """
    # Keep only the first third of the samples.
    n = int(len(xx)/3)
    x, y = zip(*sorted(zip(xx, yy))[:n])
    trace = go.Scatter(
        x=x,
        y=y,
        mode='lines',
        name="{} {}".format(name, pos),
        line=dict(color=(colors[name]), dash='solid' if name == "Q2" else 'dash'),
    )
    return trace
def load(folder, fname):
    """Read ``<folder>/<fname>.csv`` and return the (arc_length,
    solution:1) columns as two numpy arrays."""
    table = pd.read_csv(os.path.join(folder, fname + ".csv"))
    return np.array(table["arc_length"]), np.array(table["solution:1"])
if __name__ == '__main__':
    # Plot the y-velocity profiles from the Stokes runs; each dataset name
    # maps to (basis label, position label) for the legend.
    root = "data"
    data = {
        "q50": ["Q2", "0.5"], "t50": ["P2", "0.5"],
        "q05": ["Q2", "0.05"], "t05": ["P2", "0.05"],
        "q01": ["Q2", "0.01"], "t01": ["P2", "0.01"]
    }
    output = "stokes"
    trace = []
    for out_f in data:
        x, y = load(root, out_f)
        trace.append(plot(x, y, data[out_f][0], data[out_f][1]))
    layout = go.Layout(
        legend=dict(x=0.6, y=0.87),
        xaxis=dict(
            title="Time",
            showticksuffix='all',
            showtickprefix='all',
            showexponent='all',
            # autotick=True,
            nticks=6,
            tickfont=dict(
                size=16
            )
        ),
        yaxis=dict(
            title="Y-velocity",
            # title="Error",
            # type='log',
            tickformat='.0e',
            exponentformat='power',
            ticks='',
            # tick0=0,
            # dtick=1,
            # tickangle=-45,
            tickfont=dict(
                size=16
            ),
            autorange=True,
        ),
        font=dict(
            size=24
        ),
        hovermode='closest'
    )
    fig = go.Figure(data=trace, layout=layout)
    if output is not None:
        plotly.plot(fig, image="svg", image_filename=output)
    else:
        plotly.plot(fig)
| 2,267 | 22.625 | 115 | py |
tet-vs-hex | tet-vs-hex-master/L/run.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Run PolyFEM on every tet (.mesh) and hex (.HYBRID) mesh for orders 1
    # and 2, exporting both a result JSON and a visualization VTU per run.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    out_folder = "results"
    vtu_folder = "vtu"
    discr_orders = [1, 2]
    exts = ["mesh", "HYBRID"]
    folder_path = "meshes"
    current_folder = cwd = os.getcwd()
    with open("bar.json", 'r') as f:
        json_data = json.load(f)
    for ext in exts:
        # Hex meshes are refined 4 times; outputs are tagged p*/q*.
        n_refs = 0 if ext == "mesh" else 4
        name = "p" if ext == "mesh" else "q"
        json_data["n_refs"] = n_refs
        for mesh in glob.glob(os.path.join(folder_path, "*." + ext)):
            basename = os.path.splitext(os.path.basename(mesh))[0]
            mesh = os.path.join(current_folder, mesh)
            json_data["mesh"] = mesh
            for discr_order in discr_orders:
                json_data["discr_order"] = discr_order
                json_data["output"] = os.path.join(current_folder, out_folder, "out_" + basename + "_" + name + str(discr_order) + ".json")
                json_data["export"]["vis_mesh"] = os.path.join(current_folder, vtu_folder, "out_" + basename + "_" + name + str(discr_order) + ".vtu")
                with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                    with open(tmp_json.name, 'w') as f:
                        f.write(json.dumps(json_data, indent=4))
                    args = [polyfem_exe,
                            '--json', tmp_json.name,
                            '--cmd']
                    subprocess.run(args)
| 1,575 | 31.833333 | 151 | py |
tet-vs-hex | tet-vs-hex-master/L/save_screen.py | from paraview.simple import *
import os
# Collect every pipeline source currently loaded in the ParaView session.
sources = []
for k in GetSources():
    s = GetSources()[k]
    sources.append(s)
warpeds = []
names = []
# For each source: warp the geometry by the "solution" vector field and
# color the result by the averaged scalar value.
for s in sources:
    # Screenshot name = the source file's basename without extension.
    name = s.FileName[0]
    name = os.path.splitext(os.path.basename(name))[0]
    print(name)
    warped = WarpByVector(Input=s)
    warped.Vectors = ['POINTS', 'solution']
    Hide(s)
    warpedDisplay = Show(warped)
    ColorBy(warpedDisplay, ('POINTS', 'scalar_value_avg'))
    warpeds.append(warped)
    names.append(name)
HideAll()
RenderAllViews()
# Fixed camera so every screenshot is taken from the same viewpoint.
renderView = GetActiveViewOrCreate('RenderView')
renderView.OrientationAxesVisibility = 0
renderView.CameraPosition = [4.2528865459817435, 1.758183607362867, -4.761903658658314]
renderView.CameraFocalPoint = [-0.9194307035427889, -0.0903877956036081, 1.7056893390118553]
renderView.CameraViewUp = [-0.1470083030936321, 0.9758847754930258, 0.16136128340608114]
renderView.CameraParallelScale = 1.5
# Show one warped dataset at a time and save it as a high-resolution PNG.
for i in range(len(warpeds)):
    w = warpeds[i]
    n = names[i]
    Show(w)
    # Shared color map: 12-band rainbow with a fixed range so the images
    # of the different datasets are directly comparable.
    solutionLUT = GetColorTransferFunction('scalar_value_avg')
    solutionLUT.ApplyPreset('Rainbow Uniform', True)
    solutionLUT.NumberOfTableValues = 12
    solutionLUT.RescaleTransferFunction(0, 140000)
    # solutionLUTColorBar.Enabled = True
    # solutionLUTColorBar = GetScalarBar(solutionLUT, renderView)
    RenderAllViews()
    SaveScreenshot('./images/' + n + '.png', renderView, ImageResolution=[2*2204, 2*960])
    Hide(w)
| 1,374 | 24.462963 | 92 | py |
tet-vs-hex | tet-vs-hex-master/time-depentent/paraview.py | from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()
# find source
#dense
# Point id to probe over time, per dataset. The ids differ between the
# P* and Q* datasets — presumably because the underlying meshes have
# different vertex numbering (confirm against the mesh files).
sources = {
    "P1": 17450,
    "P2": 17450,
    "Q1": 36864,
    "Q2": 36864,
}
#coarse
# sources = {
#     "P1": 49531,
#     "P2": 49531,
#     "Q1": 26896,
#     "Q2": 26896,
# }
# Create a new 'Quartile Chart View'
quartileChartView = CreateView('QuartileChartView')
quartileChartView.ViewSize = [976, 426]
spreadSheetView = CreateView('SpreadSheetView')
spreadSheetView.BlockSize = 1024L  # NOTE: Python-2 long literal (old ParaView scripting)
# get layout
layout = GetLayout()
layout.AssignView(2, quartileChartView)
layout.AssignView(3, spreadSheetView)
# For every dataset: select its probe point, plot the point's solution over
# time, and export the spreadsheet to CSV for the offline plotting script.
for k in sources:
    source = FindSource('{}_step_*'.format(k))
    # create a new 'Plot Selection Over Time'
    sel = SelectPoints(query="id=={}".format(sources[k]))
    plotSelectionOverTime = PlotSelectionOverTime(Input=source, Selection=sel)
    # show data in view
    plotSelectionOverTimeDisplay = Show(plotSelectionOverTime, quartileChartView)
    plotSelectionOverTimeTable = Show(plotSelectionOverTime, spreadSheetView)
    ExportView('/Users/teseo/data/tet-vs-hex/time-depentent/vtu/{}.csv'.format(k), view=spreadSheetView)
    # trace defaults for the display properties.
    # The Series* lists below are machine-generated ParaView trace output
    # (alternating name/value pairs); only the 'solution (1)' series is
    # made visible, labelled with the dataset key k.
    plotSelectionOverTimeDisplay.AttributeType = 'Row Data'
    plotSelectionOverTimeDisplay.UseIndexForXAxis = 0
    plotSelectionOverTimeDisplay.XArrayName = 'Time'
    plotSelectionOverTimeDisplay.SeriesVisibility = ['solution (1) (stats)']
    plotSelectionOverTimeDisplay.SeriesLabel = ['discr (stats)', 'discr (stats)', 'scalar_value (stats)', 'scalar_value (stats)', 'scalar_value_avg (stats)', 'scalar_value_avg (stats)', 'solution (0) (stats)', 'solution (0) (stats)', 'solution (1) (stats)', k, 'solution (2) (stats)', 'solution (2) (stats)', 'solution (Magnitude) (stats)', 'solution (Magnitude) (stats)', 'tensor_value_11 (stats)', 'tensor_value_11 (stats)', 'tensor_value_12 (stats)', 'tensor_value_12 (stats)', 'tensor_value_21 (stats)', 'tensor_value_21 (stats)', 'tensor_value_22 (stats)', 'tensor_value_22 (stats)', 'vtkOriginalPointIds (stats)', 'vtkOriginalPointIds (stats)', 'X (stats)', 'X (stats)', 'Y (stats)', 'Y (stats)', 'Z (stats)', 'Z (stats)', 'N (stats)', 'N (stats)', 'Time (stats)', 'Time (stats)', 'vtkValidPointMask (stats)', 'vtkValidPointMask (stats)']
    plotSelectionOverTimeDisplay.SeriesColor = ['discr (stats)', '0', '0', '0', 'scalar_value (stats)', '0.89', '0.1', '0.11', 'scalar_value_avg (stats)', '0.22', '0.49', '0.72', 'solution (0) (stats)', '0.3', '0.69', '0.29', 'solution (1) (stats)', '0.6', '0.31', '0.64', 'solution (2) (stats)', '1', '0.5', '0', 'solution (Magnitude) (stats)', '0.65', '0.34', '0.16', 'tensor_value_11 (stats)', '0', '0', '0', 'tensor_value_12 (stats)', '0.89', '0.1', '0.11', 'tensor_value_21 (stats)', '0.22', '0.49', '0.72', 'tensor_value_22 (stats)', '0.3', '0.69', '0.29', 'vtkOriginalPointIds (stats)', '0.6', '0.31', '0.64', 'X (stats)', '1', '0.5', '0', 'Y (stats)', '0.65', '0.34', '0.16', 'Z (stats)', '0', '0', '0', 'N (stats)', '0.89', '0.1', '0.11', 'Time (stats)', '0.22', '0.49', '0.72', 'vtkValidPointMask (stats)', '0.3', '0.69', '0.29']
    plotSelectionOverTimeDisplay.SeriesPlotCorner = ['discr (stats)', '0', 'scalar_value (stats)', '0', 'scalar_value_avg (stats)', '0', 'solution (0) (stats)', '0', 'solution (1) (stats)', '0', 'solution (2) (stats)', '0', 'solution (Magnitude) (stats)', '0', 'tensor_value_11 (stats)', '0', 'tensor_value_12 (stats)', '0', 'tensor_value_21 (stats)', '0', 'tensor_value_22 (stats)', '0', 'vtkOriginalPointIds (stats)', '0', 'X (stats)', '0', 'Y (stats)', '0', 'Z (stats)', '0', 'N (stats)', '0', 'Time (stats)', '0', 'vtkValidPointMask (stats)', '0']
    plotSelectionOverTimeDisplay.SeriesLabelPrefix = ''
    plotSelectionOverTimeDisplay.SeriesLineStyle = ['discr (stats)', '1', 'scalar_value (stats)', '1', 'scalar_value_avg (stats)', '1', 'solution (0) (stats)', '1', 'solution (1) (stats)', '1', 'solution (2) (stats)', '1', 'solution (Magnitude) (stats)', '1', 'tensor_value_11 (stats)', '1', 'tensor_value_12 (stats)', '1', 'tensor_value_21 (stats)', '1', 'tensor_value_22 (stats)', '1', 'vtkOriginalPointIds (stats)', '1', 'X (stats)', '1', 'Y (stats)', '1', 'Z (stats)', '1', 'N (stats)', '1', 'Time (stats)', '1', 'vtkValidPointMask (stats)', '1']
    plotSelectionOverTimeDisplay.SeriesLineThickness = ['discr (stats)', '2', 'scalar_value (stats)', '2', 'scalar_value_avg (stats)', '2', 'solution (0) (stats)', '2', 'solution (1) (stats)', '2', 'solution (2) (stats)', '2', 'solution (Magnitude) (stats)', '2', 'tensor_value_11 (stats)', '2', 'tensor_value_12 (stats)', '2', 'tensor_value_21 (stats)', '2', 'tensor_value_22 (stats)', '2', 'vtkOriginalPointIds (stats)', '2', 'X (stats)', '2', 'Y (stats)', '2', 'Z (stats)', '2', 'N (stats)', '2', 'Time (stats)', '2', 'vtkValidPointMask (stats)', '2']
    plotSelectionOverTimeDisplay.SeriesMarkerStyle = ['discr (stats)', '0', 'scalar_value (stats)', '0', 'scalar_value_avg (stats)', '0', 'solution (0) (stats)', '0', 'solution (1) (stats)', '0', 'solution (2) (stats)', '0', 'solution (Magnitude) (stats)', '0', 'tensor_value_11 (stats)', '0', 'tensor_value_12 (stats)', '0', 'tensor_value_21 (stats)', '0', 'tensor_value_22 (stats)', '0', 'vtkOriginalPointIds (stats)', '0', 'X (stats)', '0', 'Y (stats)', '0', 'Z (stats)', '0', 'N (stats)', '0', 'Time (stats)', '0', 'vtkValidPointMask (stats)', '0']
    # update the view to ensure updated data information
    quartileChartView.Update()
#### uncomment the following to render all views
# RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
| 5,681 | 81.347826 | 844 | py |
tet-vs-hex | tet-vs-hex-master/time-depentent/run.py | import os
import json
import glob
import subprocess
import tempfile
if __name__ == '__main__':
    # Batch driver for the time-dependent experiment: run PolyFEM on every
    # .obj mesh for each discretization order, then collect the per-step
    # VTU/OBJ outputs under ./vtu with names like P1_step_07.vtu.
    polyfem_exe = os.path.join(os.environ["POLYFEM_BIN_DIR"], "PolyFEM_bin")
    vtu_folder = "vtu"
    json_folder = "out"
    # Polynomial degrees of the FE basis to sweep over.
    discr_orders = [1, 2]
    # 40 steps over [0, 0.5] — PolyFEM writes step_0 .. step_40.
    time_steps = 40
    tend = 0.5
    exts = ["obj"]
    folder_path = "meshes"
    current_folder = cwd = os.getcwd()
    # Base PolyFEM configuration; per-run fields are patched in below.
    with open("run.json", 'r') as f:
        json_data = json.load(f)
    json_data["time_steps"] = time_steps
    json_data["tend"] = tend
    for ext in exts:
        for mesh in glob.glob(os.path.join(folder_path, "*." + ext)):
            basename = os.path.splitext(os.path.basename(mesh))[0]
            print(basename, "\n-----------")
            # "quad" meshes are tagged Q and get 6 uniform refinements;
            # everything else is tagged P with no refinement.
            title = "P" if basename.find("quad") == -1 else "Q"
            json_data["n_refs"] = 0 if basename.find("quad") == -1 else 6
            # json_data["n_refs"] = 0 if basename.find("quad") == -1 else 3
            print(json_data["n_refs"])
            mesh = os.path.join(current_folder, mesh)
            json_data["mesh"] = mesh
            for discr_order in discr_orders:
                json_data["discr_order"] = discr_order
                json_data["output"] = os.path.join(current_folder, json_folder, title + str(discr_order) + ".json")
                # Run PolyFEM headless from a temporary patched config.
                with tempfile.NamedTemporaryFile(suffix=".json") as tmp_json:
                    with open(tmp_json.name, 'w') as f:
                        f.write(json.dumps(json_data, indent=4))
                    args = [polyfem_exe,
                            '--json', tmp_json.name,
                            '--cmd',
                            '--log_level', '1']
                    subprocess.run(args)
                # Move the per-step solver outputs into the vtu folder,
                # prefixing them with the run name (e.g. Q2_step_03.vtu).
                for s in range(time_steps+1):
                    os.rename(
                        os.path.join(current_folder, "step_{}.vtu".format(s)),
                        os.path.join(current_folder, vtu_folder, "{}{}_step_{:02d}.vtu".format(title, discr_order, s)))
                    os.rename(
                        os.path.join(current_folder, "step_{}.obj".format(s)),
                        os.path.join(current_folder, vtu_folder, "{}{}_step_{:02d}.obj".format(title, discr_order, s)))
| 2,206 | 35.180328 | 119 | py |
tet-vs-hex | tet-vs-hex-master/time-depentent/plot.py | import os
import glob
import numpy as np
import json
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as plotly
# Per-series plot styling, keyed by dataset name (P* drawn as circles,
# Q* as larger stars; presumably P = tet and Q = hex runs — confirm).
colors = {'P1': 'rgb(9, 132, 227)', 'P2': 'rgb(108, 92, 231)', 'Q1': 'rgb(225, 112, 85)', 'Q2': 'rgb(214, 48, 49)'}
marker_shapes = {'P1': 'circle', 'P2': 'circle', 'Q1': 'star', 'Q2': 'star'}
marker_sizes = {'P1': 6, 'P2': 6, 'Q1': 10, 'Q2': 10}
def plot(t, disp, name, suffix):
    """Build a plotly scatter trace for one (time, displacement) series.

    Points are sorted by time. Styling comes from the module-level
    colors/marker dicts keyed by *name*; the "coarse" variant gets a
    solid line with visible markers, any other variant a dashed line
    with zero-size markers.
    """
    ordered = sorted(zip(t, disp))
    x, y = zip(*ordered)
    is_coarse = (suffix == "coarse")
    return go.Scatter(
        x=x,
        y=y,
        mode='lines+markers',
        name="{} {}".format(name, suffix),
        line=dict(color=(colors[name]),
                  dash='solid' if is_coarse else "dash"),
        marker=dict(symbol=marker_shapes[name],
                    size=marker_sizes[name] * (1 if is_coarse else 0)),
    )
def load(name, folder, tend):
    """Read <folder>/<name>.csv and return (name, rescaled time, displacement).

    The "Time" column is rescaled so that its maximum equals *tend*;
    the displacement is the "max(solution (0))" column of the ParaView
    CSV export.
    """
    frame = pd.read_csv(os.path.join(folder, "{}.csv".format(name)))
    raw_time = np.array(frame["Time"])
    scaled_time = raw_time / np.max(raw_time) * tend
    displacement = np.array(frame["max(solution (0))"])
    return name, scaled_time, displacement
if __name__ == '__main__':
    # Plot the probed X-displacement over time for every dataset in both
    # the fine ("vtu") and coarse ("vtu_coarse") result folders.
    out_folders = {"vtu": "fine", "vtu_coarse": "coarse"}
    # out_folders = {"vtu_coarse": "coarse"}
    tend = 0.5
    # If not None, plotly also exports an SVG with this filename stem.
    output = "plot"
    trace = []
    for out_f in out_folders:
        for k in colors:
            n, t, disp = load(k, out_f, tend)
            # err = np.abs(dispp1 - exact)
            trace.append(plot(t, disp, n, out_folders[out_f]))
    layout = go.Layout(
        legend=dict(x=0.1, y=0.87),
        xaxis=dict(
            title="Time",
            showticksuffix='all',
            showtickprefix='all',
            showexponent='all',
            # autotick=True,
            nticks=6,
            tickfont=dict(
                size=16
            )
        ),
        yaxis=dict(
            title="X-displacement",
            # title="Error",
            # type='log',
            tickformat='.0e',
            exponentformat='power',
            ticks='',
            # tick0=0,
            # dtick=1,
            # tickangle=-45,
            tickfont=dict(
                size=16
            ),
            autorange=True,
        ),
        font=dict(
            size=24
        ),
        hovermode='closest'
    )
    fig = go.Figure(data=trace, layout=layout)
    if output is not None:
        plotly.plot(fig, image="svg", image_filename=output)
    else:
        plotly.plot(fig)
| 2,420 | 24.21875 | 115 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/setup.py | from setuptools import setup, find_packages
# Packaging metadata for pytorchts ("PyTorch Probabilistic Time Series
# Modeling framework"). Executed at install/build time by setuptools.
setup(
    name='pytorchts',
    version='0.1.0',
    description="PyTorch Probabilistic Time Series Modeling framework",
    # Long description is pulled straight from the repository README.
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    url='https://github.com/kashif/pytorch-ts',
    license='MIT',
    packages=find_packages(exclude=["tests"]),
    include_package_data=True,
    zip_safe=True,
    python_requires=">=3.6",
    # Runtime dependencies; torch and pydantic are pinned to exact versions.
    install_requires = [
        'torch==1.4.0',
        'holidays',
        'numpy',
        'pandas',
        'scipy',
        'tqdm',
        'pydantic==1.4.0',
        'matplotlib',
        'python-rapidjson',
        'tensorboard',
    ],
    test_suite='tests',
    tests_require = [
        'flake8',
        'pytest'
    ],
)
#pip install git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git
| 871 | 21.947368 | 74 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/load_dataset.py | import numpy as np
import pandas as pd
import copy
import os
from pathlib import Path
from utils import convert_price_file
from pts.dataset import ListDataset, FieldName
from accuracy_evaluator import calculate_and_save_data
def first_nonzero(arr, axis, invalid_val=-1):
    """Index of the first non-zero entry along *axis*.

    Slices that are entirely zero yield *invalid_val* instead.
    """
    nonzero = (arr != 0)
    first_idx = nonzero.argmax(axis=axis)
    has_any = nonzero.any(axis=axis)
    return np.where(has_any, first_idx, invalid_val)
def get_second_sale_idx(target_values):
    """Index of the second non-zero sale in each row of *target_values*.

    target_values: 2-D ndarray (series x days) of sales counts.
    Returns an int array with one index per row; rows with fewer than two
    non-zero entries yield -1 (the invalid value of first_nonzero).

    Fix: the row count is now derived from the input instead of the
    hard-coded M5 value 30490, and the Python loop + deepcopy is replaced
    by a vectorized ndarray copy.
    """
    first_sale = first_nonzero(target_values, axis=1)
    # Zero out each row's first sale so the next argmax finds the second.
    # For all-zero rows first_sale is -1, which writes a 0 over the last
    # (already zero) element — same outcome as the original loop.
    masked = target_values.copy()
    masked[np.arange(masked.shape[0]), first_sale] = 0
    return first_nonzero(masked, axis=1)
def make_m5_dummy_features(m5_input_path, n_series=30490, n_days=1969):
    """Placeholder features with the same shapes as make_m5_features.

    Parameters
    ----------
    m5_input_path : unused; kept for signature parity with make_m5_features.
    n_series : number of time series (default 30490 = full M5 dataset).
    n_days : number of days (default 1969 = full M5 horizon).

    Returns the same 6-tuple as make_m5_features: target values (each row is
    simply 0..n_days-1), zero dynamic real/cat/past features, zero static
    categories, and the full-M5 static-category cardinalities.

    Fix: the previously hard-coded dimensions are now parameters with
    backward-compatible defaults. Note the cardinalities always describe
    the full M5 dataset regardless of n_series.
    """
    target_values = np.array([list(range(n_days))] * n_series)
    dynamic_cat = np.zeros([n_series, 4, n_days])
    dynamic_real = np.zeros([n_series, 6, n_days])
    stat_cat = np.zeros([n_series, 5])
    stat_cat_cardinalities = [3049, 7, 3, 10, 3]
    dynamic_past = np.zeros([n_series, 1, n_days])
    return target_values, dynamic_real, dynamic_cat, dynamic_past, stat_cat, stat_cat_cardinalities
def make_m5_features(m5_input_path):
    """Build the raw M5 feature arrays from the Kaggle CSVs under *m5_input_path*.

    Returns a 6-tuple:
      target_values          (30490, 1969) daily unit sales; d_1942..d_1969 are NaN dummies
      dynamic_real           (30490, 6, 1969) SNAP flags + raw/normalized sell prices
      dynamic_cat            (30490, 4, 1969) calendar event type/name ids
      dynamic_past           (30490, 1, 1969) past-only features (zero-sale streak)
      stat_cat               (30490, 5) static category codes per series
      stat_cat_cardinalities cardinality of each static category ([3049, 7, 3, 10, 3])
    """
    # First we need to convert the provided M5 data into a format that is readable by GluonTS.
    # At this point we assume that the M5 data, which can be downloaded from Kaggle, is present under m5_input_path.
    calendar = pd.read_csv(f'{m5_input_path}/calendar.csv')
    sales_train_evaluation = pd.read_csv(f'{m5_input_path}/sales_train_evaluation.csv')
    sample_submission = pd.read_csv(f'{m5_input_path}/sample_submission.csv')
    # append dummy for expanding all period
    for i in range(1942, 1970):
        sales_train_evaluation[f"d_{i}"] = np.nan # d_1 ~ d1969
    # Prices per item/day are cached in a converted CSV; build it once.
    converted_price_file = Path(f'{m5_input_path}/converted_price_evaluation.csv')
    if not converted_price_file.exists():
        convert_price_file(m5_input_path)
    converted_price = pd.read_csv(converted_price_file)
    # target_value
    train_df = sales_train_evaluation.drop(["id","item_id","dept_id","cat_id","store_id","state_id"], axis=1) # d_1 ~ d_1969
    target_values = train_df.values
    #################################
    # FEAT_DYNAMIC_CAT
    # Event type
    event_type_to_idx = {"nan":0, "Cultural":1, "National":2, "Religious":3, "Sporting":4}
    event_type1 = np.array([event_type_to_idx[str(x)] for x in calendar['event_type_1'].values])
    event_type2 = np.array([event_type_to_idx[str(x)] for x in calendar['event_type_2'].values])
    # Event name
    event_name_to_idx = {'nan':0, 'Chanukah End':1, 'Christmas':2, 'Cinco De Mayo':3, 'ColumbusDay':4, 'Easter':5,
                         'Eid al-Fitr':6, 'EidAlAdha':7, "Father's day":8, 'Halloween':9, 'IndependenceDay':10, 'LaborDay':11,
                         'LentStart':12, 'LentWeek2':13, 'MartinLutherKingDay':14, 'MemorialDay':15, "Mother's day":16, 'NBAFinalsEnd':17,
                         'NBAFinalsStart':18, 'NewYear':19, 'OrthodoxChristmas':20, 'OrthodoxEaster':21, 'Pesach End':22, 'PresidentsDay':23,
                         'Purim End':24, 'Ramadan starts':25, 'StPatricksDay':26, 'SuperBowl':27, 'Thanksgiving':28, 'ValentinesDay':29, 'VeteransDay':30}
    event_name1 = np.array([event_name_to_idx[str(x)] for x in calendar['event_name_1'].values])
    event_name2 = np.array([event_name_to_idx[str(x)] for x in calendar['event_name_2'].values])
    # Calendar features are identical for every series, so the same (4, T)
    # stack is repeated per series (list of views, not a copy per series).
    event_features = np.stack([event_type1, event_type2, event_name1, event_name2])
    dynamic_cat = [event_features] * len(sales_train_evaluation)
    #################################
    # FEAT_DYNAMIC_REAL
    # SNAP_CA, TX, WI
    snap_features = calendar[['snap_CA', 'snap_TX', 'snap_WI']]
    snap_features = snap_features.values.T
    snap_features_expand = np.array([snap_features] * len(sales_train_evaluation)) # 30490 * 3 * T
    # sell_prices
    price_feature = converted_price.drop(["id","item_id","dept_id","cat_id","store_id","state_id"], axis=1).values
    # normalized sell prices
    normalized_price_file = Path(f'{m5_input_path}/normalized_price_evaluation.npz')
    if not normalized_price_file.exists():
        # normalized sell prices per each item
        price_mean_per_item = np.nanmean(price_feature, axis=1, keepdims=True)
        price_std_per_item = np.nanstd(price_feature, axis=1, keepdims=True)
        normalized_price_per_item = (price_feature - price_mean_per_item) / (price_std_per_item + 1e-6)
        # normalized sell prices per day within the same dept
        dept_groups = converted_price.groupby('dept_id')
        price_mean_per_dept = dept_groups.transform(np.nanmean)
        price_std_per_dept = dept_groups.transform(np.nanstd)
        normalized_price_per_group_pd = (converted_price[price_mean_per_dept.columns] - price_mean_per_dept) / (price_std_per_dept + 1e-6)
        normalized_price_per_group = normalized_price_per_group_pd.values
        np.savez(normalized_price_file, per_item = normalized_price_per_item, per_group = normalized_price_per_group)
    else:
        normalized_price = np.load(normalized_price_file)
        normalized_price_per_item = normalized_price['per_item']
        normalized_price_per_group = normalized_price['per_group']
    # Missing prices (item not on sale yet) become 0 after nan_to_num.
    price_feature = np.nan_to_num(price_feature)
    normalized_price_per_item = np.nan_to_num(normalized_price_per_item)
    normalized_price_per_group = np.nan_to_num(normalized_price_per_group)
    all_price_features = np.stack([price_feature, normalized_price_per_item, normalized_price_per_group], axis=1) # 30490 * 3 * T
    dynamic_real = np.concatenate([snap_features_expand, all_price_features], axis=1) # 30490 * 6 * T
    #################################
    # FEAT_STATIC_CAT
    # We then go on to build static features (features which are constant and series-specific).
    # Here, we make use of all categorical features that are provided to us as part of the M5 data.
    state_ids = sales_train_evaluation["state_id"].astype('category').cat.codes.values
    state_ids_un , state_ids_counts = np.unique(state_ids, return_counts=True)
    store_ids = sales_train_evaluation["store_id"].astype('category').cat.codes.values
    store_ids_un , store_ids_counts = np.unique(store_ids, return_counts=True)
    cat_ids = sales_train_evaluation["cat_id"].astype('category').cat.codes.values
    cat_ids_un , cat_ids_counts = np.unique(cat_ids, return_counts=True)
    dept_ids = sales_train_evaluation["dept_id"].astype('category').cat.codes.values
    dept_ids_un , dept_ids_counts = np.unique(dept_ids, return_counts=True)
    item_ids = sales_train_evaluation["item_id"].astype('category').cat.codes.values
    item_ids_un , item_ids_counts = np.unique(item_ids, return_counts=True)
    stat_cat_list = [item_ids, dept_ids, cat_ids, store_ids, state_ids]
    stat_cat = np.concatenate(stat_cat_list)
    stat_cat = stat_cat.reshape(len(stat_cat_list), len(item_ids)).T
    stat_cat_cardinalities = [len(item_ids_un), len(dept_ids_un), len(cat_ids_un), len(store_ids_un), len(state_ids_un)]
    # [3049, 7, 3, 10, 3]
    #################################
    # FEAT_STATIC_REAL
    # None
    #################################
    # FEAT_DYNAMIC_PAST
    # Features that are NOT available for future time steps (observed past only).
    # zero-sale period: number of consecutive days with zero sales up to (and including) today.
    sales_zero_period = np.zeros_like(target_values)
    sales_zero_period[:, 0] = 1
    for i in range(1, 1969):
        sales_zero_period[:,i] = sales_zero_period[:, i-1] + 1
        sales_zero_period[target_values[:,i]!=0, i] = 0
    dynamic_past = np.expand_dims(sales_zero_period, 1) # 30490 * 1 * T
    return target_values, dynamic_real, dynamic_cat, dynamic_past, stat_cat, stat_cat_cardinalities
def make_m5_dataset(m5_input_path="/data/m5", exclude_no_sales=False, ds_split=True, prediction_start=1942):
    """Assemble GluonTS-style ListDatasets for M5 training/evaluation.

    With ds_split=True returns (train_ds, val_ds, stat_cat_cardinalities):
    training series end at d_1941 and validation series at d_1913.
    With ds_split=False returns a single test dataset whose series end
    28 days after *prediction_start*.

    exclude_no_sales drops each training series' leading no-sale period
    (everything before its second recorded sale).
    """
    # make features
    target_values, dynamic_real, dynamic_cat, dynamic_past, stat_cat, stat_cat_cardinalities = make_m5_features(m5_input_path)
    #################################
    # ACCUMULATED TARGET
    # for online moving average caculation
    acc_target_values = np.cumsum(target_values, dtype=float, axis=1)
    #################################
    # TARGET
    # This is for evaluation set
    # D1 ~ 1941: train , D1942 ~ 1969: test
    PREDICTION_START = 1942
    # exclude no sale periods
    if exclude_no_sales:
        second_sale = get_second_sale_idx(target_values)
        # Keep at least 28 days of history before the prediction window.
        second_sale = np.clip(second_sale, None, PREDICTION_START-28-1)
    else:
        second_sale = np.zeros(30490, dtype=np.int32)
    # d_1 corresponds to 2011-01-29; each series starts at its first kept day.
    start_date = pd.Timestamp("2011-01-29", freq='1D')
    m5_dates = [start_date + pd.DateOffset(days=int(d)) for d in second_sale]
    ##########################################################
    # Mode       1886~     1914~      1942~  NaN  1969
    #          train     /   val   |      test
    if ds_split==True:
        #
        idx_train_end = PREDICTION_START - 1  # index from 0
        idx_val_end = 1914 - 1
        ### Train Set
        train_set = [
            {
                FieldName.TARGET: target[first:idx_train_end],
                FieldName.START: start,
                FieldName.ACC_TARGET_SUM: acc_target[first:idx_train_end],
                FieldName.FEAT_DYNAMIC_REAL: fdr[...,first:idx_train_end],
                FieldName.FEAT_DYNAMIC_CAT: fdc[...,first:idx_train_end],
                FieldName.FEAT_DYNAMIC_PAST: fdp[...,first:idx_train_end],
                FieldName.FEAT_STATIC_REAL: None,
                FieldName.FEAT_STATIC_CAT: fsc
            }
            for i, (target, first, start, acc_target, fdr, fdc, fdp, fsc) in enumerate(zip(target_values, second_sale,
                                                                                           m5_dates,
                                                                                           acc_target_values,
                                                                                           dynamic_real,
                                                                                           dynamic_cat,
                                                                                           dynamic_past,
                                                                                           stat_cat))
        ]
        #train_set = train_set[:20]
        train_ds = ListDataset(train_set, freq="D", shuffle=False)
        # reset to first day
        # Validation always uses full histories starting at d_1.
        second_sale = np.zeros(30490, dtype=np.int32)
        m5_dates = [start_date + pd.DateOffset(days=int(d)) for d in second_sale]
        ### Validation Set
        val_set = [
            {
                FieldName.TARGET: target[first:idx_val_end],
                FieldName.START: start,
                FieldName.ACC_TARGET_SUM: acc_target[first:idx_val_end],
                FieldName.FEAT_DYNAMIC_REAL: fdr[...,first:idx_val_end],
                FieldName.FEAT_DYNAMIC_CAT: fdc[...,first:idx_val_end],
                FieldName.FEAT_DYNAMIC_PAST: fdp[...,first:idx_val_end],
                FieldName.FEAT_STATIC_REAL: None,
                FieldName.FEAT_STATIC_CAT: fsc
            }
            for i, (target, first, start, acc_target, fdr, fdc, fdp, fsc) in enumerate(zip(target_values, second_sale,
                                                                                           m5_dates,
                                                                                           acc_target_values,
                                                                                           dynamic_real,
                                                                                           dynamic_cat,
                                                                                           dynamic_past,
                                                                                           stat_cat))
        ]
        #val_set = val_set[:20]
        val_ds = ListDataset(val_set, freq="D")
        return train_ds, val_ds, stat_cat_cardinalities
    else:
        idx_end = prediction_start-1+28
        ### Test Set
        test_set = [
            {
                FieldName.TARGET: target[first:idx_end],
                FieldName.START: start,
                FieldName.ACC_TARGET_SUM: acc_target[first:idx_end],
                FieldName.FEAT_DYNAMIC_REAL: fdr[...,first:idx_end],
                FieldName.FEAT_DYNAMIC_CAT: fdc[...,first:idx_end],
                FieldName.FEAT_DYNAMIC_PAST: fdp[...,first:idx_end],
                FieldName.FEAT_STATIC_REAL: None,
                FieldName.FEAT_STATIC_CAT: fsc
            }
            for i, (target, first, start, acc_target, fdr, fdc, fdp, fsc) in enumerate(zip(target_values, second_sale,
                                                                                           m5_dates,
                                                                                           acc_target_values,
                                                                                           dynamic_real,
                                                                                           dynamic_cat,
                                                                                           dynamic_past,
                                                                                           stat_cat))
        ]
        #test_set = test_set[:20]
        test_ds = ListDataset(test_set, freq="D")
return test_ds | 12,830 | 44.021053 | 153 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/accuracy_evaluator.py | import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
import gc
import os
from pprint import pprint
from typing import Union
from tqdm.notebook import tqdm_notebook as tqdm
# M5 forecast horizon in days (used by wrmsse's per-day aggregation and
# by WRMSSEEvaluator's validation window).
prediction_length = 28
# Memory reduction helper function:
def reduce_mem_usage(df, verbose=True):
    """Downcast numeric columns of *df* to the smallest dtype that holds
    their value range, and return the (mutated) frame.

    Integer columns try int8/int16/int32/int64 in order; float columns try
    float16/float32 and fall back to float64. When *verbose*, the memory
    saving is printed.
    """
    numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    mem_before = df.memory_usage().sum() / 1024**2
    for column in df.columns:
        dtype = df[column].dtypes
        if dtype not in numeric_dtypes:
            continue
        lo = df[column].min()
        hi = df[column].max()
        if str(dtype)[:3] == 'int':
            # Smallest-first: the first candidate whose range strictly
            # contains [lo, hi] wins.
            for candidate in (np.int8, np.int16, np.int32, np.int64):
                info = np.iinfo(candidate)
                if lo > info.min and hi < info.max:
                    df[column] = df[column].astype(candidate)
                    break
        else:
            for candidate in (np.float16, np.float32):
                info = np.finfo(candidate)
                if lo > info.min and hi < info.max:
                    df[column] = df[column].astype(candidate)
                    break
            else:
                df[column] = df[column].astype(np.float64)
    mem_after = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
            mem_after, 100 * (mem_before - mem_after) / mem_before))
    return df
# Function to calculate S weights:
def get_s(roll_mat_csr, sales, prediction_start):
    """Per-series RMSSE scale terms (the denominator of RMSSE).

    Rolls the bottom-level sales (columns d_1 .. d_{prediction_start-1})
    up to all aggregation levels, then takes the mean squared day-to-day
    difference per rolled series. Days before a series' first recorded
    sale are excluded; all-zero series get a tiny 1e-9 scale so later
    divisions stay finite.
    """
    day_cols = ['d_%d' % i for i in range(1, prediction_start)]
    rolled = roll_mat_csr * sales[day_cols].values
    # True while the cumulative sum is still zero, i.e. before first sale.
    before_first_sale = np.cumsum(rolled, axis=1) == 0
    day_to_day = np.diff(rolled, axis=1)
    day_to_day = np.where(before_first_sale[:, 1:], np.nan, day_to_day)
    scale = np.nanmean(day_to_day ** 2, axis=1)
    scale[np.isnan(scale)] = 1e-9
    return scale
# Function to calculate weights:
def get_w(roll_mat_csr, sale_usd):
    """Dollar-sales weights for every aggregated series.

    sale_usd must have 'id' and 'sale_usd' columns; totals are computed
    per id (in the original row order) and rolled up to all levels.
    The result is normalized so each of the 12 levels sums to 1 — the
    grand total of the rolled vector is 12x the bottom-level total,
    hence the factor of 12.
    """
    per_item_usd = sale_usd.groupby(
        ['id'], sort=False)['sale_usd'].apply(np.sum).values
    rolled_usd = roll_mat_csr * per_item_usd
    return 12 * rolled_usd / np.sum(rolled_usd)
# Function to do quick rollups:
def rollup(roll_mat_csr, v):
    """Aggregate bottom-level series to every level via the roll-up matrix.

    v            - dense array of shape (30490, n days)
    roll_mat_csr - sparse roll-up matrix of shape (42840, 30490)
    returns the rolled-up dense array of shape (42840, n days).
    """
    return roll_mat_csr.dot(v)
# Function to calculate WRMSSE:
def wrmsse(error, score_only, roll_mat_csr, s, w, sw):
    '''Weighted Root Mean Squared Scaled Error over the 12 M5 levels.

    error        - prediction minus truth, array (30490 rows, N day columns)
    score_only   - if True, return only the scalar score (uses sw directly)
    roll_mat_csr - sparse (42840, 30490) roll-up matrix
    s            - per-series scale terms from get_s, array (42840,)
    w            - per-series sales weights from get_w, array (42840,)
    sw           - precombined w / sqrt(s), array (42840,)

    When score_only is False, also returns the per-level scores, the
    per-level per-day score breakdown, and the raw score matrix.
    '''
    if score_only:
        return np.sum(
                np.sqrt(
                    np.mean(
                        np.square(rollup(roll_mat_csr, error))
                        ,axis=1)) * sw)/12 #<-used to be mistake here
    else:
        # Per-series, per-day weighted squared scaled error.
        score_matrix = (np.square(rollup(roll_mat_csr, error)) * np.square(w)[:, None])/ s[:, None]
        wrmsse_i = np.sqrt(np.mean(score_matrix,axis=1))
        wrmsse_raw = np.sqrt(score_matrix)
        # Series count per aggregation level; must match the row order of
        # roll_mat_csr (level 0 'all' first, item/store pairs last).
        aggregation_count = [1, 3, 10, 3, 7, 9, 21, 30, 70, 3049, 9147, 30490]
        idx = 0
        aggregated_wrmsse = np.zeros(12)
        aggregated_wrmsse_per_day = np.zeros([12, prediction_length])
        for i, count in enumerate(aggregation_count):
            endIdx = idx+count
            aggregated_wrmsse[i] = wrmsse_i[idx:endIdx].sum()
            aggregated_wrmsse_per_day[i] = wrmsse_raw[idx:endIdx, :].sum(axis=0)
            idx = endIdx
        # score == aggregated_wrmsse.mean()
        wrmsse = np.sum(wrmsse_i)/12 #<-used to be mistake here
        return wrmsse, aggregated_wrmsse, aggregated_wrmsse_per_day, score_matrix
def calculate_and_save_data(data_path, prediction_start):
    """Compute and cache everything WRMSSE needs from the raw M5 CSVs.

    Builds the roll-up matrix over the 12 aggregation levels, the scale
    terms S, the dollar weights W (from the last 28 days before
    *prediction_start*), and SW = W/sqrt(S); pickles the roll-up frame and
    the S/W/SW table under *data_path* and returns
    (sales, S, W, SW, roll_mat_csr).
    """
    # Sales quantities:
    sales = pd.read_csv(data_path+'/sales_train_evaluation.csv')
    # Calendar to get week number to join sell prices:
    calendar = pd.read_csv(data_path+'/calendar.csv')
    calendar = reduce_mem_usage(calendar)
    # Sell prices to calculate sales in USD:
    sell_prices = pd.read_csv(data_path+'/sell_prices.csv')
    sell_prices = reduce_mem_usage(sell_prices)
    # Dataframe with only last 28 days:
    cols = ["d_{}".format(i) for i in range(prediction_start-28, prediction_start)]
    data = sales[["id", 'store_id', 'item_id'] + cols]
    # To long form:
    data = data.melt(id_vars=["id", 'store_id', 'item_id'],
                     var_name="d", value_name="sale")
    # Add week of year column from 'calendar':
    data = pd.merge(data, calendar, how = 'left',
                    left_on = ['d'], right_on = ['d'])
    data = data[["id", 'store_id', 'item_id', "sale", "d", "wm_yr_wk"]]
    # Add weekly price from 'sell_prices':
    data = data.merge(sell_prices, on = ['store_id', 'item_id', 'wm_yr_wk'], how = 'left')
    data.drop(columns = ['wm_yr_wk'], inplace=True)
    # Calculate daily sales in USD:
    data['sale_usd'] = data['sale'] * data['sell_price']
    # List of categories combinations for aggregations as defined in docs:
    dummies_list = [sales.state_id, sales.store_id,
                    sales.cat_id, sales.dept_id,
                    sales.state_id +'_'+ sales.cat_id, sales.state_id +'_'+ sales.dept_id,
                    sales.store_id +'_'+ sales.cat_id, sales.store_id +'_'+ sales.dept_id,
                    sales.item_id, sales.state_id +'_'+ sales.item_id, sales.id]
    ## First element Level_0 aggregation 'all_sales':
    dummies_df_list =[pd.DataFrame(np.ones(sales.shape[0]).astype(np.int8),
                                   index=sales.index, columns=['all']).T]
    # List of dummy dataframes:
    # An ordered categorical keeps the first-appearance order of the ids,
    # so the roll-up rows line up with the order expected by wrmsse.
    for i, cats in enumerate(dummies_list):
        cat_dtype = pd.api.types.CategoricalDtype(categories=pd.unique(cats.values), ordered=True)
        ordered_cat = cats.astype(cat_dtype)
        dummies_df_list +=[pd.get_dummies(ordered_cat, drop_first=False, dtype=np.int8).T]
    #[1, 3, 10, 3, 7, 9, 21, 30, 70, 3049, 9147, 30490]
    # Concat dummy dataframes in one go:
    ## Level is constructed for free.
    roll_mat_df = pd.concat(dummies_df_list, keys=list(range(12)),
                            names=['level','id'])#.astype(np.int8, copy=False)
    # Save values as sparse matrix & save index for future reference:
    roll_index = roll_mat_df.index
    roll_mat_csr = csr_matrix(roll_mat_df.values)
    roll_mat_csr.shape
    roll_mat_df.to_pickle(data_path + '/ordered_roll_mat_df.pkl')
    del dummies_df_list, roll_mat_df
    gc.collect()
    S = get_s(roll_mat_csr, sales, prediction_start)
    W = get_w(roll_mat_csr, data[['id','sale_usd']])
    SW = W/np.sqrt(S)
    sw_df = pd.DataFrame(np.stack((S, W, SW), axis=-1),index = roll_index,columns=['s','w','sw'])
    sw_df.to_pickle(data_path + f'/ordered_sw_df_p{prediction_start}.pkl')
    return sales, S, W, SW, roll_mat_csr
def load_precalculated_data(data_path, prediction_start):
    """Load the cached S/W/SW weights and roll-up matrix from pickles.

    If the weight pickle for this *prediction_start* is missing, the
    caches are regenerated first via calculate_and_save_data. Returns
    (S, W, SW, roll_mat_csr).
    """
    sw_path = data_path + f'/ordered_sw_df_p{prediction_start}.pkl'
    if not os.path.exists(sw_path):
        calculate_and_save_data(data_path, prediction_start)
    sw_df = pd.read_pickle(sw_path)
    S = sw_df.s.values
    W = sw_df.w.values
    SW = sw_df.sw.values
    # Rebuild the sparse roll-up matrix and drop the dense frame promptly.
    roll_df = pd.read_pickle(data_path + '/ordered_roll_mat_df.pkl')
    roll_mat_csr = csr_matrix(roll_df.values)
    del roll_df
    return S, W, SW, roll_mat_csr
def evaluate_wrmsse(data_path, prediction, prediction_start, score_only=True):
    """Score a (30490, 28) *prediction* against the M5 ground truth.

    Uses pickled S/W/SW caches when both pickle files exist under
    *data_path*, otherwise computes them from the raw CSVs. Returns
    whatever wrmsse returns (a scalar when score_only, otherwise the
    full breakdown tuple).
    """
    # Loading data in two ways:
    # if S, W, SW are calculated in advance, load from pickle files
    # otherwise, calculate from scratch
    if os.path.isfile(data_path + f'/ordered_sw_df_p{prediction_start}.pkl') and \
        os.path.isfile(data_path + '/ordered_roll_mat_df.pkl'):
        print('load precalculated data')
        # Sales quantities:
        sales = pd.read_csv(data_path+'/sales_train_evaluation.csv')
        S, W, SW, roll_mat_csr = load_precalculated_data(data_path, prediction_start)
    else:
        print('load data from scratch')
        sales, S, W, SW, roll_mat_csr = calculate_and_save_data(data_path, prediction_start)
    # Ground truth:
    dayCols = ["d_{}".format(i) for i in range(prediction_start, prediction_start+prediction_length)]
    y_true = sales[dayCols]
    error = prediction - y_true.values
    results = wrmsse(error, score_only, roll_mat_csr, S, W, SW)
    return results
class WRMSSEEvaluator(object):
    def __init__(self, data_path, prediction_start):
        """Load the M5 CSVs and precompute, for each of the 12 aggregation
        levels, the grouped train/valid frames, the RMSSE scale terms and
        the normalized dollar weights (stored as lv{i}_* attributes).

        The validation window is the *prediction_length* columns starting
        at *prediction_start* (shifted by 5 below — see the "num of heads"
        note; presumably an offset for a multi-head model, confirm).
        """
        # Load Dataset
        sales = pd.read_csv(data_path + 'sales_train_evaluation.csv')
        # append dummy
        for i in range(1942, 1970):
            sales[f"d_{i}"] = 0
        calendar = pd.read_csv(data_path + 'calendar.csv',
                               dtype={'wm_yr_wk': np.int32, 'wday': np.int32,
                                      'month': np.int32, 'year': np.int32,
                                      'snap_CA': np.int32, 'snap_TX': np.int32,
                                      'snap_WI': np.int32})
        prices = pd.read_csv(data_path + 'sell_prices.csv',
                             dtype={'wm_yr_wk': np.int32,
                                    'sell_price': np.float32})
        prediction_start = prediction_start + 6 - 1 # num of heads
        train_df = sales.iloc[:, :prediction_start]
        valid_df = sales.iloc[:, prediction_start:prediction_start+prediction_length]
        #
        train_y = train_df.loc[:, train_df.columns.str.startswith('d_')]
        train_target_columns = train_y.columns.tolist()
        # Dollar weights are based on the last 28 training days.
        weight_columns = train_y.iloc[:, -28:].columns.tolist()
        train_df['all_id'] = 'all' # for lv1 aggregation
        id_columns = train_df.loc[:, ~train_df.columns.str.startswith('d_')]\
                             .columns.tolist()
        valid_target_columns = valid_df.loc[:, valid_df.columns.str.startswith('d_')]\
                                       .columns.tolist()
        if not all([c in valid_df.columns for c in id_columns]):
            valid_df = pd.concat([train_df[id_columns], valid_df],
                                 axis=1, sort=False)
        self.train_df = train_df
        self.valid_df = valid_df
        self.calendar = calendar
        self.prices = prices
        self.weight_columns = weight_columns
        self.id_columns = id_columns
        self.valid_target_columns = valid_target_columns
        weight_df = self.get_weight_df()
        # The 12 official M5 aggregation levels, coarsest first.
        self.group_ids = (
            'all_id',
            'state_id',
            'store_id',
            'cat_id',
            'dept_id',
            ['state_id', 'cat_id'],
            ['state_id', 'dept_id'],
            ['store_id', 'cat_id'],
            ['store_id', 'dept_id'],
            'item_id',
            ['item_id', 'state_id'],
            ['item_id', 'store_id']
        )
        for i, group_id in enumerate(tqdm(self.group_ids)):
            train_y = train_df.groupby(group_id)[train_target_columns].sum()
            scale = []
            # RMSSE scale: mean squared day-to-day diff after the first sale.
            for _, row in train_y.iterrows():
                series = row.values[np.argmax(row.values != 0):]
                scale.append(((series[1:] - series[:-1]) ** 2).mean())
            setattr(self, f'lv{i + 1}_scale', np.array(scale))
            setattr(self, f'lv{i + 1}_train_df', train_y)
            setattr(self, f'lv{i + 1}_valid_df', valid_df.groupby(group_id)\
                    [valid_target_columns].sum())
            lv_weight = weight_df.groupby(group_id)[weight_columns].sum().sum(axis=1)
            setattr(self, f'lv{i + 1}_weight', lv_weight / lv_weight.sum())
    def get_weight_df(self) -> pd.DataFrame:
        """Build dollar-sales weights: for each (item, store) series, the
        daily unit sales of the last 28 training days multiplied by that
        week's sell price, re-attached to the id columns."""
        # map each day label 'd_x' to its Walmart week number for the price join
        day_to_week = self.calendar.set_index('d')['wm_yr_wk'].to_dict()
        weight_df = self.train_df[['item_id', 'store_id'] + self.weight_columns]\
            .set_index(['item_id', 'store_id'])
        # long format: one row per (item, store, day) with the unit sales
        weight_df = weight_df.stack().reset_index()\
            .rename(columns={'level_2': 'd', 0: 'value'})
        weight_df['wm_yr_wk'] = weight_df['d'].map(day_to_week)
        weight_df = weight_df.merge(self.prices, how='left',
                                    on=['item_id', 'store_id', 'wm_yr_wk'])
        # units sold -> dollars sold
        weight_df['value'] = weight_df['value'] * weight_df['sell_price']
        # back to wide format, re-ordered to match train_df's row order
        weight_df = weight_df.set_index(['item_id', 'store_id', 'd'])\
            .unstack(level=2)['value']\
            .loc[zip(self.train_df.item_id, self.train_df.store_id), :]\
            .reset_index(drop=True)
        weight_df = pd.concat([self.train_df[self.id_columns],
                               weight_df], axis=1, sort=False)
        return weight_df
    def rmsse(self, valid_preds: pd.DataFrame, lv: int) -> tuple:
        """Compute RMSSE at aggregation level `lv` (1-based).

        Returns a pair:
        - per-series RMSSE (Series),
        - per-series, per-day sqrt of the scaled squared errors (DataFrame),
          used by `score` to build the per-day breakdown.
        """
        valid_y = getattr(self, f'lv{lv}_valid_df')
        score_raw = ((valid_y - valid_preds) ** 2)
        score = score_raw.mean(axis=1)
        # scale = mean squared one-step difference of the training series
        scale = getattr(self, f'lv{lv}_scale')
        return (score / scale).map(np.sqrt), np.sqrt(score_raw / np.expand_dims(scale, 1))
    def score(self, valid_preds: Union[pd.DataFrame,
                                       np.ndarray]) -> float:
        """Compute the overall WRMSSE of item-level predictions.

        Side effects: stores per-level predictions/RMSSE on `self`, the per-level
        scores in `self.all_scores`, and a (12 levels x 28 days) weighted-score
        breakdown in `self.all_scores_day`.
        """
        # predictions must cover exactly the validation horizon
        assert self.valid_df[self.valid_target_columns].shape \
            == valid_preds.shape
        if isinstance(valid_preds, np.ndarray):
            valid_preds = pd.DataFrame(valid_preds,
                                       columns=self.valid_target_columns)
        # attach id columns so predictions can be grouped at each level
        valid_preds = pd.concat([self.valid_df[self.id_columns],
                                 valid_preds], axis=1, sort=False)
        all_scores = []
        all_scores_day = np.zeros([12,28])  # 12 aggregation levels x 28 days
        for i, group_id in enumerate(self.group_ids):
            # aggregate predictions to this level, score, then apply weights
            valid_preds_grp = valid_preds.groupby(group_id)[self.valid_target_columns].sum()
            setattr(self, f'lv{i + 1}_valid_preds', valid_preds_grp)
            lv_rmsse, lv_rmsse_raw = self.rmsse(valid_preds_grp, i + 1)
            setattr(self, f'lv{i + 1}_rmsse', lv_rmsse)
            weight = getattr(self, f'lv{i + 1}_weight')
            lv_scores = pd.concat([weight, lv_rmsse], axis=1,
                                  sort=False).prod(axis=1)
            lv_scores_raw = lv_rmsse_raw * np.expand_dims(weight,1)
            lv_scores_raw = lv_scores_raw.sum(axis=0)
            all_scores.append(lv_scores.sum())
            all_scores_day[i] = lv_scores_raw
        self.all_scores = all_scores
        self.all_scores_day = all_scores_day
        # final WRMSSE = mean of the 12 level scores
        self.wrmsse = np.mean(all_scores)
        return self.wrmsse
if __name__ == '__main__':
    # Sanity check: score the same submission with both WRMSSE implementations
    # (functional `evaluate_wrmsse` and class-based `WRMSSEEvaluator`) and
    # print the per-level differences; they should agree to numerical noise.
    DATA_DIR = '/data/m5/'
    PREDICTION_START = 1886 #1886:offline val, 1914:validation, 1942:evaluation
    prediction_pd = pd.read_csv('logs/m5/csv_test/submission_v1.csv')
    # first 30490 rows = item-level series; drop the id column
    prediction = np.array(prediction_pd.values[:30490,1:], dtype=np.float32)
    # First Evaluator
    wrmsse, aggregated_wrmsse, _, _ = evaluate_wrmsse(data_path=DATA_DIR, prediction=prediction, prediction_start=PREDICTION_START, score_only=False)
    print('---------------------------------------------------')
    print('First Evaluator')
    print('WRMSSE:', wrmsse)
    for i, val in enumerate(aggregated_wrmsse):
        print(f'WRMSSE level #{i+1}: {val}')
    # Second Evaluator
    print('---------------------------------------------------')
    print('Second Evaluator')
    evaluator = WRMSSEEvaluator(data_path=DATA_DIR, prediction_start=PREDICTION_START)
    wrmsse2 = evaluator.score(prediction)
    print('WRMSSE:', wrmsse2)
    for i, val in enumerate(evaluator.all_scores):
        print(f'WRMSSE level #{i+1}: {val}')
    # Show difference
    print('---------------------------------------------------')
    print('Difference')
    print('WRMSSE diff:', wrmsse - wrmsse2)
    for i, val in enumerate(zip(aggregated_wrmsse, evaluator.all_scores)):
        print(f'WRMSSE level #{i+1}: {val[0] - val[1]}')
'''
First Evaluator
WRMSSE: 0.6539945520603164
WRMSSE level #1: 0.5264150554353765
WRMSSE level #2: 0.5840861168973064
WRMSSE level #3: 0.6257473379469497
WRMSSE level #4: 0.5463039489698954
WRMSSE level #5: 0.6173161892685839
WRMSSE level #6: 0.6024604778476885
WRMSSE level #7: 0.6600226507063979
WRMSSE level #8: 0.6515105177255677
WRMSSE level #9: 0.6978254036803149
WRMSSE level #10: 0.7706896289293005
WRMSSE level #11: 0.7848556693564849
WRMSSE level #12: 0.7807016279599293
''' | 17,382 | 39.709602 | 149 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/preprocessing.py | import numpy as np
import pandas as pd
import copy
import os
from pathlib import Path
from utils import convert_price_file
from accuracy_evaluator import calculate_and_save_data
def main(m5_input_path):
    """Prepare derived M5 data files (idempotent: skips files that exist).

    Produces, under `m5_input_path`:
    - converted_price_evaluation.csv : daily per-item sell prices,
    - the precalculated WRMSSE rolling matrix (via calculate_and_save_data),
    - normalized_price_evaluation.npz : prices normalized per item and per dept.
    """
    # make price file
    converted_price_file = Path(f'{m5_input_path}/converted_price_evaluation.csv')
    if not converted_price_file.exists():
        convert_price_file(m5_input_path)
    converted_price = pd.read_csv(converted_price_file)
    # make rolling matrix for wrmsse
    _ = calculate_and_save_data(data_path=m5_input_path, prediction_start = 1942)
    # normalized sell prices
    normalized_price_file = Path(f'{m5_input_path}/normalized_price_evaluation.npz')
    if not normalized_price_file.exists():
        # normalized sell prices per each item (z-score over that item's days)
        price_feature = converted_price.drop(["id","item_id","dept_id","cat_id","store_id","state_id"], axis=1).values
        price_mean_per_item = np.nanmean(price_feature, axis=1, keepdims=True)
        price_std_per_item = np.nanstd(price_feature, axis=1, keepdims=True)
        # 1e-6 guards against zero std for constant-price items
        normalized_price_per_item = (price_feature - price_mean_per_item) / (price_std_per_item + 1e-6)
        # normalized sell prices per day within the same dept
        dept_groups = converted_price.groupby('dept_id')
        price_mean_per_dept = dept_groups.transform(np.nanmean)
        price_std_per_dept = dept_groups.transform(np.nanstd)
        normalized_price_per_group_pd = (converted_price[price_mean_per_dept.columns] - price_mean_per_dept) / (price_std_per_dept + 1e-6)
        normalized_price_per_group = normalized_price_per_group_pd.values
        np.savez(normalized_price_file, per_item = normalized_price_per_item, per_group = normalized_price_per_group)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    ###[Important argument]
    # directory containing the raw M5 csv files
    parser.add_argument(
        "--data_path",
        default='/data/m5'
    )
    args = parser.parse_args()
    main(args.data_path)
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/training.py | import time
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from pathlib import Path
import logging
import os
from pts.model import Predictor
from pts.model.deepar import DeepAREstimator
from pts.modules import TweedieOutput
from pts.trainer import Trainer
from pts.core.logging import get_log_path, set_logger
from load_dataset import make_m5_dataset
from pts.feature.time_feature import *
logger = logging.getLogger("mofl").getChild("training")
prediction_length = 28
def get_rolled_deepAR_estimator(stat_cat_cardinalities, device, log_path):
    """Build the DeepAR estimator configuration used for the M5 submission.

    stat_cat_cardinalities: cardinalities of the static categorical features.
    device: torch device passed to the Trainer.
    log_path: directory for TensorBoard logs and checkpoints.
    """
    batch_size = 64
    # 30490 = number of item-level series in M5; roughly one pass per epoch
    num_batches_per_epoch = 30490 // batch_size + 1
    return DeepAREstimator(
        input_size=102,
        num_cells=120,
        prediction_length=prediction_length,
        dropout_rate=0.1,
        freq="D",
        time_features=[DayOfWeek(), DayOfMonth(), MonthOfYear(), WeekOfYear(), Year()],
        distr_output = TweedieOutput(1.2),
        lags_seq=[1],
        moving_avg_windows=[7, 28],
        scaling=False,
        use_feat_dynamic_real=True,
        use_feat_static_cat=True,
        use_feat_dynamic_cat=True,
        cardinality=stat_cat_cardinalities,
        dc_cardinality=[5, 5, 31, 31], #event_type1,2 / event_name1,2
        dc_embedding_dimension=[2, 2, 15, 2],
        pick_incomplete=True,
        trainer=Trainer(
            learning_rate=1e-3,
            epochs=300,
            num_batches_per_epoch=num_batches_per_epoch,
            betas=(0.9, 0.98),
            use_lr_scheduler=True,
            lr_warmup_period=num_batches_per_epoch*5,  # 5 epochs of LR warmup
            batch_size=batch_size,
            device=device,
            log_path=log_path,
            num_workers=4,
        )
    )
def get_estimator(model_name, stat_cat_cardinalities, device, base_log_path, full_log_path):
    """Dispatch to the module-level builder named ``get_<model_name>_estimator``.

    `base_log_path` is accepted for interface compatibility but not used by
    the current builders.
    """
    builder = globals()["get_" + model_name + "_estimator"]
    return builder(stat_cat_cardinalities, device, full_log_path)
def main(args):
    """End-to-end training entry point: build dataset, train, save checkpoints.

    Expects `args` with attributes: comment, model, data_path, trial, save_model.
    """
    # parameters
    comment = args.comment
    model_name = args.model
    data_path = args.data_path
    trial = args.trial
    # set default config
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    full_log_path, base_log_path, trial_path = get_log_path(f"m5_submission/{model_name}", comment, trial)
    # set logger
    set_logger(full_log_path)
    # make dataset
    train_ds, val_ds, stat_cat_cardinalities = make_m5_dataset(m5_input_path=data_path, exclude_no_sales=True)
    # get estimator
    logger.info(f"Using {model_name} model...")
    estimator = get_estimator(model_name, stat_cat_cardinalities, device, base_log_path, full_log_path)
    # path for trained model (periodic checkpoints written here by the Trainer)
    model_path = Path(full_log_path+"/trained_model")
    model_path.mkdir()
    # prediction
    predictor = estimator.train(train_ds, validation_period=10)
    # save model
    if args.save_model:
        logger.info(f"Save {model_name} model...")
        model_path = Path(full_log_path+"/predictor")
        model_path.mkdir()
        predictor.serialize(model_path)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # directory containing the raw M5 csv files
    parser.add_argument(
        "--data_path",
        default='/data/m5'
    )
    # free-form tag appended to the log directory name
    parser.add_argument(
        "--comment",
        type=str,
        default='drop1'
    )
    # trial id, distinguishes repeated runs with the same comment
    parser.add_argument(
        "--trial",
        type=str,
        default='t0'
    )
    args = parser.parse_args()
    args.save_model = True # always save model
    args.model = 'rolled_deepAR'
    main(args)
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/utils.py |
import numpy as np
from pts.evaluation import Evaluator
import accuracy_evaluator
class M5Evaluator(Evaluator):
    """Evaluator specialized for the M5 competition: adds WRMSSE scoring."""
    def __init__(self, prediction_length, **kwargs):
        # prediction_length: forecast horizon in days (28 for M5)
        super().__init__(**kwargs)
        self.prediction_length = prediction_length
    def evaluate_wrmsse(self, prediction, prediction_start=1886, score_only=False, data_path='/data/m5'):
        """Delegate WRMSSE computation to the accuracy_evaluator module."""
        return accuracy_evaluator.evaluate_wrmsse(data_path=data_path, prediction=prediction, prediction_start=prediction_start, score_only=score_only)
def convert_price_file(m5_input_path):
    # Convert the weekly price table into one price per item per day, in the
    # same row layout as the sales data, and save it as
    # converted_price_evaluation.csv.
    import numpy as np
    import pandas as pd
    # load data
    calendar = pd.read_csv(f'{m5_input_path}/calendar.csv')
    sales_train_evaluation = pd.read_csv(f'{m5_input_path}/sales_train_evaluation.csv')
    sell_prices = pd.read_csv(f'{m5_input_path}/sell_prices.csv')
    # assign price for all days
    week_and_day = calendar[['wm_yr_wk', 'd']]
    price_all_days_items = pd.merge(week_and_day, sell_prices, on=['wm_yr_wk'], how='left') # join on week number
    price_all_days_items = price_all_days_items.drop(['wm_yr_wk'], axis=1)
    # convert days to column
    price_all_items = price_all_days_items.pivot_table(values='sell_price', index=['store_id', 'item_id'], columns='d')
    price_all_items.reset_index(drop=False, inplace=True)
    # reorder column (d_1 .. d_1969 in numeric order)
    price_all_items = price_all_items.reindex(['store_id','item_id'] + ['d_%d' % x for x in range(1,1969+1)], axis=1)
    sales_keys = ['id', 'item_id', 'dept_id', 'cat_id', 'store_id', 'state_id']
    sales_keys_pd = sales_train_evaluation[sales_keys]
    # join with sales data (keeps the sales file's row order)
    price_converted = pd.merge(sales_keys_pd, price_all_items, on=['store_id','item_id'], how='left')
    # save file
    price_converted.to_csv(f'{m5_input_path}/converted_price_evaluation.csv', index=False)
| 1,931 | 36.153846 | 151 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/__init__.py | 0 | 0 | 0 | py | |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/ensemble.py | import numpy as np
import pandas as pd
from glob import glob
import os
from tqdm import tqdm
import logging
from accuracy_evaluator import load_precalculated_data
from pts.core.logging import get_log_path, set_logger
logger = logging.getLogger("mofl").getChild("ensemble")
class Ensembler(object):
    """Ensembles per-epoch CV predictions of several models and scores them
    with the M5 WRMSSE metric.

    Parameters
    ----------
    data_path
        Directory holding the M5 csv files and precalculated WRMSSE data.
    base_path
        Root directory containing the per-model prediction logs.
    """

    def __init__(self, data_path, base_path):
        self.base_path = base_path
        self.data_path = data_path
        # Precalculated WRMSSE ingredients: per-series weights (sw) and the
        # sparse matrix rolling item-level errors up to all 12 levels.
        _, _, self.sw, self.roll_mat_csr = load_precalculated_data(
            data_path=data_path, prediction_start=1942)
        self.sales_train_evaluation = pd.read_csv(
            f'{data_path}/sales_train_evaluation.csv')

    def wrmsse(self, error):
        """WRMSSE of an (items x 28) error matrix, averaged over the 12 levels."""
        rolled = self.roll_mat_csr * error  # aggregate errors per level/series
        rmse_per_series = np.sqrt(np.mean(np.square(rolled), axis=1))
        return np.sum(rmse_per_series * self.sw) / 12

    def calc_wrmsse(self, prediction, prediction_start):
        """Score `prediction` against ground truth starting at day `prediction_start`."""
        dayCols = ["d_{}".format(i)
                   for i in range(prediction_start, prediction_start + 28)]
        y_true = self.sales_train_evaluation[dayCols]
        return self.wrmsse(prediction - y_true.values)

    def calc_wrmsse_list(self, model_path, cv_path, corr_factor=1):
        """WRMSSE for each backtest period (starts 1914 down to 1550, step -28).

        corr_factor optionally rescales the stored predictions.
        """
        wrmsse_list = []
        for prediction_start in range(1914, 1522, -28):
            cv_file = os.path.join(self.base_path, model_path, cv_path,
                                   f'prediction_{prediction_start}.npy')
            prediction = np.load(cv_file) * corr_factor
            wrmsse_list.append(self.calc_wrmsse(prediction, prediction_start))
        return wrmsse_list

    def load_all_predictions(self, models, choosed_epoch):
        """Load saved predictions for every period/model/selected epoch.

        Bug fix: the parameter was previously named ``model`` while the loop
        silently read the module-level ``models`` global (and the parameter was
        shadowed by the loop variable); the argument is now used directly.
        Returns an array shaped (period, model, epoch, n_series, horizon).
        """
        all_predictions = []
        for prediction_start in tqdm(range(1942, 1522, -28)):
            model_predictions = []
            for model_path, epochs in zip(models, choosed_epoch):
                epoch_predictions = []
                for epoch in epochs:
                    cv_file = os.path.join(
                        self.base_path, model_path, f'CV/train_net_{epoch}',
                        f'prediction_{prediction_start}.npy')
                    epoch_predictions.append(np.load(cv_file))
                model_predictions.append(epoch_predictions)
            all_predictions.append(model_predictions)
        return np.array(all_predictions)

    def get_topK_epochs(self, models, K=3):
        """Return, per model, the K checkpoint epochs (out of 200..290 step 10)
        with the lowest mean WRMSSE across the CV periods."""
        epoch_list = np.arange(200, 300, 10)
        choosed_epoch = []
        for model in tqdm(models):
            epoch_results = []
            for epoch in epoch_list:
                cv_path = f'CV/train_net_{epoch}/'
                epoch_results.append(self.calc_wrmsse_list(model, cv_path))
            epoch_results = np.array(epoch_results)
            # rank epochs by mean WRMSSE over the CV periods; keep the best K
            criteria = epoch_results.mean(axis=1)
            topK = sorted(zip(criteria, range(len(epoch_list))))[:K]
            topK_index = np.int32(np.array(topK)[:, 1])
            choosed_epoch.append(epoch_list[topK_index])
        return choosed_epoch

    def export_final_csv(self, prediction_1914, prediction_1942, result_path):
        """Write the final submission csv: validation rows first (day 1914
        horizon), then evaluation rows (day 1942 horizon)."""
        sample_submission = pd.read_csv(f'{self.data_path}/sample_submission.csv')
        sample_submission.iloc[:30490, 1:] = prediction_1914
        sample_submission.iloc[30490:, 1:] = prediction_1942
        sample_submission.to_csv(result_path + '/submission_final.csv', index=False)
if __name__ == "__main__":
    # Ensemble pipeline: pick the best checkpoints per trial, average their
    # predictions across trials/epochs, report CV WRMSSE and export the final csv.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data_path",
        default='/data/m5'
    )
    parser.add_argument(
        "--comment",
        type=str,
        default='drop1'
    )
    args = parser.parse_args()
    args.model = 'rolled_deepAR'
    # initialize
    base_path = 'logs/m5_submission/'
    model_path = args.model + '/' + args.comment
    ens = Ensembler(args.data_path, base_path)
    # set logger
    full_log_path = base_path + model_path
    set_logger(full_log_path, text_log_file='ensemble.log')
    # choose top-K epoch per trial directory found under the model path
    models = [model_path + '/' + f.name for f in os.scandir(base_path + model_path) if f.is_dir()]
    choosed_epoch = ens.get_topK_epochs(models)
    # logging
    logger.info(f"Selected Epochs...")
    for m,e in zip(models,choosed_epoch):
        logger.info(f"{m}: {e}")
    # get all predictions
    all_predictions = ens.load_all_predictions(models, choosed_epoch)
    # ensemble (mean of predictions over model and epoch axes)
    mean_predictions = np.mean(all_predictions, axis=(1,2)) # shape: period * (30490, 28)
    # check final wrmsse on the backtest periods (index 0 is the 1942 horizon,
    # which has no ground truth, so scoring starts at index 1)
    logger.info("Final WRMSSEs...")
    ensemble_wrmsse = []
    mean_predictions_1914 = mean_predictions[1:]
    for p, prediction_start in enumerate(range(1914, 1522, -28)):
        prediction = mean_predictions_1914[p]
        wrmsse_val = ens.calc_wrmsse(prediction, prediction_start)
        ensemble_wrmsse.append(wrmsse_val)
        # logging
        logger.info(f"{prediction_start}: {wrmsse_val}")
    # export_final_csv
    prediction_1942 = mean_predictions[0]
    prediction_1914 = mean_predictions[1]
    ens.export_final_csv(prediction_1914, prediction_1942, result_path=full_log_path)
M5_Accuracy_3rd | M5_Accuracy_3rd-master/examples/m5/make_predictions.py |
import numpy as np
import torch
import os
from tqdm import tqdm
from pathlib import Path
import logging
from pts.core.logging import get_log_path
from pts.model import Predictor
from load_dataset import make_m5_dataset
from pts.evaluation.backtest import make_evaluation_predictions, make_validation_data
#test_start : Start index of the final possible data chunk. For example, for M5 dataset, correct value is 1942
TEST_START = 1942
PREDICTION_LEN = 28
def make_predictions(predictor, ds, num_samples=30, n_iter = 15, start_offset=0, log_path=None, show_progress=True):
    """Generate mean forecasts for `n_iter` backtest windows, stepping back 28
    days per iteration from TEST_START; optionally save each as .npy.

    Returns the prediction array (n_series x 28) of the LAST window only.
    """
    for i in tqdm(range(start_offset, n_iter+start_offset), disable=not show_progress):
        start_this = TEST_START - PREDICTION_LEN * i
        # make prediction; the final (1942) window uses more samples
        forecast_it, ts_it = make_evaluation_predictions(
            dataset=make_validation_data(ds, val_start=start_this, val_start_final=TEST_START - PREDICTION_LEN),
            predictor=predictor,
            num_samples=100 if start_this==1942 else num_samples)
        forecasts = list(forecast_it)
        # [TODO] is this loop necessary? (could be a single stacked np.mean)
        # point forecast = mean over the sample paths of each series
        prediction = np.zeros((len(forecasts), PREDICTION_LEN))
        for n in range(len(forecasts)):
            prediction[n] = np.mean(forecasts[n].samples, axis=0)
        # save result
        if log_path is not None:
            np.save(log_path / f'prediction_{start_this}.npy', prediction)
    return prediction #return last prediction
def run_prediction(args, trial_path, model_idx, ds, predictor):
    """Load the checkpoint `model_idx` into `predictor` and write its CV
    predictions under <trial_path>/CV/<model_idx>/."""
    cv_log_path = Path(os.path.join(trial_path, 'CV', model_idx))
    cv_log_path.mkdir(parents=True, exist_ok=True)
    # load trained model weights into the existing prediction network
    trained_model_path = Path(os.path.join(trial_path, 'trained_model'))
    predictor.prediction_net.load_state_dict(torch.load(trained_model_path / model_idx))
    # optionally override inference batch size / parallel sample count
    if args.bs is not None:
        predictor.batch_size = args.bs
        predictor.prediction_net.num_parallel_samples = args.n_par
    make_predictions(predictor, ds, num_samples=args.n_samples, log_path=cv_log_path)
if __name__ == "__main__":
    # Generate CV predictions for every saved checkpoint (epochs 200..290)
    # of a previously trained predictor.
    import argparse
    parser = argparse.ArgumentParser()
    ###[Important argument]
    parser.add_argument(
        "--data_path",
        default='/data/m5'
    )
    parser.add_argument(
        "--comment",
        type=str,
        default='drop1'
    )
    parser.add_argument(
        "--trial",
        type=str,
        default='t0'
    )
    ###[Important argument]
    # inference batch size override (None keeps the serialized value)
    parser.add_argument(
        "--bs",
        type=int,
        default=6400
    )
    # number of sample paths drawn in parallel per batch
    parser.add_argument(
        "--n_par",
        default=30
    )
    # number of sample paths per forecast
    parser.add_argument(
        "--n_samples",
        default=30
    )
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # load model
    model_name = 'rolled_deepAR'
    trial_path, _, _ = get_log_path(f"m5_submission/{model_name}", log_comment=args.comment, trial=args.trial, mkdir=False)
    print(f"Make predictions for {trial_path}")
    pretrained_model_path = Path(os.path.join(trial_path, 'predictor'))
    if not pretrained_model_path.exists():
        assert False, "Error: Pretrained model not exist!"
    predictor = Predictor.deserialize(pretrained_model_path, device)
    # load data (full dataset, no train/val split; horizon starts at day 1942)
    test_ds = make_m5_dataset(m5_input_path=args.data_path, exclude_no_sales=False, ds_split=False, prediction_start=1942)
    # generate predictions
    for epoch in range(200,300,10):
        model_idx = f"train_net_{epoch}"
        run_prediction(args, trial_path, model_idx, test_ds, predictor)
def assert_pts(condition: bool, message: str, *args, **kwargs) -> None:
    """Raise a formatted Exception when `condition` is falsy.

    `message` is interpolated with str.format(*args, **kwargs) only on
    failure, so the happy path pays no formatting cost.
    """
    if condition:
        return
    raise Exception(message.format(*args, **kwargs))
| 151 | 37 | 71 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/__init__.py | from pkgutil import extend_path
from pkg_resources import get_distribution, DistributionNotFound
from .exception import assert_pts
from .trainer import Trainer
__path__ = extend_path(__path__, __name__) # type: ignore
try:
    # use the installed package's version; fall back when running from source
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    __version__ = "0.0.0-unknown"
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/trainer.py | import time
from typing import List, Optional, Tuple
import logging
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from pts.core.component import validated
import os
logger = logging.getLogger("mofl").getChild("trainer")
def count_parameters(model):
    """Return the number of trainable (requires_grad) scalars in `model`."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
class Trainer:
    """Generic training loop for PyTorch forecasting networks.

    Runs `epochs` epochs of `num_batches_per_epoch` batches with Adam,
    optional warmup + cosine LR scheduling, TensorBoard logging, and a
    checkpoint to <log_path>/trained_model/ every `validation_period` epochs.
    """
    @validated()
    def __init__(
        self,
        epochs: int = 100,
        batch_size: int = 32,
        num_batches_per_epoch: int = 50,
        num_workers: int = 0, # TODO worker>0 causes performance drop if uses iterable dataset
        pin_memory: bool = False,
        learning_rate: float = 1e-3,
        weight_decay: float = 1e-6,
        betas: Tuple[float, float] = (0.9, 0.999),
        device: Optional[torch.device] = None,
        log_path: Optional[str] = None,
        use_lr_scheduler: bool = False,
        lr_warmup_period: int = 0, # num of iterations for warmup
    ) -> None:
        self.epochs = epochs
        self.batch_size = batch_size
        self.num_batches_per_epoch = num_batches_per_epoch
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        self.betas = betas
        self.device = device
        self.num_workers = num_workers
        self.pin_memory = pin_memory
        self.log_path = log_path
        self.use_lr_scheduler = use_lr_scheduler
        self.lr_warmup_period = lr_warmup_period
        self.roll_mat_csr = None
    def __call__(
        self, net: nn.Module, input_names: List[str], training_data_loader: DataLoader, validation_period: int = 1
    ) -> None:
        """Train `net` in place. Each batch's inputs are taken from the
        data-loader dict under `input_names`, in order."""
        # logging model size
        net_name = type(net).__name__
        num_model_param = count_parameters(net)
        logger.info(
            f"Number of parameters in {net_name}: {num_model_param}"
        )
        if torch.cuda.device_count() > 1:
            logger.info("Training with %d gpus..." % (torch.cuda.device_count()))
            net = nn.DataParallel(net)
        optimizer = torch.optim.Adam(
            net.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay, betas=self.betas, #eps=1e-9,
        )
        if self.use_lr_scheduler:
            # linear warmup for lr_warmup_period iterations, then cosine decay
            total_iter = self.epochs * self.num_batches_per_epoch
            scheduler_cos = CosineAnnealingLR(optimizer, total_iter, eta_min=1e-6)
            scheduler = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=self.lr_warmup_period, after_scheduler=scheduler_cos)
        writer = SummaryWriter(log_dir=self.log_path)
        #writer.add_graph(net)
        def loop(
            epoch_no, data_loader, is_training: bool = True
        ) -> float:
            # one epoch over data_loader; returns the average loss
            tic = time.time()
            avg_epoch_loss = 0.0
            cumlated_sqerr = 0.0
            total_seq = 0
            errors = []
            if is_training:
                net.train()
            else:
                net.eval()
            with tqdm(data_loader, total=float("inf"),disable=os.environ.get("DISABLE_TQDM", False)) as it:
                for batch_no, data_entry in enumerate(it, start=1):
                    optimizer.zero_grad()
                    inputs = [data_entry[k].to(self.device) for k in input_names]
                    # the net may return (loss, error) or just the loss tensor
                    output = net(*inputs)
                    if isinstance(output, (list, tuple)):
                        loss = output[0]
                        error = output[1]
                        cumlated_sqerr += (error ** 2).sum()
                        total_seq += len(inputs[0])
                        if not is_training:
                            errors.append(error)
                    else:
                        loss = output
                    loss = loss.sum()
                    avg_epoch_loss += loss.item()
                    lr = optimizer.param_groups[0]['lr']
                    it.set_postfix(
                        ordered_dict={
                            "lr": lr,
                            ("" if is_training else "validation_")
                            + "avg_epoch_loss": avg_epoch_loss / batch_no,
                            "epoch": epoch_no,
                        },
                        refresh=False,
                    )
                    # TensorBoard scalars every 20 global iterations
                    n_iter = epoch_no*self.num_batches_per_epoch + batch_no
                    if n_iter % 20 == 0:
                        if is_training:
                            writer.add_scalar('Loss/train', loss.item(), n_iter)
                            writer.add_scalar('Learning rate', lr, n_iter)
                        else:
                            writer.add_scalar('Loss/validation', loss.item(), n_iter)
                    if is_training:
                        loss.backward()
                        optimizer.step()
                        if self.use_lr_scheduler:
                            scheduler.step()
                    # the loader may be infinite: stop after the configured count
                    if self.num_batches_per_epoch == batch_no:
                        #for name, param in net.named_parameters():
                        #    writer.add_histogram(name, param.clone().cpu().data.numpy(), n_iter)
                        break
            # mark epoch end time and log time cost of current epoch
            toc = time.time()
            # NOTE(review): the quoted block below is a disabled timing log
            '''logger.info(
                "Epoch[%d] Elapsed time %.3f seconds",
                epoch_no,
                (toc - tic),
            )'''
            lv = avg_epoch_loss / batch_no
            logger.info(
                "Epoch[%d] Evaluation metric '%s'=%f",
                epoch_no,
                ("" if is_training else "validation_") + "epoch_loss",
                lv,
            )
            writer.add_scalar('Loss_epoch/' + ("train" if is_training else "validation") , lv, epoch_no)
            if total_seq != 0:
                writer.add_scalar('MSE_epoch/' + ("train" if is_training else "validation") , cumlated_sqerr / total_seq, epoch_no)
            return lv
        for epoch_no in range(self.epochs):
            # training
            epoch_loss = loop(epoch_no, training_data_loader)
            if epoch_no % validation_period == 0 and epoch_no != 0:
                # save model
                torch.save(net.state_dict(), f"{self.log_path}/trained_model/train_net_{epoch_no}")
        writer.close()
| 6,578 | 35.55 | 137 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/feature/lag.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
from typing import List, Optional
# Third-party imports
import numpy as np
from pandas.tseries.frequencies import to_offset
from .utils import get_granularity
def _make_lags(middle: int, delta: int) -> np.ndarray:
"""
Create a set of lags around a middle point including +/- delta
"""
return np.arange(middle - delta, middle + delta + 1).tolist()
def get_lags_for_frequency(
    freq_str: str, lag_ub: int = 1200, num_lags: Optional[int] = None
) -> List[int]:
    """
    Generates a list of lags that are appropriate for the given frequency string.
    By default all frequencies have the following lags: [1, 2, 3, 4, 5, 6, 7].
    Remaining lags correspond to the same `season` (+/- `delta`) in previous `k` cycles.
    Here `delta` and `k` are chosen according to the existing code.
    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
    lag_ub
        The maximum value for a lag.
    num_lags
        Maximum number of lags; by default all generated lags are returned
    """
    # Lags are target values at the same `season` (+/- delta) but in the previous cycle.
    def _make_lags_for_minute(multiple, num_cycles=3):
        # We use previous ``num_cycles`` hours to generate lags
        return [_make_lags(k * 60 // multiple, 2) for k in range(1, num_cycles + 1)]
    def _make_lags_for_hour(multiple, num_cycles=7):
        # We use previous ``num_cycles`` days to generate lags
        return [_make_lags(k * 24 // multiple, 1) for k in range(1, num_cycles + 1)]
    def _make_lags_for_day(multiple, num_cycles=4):
        # We use previous ``num_cycles`` weeks to generate lags
        # We use the last month (in addition to 4 weeks) to generate lag.
        return [_make_lags(k * 7 // multiple, 1) for k in range(1, num_cycles + 1)] + [
            _make_lags(30 // multiple, 1)
        ]
    def _make_lags_for_week(multiple, num_cycles=3):
        # We use previous ``num_cycles`` years to generate lags
        # Additionally, we use previous 4, 8, 12 weeks
        return [_make_lags(k * 52 // multiple, 1) for k in range(1, num_cycles + 1)] + [
            [4 // multiple, 8 // multiple, 12 // multiple]
        ]
    def _make_lags_for_month(multiple, num_cycles=3):
        # We use previous ``num_cycles`` years to generate lags
        return [_make_lags(k * 12 // multiple, 1) for k in range(1, num_cycles + 1)]
    # multiple, granularity = get_granularity(freq_str)
    offset = to_offset(freq_str)
    if offset.name == "M":
        lags = _make_lags_for_month(offset.n)
    elif offset.name == "W-SUN" or offset.name == "W-MON":
        lags = _make_lags_for_week(offset.n)
    elif offset.name == "D":
        lags = _make_lags_for_day(offset.n) + _make_lags_for_week(offset.n / 7.0)
    elif offset.name == "B":
        # todo find good lags for business day
        lags = []
    elif offset.name == "H":
        lags = (
            _make_lags_for_hour(offset.n)
            + _make_lags_for_day(offset.n / 24.0)
            + _make_lags_for_week(offset.n / (24.0 * 7))
        )
    # minutes
    elif offset.name == "T":
        lags = (
            _make_lags_for_minute(offset.n)
            + _make_lags_for_hour(offset.n / 60.0)
            + _make_lags_for_day(offset.n / (60.0 * 24))
            + _make_lags_for_week(offset.n / (60.0 * 24 * 7))
        )
    else:
        raise Exception("invalid frequency")
    # flatten lags list and filter: drop lags already covered by the default
    # 1..7 and anything above the upper bound lag_ub
    lags = [int(lag) for sub_list in lags for lag in sub_list if 7 < lag <= lag_ub]
    lags = [1, 2, 3, 4, 5, 6, 7] + sorted(list(set(lags)))
    return lags[:num_lags]
def get_fourier_lags_for_frequency(freq_str: str, num_lags: Optional[int] = None) -> List[int]:
    """Return a small, sorted set of seasonal lags for `freq_str`.

    Unrecognized granularities fall back to [1]. At most `num_lags` values
    are returned (all, when num_lags is None).
    """
    granularity = to_offset(freq_str).name
    lags_by_granularity = {
        "M": [1, 12],
        "D": [1, 7, 14],
        "B": [1, 2],
        "H": [1, 24, 168],
        "min": [1, 4, 12, 24, 48],
    }
    candidates = lags_by_granularity.get(granularity, [1])
    deduped = sorted({int(lag) for lag in candidates})
    return deduped[:num_lags]
| 4,951 | 34.625899 | 95 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/feature/utils.py | import re
from functools import lru_cache
from typing import Tuple
def get_granularity(freq_str: str) -> Tuple[int, str]:
    """Split a frequency string like "12H" into (12, "H").

    A missing multiple defaults to 1, e.g. "D" -> (1, "D").

    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as
        "12H", "5min", "1D" etc.
    """
    m = re.match(r"\s*((\d+)?)\s*([^\d]\w*)", freq_str)
    assert m is not None, "Cannot parse frequency string: %s" % freq_str
    _, mult_str, granularity = m.groups()
    multiple = 1 if mult_str is None else int(mult_str)
    return multiple, granularity
@lru_cache()
def get_seasonality(freq: str) -> int:
    """
    Return the default seasonality for a frequency string, e.g. 2H -> 12.

    Unknown base frequencies get seasonality 1; if the multiple does not
    evenly divide the base seasonality, fall back to 1 as well.
    """
    match = re.match(r"(\d*)(\w+)", freq)
    assert match, "Cannot match freq regex"
    mult, base_freq = match.groups()
    multiple = int(mult) if mult else 1
    base_seasonality = {"H": 24, "D": 1, "W": 1, "M": 12, "B": 5}.get(base_freq, 1)
    if base_seasonality % multiple != 0:
        return 1
    return base_seasonality // multiple
| 1,454 | 26.980769 | 93 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/feature/holiday.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List, Callable
import numpy as np
import pandas as pd
from pandas.tseries.holiday import (
TH,
SU,
EasterMonday,
GoodFriday,
Holiday,
USColumbusDay,
USLaborDay,
USMartinLutherKingJr,
USMemorialDay,
USPresidentsDay,
USThanksgivingDay,
)
from pandas.tseries.offsets import DateOffset, Day, Easter
# Search window (days) on each side of a date when looking for the nearest
# holiday: half a year (183, covering leap years) plus ~9 days of slack for
# holidays that float by up to a week (e.g. Easter-relative dates).
MAX_WINDOW = 192


def distance_to_holiday(holiday):
    """Return a function mapping a date index to its signed day distance from
    the nearest occurrence of `holiday` (negative = before the holiday)."""

    def distance_to_day(index):
        occurrences = holiday.dates(
            index - pd.Timedelta(days=MAX_WINDOW),
            index + pd.Timedelta(days=MAX_WINDOW),
        )
        assert (
            len(occurrences) != 0
        ), f"No closest holiday for the date index {index} found."
        # Exactly half a year after a holiday two dates can be returned;
        # taking the first keeps the smaller (182-day) distance.
        return (index - occurrences[0]).days

    return distance_to_day
# Holidays not provided by pandas.tseries.holiday, built from DateOffsets.
# NOTE(review): EasterSunday anchors on Jan 1 and relies entirely on the
# Easter() offset to land on the right date — confirm against pandas docs.
EasterSunday = Holiday("Easter Sunday", month=1, day=1, offset=[Easter(), Day(0)])
NewYearsDay = Holiday("New Years Day", month=1, day=1)
# first Sunday of February
SuperBowl = Holiday("Superbowl", month=2, day=1, offset=DateOffset(weekday=SU(1)))
# second Sunday of May
MothersDay = Holiday("Mothers Day", month=5, day=1, offset=DateOffset(weekday=SU(2)))
IndependenceDay = Holiday("Independence Day", month=7, day=4)
ChristmasEve = Holiday("Christmas", month=12, day=24)
ChristmasDay = Holiday("Christmas", month=12, day=25)
NewYearsEve = Holiday("New Years Eve", month=12, day=31)
# day after the fourth Thursday of November (US Thanksgiving)
BlackFriday = Holiday(
    "Black Friday", month=11, day=1, offset=[pd.DateOffset(weekday=TH(4)), Day(1)]
)
# Monday after the fourth Thursday of November
CyberMonday = Holiday(
    "Cyber Monday",
    month=11,
    day=1,
    offset=[pd.DateOffset(weekday=TH(4)), Day(4)],
)
# String identifiers for every supported special date feature.
NEW_YEARS_DAY = "new_years_day"
MARTIN_LUTHER_KING_DAY = "martin_luther_king_day"
SUPERBOWL = "superbowl"
PRESIDENTS_DAY = "presidents_day"
GOOD_FRIDAY = "good_friday"
EASTER_SUNDAY = "easter_sunday"
EASTER_MONDAY = "easter_monday"
MOTHERS_DAY = "mothers_day"
INDEPENDENCE_DAY = "independence_day"
LABOR_DAY = "labor_day"
MEMORIAL_DAY = "memorial_day"
COLUMBUS_DAY = "columbus_day"
THANKSGIVING = "thanksgiving"
CHRISTMAS_EVE = "christmas_eve"
CHRISTMAS_DAY = "christmas_day"
NEW_YEARS_EVE = "new_years_eve"
BLACK_FRIDAY = "black_friday"
CYBER_MONDAY = "cyber_monday"

# Maps each feature name to a function that computes the signed day-distance
# from a timestamp to the nearest occurrence of the corresponding holiday.
SPECIAL_DATE_FEATURES = {
    NEW_YEARS_DAY: distance_to_holiday(NewYearsDay),
    MARTIN_LUTHER_KING_DAY: distance_to_holiday(USMartinLutherKingJr),
    SUPERBOWL: distance_to_holiday(SuperBowl),
    PRESIDENTS_DAY: distance_to_holiday(USPresidentsDay),
    GOOD_FRIDAY: distance_to_holiday(GoodFriday),
    EASTER_SUNDAY: distance_to_holiday(EasterSunday),
    EASTER_MONDAY: distance_to_holiday(EasterMonday),
    MOTHERS_DAY: distance_to_holiday(MothersDay),
    INDEPENDENCE_DAY: distance_to_holiday(IndependenceDay),
    LABOR_DAY: distance_to_holiday(USLaborDay),
    MEMORIAL_DAY: distance_to_holiday(USMemorialDay),
    COLUMBUS_DAY: distance_to_holiday(USColumbusDay),
    THANKSGIVING: distance_to_holiday(USThanksgivingDay),
    CHRISTMAS_EVE: distance_to_holiday(ChristmasEve),
    CHRISTMAS_DAY: distance_to_holiday(ChristmasDay),
    NEW_YEARS_EVE: distance_to_holiday(NewYearsEve),
    BLACK_FRIDAY: distance_to_holiday(BlackFriday),
    CYBER_MONDAY: distance_to_holiday(CyberMonday),
}
# Kernel functions
def indicator(distance):
    """Hard indicator kernel: 1.0 exactly on the holiday (distance 0), else 0.0."""
    return 1.0 if distance == 0 else 0.0
def exponential_kernel(alpha=1.0, tol=1e-9):
    """Build an exponential decay kernel exp(-alpha * |d|), truncated to 0.0 below ``tol``."""

    def kernel(distance):
        value = np.exp(-alpha * np.abs(distance))
        return value if value > tol else 0.0

    return kernel
def squared_exponential_kernel(alpha=1.0, tol=1e-9):
    """Build a squared-exponential (RBF) kernel exp(-alpha * d**2), truncated to 0.0 below ``tol``."""

    def kernel(distance):
        value = np.exp(-alpha * np.abs(distance) ** 2)
        return value if value > tol else 0.0

    return kernel
class SpecialDateFeatureSet:
    """
    Implements calculation of holiday features. The SpecialDateFeatureSet is
    applied on a pandas Series with Datetimeindex and returns a 2D array of
    the shape (num_features, len(dates)), where num_features are the number
    of holidays.

    Note that for lower than daily granularity the distance to the holiday is
    still computed on a per-day basis.

    Example use:

        >>> from gluonts.time_feature.holiday import (
        ...    squared_exponential_kernel,
        ...    SpecialDateFeatureSet,
        ...    CHRISTMAS_DAY,
        ...    CHRISTMAS_EVE
        ... )
        >>> import pandas as pd
        >>> sfs = SpecialDateFeatureSet([CHRISTMAS_EVE, CHRISTMAS_DAY])
        >>> date_indices = pd.date_range(
        ...     start="2016-12-24",
        ...     end="2016-12-31",
        ...     freq='D'
        ... )
        >>> sfs(date_indices)
        array([[1., 0., 0., 0., 0., 0., 0., 0.],
               [0., 1., 0., 0., 0., 0., 0., 0.]])

    Example use for using a squared exponential kernel:

        >>> kernel = squared_exponential_kernel(alpha=1.0)
        >>> sfs = SpecialDateFeatureSet([CHRISTMAS_EVE, CHRISTMAS_DAY], kernel)
        >>> sfs(date_indices)
        array([[1.00000000e+00, 3.67879441e-01, 1.83156389e-02, 1.23409804e-04,
                1.12535175e-07, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
               [3.67879441e-01, 1.00000000e+00, 3.67879441e-01, 1.83156389e-02,
                1.23409804e-04, 1.12535175e-07, 0.00000000e+00, 0.00000000e+00]])
    """

    def __init__(
        self,
        feature_names: List[str],
        kernel_function: Callable[[int], float] = indicator,
    ):
        """
        Parameters
        ----------
        feature_names
            list of strings with holiday names for which features should be created.
        kernel_function
            kernel function to pass the feature value based
            on distance in days. Can be indicator function (default),
            exponential_kernel, squared_exponential_kernel or user defined.
        """
        self.feature_names = feature_names
        self.num_features = len(feature_names)
        self.kernel_function = kernel_function

    def __call__(self, dates):
        """
        Transform a pandas series with timestamps to holiday features.

        Returns an array of shape (num_features, len(dates)): one row per
        holiday feature, one column per timestamp.

        Parameters
        ----------
        dates
            Pandas series with Datetimeindex timestamps.
        """
        # outer vstack: one row per feature; inner hstack: one kernel value
        # per timestamp, computed from the day-distance to the holiday
        return np.vstack(
            [
                np.hstack(
                    [
                        self.kernel_function(SPECIAL_DATE_FEATURES[feat_name](index))
                        for index in dates
                    ]
                )
                for feat_name in self.feature_names
            ]
        )
| 7,419 | 32.423423 | 85 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/feature/__init__.py | from .holiday import SPECIAL_DATE_FEATURES, SpecialDateFeatureSet
from .lag import get_lags_for_frequency, get_fourier_lags_for_frequency
from .time_feature import (
DayOfMonth,
DayOfWeek,
DayOfYear,
HourOfDay,
MinuteOfHour,
MonthOfYear,
TimeFeature,
WeekOfYear,
FourierDateFeatures,
time_features_from_frequency_str,
fourier_time_features_from_frequency_str,
)
from .utils import get_granularity, get_seasonality
| 458 | 26 | 71 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/feature/time_feature.py | from abc import ABC, abstractmethod
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.frequencies import to_offset
from pts.core.component import validated
from .utils import get_granularity
class TimeFeature(ABC):
    """
    Base class for calendar-derived time features: subclasses map a
    ``pd.DatetimeIndex`` to a numeric feature array.
    """

    @validated()
    def __init__(self, normalized: bool = True):
        # normalized=True scales the feature to (approximately) [-0.5, 0.5];
        # False returns the raw calendar value cast to float.
        self.normalized = normalized

    @abstractmethod
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # compute the feature value for every timestamp in ``index``
        pass
class MinuteOfHour(TimeFeature):
    """
    Minute of hour, encoded in [-0.5, 0.5] when normalized, else as raw float.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.minute.map(float)
        return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
    """
    Hour of day, encoded in [-0.5, 0.5] when normalized, else as raw float.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.hour.map(float)
        return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
    """
    Day of week, encoded in [-0.5, 0.5] when normalized, else as raw float.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.dayofweek.map(float)
        return index.dayofweek / 6.0 - 0.5  # dayofweek: 0~6
class DayOfMonth(TimeFeature):
    """
    Day of month, encoded in [-0.5, 0.5] when normalized, else as raw float.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.day.map(float)
        return (index.day - 1) / 30.0 - 0.5  # day: 1~31
class DayOfYear(TimeFeature):
    """
    Day of year, encoded in [-0.5, 0.5] when normalized, else as raw float.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.dayofyear.map(float)
        return (index.dayofyear - 1) / 364.0 - 0.5  # dayofyear: 1~365
class MonthOfYear(TimeFeature):
    """
    Month of year, encoded in [-0.5, 0.5] when normalized, else as raw float.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.month.map(float)
        return (index.month - 1) / 11.0 - 0.5  # month: 1~12
class Year(TimeFeature):
    """
    Year rescaled relative to 2014. NOTE(review): unlike the other features
    this is NOT bounded to [-0.5, 0.5] for years outside 2014-2019 — see TODO.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.year.map(float)
        return (index.year - 2014) / 5.0  # TODO: hard-coded reference year/span
class WeekOfYear(TimeFeature):
    """
    Week of year, encoded in [-0.5, 0.5] when normalized, else as raw float.
    """

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        if not self.normalized:
            return index.weekofyear.map(float)
        return (index.weekofyear - 1) / 51.0 - 0.5  # weekofyear: 1~52
class FourierDateFeatures(TimeFeature):
    """
    Sine/cosine (Fourier) encoding of one recurring calendar attribute of a
    ``pd.DatetimeIndex`` (e.g. ``"month"`` or ``"dayofweek"``). Returns an
    array with two rows: cos and sin of the attribute's phase.
    """

    @validated()
    def __init__(self, freq: str) -> None:
        super().__init__()
        # attributes of pd.DatetimeIndex that recur with a fixed period
        supported = [
            "month",
            "day",
            "hour",
            "minute",
            "weekofyear",
            "weekday",
            "dayofweek",
            "dayofyear",
            "daysinmonth",
        ]
        assert freq in supported
        self.freq = freq

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        attr_values = getattr(index, self.freq)
        # NOTE(review): the period is inferred from the largest observed value,
        # so short index ranges may yield a different period — confirm intended
        period = max(attr_values) + 1
        angles = [v * 2.0 * np.pi / period for v in attr_values]
        return np.vstack([np.cos(angles), np.sin(angles)])
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
    """
    Returns a list of time features that will be appropriate for the given frequency string.

    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
    """
    # only the base granularity matters here; the multiple is discarded
    _, granularity = get_granularity(freq_str)
    if granularity == "M":
        feature_classes = [MonthOfYear]
    elif granularity == "W":
        feature_classes = [DayOfMonth, WeekOfYear]
    elif granularity in ["D", "B"]:
        feature_classes = [DayOfWeek, DayOfMonth, DayOfYear]
    elif granularity == "H":
        feature_classes = [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear]
    elif granularity in ["min", "T"]:
        feature_classes = [MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear]
    else:
        supported_freq_msg = f"""
        Unsupported frequency {freq_str}
        The following frequencies are supported:
            M - monthly
            W - week
            D - daily
            H - hourly
            min - minutely
    """
        raise RuntimeError(supported_freq_msg)
    # instantiate every selected feature with default (normalized) settings
    return [cls() for cls in feature_classes]
def fourier_time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
    """
    Return the Fourier (sin/cos) date features appropriate for ``freq_str``.
    Raises an AssertionError for unsupported base offsets.
    """
    granularity = to_offset(freq_str).name

    # calendar attributes to encode, keyed by pandas offset name
    attrs_by_offset = {
        "M": ["weekofyear"],
        "W-SUN": ["daysinmonth", "weekofyear"],
        "W-MON": ["daysinmonth", "weekofyear"],
        "D": ["dayofweek"],
        "B": ["dayofweek", "dayofyear"],
        "H": ["hour", "dayofweek"],
        "min": ["minute", "hour", "dayofweek"],
        "T": ["minute", "hour", "dayofweek"],
    }
    assert granularity in attrs_by_offset, f"freq {granularity} not supported"

    return [FourierDateFeatures(freq=attr) for attr in attrs_by_offset[granularity]]
| 5,559 | 26.389163 | 93 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/scaler.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
import torch.nn as nn
class Scaler(ABC, nn.Module):
    """
    Base module that rescales a (N, T, C) batch by a per-item scale.

    Subclasses implement :meth:`compute_scale`; :meth:`forward` divides the
    data by that scale and returns both.
    """

    def __init__(self, keepdim: bool = False):
        super().__init__()
        # keepdim=True keeps a singleton time axis on the returned scale
        self.keepdim = keepdim

    @abstractmethod
    def compute_scale(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> torch.Tensor:
        """Return a (N, C) scale tensor for the given batch."""

    def forward(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Scale ``data`` by the computed per-item scale.

        Parameters
        ----------
        data
            tensor of shape (N, T, C) containing the data to be scaled
        observed_indicator
            binary tensor with the same shape as ``data``: 1 marks observed
            points, 0 marks missing points.

        Returns
        -------
        Tensor
            The scaled data, shape (N, T, C).
        Tensor
            The scale: shape (N, C) if ``keepdim == False``, else (N, 1, C).
        """
        scale = self.compute_scale(data, observed_indicator)
        if not self.keepdim:
            return data / scale.unsqueeze(1), scale
        scale = scale.unsqueeze(1)
        return data / scale, scale
class MeanScaler(Scaler):
    """
    The ``MeanScaler`` computes a per-item scale according to the average
    absolute value over time of each item. The average is computed only among
    the observed values in the data tensor, as indicated by the second
    argument. Items with no observed data are assigned a scale based on the
    global average.

    Parameters
    ----------
    minimum_scale
        default scale that is used if the time series has only zeros.
    """

    def __init__(self, minimum_scale: float = 1e-10, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # buffer (not a parameter): moves with the module across devices
        self.register_buffer("minimum_scale", torch.tensor(minimum_scale))

    def compute_scale(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> torch.Tensor:
        # these will have shape (N, C)
        num_observed = observed_indicator.sum(dim=1)
        sum_observed = (data.abs() * observed_indicator).sum(dim=1)

        # first compute a global scale per-dimension (shape (C,)),
        # clamping the denominator to 1 to avoid division by zero
        total_observed = num_observed.sum(dim=0)
        denominator = torch.max(total_observed, torch.ones_like(total_observed))
        default_scale = sum_observed.sum(dim=0) / denominator

        # then compute a per-item, per-dimension scale
        denominator = torch.max(num_observed, torch.ones_like(num_observed))
        scale = sum_observed / denominator

        # use per-batch scale when no element is observed
        # or when the sequence contains only zeros
        scale = torch.where(
            sum_observed > torch.zeros_like(sum_observed),
            scale,
            default_scale * torch.ones_like(num_observed),
        )

        # floor at minimum_scale; detach so no gradient flows through the scale
        return torch.max(scale, self.minimum_scale).detach()
class NOPScaler(Scaler):
    """
    Identity scaler: every item gets a scale of exactly one, so the data is
    returned unchanged.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def compute_scale(
        self, data: torch.Tensor, observed_indicator: torch.Tensor
    ) -> torch.Tensor:
        # the time-mean of an all-ones tensor is one per (item, dimension)
        ones = torch.ones_like(data)
        return ones.mean(dim=1)
| 3,528 | 31.376147 | 91 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/lambda_layer.py | import torch.nn as nn
class LambdaLayer(nn.Module):
    """
    Wrap an arbitrary callable as an ``nn.Module`` so it can live inside a
    module hierarchy; ``forward`` simply delegates to the wrapped function.
    """

    def __init__(self, function):
        super().__init__()
        self._func = function

    def forward(self, x, *args):
        # pass the first input plus any extra positional args straight through
        return self._func(x, *args)
| 215 | 18.636364 | 35 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/distribution_output.py | from abc import ABC, abstractmethod
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import (
Distribution,
Beta,
NegativeBinomial,
StudentT,
Normal,
Independent,
LowRankMultivariateNormal,
MultivariateNormal,
TransformedDistribution,
AffineTransform,
Poisson
)
from pts.core.component import validated
from .lambda_layer import LambdaLayer
from .distribution import ConstantDistribution, Tweedie
class ArgProj(nn.Module):
    """
    Project a feature vector onto the (unbounded) arguments of a distribution
    and map them into their valid domain via ``domain_map``.

    When ``in_features == 0`` no linear projection is applied and the input is
    handed to ``domain_map`` as-is.
    """

    def __init__(
        self,
        in_features: int,
        args_dim: Dict[str, int],
        domain_map: Callable[..., Tuple[torch.Tensor]],
        dtype: np.dtype = np.float32,
        prefix: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.dtype = dtype
        # in_features == 0 means the input already holds the raw arguments
        self.projection = in_features != 0
        if self.projection:
            # one linear head per distribution argument
            self.proj = nn.ModuleList(
                [nn.Linear(in_features, out_dim) for out_dim in args_dim.values()]
            )
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        if not self.projection:
            return self.domain_map(x)
        raw_args = [head(x) for head in self.proj]
        return self.domain_map(*raw_args)
class Output(ABC):
    """
    Base class for objects that project network output into distribution
    arguments (via :class:`ArgProj`) and map them into a valid domain.
    """

    # number of input features expected by the projection
    in_features: int
    # mapping from argument name to its dimensionality
    args_dim: Dict[str, int]
    # numpy dtype of the emitted parameters
    _dtype: np.dtype = np.float32

    @property
    def dtype(self):
        return self._dtype

    @dtype.setter
    def dtype(self, dtype: np.dtype):
        self._dtype = dtype

    def get_args_proj(self, in_features: int, prefix: Optional[str] = None) -> ArgProj:
        """Build an :class:`ArgProj` projecting ``in_features`` onto this output's args."""
        return ArgProj(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
            prefix=prefix,
            dtype=self.dtype,
        )

    @abstractmethod
    def domain_map(self, *args: torch.Tensor):
        # map unbounded projected tensors into the distribution's valid domain
        pass
class DistributionOutput(Output, ABC):
    """
    Output whose mapped arguments parametrize a ``torch.distributions``
    distribution of class ``distr_cls``.
    """

    distr_cls: type

    @validated()
    def __init__(self) -> None:
        pass

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build ``distr_cls(*distr_args)``, optionally rescaled by ``scale``."""
        distr = self.distr_cls(*distr_args)
        if scale is None:
            return distr
        # affine transform rescales samples: y = 0 + scale * x
        return TransformedDistribution(distr, [AffineTransform(loc=0, scale=scale)])
class NormalOutput(DistributionOutput):
    """DistributionOutput for a univariate ``Normal``."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distr_cls: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # softplus enforces a positive scale; drop the trailing singleton dim
        return loc.squeeze(-1), F.softplus(scale).squeeze(-1)

    @property
    def event_shape(self) -> Tuple:
        return ()
class BetaOutput(DistributionOutput):
    """DistributionOutput for a ``Beta`` distribution."""

    args_dim: Dict[str, int] = {"concentration1": 1, "concentration0": 1}
    distr_cls: type = Beta

    @classmethod
    def domain_map(cls, concentration1, concentration0):
        # softplus plus a small epsilon keeps both concentrations strictly positive
        c1 = F.softplus(concentration1) + 1e-8
        c0 = F.softplus(concentration0) + 1e-8
        return c1.squeeze(-1), c0.squeeze(-1)

    @property
    def event_shape(self) -> Tuple:
        return ()
class TweedieOutput(DistributionOutput):
    """
    DistributionOutput for a ``Tweedie`` distribution with a fixed variance
    power: only ``log_mu`` is learned, ``rho`` is pinned to ``tweedie_power``.
    """

    args_dim: Dict[str, int] = {"log_mu": 1, "rho": 1}  # , "dispersion": 1} TODO: add dispersion

    @validated()
    def __init__(self, tweedie_power=1.5) -> None:
        # rho : tweedie_variance_power (1 ~ 2)
        self.tweedie_power = tweedie_power

    def domain_map(self, log_mu, rho):
        # rho is not learned: pin it to the configured tweedie_power
        rho = self.tweedie_power * torch.ones_like(log_mu)
        return log_mu.squeeze(-1), rho.squeeze(-1)

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        """Build the Tweedie distribution, shifting log_mu by log(scale) if given."""
        log_mu, rho = distr_args
        if scale is not None:
            # out-of-place add: the original `log_mu += ...` mutated the
            # caller's tensor in place, corrupting any reuse of distr_args and
            # risking autograd in-place errors
            log_mu = log_mu + torch.log(scale)
        # TODO : rho scaling
        return Tweedie(log_mu, rho)

    @property
    def event_shape(self) -> Tuple:
        return ()
class NegativeBinomialOutput(DistributionOutput):
    """
    DistributionOutput for a ``NegativeBinomial`` parametrized by mean ``mu``
    and inverse count ``alpha`` (alpha = 1/r).
    """

    args_dim: Dict[str, int] = {"mu": 1, "alpha": 1}

    @classmethod
    def domain_map(cls, mu, alpha):
        # softplus plus epsilon keeps both parameters strictly positive
        mu = F.softplus(mu) + 1e-8
        alpha = F.softplus(alpha) + 1e-8  # alpha = 1/r
        return mu.squeeze(-1), alpha.squeeze(-1)

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        mu, alpha = distr_args
        if scale is not None:
            # out-of-place: the original `mu *= scale` / `alpha /= scale`
            # mutated the caller's tensors in place, corrupting any reuse of
            # distr_args and risking autograd in-place errors
            mu = mu * scale
            alpha = alpha / scale  # FIXME: wrong calculation
            # alpha += (scale - 1) / mu  # TODO: if scale < 1, alpha can be negative
            # alpha = alpha.clamp(min=1e-8)
        r = 1.0 / alpha
        p = mu * alpha / (1.0 + mu * alpha)  # p = mu / (r + mu)
        return NegativeBinomial(total_count=r, probs=p)

    @property
    def event_shape(self) -> Tuple:
        return ()
class StudentTOutput(DistributionOutput):
    """DistributionOutput for Student's t distribution."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distr_cls: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # scale must be positive; df is shifted above 2 so the variance exists
        mapped_df = 2.0 + F.softplus(df)
        mapped_scale = F.softplus(scale)
        return mapped_df.squeeze(-1), loc.squeeze(-1), mapped_scale.squeeze(-1)

    @property
    def event_shape(self) -> Tuple:
        return ()
class LowRankMultivariateNormalOutput(DistributionOutput):
    """
    DistributionOutput for a ``LowRankMultivariateNormal``: a ``dim``-d mean,
    a rank-``rank`` covariance factor and a positive diagonal.
    """

    def __init__(
        self, dim: int, rank: int, sigma_init: float = 1.0, sigma_minimum: float = 1e-3,
    ) -> None:
        self.distr_cls = LowRankMultivariateNormal
        self.dim = dim
        self.rank = rank
        self.sigma_init = sigma_init
        self.sigma_minimum = sigma_minimum
        self.args_dim = {"loc": dim, "cov_factor": dim * rank, "cov_diag": dim}

    def domain_map(self, loc, cov_factor, cov_diag):
        # bias the diagonal so softplus(0 + bias) == sigma_init**2 at init time
        if self.sigma_init > 0.0:
            diag_bias = self.inv_softplus(self.sigma_init ** 2)
        else:
            diag_bias = 0.0

        factor_shape = cov_factor.shape[:-1] + (self.dim, self.rank)
        cov_factor = cov_factor.reshape(factor_shape)
        cov_diag = F.softplus(cov_diag + diag_bias) + self.sigma_minimum ** 2
        return loc, cov_factor, cov_diag

    def inv_softplus(self, y):
        # softplus(x) ~= x for large x, so above 20 just pass y through
        if y < 20.0:
            return np.log(np.exp(y) - 1.0)
        return y

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)
class IndependentNormalOutput(DistributionOutput):
    """DistributionOutput for a diagonal (independent-components) multivariate normal."""

    def __init__(self, dim: int) -> None:
        self.dim = dim
        self.args_dim = {"loc": self.dim, "scale": self.dim}

    def domain_map(self, loc, scale):
        # only the scale needs mapping (positivity)
        return loc, F.softplus(scale)

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        base = Independent(Normal(*distr_args), 1)
        if scale is not None:
            return TransformedDistribution(base, [AffineTransform(loc=0, scale=scale)])
        return base
class MultivariateNormalOutput(DistributionOutput):
    """
    DistributionOutput for a full-covariance multivariate normal,
    parametrized by its Cholesky factor ``scale_tril``.
    """

    def __init__(self, dim: int) -> None:
        self.args_dim = {"loc": dim, "scale_tril": dim * dim}
        self.dim = dim

    def domain_map(self, loc, scale):
        d = self.dim
        device = scale.device
        scale = scale.reshape(scale.shape[:-1] + (d, d))
        eye = torch.eye(d, device=device)
        # positive diagonal via softplus; strictly-lower triangle passes through
        diag_part = F.softplus(scale * eye) * eye
        lower_mask = torch.tril(torch.ones_like(scale), diagonal=-1)
        scale_tril = (scale * lower_mask) + diag_part
        return loc, scale_tril

    def distribution(
        self, distr_args, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        loc, scale_tril = distr_args
        mvn = MultivariateNormal(loc=loc, scale_tril=scale_tril)
        if scale is not None:
            return TransformedDistribution(mvn, [AffineTransform(loc=0, scale=scale)])
        return mvn

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)
class FlowOutput(DistributionOutput):
    """
    DistributionOutput backed by a normalizing flow: the flow object itself
    acts as the distribution, conditioned on the projected ``cond`` tensor.
    """

    def __init__(self, flow, input_size, cond_size):
        self.args_dim = {"cond": cond_size}
        self.flow = flow
        self.dim = input_size

    def domain_map(self, cond):
        # no constraint on the conditioning vector
        return (cond,)

    def distribution(self, distr_args, scale=None):
        (cond,) = distr_args
        if scale is not None:
            self.flow.scale = scale
        # attach the conditioning so sampling/log_prob use it
        self.flow.cond = cond
        return self.flow

    @property
    def event_shape(self) -> Tuple:
        return (self.dim,)
| 8,743 | 25.904615 | 95 | py |
M5_Accuracy_3rd | M5_Accuracy_3rd-master/pts/modules/__init__.py | from .distribution_output import (
ArgProj,
Output,
DistributionOutput,
NormalOutput,
StudentTOutput,
BetaOutput,
NegativeBinomialOutput,
IndependentNormalOutput,
LowRankMultivariateNormalOutput,
MultivariateNormalOutput,
FlowOutput,
TweedieOutput
)
from .feature import FeatureEmbedder, FeatureAssembler
from .flows import RealNVP, MAF
from .lambda_layer import LambdaLayer
from .scaler import MeanScaler, NOPScaler
| 465 | 23.526316 | 54 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.