repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
toulbar2 | toulbar2-master/misc/script/wcsp2lp-support.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import os
import sys
import itertools
import numpy
assert len(sys.argv) == 3, "Please specify INPUT and OUTPUT filenames."
# does the WCSP have one or more constant terms
has_constant_term = False
# classe pour écriture à largeur de texte contrôlée
# Python 2 file subclass that wraps written text at a maximum column width
# (the lp format tolerates expressions broken across lines).
class WidthFile(file):
    # target maximum column before a forced line break
    maxcol = 80
    def __init__(self, *x, **k):
        file.__init__(self, *x, **k)
        # current column position in the line being written
        self.col = 0
    def write(self, x):
        """Write x, inserting a newline first if appending the first
        fragment would push the current line past self.maxcol."""
        lines = x.splitlines()
        if not lines:
            # ''.splitlines() is []; nothing to output (the original code
            # raised IndexError on lines[0] here)
            return
        # NOTE(review): splitlines() drops a trailing '\n', so a chunk that
        # ends with exactly one newline loses it -- presumably callers rely
        # on the current output; confirm before changing.
        if (self.col + len(lines[0])) >= self.maxcol:  # was a hard-coded 80
            file.write(self, "\n")
            self.col = 0
        map(lambda l: file.write(self, l + '\n'), lines[:-1])
        file.write(self, lines[-1])
        if len(lines) > 1:
            self.col = len(lines[-1])
        else:
            self.col += len(lines[-1])
# le nom des variables pour l'encodage des valeurs des domaines. les
# variables booléennes restent booléennes mais il faut compter les
# littéraux négatifs par ailleurs (format lp ne gère pas (1-x))
def domain_var(n, v):
    """Return the LP variable name encoding value v of WCSP variable n."""
    return " d{0:d}_{1:d} ".format(n, v)
def mdomain_var(coeff, n, v):
    """Return the signed LP term 'coeff * d<n>_<v>'.

    Boolean variables (domain size 2) keep a single literal d<n>_0: value 1
    is expressed through a negated coefficient on d<n>_0, because the lp
    format cannot express (1 - x) directly.
    """
    negated_boolean = (v == 1 and domains[n] == 2)
    if negated_boolean:
        return "%+i d%i_0 " % (-coeff, n)
    return "%+i d%i_%i " % (coeff, n, v)
# le nom des variables pour l'encodage des autres tuples
def tuple_var(tvar, tval):
    """Return a canonical LP variable name for the tuple that assigns,
    for each variable in tvar, the corresponding value in tval.

    Pairs are sorted by variable index so the name does not depend on the
    order of the scope.
    """
    pairs = sorted(zip(tvar, tval), key=lambda p: p[0])
    return "t" + "".join("_%i_%i" % p for p in pairs)
#le produit cartésien des séquences (stockées dans une séquence vlist).
def product(vlist):
return apply(itertools.product,vlist)
#enumerate all "tuples" on tvar (for var, if it appears in tvar, a
#single value val is used instead of thh full domain) generating the
#set of support tuples.
def enum_tuples(tvar, var, val):
return product(map(lambda ovar: [val] if (var == ovar) else xrange(domains[ovar]), tvar))
# reading numbers
def read_num_vec(toks):
return map(int, toks)
def read_int_tok(tok_iter):
    """Consume exactly one token through tok_iter and return it as an int."""
    (tok,) = tok_iter(1)
    return int(tok)
# lire une définition de cost function. The cost table is a tuple based dictionary
def read_fun(tok_iter):
n_var = read_int_tok(tok_iter)
vars_ = read_num_vec(tok_iter(n_var))
defcost = read_int_tok(tok_iter)
if defcost == -1:
defcost = tok_iter(1)[0]
n_spec = 1
else:
n_spec = read_int_tok(tok_iter)
tvo = sorted(map(lambda var,val: (var,val),vars_,range(len(vars_))),key=lambda x: x[0])
ovars = tuple(x[0] for x in tvo)
varorder = tuple(x[1] for x in tvo)
specs = dict()
for i in xrange(n_spec):
if defcost!='knapsackp':
tc = read_num_vec(tok_iter(n_var + 1))
if isinstance(defcost, basestring):
specs = tc
else:
specs[tuple(tc[i] for i in varorder)] = tc[-1]
else :
Weight=[]
Weight.append(read_int_tok(tok_iter))
for j in range(n_var):
nbval=read_int_tok(tok_iter)
Weight.append(nbval)
for k in range(nbval):
Weight.append(read_int_tok(tok_iter))
Weight.append(read_int_tok(tok_iter))
specs=Weight
if isinstance(defcost, basestring):
return vars_, defcost, specs
else:
return ovars, defcost, specs
# parcourir une cost function table
def iter_fun(vars_, defcost, specs):
vardom = [xrange(domains[v]) for v in vars_]
for t in itertools.product(*vardom):
if t in specs:
yield t, specs[t]
else:
yield t, defcost
# parcourir une cost function table en évitant les tuples d'un coût
# donné si possible (defcost)
def iter_funavoid(vars_, defcost, specs, avoid):
    """Like iter_fun, but when defcost equals avoid, only the explicitly
    listed tuples are enumerated (defaulted tuples are skipped)."""
    if defcost == avoid:
        for t in specs:
            yield t, specs[t]
    else:
        dom_lst = [xrange(domains[v]) for v in vars_]
        for t in itertools.product(*dom_lst):
            yield t, specs.get(t, defcost)
# ------------- MAIN ---------------------
def token_iter(filename):
    """Yield the whitespace-separated tokens of the file, one at a time.

    Each line is split on spaces and tabs and empty fragments are skipped.
    Uses a plain file iterator inside a with-block instead of the
    deprecated (Python-2-only) xreadlines(), so the file is closed when
    iteration ends.
    """
    with open(filename) as f:
        for l in f:
            for stok in l.strip().split(" "):
                for ttok in stok.strip().split("\t"):
                    if ttok:
                        yield ttok
tokens = token_iter(sys.argv[1])
def next_tokens(n):
    """Return the next n tokens from the global token stream `tokens`.

    Uses the next() builtin (Python 2.6+ and 3) instead of the
    Python-2-only .next() generator method.
    """
    return [next(tokens) for i in range(n)]
#line_iter = open(sys.argv[1]).xreadlines()
# Open the LP output file with automatic line wrapping.
output = WidthFile(sys.argv[2], 'w')
print "File %s opened" % sys.argv[1]
# reading parameters
#params = (line_iter.next().strip().split(" "))
# wcsp header: problem name, then 4 integers (number of variables,
# maximum domain size, number of cost functions, upper bound), then the
# domain size of each variable.
name = tokens.next()
n_var, max_domain_size, n_fun, upper_bound = read_num_vec(next_tokens(4))
domains = read_num_vec(next_tokens(n_var))
n_fun = int(n_fun)
# costs >= ub are treated as forbidden ("infinite")
ub = int(upper_bound)
print >> output, "Minimize"
all_fun = [read_fun(next_tokens) for i in xrange(n_fun)]
print "\nCost functions read."
# Output the criteria. Do not integrate zero or "infinite" cost
# components here. Zero is useless, "infinite" will be handled as
# linear constraints
negative_litterals = 0
for vars_, defcost, specs in all_fun:
    # global (knapsack-type) constraints have a string defcost and do not
    # contribute to the objective
    if isinstance(defcost, basestring):
        continue
    n_vars = len(vars_)
    if (n_vars == 0):
        # arity-0 function: a constant term, attached to the slack
        # variable t (fixed to 1 further below)
        has_constant_term = 1
        output.write(' +%i t ' % defcost)
    else:
        for t, cost in iter_funavoid(vars_, defcost, specs,0):
            if cost == 0 or cost >= ub:
                continue
            if n_vars == 1:
                output.write(mdomain_var(cost,vars_[0], t[0]))
                # a boolean value-1 literal is written as -cost*d_0, so the
                # constant +cost must be added back via t
                if (t[0] == 1 and domains[vars_[0]] <= 2):
                    negative_litterals = negative_litterals + cost
            else:
                output.write(' +%i %s ' % (cost, tuple_var(vars_, t)))
if negative_litterals:
    has_constant_term = 1
    output.write(" +%i t" % negative_litterals)
print "Criteria generated."
output.write("\n\nSubject to\n\n")
# Set of tuple vars that need not be used
ub_tuplevars = set()
# Hard constraints: for every value with cost >=ub, we forbid it
# explicitely. Tuples variables are just removed.
for vars_, defcost, specs in all_fun:
    if isinstance(defcost, basestring):
        if defcost == 'knapsack':
            # specs = [capacity, coeff_1, ..., coeff_n]; the constraint is
            # rewritten on the d<i>_0 literals.
            for i, v in enumerate(vars_):
                # NOTE(review): int(specs[i+1]>=0) looks like it was meant
                # to be int(specs[i+1])>=0 -- both pick the same branch
                # here since the bool converts to 0/1
                if int(specs[i+1]>=0):
                    output.write('+%i d%i_0' % (specs[i+1], v))
                else:
                    output.write('%i d%i_0' % (specs[i+1], v))
            output.write(' <= %i\n\n' % (sum(specs) - 2*specs[0],))
        elif defcost== 'knapsackp':
            # specs = [capacity, then per variable: nbval followed by
            # nbval (value, weight) pairs]; `last` walks that layout.
            last=1
            tot=0
            for i, v in enumerate(vars_):
                nbval=specs[last]
                if(domains[v]==2):
                    # boolean variable: collapse the two value weights onto
                    # the single d<v>_0 literal
                    val1=0
                    val0=0
                    for j in range(nbval):
                        if int(specs[last+1+2*j])==1:
                            val1=int(specs[last+2+2*j])
                            tot+=val1
                        else:
                            val0=int(specs[last+2+2*j])
                    if val0-val1>=0:
                        output.write('+%i d%i_0' % (val0-val1, v))
                    else:
                        output.write('%i d%i_0' % (val0-val1, v))
                else:
                    for j in range(nbval):
                        if int(specs[last+2+2*j]>=0):
                            output.write('+%i d%i_%i' % (specs[last+2+2*j], v,specs[last+1+2*j]))
                        else:
                            output.write('%i d%i_%i' % (specs[last+2+2*j], v,specs[last+1+2*j]))
                last=last+nbval*2+1
            output.write(' >= %i\n\n' % (specs[0]-tot,))
        continue
    n_vars = len(vars_)
    for t, cost in iter_funavoid(vars_, defcost, specs,0):
        if cost < ub:
            continue
        if n_vars == 1:
            # forbidden unary value: fix the literal (boolean value 1 is
            # encoded on d_0, hence the -1 right-hand side in that case)
            output.write('%s = %i\n\n' % (mdomain_var(1,vars_[0], t[0]), -(domains[vars_[0]] == 2 and t[0] == 1)))
        else:
            ub_tuplevars.add(tuple_var(vars_, t))
print "Hard constraints generated."
# Direct encoding. Exactly one value constraint. Boolean variables are not included here.
for i, dom in enumerate(domains):
    if (dom > 2) :
        # sum of the value literals of variable i must be exactly 1
        map(lambda v: output.write(mdomain_var(1, i, v)), xrange(dom))
        output.write(" = 1\n\n")
    if (dom == 1) :
        # singleton domain: the unique value is forced
        output.write("%s = 1\n\n" % domain_var(i,0))
print "Domain constraints generated."
# marginal consistency: one value selected iff one associated tuple selected.
# if several functions share the same scope, we need only to do it once
scopes = set(vars_ for vars_, defcost, specs in (f for f in all_fun if len(f[0]) >= 2 and not isinstance(f[1], basestring)))
print "%i different scopes detected." % len(scopes)
for vars_ in scopes:
    for va in vars_:
        for a in xrange(domains[va]):
            # sum of the tuple variables supporting (va = a) minus the
            # value literal is 0 (or 1 on the negated boolean encoding);
            # tuples removed by hard constraints are skipped
            map(lambda b: output.write("+1 %s " % tuple_var(vars_, b)) if tuple_var(vars_, b) not in ub_tuplevars else 0, enum_tuples(vars_,va,a))
            output.write(" %s " % mdomain_var(-1, va, a))
            output.write("= %i\n\n" % (domains[va] == 2 and a == 1))
print "Marginal consistency constraints generated."
if has_constant_term :
    # slack variable carrying the constant objective terms
    output.write("t = 1\n")
print "Tuple bounds generated."
output.write("\n\nBinary\n\n")
# indicate 0/1 variables (direct encoding).
for i, dom in enumerate(domains):
    if (dom > 2) :
        map(lambda v: output.write("%s " % domain_var(i, v)), xrange(dom))
    else:
        # boolean (or singleton) variable: only d<i>_0 exists
        output.write("%s " % domain_var(i, 0))
# declare the tuple variables of every non-global function of arity >= 2,
# skipping those removed by the hard constraints
for vars_, defcost, specs in (f for f in all_fun if len(f[0]) >= 2):
    if isinstance(defcost, basestring):
        continue
    map(lambda b: output.write("%s " % tuple_var(vars_, b)) if tuple_var(vars_, b) not in ub_tuplevars else 0, enum_tuples(vars_,-1,-1))
if has_constant_term :
    output.write("t")
output.write("\n\nEnd")
print "Domain binaries generated."
print "Finished." | 9,939 | 31.913907 | 146 | py |
toulbar2 | toulbar2-master/validation/bilevel/bilevel_mibs2.py |
# Bilevel optimization test (MibS-style example 2) with pytoulbar2.
# The model is built in numbered phases selected via cfn.Option.bilevel:
# 1 = restricted leader problem, 2 = follower problem, 3 = negated form of
# the follower problem, 4 = finalize and solve.
import pytoulbar2 as tb2
cfn = tb2.CFN(ubinit = 1000, verbose = 0)
cfn.NoPreprocessing()
cfn.Option.btdMode = 1
cfn.Option.hbfs = 0
# create restricted leader problem
cfn.Option.bilevel = 1
cfn.AddVariable('x0',range(2))
cfn.AddVariable('x1',range(2))
cfn.AddVariable('x2',range(2))
cfn.AddLinearConstraint([7,5,2],['x0','x1','x2'],'<=',9)
# after each phase: record the phase's lower bound and negative cost, then
# reset both so the next phase starts from a clean state
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
# create follower problem
cfn.Option.bilevel = 2
cfn.AddVariable('C0',range(4))
cfn.AddVariable('C1',range(3))
cfn.AddVariable('C2',range(5))
# follower side constraints: violations cost 1000000 (>= ub, i.e. forbidden)
cfn.AddFunction(['C0','C1','C2'], [(0 if (11 * v0 + 4 * v1 + 6 * v2 <= 50) else 1000000) for v0 in range(4) for v1 in range(3) for v2 in range(5)])
cfn.AddFunction(['x0','C0'], [(0 if v0 <= 3*(1-x0) else 1000000) for x0 in range(2) for v0 in range(4)])
cfn.AddFunction(['x1','C1'], [(0 if v1 <= 2*(1-x1) else 1000000) for x1 in range(2) for v1 in range(3)])
cfn.AddFunction(['x2','C2'], [(0 if v2 <= 4*(1-x2) else 1000000) for x2 in range(2) for v2 in range(5)])
cfn.AddFunction(['C0'], [-8 * v0 for v0 in range(4)])
cfn.AddFunction(['C1'], [-12 * v1 for v1 in range(3)])
cfn.AddFunction(['C2'], [-3 * v2 for v2 in range(5)])
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
# create negative form of follower problem
cfn.Option.bilevel = 3
cfn.AddVariable('C0neg',range(4))
cfn.AddVariable('C1neg',range(3))
cfn.AddVariable('C2neg',range(5))
cfn.AddFunction(['C0neg','C1neg','C2neg'], [(8 * v0 + 12 * v1 + 3 * v2 if (11 * v0 + 4 * v1 + 6 * v2 <= 50) else 1000000) for v0 in range(4) for v1 in range(3) for v2 in range(5)])
cfn.AddFunction(['x0','C0neg'], [(0 if v0 <= 3*(1-x0) else 1000000) for x0 in range(2) for v0 in range(4)])
cfn.AddFunction(['x1','C1neg'], [(0 if v1 <= 2*(1-x1) else 1000000) for x1 in range(2) for v1 in range(3)])
cfn.AddFunction(['x2','C2neg'], [(0 if v2 <= 4*(1-x2) else 1000000) for x2 in range(2) for v2 in range(5)])
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
cfn.Option.bilevel = 4
cfn.Option.decimalPointBLP = [0,0,0]
# phase cost multipliers: phase-3 (negated follower) costs count negatively
cfn.Option.costMultiplierBLP = [1.,1.,-1.]
cfn.Option.initialUbBLP = [tb2.tb2.MAX_COST,tb2.tb2.MAX_COST,tb2.tb2.MAX_COST]
print(cfn.Option.negCostBLP)
print(cfn.Option.initialLbBLP)
cfn.CFN.wcsp.setLb(cfn.Option.initialLbBLP[0] + cfn.Option.initialLbBLP[2])
cfn.CFN.wcsp.decreaseLb(cfn.Option.negCostBLP[0] + cfn.Option.negCostBLP[2])
# cluster tree decomposition linking the three phases -- TODO confirm the
# exact setVarOrder string semantics against the toulbar2 documentation
cfn.Option.setVarOrder('0 -1 0 1 2\n1 0 0 1 2\n2 0 0 1 2 3 4 5\n3 0 0 1 2 6 7 8\n')
cfn.Solve(showSolutions=3)
| 2,994 | 43.044118 | 181 | py |
toulbar2 | toulbar2-master/validation/bilevel/bilevel_mibs0.py |
# Bilevel optimization test (MibS-style example 0) with pytoulbar2: the
# leader variable x (cost -x, i.e. maximized) and the follower variable y.
# The model is built in numbered phases selected via cfn.Option.bilevel:
# 1 = restricted leader problem, 2 = follower problem, 3 = negated form of
# the follower problem, 4 = finalize and solve.
import pytoulbar2 as tb2
cfn = tb2.CFN(ubinit = 1000, verbose = 0)
cfn.NoPreprocessing()
cfn.Option.btdMode = 1
cfn.Option.hbfs = 0
# create restricted leader problem
cfn.Option.bilevel = 1
cfn.AddVariable('x',range(9))
cfn.AddFunction(['x'],[-vx for vx in range(9)])
# after each phase: record the phase's lower bound and negative cost, then
# reset both so the next phase starts from a clean state
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
# create follower problem
cfn.Option.bilevel = 2
cfn.AddVariable('y',range(6))
# follower cost 10*y subject to four linear side constraints (violations
# get cost 1000000 >= ub, i.e. forbidden)
cfn.AddFunction(['x','y'], [(10 * vy if ((-25 * vx + 20 * vy <= 30) and (1 * vx + 2 * vy <= 10) and (2 * vx - 1 * vy <= 15) and (2 * vx + 10 * vy >= 15)) else 1000000) for vx in range(9) for vy in range(6)])
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
# create negative form of follower problem
cfn.Option.bilevel = 3
cfn.AddVariable('yneg',range(6))
cfn.AddFunction(['x','yneg'], [(-10 * vy if ((-25 * vx + 20 * vy <= 30) and (1 * vx + 2 * vy <= 10) and (2 * vx - 1 * vy <= 15) and (2 * vx + 10 * vy >= 15)) else 1000000) for vx in range(9) for vy in range(6)])
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
cfn.Option.bilevel = 4
cfn.Option.decimalPointBLP = [0,0,0]
# phase cost multipliers: phase-3 (negated follower) costs count negatively
cfn.Option.costMultiplierBLP = [1.,1.,-1.]
cfn.Option.initialUbBLP = [tb2.tb2.MAX_COST,tb2.tb2.MAX_COST,tb2.tb2.MAX_COST]
print(cfn.Option.negCostBLP)
print(cfn.Option.initialLbBLP)
cfn.CFN.wcsp.setLb(cfn.Option.initialLbBLP[0] + cfn.Option.initialLbBLP[2])
cfn.CFN.wcsp.decreaseLb(cfn.Option.negCostBLP[0] + cfn.Option.negCostBLP[2])
cfn.Option.setVarOrder('0 -1 0\n1 0 0\n2 0 0 1\n3 0 0 2\n')
cfn.Solve(showSolutions=3)
| 2,055 | 37.792453 | 211 | py |
toulbar2 | toulbar2-master/validation/bilevel/bilevel_mibs1.py |
# Bilevel optimization test (MibS-style example 1) with pytoulbar2: the
# leader variable C0 (cost -C0, i.e. maximized) and the follower C1.
# The model is built in numbered phases selected via cfn.Option.bilevel:
# 1 = restricted leader problem, 2 = follower problem, 3 = negated form of
# the follower problem, 4 = finalize and solve.
import pytoulbar2 as tb2
cfn = tb2.CFN(ubinit = 1000, verbose = 0)
cfn.NoPreprocessing()
cfn.Option.btdMode = 1
cfn.Option.hbfs = 0
# create restricted leader problem
cfn.Option.bilevel = 1
cfn.AddVariable('C0',range(11))
cfn.AddFunction(['C0'],[-v for v in range(11)])
# after each phase: record the phase's lower bound and negative cost, then
# reset both so the next phase starts from a clean state
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
# create follower problem
cfn.Option.bilevel = 2
cfn.AddVariable('C1',range(6))
cfn.AddFunction(['C0','C1'], [(7 * v1 if ((-3 * v0 + 2 * v1 <= 12) and (1 * v0 + 2 * v1 <= 20) and (2 * v0 - 1 * v1 <= 7) and (-2 * v0 + 4 * v1 <= 16)) else 1000000) for v0 in range(11) for v1 in range(6)]) # all cost functions and constraints on the same scope must be merged
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
# create negative form of follower problem
cfn.Option.bilevel = 3
cfn.AddVariable('C1neg',range(6))
cfn.AddFunction(['C0','C1neg'], [(-7 * v1 if ((-3 * v0 + 2 * v1 <= 12) and (1 * v0 + 2 * v1 <= 20) and (2 * v0 - 1 * v1 <= 7) and (-2 * v0 + 4 * v1 <= 16)) else 1000000) for v0 in range(11) for v1 in range(6)]) # all cost functions and constraints on the same scope must be merged
cfn.Option.initialLbBLP = cfn.Option.initialLbBLP + [cfn.CFN.wcsp.getLb()]
cfn.CFN.wcsp.setLb(0)
cfn.Option.negCostBLP = cfn.Option.negCostBLP + [cfn.CFN.wcsp.getNegativeLb()]
cfn.CFN.wcsp.decreaseLb(-cfn.CFN.wcsp.getNegativeLb())
cfn.Option.bilevel = 4
cfn.Option.decimalPointBLP = [0,0,0]
# phase cost multipliers: phase-3 (negated follower) costs count negatively
cfn.Option.costMultiplierBLP = [1.,1.,-1.]
cfn.Option.initialUbBLP = [tb2.tb2.MAX_COST,tb2.tb2.MAX_COST,tb2.tb2.MAX_COST]
print(cfn.Option.negCostBLP)
print(cfn.Option.initialLbBLP)
cfn.CFN.wcsp.setLb(cfn.Option.initialLbBLP[0] + cfn.Option.initialLbBLP[2])
cfn.CFN.wcsp.decreaseLb(cfn.Option.negCostBLP[0] + cfn.Option.negCostBLP[2])
cfn.Option.setVarOrder('0 -1 0\n1 0 0\n2 0 0 1\n3 0 0 2\n')
cfn.Solve(showSolutions=3)
| 2,196 | 40.45283 | 280 | py |
toulbar2 | toulbar2-master/validation/default/weightedcspconstraint.py | VERBOSE=0
# Test for CFN.AddWeightedCSPConstraint: the cost of a second network
# (PROBLEM2) is constrained to lie in the interval [LB, UB] while solving
# the first network (PROBLEM1).
PROBLEM1="../../validation/bilevel/bilevel1b.cfn"
PROBLEM2="../../validation/bilevel/bilevel2.cfn"
LB=11
UB=20
import pytoulbar2 as tb2
cfn1 = tb2.CFN(verbose = VERBOSE)
cfn1.Read(PROBLEM1)
cfn2 = tb2.CFN(verbose = VERBOSE)
cfn2.Read(PROBLEM2)
# last argument True -- NOTE(review): meaning not visible here; confirm
# against the pytoulbar2 AddWeightedCSPConstraint API
cfn1.AddWeightedCSPConstraint(cfn2, LB, UB, True)
cfn1.Solve(showSolutions=3, allSolutions=100) # find 18 solutions
| 375 | 19.888889 | 65 | py |
toulbar2 | toulbar2-master/validation/default/clique.py |
# Test for postCliqueConstraint on four boolean variables, combined with
# pairwise cost tables; enumerates up to 16 solutions with upper bound 1,
# so any cost-1000 tuple is forbidden.
import pytoulbar2 as tb2
m = tb2.CFN(1, verbose=0)
w=m.AddVariable('w', range(2))
x=m.AddVariable('x', range(2))
y=m.AddVariable('y', range(2))
z=m.AddVariable('z', range(2))
# raw wcsp-level clique constraint; the string encodes the constraint
# parameters -- TODO confirm exact format against the toulbar2 docs
m.CFN.wcsp.postCliqueConstraint([x,y,z,w],'1 1 1 1 1 1 1 1 1')
# cost table [0,0,0,1000]: the pair (1,1) costs 1000 (>= ub, forbidden)
for u in [w,x,y,z]:
    for v in [w,x,y,z]:
        if u<v:
            m.AddFunction([u,v],[0, 0, 0, 1000])
m.Solve(showSolutions=1, allSolutions=16)
| 377 | 18.894737 | 62 | py |
toulbar2 | toulbar2-master/validation/default/sregular.py |
# Test for postWRegular: four boolean variables constrained by a weighted
# 2-state automaton, plus two unary cost tables.
import pytoulbar2 as tb2
m = tb2.CFN(12, verbose=0)
v1=m.AddVariable('v1', range(2))
v2=m.AddVariable('v2', range(2))
v3=m.AddVariable('v3', range(2))
v4=m.AddVariable('v4', range(2))
m.AddFunction([v1], [2, 0])
m.AddFunction([v4], [0, 3])
# automaton: initial state 0, both states accepting; assuming
# DFATransition(from, symbol, to, weight) the language is 0*1* -- TODO
# confirm the argument order against the pytoulbar2 API
m.CFN.wcsp.postWRegular([v1,v2,v3,v4],'var','DAG', 12, 2, [tb2.tb2.WeightedObjInt(0,0)], [tb2.tb2.WeightedObjInt(0,0), tb2.tb2.WeightedObjInt(1,0)], [tb2.tb2.DFATransition(0,0,0,0),tb2.tb2.DFATransition(0,1,1,0),tb2.tb2.DFATransition(1,1,1,0)])
m.Solve(showSolutions=1, allSolutions=16)
| 534 | 28.722222 | 202 | py |
ns_lattice | ns_lattice-master/ns_lattice/setup.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Jan 27, 2017
@author: Niels Lubbes
https://python-packaging.readthedocs.io/en/latest/minimal.html
https://pypi.python.org/pypi?%3Aaction=list_classifiers
'''
from setuptools import setup
# Package metadata for the ns_lattice distribution.
setup(
    name = 'ns_lattice',
    version = '4',
    description = 'Algorithms for computing in Neron-Severi lattice',
    classifiers = [
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Mathematics',
    ],
    keywords = 'Neron-Severi-lattice',
    url = 'http://github.com/niels-lubbes/ns_lattice',
    author = 'Niels Lubbes',
    license = 'MIT',
    # sources live under src/, exposed as the 'ns_lattice' package
    package_dir = {'ns_lattice': 'src/ns_lattice'},
    packages = ['ns_lattice'],
    # precomputed data object shipped with the package
    package_data = {'ns_lattice': ['ns_tools.sobj']},
    # include_package_data = True,
    install_requires = ['linear_series'],
    test_suite = 'nose.collector',
    tests_require = ['nose'],
    # command line entry point for running the package
    entry_points = {
        'console_scripts': ['run-lattice=ns_lattice.__main__:main'],
    },
    zip_safe = False
)
| 1,250 | 29.512195 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_class_ns_tools.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 13, 2017
@author: Niels Lubbes
'''
from ns_lattice.class_ns_tools import NSTools
class TestClassNSTools:
    """Tests for the NSTools utility class (filtered printing and the
    persistent tool dictionary)."""

    def test__p( self ):
        """NSTools.p() prints only when the caller's file name passes the
        currently installed filter."""
        NSTools.filter( None )
        assert NSTools.p( 'Hello world!' ) != None
        NSTools.filter( ['another_class.py'] )
        assert NSTools.p( 'No output since called from another class.' ) == None
        NSTools.filter_unset()
        assert NSTools.p( 'Filter is disabled so output this string.' ) != None
        NSTools.filter_reset()
        assert NSTools.p( 'Filter is enabled again so do not output.' ) == None
        NSTools.filter( ['test_class_ns_tools.py'] )
        assert NSTools.p( 'Only output if called from this class' ) != None

    def test__tool_dct( self ):
        """The tool dictionary is shared between NSTools instances, survives
        save/load, and is bypassed while set_enable_tool_dct(False)."""
        nt = NSTools()
        nt2 = NSTools()
        # watch out to not use the default file name
        # otherwise it might take long to load the data
        test_fname = 'test_tools'
        key = 'test__tool_dct'
        dct = nt.get_tool_dct( fname = test_fname )
        dct[key] = True
        nt.save_tool_dct( fname = test_fname )
        # the saved key is visible through both instances
        assert key in nt.get_tool_dct( fname = test_fname )
        assert key in nt2.get_tool_dct( fname = test_fname )
        # disabling the dictionary hides the stored data for everyone
        nt.set_enable_tool_dct( False )
        assert key not in nt.get_tool_dct( fname = test_fname )
        assert key not in nt2.get_tool_dct( fname = test_fname )
        # re-enabling restores it
        nt.set_enable_tool_dct( True )
        assert key in nt.get_tool_dct( fname = test_fname )
        assert key in nt2.get_tool_dct( fname = test_fname )
| 1,654 | 28.035088 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_class_div.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 8, 2017
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_matrix
from ns_lattice.class_div import Div
from ns_lattice.class_ns_tools import NSTools
class TestClassDiv:
def test__new( self ):
assert Div.new( '3e0+e1+5e5-e6' ).e_lst == [3, 1, 0, 0, 0, 5, -1, 0, 0]
assert Div.new( 'e1-e2' ).e_lst == [0, 1, -1, 0, 0, 0, 0, 0, 0]
assert Div.new( '-e1+e2' ).e_lst == [0, -1, 1, 0, 0, 0, 0, 0, 0]
assert Div.new( '-3e0' ).e_lst == [-3, 0, 0, 0, 0, 0, 0, 0, 0]
assert Div.new( '-e3' ).e_lst == [0, 0, 0, -1, 0, 0, 0, 0, 0]
assert Div.new( '12' ).e_lst == [0, 1, -1, 0, 0, 0, 0, 0, 0]
assert Div.new( '-12' ).e_lst == [0, -1, 1, 0, 0, 0, 0, 0, 0]
assert Div.new( '1245' ).e_lst == [1, 0, -1, 0, -1, -1, 0, 0, 0]
assert Div.new( '214' ).e_lst == [2, 0, -1, -1, 0, -1, -1, -1, -1]
assert Div.new( '306' ).e_lst == [3, -1, -1, -1, -1, -1, -2, -1, -1]
assert Div.new( '-308' ).e_lst == [-3, 1, 1, 1, 1, 1, 1, 1, 2]
def test__get_label__True( self ):
assert Div( [3, 1, 0, 0, 0, 5, -1, 0, 0] ).get_label( True ) == '3e0+e1+5e5-e6'
assert Div( [0, 1, -1, 0, 0, 0, 0, 0, 0] ).get_label( True ) == '12'
assert Div( [0, -1, 1, 0, 0, 0, 0, 0, 0] ).get_label( True ) == '-12'
assert Div( [-3, 0, 0, 0, 0, 0, 0, 0, 0] ).get_label( True ) == '-3e0'
assert Div( [0, 0, 0, -1, 0, 0, 0, 0, 0] ).get_label( True ) == '-e3'
assert Div( [0, 1, -1, 0, 0, 0, 0, 0, 0] ).get_label( True ) == '12'
assert Div( [0, -1, 1, 0, 0, 0, 0, 0, 0] ).get_label( True ) == '-12'
assert Div( [1, 0, -1, 0, -1, -1, 0, 0, 0] ).get_label( True ) == '1245'
assert Div( [2, 0, -1, -1, 0, -1, -1, -1, -1] ).get_label( True ) == '214'
assert Div( [3, -1, -1, -1, -1, -1, -2, -1, -1] ).get_label( True ) == '306'
assert Div( [-3, 1, 1, 1, 1, 1, 1, 1, 2] ).get_label( True ) == '-308'
def test__get_abbr_label( self ):
assert Div.new( 'e1' ).get_abbr_label() == 'e1'
assert Div.new( 'e1-e2' ).get_abbr_label() == 'e12'
assert Div.new( '2e0-e1-e2-e4-e5' ).get_abbr_label() == '2e1245'
assert Div.new( 'e0-e1' ).get_abbr_label() == '1e1'
def test__lt( self ):
assert Div.new( '1123' ) < Div.new( '1124' )
assert Div.new( '12' ) < Div.new( '1123' )
assert Div.new( '12' ) < Div.new( '13' )
assert Div.new( '12' ) < Div.new( '34' )
def test__get_basis_change( self ):
B = sage_matrix( sage_ZZ, [( 1, -1, 0, 0, 0, 0 ),
( 1, 0, -1, 0, 0, 0 ),
( 1, -1, -1, 0, 0, 0 ),
( 0, 0, 0, 1, 0, 0 ),
( 0, 0, 0, 0, 1, 0 ),
( 0, 0, 0, 0, 0, 1 )] )
# (-2)-classes
assert Div.new( '1123', 6 ).get_basis_change( B ).get_label() == 'e2-e3'
assert Div.new( '1345', 6 ).get_basis_change( B ).get_label() == 'e0+e1-e2-e3-e4-e5'
assert Div.new( '12', 6 ).get_basis_change( B ).get_label() == '-e0+e1'
assert Div.new( '13', 6 ).get_basis_change( B ).get_label() == 'e1-e2-e3'
assert Div.new( '23', 6 ).get_basis_change( B ).get_label() == 'e0-e2-e3'
# (-1)-classes
assert Div.new( 'e1', 6 ).get_basis_change( B ).get_label() == 'e1-e2'
assert Div.new( 'e2', 6 ).get_basis_change( B ).get_label() == 'e0-e2'
assert Div.new( 'e3', 6 ).get_basis_change( B ).get_label() == 'e3'
assert Div.new( '2e0-e1-e2-e3-e4-e5', 6 ).get_basis_change( B ).get_label() == 'e0+e1-e3-e4-e5'
# classes of conical families
assert Div.new( 'e0-e1', 6 ).get_basis_change( B ).get_label() == 'e0'
assert Div.new( 'e0-e2', 6 ).get_basis_change( B ).get_label() == 'e1'
assert Div.new( 'e0-e3', 6 ).get_basis_change( B ).get_label() == 'e0+e1-e2-e3'
assert Div.new( '2e0-e1-e3-e4-e5', 6 ).get_basis_change( B ).get_label() == '2e0+e1-e2-e3-e4-e5'
assert Div.new( '2e0-e2-e3-e4-e5', 6 ).get_basis_change( B ).get_label() == 'e0+2e1-e2-e3-e4-e5'
def test__is_positive( self ):
assert Div.new( 'e0-e1', 6 ).is_positive()
assert Div.new( 'e1-e2', 6 ).is_positive()
assert not Div.new( '-e2+e1', 6 ).is_positive()
assert not Div.new( '-e0+e1+e2', 6 ).is_positive()
if __name__ == '__main__':
NSTools.filter( None )
TestClassDiv().test__is_positive()
pass
| 4,686 | 41.225225 | 104 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_class_dp_lattice.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Nov 7, 2017
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_QQ
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_matrix
from ns_lattice.class_div import Div
from ns_lattice.dp_involutions import complete_basis
from ns_lattice.sage_interface import sage_vector
from ns_lattice.div_in_lattice import get_divs
from ns_lattice.div_in_lattice import get_ak
from ns_lattice.class_eta import ETA
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.dp_root_bases import get_dynkin_type
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_dp_lattice import DPLattice
class TestClassDPLattice():
def test__eq( self ):
NSTools.set_enable_tool_dct( False )
Md_lst = []
M = sage_identity_matrix( sage_QQ, 4 )
dpl23 = DPLattice( [Div.new( '23', 4 )], Md_lst, M )
dpl1123 = DPLattice( [Div.new( '1123', 4 )], Md_lst, M )
dpl12 = DPLattice( [Div.new( '12', 4 )], Md_lst, M )
assert dpl23 != dpl1123
assert dpl23 == dpl12
NSTools.set_enable_tool_dct( True )
def test__get_marked_Mtype( self ):
NSTools.set_enable_tool_dct( False )
# (2A1, 4A1) Neron-Severi lattice of ring torus
rank = 6
d_lst = [ 'e2-e4', 'e3-e5', 'e0-e1-e2-e4', 'e0-e1-e3-e5']
Md_lst = ['e4-e5', 'e0-e1-e2-e3']
M = [( 2, 1, 1, 1, 0, 0 ), ( -1, 0, -1, -1, 0, 0 ), ( -1, -1, 0, -1, 0, 0 ), ( -1, -1, -1, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 1 ), ( 0, 0, 0, 0, 1, 0 )]
d_lst = [ Div.new( d, rank ) for d in d_lst ]
Md_lst = [ Div.new( Md, rank ) for Md in Md_lst ]
M = sage_matrix( M )
dpl = DPLattice( d_lst, Md_lst, M )
print( dpl.get_marked_Mtype() )
print( dpl.Mtype )
assert dpl.get_marked_Mtype() == "2A1'"
NSTools.set_enable_tool_dct( True )
def test__get_bas_lst__rank_3( self ):
NSTools.set_enable_tool_dct( False )
bas_lst = DPLattice.get_bas_lst( 3 )
assert len( bas_lst ) == 2
for bas in bas_lst:
print( bas )
print( len( bas_lst ) )
NSTools.set_enable_tool_dct( True )
def test__get_bas_lst__rank_4( self ):
NSTools.set_enable_tool_dct( False )
bas_lst = DPLattice.get_bas_lst( 4 )
for bas in bas_lst:
print( bas )
print( len( bas_lst ) )
assert len( bas_lst ) == 6
type_lst = []
for bas in bas_lst:
type_lst += [( bas.Mtype, bas.type )]
print( type_lst )
assert str( type_lst ) == "[('A0', 'A0'), ('A0', 'A1'), ('A0', 'A1'), ('A0', '2A1'), ('A0', 'A2'), ('A0', 'A1+A2')]"
NSTools.set_enable_tool_dct( True )
def test__get_inv_lst__rank_4( self ):
NSTools.set_enable_tool_dct( False )
rank = 4
inv_lst = DPLattice.get_inv_lst( rank )
print( len( inv_lst ) )
for inv in inv_lst:
inv.set_attributes( 8 )
type_lst = []
for inv in inv_lst:
type_lst += [( inv.Mtype, inv.type )]
print( type_lst[-1] )
assert len( inv_lst ) == 4
assert str( type_lst ) == "[('A0', 'A0'), ('A1', 'A0'), ('A1', 'A0'), ('2A1', 'A0')]"
NSTools.set_enable_tool_dct( True )
def test__get_cls_slow__rank_3( self ):
NSTools.set_enable_tool_dct( False )
rank = 3
dpl_lst = DPLattice.get_cls_slow( rank )
for dpl in dpl_lst:
dpl.set_attributes( 8 )
type_lst = []
for dpl in dpl_lst:
type_lst += [( dpl.Mtype, dpl.type )]
print( type_lst[-1] )
print( type_lst )
assert str( type_lst ) == "[('A0', 'A0'), ('A0', 'A1'), ('A1', 'A0')]"
NSTools.set_enable_tool_dct( True )
def test__get_cls_slow__rank_4( self ):
NSTools.set_enable_tool_dct( False )
rank = 4
dpl_lst = DPLattice.get_cls_slow( rank )
for dpl in dpl_lst:
dpl.set_attributes( 8 )
type_lst = []
for dpl in dpl_lst:
type_lst += [( dpl.Mtype, dpl.type )]
print( type_lst[-1] )
print( type_lst )
assert str( type_lst ) == "[('A0', 'A0'), ('A0', 'A1'), ('A0', 'A1'), ('A0', '2A1'), ('A0', 'A2'), ('A0', 'A1+A2'), ('A1', 'A0'), ('A1', 'A1'), ('A1', 'A0'), ('A1', 'A1'), ('A1', 'A2'), ('2A1', 'A0')]"
NSTools.set_enable_tool_dct( True )
def test__get_num_types( self ):
NSTools.set_enable_tool_dct( False )
bas_lst = DPLattice.get_bas_lst( 4 )
inv_lst = DPLattice.get_inv_lst( 4 )
bas = bas_lst[1]
inv = inv_lst[-1]
assert inv.Mtype == '2A1'
assert bas.type == 'A1'
assert DPLattice.get_num_types( inv, bas, bas_lst ) == 0
bas = bas_lst[1]
inv = inv_lst[2]
assert inv.Mtype == 'A1'
assert bas.type == 'A1'
assert DPLattice.get_num_types( inv, bas, bas_lst ) == -1
NSTools.set_enable_tool_dct( True )
def test__get_part_roots( self ):
NSTools.set_enable_tool_dct( False )
inv_lst = DPLattice.get_inv_lst( 4 )
inv = inv_lst[1]
assert inv.Mtype == 'A1'
s_lst, q_lst = DPLattice.get_part_roots( inv )
assert len( s_lst ) == 1
assert q_lst == []
NSTools.set_enable_tool_dct( True )
def test__seek_bases( self ):
NSTools.set_enable_tool_dct( False )
bas = DPLattice.get_bas_lst( 4 )[-1]
assert bas.type == 'A1+A2'
inv = DPLattice.get_inv_lst( 4 )[0]
assert inv.Mtype == 'A0'
r_lst = get_divs( get_ak( bas.get_rank() ), 0, -2, True )
dpl_lst = DPLattice.seek_bases( inv, bas.d_lst, r_lst )
for dpl in dpl_lst:
dpl.set_attributes()
print( dpl.Mtype, dpl.type, dpl.d_lst )
assert len( dpl_lst ) == 1
NSTools.set_enable_tool_dct( True )
def test__get_cls__rank_3( self ):
NSTools.set_enable_tool_dct( False )
dpl_lst = DPLattice.get_cls( 3 )
type_lst = []
for dpl in dpl_lst:
type_lst += [( dpl.Mtype, dpl.type )]
print( type_lst )
assert str( type_lst ) == "[('A0', 'A0'), ('A0', 'A1'), ('A1', 'A0')]"
NSTools.set_enable_tool_dct( True )
def test__import_cls( self ):
NSTools.set_enable_tool_dct( False )
dpl_lst = DPLattice.get_cls( 3 )
type_lst = [( dpl.Mtype, dpl.type ) for dpl in dpl_lst ]
assert str( type_lst ) == "[('A0', 'A0'), ('A0', 'A1'), ('A1', 'A0')]"
inv = DPLattice.get_inv_lst( 4 )[1]
assert inv.Mtype == 'A1'
out_lst = DPLattice.import_cls( dpl_lst, inv )
assert len( out_lst ) == 1
assert out_lst[0].get_rank() == 4
assert out_lst[0].Mtype == 'A1'
assert out_lst[0].type == 'A0'
NSTools.set_enable_tool_dct( True )
def test__get_cls__rank_4( self ):
NSTools.set_enable_tool_dct( False )
dpl_lst = DPLattice.get_cls( 4 )
type_lst = []
for dpl in dpl_lst:
type_lst += [( dpl.Mtype, dpl.type )]
print( dpl.get_marked_Mtype(), dpl.type )
print( type_lst )
assert str( type_lst ) == "[('A0', 'A0'), ('A0', 'A1'), ('A0', 'A1'), ('A0', '2A1'), ('A0', 'A2'), ('A0', 'A1+A2'), ('A1', 'A0'), ('A1', 'A1'), ('A1', 'A0'), ('A1', 'A1'), ('A1', 'A2'), ('2A1', 'A0')]"
NSTools.set_enable_tool_dct( True )
def test__get_real_type( self ):
NSTools.set_enable_tool_dct( False )
dpl_lst = DPLattice.get_cls_slow( 4 )
type_lst = []
for dpl in dpl_lst:
type_lst += [( dpl.get_marked_Mtype(), dpl.get_real_type() )]
out = ''
for type in type_lst:
print( type[0] + ', ' + type[1] )
out += type[0] + ',' + type[1] + '; '
print( out )
assert out.strip() == "A0,A0; A0,{A1}; A0,{A1}; A0,2{A1}; A0,{A2}; A0,{A1}+{A2}; A1,A0; A1,{A1}; A1',A0; A1',{A1}; A1',{A2}; 2A1,A0;"
NSTools.set_enable_tool_dct( True )
def test__get_SG( self ):
NSTools.set_enable_tool_dct( False )
dpl_lst = DPLattice.get_cls( 4 )
out_lst = []
for dpl in dpl_lst:
SG, SG_data = dpl.get_SG()
out_lst += [[ dpl.Mtype, dpl.get_real_type()] + SG_data]
for out in out_lst:
print( out )
print( out_lst )
assert str( out_lst ) == "[['A0', 'A0', 3, 0, [0], [], False, False, True, True], ['A0', '{A1}', 2, 0, [0], [], False, False, True, True], ['A0', '{A1}', 3, 0, [0], [], False, False, True, True], ['A0', '2{A1}', 2, 0, [0], [], False, False, True, True], ['A0', '{A2}', 1, 0, [0], [], True, True, True, True], ['A0', '{A1}+{A2}', 1, 0, [0], [], True, True, True, True], ['A1', 'A0', 1, 0, [0], [], True, True, True, True], ['A1', '{A1}', 1, 0, [0], [], True, True, True, True], ['A1', 'A0', 3, 0, [0], [], False, False, True, True], ['A1', '{A1}', 2, 0, [0], [], False, False, True, True], ['A1', '{A2}', 1, 0, [0], [], True, True, True, True], ['2A1', 'A0', 1, 0, [0], [], True, True, True, True]]"
NSTools.set_enable_tool_dct( True )
def test__are_root_bases( self ):
    '''
    For each nonempty root basis in the rank-4 classification, verify that
    any (-2)-class that lies in the integral span of the basis has
    nonnegative coefficients (i.e. the basis is a basis of positive roots).
    '''
    NSTools.set_enable_tool_dct( False )  # do not read/write the persistent cache during the test
    bas_lst = DPLattice.get_bas_lst( 4 )
    for bas in bas_lst:
        if bas.d_lst == []:
            continue  # nothing to check for the empty root basis
        mat = complete_basis( bas.d_lst )  # columns: basis elements, then orthogonal complement
        r_lst = get_divs( get_ak( bas.get_rank() ), 0, -2, True )  # all positive (-2)-classes
        print( bas.type, bas.d_lst, 10 * '=' )
        for r in r_lst:
            # coordinates of r with respect to the columns of mat
            vec = ~mat * sage_vector( r.e_lst )
            print( r.e_lst, vec, r, list( mat ) )
            # r lies in the span of bas.d_lst iff the complement coordinates vanish
            in_span = set( vec[len( bas.d_lst ):] ) == {0}
            zz_coef = set( [elt in sage_ZZ for elt in vec ] ) == {True}
            pos_coef = set( [elt >= 0 for elt in vec] ) == {True}
            if in_span and zz_coef:
                assert pos_coef
    NSTools.set_enable_tool_dct( True )
if __name__ == '__main__':

    # Ad hoc test runner: uncomment the test invocations below to execute them.
    NSTools.filter( None )
    # NSTools.filter( ['class_dp_lattice.py', 'class_eta.py'] )

    # TestClassDPLattice().test__eq()
    # TestClassDPLattice().test__get_marked_Mtype()
    # TestClassDPLattice().test__get_bas_lst__rank_3()
    # TestClassDPLattice().test__get_bas_lst__rank_4()
    # TestClassDPLattice().test__get_inv_lst__rank_4()
    # TestClassDPLattice().test__get_cls_slow__rank_3()
    # TestClassDPLattice().test__get_cls_slow__rank_4()
    # TestClassDPLattice().test__get_num_types()
    # TestClassDPLattice().test__get_part_roots()
    # TestClassDPLattice().test__seek_bases()
    # TestClassDPLattice().test__import_cls()
    # TestClassDPLattice().test__get_cls__rank_3()
    # TestClassDPLattice().test__get_cls__rank_4()
    # TestClassDPLattice().test__get_real_type()
    TestClassDPLattice().test__get_SG()
    # TestClassDPLattice().test__are_root_bases()

    pass
| 11,122 | 31.054755 | 706 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_div_in_lattice.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 8, 2017
@author: Niels Lubbes
'''
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_div import Div
from ns_lattice.div_in_lattice import get_divs
from ns_lattice.div_in_lattice import get_indecomp_divs
from ns_lattice.div_in_lattice import get_ak
class TestDivInLattice:
    '''Tests for the functions in the module div_in_lattice.'''

    def test__get_divs_2_2( self ):
        '''Divisor classes c with c*d==2 and c*c==2 for d=2e0-e1-e2.'''
        NSTools.set_enable_tool_dct( False )  # do not read/write the persistent cache
        d = Div.new( '2e0-e1-e2' )
        dc = 2
        cc = 2
        c_lst = get_divs( d, dc, cc, True )
        assert [c.get_label() for c in c_lst ] == [ '2e0-e1-e2' ]
        NSTools.set_enable_tool_dct( True )

    def test__get_divs__minus_1_classes__rank_4( self ):
        '''(-1)-classes in rank 4, without permutations.'''
        NSTools.set_enable_tool_dct( False )
        chk_lst = ['e1', 'e0-e1-e2']
        out_lst = []
        for div in get_divs( get_ak( 4 ), 1, -1, False ):
            out_lst += [ div.get_label() ]
        assert out_lst == chk_lst
        NSTools.set_enable_tool_dct( True )

    def test__get_divs__minus_1_classes__rank_5( self ):
        '''(-1)-classes in rank 5, with permutations.'''
        NSTools.set_enable_tool_dct( False )
        chk_lst = ['e1', 'e2', 'e3', 'e4',
                   'e0-e1-e2', 'e0-e1-e3', 'e0-e2-e3',
                   'e0-e1-e4', 'e0-e2-e4', 'e0-e3-e4']
        out_lst = []
        for div in get_divs( get_ak( 5 ), 1, -1, True ):
            out_lst += [ div.get_label() ]
        assert out_lst == chk_lst
        NSTools.set_enable_tool_dct( True )

    def test__get_divs__minus_1_classes__rank_9( self ):
        '''(-1)-classes in rank 9, without permutations.'''
        NSTools.set_enable_tool_dct( False )
        chk_lst = [ 'e1',
                    'e0-e1-e2',
                    '2e0-e1-e2-e3-e4-e5',
                    '3e0-2e1-e2-e3-e4-e5-e6-e7',
                    '4e0-2e1-2e2-2e3-e4-e5-e6-e7-e8',
                    '5e0-2e1-2e2-2e3-2e4-2e5-2e6-e7-e8',
                    '6e0-3e1-2e2-2e3-2e4-2e5-2e6-2e7-2e8' ]
        out_lst = []
        for div in get_divs( get_ak( 9 ), 1, -1, False ):
            out_lst += [ div.get_label() ]
        assert out_lst == chk_lst
        NSTools.set_enable_tool_dct( True )

    def test__get_divs__minus_2_classes__rank_5__perm_true( self ):
        '''(-2)-classes in rank 5, with permutations, compared via abbreviated integer labels.'''
        NSTools.set_enable_tool_dct( False )
        chk_lst = [12, 23, 13, 34, 24, 14,
                   1123, 1124, 1134, 1234]
        out_lst = []
        for div in get_divs( get_ak( 5 ), 0, -2, True ):
            out_lst += [ int( div.get_label( True ) ) ]
        print( out_lst )
        assert out_lst == chk_lst
        NSTools.set_enable_tool_dct( True )

    def test__get_divs__roman_surface( self ):
        '''Classes of intersection 2 with the class of a Roman surface.'''
        NSTools.set_enable_tool_dct( False )
        h = Div.new( '4e0-e1-e2-e3-e4-e5-e6-e7-e8' )
        out_lst = get_divs( h, 2, -2, False )
        out_lst += get_divs( h, 2, -1, False )
        print( out_lst )
        assert str( out_lst ) == '[2e0-e1-e2-e3-e4-e5-e6, e0-e1-e2]'
        NSTools.set_enable_tool_dct( True )

    def test__get_divs__fam_classes__rank_6__perm_false( self ):
        '''Classes of families (self-intersection 0) in rank 6, without permutations.'''
        NSTools.set_enable_tool_dct( False )
        chk_lst = ['e0-e1', '2e0-e1-e2-e3-e4']
        out_lst = []
        for div in get_divs( get_ak( 6 ), 2, 0, False ):
            out_lst += [ div.get_label() ]
        assert out_lst == chk_lst
        NSTools.set_enable_tool_dct( True )

    def test__get_divs__fam_classes__rank_6__perm_true( self ):
        '''Classes of families (self-intersection 0) in rank 6, with permutations.'''
        NSTools.set_enable_tool_dct( False )
        chk_lst = ['e0-e1', 'e0-e2', 'e0-e3',
                   'e0-e4', 'e0-e5',
                   '2e0-e1-e2-e3-e4',
                   '2e0-e1-e2-e3-e5',
                   '2e0-e1-e2-e4-e5',
                   '2e0-e1-e3-e4-e5',
                   '2e0-e2-e3-e4-e5']
        out_lst = []
        for div in get_divs( get_ak( 6 ), 2, 0, True ):
            out_lst += [ div.get_label() ]
        print( out_lst )
        assert out_lst == chk_lst
        NSTools.set_enable_tool_dct( True )

    def test__get_indecomp_divs( self ):
        '''Indecomposable classes among c_lst with respect to the root basis d_lst.'''
        NSTools.set_enable_tool_dct( False )
        c_lst = ['e0-e1', 'e0-e2', 'e0-e3',
                 'e0-e4', 'e0-e5',
                 '2e0-e1-e2-e3-e4',
                 '2e0-e1-e2-e3-e5',
                 '2e0-e1-e2-e4-e5',
                 '2e0-e1-e3-e4-e5',
                 '2e0-e2-e3-e4-e5']
        c_lst = [ Div.new( c ) for c in c_lst ]
        d_lst = [ 12, 1123 ]
        d_lst = [ Div.new( str( d ) ) for d in d_lst ]
        chk_lst = ['e0-e1', 'e0-e3', 'e0-e4', 'e0-e5',
                   '2e0-e1-e2-e4-e5', '2e0-e1-e3-e4-e5']
        out_lst = []
        for div in get_indecomp_divs( c_lst, d_lst ):
            out_lst += [ div.get_label() ]
        print( out_lst )
        assert out_lst == chk_lst
        NSTools.set_enable_tool_dct( True )
if __name__ == '__main__':

    # Ad hoc test runner: uncomment the test invocations below to execute them.
    NSTools.filter( None )

    # TestDivInLattice().test__get_divs__fam_classes__rank_6__perm_true()
    # TestDivInLattice().test__get_divs__minus_2_classes__rank_5__perm_true()
    # TestDivInLattice().test__get_divs__minus_1_classes__rank_9()
    # TestDivInLattice().test__get_divs__roman_surface()
| 5,065 | 32.773333 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_class_eta.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Jan 27, 2018
@author: Niels Lubbes
'''
from ns_lattice.class_eta import ETA
from ns_lattice.class_ns_tools import NSTools
class TestClassETA():
    '''Tests for the ETA progress estimator.'''

    def test__update( self ):
        '''Every call to update() must advance the loop counter by exactly one.'''
        total = 10
        eta = ETA( total, 2 )
        expected = 0
        while expected < total:
            assert eta.counter == expected
            eta.update( '*test*', 3 )
            expected += 1
            assert eta.counter == expected
if __name__ == '__main__':

    # Ad hoc test runner; the second filter() call overrides the first.
    NSTools.filter( ['class_eta.py'] )
    NSTools.filter( None )

    TestClassETA().test__update()
| 592 | 21.807692 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_dp_root_basis.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 13, 2017
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_QQ
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_Graph
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_div import Div
from ns_lattice.dp_root_bases import is_root_basis
from ns_lattice.dp_root_bases import get_graph
from ns_lattice.dp_root_bases import get_ext_graph
from ns_lattice.dp_root_bases import get_dynkin_type
from ns_lattice.dp_root_bases import convert_type
from ns_lattice.dp_root_bases import get_root_bases_orbit
class TestDPRootBasis():
    '''Tests for the functions in the module dp_root_bases.'''

    def test__is_root_basis( self ):
        '''Empty list and compatible bases are accepted; duplicates and negative products are rejected.'''
        assert is_root_basis( [] )

        bas_lst = [1123 ]
        assert is_root_basis( [Div.new( str( bas ), 4 ) for bas in bas_lst] )

        bas_lst = [1123, 23 ]
        assert is_root_basis( [Div.new( str( bas ), 4 ) for bas in bas_lst] )

        bas_lst = [1123, 1123 ]
        assert not is_root_basis( [Div.new( str( bas ), 4 ) for bas in bas_lst] )

        bas_lst = [12, -23 ]
        assert not is_root_basis( [Div.new( str( bas ), 4 ) for bas in bas_lst] )

    def test__get_graph( self ):
        '''An A3 chain 12-23-34 yields the path graph on three vertices with edge label 1.'''
        bas_lst = [12, 23, 34 ]
        d_lst = [Div.new( str( bas ), 5 ) for bas in bas_lst]
        G = get_graph( d_lst )
        test_G = sage_Graph()
        test_G.add_vertices( [0, 1, 2] )
        test_G.add_edge( 0, 1, 1 )
        test_G.add_edge( 1, 2, 1 )
        assert G == test_G

    def test__get_ext_graph( self ):
        '''The extended graph distinguishes the non-equivalent A1 sub-systems [23] and [1123].'''
        NSTools.set_enable_tool_dct( False )
        #
        # example for Neron-Severi lattice of sextic weak del Pezzo surface
        # The A1 root sub-systems [23] and [1123] are not equivalent.
        # We use as invariant a graph.
        #
        M = sage_identity_matrix( sage_QQ, 4 )  # real structure is the identity
        e_lst = [ 'e1', 'e0-e1-e2', 'e2', 'e0-e2-e3', 'e3', 'e0-e1-e3' ]  # (-1)-classes
        d_lst1 = [Div.new( s, 4 ) for s in e_lst + ['23'] ]
        G1 = get_ext_graph( d_lst1, M )
        d_lst2 = [Div.new( s, 4 ) for s in e_lst + ['1123'] ]
        G2 = get_ext_graph( d_lst2, M )
        assert not G1.is_isomorphic( G2, edge_labels = True )
        NSTools.set_enable_tool_dct( True )

    def test__get_dynkin_type( self ):
        '''The chain 12-23-34 has Dynkin type A3.'''
        NSTools.set_enable_tool_dct( False )
        bas_lst = [12, 23, 34 ]
        d_lst = [Div.new( str( bas ), 5 ) for bas in bas_lst]
        print( d_lst )
        assert get_dynkin_type( d_lst ) == 'A3'
        NSTools.set_enable_tool_dct( True )

    def test__convert_type( self ):
        '''Multiplicities are expanded and A0 components dropped.'''
        NSTools.set_enable_tool_dct( False )
        assert convert_type( '2A1+D4' ) == ['A1', 'A1', 'D4']
        assert convert_type( '2A1+A2+A3' ) == ['A1', 'A1', 'A2', 'A3']
        assert convert_type( 'A0+2A1+3A1+D4+A0' ) == 5 * ['A1'] + ['D4']
        NSTools.set_enable_tool_dct( True )

    def test__get_root_bases_orbit__rank_3( self ):
        '''Weyl group orbit of [e1-e2] in rank 3, with and without negative roots.'''
        NSTools.set_enable_tool_dct( False )
        d_lst = [12]
        d_lst = [Div.new( str( d ), 3 ) for d in d_lst]
        d_lst_lst = get_root_bases_orbit( d_lst, False )
        print( d_lst_lst )
        assert str( d_lst_lst ) == '[[e1-e2], [-e1+e2]]'
        d_lst_lst = get_root_bases_orbit( d_lst, True )
        print( d_lst_lst )
        assert str( d_lst_lst ) == '[[e1-e2]]'
        NSTools.set_enable_tool_dct( True )

    def test__get_root_bases_orbit__rank_4( self ):
        '''Weyl group orbit of [e1-e2] in rank 4, with and without negative roots.'''
        NSTools.set_enable_tool_dct( False )
        d_lst = [12]
        d_lst = [Div.new( str( d ), 4 ) for d in d_lst]
        d_lst_lst = get_root_bases_orbit( d_lst, False )
        print( d_lst_lst )
        assert str( d_lst_lst ) == '[[e1-e2], [-e1+e2], [e1-e3], [-e2+e3], [-e1+e3], [e2-e3]]'
        d_lst_lst = get_root_bases_orbit( d_lst, True )
        print( d_lst_lst )
        assert str( d_lst_lst ) == '[[e1-e2], [e1-e3], [e2-e3]]'
        NSTools.set_enable_tool_dct( True )
if __name__ == '__main__':

    # Ad hoc test runner: uncomment the test invocations below to execute them.
    NSTools.filter( None )

    # TestDPRootBasis().test__get_ext_graph()
    # TestDPRootBasis().test__get_root_bases_orbit__rank_3()
    # TestDPRootBasis().test__get_root_bases_orbit__rank_4()
    TestDPRootBasis().test__convert_type()
| 4,288 | 31.007463 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_ns_basis.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 9, 2017
@author: Niels Lubbes
'''
import sys
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_matrix
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_QQ
from ns_lattice.sage_interface import sage_register_unpickle_override
from ns_lattice.class_div import Div
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_dp_lattice import DPLattice
from ns_lattice.div_in_lattice import get_divs
from ns_lattice.div_in_lattice import get_ak
from ns_lattice.ns_basis import get_bases_lst
from ns_lattice.ns_basis import get_webs
from ns_lattice.ns_basis import contains_perm
from ns_lattice.ns_basis import triples
class TestNSBasis( object ):
    '''Tests for the functions in the module ns_basis.'''

    def test__get_basis_lst__rank_4__False( self ):
        '''Basis completion of [e0-e1, e0-e2] in rank 4 without permutations of the appended classes.'''
        NSTools.set_enable_tool_dct( False )  # do not read/write the persistent cache
        rank = 4

        # construct DPLattice
        d_lst = []
        Md_lst = []
        M = sage_identity_matrix( rank )
        dpl = DPLattice( d_lst, Md_lst, M )

        # change basis
        a_lst = [ 'e0-e1', 'e0-e2']
        a_lst = [ Div.new( a, rank ) for a in a_lst ]
        m1_lst = get_divs( get_ak( rank ), 1, -1, True )
        d_tup_lst = get_bases_lst( a_lst, M, d_lst, m1_lst, False )
        B = sage_matrix( sage_ZZ, [ d.e_lst for d in d_tup_lst[0] ] )
        dplB = dpl.get_basis_change( B )
        int_mat = list( dplB.m1_lst[0].int_mat )

        print( dplB )

        print( str( d_tup_lst ) )
        assert str( d_tup_lst ) == '[(e0-e1, e0-e2, e3, e0-e1-e2)]'

        print( list( B ) )
        assert str( list( B ) ) == '[(1, -1, 0, 0), (1, 0, -1, 0), (0, 0, 0, 1), (1, -1, -1, 0)]'

        print( str( int_mat ) )
        assert str( int_mat ) == '[(0, 1, 0, 0), (1, 0, 0, 0), (0, 0, -1, 0), (0, 0, 0, -1)]'

        NSTools.set_enable_tool_dct( True )

    def test__get_basis_lst__rank_4__True( self ):
        '''Basis completion of [e0-e1, e0-e2] in rank 4 with permutations of the appended classes.'''
        NSTools.set_enable_tool_dct( False )
        rank = 4

        # construct DPLattice
        d_lst = []
        Md_lst = []
        M = sage_identity_matrix( rank )
        dpl = DPLattice( d_lst, Md_lst, M )

        # change basis
        a_lst = [ 'e0-e1', 'e0-e2']
        a_lst = [ Div.new( a, rank ) for a in a_lst ]
        m1_lst = get_divs( get_ak( rank ), 1, -1, True )
        d_tup_lst = get_bases_lst( a_lst, M, d_lst, m1_lst, True )

        print( d_tup_lst )
        assert str( d_tup_lst ) == '[(e0-e1, e0-e2, e3, e0-e1-e2), (e0-e1, e0-e2, e0-e1-e2, e3)]'

        # all bases must yield the same intersection matrix
        for d_tup in d_tup_lst:
            B = sage_matrix( sage_ZZ, [ d.e_lst for d in d_tup ] )
            dplB = dpl.get_basis_change( B )
            int_mat = list( dplB.m1_lst[0].int_mat )
            print( str( int_mat ) )
            assert str( int_mat ) == '[(0, 1, 0, 0), (1, 0, 0, 0), (0, 0, -1, 0), (0, 0, 0, -1)]'

        NSTools.set_enable_tool_dct( True )

    def test__get_webs__rank_4( self ):
        '''Smoke test: print the webs of the trivial rank-4 lattice (no assertion).'''
        NSTools.set_enable_tool_dct( False )
        # sage_register_unpickle_override( 'class_div', 'Div', Div )
        # sage_register_unpickle_override( 'class_dp_lattice', 'DPLattice', DPLattice )
        d_lst = []
        Md_lst = []
        M = sage_identity_matrix( 4 )
        dpl = DPLattice( d_lst, Md_lst, M )
        fam_lst_lst = get_webs( dpl )
        for fam_lst in fam_lst_lst:
            print( fam_lst )
        NSTools.set_enable_tool_dct( True )

    def test__contains_perm__rank6( self ):
        '''c_lst is a permutation of one of the lists in f_lst_lst.'''
        f_lst_lst = [['e0-e1', '2e0-e2-e3-e4-e5'], ['e0-e5', 'e0', 'e1']]
        c_lst = ['e0-e2', '2e0-e1-e3-e4-e5']
        rank = 6
        nf_lst_lst = []
        for f_lst in f_lst_lst:
            nf_lst_lst += [[ Div.new( f, rank ) for f in f_lst ]]
        f_lst_lst = nf_lst_lst
        c_lst = [ Div.new( c, rank ) for c in c_lst ]
        assert contains_perm( f_lst_lst, c_lst )

    def test__triples( self ):
        '''Triples of classes with pairwise product 2 for the (2A1, 4A1) lattice.'''
        NSTools.set_enable_tool_dct( False )
        rank = 6
        # (2A1, 4A1)
        d_lst = [ 'e2-e4', 'e3-e5', 'e0-e1-e2-e4', 'e0-e1-e3-e5']
        Md_lst = ['e4-e5', 'e0-e1-e2-e3']
        M = [( 2, 1, 1, 1, 0, 0 ), ( -1, 0, -1, -1, 0, 0 ), ( -1, -1, 0, -1, 0, 0 ), ( -1, -1, -1, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 1 ), ( 0, 0, 0, 0, 1, 0 )]
        d_lst = [ Div.new( d, rank ) for d in d_lst ]
        Md_lst = [ Div.new( Md, rank ) for Md in Md_lst ]
        M = sage_matrix( M )
        dpl = DPLattice( d_lst, Md_lst, M )
        t_lst = triples( dpl, 2 )
        print( t_lst )
        assert str( t_lst ) == '[[e0-e1, e0-e2, 2e0-e2-e3-e4-e5]]'
        NSTools.set_enable_tool_dct( True )
if __name__ == '__main__':

    # Ad hoc test runner: uncomment the test invocations below to execute them.
    # NSTools.filter( 'ns_basis.py' )
    NSTools.filter( None )

    # TestNSBasis().test__get_basis_lst__rank_4__False()
    # TestNSBasis().test__get_basis_lst__rank_4__True()
    # TestNSBasis().test__get_webs__rank_4()
    # TestNSBasis().test__contains_perm__rank6()
    # TestNSBasis().test__triples()

    pass
| 5,021 | 29.071856 | 153 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_convert_to_tex.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 8, 2017
@author: Niels Lubbes
'''
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.convert_to_tex import cls_to_tex
class TestConvertToTex:
    '''Tests for the module convert_to_tex.'''

    def test__cls_to_tex( self ):
        '''Print the TeX output, but only if the rank-9 classification is cached.'''
        if 'get_cls_9' in NSTools.get_tool_dct():
            tex_out = cls_to_tex()
            print( tex_out )
if __name__ == '__main__':

    # Ad hoc test runner.
    NSTools.filter( None )
    TestConvertToTex().test__cls_to_tex()
    pass
| 543 | 16.548387 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/__init__.py | 0 | 0 | 0 | py | |
ns_lattice | ns_lattice-master/ns_lattice/src/tests/test_dp_involutions.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 8, 2017
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_vector
from ns_lattice.sage_interface import sage_matrix
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_diagonal_matrix
from ns_lattice.dp_involutions import complete_basis
from ns_lattice.dp_involutions import is_integral_involution
from ns_lattice.dp_involutions import basis_to_involution
from ns_lattice.class_div import Div
from ns_lattice.class_ns_tools import NSTools
class TestDPInvolutions():
    '''Tests for the functions in the module dp_involutions.'''

    def test__complete_basis__34_45_rank6( self ):
        '''Completion of the A2 basis {e3-e4, e4-e5} in rank 6.'''
        d_lst = [ 34, 45]
        rank = 6
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        mat = complete_basis( d_lst )
        assert mat == sage_matrix( [( 0, 0, -1, 0, 0, 0 ),
                                    ( 0, 0, 0, 1, 0, 0 ),
                                    ( 0, 0, 0, 0, 1, 0 ),
                                    ( 1, 0, 0, 0, 0, 1 ),
                                    ( -1, 1, 0, 0, 0, 1 ),
                                    ( 0, -1, 0, 0, 0, 1 )] )

    def test__complete_basis__23_34_45_rank6( self ):
        '''Completion of the A3 basis {e2-e3, e3-e4, e4-e5} in rank 6.'''
        d_lst = [ 23, 34, 45 ]
        rank = 6
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        mat = complete_basis( d_lst )
        assert mat == sage_matrix( [( 0, 0, 0, -1, 0, 0 ),
                                    ( 0, 0, 0, 0, 1, 0 ),
                                    ( 1, 0, 0, 0, 0, 1 ),
                                    ( -1, 1, 0, 0, 0, 1 ),
                                    ( 0, -1, 1, 0, 0, 1 ),
                                    ( 0, 0, -1, 0, 0, 1 )] )

    def test__complete_basis__1123_12_23_45_rank6( self ):
        # 4A1
        d_lst = [ 1123, 12, 23, 45 ]
        rank = 6
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        mat = complete_basis( d_lst )
        print( mat )
        assert mat == sage_matrix( [( 0, 0, 0, 1, -3, 0 ),
                                    ( 1, 0, 0, -1, 1, 0 ),
                                    ( -1, 1, 0, -1, 1, 0 ),
                                    ( 0, -1, 0, -1, 1, 0 ),
                                    ( 0, 0, 1, 0, 0, 1 ),
                                    ( 0, 0, -1, 0, 0, 1 ) ] )

    def test__complete_basis__1145_23_rank6( self ):
        '''Completion of the 2A1 basis {e0-e1-e4-e5, e2-e3} in rank 6.'''
        d_lst = [ 1145, 23 ]
        rank = 6
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        mat = complete_basis( d_lst )
        print( mat )
        assert mat == sage_matrix( [( 0, 1, -1, 0, 0, 0 ),
                                    ( 0, -1, 0, 1, 0, 0 ),
                                    ( 1, 0, 0, 0, 1, 0 ),
                                    ( -1, 0, 0, 0, 1, 0 ),
                                    ( 0, -1, 0, 0, 0, 1 ),
                                    ( 0, -1, 1, -1, 0, -1 ) ] )

    def test__complete_basis__12_23_rank4( self ):
        '''The involution induced by the A2 basis {e1-e2, e2-e3} is not integral.'''
        d_lst = [ 12, 23 ]
        rank = 4
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        V = complete_basis( d_lst )
        D = sage_diagonal_matrix( [-1, -1, 1, 1] )
        J = sage_diagonal_matrix( [1, -1, -1, -1] )  # intersection form of signature (1,3)
        M = V * D * ~V
        assert str( list( M ) ) == "[(1, 0, 0, 0), (0, -1/3, 2/3, 2/3), (0, 2/3, -1/3, 2/3), (0, 2/3, 2/3, -1/3)]"
        assert M * M == sage_identity_matrix( 4 )
        assert M.T * J * M == J
        assert is_integral_involution( M ) == False

    def test__complete_basis__1123_12_23_rank4( self ):
        '''The involution induced by the basis {e0-e1-e2-e3, e1-e2, e2-e3} is not integral.'''
        d_lst = [ 1123, 12, 23 ]
        rank = 4
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        V = complete_basis( d_lst )
        D = sage_diagonal_matrix( [-1, -1, -1, 1] )
        J = sage_diagonal_matrix( [1, -1, -1, -1] )
        M = V * D * ~V
        print( ~V * sage_vector( [1, -1, -1, -1] ) )
        print( ~V * sage_vector( [0, 1, 0, -1] ) )
        print( ~V * sage_vector( [0, 0, 1, -1] ) )
        print( V )
        assert M == basis_to_involution( d_lst, rank )
        assert str( list( V ) ) == "[(0, 0, 1, -3), (1, 0, -1, 1), (-1, 1, -1, 1), (0, -1, -1, 1)]"
        assert str( list( ~V ) ) == "[(0, 2/3, -1/3, -1/3), (0, 1/3, 1/3, -2/3), (-1/2, -1/2, -1/2, -1/2), (-1/2, -1/6, -1/6, -1/6)]"
        assert str( list( M ) ) == "[(2, 1, 1, 1), (-1, -4/3, -1/3, -1/3), (-1, -1/3, -4/3, -1/3), (-1, -1/3, -1/3, -4/3)]"
        assert M * M == sage_identity_matrix( 4 )
        assert M.T * J * M == J
        assert is_integral_involution( M ) == False

    def test__complete_basis__12__rank4( self ):
        '''The involution induced by the A1 basis {e1-e2} is integral.'''
        d_lst = [ 12 ]
        rank = 4
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        V = complete_basis( d_lst )
        D = sage_diagonal_matrix( [-1, 1, 1, 1] )
        J = sage_diagonal_matrix( [1, -1, -1, -1] )
        M = V * D * ~V
        assert M == basis_to_involution( d_lst, rank )
        assert str( list( M ) ) == "[(1, 0, 0, 0), (0, 0, 1, 0), (0, 1, 0, 0), (0, 0, 0, 1)]"
        assert M * M == sage_identity_matrix( 4 )
        assert M.T * J * M == J
        assert is_integral_involution( M ) == True

    def test__complete_basis__1123__rank4( self ):
        '''The involution induced by the A1 basis {e0-e1-e2-e3} is integral.'''
        d_lst = [ 1123 ]
        rank = 4
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        V = complete_basis( d_lst )
        D = sage_diagonal_matrix( [-1, 1, 1, 1] )
        J = sage_diagonal_matrix( [1, -1, -1, -1] )
        M = V * D * ~V
        assert M == basis_to_involution( d_lst, rank )
        assert str( list( M ) ) == "[(2, 1, 1, 1), (-1, 0, -1, -1), (-1, -1, 0, -1), (-1, -1, -1, 0)]"
        assert M * M == sage_identity_matrix( 4 )
        assert M.T * J * M == J
        assert is_integral_involution( M ) == True

    def test__complete_basis__1123_12__rank4( self ):
        '''The involution induced by the 2A1 basis {e0-e1-e2-e3, e1-e2} is integral.'''
        d_lst = [ 1123, 12 ]
        rank = 4
        d_lst = [ Div.new( str( d ), rank ) for d in d_lst ]
        V = complete_basis( d_lst )
        D = sage_diagonal_matrix( [-1, -1, 1, 1] )
        J = sage_diagonal_matrix( [1, -1, -1, -1] )
        M = V * D * ~V
        assert M == basis_to_involution( d_lst, rank )
        assert str( list( M ) ) == "[(2, 1, 1, 1), (-1, -1, 0, -1), (-1, 0, -1, -1), (-1, -1, -1, 0)]"
        assert M * M == sage_identity_matrix( 4 )
        assert M.T * J * M == J
        assert is_integral_involution( M ) == True
if __name__ == '__main__':

    # Ad hoc test runner: uncomment the test invocations below to execute them.
    NSTools.filter( None )

    # TestDPInvolutions().test__complete_basis__12_23_rank4()
    # TestDPInvolutions().test__complete_basis__1123_12_23_rank4()
    # TestDPInvolutions().test__complete_basis__12__rank4()
    # TestDPInvolutions().test__complete_basis__1123__rank4()
    # TestDPInvolutions().test__complete_basis__1123_12__rank4()

    pass
| 6,780 | 37.310734 | 133 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/class_eta.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Jan 27, 2018
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_n
from ns_lattice.class_ns_tools import NSTools
import time
class ETA( object ):
    '''
    For estimating the time it takes for a loop in a program to terminate
    (ETA = estimated time of arrival).
    During the loop feedback is printed via NSTools.p().
    '''

    def __init__( self, total, ival ):
        '''
        Should be called before a loop starts.

        Parameters
        ----------
        total: int
            Number of times the loop needs to be traced.
        ival : int
            Positive number of traced loops after which
            feedback about the estimated end time is printed.
            Nonpositive values fall back to 1.
        '''
        # total number of loops
        self.total = total

        # number of loops after which eta is updated (defaults to 1)
        self.ival = 1
        if ival > 0:
            self.ival = ival

        # loop counter
        self.counter = 0

        # times
        self.ini_time = self.time()  # time when method was called
        self.prv_time = self.ini_time  # time which is updated after ival loops.
        self.eta_time = 0  # estimated time of arrival in minutes

    def time( self ):
        # wrapper around time.time() so that tests may override the clock
        return time.time()

    def update( self, *info_lst ):
        '''
        Should be called inside a loop.

        Prints an estimation for the time it takes for a program to
        terminate (ETA for short). We refer to the program termination
        as arrival.

        Parameters
        ----------
        *info_lst : string
            Variable length argument list consisting of
            additional information that is printed together with ETA.
        '''
        if self.counter % self.ival == 0:

            cur_time = self.time()
            # average duration of one loop iteration in minutes,
            # measured over the last "ival" iterations
            ival_time = ( cur_time - self.prv_time ) / ( 60 * self.ival )
            passed_time = sage_n( ( cur_time - self.ini_time ) / 60, digits = 5 )
            self.eta_time = sage_n( ival_time * ( self.total - self.counter ), digits = 5 )

            s = ''
            for info in info_lst:
                s += str( info ) + ' '

            NSTools.p( 'ETA =', self.eta_time, 'm,',
                       'counter =', self.counter, '/', self.total, ',',
                       'time =', passed_time, 'm,',
                       'info =', s )

            # update previous time
            self.prv_time = cur_time

        # increase counter
        self.counter += 1
| 2,603 | 26.410526 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/dp_root_bases.py | '''
Created on Aug 11, 2016
@author: Niels
See [http://arxiv.org/abs/1302.6678] for more info.
Classification of root subsystems of root systems
of type either A1, A1+A2, A4, D5, E6, E7 or E8.
'''
import time
from ns_lattice.sage_interface import sage_VectorSpace
from ns_lattice.sage_interface import sage_vector
from ns_lattice.sage_interface import sage_QQ
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_Graph
from ns_lattice.sage_interface import sage_Partitions
from ns_lattice.sage_interface import sage_RootSystem
from ns_lattice.sage_interface import sage_Subsets
from ns_lattice.sage_interface import sage_Combinations
from ns_lattice.sage_interface import sage_Permutations
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_div import Div
from ns_lattice.div_in_lattice import get_divs
from ns_lattice.div_in_lattice import get_indecomp_divs
from ns_lattice.div_in_lattice import get_ak
def is_root_basis( d_lst ):
    '''
    Test whether "d_lst" forms a root basis.

    Parameters
    ----------
    d_lst : list<Div>
        A list of "Div" objects "d",
        such that d*d=-2 and d*(-3h+e1+...+er)=0
        where r=rank-1 and rank in [3,...,7].

    Returns
    -------
    bool
        True if the input is the empty list, or if the divisors in
        "d_lst" are linearly independent as vectors and their pairwise
        intersection product is either -2, 0 or 1.
    '''
    # the empty list is a basis of the trivial root system
    if d_lst == []:
        return True

    # the pairwise intersection products must be -2 (self), 0 or 1
    for da in d_lst:
        for db in d_lst:
            if da * db not in ( 0, 1, -2 ):
                return False

    # check linear independence: the subspace spanned by the vectors
    # must have dimension equal to the number of divisors.
    # Linear independent vectors with pairwise positive intersection
    # product must form a root basis, so the positive roots of the
    # corresponding root system are all positive.
    span = sage_VectorSpace( sage_QQ, d_lst[0].rank() ).subspace( [d.e_lst for d in d_lst] )
    return span.rank() == len( d_lst )
def get_graph( d_lst ):
    '''
    Parameters
    ----------
    d_lst : list<Div>
        A list of "Div" objects.

    Returns
    -------
    sage_Graph
        A labeled "Graph()" whose vertices are the indices of the
        elements of "d_lst". Two distinct vertices are joined by an
        edge precisely when the intersection product of the
        corresponding divisors is positive; the edge carries that
        intersection product as its label.
    '''
    num = len( d_lst )
    G = sage_Graph()
    G.add_vertices( range( num ) )
    for a in range( num ):
        for b in range( num ):
            if a == b:
                continue  # no self-loops
            prd = d_lst[a] * d_lst[b]
            if prd > 0:
                G.add_edge( a, b, prd )
    return G
def get_ext_graph( d_lst, M ):
    '''
    Parameters
    ----------
    d_lst : list<Div>
        A list of "Div" objects of equal rank.

    M : sage_matrix<sage_ZZ>
        A square matrix with integral coefficients
        of rank "d_lst[0].rank()"

    Returns
    -------
    A labeled "sage_Graph()" where the elements
    of "d_lst" are the vertices.
    A pair of non-orthogonal vertices are connected
    by and edge labeled with their
    non-zero intersection product.
    Two vertices which are related
    via M are connected with an edge labeled 1000.
    Labeled self-loops are also included.
    '''
    NSTools.p( 'd_lst =', len( d_lst ), d_lst, ', M =', list( M ) )

    G = sage_Graph()
    G.add_vertices( range( len( d_lst ) ) )

    # edges labeled with the (nonzero) intersection products;
    # i==j is allowed, which yields the labeled self-loops
    for i in range( len( d_lst ) ):
        for j in range( len( d_lst ) ):
            if d_lst[i] * d_lst[j] != 0:
                G.add_edge( i, j, d_lst[i] * d_lst[j] )

    # edges labeled 1000 connecting each vertex with its image under M;
    # d_lst.index() raises ValueError if the image is not contained in d_lst
    for i in range( len( d_lst ) ):
        j = d_lst.index( d_lst[i].mat_mul( M ) )
        G.add_edge( i, j, 1000 )

    return G
def get_dynkin_type( d_lst ):
    '''
    Parameters
    ----------
    d_lst : list<Div>
        A list of lists of "Div" objects "d" of
        the same rank, such that
            d*d=-2 and d*(-3h+e1+...+er)=0
        where
            r=rank-1 and rank in [3,...,9].
        We assume that "is_root_basis(d_lst)==True":
        linear independent, self intersection number -2
        and pairwise product either 0 or 1.

    Returns
    -------
    string
        Returns a string denoting the Dynkin type of a
        root system with basis "d_lst".
        Returns 'A0' if "d_lst==[]".

    Note
    ----
    For example:
        [<1145>, <1123>, <23>, <45>, <56>, <78>] --> '3A1+A3'
    where <1145> is shorthand for "Div.new('1145')".

    The list of all candidate Dynkin types (with their Dynkin graphs)
    is constructed once per maximal rank and cached via NSTools.

    Raises
    ------
    ValueError
        If the Dynkin type of d_lst cannot be recognized.
    '''
    if d_lst == []: return 'A0'

    # check whether values are cached; a cached list for any rank
    # >= max_r also contains all types for max_r
    #
    construct_dynkin_types = True
    max_r = d_lst[0].rank() - 1
    key = 'get_dynkin_type_' + str( max_r )
    for r in range( max_r, 8 + 1 ):
        if 'get_dynkin_type_' + str( r ) in NSTools.get_tool_dct():
            key = 'get_dynkin_type_' + str( r )
            construct_dynkin_types = False

    # construct list of dynkin types if values are not cached
    #
    if construct_dynkin_types:
        NSTools.p( 'Constructing list of Dynkin types... max_r =', max_r )

        ade_lst = []
        for comb_lst in sage_Combinations( max_r * ['A', 'D', 'E'], max_r ):
            for perm_lst in sage_Permutations( comb_lst ):
                ade_lst += [perm_lst]
        #
        # "ade_lst" contains all combinations of 'A', 'D', 'E'
        # and looks as follows:
        #
        #     ade_lst[0] = ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A']
        #     ade_lst[1] = ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'D']
        #     ade_lst[2] = ['A', 'A', 'A', 'A', 'A', 'A', 'D', 'A']
        #     ...
        #     ade_lst[?] = ['A', 'D', 'A', 'D', 'A', 'D', 'E', 'A']
        #     ...
        #     ade_lst[-1]= ['E', 'E', 'E', 'E', 'E', 'E', 'E', 'E']
        #
        type_lst = []
        ts_lst = []
        for ade in ade_lst:
            for r in range( 1, max_r + 1 ):
                for p_lst in sage_Partitions( r + max_r, length = max_r ):

                    # obtain type list (letter, rank) pairs;
                    # partition entries equal to 1 correspond to rank 0 and are dropped
                    t_lst = [( ade[i], p_lst[i] - 1 ) for i in range( max_r ) if p_lst[i] != 1]
                    t_lst.sort()

                    # obtain Root system
                    # or continue if invalid Cartan/Dynkin type
                    if ( 'D', 2 ) in t_lst or ( 'D', 3 ) in t_lst:
                        continue
                    try:
                        rs = sage_RootSystem( t_lst )
                    except ValueError as err:
                        continue  # not a valid Cartan type

                    # obtain graph G from the off-diagonal entries of the Cartan matrix
                    mat = list( -1 * rs.cartan_matrix() )
                    G = sage_Graph()
                    G.add_vertices( range( len( mat ) ) );
                    for i in range( len( mat ) ):
                        for j in range( len( mat[0] ) ):
                            if mat[i][j] == 1:
                                G.add_edge( i, j )

                    # obtain string for type
                    # Example: [(A,1),(A,1),(A,1),(A,3)] ---> '3A1+A3'
                    tmp_lst = [t for t in t_lst]
                    ts = ''
                    while len( tmp_lst ) > 0:
                        t = tmp_lst[0]
                        c = tmp_lst.count( t )
                        while t in tmp_lst:
                            tmp_lst.remove( t )
                        if ts != '':
                            ts += '+'
                        if c > 1:
                            ts += str( c )
                        ts += t[0] + str( t[1] )

                    # add to type_lst if new
                    if ts not in ts_lst:
                        type_lst += [( G, ts, t_lst )]
                        ts_lst += [ts]
                        NSTools.p( 'added to list: ', ts, '\t\t...please wait...' )

        NSTools.p( 'Finished constructing list of Dynkin types.' )

        # cache the constructed "type_lst"
        NSTools.get_tool_dct()[key] = type_lst
        NSTools.save_tool_dct()

    # end if
    else:
        type_lst = NSTools.get_tool_dct()[key]

    # recognize the type of d_lst by graph isomorphism with a cached Dynkin graph
    G1 = get_graph( d_lst )

    # loop through all types and check equivalence
    for ( G2, ts, t_lst ) in type_lst:
        if G1.is_isomorphic( G2 ):
            return ts

    raise ValueError( 'Could not recognize Dynkin type: ', d_lst )
def convert_type( type ):
    '''
    Converts a Dynkin type string to a sorted list of
    irreducible Dynkin types.

    For example if type is '2A1+D4', then the output is
    ['A1','A1','D4']. If the type is '2A1+A2+A3',
    then the output is ['A1','A1','A2','A3'].
    We exclude elements that are equal to 'A0'.

    Parameters
    ----------
    type: string
        A string representing a Dynkin type: summands separated
        by '+', each of the form '<mult><X><n>' where <mult> is an
        optional positive integer multiplicity (any number of digits,
        so both '9A1' and '10A1' are accepted), <X> is one of
        'A', 'D', 'E' and <n> is the rank of the component.

    Returns
    -------
    list<string>
        A sorted list of strings, each representing the Dynkin type
        of an irreducible root system. Components equal to 'A0'
        (the empty root system) are omitted.
    '''
    out_lst = []
    for t in type.split( '+' ):
        # split the summand into its digit prefix (the multiplicity)
        # and the irreducible type that follows
        i = 0
        while i < len( t ) and t[i] not in ['A', 'D', 'E']:
            i += 1
        mult = int( t[:i] ) if i > 0 else 1
        out_lst += mult * [ t[i:] ]
    # discard 'A0' components
    out_lst = [out for out in out_lst if out != 'A0']
    return sorted( out_lst )
def get_root_bases_orbit( d_lst, positive = True ):
    '''
    Computes the orbit of a root base under the Weyl group.

    Parameters
    ----------
    d_lst : list<Div>
        A list of lists of "Div" objects "d" of the same rank or the empty list.

    positive : bool
        If True, only root bases consisting of positive roots are returned.

    Returns
    -------
    list<list<Div>>
        A list of distinct lists of "Div" objects "d" of the same rank.
        such that d*d=-2 and d*(-3h+e1+...+er)=0 where r=rank-1.

        If "d_lst" is the empty list, then "[]" is returned.
        Otherwise we return a list of root bases such that each root basis
        is obtained as follows from a root "s" such that s*s=-2
        and s*(-3h+e1+...+er)=0:

            [ d + (d*s)d for d in d_lst ]

        We do this for all possible roots in [s1,s2,s3,...]:

            [ [ d + (d*s1)d for d in d_lst ], [ d + (d*s2)d for d in d_lst ], ... ]

        Mathematically, this means that we consider the Weyl group
        of the root system with Dynkin type determined by the rank of elements
        in "d_lst". The Dynkin type is either
            A1, A1+A2, A4, D5, E6, E7 or E8.
        We return the orbit of the elements in "d_lst" under
        the action of the Weyl group.

        If "positive==True" then the roots in the basis are all positive
        and thus of the form
            <ij>, <1ijk>, <2ij>, <30i>
        with i<j<k.
        For example '15' and '1124' but not '-15' or '-1124'.
        See "Div.get_label()" for the notation.
    '''
    if d_lst == []:
        return [[]]
    rank = d_lst[0].rank()

    # in cache?
    key = 'get_root_bases_orbit_' + str( d_lst ) + '_' + str( rank )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]

    # obtain list of all positive (-2)-classes
    m2_lst = get_divs( get_ak( rank ), 0, -2, True )
    # m2_lst += [ m2.int_mul( -1 ) for m2 in m2_lst]
    NSTools.p( 'd_lst =', len( d_lst ), d_lst, ', m2_lst =', len( m2_lst ), m2_lst )

    # data for ETA computation
    counter = 0
    total = len( m2_lst )
    ival = 5000

    # worklist algorithm: "d_lst_lst" grows while it is being iterated,
    # so newly found bases are reflected as well until the orbit is closed
    d_lst.sort()
    d_lst_lst = [d_lst]
    for cd_lst in d_lst_lst:

        total = len( m2_lst ) * len( d_lst_lst )

        for m2 in m2_lst:

            # ETA
            if counter % ival == 0:
                start = time.time()
            counter += 1
            if counter % ival == 0:
                passed_time = time.time() - start
                NSTools.p( 'ETA in minutes =', passed_time * ( total - counter ) / ( ival * 60 ), ', len(d_lst_lst) =', len( d_lst_lst ), ', total =', total )

            #
            # The action of roots on a root base is by reflection:
            #     cd - 2(cd*m2/m2*m2)m2
            # Notice that m2*m2==-2.
            #
            od_lst = [ cd + m2.int_mul( cd * m2 ) for cd in cd_lst]

            # print( 'm2 =', m2, ', od_lst =', od_lst, ', cd_lst =', cd_lst, ', d_lst_lst =', d_lst_lst, ' positive =', positive )

            od_lst.sort()
            if od_lst not in d_lst_lst:
                d_lst_lst += [od_lst]

    # select positive roots if positive==True
    pd_lst_lst = []
    for d_lst in d_lst_lst:
        if positive and '-' in [ d.get_label( True )[0] for d in d_lst ]:
            continue  # continue with for loop since a negative root in basis
        pd_lst_lst += [d_lst]

    # cache output
    NSTools.get_tool_dct()[key] = pd_lst_lst
    NSTools.save_tool_dct()

    NSTools.p( '#orbit(' + str( d_lst ) + ') =', len( pd_lst_lst ) )

    return pd_lst_lst
| 13,321 | 30.56872 | 158 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/dp_involutions.py | '''
Created on Aug 11, 2016
@author: Niels Lubbes
Classification of unimodular involutions of Neron-Severi lattice
of weak del Pezzo surfaces.
'''
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_QQ
from ns_lattice.sage_interface import sage_matrix
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_diagonal_matrix
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_div import Div
from ns_lattice.div_in_lattice import get_ak
def complete_basis( d_lst ):
    '''
    Complete a list of lattice elements to a full-rank basis by appending
    vectors orthogonal to them.

    Parameters
    ----------
    d_lst : list<Div>
        A non-empty list of "Div" objects of equal rank.

    Returns
    -------
    sage_matrix<sage_QQ>
        Returns a square matrix over QQ of full rank. The first columns
        correspond to the elements in d_lst (where d_lst is sorted).
        The appended columns are orthogonal to the first "len(d_lst)" columns.

    Raises
    ------
    ValueError
        If the resulting matrix does not have full rank, or if the appended
        columns are not orthogonal to the columns coming from "d_lst".

    Examples
    --------
        We explain with 3 examples where the dotted (:) vertical lines
        denote which columns are appended.

        | 0  0 : 1  0  0  0 |    | 0  0  0 : 1  0  0 |    | 0  0  0  1 : -3 0 |
        | 0  0 : 0 -1  0  0 |    | 0  0  0 : 0 -1  0 |    | 1  0  0 -1 :  1 0 |
        | 0  0 : 0  0 -1  0 |    | 1  0  0 : 0  0 -1 |    |-1  1  0 -1 :  1 0 |
        | 1  0 : 0  0  0 -1 |    |-1  1  0 : 0  0 -1 |    | 0 -1  0 -1 :  1 0 |
        |-1  1 : 0  0  0 -1 |    | 0 -1  1 : 0  0 -1 |    | 0  0  1  0 :  0 1 |
        | 0 -1 : 0  0  0 -1 |    | 0  0 -1 : 0  0 -1 |    | 0  0 -1  0 :  0 1 |
    '''
    # work on a sorted copy so that the caller's list is not mutated
    d_lst = sorted( d_lst )

    # extend with orthogonal vectors from the kernel of the row matrix
    row_lst = [ d.e_lst for d in d_lst ]
    ext_lst = []
    for v_lst in sage_matrix( sage_ZZ, row_lst ).right_kernel().basis():
        # negate the first coordinate to account for signature (1,rank-1)
        ext_lst += [ [-v_lst[0]] + list( v_lst[1:] ) ]
    mat = sage_matrix( sage_QQ, row_lst + ext_lst ).transpose()

    # verify output
    # (was "raise Error(...)": "Error" is undefined and would itself raise
    # a NameError; use the builtin ValueError instead)
    if mat.rank() < d_lst[0].rank():
        raise ValueError( 'Matrix expected to have full rank: ', d_lst, '\n' + str( mat ) )
    de_lst = [ Div( ext ) for ext in ext_lst ]
    for de in de_lst:
        for d in d_lst:
            if d * de != 0:
                raise ValueError( 'Extended columns are expected to be orthogonal: ', de, d, de_lst, d_lst, list( mat ) )

    return mat
def is_integral_involution( M ):
    '''
    Parameters
    ----------
    M : sage_matrix
        A matrix M.

    Returns
    -------
    bool
        Returns True if the matrix is an involution,
        preserves inner product with signature (1,r)
        and has integral coefficients.
    '''
    nrows, ncols = M.dimensions()

    # an involution must square to the identity
    if M * M != sage_identity_matrix( nrows ):
        return False

    # the bilinear form of signature (1,r) must be preserved
    S = sage_diagonal_matrix( [1] + ( ncols - 1 ) * [-1] )
    if M.transpose() * S * M != S:
        return False

    # every entry must be an integer
    if not all( M[r][c] in sage_ZZ for r in range( nrows ) for c in range( ncols ) ):
        return False

    # the canonical class must be fixed by M
    ak = get_ak( nrows )
    return ak.mat_mul( M ) == ak
def basis_to_involution( d_lst, rank ):
    '''
    Parameters
    ----------
    d_lst : list<Div>
        A list of "Div" objects of rank "rank".

    rank : int
        An integer in [3,...,9].

    Returns
    -------
    sage_MATRIX<sage_QQ>
        Returns matrix over QQ that correspond to an involution of
        ZZ<h,e1,...,er> here r=rank-1. The first columns
        correspond to the elements in d_lst (where d_lst is sorted).
        The appended columns are orthogonal to the first "len(d_lst)" columns.
    '''
    # the empty basis corresponds to the identity involution
    if d_lst == []:
        return sage_identity_matrix( sage_QQ, rank )

    num = len( d_lst )
    base = complete_basis( d_lst )
    # reflect the first "num" basis vectors and fix the rest
    sign = sage_diagonal_matrix( num * [-1] + ( rank - num ) * [1] )

    return base * sign * base.inverse()  # M*V == V*D
| 4,093 | 28.035461 | 116 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/__main__.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Aug 11, 2016
@author: Niels Lubbes
'''
import sys
import os
from ns_lattice.sage_interface import sage_matrix
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_Subsets
from ns_lattice.sage_interface import sage_Permutations
from ns_lattice.sage_interface import sage_Combinations
from ns_lattice.sage_interface import sage_Graph
from ns_lattice.sage_interface import sage_gcd
from ns_lattice.sage_interface import sage_factor
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_div import Div
from ns_lattice.div_in_lattice import get_divs
from ns_lattice.div_in_lattice import get_ak
from ns_lattice.class_dp_lattice import DPLattice
from ns_lattice.ns_basis import get_bases_lst
from linear_series.class_poly_ring import PolyRing
from linear_series.class_base_points import BasePointTree
from linear_series.class_linear_series import LinearSeries
def usecase__get_cls( max_rank ):
    '''
    Classification of root bases in root system of rank at most "max_rank".
    See "DPLattice.get_cls_root_bases()".

    Parameters
    ----------
    max_rank : int
        Maximal rank.
    '''
    row_format = '{:>6}{:>5}{:>8}{:>16}{:>5}{:>5}{:>5}{:>5}{:>6}{:>7}{:>70}{:>135}{:>340}'
    rownr = 0
    for rank in range( 3, max_rank + 1 ):
        dpl_lst = DPLattice.get_cls( rank )

        # build one table per rank, starting with a header row
        header = ['rownr', 'rank', 'Mtype', 'type',
                  '#-2', '#-1', '#fam', '#-2R', '#-1R', '#famR',
                  'Md_lst', 'd_lst', 'M']
        row_lst = [header]
        for dpl in sorted( dpl_lst ):
            row = [rownr, rank, dpl.get_marked_Mtype(), dpl.get_real_type()]
            row += list( dpl.get_numbers() )
            row += [str( dpl.Md_lst ), str( dpl.d_lst ), str( list( dpl.M ) )]
            row_lst += [row]
            rownr += 1

        s = ''
        for row in row_lst:
            s += row_format.format( *row ) + '\n'
        NSTools.p( 'Classification of root bases:\n' + s )
        NSTools.p( 'rank =', rank, ', len =', len( dpl_lst ) )
        NSTools.p( 80 * '#' )

    # summary of the number of classes per rank
    for rank in range( 3, max_rank + 1 ):
        NSTools.p( 'rank =', rank, ', len =', len( DPLattice.get_cls( rank ) ) )
    NSTools.p( 80 * '#' )
def usecase__get_classes_dp1( rank ):
    '''
    Computes classes in the Neron-Severi lattice with
    predefined self-intersection and intersection with the
    canonical class.

    Parameters
    ----------
    rank : int
        Rank of the Neron-Severi lattice under consideration.
    '''
    # canonical class
    d = get_ak( rank )

    # basis change
    a_lst = [ 'e0-e1', 'e0-e2']
    a_lst = [ Div.new( a, rank ) for a in a_lst ]
    m1_lst = get_divs( d, 1, -1, True )
    # use the project logger instead of a bare print() so that this output
    # obeys the NSTools verbosity filter like everything else in this module
    NSTools.p( d )
    M = sage_identity_matrix( rank )
    d_lst = []
    d_tup_lst = get_bases_lst( a_lst, M, d_lst, m1_lst, False )
    B = sage_matrix( sage_ZZ, [ dt.e_lst for dt in d_tup_lst[0] ] )

    # list the classes for each (self-intersection, canonical-intersection) pair
    for ( dc, cc ) in [( 2, 0 ), ( 1, -1 ), ( 0, -2 ), ( 2, 2 ), ( 2, 4 ), ( 3, 1 )]:
        NSTools.p( '(dc, cc) =', ( dc, cc ) )
        c_lst = get_divs( d, dc, cc, False )
        for c in c_lst:
            NSTools.p( '\t\t', c, '\t\t', c.get_basis_change( B ) )
def usecase__graphs( max_rank ):
    '''
    Lists attributes of simple family graphs.

    Parameters
    ----------
    max_rank : int
        Maximal rank of DPLattice objects that are considered.
    '''
    row_format = '{:<6}{:<5}{:<8}{:<16}{:<7}{:<10}{:<95}{:<30}{:<15}{:<15}{:<15}{:<15}'
    already_in_cache = True
    dpl_lst = []
    rownr = 0
    row_lst = [['rownr', 'deg', 'Mtype', 'type', '#vert', '#edges', 'degrees', 'labels', 'complete', 'connected', 'vert-xfer', 'edge-xfer']]
    for rank in range( 3, max_rank + 1 ):
        NSTools.p( 'rank =', rank )
        for dpl in DPLattice.get_cls( rank ):
            # "is not None" instead of "!= None": identity comparison is the
            # idiomatic (and safe) way to detect an uncomputed attribute
            already_in_cache = already_in_cache and ( dpl.SG is not None )
            dpl_lst += [dpl]
            SG, SG_data = dpl.get_SG()
            row_lst += [ [rownr, 10 - rank, dpl.get_marked_Mtype(), dpl.get_real_type() ] + SG_data ]
            rownr += 1
            # progress output for the (long) rank 9 computation
            if rank == 9 and ( rownr <= 390 or rownr % 100 == 0 ):
                NSTools.p( '\t\trownr =', rownr )
    s = ''
    for row in row_lst:
        s += row_format.format( *row ) + '\n'
    NSTools.p( 'Classification of simple family graphs:\n' + s )

    # persist newly computed graph data
    if not already_in_cache:
        NSTools.p( 'Saving data for simple family graphs...' )
        NSTools.save_tool_dct()

    # example for how to plot a simple family graph
    #
    NSTools.p( 'Plotting a simple family graph...' )
    SG, SG_data = DPLattice.get_cls( 6 )[0].get_SG()
    P = SG.graphplot( vertex_size = 1,
                      vertex_labels = True,
                      edge_labels = True,
                      color_by_label = False,
                      layout = 'circular' ).plot()
    P.save( os.environ['OUTPUT_PATH'] + 'graph.png' )
    NSTools.p( '#components =', SG.connected_components_number() )
def usecase__analyze_graphs( max_rank ):
    '''
    We analyze the graphs of DPLattice objects in the output
    of DPLattice.get_cls().

    Parameters
    ----------
    max_rank : int
        Maximal rank of DPLattice objects that are considered.
    '''
    # Examine which of the graphs associated to DPLattices
    # are isomorphic to one of the constructed graphs.
    #
    NSTools.p( '\t Compare contructed graphs with classified graphs...' )
    rownr = -1
    max_verts = 0
    for rank in range( 3, max_rank + 1 ):
        NSTools.p( '\t ---' )
        for dpl in DPLattice.get_cls( rank ):
            rownr += 1

            # retrieve the graph SG for analysis
            SG, SG_data = dpl.get_SG()

            # small graphs are skipped
            if SG.num_verts() <= 3:
                continue

            # check if each edge label is in [2,4]
            # (SG_data[3] presumably holds the edge labels -- TODO confirm
            # against DPLattice.get_SG())
            if [e for e in SG_data[3] if e not in [2, 4]] != []:
                continue

            # initialize string
            s = ''
            s += str( rownr ) + ' ' + 'rank=' + str( rank ) + ' '

            # Initialize G_lst which is a list of tuples (G,G_str)
            # where G is a constructed graph and G_str is its string identifier.
            # The identifiers are according Theorem 1 in arXiv:1807.05881v2.
            #
            G_lst = []
            for nv1 in range( 1, SG.num_verts() + 1 ):
                for nv2 in range( 1, SG.num_verts() + 1 ):

                    # determine list c_lst of 2-element subsets of [1,...,m]
                    # so that m is minimal under the condition that len(c_lst)>=nv1
                    c_lst = []
                    for i in range( 2 * nv1 ):
                        c_lst = list( sage_Combinations( i, 2 ) )
                        if len( c_lst ) >= nv1:
                            break

                    # construct graphs
                    #
                    # Gd: nv1 vertices, no edges
                    Gd = sage_Graph()
                    Gd.add_vertices( range( nv1 ) )
                    G_lst += [( Gd, 'Gd:' + str( nv1 ) )]

                    # Ge: nv1 vertices, all pairs joined with label 2
                    Ge = sage_Graph()
                    Ge.add_vertices( range( nv1 ) )
                    for i in Ge.vertices():
                        for j in Ge.vertices():
                            Ge.add_edge( i, j, 2 )
                    G_lst += [( Ge, 'Ge:' + str( nv1 ) )]

                    # Gf: vertices are 2-element subsets; edge label 4-2q where
                    # q counts shared elements of the two subsets
                    Gf = sage_Graph()
                    Gf.add_vertices( range( len( c_lst ) ) )
                    for i in Gf.vertices():
                        for j in Gf.vertices():
                            q = len( [ c for c in c_lst[i] if c in c_lst[j] ] )
                            Gf.add_edge( i, j, 4 - 2 * q )
                    G_lst += [( Gf, 'Gf:' + str( Gf.num_verts() ) )]

                    # Gg: like Gf, but only label-2 edges for overlapping subsets
                    Gg = sage_Graph()
                    Gg.add_vertices( range( len( c_lst ) ) )
                    for i in Gg.vertices():
                        for j in Gg.vertices():
                            q = len( [ c for c in c_lst[i] if c in c_lst[j] ] )
                            if q > 0:
                                Gg.add_edge( i, j, 2 )
                    G_lst += [( Gg, 'Gg:' + str( Gg.num_verts() ) )]

                    # construct combined graphs
                    #
                    if nv1 + nv2 > SG.num_verts():
                        continue

                    Gd2 = sage_Graph()
                    Gd2.add_vertices( range( nv2 ) )

                    Ge2 = sage_Graph()
                    Ge2.add_vertices( range( nv2 ) )
                    for i in Ge2.vertices():
                        for j in Ge2.vertices():
                            Ge2.add_edge( i, j, 2 )

                    if nv1 + nv2 == SG.num_verts():
                        if ( Gd.num_verts(), Ge2.num_verts() ) != ( 1, 1 ):
                            # Gde: disjoint union of Gd and Ge2 with all
                            # cross edges labeled 2
                            Gde = sage_Graph()
                            Gde.add_vertices( Ge2.vertices() )
                            Gde.add_edges( Ge2.edges() )
                            for i in range( Ge2.num_verts() - 1, -1, -1 ):
                                Gde.relabel( {i:i + Gd.num_verts()} )
                            Gde.add_vertices( Gd.vertices() )
                            Gde.add_edges( Gd.edges() )
                            for i in range( Gd.num_verts() ):
                                for j in range( Gd.num_verts(), Gde.num_verts() ):
                                    Gde.add_edge( i, j, 2 )
                            G_lst += [( Gde, 'Gde:' + str( Gd.num_verts() ) + '+' + str( Ge2.num_verts() ) )]

                    if len( c_lst ) + nv2 == SG.num_verts():
                        # Gfd: disjoint union of Gf and Gd2 with all
                        # cross edges labeled 2
                        Gfd = sage_Graph()
                        Gfd.add_vertices( Gd2.vertices() )
                        Gfd.add_edges( Gd2.edges() )
                        for i in range( Gd2.num_verts() - 1, -1, -1 ):
                            Gfd.relabel( {i:i + Gf.num_verts()} )
                        Gfd.add_vertices( Gf.vertices() )
                        Gfd.add_edges( Gf.edges() )
                        for i in range( Gf.num_verts() ):
                            for j in range( Gf.num_verts(), Gfd.num_verts() ):
                                Gfd.add_edge( i, j, 2 )
                        G_lst += [( Gfd, 'Gfd:' + str( Gf.num_verts() ) + '+' + str( Gd2.num_verts() ) )]

                        # Gge: disjoint union of Gg and Ge2 with all
                        # cross edges labeled 2
                        Gge = sage_Graph()
                        Gge.add_vertices( Ge2.vertices() )
                        Gge.add_edges( Ge2.edges() )
                        for i in range( Ge2.num_verts() - 1, -1, -1 ):
                            Gge.relabel( {i:i + Gg.num_verts()} )
                        Gge.add_vertices( Gg.vertices() )
                        Gge.add_edges( Gg.edges() )
                        for i in range( Gg.num_verts() ):
                            for j in range( Gg.num_verts(), Gge.num_verts() ):
                                Gge.add_edge( i, j, 2 )
                        G_lst += [( Gge, 'Gge:' + str( Gg.num_verts() ) + '+' + str( Ge2.num_verts() ) )]

            # check for each of the constructed graphs whether
            # it is isomorphic to dpl.get_SG()
            #
            for ( G, G_str ) in G_lst:
                if SG.is_isomorphic( G, edge_labels = True ):
                    max_verts = max( max_verts, G.num_verts() )
                    if G_str not in s:
                        s += G_str + ' '

            # a ':' appears in s exactly when some identifier matched
            if ':' in s:
                NSTools.p( '\t', s )

    NSTools.p( 'max_verts =', max_verts )
def usecase__construct_surfaces():
    '''
    We construct a surface parametrization and its Neron-Severi lattice.

    Requires the linear_series package.
    '''
    # Blowup of projective plane in 3 colinear points
    # and 2 infinitely near points. The image of the
    # map associated to the linear series is a quartic
    # del Pezzo surface with 5 families of conics.
    # Moreover the surface contains 8 straight lines.
    #
    ring = PolyRing( 'x,y,z', True )
    p1 = ( -1, 0 )
    p2 = ( 0, 0 )
    p3 = ( 1, 0 )
    p4 = ( 0, 1 )
    p5 = ( 2, 0 )
    bp_tree = BasePointTree()
    bp_tree.add( 'z', p1, 1 )
    bp_tree.add( 'z', p2, 1 )
    bp_tree.add( 'z', p3, 1 )
    bp = bp_tree.add( 'z', p4, 1 )
    bp.add( 't', p5, 1 )  # p5 is infinitely near to p4
    ls = LinearSeries.get( [3], bp_tree )
    NSTools.p( ls.get_bp_tree() )
    NSTools.p( 'implicit equation =\n\t', ls.get_implicit_image() )

    # construct NS-lattice where p1=e1,...,p5=e5
    rank = 6
    d_lst = [ 'e0-e1-e2-e3', 'e4-e5' ]  # basepoint p5 is infinitely near to p4
    Md_lst = []
    M = sage_identity_matrix( 6 )
    d_lst = [ Div.new( d, rank ) for d in d_lst ]
    Md_lst = [ Div.new( Md, rank ) for Md in Md_lst ]
    M = sage_matrix( M )
    dpl = DPLattice( d_lst, Md_lst, M )
    NSTools.p( 'Neron-Severi lattice =', dpl )

    # search representative for the equivalence class in classification
    assert dpl in DPLattice.get_cls( rank )
def usecase__roman_circles():
    '''
    We compute circles on a Roman surface.
    '''
    # parametrization of the Roman surface
    #
    p_lst = '[ z^2+x^2+y^2, -z*x, -x*y, z*y ]'

    # we consider the stereographic projection from
    #     S^3 = { x in P^4 | -x0^2+x1^2+x2^2+x3^2+x4^2 = 0 }
    # where the center of projection is (1:0:0:0:1):
    #     (x0:x1:x2:x3:x4) |---> (x0-x4:x1:x2:x3)

    # inverse stereographic projection into 3-sphere
    #
    s_lst = '[ y0^2+y1^2+y2^2+y3^2, 2*y0*y1, 2*y0*y2, 2*y0*y3, -y0^2+y1^2+y2^2+y3^2 ]'

    # compose p_lst with s_lst
    #
    ring = PolyRing( 'x,y,z,y0,y1,y2,y3' )
    x, y, z, y0, y1, y2, y3 = ring.gens()
    p_lst = ring.coerce( p_lst )
    s_lst = ring.coerce( s_lst )
    dct = { y0:p_lst[0], y1:p_lst[1], y2:p_lst[2], y3:p_lst[3] }
    sp_lst = [ s.subs( dct ) for s in s_lst ]
    NSTools.p( 'sp_lst =' )
    for sp in sp_lst: NSTools.p( '\t\t', sage_factor( sp ) )
    NSTools.p( 'gcd(sp_lst) =', sage_gcd( sp_lst ) )

    # determine base points
    #
    ring = PolyRing( 'x,y,z', True )
    sp_lst = ring.coerce( sp_lst )
    ls = LinearSeries( sp_lst, ring )
    NSTools.p( ls.get_bp_tree() )

    # We expect that the basepoints come from the intersection
    # of the Roman surface with the absolute conic:
    #    A = { (y0:y1:y2:y3) in P^3 | y0=y1^2+y2^2+y3^2 = 0 }
    #
    # Circles are the image via p_lst of lines that pass through
    # complex conjugate points.
    #
    ring = PolyRing( 'x,y,z', False )  # reinitialize ring with updated numberfield
    a0, a1, a2, a3 = ring.root_gens()

    # a0=(1-I*sqrt(3)) with conjugate a0-1 and minimal polynomial t^2-t+1

    # we compute candidate classes of circles
    #
    h = Div.new( '4e0-e1-e2-e3-e4-e5-e6-e7-e8' )
    div_lst = get_divs( h, 2, -2, False ) + get_divs( h, 2, -1, False )
    NSTools.p( 'Classes of circles up to permutation:' )
    for c in div_lst:
        NSTools.p( '\t\t', c )

    # We recover the preimages of circles in the Roman surface
    # under the map p_lst, by constructing for each candidate
    # class the corresponding linear series.

    # 2e0-e1-e2-e3-e4-e5-e6-e7-e8
    b = [( a0 - 1, -a0 ), ( -a0, a0 - 1 )]
    b += [( -a0 + 1, a0 ), ( a0, -a0 + 1 )]
    b += [ ( a0 - 1, a0 ), ( -a0, -a0 + 1 )]
    b += [( -a0 + 1, -a0 ), ( a0, a0 - 1 )]
    bp_tree = BasePointTree()
    for i in range( 6 ):
        bp_tree.add( 'z', b[i], 1 )
    NSTools.p( 'basepoints =', b )
    NSTools.p( LinearSeries.get( [2], bp_tree ) )

    # e0-e1-e2
    b = [( a0 - 1, -a0 ), ( -a0, a0 - 1 )]
    bp_tree = BasePointTree()
    bp = bp_tree.add( 'z', b[0], 1 )
    bp = bp_tree.add( 'z', b[1] , 1 )
    NSTools.p( 'basepoints =', b )
    NSTools.p( LinearSeries.get( [1], bp_tree ) )

    # e0-e3-e4
    b = [( -a0 + 1, a0 ), ( a0, -a0 + 1 )]
    bp_tree = BasePointTree()
    bp = bp_tree.add( 'z', b[0], 1 )
    bp = bp_tree.add( 'z', b[1] , 1 )
    NSTools.p( 'basepoints =', b )
    NSTools.p( LinearSeries.get( [1], bp_tree ) )

    # e0-e5-e6
    b = [ ( a0 - 1, a0 ), ( -a0, -a0 + 1 )]
    bp_tree = BasePointTree()
    bp = bp_tree.add( 'z', b[0], 1 )
    bp = bp_tree.add( 'z', b[1] , 1 )
    NSTools.p( 'basepoints =', b )
    NSTools.p( LinearSeries.get( [1], bp_tree ) )

    # e0-e7-e8
    b = [( -a0 + 1, -a0 ), ( a0, a0 - 1 )]
    bp_tree = BasePointTree()
    bp = bp_tree.add( 'z', b[0], 1 )
    bp = bp_tree.add( 'z', b[1] , 1 )
    NSTools.p( 'basepoints =', b )
    NSTools.p( LinearSeries.get( [1], bp_tree ) )

    return
if __name__ == '__main__':

    # Debug output settings
    #
    mod_lst = []
    mod_lst += ['__main__.py']
    # mod_lst += ['class_dp_lattice.py']
    # mod_lst += ['class_eta.py']
    NSTools.filter( mod_lst )  # output only from specified modules
    # NOTE: the second call below overrides the filter set above
    NSTools.filter( None )  # print all verbose output, comment to disable.

    # NSTools.get_tool_dct().clear() # uncomment to remove all cache!

    # default output directory for plots etc.
    if 'OUTPUT_PATH' not in os.environ:
        os.environ['OUTPUT_PATH'] = './'

    NSTools.start_timer()

    #
    # Should be between 3 and 9.
    # computes classifications up to rank "max_rank".
    #
    max_rank = 9

    #########################################
    #                                       #
    # (Un)comment one or more use cases     #
    #                                       #
    #########################################

    usecase__get_cls( max_rank )
    usecase__get_classes_dp1( max_rank )
    usecase__graphs( max_rank )
    usecase__analyze_graphs( max_rank )
    usecase__construct_surfaces()
    # usecase__roman_circles()  # takes about 3 minutes

    #########################################
    #                                       #
    # End of list of use case methods.      #
    #                                       #
    #########################################

    NSTools.end_timer()
    NSTools.p( 'The End' )
| 17,992 | 33.601923 | 140 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/convert_to_tex.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Dec 14, 2017
@author: Niels Lubbes
'''
from ns_lattice.class_div import Div
from ns_lattice.class_dp_lattice import DPLattice
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.sage_interface import sage_identity_matrix
def cls_to_tex():
    '''
    Create tex code for the output of DPLattice.get_cls()

    Returns
    -------
    string
        A string representing a table of tables in Tex format.
        The table represent the classification of Neron-Severi
        lattice of weak del Pezzo surfaces.

    NOTE(review): this function relies on integer division via "/"
    (e.g. "len( lgd_lst ) / 3" below), so it targets Python 2 / old
    Sage; use "//" if ported to Python 3 -- TODO confirm.
    '''
    # create a list of occuring divisors
    #
    div_lst = []
    for rank in range( 3, 9 + 1 ):
        for dpl in DPLattice.get_cls( rank ):
            # construct list for involution (e0,...,er)|-->(i0,...,ir)
            i_lst = [Div( row ).mat_mul( dpl.M ) for row in sage_identity_matrix( rank ) ]
            # add each divisor that occurs to div_lst
            # (padded with zeros to rank 9 so divisors of different ranks compare)
            for elt in i_lst + dpl.d_lst:
                div_lst += [ Div( elt.e_lst + ( 9 - len( elt.e_lst ) ) * [0] ) ]
    div_lst = list( set( div_lst ) )
    div_lst.sort()
    # move e0 to the front of the list
    e0 = Div( [1, 0, 0, 0, 0, 0, 0, 0, 0 ] )
    div_lst.remove( e0 )
    div_lst = [e0] + div_lst

    # create dictionary characters for elements in div_lst
    #
    abc = 'abcdefghijklmnopqrstuvwxyz'  # NOTE(review): appears unused here
    ch_lst = []
    ch_lst += [ '\\frac{' + ch1 + '}{' + ch2 + '}\\!' for ch1 in '0123456789' for ch2 in '0123456789' ]
    ch_lst += [ '\\frac{' + ch1 + '}{' + ch2 + '}\\!' for ch1 in '0123456789' for ch2 in 'abcdef' ]
    NSTools.p( '(len(ch_lst), len(div_lst)) =', ( len( ch_lst ), len( div_lst ) ) )
    # there must be enough symbols to assign one to each divisor
    assert len( ch_lst ) >= len( div_lst )

    # create legend and dictionary
    #
    lgd_lst = []
    sym_dct = {}
    for i in range( len( div_lst ) ):
        sym_dct.update( {str( div_lst[i] ):ch_lst[i]} )
        lgd_lst += [['$' + ch_lst[i] + '$ :', ( '$' + str( div_lst[i] ) + '$' ).replace( 'e', 'e_' ) ]]
    # pad the legend so it splits evenly over 3 columns
    while len( lgd_lst ) % 3 != 0:
        lgd_lst += [['', '']]
    nnrows = len( lgd_lst ) / 3  # integer division (Python 2)

    # create tex for legend
    #
    tex_lgd = ''
    tex_lgd += '\\begin{table}\n'
    tex_lgd += '\\setstretch{1.4}\n'
    tex_lgd += '\\tiny\n'
    tex_lgd += '\\caption{Classification of Neron-Severi lattices of weak del Pezzo surfaces (see THM{nsl})}\n'
    tex_lgd += '\\label{tab:nsl}\n'
    tex_lgd += 'A dictionary for symbols in the columns $\\sigma_A$ and $B$:\n\\\\\n'
    tex_lgd += '\\begin{tabular}{@{}l@{}l@{~~~~}l@{}l@{~~~~}l@{}l@{}}\n'
    for idx in range( nnrows ):
        c1, c2, c3, c4, c5, c6 = lgd_lst[idx] + lgd_lst[idx + nnrows] + lgd_lst[idx + 2 * nnrows]
        tex_lgd += c1 + ' & ' + c2 + ' & ' + c3 + ' & ' + c4 + ' & ' + c5 + ' & ' + c6
        tex_lgd += '\\\\\n'
    tex_lgd += '\\end{tabular}\n'
    tex_lgd += '\\end{table}\n\n'

    # number of rows of the big table
    #
    nrows = 57

    # dictionary for replacing string symbols
    #
    rep_dct = {'A':'A_', 'D':'D_', 'E':'E_', '{':'\\underline{', '[':'\\udot{', ']':'}'}

    # create classification table
    #
    tab_lst = []

    # rank 1 and 2 (hard coded rows)
    tab9 = [['i  ', '$9$', "$A_0 $", '$A_0$', '$0$', '$1$', '']]
    tab8 = [['ii ', '$8$', "$A_0 $", '$A_0$', '$0$', '$2$', ''],
            ['iii', '$8$', "$A_0 $", '$A_0$', '$0$', '$1$', ''],
            ['iv ', '$8$', "$A_0 $", '$A_0$', '$1$', '$1$', ''],
            ['v  ', '$8$', "$A_0 $", '$A_1$', '$0$', '$1$', '']]
    tab_lst += [ tab9, tab8 ]

    # rank 3,4,5,6,7,8 and 9
    idx = 0
    Mtype_lst = ['A1', '4A1']  # for breaking up table for degree 2 case
    for rank in range( 3, 9 + 1 ):
        tab = []
        for dpl in DPLattice.get_cls( rank ):
            col1 = '$' + str( idx ) + '$'
            col2 = '$' + str( dpl.get_degree() ) + '$'
            col3 = '$' + str( dpl.get_marked_Mtype() ) + '$'
            for key in rep_dct:
                col3 = str( col3 ).replace( key, rep_dct[key] )
            col4 = '$' + str( dpl.get_real_type() ) + '$'
            for key in rep_dct:
                col4 = str( col4 ).replace( key, rep_dct[key] )
            col5 = '$' + str( dpl.get_numbers()[4] ) + '$'
            col6 = '$' + str( dpl.get_numbers()[5] ) + '$'
            # symbols for the involution (one per basis vector image)
            i_lst = [ str( Div( rw ).mat_mul( dpl.M ) ) for rw in sage_identity_matrix( rank ) ]
            col7 = ''
            for i in i_lst:
                col7 += sym_dct[i]
            # omit the identity involution
            if col7 in ['012', '0123', '01234', '012345', '0123456', '01234567', '012345678']:
                col7 = ''
            col8 = ''
            for d in dpl.d_lst:
                col8 += sym_dct[str( d )]
            # these subroot systems cannot be realized as weak del Pezzo surfaces
            if col4 in ['$7\underline{A_1}$', '$8\underline{A_1}$', '$4\underline{A_1}+\underline{D_4}$']:
                col1 = '$\\times$'
            # break (sometimes) the table for degree 2 according to Mtype
            if dpl.get_degree() == 2 and dpl.Mtype in Mtype_lst:
                nheaders = len( tab ) / nrows  # each header shifts the row number
                while len( tab ) % nrows != nrows - 1 - nheaders:  # add rows until end of table
                    tab += [7 * ['']]
                Mtype_lst.remove( dpl.Mtype )
            # add row
            tab += [[col1, col2, col3, col4, col5, col6, '$' + col7 + '||\!' + col8 + '$' ]]
            idx += 1
        tab_lst += [ tab ]

    # reformat table
    #
    #        i      d       A         B         E       G      Ac%Bc
    hl = '@{~}l@{~~~}l@{~~~}l@{~~}l@{~~}l@{~~}l@{~~}l@{}'
    hrow = ['', 'd', '$D(A)$', '$D(B)$', '$\#E$', '$\#G$', '$\sigma_A||B$']
    etab_lst = []
    etab = [hrow]
    tab_idx = 0
    for tab in tab_lst:
        for row in tab:
            if len( etab ) >= nrows:
                etab_lst += [etab]
                etab = [hrow]
            etab += [row]
        if len( etab ) < nrows and tab_idx <= 3:
            etab += [7 * [''], 7 * ['']]  # add two empty rows to separate tables with different rank
        else:
            while len( etab ) < nrows:
                etab += [7 * ['']]  # add empty rows to fill up table
            etab_lst += [etab]
            etab = [hrow]
        tab_idx += 1
    NSTools.p( 'etab_lst: ', [len( etab ) for etab in etab_lst] )

    # create tex for main classification table
    # (two sub-tables are placed side by side per Tex table)
    tex_tab = ''
    tab_idx = 0
    for etab in etab_lst:
        if tab_idx % 2 == 0:
            tex_tab += '\\begin{table}\n'
            tex_tab += '\\setstretch{1.6}\n'
            tex_tab += '\\centering\\tiny\n'
            tex_tab += '\\begin{tabular}{@{}l@{\\hspace{1cm}}l@{}}\n'
        elif tab_idx % 2 == 1:
            tex_tab += '&\n'
        tex_tab += '\\begin{tabular}{' + hl + '}\n'
        for row in etab:
            col1, col2, col3, col4, col5, col6, col78 = row
            tex_tab += col1 + ' & ' + col2 + ' & ' + col3 + ' & ' + col4 + ' & '
            tex_tab += col5 + ' & ' + col6 + ' & ' + col78
            tex_tab += ' \\\\\n'
            if row == hrow:
                tex_tab += '\\hline\n'
        tex_tab += '\\end{tabular}\n'
        if tab_idx % 2 == 1:
            tex_tab += '\\end{tabular}\n'
            tex_tab += '\\end{table}\n\n'
        tab_idx += 1
    if tab_idx % 2 == 1:
        tex_tab += '&\n'
        tex_tab += '\\end{tabular}\n\n'

    # creating tex for commands
    tex_cmd = ''
    tex_cmd += '\\newcommand{\\udot}[1]{\\tikz[baseline=(todotted.base)]{\\node[inner sep=1pt,outer sep=0pt] (todotted) {$#1$};\\draw[densely dotted] (todotted.south west) -- (todotted.south east);}}'
    tex_cmd += '\n'
    tex_cmd += '\\newcommand{\\udash}[1]{\\tikz[baseline=(todotted.base)]{\\node[inner sep=1pt,outer sep=0pt] (todotted) {$#1$};\\draw[densely dashed] (todotted.south west) -- (todotted.south east);}}'
    tex_cmd += '\n\n'

    out = tex_cmd + tex_lgd + tex_tab

    return out
| 7,948 | 32.682203 | 201 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/class_div.py | '''
Created on Aug 11, 2016
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_diagonal_matrix
from ns_lattice.sage_interface import sage_vector
class Div:
    '''Element in Neron-Severi lattice.

    The class represents a divisor class in the Neron-Severi lattice
    with respect to the standard basis:

            <e0,e1,e2,...>

    Attributes
    ----------
    e_lst : list<sage_ZZ>
        List describes a divisor in terms of the standard basis.

    int_mat : sage_matrix<sage_ZZ>
        A matrix over ZZ of rank "len(e_lst)" represents
        the unimodular intersection product for the divisor.
    '''

    # static variable
    # (toggle for abbreviated printing; presumably consumed by the string
    # conversion methods defined further down in this class -- TODO confirm)
    short_output = True

    # static list of intersection matrices; instances share ("intern") their
    # "int_mat" attribute via this list (see __init__)
    int_mat_lst = []
def __init__( self, e_lst = 9 * [0], int_mat = None ):
'''
Return
------
Div
Constructor (called when instantiating object).
If "int_mat==None" then the default
diagonal matrix has signature (+-...-).
This matrix determines the intersection
product of divisors.
'''
self.e_lst = list( e_lst )
#
# equal "self.int_mat" for each instantiated Div object references
# to a unique matrix, so that no new matrix is instantiated for each
# Div object. Maybe this is already ensured by Sage library, but just
# to be on the safe side.
#
if int_mat == None:
int_mat = sage_diagonal_matrix( sage_ZZ, [1] + ( self.rank() - 1 ) * [-1] )
if int_mat not in Div.int_mat_lst:
Div.int_mat_lst += [int_mat]
idx = Div.int_mat_lst.index( int_mat )
self.int_mat = Div.int_mat_lst[idx]
    @staticmethod
    def new( lbl, rank = 9 ):
        '''
        Parameters
        ----------
        lbl : string
            A string with format as output of "self.get_label()".

        rank : int
            Integer representing rank of Neron-Severi lattice in which "Div" lives.

        Returns
        -------
        Div
            The "Div" corresponding to the label
            such that "len(self.e_lst)>=rank".

        Raises
        ------
        ValueError
            If the label has an unknown format, or if "rank" is
            incompatible with the given (-2)-class label.
        '''
        c = Div( rank * [0] )  # zero divisor class

        if 'e' in lbl:
            # generic label, for example '3e0+2e1-e3'
            s = lbl

            if 'e0' in s:
                # cases: 'e0...', '-e0...', '3e0...' or '-2e0...'
                if s[0:2] == 'e0':
                    c.e_lst = [1]
                    s = s[2:]
                elif s[0:3] == '-e0':
                    c.e_lst = [-1]
                    s = s[3:]
                else:  # '3e0...' or '-2e0...'
                    c.e_lst = [ int( s.split( 'e0' )[0] ) ]  # [4] if lbl='4h+3e...'
                    s = s.split( 'e0' )[1]  # for example '+3e2-2e5+6e7+e8'
            else:
                c.e_lst = [0]
                s = lbl

            # scan the remainder: accumulate a (signed) coefficient string
            # until the next 'e', then read the basis index following it
            coef_e = ''
            idx = 0
            last_i = 0  # ei
            while idx < len( s ):
                if s[idx] != 'e':
                    coef_e += s[idx]
                    idx += 1
                elif s[idx] == 'e':
                    coef_i = ''
                    idx += 1
                    while idx < len( s ) and s[idx] not in ['+', '-']:
                        coef_i += s[idx]
                        idx += 1
                    i = int( coef_i )
                    # a bare sign (or nothing) means coefficient +-1
                    if coef_e == '-': coef_e = '-1'
                    if coef_e in ['+', '']: coef_e = '1'
                    # pad skipped basis indices with zero coefficients
                    c.e_lst += ( i - last_i - 1 ) * [0] + [int( coef_e )]
                    coef_e = ''
                    last_i = i

        else:  # label of (-2)-class
            if rank > 9:
                raise ValueError( 'For (-2)-classes we expect the rank to be at most 9: ', rank )

            # check whether the label is negative
            if lbl[0] == '-':
                neg = True
                lbl = lbl[1:]
            else:
                neg = False

            # '12' ---> e1-e2
            if len( lbl ) == 2:
                c.e_lst[ int( lbl[0] ) ] = 1
                c.e_lst[ int( lbl[1] ) ] = -1

            # '1123' ---> e0-e1-e2-e3
            elif len( lbl ) == 4 and lbl[0] == '1':
                c.e_lst[0] = int( lbl[0] )
                c.e_lst[ int( lbl[1] ) ] = -1
                c.e_lst[ int( lbl[2] ) ] = -1
                c.e_lst[ int( lbl[3] ) ] = -1

            # '212' ---> 2e0-e3-e4-...-e8
            elif len( lbl ) == 3 and lbl[0] == '2':
                c.e_lst = 9 * [-1]
                c.e_lst[0] = int( lbl[0] )
                c.e_lst[ int( lbl[1] ) ] = 0
                c.e_lst[ int( lbl[2] ) ] = 0
                if rank != 9 and set( c.e_lst[rank:] ) != set( [0] ):
                    raise ValueError( 'Rank too low for label: ', rank, lbl )
                c.e_lst = c.e_lst[:rank]

            # '308' ---> 3e0-e1-e2-...-e7-2e8
            elif len( lbl ) == 3 and lbl[0] == '3' and lbl[1] == '0':
                c.e_lst = 9 * [-1]
                c.e_lst[0] = int( lbl[0] )
                c.e_lst[ int( lbl[2] ) ] = -2

            else:  # unknown label
                raise ValueError( 'Label has incorrect format: ', lbl )

            # for example '-12'=[0,-1,1,0,0,...]
            if neg:
                c.e_lst = [ -e for e in c.e_lst ]

        # end handling label of (-2)-class

        # update rank: pad with zeros up to the requested rank
        c.e_lst = c.e_lst + ( rank - len( c.e_lst ) ) * [0]

        return c
    def rank( self ):
        '''Return the rank of the ambient lattice (the length of "self.e_lst").'''
        return len( self.e_lst )
def is_positive( self ):
'''
Returns
-------
bool
Return True iff the first nonzero entry of the self.e_lst
is positive. The zero divisor is also positive.
'''
for e in self.e_lst:
if e != 0:
return e > 0
return True
@staticmethod
def get_min_rank( lbl ):
'''
Parameters
----------
lbl : string
A string with format as output of "self.get_label()".
Returns
-------
int
The minimal rank of the "Div" object with a given label.
Examples
--------
>>> get_min_rank('78')
9
>>> get_min_rank('301')
9
>>> get_min_rank('12')
3
'''
d = Div.new( lbl )
lst = [ e for e in d.e_lst ]
while lst[-1] == 0 and lst != []:
lst.pop()
return len( lst )
    def get_basis_change( self, B ):
        '''
        Parameters
        ----------
        B : sage_matrix
            A matrix whose rows correspond to generators of
            a new basis. We assume that the intersection matrix
            for this basis is the default diagonal matrix with
            diagonal (1,-1,...,-1).

        Returns
        -------
        Div
            A new "Div" object, which represents the current divisor
            with respect to a new basis.
        '''
        # intersection matrix transforms by congruence with B
        new_int_mat = B * self.int_mat * B.T
        # coefficients transform by the inverse transpose of B
        new_e_lst = self.mat_mul( ~( B.T ) ).e_lst

        return Div( new_e_lst, new_int_mat )
    def __get_minus_two_label( self ):
        '''
        Private helper method for "get_label()"

        Parameters
        ----------
        self : Div
            self*self==-2 and self.rank<=9.

        Returns
        -------
        string
            See output documents for self.get_label()

        Raises
        ------
        ValueError
            If "self" is not a (-2)-class of rank at most 9.
        '''
        if self * self != -2 or self.rank() > 9:
            raise ValueError( 'Unexpected input for __get_mt_label: ', self.e_lst )

        # first non-zero coefficient negative?
        neg = [e < 0 for e in self.e_lst if e != 0][0]

        # check whether the label should start with minus symbol;
        # from here on work with the positively oriented representative
        if neg:
            tmp = [-e for e in self.e_lst]
        else:
            tmp = self.e_lst

        # set of non-zero coefficients for ei.
        oset = set( [ e for e in tmp[1:] if e != 0 ] )

        # e1-e2 ---> '12'
        if tmp[0] == 0 and oset == set( [1, -1] ):
            lbl = ''
            for i in range( 1, len( tmp ) ):
                if tmp[i] != 0:
                    lbl += str( i )

        # e0-e1-e2-e3 ---> '1123'
        elif tmp[0] == 1 and oset == set( 3 * [-1] ):
            lbl = '1'
            for i in range( 1, len( tmp ) ):
                if tmp[i] != 0:
                    lbl += str( i )

        # 2e0-e3-e4-...-e8 ---> '212'
        # (the label lists the two indices with zero coefficient)
        elif tmp[0] == 2 and oset == set( 6 * [-1 ] ):
            lbl = '2'
            for i in range( 1, len( tmp ) ):
                if tmp[i] == 0:
                    lbl += str( i )

        # 3e0-e1-e2-...-e7-2e8 ---> '308'
        # (the label lists the index with coefficient -2)
        elif tmp[0] == 3 and oset == set( 7 * [-1 ] + [-2] ):
            lbl = '30'
            for i in range( 1, len( tmp ) ):
                if tmp[i] == -2:
                    lbl += str( i )

        if neg:
            lbl = '-' + lbl  # for example: 12 --> -12

        return lbl
    def get_abbr_label( self ):
        '''
        Returns
        -------
        string
            We describe the output label in terms of examples.

                > e1                ---> 'e1'
                > e1-e2             ---> 'e12'
                > 2e0-e1-e2-e4-e5   ---> '2e1245'
                > e0-e1             ---> '1e1'

            This options only works for special cases.
            The cases which are covered are (-1)- and (-2)-classes,
            and classes of conical families on weak Del Pezzo surfaces,
            with respect to the basis with intersection product
            defined by the diagonal matrix with diagonal (1,-1,...,-1).

        Raises
        ------
        ValueError
            If the divisor class is not one of the special cases above.
        '''
        # counts of +1, -1 and "other" coefficients among e1,e2,...
        # (self[i] relies on __getitem__, defined elsewhere in this class)
        np1 = len( [e for e in self.e_lst[1:] if e == 1] )
        nm1 = len( [e for e in self.e_lst[1:] if e == -1] )
        n01 = len( [e for e in self.e_lst[1:] if e > 1 or e < -1] )

        if n01 == 0 and self[0] in range( 0, 10 ):

            # e1
            if self[0] == 0 and np1 == 1 and nm1 == 0:
                return 'e' + str( self.e_lst.index( 1 ) )

            # e1-e2
            if self[0] == 0 and np1 == 1 and nm1 == 1:
                return 'e' + str( self.e_lst.index( 1 ) ) + str( self.e_lst.index( -1 ) )

            # 2h-e1-e2-e3-e4-e5 or h-e1
            if self[0] in range( 0, 10 ) and np1 == 0 and nm1 > 0:
                lbl = str( self[0] ) + 'e'
                for i in range( 1, len( self.e_lst ) ):
                    if self[i] != 0:
                        lbl += str( i )
                return lbl

        raise ValueError( 'Input is not treated by this function (use get_label() instead):', self.e_lst )
def get_label( self, abbr = False ):
'''
Parameters
----------
abbr : boolean
Returns
-------
string
We describe the output label in terms of examples.
If "abbr==True" and self*self==-2 and self.rank()<=9:
> e1-e2 ---> '12'
> -e1+e2 ---> '-12'
> e0-e1-e2-e3 ---> '1123'
> 2e0-e3-e4-...-e8 ---> '212'
> 3e0-e1-e2-...-e7-2e8 ---> '308'
> -3e0+e1+e2+...+e7+2e8 ---> '-308'
For the remaining cases not treated above:
> 3e0-2e1-13e2-4e3 ---> '3e0-2e1-13e2-4e3'
'''
divK = Div( [-3] + ( self.rank() - 1 ) * [1] )
# treat cases for (-2)-label
#
if abbr and self * self == -2 and self.rank() <= 9 and self * divK == 0:
return self.__get_minus_two_label()
# from this point on we treat the general case
#
lbl = ''
for i in range( 0, len( self.e_lst ) ):
val = self[i]
if val != 0:
if val == 1:
if lbl != '': lbl += '+'
elif val == -1:
lbl += '-'
else:
if val > 1 and lbl != '':
lbl += '+'
lbl += str( val )
lbl += 'e' + str( i )
return lbl
def mat_mul( self, M ):
    '''
    Apply a linear transformation to this divisor class.

    Parameters
    ----------
    M : sage_matrix
        A matrix with self.rank() columns.

    Returns
    -------
    Div
        The "Div" object obtained by applying the linear
        transformation corresponding to "M" to self.
    '''
    col_vec = sage_vector( self.e_lst ).column()
    new_col = M * col_vec
    return Div( new_col.list() )
def int_mul( self, n ):
    '''
    Multiply this divisor class with an integer scalar.

    Parameters
    ----------
    n : int

    Returns
    -------
    Div
        The "Div" object obtained by multiplying every
        coefficient of self with the scalar "n".
    '''
    # scalar multiplication realized as a diagonal linear transformation
    scalar_mat = sage_diagonal_matrix( self.rank() * [n] )
    return self.mat_mul( scalar_mat )
# operator overloading for ==
def __eq__( self, other ):
    '''Return True iff "other" is a Div with the same coefficient list.'''
    # Guard against comparison with None or a foreign type, which would
    # otherwise raise AttributeError on "other.e_lst"
    # (consistent with DPLattice.__eq__).
    if type( self ) != type( other ):
        return False
    return self.e_lst == other.e_lst
# operator overloading for !=
def __ne__( self, other ):
    '''Return True iff self and other are not equal wrt. __eq__.'''
    return not ( self == other )
# operator overloading for <
# Used for sorting lists of "Div"-objects:
# <http://stackoverflow.com/questions/1227121/compare-object-instances-for-equality-by-their-attributes-in-python>
def __lt__( self, other ):
    '''
    Parameters
    ----------
    other : Div

    Returns
    -------
    bool
        Here are some examples to explain
        the ordering we use for div classes

        e1 < e2
        e0-e1-e2 < e0-e1-e3
        1123 < 308
        1123 < 1124
        12 < 1123
        12 < 13
        12 < 34
    '''
    # classes of lower rank always come first
    if self.rank() != other.rank():
        return self.rank() < other.rank()
    a = self.e_lst
    b = other.e_lst
    # Special case: both classes are a single basis element ei
    # (all coefficients 0 except one 1); compare reversed so that
    # e1 sorts before e2.
    if sum( a ) == sum( b ) == 1 and set( a ) == set( b ) == {0, 1}:
        return b < a  # e1 < e2
    # Negate and reverse the ei-coefficients (the leading e0-coefficient
    # is kept as-is) so that plain lexicographic comparison of the
    # transformed lists yields the ordering documented above.
    a = [a[0]] + [ -elt for elt in reversed( a[1:] )]
    b = [b[0]] + [ -elt for elt in reversed( b[1:] )]
    return a < b  # lexicographic order
# operator overloading for *
def __mul__( self, div ):
    '''
    Parameters
    ----------
    div : Div

    Returns
    -------
    int
        The intersection product of "self" and "div" wrt. to the
        matrix "self.int_mat". Note that the result is an integer
        (the single entry of the 1x1 matrix row_vec * mat * col_vec),
        not a "Div" object.
    '''
    row_vec = sage_vector( sage_ZZ, self.e_lst ).row()
    col_vec = sage_vector( sage_ZZ, div.e_lst ).column()
    mat = self.int_mat
    v = row_vec * mat * col_vec
    # v is a 1x1 matrix; extract its single entry
    return v[0][0]
# operator overload for +
def __add__( self, div ):
    '''Return the componentwise sum of self and div as a new Div.'''
    sum_vec = sage_vector( sage_ZZ, self.e_lst ) + sage_vector( sage_ZZ, div.e_lst )
    return Div( list( sum_vec ) )
# operator overload for -
def __sub__( self, div ):
    '''Return the componentwise difference of self and div as a new Div.'''
    dif_vec = sage_vector( sage_ZZ, self.e_lst ) - sage_vector( sage_ZZ, div.e_lst )
    return Div( list( dif_vec ) )
# operator overloading for []
def __getitem__( self, index ):
    '''Return the "index"-th coefficient in self.e_lst.'''
    return self.e_lst[index]
# operator overloading for []
def __setitem__( self, index, item ):
    '''Set the "index"-th coefficient in self.e_lst to "item".'''
    self.e_lst[index] = item
# overloading for str(.): human readable string representation of object
def __str__( self ):
    '''Return the label if Div.short_output holds, else the raw coefficient list.'''
    return self.get_label() if Div.short_output else str( self.e_lst )
# overloading "__repr__()" as well, since python call this for Div objects in a list
def __repr__( self ):
return self.__str__()
# so that lists of this object can be used with set()
def __hash__( self ):
    '''Hash on the string representation combined with the rank.'''
    key = str( self ) + '__' + str( self.rank() )
    return hash( key )
| 16,148 | 27.683837 | 122 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/class_dp_lattice.py | '''
Created on Aug 15, 2016
@author: Niels Lubbes
This module is for classifying real structures and singularities
of weak Del Pezzo surfaces of degree between 1 and 7.
'''
import time
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_QQ
from ns_lattice.sage_interface import sage_Subsets
from ns_lattice.sage_interface import sage_VectorSpace
from ns_lattice.sage_interface import sage_vector
from ns_lattice.sage_interface import sage_Graph
from ns_lattice.div_in_lattice import get_divs
from ns_lattice.div_in_lattice import get_indecomp_divs
from ns_lattice.div_in_lattice import get_ak
from ns_lattice.dp_root_bases import get_graph
from ns_lattice.dp_root_bases import get_ext_graph
from ns_lattice.dp_root_bases import get_dynkin_type
from ns_lattice.dp_root_bases import convert_type
from ns_lattice.dp_root_bases import get_root_bases_orbit
from ns_lattice.dp_root_bases import is_root_basis
from ns_lattice.dp_involutions import basis_to_involution
from ns_lattice.dp_involutions import is_integral_involution
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_div import Div
from ns_lattice.class_eta import ETA
class DPLattice:
'''
Represents an equivalence class of the Neron-Severi lattice
of a real weak del Pezzo surface, together with an involution "M"
and a set of effective (-2)-classes "d_lst". The effective (-2)-classes
form the basis of a root system.
( ZZ<e0,e1,...,er>, M, d_lst )
From these objects it is possible to compute the remaining attributes of
this class.
If <e0,e1,...,er> is a basis for the Neron-Severi lattice of the
projective plane P^2 blown up in r points then the the canonical
class k equals
k=-3e0+e1+...+er.
The intersection product is in this case -e0^2=e1^2=...=er^2=-1 with
remaining intersections zero.
Otherwise if <e0,e1,...,er> is a basis for the Neron-Severi lattice of the
P^1xP^1 blown up in r points then the the canonical
class k equals
k=-2*(e0+e1).
The intersection product is in this case -h*e1=e2^2=...=er^2=-1 with
remaining intersections zero.
Attributes
----------
M : sage_matrix<sage_ZZ>
A matrix which correspond to an involution of the lattice
<e0,e1,...,er> with r=rank-1 and 2 <= r <= 8.
Md_lst : list<Div>
A list of "Div" objects that correspond to the eigenvectors
of eigenvalue 1 of M. These "Div" objects form a basis of
a root subsystem.
Mtype : string
A String that denotes the Dynkin type of "Md_lst".
d_lst : list<Div>
A list of "Div" objects d such that d*d==-2 and d*k=0
where k denotes the canonical class. These elements
represent effective (-2)-classes.
type : string
A String that denotes the Dynkin type of "d_lst".
m1_lst : list<Div>
A list of "Div" objects "m" such that
m*m==-1==m*k and m*d>=0 for all d in d_lst,
where k denotes the canonical class.
These elements represent (-1)-classes that cannot be written
as the sum of two effective classes.
In other words, the classes are indecomposable.
fam_lst : list<Div>
A list of "Div" objects "f" such that
f*f==0, f*(-k)==2 and m*d>=0
for all d in d_lst, where k denotes the canonical class.
real_d_lst : list<Div>
A list "Div" objects that represent indecomposable and
real (-2)-classes. Thus these classes are send to itself by M.
Geometrically these classes correspond to real isolated singularities.
real_m1_lst : list<Div>
A list "Div" objects that represent indecomposable and
real (-1)-classes. Thus these classes are send to itself by M.
Geometrically these classes correspond to real lines.
real_fam_lst : list<Div>
A list "Div" objects that represent real classes in "self.fam_lst".
Thus these classes are send to itself by M.
Geometrically these classes correspond to a real families of conics.
or_lst : list<Div>
A list of "Div" objects that represents roots that are orthogonal to
"self.d_lst".
sr_lst : list<Div>
A list of "Div" objects that represents roots that are contained in
the subspace spanned by "self.d_lst".
G : sage_GRAPH
The Cremona invariant for the current lattice.
SG : sage_GRAPH
Simple family graph (see self.get_SG()).
SG_data : [int, int, list<int>, list<int>, bool, bool, bool, bool ]
A list of of data that characterizes the simple family graph (see self.get_SG()).
'''
def __init__( self, d_lst, Md_lst, M ):
    '''
    Constructor.

    Parameters
    ----------
    d_lst : list<Div>
        Effective (-2)-classes forming a root basis.
    Md_lst : list<Div>
        Root basis spanning the eigenspace of eigenvalue 1 of M.
    M : sage_matrix<sage_ZZ>
        A matrix representing an involution of the lattice.

    Returns
    -------
    DPLattice
        A DPLattice object whose attributes DPLattice.M,
        DPLattice.Md_lst and DPLattice.d_lst are set according
        to the input. The remaining attributes can be computed
        from these via set_attributes().

        In order for this object to make sense, it is required that
        the involution "M" preserves "d_lst" as a set. Geometrically
        this means that the involution sends isolated singularities
        to isolated singularities.
    '''
    self.d_lst = d_lst
    self.Md_lst = Md_lst
    self.M = M
    # the remaining attributes are computed lazily by set_attributes()
    for attr in [ 'm1_lst', 'fam_lst', 'real_d_lst', 'real_m1_lst',
                  'real_fam_lst', 'Mtype', 'type', 'or_lst', 'sr_lst',
                  'G', 'SG', 'SG_data' ]:
        setattr( self, attr, None )
def set_attributes( self, level = 9 ):
    '''
    Sets attributes of this object, depending on the input level.

    For constructing a classification we instantiate many DPLattice
    objects. This method allows us to minimize the number of
    attributes that are computed (thus we use lazy evaluation).

    Parameter
    ---------
    self: DPLattice
        At least self.M, self.Md_lst and self.d_lst
        should be initialized.

    level : int
        A non-negative number. Attributes are initialized
        incrementally: level 0 computes self.m1_lst only,
        each higher level initializes one additional attribute,
        up to level 9 which also computes the graph self.G.
    '''
    # M, Md_lst and d_lst are set.
    if self.m1_lst == None:
        # indecomposable (-1)-classes compatible with the singularities d_lst
        all_m1_lst = get_divs( get_ak( self.get_rank() ), 1, -1, True )
        self.m1_lst = get_indecomp_divs( all_m1_lst, self.d_lst )
    if level < 1: return
    if self.fam_lst == None:
        # classes of families of conics compatible with d_lst
        all_fam_lst = get_divs( get_ak( self.get_rank() ), 2, 0, True )
        self.fam_lst = get_indecomp_divs( all_fam_lst, self.d_lst )
    if level < 2: return
    if self.real_d_lst == None:
        # (-2)-classes fixed by the involution M
        self.real_d_lst = [ d for d in self.d_lst if d.mat_mul( self.M ) == d ]
    if level < 3: return
    if self.real_m1_lst == None:
        # (-1)-classes fixed by the involution M
        self.real_m1_lst = [ m1 for m1 in self.m1_lst if m1.mat_mul( self.M ) == m1 ]
    if level < 4: return
    if self.real_fam_lst == None:
        # families of conics fixed by the involution M
        self.real_fam_lst = [ f for f in self.fam_lst if f.mat_mul( self.M ) == f ]
    if level < 5: return
    if self.or_lst == None:
        # roots orthogonal to all elements of d_lst
        self.or_lst = []
        for m2 in get_divs( get_ak( self.get_rank() ), 0, -2, True ):
            if [m2 * d for d in self.d_lst] == len( self.d_lst ) * [0]:
                self.or_lst += [m2]
    if level < 6: return
    if self.sr_lst == None:
        # roots contained in the rational span of d_lst
        V = sage_VectorSpace( sage_QQ, self.get_rank() )
        W = V.subspace( [d.e_lst for d in self.d_lst] )
        self.sr_lst = []
        for m2 in get_divs( get_ak( self.get_rank() ), 0, -2, True ):
            if sage_vector( m2.e_lst ) in W:
                self.sr_lst += [ m2 ]
    if level < 7: return
    if self.type == None:
        self.type = get_dynkin_type( self.d_lst )
    if level < 8: return
    if self.Mtype == None:
        self.Mtype = get_dynkin_type( self.Md_lst )
    if level < 9: return
    if self.G == None:
        # Cremona invariant graph used by __eq__ for equivalence testing
        self.G = get_ext_graph( self.d_lst + self.m1_lst, self.M )
def get_rank( self ):
    '''
    Parameters
    ----------
    self : DPLattice
        We expect self.M != None.

    Returns
    -------
    int
        The rank of the lattice, read off from the number
        of rows of the involution matrix self.M.
    '''
    nrows, _ = self.M.dimensions()
    return nrows
def get_degree( self ):
    '''
    Parameters
    ----------
    self : DPLattice
        We expect self.M != None.

    Returns
    -------
    int
        The degree of the weak del Pezzo surface whose
        Neron-Severi lattice corresponds to self.
    '''
    rank = self.get_rank()
    return 10 - rank
def get_numbers( self ):
    '''
    Parameters
    ----------
    self : DPLattice

    Returns
    -------
    tuple<int>
        A tuple of 6 integers (note: a tuple, not a list):

        0: #indecomposable (-2)-classes
        1: #indecomposable (-1)-classes
        2: #families of conics
        3: #real effective (-2)-classes
        4: #real indecomposable (-1)-classes
        5: #real families of conics

        where # stands for number of.

        Note that a divisor class is indecomposable
        if it is effective and cannot be written as
        the sum of two effective classes.
    '''
    self.set_attributes( 6 )
    return ( len( self.d_lst ),
             len( self.m1_lst ),
             len( self.fam_lst ),
             len( self.real_d_lst ),
             len( self.real_m1_lst ),
             len( self.real_fam_lst ) )
def contains_fam_pair( self ):
    '''
    Parameters
    ----------
    self : DPLattice

    Returns
    -------
    bool
        True iff self.real_fam_lst contains two Div classes
        with intersection product one. Geometrically this means
        that a weak del Pezzo surface with a Neron-Severi lattice
        that is isomorphic to this one, must be birational to P1xP1
        (ie. fiber product of the projective line with itself).
    '''
    self.set_attributes( 6 )
    fam_lst = self.real_fam_lst
    return any( g1 * g2 == 1 for g1 in fam_lst for g2 in fam_lst )
def is_real_minimal( self ):
    '''
    Parameters
    ----------
    self : DPLattice

    Returns
    -------
    bool
        True iff self.m1_lst contains no class u such that
        its image v = u.mat_mul( self.M ) satisfies either
            * v == u, or
            * u * v == 0.
        In that case self is the DPLattice of a real-minimal
        weak del Pezzo surface: neither a real exceptional curve
        nor a pair of disjoint complex conjugate exceptional
        curves can be contracted.
    '''
    self.set_attributes( 0 )
    for u in self.m1_lst:
        conj = u.mat_mul( self.M )
        if conj == u or conj * u == 0:
            return False
    return True
def get_marked_Mtype( self ):
    '''
    We mark Mtype with a '-symbol to distinguish between real
    structures of the same Dynkin type that are not conjugate.

    Returns
    -------
    string
        self.Mtype, possibly suffixed with "'".
    '''
    # BUGFIX: initialize self.Mtype before any return statement;
    # previously the first branch could return None when the degree
    # is not 6, 4 or 2 and set_attributes() was never called.
    self.set_attributes( 8 )
    if self.get_degree() not in [6, 4, 2]:
        return self.Mtype
    # only these (degree, Mtype) combinations admit inequivalent
    # real structures of the same Dynkin type
    if ( self.get_degree(), self.Mtype ) not in [ ( 6, 'A1' ), ( 4, '2A1' ), ( 2, '3A1' ) ]:
        return self.Mtype
    mark = ''
    if list( self.M.T[0] ) != [1] + ( self.get_rank() - 1 ) * [0]:
        # in this case e0 is not send to e0 by the involution self.M
        mark = "'"
    return self.Mtype + mark
def get_real_type( self ):
    '''
    Gets the Dynkin type (self.type) of self.d_lst.
    The components of the Dynkin diagram that are preserved by
    the involution induced by the real structure are marked.
    For example, {A2} means that the elements in the root bases
    for the A2 root systems are preserved elementwise by the involution.
    We write [A2] if the root bases is preserved by the involution
    as a whole but not element wise.
    We write 2A2 if the two A2 root bases are interchanged by the
    involution. Instead of 3A2 we may write for example [A2]+{A2}+2A2.

    Returns
    -------
    string
        Dynkin types of components
    '''
    comp_lst = get_graph( self.d_lst ).connected_components()
    comp_lst.reverse()  # smaller components first
    if comp_lst == []:
        return 'A0'
    # construct list of types, one entry per connected component
    type_lst = []
    for comp in comp_lst:
        # roots of the current component and their images under M
        c_lst = [ self.d_lst[i] for i in comp ]
        mc_lst = []
        elementwise = True
        for c in c_lst:
            mc = c.mat_mul( self.M )
            mc_lst += [mc]
            if c != mc:
                elementwise = False
        mc_lst.sort()
        type = get_dynkin_type( c_lst )
        # mark the type depending on how M acts on the component
        if set( mc_lst ) == set( c_lst ) and c_lst != []:
            if elementwise:
                type_lst += ['{' + type + '}']  # fixed elementwise
            else:
                type_lst += ['[' + type + ']']  # fixed as a set only
        else:
            type_lst += [type]  # component is moved by M
    # construct string; equal (marked) types are collected with a multiplicity prefix
    out = ''
    while type_lst != []:
        type = type_lst[0]
        num = type_lst.count( type )
        if num != 1: out += str( num )
        out += type + '+'
        type_lst = [ elt for elt in type_lst if elt != type ]
    out = out[:-1]  # remove last plus
    return out
def get_basis_change( self, B ):
    '''
    Parameters
    ----------
    self : DPLattice

    B : sage_matrix<sage_ZZ>
        A matrix whose rows correspond to generators of
        a new basis. We assume that the intersection
        matrix for this basis is the default
        diagonal matrix with diagonal (1,-1,...,-1).

    Returns
    -------
    DPLattice
        A new "DPLattice" object, which represents the current
        lattice with respect to a new basis.
    '''
    self.set_attributes( 6 )
    d_lst_B = [ d.get_basis_change( B ) for d in self.d_lst ]
    Md_lst_B = [ Md.get_basis_change( B ) for Md in self.Md_lst ]
    M_B = ~( B.T ) * self.M * ( B.T )  # ~B is inverse of B, new involution after coordinate change
    dpl = DPLattice( d_lst_B, Md_lst_B, M_B )
    # the Dynkin types are invariant under a basis change
    dpl.Mtype = self.Mtype
    dpl.type = self.type
    # transform the cached divisor classes to the new basis as well
    dpl.m1_lst = [ m1.get_basis_change( B ) for m1 in self.m1_lst ]
    dpl.fam_lst = [ fam.get_basis_change( B ) for fam in self.fam_lst ]
    dpl.real_d_lst = [ d.get_basis_change( B ) for d in self.real_d_lst ]
    dpl.real_m1_lst = [ m1.get_basis_change( B ) for m1 in self.real_m1_lst ]
    dpl.real_fam_lst = [ fam.get_basis_change( B ) for fam in self.real_fam_lst ]
    return dpl
def get_SG( self ):
    '''
    The simple family graph associated to the
    Neron-Severi lattice of a weak del Pezzo surface
    is defined as the incidence diagram of self.real_fam_lst,
    with the edges labeled <=1 removed.
    All vertices are labeled with the index of the element in
    self.real_fam_lst.

    In the mathematical version (see arxiv paper) the vertices
    are labeled with the dimension of the linear series, which is
    always 1 with one exception:
    If len(self.real_fam_lst)==0 and rank==3, then
    the simple family graph consists of a single vertex labeled 2.

    Example
    -------
    # The following graph is related to the E8 root system:
    #
    dpl = DPLattice.get_cls( 9 )[0]
    assert set(dpl.get_SG().num_verts()) == {2160}
    assert set(dpl.get_SG().get_degree()) == {2095}
    assert set(dpl.get_SG().edge_labels()) == {2,3,4,5,6,7,8}

    Returns
    -------
    sage_GRAPH, [int, int, list<int>, list<int>, bool, bool, bool, bool ]
        The simple family graph self.SG and a list self.SG_data
        associated to the current DPLattice object.
        Here self.SG_data consists of data that describes self.SG.
        This method also initializes self.SG and self.SG_data.
    '''
    # return the cached graph if it was computed before
    if self.SG != None:
        return self.SG, self.SG_data
    # BUGFIX: ensure self.real_fam_lst is initialized; previously this
    # method crashed (len(None)) for rank != 9 unless the caller had
    # already invoked set_attributes().
    self.set_attributes( 6 )
    if self.get_rank() == 9 and self.get_numbers()[-1] > 800:
        NSTools.p( 'Initializing simple family graph of current DPLattice object...', self.get_rank(), self.get_marked_Mtype(), self.get_real_type() )
    f = self.real_fam_lst
    f_range = range( len( f ) )
    self.SG = sage_Graph()
    self.SG.add_vertices( f_range )
    # add an edge for each pair of families with intersection product > 1
    for i in f_range:
        for j in f_range:
            if f[i] * f[j] > 1:
                self.SG.add_edge( i, j, f[i] * f[j] )
    self.SG_data = [ self.SG.num_verts(),  # number of vertices
                     self.SG.num_edges(),  # number of edges
                     sorted( list( set( self.SG.degree() ) ) ),  # possible numbers of outgoing edges
                     sorted( list( set( self.SG.edge_labels() ) ) ),  # possible edge labels
                     self.SG.is_clique(),  # True iff the graph is complete.
                     self.SG.is_connected(),
                     self.SG.is_vertex_transitive(),
                     self.SG.is_edge_transitive()]
    return self.SG, self.SG_data
@staticmethod
def get_bas_lst( rank = 9 ):
    '''
    See [Algorithm 5, http://arxiv.org/abs/1302.6678] for more info.

    Parameters
    ----------
    rank : int
        An integer in [3,...,9].

    Returns
    -------
    list<DPLattice>
        A list of "DPLattice" objects dpl such that dpl.d_lst
        is the bases of a root subsystem and dpl.Mtype == A0.
        The list contains exactly one representative for all
        root subsystems up to equivalence.

        The list represents a classification of root
        subsystems of the root system with Dynkin type either:
            A1, A1+A2, A4, D5, E6, E7 or E8,
        corresponding to ranks 3, 4, 5, 6, 7, 8 and 9 respectively
        (eg. A1+A2 if rank equals 4, and E8 if rank equals 9).

        Note that the root systems live in a subspace of
        the vector space associated to the Neron-Severi lattice
        of a weak Del Pezzo surface.
    '''
    # check whether classification of root bases is in cache
    key = 'get_bas_lst__' + str( rank )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    NSTools.p( 'start' )
    # candidate roots, denoted by their compact integer labels;
    # the four pairs (lst1,lst2) below together exhaust all
    # root subsystem types up to equivalence
    A = [ 12, 23, 34, 45, 56, 67, 78]
    B = [ 1123, 1145, 1456, 1567, 1678, 278 ]
    C = [ 1127, 1347, 1567, 234, 278, 308 ]
    D = [ 1123, 1345, 1156, 1258, 1367, 1247, 1468, 1178 ]
    dpl_lst = []
    for ( lst1, lst2 ) in [ ( A, [] ), ( A, B ), ( A, C ), ( [], D ) ]:
        # restrict to divisors in list, that are of rank at most "max_rank"
        lst1 = [ Div.new( str( e ), rank ) for e in lst1 if rank >= Div.get_min_rank( str( e ) ) ]
        lst2 = [ Div.new( str( e ), rank ) for e in lst2 if rank >= Div.get_min_rank( str( e ) ) ]
        # the involution is trivial
        Md_lst = []
        M = sage_identity_matrix( sage_QQ, rank )
        # loop through all subsets of both lists
        sub1 = sage_Subsets( range( len( lst1 ) ) )
        sub2 = sage_Subsets( range( len( lst2 ) ) )
        eta = ETA( len( sub1 ) * len( sub2 ), 20 )
        for idx2_lst in sub2:
            for idx1_lst in sub1:
                eta.update( 'get_bas_lst rank =', rank )
                d_lst = [ lst1[idx1] for idx1 in idx1_lst ]
                d_lst += [ lst2[idx2] for idx2 in idx2_lst ]
                # only keep selections that actually form a root basis
                if not is_root_basis( d_lst ):
                    continue
                dpl = DPLattice( d_lst, Md_lst, M )
                # skip candidates equivalent to an already collected lattice
                if dpl not in dpl_lst:
                    dpl.set_attributes()
                    dpl_lst += [dpl]
    # cache output
    dpl_lst.sort()
    NSTools.get_tool_dct()[key] = dpl_lst
    NSTools.save_tool_dct()
    return dpl_lst
@staticmethod
def get_inv_lst( rank = 9 ):
    '''
    Outputs a list representing a classification of root
    subsystems that define unimodular involutions on the
    Neron-Severi lattice of a weak del Pezzo surface.
    We consider root subsystems of the root system with Dynkin
    type either:
        A1, A1+A2, A4, D5, E6, E7 or E8,
    corresponding to ranks 3, 4, 5, 6, 7, 8 and 9 respectively
    (eg. A1+A2 if rank equals 4, and E8 if rank equals 9).
    Note that root systems live in a subspace of
    the vector space associated to the Neron-Severi lattice
    of a weak Del Pezzo surface.

    Parameters
    ----------
    rank : int
        An integer in [3,...,9].

    Returns
    -------
    list<DPLattice>
        A list of "DPLattice" objects dpl such that dpl.Md_lst
        is the bases of a root subsystem and dpl.type == A0.
        The list contains exactly one representative for
        root subsystems up to equivalence, so that the root
        subsystem defines a unimodular involution.
    '''
    # check cache
    # BUGFIX: the lookup used to be disabled with "if False and ...",
    # so the classification was stored at the end but never reused.
    key = 'get_inv_lst__' + str( rank )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    bas_lst = DPLattice.get_bas_lst( rank )
    NSTools.p( 'rank =', rank )
    amb_lst = []  # pairs of equivalent involutions with different Dynkin types
    inv_lst = []
    eta = ETA( len( bas_lst ), 1 )
    for bas in bas_lst:
        eta.update( bas.type )
        # candidate involution induced by the root basis bas.d_lst
        M = basis_to_involution( bas.d_lst, rank )
        if not is_integral_involution( M ):
            continue
        inv = DPLattice( [], bas.d_lst, M )
        inv.set_attributes()
        NSTools.p( 'Found type of involution: ', bas.type )
        # real structures with different Dynkin types may be equivalent
        if inv not in inv_lst:
            inv_lst += [ inv ]
        else:
            # keep only the larger of the two equivalent representatives
            inv_prv = [inv2 for inv2 in inv_lst if inv == inv2][0]
            inv_lst = [inv2 for inv2 in inv_lst if not inv2 == inv]
            amb_lst += [inv, inv_prv]
            if inv > inv_prv:
                inv_lst += [inv]
            else:
                inv_lst += [inv_prv]
            NSTools.p( '\tAmbitious type:', inv.Mtype, '==', inv_prv.Mtype,
                       ' inv>inv_prv: ', inv > inv_prv,
                       ' ambitious types =', [ amb.Mtype for amb in amb_lst if amb == inv ] )
    # store in cache
    inv_lst.sort()
    NSTools.get_tool_dct()[key] = inv_lst
    NSTools.save_tool_dct()
    return inv_lst
@staticmethod
def get_cls_slow( rank = 7 ):
    '''
    Use get_cls() for a faster method. This method does not terminate
    within reasonable time if rank>7. We still keep the method in order to
    compare the outcomes in case rank<=9.

    Parameters
    ----------
    rank : int
        An integer in [3,...,9].

    Returns
    -------
    list<DPLattice>
        A list of DPLattice objects corresponding to Neron-Severi lattices
        of weak Del Pezzo surfaces of degree (10-rank). The list contains
        exactly one representative for each equivalence class.
        All the Div objects referenced in the DPLattice objects of
        the output have the default intersection matrix:
            diagonal matrix with diagonal: (1,-1,...,-1).
    '''
    # check cache
    key = 'get_cls_slow__' + str( rank )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    inv_lst = DPLattice.get_inv_lst( rank )
    bas_lst = DPLattice.get_bas_lst( rank )
    # we fix an involution up to equivalence and go through
    # all possible root bases for singularities.
    dpl_lst = []
    eta = ETA( len( bas_lst ) * len( inv_lst ), 20 )
    for inv in inv_lst:
        for bas in bas_lst:
            # all root bases equivalent to bas.d_lst
            orbit_lst = get_root_bases_orbit( bas.d_lst )
            eta.update( 'len( orbit_lst ) =', len( orbit_lst ) )
            for d_lst in orbit_lst:
                # check whether involution inv.M preserves d_lst
                dm_lst = [ d.mat_mul( inv.M ) for d in d_lst ]
                dm_lst.sort()
                if dm_lst != d_lst:
                    continue
                # add to classification if not equivalent to objects
                # in list, see "DPLattice.__eq__()".
                dpl = DPLattice( d_lst, inv.Md_lst, inv.M )
                if dpl not in dpl_lst:
                    dpl.set_attributes()
                    dpl_lst += [dpl]
    # store in cache
    dpl_lst.sort()
    NSTools.get_tool_dct()[key] = dpl_lst
    NSTools.save_tool_dct()
    return dpl_lst
@staticmethod
def get_num_types( inv, bas, bas_lst ):
    '''
    Returns the number of root bases in the
    eigenspace of eigenvalue 1 of the involution
    defined by inv.M.
    If this number is unknown, then -1 is returned.
    This method is used by get_cls() before
    calling seek_bases().

    Parameters
    ----------
    inv : DPLattice

    bas : DPLattice

    bas_lst : list<DPLattice>
        We expect this to be the output of get_bas_lst().
        Thus a list of inequivalent DPLattice objects.

    Returns
    -------
    int
        If there does not exists a DPLattice in bas_lst whose type is
        inv.Mtype and bas.type combined, then return 0.
        Otherwise return either -1 or the
        number of root bases in the eigenspace of eigenvalue 1
        of the involution defined by inv.M.
        We expect this number to be at most 3.
    '''
    # check whether the combined type exists in bas_lst
    t1_lst = convert_type( inv.Mtype )
    t2_lst = convert_type( bas.type )
    type_exists = False
    for bas2 in bas_lst:
        if sorted( t1_lst + t2_lst ) == convert_type( bas2.type ):
            type_exists = True
            break
    if not type_exists:
        return 0
    # computes the roots in the eigenspace of eigenvalue 1
    # of the involution defined by inv
    r_lst = get_divs( get_ak( inv.get_rank() ), 0, -2, True )
    s_lst = [ r for r in r_lst if r.mat_mul( inv.M ) == r ]
    # the eigenspace root system is recognized by its number of
    # positive roots; the multiplicities below were precomputed
    if len( s_lst ) == 30:  # D6 since #roots=60=2*30
        if bas.type in ['2A1', 'A3', '4A1', '2A1+A3', 'A5']:
            return 2
        if bas.type in ['3A1', 'A1+A3']:
            return 3
        return 1
    if len( s_lst ) == 63:  # E7 since #roots=126=2*63
        if bas.type in ['3A1', '4A1', 'A5', 'A1+A3', '2A1+A3', 'A1+A5']:
            return 2
        return 1
    # number of root bases unknown for the remaining eigenspaces
    return -1
@staticmethod
def get_part_roots( inv ):
    '''
    Return two subsets of roots using the input involution.
    This method is used by get_cls().

    Parameters
    ----------
    inv : DPLattice
        We expect inv.type=='A0'.
        We will use inv.Mtype and inv.M.

    Returns
    -------
    list<Div>, list<Div>
        Let R be defined by the list
            get_divs( get_ak( inv.get_rank() ), 0, -2, True )
        whose elements are Div objects.
        If r is a Div object, then M(r) is shorthand notation for
        r.mat_mul(inv.M).
        The two returned lists correspond respectively to
            S := { r in R | M(r)=r }
        and
            Q union Q' := { r in R | M(r) not in {r,-r} and r*M(r)>0 }
        where Q = M(Q').
    '''
    # all (-2)-roots of the lattice
    r_lst = get_divs( get_ak( inv.get_rank() ), 0, -2, True )
    # roots fixed by the involution (the set S above)
    s_lst = [ r for r in r_lst if r.mat_mul( inv.M ) == r ]
    # roots that are not send to plus or minus themselves...
    tq1_lst = [ r for r in r_lst if r.mat_mul( inv.M ) not in [r, r.int_mul( -1 )] ]
    # ...and that meet their conjugate non-negatively
    tq_lst = [ q for q in tq1_lst if q * q.mat_mul( inv.M ) >= 0 ]
    # keep exactly one representative of each conjugate pair {q, M(q)}
    q_lst = []
    for q in sorted( tq_lst ):
        if q not in q_lst and q.mat_mul( inv.M ) not in q_lst:
            q_lst += [q]
    # q_lst += [ q.int_mul( -1 ) for q in q_lst ]
    NSTools.p( 'r_lst =', len( r_lst ), r_lst )
    NSTools.p( 's_lst =', len( s_lst ), s_lst )
    NSTools.p( 'tq1_lst =', len( tq1_lst ), tq1_lst )
    NSTools.p( 'tq_lst =', len( tq_lst ), tq_lst )
    NSTools.p( 'q_lst =', len( q_lst ), q_lst )
    NSTools.p( ' M -->', len( q_lst ), [q.mat_mul( inv.M ) for q in q_lst] )
    NSTools.p( 'inv.Md_lst =', inv.Mtype, inv.Md_lst, ', rank =', inv.get_rank() )
    return s_lst, q_lst
@staticmethod
def seek_bases( inv, d_lst, r_lst, eq = False, num = -1, b_lst = None, bas_lst = None ):
    '''
    Look for root bases in a given set of roots whose Dynkin type
    is the same as a given root bases.
    This method is used by get_cls().

    Parameters
    ----------
    inv : DPLattice
        We use inv.Md_lst and inv.M when creating a
        new DPLattice object.

    d_lst : list<Div>
        We use the intersection matrix associated to d_lst.

    r_lst : list<Div>
        A list of roots in which to look for root bases.

    eq : boolean
        If True, then the returned bases are pairwise
        non-equivalent. By default False, in which case
        only bases that differ by a permutation of elements
        are considered equivalent.

    num : int
        If num>0, then the method will terminate if
        the number of bases found is equal to num.
        If num==-1, then the method continues until
        all possible bases have been reached.

    b_lst : list<Div>
        Used for recursive calling this method and
        represents (a subset of) a candidate root bases.
        Defaults to the empty list.

    bas_lst : list<DPLattice>
        Used for recursive calling this method and
        is the list of DPLattice objects that
        is returned by this method.
        Defaults to the empty list.

    Returns
    -------
    list<DPLattice>
        A list of DPLattice objects "bas"
        such that bas.type is equal to the Dynkin type of d_lst
        and bas.Mtype==inv.Mtype and bas.M==inv.M.
        If eq==True, then the lattice objects are pairwise
        non-equivalent.
        If num>0, then the method terminates if the number
        of bases that are found is equal to num.
    '''
    # BUGFIX: use None-sentinels instead of mutable default arguments;
    # "b_lst = []" in the signature is evaluated once and shared
    # between all calls (classic Python pitfall).
    if b_lst is None:
        b_lst = []
    if bas_lst is None:
        bas_lst = []
    # check whether the constructed basis defines a new DPLattice object
    if len( b_lst ) == len( d_lst ):
        # check if a permutation of b_lst occurred
        if not eq:
            for bas in bas_lst:
                if set( bas.d_lst ) == set( b_lst ):
                    return bas_lst
        # create a new lattice object
        bas = DPLattice( b_lst, inv.Md_lst, inv.M )
        # check whether there is an equivalent object in bas_lst
        if eq and bas in bas_lst:
            return bas_lst
        # return bas_lst appended with the new DPLattice object
        return bas_lst + [bas]
    else:
        # construct list with intersection numbers the next root must match
        s = d_lst[ len( b_lst ) ]
        m_lst = [ d * s for d in d_lst[:len( b_lst )] ]
        # go through all possible roots to build up a basis like d_lst
        for r in r_lst:
            # check intersection number properties
            if [b * r for b in b_lst] == m_lst:
                # recursive call
                bas_lst = DPLattice.seek_bases( inv, d_lst, r_lst, eq, num, b_lst + [r], bas_lst )
                # break out of loop if num bases are found
                if num > 0 and len( bas_lst ) == num:
                    break
        return bas_lst
@staticmethod
def import_cls( cls_lst, inv ):
    '''
    This method is used by get_cls().

    Parameters
    ----------
    cls_lst : list<DPLattice>
        A list of DPLattice objects of rank "inv.get_rank()-1".
        These lattices correspond to Neron-Severi lattices
        of weak Del Pezzo surfaces.

    inv : DPLattice
        A DPLattice object representing an involution.
        We expect inv.Md_lst to be set.

    Returns
    -------
    list<DPLattice>
        A list of compatible DPLattice objects in cls_lst that are
        converted so as to have the same rank and involution matrix as
        inv.get_rank() and inv.M, respectively.
        The returned list always contains inv itself.
    '''
    out_lst = []
    for cls in cls_lst:
        # convert divisors to new rank
        Md_lst = [ Div.new( str( d ), inv.get_rank() ) for d in cls.Md_lst ]
        d_lst = [ Div.new( str( d ), inv.get_rank() ) for d in cls.d_lst ]
        # import if the involution is compatible
        if set( Md_lst ) == set( inv.Md_lst ):
            NSTools.p( 'importing: ', ( inv.get_rank(), cls.get_marked_Mtype(), cls.get_real_type() ), Md_lst, '==', inv.Md_lst )
            out = DPLattice( d_lst, inv.Md_lst, inv.M )
            out.set_attributes()
            out_lst += [ out ]
    # always ensure that at least inv object is contained
    if out_lst == []:
        return [inv]
    # we expect that inv is contained in the out_lst
    # for correctness of the get_cls() algorithm.
    assert inv in out_lst
    return out_lst
@staticmethod
def get_cls( rank = 9 ):
    '''
    Parameters
    ----------
    rank : int
        An integer in [1,...,9].

    Returns
    -------
    list<DPLattice>
        A list of DPLattice objects corresponding to Neron-Severi lattices
        of weak Del Pezzo surfaces of degree (10-rank). The list contains
        exactly one representative for each equivalence class.
        All the Div objects referenced in the DPLattice objects of
        the output have the default intersection matrix:
            diagonal matrix with diagonal: (1,-1,...,-1).
        If rank<3 then the empty list is returned.
    '''
    if rank < 3:
        return []
    # check cache
    key = 'get_cls_' + str( rank )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    NSTools.p( 'rank =', rank )
    # collect all lattices with either d_lst==[] of Md_lst==[]
    bas_lst = DPLattice.get_bas_lst( rank )
    inv_lst = DPLattice.get_inv_lst( rank )
    # we loop through all involutions
    NSTools.p( 'start looping through inv_lst: ', len( inv_lst ), [inv.get_marked_Mtype() for inv in inv_lst] )
    dpl_lst = []
    for inv in inv_lst:
        NSTools.p( 'looping through inv_lst:', ( rank, inv.get_marked_Mtype(), inv.Md_lst ) )
        # recover the known classification
        if inv.Mtype == 'A0':
            NSTools.p( 'Since Mtype equals A0 we recover the classification from bas_lst.' )
            dpl_lst += [bas for bas in bas_lst]
            continue
        # partition the roots into two sets:
        # s_lst contains roots fixed by inv.M, and q_lst contains one
        # representative of each conjugate pair of non-fixed roots
        s_lst, q_lst = DPLattice.get_part_roots( inv )
        # import classification for rank-1 (recursive call)
        bas1_lst = DPLattice.import_cls( DPLattice.get_cls( rank - 1 ), inv )
        NSTools.p( 'looping through inv_lst continued after recursive call:', ( rank, inv.get_marked_Mtype(), inv.Md_lst ) )
        # correct partition of roots (bas1_lst always contains inv)
        if len( bas1_lst ) > 1:
            # only keep roots that involve the last basis vector,
            # since the remaining cases are covered by bas1_lst
            e = Div.new( 'e' + str( rank - 1 ), inv.get_rank() )
            s_lst = [ s for s in s_lst if s * e != 0 ]
            q_lst = [ q for q in q_lst if q * e != 0 ]
        NSTools.p( 'bas1_lst =', len( bas1_lst ), [( bas1.Mtype, bas1.type ) for bas1 in bas1_lst] )
        NSTools.p( 's_lst =', len( s_lst ), s_lst )
        NSTools.p( 'q_lst =', len( q_lst ), q_lst )
        # collect all possible root bases in s_lst and q_lst
        bas2_lst = []
        bas3_lst = []
        visited_type_lst = []
        eta = ETA( len( bas_lst ), 1 )
        for bas in bas_lst:
            # display progress info
            eta.update( 'get_cls seeking bases in s_lst and q_lst: ', ( rank, inv.get_marked_Mtype(), bas.get_real_type() ) )
            # each type in bas_lst is treated only once
            if bas.type in visited_type_lst:
                continue
            visited_type_lst += [bas.type]
            # collect bases of type bas.type in s_lst
            if DPLattice.get_num_types( inv, bas, bas_lst ) != 0:
                bas2_lst += DPLattice.seek_bases( inv, bas.d_lst, s_lst )
            # collect bases of type bas.type in q_lst
            if 2 * len( bas.d_lst ) > rank - 1:
                continue  # the rank of a root subsystem is bounded by rank-1
            tmp_lst = DPLattice.seek_bases( inv, bas.d_lst, q_lst )
            for tmp in tmp_lst:
                # complete each basis with the conjugates of its roots
                tmp.d_lst += [d.mat_mul( inv.M ) for d in tmp.d_lst ]
                if is_root_basis( tmp.d_lst ):  # the roots and their involutions might have intersection product 1
                    tmp.d_lst.sort()
                    bas3_lst += [tmp]
        # debug info
        NSTools.p( 'Setting Dynkin types of', len( bas2_lst + bas3_lst ), 'items...please wait...' )
        eta = ETA( len( bas2_lst + bas3_lst ), len( bas2_lst + bas3_lst ) / 10 )
        for bas in bas2_lst + bas3_lst:
            bas.type = get_dynkin_type( bas.d_lst )
            bas.Mtype = get_dynkin_type( bas.Md_lst )
            eta.update( bas.get_rank(), bas.get_marked_Mtype(), bas.type )
        bas1_lst.sort()
        bas2_lst.sort()
        bas3_lst.sort()
        t_lst1 = [bas.type for bas in bas1_lst]
        t_lst2 = [bas.type for bas in bas2_lst]
        t_lst3 = [bas.type for bas in bas3_lst]
        lst1 = sorted( list( set( [( t, t_lst1.count( t ) ) for t in t_lst1] ) ) )
        lst2 = sorted( list( set( [( t, t_lst2.count( t ) ) for t in t_lst2] ) ) )
        lst3 = sorted( list( set( [( t, t_lst3.count( t ) ) for t in t_lst3] ) ) )
        NSTools.p( 'inv =', inv.get_marked_Mtype(), ', rank =', rank )
        NSTools.p( 'bas1_lst =', len( bas1_lst ), lst1 )
        NSTools.p( 'bas2_lst =', len( bas2_lst ), lst2 )
        NSTools.p( 'bas3_lst =', len( bas3_lst ), lst3 )
        # construct a list of combinations of DPLattice objects in bas1_lst bas2_lst and
        comb_lst = []
        total = len( bas1_lst ) * len( bas2_lst ) * len( bas3_lst )
        step = total / 10 if total > 10 else total
        eta = ETA( total, step )
        for bas1 in bas1_lst:
            for bas2 in bas2_lst:
                for bas3 in bas3_lst:
                    eta.update( 'last loop in get_cls: ( bas1.type, bas2.type, bas3.type )=', ( bas1.type, bas2.type, bas3.type ) )
                    d_lst = bas1.d_lst + bas2.d_lst + bas3.d_lst  # notice that d_lst can be equal to []
                    if len( d_lst ) > rank - 1:
                        continue  # the rank of a root subsystem is bounded by rank-1
                    if is_root_basis( d_lst ):
                        dpl = DPLattice( d_lst, inv.Md_lst, inv.M )
                        if dpl not in dpl_lst:
                            dpl.set_attributes()
                            dpl_lst += [dpl]
                            NSTools.p( '\t appended: ', ( rank, dpl.get_marked_Mtype(), dpl.get_real_type() ), ', ( bas1.type, bas2.type, bas3.type ) =', ( bas1.type, bas2.type, bas3.type ) )
    # store in cache
    #
    dpl_lst.sort()
    NSTools.get_tool_dct()[key] = dpl_lst
    NSTools.save_tool_dct()
    return dpl_lst
# overloading of "=="
# returns True if isomorphic as Neron-Severi lattices
def __eq__( self, other ):
# compared with None?
if type( self ) != type( other ):
return False
# cardinality of classes agree?
if len( self.d_lst ) != len( other.d_lst ):
return False
self.set_attributes( 0 )
other.set_attributes( 0 )
if len( self.m1_lst ) != len( other.m1_lst ):
return False
self.set_attributes( 1 )
other.set_attributes( 1 )
if len( self.fam_lst ) != len( other.fam_lst ):
return False
self.set_attributes( 2 )
other.set_attributes( 2 )
if len( self.real_d_lst ) != len( other.real_d_lst ):
return False
self.set_attributes( 3 )
other.set_attributes( 3 )
if len( self.real_m1_lst ) != len( other.real_m1_lst ):
return False
self.set_attributes( 4 )
other.set_attributes( 4 )
if len( self.real_fam_lst ) != len( other.real_fam_lst ):
return False
self.set_attributes( 5 )
other.set_attributes( 5 )
if len( self.or_lst ) != len( other.or_lst ):
return False
self.set_attributes( 6 )
other.set_attributes( 6 )
if len( self.sr_lst ) != len( other.sr_lst ):
return False
# Dynkin type effective (-2)-classes agree?
self.set_attributes( 7 )
other.set_attributes( 7 )
if self.type != other.type:
return False
# Mtype may differ for equivalent DPLattice objects
# check Cremona invariant
self.set_attributes( 9 )
other.set_attributes( 9 )
if not self.G.is_isomorphic( other.G, edge_labels = True ):
return False
return True
# operator overloading for !=
def __ne__( self, other ):
return not self.__eq__( other )
# operator overloading for <
# Used for sorting lists of DPLattice objects:
# <http://stackoverflow.com/questions/1227121/compare-object-instances-for-equality-by-their-attributes-in-python>
def __lt__( self, other ):
if self.get_rank() != other.get_rank():
return self.get_rank() < other.get_rank()
if len( self.Md_lst ) != len( other.Md_lst ):
return len( self.Md_lst ) < len( other.Md_lst )
self.set_attributes( 8 )
other.set_attributes( 8 )
if self.Mtype != other.Mtype:
return self.Mtype < other.Mtype
if self.get_marked_Mtype() != other.get_marked_Mtype():
return self.get_marked_Mtype() < other.get_marked_Mtype()
if len( self.d_lst ) != len( other.d_lst ):
return len( self.d_lst ) < len( other.d_lst )
if self.type != other.type:
return self.type < other.type
# more real lines implies smaller self.type!
if len( self.real_m1_lst ) != len( other.real_m1_lst ):
return len( self.real_m1_lst ) > len( other.real_m1_lst )
if len( self.m1_lst ) != len( other.m1_lst ):
return len( self.m1_lst ) > len( other.m1_lst )
if len( self.real_fam_lst ) != len( other.real_fam_lst ):
return len( self.real_fam_lst ) > len( other.real_fam_lst )
if len( self.fam_lst ) != len( other.fam_lst ):
return len( self.fam_lst ) > len( other.fam_lst )
# overloading of "str()": human readable string representation of object
def __str__( self ):
self.set_attributes()
s = '\n'
s += 50 * '=' + '\n'
s += 'Degree = ' + str( self.get_degree() ) + '\n'
s += 'Rank = ' + str( self.get_rank() ) + '\n'
s += 'Intersection = ' + str( list( self.m1_lst[0].int_mat ) ) + '\n'
s += 'Real structure = ' + str( self.get_marked_Mtype() ) + '\n'
s += 'Singularities = ' + str( self.type ) + '\n'
s += 'Cardinalities = ' + '(' + str( len( self.or_lst ) ) + ', ' + str( len( self.sr_lst ) ) + ')\n'
arrow = ' ---> '
s += 'Real involution:\n'
b_lst = [Div( row ) for row in sage_identity_matrix( sage_ZZ, self.get_rank() ).rows() ]
for b in b_lst:
s += '\t' + str( b ) + arrow + str( b.mat_mul( self.M ) ) + '\n'
s += 'Indecomposable (-2)-classes:\n'
for d in self.d_lst:
s += '\t' + str( d ) + arrow + str( d.mat_mul( self.M ) ) + '\n'
s += '\t#real = ' + str( len( self.real_d_lst ) ) + '\n'
s += 'Indecomposable (-1)-classes:\n'
for m1 in self.m1_lst:
s += '\t' + str( m1 ) + arrow + str( m1.mat_mul( self.M ) ) + '\n'
s += '\t#real = ' + str( len( self.real_m1_lst ) ) + '\n'
s += 'Classes of conical families:\n'
for fam in self.fam_lst:
s += '\t' + str( fam ) + arrow + str( fam.mat_mul( self.M ) ) + '\n'
s += '\t#real = ' + str( len( self.real_fam_lst ) ) + '\n'
s += 50 * '=' + '\n'
return s
| 47,232 | 34.674471 | 195 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/div_in_lattice.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Aug 11, 2016
@author: Niels Lubbes
Algorithm for computing elements in a unimodular lattice.
We use this algorithm in the context of Neron-Severi lattices
of weak del Pezzo surfaces.
See
Arxiv: "Computing curves on real rational surfaces"
'''
import time
from ns_lattice.sage_interface import sage_Combinations
from ns_lattice.sage_interface import sage_Compositions
from ns_lattice.sage_interface import sage_Partitions
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_Permutations
from ns_lattice.class_ns_tools import NSTools
from ns_lattice.class_div import Div
def get_divs( d, dc, cc, perm = False ):
    '''
    Computes divisors in unimodular lattice with prescribed intersection product.
    Parameters
    ----------
    d : Div
        object d0*e0 + d1*e1 +...+ dr*er such that
        * product signature equals (1,d.rank()-1)
        * d0>0
        * d1,...,dr<=0
    dc : int
        A positive integer.
    cc : int
        Self intersection.
    perm : boolean
        If True, then generators are permuted.
    Returns
    -------
    list<Div>
        Returns a sorted list of "Div" objects
        * c = c0*e0 + c1*e1 +...+ cr*er
        such that
        * d.rank() == r+1
        * dc == d*c (signature = (1,rank-1))
        * cc == c*c (signature = (1,rank-1))
        and each Div object satisfies exactly one
        of the following conditions:
        * c == ei - ej for 0>i>j>=r,
        * c == ei for i>0, or
        * c0 > 0, c1,...,cr <= 0
        If "perm" is False, then only one representative
        for each c is returned up to permutation of ei for i>0.
        For example, e0-e1-e2 and e0-e1-e3 are considered equivalent,
        and only e0-e1-e2 is returned, since e0-e1-e2>e0-e1-e3
        (see "get_div_set()" for the ordering). In particular,
        c1 >= c2 >= ... >= cr.
    Note
    ----
    If d=[3]+8*[-1], (dc,cc)==(0,-2) and perm=False
    then the Div classes are
    '12', '1123', '212' and '308'.
    See "Div.get_label()" for the notation.
    These classes correspond to the (-2)-classes
    in the Neron-Severi lattice associated to
    a weak del Pezzo surface.
    If perm==False then only one representative
    for each q is returned up to permutation of
    ei for i>0. For example, e0-e1-e2 and e0-e1-e3
    are considered equivalent, and only e0-e1-e2
    is returned, since e0-e1-e2>e0-e1-e3
    (see "Div.__lt__()" for the ordering).
    '''
    # check if input was already computed
    #
    key = 'get_divs_' + str( ( d, dc, cc, perm ) )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    # construct div set
    #
    NSTools.p( 'Constructing div set classes for ', ( d, dc, cc, perm ) )
    out_lst = []
    # compute classes of the form ei or ei-ej for i,j>0
    #
    if ( dc, cc ) == ( 1, -1 ) or ( dc, cc ) == ( 0, -2 ):
        m2_lst = [] # list of divisors of the form ei-ej for i,j>0
        m1_lst = [] # list of divisors of the form ei for i>0
        if perm:
            # Example:
            # >>> list(Combinations( [1,2,3,4], 2 ))
            # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
            # Notice that r=d.rank()-1 if c = c0*e0 + c1*e1 +...+ cr*er.
            #
            for comb in sage_Combinations( range( 1, d.rank() ), 2 ):
                m2_lst += [ Div.new( str( comb[0] ) + str( comb[1] ), d.rank() ) ]
            m1_lst += [Div.new( 'e' + str( i ), d.rank() ) for i in range( 1, d.rank() )]
        else:
            # up to permutation of the generators
            # we may assume that i==1 and j==2.
            #
            m2_lst += [ Div.new( '12', d.rank() ) ]
            m1_lst += [ Div.new( 'e1', d.rank() ) ]
        # add the classes that satisfy return
        # specification to the output list
        #
        for c in m1_lst + m2_lst:
            if ( dc, cc ) == ( d * c, c * c ):
                out_lst += [c]
    # Enumerate candidate coefficients c0 of e0 in increasing order;
    # the loop is terminated via the Cauchy-Schwarz criterion below.
    #
    # Note: cc = c0^2 - c1^2 -...- cr^2
    #
    c0 = 0
    cur_eq_diff = -1
    while True:
        c0 = c0 + 1
        dc_tail = d[0] * c0 - dc # = d1*c1 +...+ dr*cr
        dd_tail = d[0] ** 2 - d * d # = d1^2 +...+ dr^2
        cc_tail = c0 ** 2 - cc # = c1^2 +...+ cr^2
        # not possible according to io-specs.
        #
        if dc_tail < 0 or dd_tail < 0 or cc_tail < 0:
            NSTools.p( 'continue... (c0, dc_tail, dd_tail, cc_tail) =', ( c0, dc_tail, dd_tail, cc_tail ) )
            if dd_tail < 0:
                raise Exception( 'dd_tail =', dd_tail )
            continue
        # Cauchy-Schwarz inequality <x,y>^2 <= <x,x>*<y,y> holds?
        #
        prv_eq_diff = cur_eq_diff
        cur_eq_diff = abs( dc_tail * dc_tail - dd_tail * cc_tail )
        if prv_eq_diff == -1:
            prv_eq_diff = cur_eq_diff
        NSTools.p( 'prv_eq_diff =', prv_eq_diff, ', cur_eq_diff =', cur_eq_diff, ', dc_tail^2 =', dc_tail * dc_tail, ', dd_tail*cc_tail =', dd_tail * cc_tail, ', (c0, dc_tail, dd_tail, cc_tail) =', ( c0, dc_tail, dd_tail, cc_tail ) )
        if prv_eq_diff < cur_eq_diff and dc_tail * dc_tail > dd_tail * cc_tail:
            NSTools.p( 'stop by Cauchy-Schwarz inequality...' )
            break # out of while loop
        # obtain all possible [d1*c1+1,...,dr*cr+1]
        #
        r = d.rank() - 1
        if perm and len( set( d[1:] ) ) != 1:
            p_lst_lst = sage_Compositions( dc_tail + r, length = r )
        else:
            p_lst_lst = sage_Partitions( dc_tail + r, length = r )
        # data for ETA computation
        total = len( p_lst_lst )
        counter = 0
        ival = 5000
        # obtain [c1,...,cr] from [d1*c1+1,...,dr*cr+1]
        #
        for p_lst in p_lst_lst:
            # ETA: report estimated remaining time every "ival" iterations
            if counter % ival == 0:
                start = time.time()
            counter += 1
            if counter % ival == 0:
                passed_time = time.time() - start
                NSTools.p( 'ETA in minutes =', passed_time * ( total - counter ) / ( ival * 60 ), ' (', counter, '/', total, '), c0 =', c0, ', prv_eq_diff =', prv_eq_diff, ', cur_eq_diff =', cur_eq_diff )
            # dc_tail=d1*c1 +...+ dr*cr = p1 +...+ pr with pi>=0
            p_lst = [ p - 1 for p in p_lst]
            # obtain c_tail=[c1,...,cr] from [p1,...,pr]
            valid_part = True
            c_tail = [] # =[c1,...,cr]
            for i in range( 0, len( p_lst ) ):
                if p_lst[i] == 0 or d[i + 1] == 0:
                    c_tail += [p_lst[i]]
                else:
                    # pi must be an integer multiple of d(i+1)
                    quo, rem = sage_ZZ( p_lst[i] ).quo_rem( d[i + 1] )
                    if rem != 0:
                        valid_part = False
                        break # out of i-for-loop
                    else:
                        c_tail += [ quo ]
            if not valid_part:
                continue
            # add to out list if valid
            #
            c = Div( [c0] + c_tail )
            if c.rank() == d.rank() and ( dc, cc ) == ( d * c, c * c ):
                if perm and len( set( d[1:] ) ) == 1:
                    # since d1==...==dr we do not have to
                    # check each permutation.
                    for pc_tail in sage_Permutations( c_tail ):
                        out_lst += [Div( [c0] + list( pc_tail ) )]
                else:
                    out_lst += [c]
    # sort list of "Div" objects
    out_lst.sort()
    # cache output
    NSTools.get_tool_dct()[key] = out_lst
    NSTools.save_tool_dct()
    return out_lst
def get_indecomp_divs( c_lst, d_lst ):
    '''
    Filters the classes in "c_lst" that intersect every class in
    "d_lst" non-negatively.
    Parameters
    ----------
    c_lst : list<Div>
        Typically output of "get_divs(...)".
    d_lst : list<Div>
        Typically a list of (-2)-classes.
    Returns
    -------
    list<Div>
        The "Div" objects c in c_lst such that c*d >= 0
        for all d in "d_lst" (order of c_lst is preserved).
    Note
    ----
    If the Div objects represent effective divisor classes in
    the Neron-Severi lattice of a weak del Pezzo surface and
    d_lst are the classes of singularities, then the output
    corresponds to "indecomposable" classes. Such classes cannot
    be written as the sum of effective divisors.
    '''
    return [ c for c in c_lst if all( d * c >= 0 for d in d_lst ) ]
def get_ak( rank ):
    '''
    Parameters
    ----------
    rank : int
    Returns
    -------
    Div
        A Div object of given rank of the form
            3e0 - e1 - ... - er
        Mathematically this is the anticanonical class
        of the blowup of the projective plane.
    '''
    coef_lst = [3] + [ -1 for _ in range( rank - 1 ) ]
    return Div( coef_lst )
| 9,393 | 31.061433 | 233 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/ns_basis.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 9, 2017
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_identity_matrix
from ns_lattice.sage_interface import sage_matrix
from ns_lattice.sage_interface import sage_ZZ
from ns_lattice.sage_interface import sage_Permutations
from ns_lattice.sage_interface import sage_Subsets
from ns_lattice.class_div import Div
from ns_lattice.div_in_lattice import get_indecomp_divs
from ns_lattice.div_in_lattice import get_ak
from ns_lattice.div_in_lattice import get_divs
from ns_lattice.class_dp_lattice import DPLattice
from ns_lattice.class_eta import ETA
from ns_lattice.class_ns_tools import NSTools
def get_bases_lst( a_lst, M, d_lst, m1_lst, perm = False ):
    '''
    Returns a list of basis with specified generators.
    Parameters
    ----------
    a_lst : list<Div>
        A list of linear independent Div objects of
        the same rank with 3<=rank<=9.
        It is required that
        "set(a_lst)==set([ a.mat_mul(M) for a in a_lst ])".
    M : sage_matrix<sage_ZZ>
        A unimodular matrix representing an involution.
    d_lst : list<Div>
        A list of Div objects d of the same rank as any
        element in "a_lst", so that "d*k==0" and "d*d==-2".
        These represent a root basis for the indecomposable
        (-2)-classes in the Neron-Severi lattice of a
        weak del Pezzo surface.
    m1_lst : list<Div>
        A list of Div objects d of the same rank as any
        element in "a_lst", so that "d*k==d*d==-1".
        These represent (-1)-classes in the Neron-Severi
        lattice of a weak del Pezzo surface.
    perm : bool
        If False, then we consider two bases the same if the
        generators of the first basis can be obtained from
        the second basis via a permutation matrix.
    Returns
    -------
    list<tuple<Div>>
        A list of tuples of Div objects. Each tuple of Div objects
        represents a basis for the Neron-Severi lattice determined
        by d_lst and m1_lst. The bases are of the form
            < a1,...,as, b1,...,bt >
        with the following property
            * a1,...,as are defined by the input "a_lst"
            * bi is an element in m1_lst such that bi*bj=am*bi=0
              for all 1<=i<j<=t and 1<=m<=s
        If "a_lst==[]" then "[[]]" is returned.
    '''
    # check cache first
    key = 'get_bases_lst__' + str( ( a_lst, M, d_lst, m1_lst, perm ) ) + '__' + str( M.rank() )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    # base cases of the recursion
    if a_lst == []:
        return [[]]
    if len( a_lst ) == a_lst[0].rank():
        return [tuple( a_lst )]
    # collect candidate generators e: indecomposable (-1)-classes that
    # are orthogonal to every element of a_lst
    e_lst = []
    for m1 in get_indecomp_divs( m1_lst, d_lst ):
        if set( [ m1 * a for a in a_lst ] ) != {0}:
            continue
        # skip candidates that intersect their involution image positively
        if m1 * m1.mat_mul( M ) > 0:
            continue
        e_lst += [m1]
    bas_lst = []
    for e in e_lst:
        # extend a_lst with e (and its image Me, if different) and recurse
        # on the classes orthogonal to both
        Me = e.mat_mul( M )
        new_d_lst = [ d for d in d_lst if d * e == d * Me == 0 ]
        new_m1_lst = [ m1 for m1 in m1_lst if m1 * e == m1 * Me == 0 ]
        add_lst = [e]
        if e != Me: add_lst += [Me]
        bas2_lst = get_bases_lst( a_lst + add_lst, M, new_d_lst, new_m1_lst, perm )
        if perm:
            bas_lst += bas2_lst
        else:
            for bas2 in bas2_lst:
                found = False
                for bas in bas_lst:
                    # check whether the two bases are the same up to
                    # permutation of generators
                    if set( bas ) == set( bas2 ):
                        found = True
                        break  # break out of nearest for loop
                if not found:
                    NSTools.p( 'found new basis: ', bas2, ', bas2_lst =', bas2_lst )
                    bas_lst += [bas2]
    # cache output
    NSTools.get_tool_dct()[key] = bas_lst
    NSTools.save_tool_dct()
    return bas_lst
def get_webs( dpl ):
    '''
    Returns lists of families of conics for each possible complex basis change.
    The n-th family in each list correspond to a fixed family wrt.
    different bases for each n.
    Parameters
    ----------
    dpl : DPLattice
        Represents the Neron-Severi lattice of a weak del Pezzo surface.
    Returns
    -------
    list<list<Div>>
        A list of lists of Div objects.
        Each Div object f has the property that
        f*(3e0-e1-...-er)=2, f*f==0 and f*d>=0 for all d in dpl.d_lst.
        Such a Div object corresponds geometrically to a family of conics.
        For each index i, the i-th entry of each list of Div object corresponds
        to the same family of conics.
    '''
    # check cache first
    key = 'get_webs__' + str( dpl ).replace( '\n', '---' )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    ak = get_ak( dpl.get_rank() )
    # all classes c with c*ak==1 and c*c==-1 (see "get_divs()")
    all_m1_lst = get_divs( ak, 1, -1, True )
    # basis candidates e0 satisfy e0*ak==3 and e0*e0==1
    akc, cc = ( 3, 1 )
    M = sage_identity_matrix( dpl.get_rank() )
    fam_lst_lst = []
    for e0 in get_divs( ak, akc, cc, True ):
        NSTools.p( 'e0 =', e0 )
        for B_lst in get_bases_lst( [e0], M, dpl.d_lst, all_m1_lst, True ):
            # change to the basis determined by B_lst and collect the
            # real families of conics in that basis
            B = sage_matrix( sage_ZZ, [ d.e_lst for d in B_lst ] )
            dplB = dpl.get_basis_change( B )
            fam_lst_lst += [ dplB.real_fam_lst ]
    # reduce fam_lst: keep only lists with a new 0/1-pattern of leading
    # coefficients
    pat_lst_lst = []
    rfam_lst_lst = []
    for fam_lst in fam_lst_lst:
        pat_lst = [ 0 if fam[0] != 1 else 1 for fam in fam_lst ]
        if pat_lst not in pat_lst_lst:
            pat_lst_lst += [ pat_lst ]
            rfam_lst_lst += [ fam_lst ]
    # cache output
    NSTools.get_tool_dct()[key] = rfam_lst_lst
    NSTools.save_tool_dct()
    return rfam_lst_lst
def contains_perm( f_lst_lst, c_lst ):
    '''
    Parameters
    ----------
    f_lst_lst : list<list<Div>>
        A list of lists containing Div objects.
    c_lst : list<Div>
        A list of Div objects.
    Returns:
    --------
    bool
        True if "c_lst", after some permutation of the generators
        (e1,...,er), is contained in "f_lst_lst" (order inside the
        lists is ignored).
        For example, [ e0-e1, 2e0-e2-e3-e4-e5 ] is contained in
        [ ..., [e0-e2, 2e0-e1-e3-e4-e5], ... ].
    '''
    if c_lst == []:
        return [] in f_lst_lst
    # precompute the set representation of each entry once
    f_set_lst = [ set( f_lst ) for f_lst in f_lst_lst ]
    for perm in sage_Permutations( range( c_lst[0].rank() - 1 ) ):
        # apply the permutation to the e1,...,er coefficients of each class
        pc_set = set( Div( [c[0]] + [ c[i + 1] for i in perm ], c.rank() ) for c in c_lst )
        if pc_set in f_set_lst:
            return True
    return False
def triples( dpl, mval ):
    '''
    Parameters
    ----------
    dpl : DPLattice
    mval : integer
    Returns
    -------
    list<(Div,Div,Div)>
        List of triples in "dpl.fam_lst":
            [ (a,b,c),... ]
        so that
        (1) There does not exists e in "dpl.m1_lst"
            with the property that a*e==b*e==c*e==0.
        (2) 1 <= max( a*b, a*c, b*c ) <= mval.
    '''
    # check cache first
    key = 'triples__' + str( dpl ).replace( '\n', '---' ) + '---' + str( mval )
    if key in NSTools.get_tool_dct():
        return NSTools.get_tool_dct()[key]
    f_lst = dpl.fam_lst
    e_lst = dpl.m1_lst
    # obtain list of triples (a,b,c) in f_lst
    # that are not orthogonal to any element in e_lst
    t_lst = []
    idx_lst_lst = sage_Subsets( range( len( f_lst ) ), 3 )
    eta = ETA( len( idx_lst_lst ), 500000 )
    for idx_lst in idx_lst_lst:
        eta.update( 't_lst' )
        t = [ f_lst[idx] for idx in idx_lst ]
        # reject triples whose pairwise intersections exceed mval
        if t[0] * t[1] > mval: continue
        if t[0] * t[2] > mval: continue
        if t[1] * t[2] > mval: continue
        # elements in f_lst correspond to divisor classes of curves on a
        # surface and thus t[i]*t[j]>=1 for all i,j \in {0,1,2} so that i!=j.
        cont = False
        for e in e_lst:
            if [f * e for f in t] == [0, 0, 0]:
                cont = True
                break
        if cont: continue
        # keep one representative per permutation-equivalence class
        if not contains_perm( t_lst, t ):
            t_lst += [t]
            NSTools.p( 't_lst =', t_lst )
    # cache output
    NSTools.get_tool_dct()[key] = t_lst
    NSTools.save_tool_dct()
    return t_lst
| 8,302 | 30.214286 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/class_ns_tools.py | '''
Use of this source code is governed by a MIT-style license that can be found in the LICENSE file.
Created on Feb 7, 2017
@author: Niels Lubbes
'''
from ns_lattice.sage_interface import sage_load
from ns_lattice.sage_interface import sage_save
import inspect
import time
import sys
import os
class NSTools():
    '''
    Collection of static helper methods for caching computed data,
    filtered debug output and timing.
    For accessing static variables in python see for example:
    <http://stackoverflow.com/questions/68645/static-class-variables-in-python>
    '''

    # Private dictionary object for caching results,
    # used by ".get_tool_dct()" and ".save_tool_dct()".
    # If "__enable_tool_dct" is False then caching is
    # disabled. This is useful for example in test
    # methods. However, it should be noted that it
    # could take a long time to compute the data.
    #
    __tool_dct = None
    __enable_tool_dct = True

    # private variables for the timer
    #
    __start_time = None
    __end_time = None

    # private static variables used by ".p()"
    # If "__filter_fname_lst" equals [] then all output is surpressed.
    # If "__filter_fname_lst" equals None then no output is surpressed.
    #
    __filter_fname_lst = []
    __prev_filter_fname_lst = None

    @staticmethod
    def filter( filter_fname_lst ):
        '''
        It is adviced to access this method statically as ".filter()".
        See ".p()" for more details.
        Parameters
        ----------
        filter_fname_lst : list<str>
            List of file names of Python modules whose output
            via ".p()" is not surpressed.
            If None, then no output is surpressed by method ".p()".
        '''
        NSTools.__filter_fname_lst = filter_fname_lst
        NSTools.__prev_filter_fname_lst = filter_fname_lst

    @staticmethod
    def filter_unset():
        '''
        Output via ".p()" will not be surpressed.
        '''
        NSTools.__filter_fname_lst = None

    @staticmethod
    def filter_reset():
        '''
        Resets filter state to before previous ".filter_unset()" call.
        '''
        NSTools.__filter_fname_lst = NSTools.__prev_filter_fname_lst

    @staticmethod
    def p( *arg_lst ):
        '''
        Parameters
        ----------
        *arg_lst
            Variable length argument list.
        Returns
        -------
        string
            If ".filter(<fname_lst>)" has been called and the file name
            of the calling module is not contained in <fname_lst>,
            then the output is surpressed and "None" is returned.
            Otherwise, this method prints arguments to "sys.stdout"
            together with reflection info from "inspect.stack()",
            and additionally returns the output string.
            Call ".filter_unset()" to turn off the filter, such that
            all output is sent to "sys.stdout".
        '''
        # collect relevant info from stack trace
        sk_lst_lst = inspect.stack()
        file_name = os.path.basename( str( sk_lst_lst[1][1] ) )  # exclude path from file name
        line = str( sk_lst_lst[1][2] )
        method_name = str( sk_lst_lst[1][3] )
        # only output when .p() is called from a module whose
        # file name is in .__filter_fname_lst
        if NSTools.__filter_fname_lst is not None:
            if file_name not in NSTools.__filter_fname_lst:
                return
        # construct output string
        s = method_name + '(' + line + ')' + ': '
        for arg in arg_lst:
            s += str( arg ) + ' '
        # print output
        print( s )
        sys.stdout.flush()
        return s

    @staticmethod
    def set_enable_tool_dct( enable_tool_dct ):
        '''
        Enables or disables caching via ".get_tool_dct()" and
        ".save_tool_dct()".
        Parameters
        ----------
        enable_tool_dct : bool
        '''
        NSTools.filter_unset()
        NSTools.p( 'Caching enabled: ', enable_tool_dct )
        NSTools.filter_reset()
        NSTools.__enable_tool_dct = enable_tool_dct

    @staticmethod
    def get_tool_dct( fname = 'ns_tools' ):
        '''
        Parameters
        ----------
        fname : str
            Name of file without extension.
        Returns
        -------
        dct
            Sets static private variable "__tool_dct"
            in memory from file "<local path>/<fname>.sobj"
            if called for the first time.
            Returns ".__tool_dct" if ".__enable_tool_dct==True"
            and "{}" otherwise.
        '''
        if not NSTools.__enable_tool_dct:
            return {}
        path = os.path.dirname( os.path.abspath( __file__ ) ) + '/'
        file_name = path + fname
        # lazily load the cache from disk on first access
        if NSTools.__tool_dct is None:
            try:
                NSTools.p( 'Loading from:', file_name )
                NSTools.__tool_dct = sage_load( file_name )
            except Exception as e:
                NSTools.filter_unset()
                NSTools.p( 'Cannot load ".__tool_dct": ', e )
                NSTools.filter_reset()
                NSTools.__tool_dct = {}
        return NSTools.__tool_dct

    @staticmethod
    def save_tool_dct( fname = 'ns_tools' ):
        '''
        Saves ".__tool_dct" to "fname" if ".__enable_tool_dct==True",
        otherwise does nothing.
        Parameters
        ----------
        fname : str
            Name of file without extension.
        '''
        if not NSTools.__enable_tool_dct:
            return
        path = os.path.dirname( os.path.abspath( __file__ ) ) + '/'
        file_name = path + fname
        NSTools.p( 'Saving to:', file_name )
        sage_save( NSTools.__tool_dct, file_name )

    @staticmethod
    def start_timer():
        '''
        Prints the current wall clock time and starts the timer.
        '''
        NSTools.__start_time = time.time()  # set static variable
        NSTools.filter_unset()
        NSTools.p( 'start time =', NSTools.__start_time )
        NSTools.filter_reset()

    @staticmethod
    def end_timer():
        '''
        Prints wall clock time passed since last call of ".start_timer()".
        '''
        NSTools.__end_time = time.time()
        passed_time = NSTools.__end_time - NSTools.__start_time
        NSTools.filter_unset()
        NSTools.p( 'time passed =', passed_time )
        NSTools.filter_reset()
| 6,196 | 27.689815 | 97 | py |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/__init__.py | 1 | 0 | 0 | py | |
ns_lattice | ns_lattice-master/ns_lattice/src/ns_lattice/sage_interface.py | '''
Use of this source code is governed by a
MIT-style license that can be found in the
LICENSE file.
Created on Jul 12, 2017
@author: Niels Lubbes
Sage has a complicated import structure and it
is not possible to simply import each need
module. It seems that "from sage.all import *"
is the only option. Therefore we introduce an
interface to Sage so that in the code, it is
clear, which libraries of Sage we use. Moreover,
we specify below from which modules in the Sage
library we import.
We explain the naming scheme with the following
two examples. The interface method for
"PolynomialRing()" is called
"sage_PolynomialRing()". However the interface
method for "sage_eval()" is not called
"sage_sage_eval()" but instead "sage__eval()".
The variable "ZZ" is called "sage_ZZ".
For the Parameters section in the documentation
of types we will use the following abbrevations:
sage_POLY:
sage.rings.polynomial.multi_polynomial_element.MPolynomial_polydict
The type of an element in sage_PolynomialRing
sage_RING:
sage.rings.*
The type of a ring. For example sage_QQ or sage_ZZ or sage_NumberField.
sage_GRAPH:
sage.graphs.graph
The type of a Graph.
'''
from sage.all import *
from sage.structure.sage_object import register_unpickle_override
#################################################
# sage.structure #
#################################################
# from sage.structure.proof.proof import proof
sage_proof = proof
# from sage.structure.sage_object import save
def sage_save( *args, **kwargs ):
    '''Interface to Sage's "save"; forwards all arguments.'''
    return save( *args, **kwargs )
# from sage.structure.sage_object import load
def sage_load( *args, **kwargs ):
    '''Interface to Sage's "load"; forwards all arguments.'''
    return load( *args, **kwargs )
# from sage.structure.sage_object import register_unpickle_override
def sage_register_unpickle_override( *args, **kwargs ):
    '''Interface to Sage's "register_unpickle_override"; forwards all arguments (no return value).'''
    register_unpickle_override( *args, **kwargs )
#################################################
# sage.misc #
#################################################
# from sage.misc.sage_eval import sage_eval
def sage__eval( *args, **kwargs ):
    '''Interface to Sage's "sage_eval"; forwards all arguments.'''
    return sage_eval( *args, **kwargs )
# from sage.misc.functional import n
def sage_n( *args, **kwargs ):
    '''Interface to Sage's numerical approximation "n"; forwards all arguments.'''
    return n( *args, **kwargs )
#################################################
# sage.symbolic #
#################################################
# from sage.symbolic.ring import SR
sage_SR = SR
# from sage.symbolic.relation import solve
def sage_solve( *args, **kwargs ):
    '''Interface to Sage's "solve"; forwards all arguments.'''
    return solve( *args, **kwargs )
#################################################
# sage.rings #
#################################################
# from sage.rings.integer_ring import ZZ
sage_ZZ = ZZ
# from sage.rings.rational_field import QQ
sage_QQ = QQ
# import sage.rings.invariant_theory
sage_invariant_theory = invariant_theory
# from sage.rings.fraction_field import FractionField
def sage_FractionField( *args, **kwargs ):
    '''Interface to Sage's "FractionField"; forwards all arguments.'''
    return FractionField( *args, **kwargs )
# from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
# http://doc.sagemath.org/html/en/reference/polynomial_rings/sage/rings/polynomial/polynomial_ring_constructor.html
def sage_PolynomialRing( *args, **kwargs ):
    '''Interface to Sage's "PolynomialRing"; forwards all arguments.'''
    return PolynomialRing( *args, **kwargs )
# from sage.rings.number_field.number_field import NumberField
def sage_NumberField( *args, **kwargs ):
    '''Interface to Sage's "NumberField"; forwards all arguments.'''
    return NumberField( *args, **kwargs )
#################################################
# sage.modules #
#################################################
# from sage.modules import VectorSpace
def sage_VectorSpace( *args, **kwargs ):
    '''Interface to Sage's "VectorSpace"; forwards all arguments.'''
    return VectorSpace( *args, **kwargs )
#################################################
# sage.matrix #
#################################################
# from sage.matrix.constructor import matrix
def sage_matrix( *args, **kwargs ):
    '''Interface to Sage's "matrix" constructor; forwards all arguments.'''
    return matrix( *args, **kwargs )
# from sage.matrix.constructor import identity_matrix
def sage_identity_matrix( *args, **kwargs ):
    '''Interface to Sage's "identity_matrix"; forwards all arguments.'''
    return identity_matrix( *args, **kwargs )
# from sage.matrix.constructor import diagonal_matrix
def sage_diagonal_matrix( *args, **kwargs ):
    '''Interface to Sage's "diagonal_matrix"; forwards all arguments.'''
    return diagonal_matrix( *args, **kwargs )
# from sage.matrix.constructor import vector
def sage_vector( *args, **kwargs ):
    '''Interface to Sage's "vector" constructor; forwards all arguments.'''
    return vector( *args, **kwargs )
#################################################
# sage.arith #
#################################################
# from sage.arith.misc import factor
def sage_factor( *args, **kwargs ):
    '''Interface to Sage's "factor"; forwards all arguments.'''
    return factor( *args, **kwargs )
# from sage.arith.misc import gcd
def sage_gcd( *args, **kwargs ):
    '''Interface to Sage's "gcd"; forwards all arguments.'''
    return gcd( *args, **kwargs )
#################################################
# sage.calculus #
#################################################
# from sage.calculus.functional import diff
def sage_diff( *args, **kwargs ):
    '''Interface to Sage's "diff"; forwards all arguments.'''
    return diff( *args, **kwargs )
# from sage.calculus.functional import expand
def sage_expand( *args, **kwargs ):
    '''Interface to Sage's "expand"; forwards all arguments.'''
    return expand( *args, **kwargs )
# from sage.calculus.var import var
def sage_var( *args, **kwargs ):
    '''Interface to Sage's "var"; forwards all arguments.'''
    return var( *args, **kwargs )
#################################################
# sage.combinat #
#################################################
# from sage.combinat.composition import Compositions
def sage_Compositions( *args, **kwargs ):
    '''Interface to Sage's "Compositions"; forwards all arguments.'''
    return Compositions( *args, **kwargs )
# from sage.combinat.combination import Combinations
def sage_Combinations( *args, **kwargs ):
    '''Interface to Sage's "Combinations"; forwards all arguments.'''
    return Combinations( *args, **kwargs )
# from sage.combinat.partitions import Partitions
def sage_Partitions( *args, **kwargs ):
    '''Interface to Sage's "Partitions"; forwards all arguments.'''
    return Partitions( *args, **kwargs )
# from sage.combinat.permutation import Permutations
def sage_Permutations( *args, **kwargs ):
    '''Interface to Sage's "Permutations"; forwards all arguments.'''
    return Permutations( *args, **kwargs )
# from sage.combinat.subset import Subsets
def sage_Subsets( *args, **kwargs ):
    '''Interface to Sage's "Subsets"; forwards all arguments.'''
    return Subsets( *args, **kwargs )
# from sage.combinat.root_system.root_system import RootSystem
def sage_RootSystem( *args, **kwargs ):
    '''Interface to Sage's "RootSystem"; forwards all arguments.'''
    return RootSystem( *args, **kwargs )
#################################################
# sage.graphs #
#################################################
# from sage.graphs.graph import Graph
def sage_Graph( *args, **kwargs ):
    '''Interface to Sage's "Graph" constructor; forwards all arguments.'''
    return Graph( *args, **kwargs )
| 6,555 | 28.936073 | 115 | py |
MORSE | MORSE-master/Generate_Params_Par.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import multiprocessing as mp
import traceback
import logging
import gc
import time
from argparse import ArgumentParser
import numpy
from matplotlib import pyplot
#from joblib import Parallel, delayed
from scipy.signal import argrelextrema
import multiprocessing
import itertools
from constants import c, G, Msun, rho_0, rho_ns, rho_1, rho_2, rho_3, P_0
def generate_params(n, low_gamma=0.5, high_gamma=6.5, Plow=(33.5, 34.5, 35.), Phigh=(34.8, 36., 37.)):
    """
    Generates an array of combinations of P1, P2 and P3, making sure that P3 > P2 > P1.
    It also rejects combinations whose polytropic indices fall outside
    [low_gamma, high_gamma].
    Args:
        n (int) : The number of points in logspace for each parameter.
        low_gamma (float): The lower limit on the polytropic index, default is 0.5.
        high_gamma (float): The upper limit on the polytropic index, default is 6.5.
        Plow (sequence) : The three lower limits for P1, P2 and P3 in log10,
                          default is (33.5, 34.5, 35.).
        Phigh (sequence): The three upper limits for P1, P2 and P3 in log10,
                          default is (34.8, 36., 37.).
    Returns:
        out (ndarray) : An array with all the admissible combinations
                        of (P1, P2, P3), one triple per row.
    """
    # Note: the defaults are now tuples instead of lists to avoid the
    # mutable-default-argument pitfall; callers may still pass lists.
    # one logarithmic grid per pressure parameter
    grid_lst = [numpy.logspace(lo, hi, n) for lo, hi in zip(Plow, Phigh)]
    # keep only strictly ordered triples P1 < P2 < P3
    permutations = numpy.array([t for t in itertools.product(*grid_lst)
                                if t[0] < t[1] < t[2]])
    # reject triples whose piecewise-polytropic indices are out of range;
    # the gammas follow from continuity of the pressure at the dividing
    # densities rho_1, rho_2, rho_3 (constants imported at module level)
    keep = []
    for row in permutations:
        P_1, P_2, P_3 = row
        gamma_1 = numpy.log10(P_1/P_0) / numpy.log10(rho_1/rho_0)
        gamma_2 = numpy.log10(P_2/P_1) / numpy.log10(rho_2/rho_1)
        gamma_3 = numpy.log10(P_3/P_2) / numpy.log10(rho_3/rho_2)
        keep.append(all(low_gamma <= g <= high_gamma
                        for g in (gamma_1, gamma_2, gamma_3)))
    return permutations[numpy.array(keep, dtype=bool)]
def calc_causal_limit(rho, P_1, P_2, P_3):
    """
    Check whether the piecewise-polytrope EOS (P_1, P_2, P_3) is still causal
    at density `rho`.

    Args:
        rho (float): The density at which causality is checked.
        P_1 (float): The first parameter of the EOS.
        P_2 (float): The second parameter of the EOS.
        P_3 (float): The third parameter of the EOS.

    Returns:
        float: `rho` when the sound-speed criterion is satisfied, 0.0 otherwise.
    """
    gamma_1 = numpy.log10(P_1/P_0) / numpy.log10(rho_1/rho_0)
    gamma_2 = numpy.log10(P_2/P_1) / numpy.log10(rho_2/rho_1)
    gamma_3 = numpy.log10(P_3/P_2) / numpy.log10(rho_3/rho_2)
    # Integration constants that keep the energy density continuous at the
    # segment boundaries rho_0, rho_1, rho_2.
    epsilon_0 = rho_0 + P_0/c**2.0 * 1./1.7
    a_1 = epsilon_0/(rho_0) - 1. - P_1/((gamma_1 -1.)*rho_0*c**2.0) * (rho_0/rho_1)**gamma_1
    a_2 = a_1 + P_1/((gamma_1 -1.)*rho_1*c**2.0) - P_1/((gamma_2 -1.)*rho_1*c**2.0)
    a_3 = a_2 + P_1/((gamma_2 -1.)*rho_2*c**2.0) * (rho_2/rho_1)**gamma_2 - P_2/((gamma_3 -1.) * rho_2*c**2.0)
    acausal = False
    if rho_0 < rho <= rho_1:
        pres = P_1 * (rho/rho_1)**gamma_1
        epsilon = (1. + a_1) * rho + P_1/((gamma_1 - 1.)*c**2.) *(rho/rho_1)**gamma_1
        # NOTE: threshold 1.12 (not 1.0) deliberately allows a mildly
        # superluminal sound speed -- TODO confirm against the reference used.
        acausal = gamma_1*pres/(epsilon*c**2. + pres) > 1.12
    elif rho_1 < rho <= rho_2:
        pres = P_1 * (rho/rho_1)**gamma_2
        epsilon = (1. + a_2) * rho + P_1/((gamma_2 - 1.)*c**2.) *(rho/rho_1)**gamma_2
        acausal = gamma_2*pres/(epsilon*c**2. + pres) > 1.12
    elif rho > rho_2:
        pres = P_2 * (rho/rho_2)**gamma_3
        epsilon = (1. + a_3) * rho + P_2/((gamma_3 - 1.)*c**2.) *(rho/rho_2)**gamma_3
        acausal = gamma_3*pres/(epsilon*c**2. + pres) > 1.12
    return 0.0 if acausal else rho
def calc_maxrho(parameters, n=1000, low_lim=14.31, up_lim=16.5):
    """
    For every EOS in `parameters`, find the largest central density that is
    still causal according to calc_causal_limit.

    Args:
        parameters (ndarray): EOS parameters, shape (q, 3) for q EOSs.
        n (int)             : Number of logspace trial densities, default 1000.
        low_lim (float)     : log10 of the lowest trial central density
                              (must exceed 14.3), default 14.31.
        up_lim (float)      : log10 of the highest trial central density,
                              default 16.5.

    Returns:
        ndarray: Maximum causal central density per EOS, length q. An entry is
        0.0 when even the lowest trial density is acausal, and 10**up_lim when
        no trial density violates causality.
    """
    trial_rhos = numpy.logspace(low_lim, up_lim, n)
    max_rho = numpy.zeros(len(parameters))
    for idx, (p1, p2, p3) in enumerate(parameters):
        causal = numpy.array([calc_causal_limit(r, p1, p2, p3) for r in trial_rhos])
        if causal[0] == 0.0:
            max_rho[idx] = 0.0
            continue
        violations = numpy.where(causal == 0.0)[0]
        if violations.size == 0:
            max_rho[idx] = 10**(up_lim)
        else:
            # Last causal density just below the first violation.
            max_rho[idx] = causal[violations[0]-1]
    return max_rho
# Module-level shortcut to the multiprocessing logger's info method (used by worker);
# requires multiprocessing to be importable under the name `mp`.
info = mp.get_logger().info
def main(n, low_lim, up_lim):
    """
    Fan the causality scan over all CPU cores and save the results.

    Args:
        n (int)        : Number of logspace values per pressure parameter.
        low_lim (float): Forwarded to generate_params as low_gamma (the name
                         is misleading: this is a polytropic-index bound, not
                         a density limit).
        up_lim (float) : Forwarded to generate_params as high_gamma.

    Side effects:
        Writes 'max_rho.npy' and 'input_parameters.npy' in the working directory.
    """
    logger = mp.log_to_stderr()
    logger.setLevel(logging.INFO)
    parameters = generate_params(n, low_lim, up_lim)
    print len(parameters)
    nproc = mp.cpu_count()# - 1
    nproc = max(1, nproc)
    # One chunk of the parameter grid per worker process.
    div_par = numpy.array_split(parameters, nproc)
    ntasks = nproc
    # Each task carries its index `t` so results can be re-ordered later.
    inputs = [[div_par[t], t] for t in xrange(ntasks)]
    input_q = mp.Queue()
    output_q = mp.Queue()
    procs = [ mp.Process(target=worker, args=(input_q,output_q)) for i in xrange(nproc)]
    for i in xrange(ntasks):
        input_q.put(inputs[i])
    # One 'STOP' sentinel per worker terminates its loop.
    for i in xrange(nproc):
        input_q.put('STOP')
    for p in procs:
        p.start()
    result = []
    # Drain the output queue before joining to avoid a queue-full deadlock.
    while ntasks > 0:
        result.append(output_q.get())
        ntasks -= 1
    for p in procs:
        p.join()
    # Restore submission order (results arrive in completion order).
    result = numpy.array(sorted(result, key=lambda x: x[1]))
    max_rho_array = numpy.concatenate(result[:,0]).ravel()
    numpy.save('max_rho', max_rho_array)
    numpy.save('input_parameters', parameters)
def worker(input_q, output_q):
    """
    Worker-process loop: pull (parameters, task_index) jobs from input_q,
    run calc_maxrho on them, and push [max_rho, task_index] onto output_q.
    Stops when the 'STOP' sentinel is received. Exceptions are logged via
    `info` and the loop continues with the next job.
    """
    start = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8.
    while True:
        try:
            tmp = input_q.get()
            if 'STOP' == tmp :
                break
            parameters, task = tmp
            max_rho = calc_maxrho(parameters)
            output_q.put([max_rho, task])
        except Exception as exception:
            # A failed task produces no output entry, so main() would block
            # waiting for it -- the traceback is at least logged here.
            trace = str(traceback.format_exc())
            info(trace)
    end = (time.clock() - start)
    info(end)
    return
if __name__ == '__main__':
    parser = ArgumentParser()
    # type=int: argparse yields strings by default, and main() forwards this
    # value as the point count to generate_params/numpy.logspace, which need
    # an integer, not a string.
    parser.add_argument("-n", dest="number", type=int, help="Number of values for each parameter", required=True)
    args = parser.parse_args()
    main(args.number, 0.5, 6.5)
| 8,000 | 28.307692 | 134 | py |
MORSE | MORSE-master/constants.py | ### Constants used throughout my project code ###
# Physical constants and EOS parameterization constants, in CGS units.
c = 3e10                              # speed of light [cm/s]
G = 6.67428e-8                        # gravitational constant [cm^3 g^-1 s^-2]
Msun = 1.989e33                       # solar mass [g]
rho_0 = 10**14.3                      # crust/core matching density [g/cm^3]
rho_ns = 2.7e14                       # nuclear saturation density [g/cm^3]
rho_1 = 1.85 * rho_ns                 # first dividing density of the piecewise polytrope
rho_2 = 2. * rho_1                    # second dividing density
#rho_2 = 1.8 * rho_1
rho_3 = 2. * rho_2                    # third dividing density
P_0 = 1.5281267425e+33                # pressure at rho_0 [dyn/cm^2] -- presumably from the SLy fit; confirm
gamma_0 = 2.68019358431               # adiabatic index at rho_0 -- presumably from the SLy fit; confirm
dyncm2_to_MeVfm3 = 1./(1.6022e33)     # conversion dyn/cm^2 -> MeV/fm^3
gcm3_to_MeVfm3 = 1./(1.7827e12)       # conversion g/cm^3 -> MeV/fm^3
oneoverfm_MeV = 197.33                # hbar*c [MeV fm]
| 346 | 18.277778 | 49 | py |
MORSE | MORSE-master/PosteriorProbRho.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import multiprocessing as mp
import traceback
import logging
import gc
import numpy
from matplotlib import pyplot
from argparse import ArgumentParser
from scipy.interpolate import UnivariateSpline, RegularGridInterpolator
from scipy.integrate import simps, dblquad
import time
from constants import G, Msun, c
def calc_determinant(JacPart):
    """
    Evaluate |det| of the 6x6 Jacobian of the (M, R) observables with respect
    to (rho_c1, rho_c2, rho_c3, P_1, P_2, P_3) on every triple of usable
    central densities.

    Args:
        JacPart (ndarray): One row per central density. Based on the slicing
            below, each row appears to hold
            [rho_c, dM/drho, dR/drho, three P-partials, three more P-partials]
            -- TODO confirm the exact column layout against the producer code.

    Returns:
        tuple: (min rho_c, max rho_c, deter) where deter[p, r, w] is the
        absolute determinant for the density triple (indices p, r, w), or
        (nan, nan, nan) when no row of JacPart is fully finite.
    """
    # Keep only rows with no NaN in any column.
    indices = numpy.where(numpy.invert(numpy.isnan(JacPart).any(axis=1)))[0]
    deter = numpy.zeros((len(JacPart[indices]), len(JacPart[indices]), len(JacPart[indices])))
    derivs = numpy.zeros((6,6))
    if indices.size==0:
        return numpy.nan, numpy.nan, numpy.nan
    else:
        for p, i in enumerate(indices):
            for r, j in enumerate(indices):
                for w, k in enumerate(indices):
                    # Columns 0-2: P-partials evaluated at rho_i, rho_j, rho_k;
                    # columns 3-5: the second group of P-partials.
                    derivs[:,0][0:3] = JacPart[i][3:6]
                    derivs[:,1][0:3] = JacPart[j][3:6]
                    derivs[:,2][0:3] = JacPart[k][3:6]
                    derivs[:,3][0:3] = JacPart[i][6::]
                    derivs[:,4][0:3] = JacPart[j][6::]
                    derivs[:,5][0:3] = JacPart[k][6::]
                    # Rows 3-5: the rho-derivatives enter only on the block diagonal.
                    derivs[3][[0,3]] = JacPart[i][1:3]
                    derivs[4][[1,4]] = JacPart[j][1:3]
                    derivs[5][[2,5]] = JacPart[k][1:3]
                    deter[p, r, w] = abs(numpy.linalg.det(derivs))
        return min(JacPart[indices][:,0]), max(JacPart[indices][:,0]), deter
def calculate_norm(distribution):
    """
    Normalization constants for the three bivariate-Gaussian (M, R)
    likelihoods, obtained by integrating each unnormalized Gaussian over the
    physical (M, R) region: M in [0.5, 3.3] Msun, R from 2.94*G*M/c^2
    (converted to km; presumably the causality/compactness limit -- confirm)
    up to 14.3 km.

    Args:
        distribution (ndarray): Three rows of [Mobs, Robs, sigmaM, sigmaR, rho_corr].

    Returns:
        ndarray: The three normalization integrals (length 3).
    """
    def Multivariate_notNorm(R, M, distribution):
        # Unnormalized correlated bivariate Gaussian in (R, M).
        Mobs = distribution[0]
        Robs = distribution[1]
        sigmaM = distribution[2]
        sigmaR = distribution[3]
        rho = distribution[4]
        return numpy.exp(-1./(2.*(1.-rho**2.)) * ((R-Robs)**2. / sigmaR**2.0 + (M-Mobs)**2. / sigmaM**2.0 - \
                2.*rho*(R-Robs)*(M-Mobs)/(sigmaM*sigmaR)))
    norm = numpy.zeros(3)
    for i in range(3):
        # dblquad: outer integral over M, inner (R) limits given as functions of M.
        norm[i] = dblquad(Multivariate_notNorm, 0.5, 3.3, lambda M: 2.94*G*M*Msun/(c**2. * 100000),
                          lambda M: 14.3, args=([distribution[i]]))[0]
    return norm
def Pobs(rho1, rho2, rho3, Jac_func, curveM, curveR, obs, norm):
    """
    Posterior integrand over the three central densities: the product of the
    three normalized bivariate-Gaussian M-R likelihoods, evaluated on the
    M(rho) and R(rho) curves, times |J| from Jac_func.

    Args:
        rho1, rho2, rho3: Central densities (scalars or arrays).
        Jac_func (callable): Jacobian interpolator, called with (rho1, rho2, rho3).
        curveM, curveR (callable): M and R as functions of central density.
        obs (ndarray): Three rows [Mobs, Robs, sigmaM, sigmaR, rho_corr].
        norm (ndarray): The three likelihood normalization constants.

    Returns:
        The integrand value(s), same shape as the density inputs.
    """
    likelihood = 1.0
    for i, rho_c in enumerate((rho1, rho2, rho3)):
        mu_m, mu_r, s_m, s_r, corr = obs[i][0], obs[i][1], obs[i][2], obs[i][3], obs[i][4]
        dm = curveM(rho_c) - mu_m
        dr = curveR(rho_c) - mu_r
        quad = dm**2.0/s_m**2.0 + dr**2.0/s_r**2.0 - 2.*corr*dm*dr/(s_m*s_r)
        likelihood = likelihood * (1./norm[i]) * numpy.exp(-quad/(2.*(1.-corr**2.)))
    return likelihood * abs(Jac_func((rho1, rho2, rho3)))
def simps_integration(points, low_lim, up_lim, Jac_func, curveM, curveR, obs, norm):
    """
    Triple integral of Pobs over (rho1, rho2, rho3) in
    [low_lim, up_lim]^3, computed with nested Simpson rules on a uniform
    grid of `points` nodes per axis. The innermost (rho1) axis is evaluated
    vectorized in a single Pobs call.
    """
    axis = numpy.linspace(low_lim, up_lim, points)
    inner = numpy.zeros(points)
    outer = numpy.zeros(points)
    for i, r3 in enumerate(axis):
        for j, r2 in enumerate(axis):
            values = Pobs(axis, numpy.full(points, r2), numpy.full(points, r3),
                          Jac_func, curveM, curveR, obs, norm)
            inner[j] = simps(values, axis)
        outer[i] = simps(inner, axis)
    return simps(outer, axis)
# Module-level shortcut to the multiprocessing logger's info method (used by worker).
info = mp.get_logger().info
def main(MRIcurves, Jacobian, Observables, outputfile):
    """
    Distribute the per-EOS posterior-probability computation over all cores.

    Args:
        MRIcurves (ndarray) : Per-EOS arrays of (M, R, I, rho_c).
        Jacobian (ndarray)  : Per-EOS precomputed Jacobian partials.
        Observables (ndarray): Three rows [Mobs, Robs, sigmaM, sigmaR, rho_corr].
        outputfile (str)    : Path for numpy.save of the probability array.
    """
    logger = mp.log_to_stderr()
    logger.setLevel(logging.INFO)
    nproc = mp.cpu_count()# - 1
    nproc = max(1, nproc)
    #norm = numpy.zeros(len(Observables), dtype=object)
    #for i, e in enumerate(Observables):
    # Likelihood normalizations are identical for every EOS: compute once.
    norm = calculate_norm(Observables)
    div_MR = numpy.array_split(MRIcurves, nproc)
    div_jac = numpy.array_split(Jacobian, nproc)
    ntasks = nproc
    # Each task carries its index `t` so results can be re-ordered later.
    inputs = [[div_MR[t], div_jac[t], Observables, norm, t] for t in xrange(ntasks)]
    input_q = mp.Queue()
    output_q = mp.Queue()
    procs = [ mp.Process(target=worker, args=(input_q,output_q)) for i in xrange(nproc)]
    for i in xrange(ntasks):
        input_q.put(inputs[i])
    # One 'STOP' sentinel per worker terminates its loop.
    for i in xrange(nproc):
        input_q.put('STOP')
    for p in procs:
        p.start()
    result = []
    # Drain the output queue before joining to avoid a queue-full deadlock.
    while ntasks > 0:
        result.append(output_q.get())
        ntasks -= 1
    # Restore submission order, drop the task-index column, then flatten
    # the per-worker probability chunks into one 1-D array.
    result = numpy.array(sorted(result, key=lambda x: x[1]))
    result = numpy.delete(result, 1, axis=1)
    result = numpy.concatenate(result).ravel()
    result = numpy.concatenate(result).ravel()
    #Prob = numpy.zeros(len(Observables), dtype=object)
    #for z in range(len(Observables)):
    #    Prob[z] = numpy.array([e[z] for e in result])
    numpy.save(outputfile, result)
    for p in procs:
        p.join()
def worker(input_q, output_q):
    """
    Worker-process loop: for each EOS in the received chunk, build splines
    M(log10 rho) and R(log10 rho), interpolate the Jacobian determinant on a
    (rho1, rho2, rho3) grid, and integrate the posterior with
    simps_integration. Pushes [Prob, task_index] onto output_q; stops on the
    'STOP' sentinel.
    """
    start = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8.
    while True:
        try:
            tmp = input_q.get()
            if 'STOP' == tmp :
                break
            MRIcurves, Jacobian, Observables, norm, task = tmp
            info(len(Jacobian))
            Prob = numpy.zeros(len(Jacobian))
            for h, e in enumerate(Jacobian):
                Det = calc_determinant(e)
                M, R, I, rhoc = MRIcurves[h]
                x = numpy.log10(rhoc)
                curveM = UnivariateSpline(x, M, k=3, s=0)
                curveR = UnivariateSpline(x, R, k=3, s=0)
                # Skip EOSs with no usable Jacobian or too few grid points
                # for the trilinear interpolator; their Prob entry stays 0.
                if numpy.isnan(Det[0]) or len(Det[2])<3:
                    continue
                rhos = numpy.linspace(Det[0], Det[1], len(Det[2]))
                Jac_func = RegularGridInterpolator((rhos, rhos, rhos), Det[2])
                Prob[h] = simps_integration(25, min(rhos), max(rhos), Jac_func, curveM, curveR, Observables, norm)
                # Hack to avoid memory leak. Explicitly delete the instance of Jac_func and collect garbage.
                del Jac_func, curveM, curveR
                gc.collect()
            output_q.put([Prob, task])
        except Exception as exception:
            # A failed task produces no output entry, so main() would block
            # waiting for it -- the traceback is at least logged here.
            trace = str(traceback.format_exc())
            info(trace)
    end = (time.clock() - start)
    info(end)
    return
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("-f", dest="outputFile", help="write probability to FILE", metavar="FILE", required=True)
    parser.add_argument("-i1", dest="inputMRIcurves", help="use as input MRIcurves", required=True)
    parser.add_argument("-i2", dest="inputJacobian", help="use as input Jacobian", required=True)
    parser.add_argument("-i3", dest="inputObservables", help="use as input observables", required=True)
    args = parser.parse_args()
    MRIcurves = numpy.load(args.inputMRIcurves)
    Jacobian = numpy.load(args.inputJacobian)
    # The hard-coded Observables array that used to sit here was dead code:
    # it was unconditionally overwritten by the load below.
    Observables = numpy.load(args.inputObservables)
    main(MRIcurves, Jacobian, Observables, args.outputFile)
| 7,431 | 31.596491 | 150 | py |
MORSE | MORSE-master/Plotting.py | import numpy
import pandas
from matplotlib import pyplot
from scipy.interpolate import griddata
from InputPosteriors import Pobs, find_CI_level
# Global matplotlib style for every figure in this module: inward ticks with
# minor ticks on all four sides, larger LaTeX-rendered labels.
pyplot.rcParams['xtick.direction'] = 'in'
pyplot.rcParams['xtick.minor.visible'] = True
pyplot.rcParams['ytick.direction'] = 'in'
pyplot.rcParams['ytick.minor.visible'] = True
pyplot.rcParams['xtick.major.size'] = 5
pyplot.rcParams['ytick.major.size'] = 5
pyplot.rcParams['ytick.right'] = True
pyplot.rcParams['xtick.top'] = True
pyplot.rcParams['axes.titlesize'] = 15
pyplot.rcParams['axes.labelsize'] = 24
pyplot.rcParams['xtick.labelsize'] = 20
pyplot.rcParams['ytick.labelsize'] = 20
pyplot.rcParams['text.usetex'] = True
def Plot_MRcurves(MRIcurves, num):
    """Plot `num` mass-radius curves sampled evenly from MRIcurves and show the figure."""
    sample = numpy.linspace(0, len(MRIcurves)-1, num, dtype=int)
    fig, axis = pyplot.subplots(1,1, figsize=(7,6))
    for idx in sample:
        mass, radius, _inertia, _rhoc = MRIcurves[idx]
        axis.plot(radius, mass, c='blue', zorder=0)
    axis.set_xlim(5, 16)
    axis.set_xlabel('Radius (km)')
    axis.set_ylabel(r'Mass (M$_{\odot}$)')
    pyplot.show()
def Plot_PosteriorInput(PosteriorInput, M, R):
    """
    Overlay the 2-sigma contour of each input (M, R) likelihood on top of a
    mass-radius curve.

    Args:
        PosteriorInput (ndarray): Three rows [Mobs, Robs, sigmaM, sigmaR, rho_corr]
            as produced by Create_Input_Posterior.
        M, R (array): The mass-radius curve to draw underneath.
    """
    mi = numpy.linspace(0.2, 3.6, 400)
    ri = numpy.linspace(5, 16, 400)
    mig, rig = numpy.meshgrid(mi, ri)
    # One likelihood grid per observation (length-3 object array).
    pig = Pobs(mig, rig, PosteriorInput)
    fig, ax = pyplot.subplots(1,1, figsize=(7,6))
    for i in range(len(pig)):
        pi = numpy.concatenate(pig[i]).ravel()
        # find_CI_level(pi)[0] is the 95.45% (2-sigma) credibility threshold.
        ax.contour(rig, mig, pig[i], linewidth=2.0,
                rstride=1, cstride=1, vmin=numpy.amin(pig[i]), vmax=numpy.amax(pig[i]),
                levels=numpy.array([find_CI_level(pi)[0]]), linestyles='--',
                colors=['red'], extend='max')
    ax.plot(R, M, c='black')
    ax.set_xlim(min(R)-3., 17)
    ax.set_ylim(0., max(M)+.3)
    ax.set_xlabel('Radius (km)')
    ax.set_ylabel(r'Mass (M$_{\odot}$)')
    ax.xaxis.set_tick_params(labelsize=15)
    ax.yaxis.set_tick_params(labelsize=15)
    pyplot.show()
def Plot_Prob(Prob, Parameters):
    """
    Marginalize the EOS posterior onto the (P1, P2) and (P2, P3) planes and
    draw their 1- and 2-sigma contours side by side.

    Args:
        Prob (array)       : Posterior probability per EOS.
        Parameters (ndarray): Matching (P1, P2, P3) rows.
    """
    df = pandas.DataFrame({'P1':Parameters[:,0],'P2':Parameters[:,1], 'P3':Parameters[:,2], 'Prob':Prob})
    fig, ax = pyplot.subplots(1,2, figsize=(12,5))
    for i, e in enumerate([['P1', 'P2'], ['P2', 'P3']]):
        # Marginalize: sum the probability over the third parameter.
        df2 = df.groupby([e[0], e[1]]).Prob.sum().reset_index()
        df2 = numpy.array(df2)
        values = df2[:,2]
        points = df2[:,0:2]
        values=abs(values)
        X = numpy.log10(points[:,0])
        Y = numpy.log10(points[:,1])
        Z = values
        # Interpolate the scattered marginal onto a regular 100x100 log grid.
        xi = numpy.linspace(X.min(),X.max(),100)
        yi = numpy.linspace(Y.min(),Y.max(),100)
        zi = griddata((X, Y), Z, (xi[None,:], yi[:,None]), method='cubic')
        xig, yig = numpy.meshgrid(xi, yi)
        # NOTE(review): 'rstrid' looks like a typo for 'rstride' (and neither is a
        # documented contour() kwarg) -- verify against the matplotlib version in use.
        surface = ax[i].contour(xig, yig, zi, linewidths=1.5, rstrid=1, cstride=1, vmin=min(Z), vmax=max(Z),
                levels = find_CI_level(values), colors=('Grey', 'Steelblue'))
        fmt = {}
        strs = [r'2 $\sigma$', r'1 $\sigma$']
        for l, s in zip(surface.levels, strs):
            fmt[l] = s
        ax[i].clabel(surface, inline=1, fontsize=11, fmt=fmt)
        if i==0:
            ### P1 P2 ###
            ax[0].set_xlabel('$\log($P$_{1}$) (dyn cm$^{-2}$)')
            ax[0].set_ylabel('$\log($P$_{2}$) (dyn cm$^{-2}$)')
            ax[0].set_yticks([34.5, 35., 35.5, 36.])
            ax[0].set_xticks([33.5, 34., 34.5, 35.])
            ax[0].text(34.253, 35.115, 'FPS', fontsize=12)
            ax[0].set_xlim(33.5, 35.)
            ax[0].set_ylim(34.2, 36.)
        if i==1:
            ### P2 P3 ###
            ax[1].set_xlabel('$\log($P$_{2}$) (dyn cm$^{-2}$)')
            ax[1].set_ylabel('$\log($P$_{3}$) (dyn cm$^{-2}$)')
            ax[1].set_xticks([34.5, 35., 35.5, 36.])
            ax[1].set_yticks([35., 35.5, 36., 36.5, 37.])
            ax[1].text(35.08, 35.89, 'FPS', fontsize=12)
            ax[1].set_xlim(34.2, 36.)
            ax[1].set_ylim(35., 37.)
    pyplot.tight_layout()
    pyplot.show()
| 3,918 | 32.784483 | 108 | py |
MORSE | MORSE-master/Create_MRcurves_Par.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import multiprocessing as mp
import traceback
import logging
import gc
import time
from constants import c, G, Msun, rho_0, rho_ns, rho_1, rho_2, rho_3, P_0, gamma_0, dyncm2_to_MeVfm3, gcm3_to_MeVfm3, oneoverfm_MeV
import numpy
from argparse import ArgumentParser
from tqdm import tqdm, trange
import itertools
from scipy.constants import pi
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.signal import argrelextrema
from scipy.integrate import odeint
from matplotlib import pyplot
def print_progressbar(i, N):
    """Render an in-place ASCII progress bar for step i of N on stdout (Python 2 print syntax)."""
    pbwidth = 42
    progress = float(i)/N
    block = int(round(pbwidth*progress))
    # Leading \r rewrites the same terminal line on every call.
    text = "\rProgress: [{0}] {1:.1f}%".format( "#"*block + "-"*(pbwidth-block), progress*100)
    sys.stdout.write(text)
    sys.stdout.flush()
    if i == (N-1):
        print " .. done"
def crust_EOS():
    """
    Build the low-density (crust) EOS from the analytic SLy fit.

    Returns:
        eos (UnivariateSpline)   : pressure [dyn/cm^2] as a function of density [g/cm^3].
        inveos (UnivariateSpline): density [g/cm^3] as a function of pressure [dyn/cm^2].
        Pmin (float)             : minimum pressure covered by the splines [dyn/cm^2].

    Note: the unused locals `Pmax` and `ptest` of the previous version have
    been removed; they had no effect on the returned splines.
    """
    Pmin = 1e2
    # Tabulate the SLy fit over the crust density range and spline both directions.
    rho_grid = numpy.logspace(6, 16, 300)
    pres_grid = 10**SLYfit(numpy.log10(rho_grid))
    eos = UnivariateSpline(rho_grid, pres_grid, k=3, s=0)
    inveos = UnivariateSpline(pres_grid, rho_grid, k=3, s=0)
    return eos, inveos, Pmin
def f0(x):
    """Logistic switching function 1/(1 + e^x) used to blend terms of the analytic SLy fit."""
    return 1. / (1. + numpy.exp(x))
def SLYfit(rho):
    """
    Analytic representation of the SLy EOS: log10(P [dyn/cm^2]) as a function
    of log10(rho [g/cm^3]). The fit is a rational first term plus three
    logistic-weighted linear terms sharing the coefficient table `a`.
    """
    a = numpy.array([6.22, 6.121, 0.005925, 0.16326, 6.48, 11.4971, 19.105, 0.8938,
                 6.54, 11.4950, -22.775, 1.5707, 4.3, 14.08, 27.80, -1.653, 1.50,
                 14.67])
    result = (a[0] + a[1]*rho + a[2]*rho**3.)/(1. + a[3]*rho) * f0(a[4]*(rho-a[5]))
    for b0, b1, slope, edge in ((a[6], a[7], a[8], a[9]),
                                (a[10], a[11], a[12], a[13]),
                                (a[14], a[15], a[16], a[17])):
        result = result + (b0 + b1*rho)*f0(slope*(edge - rho))
    return result
def eos(rho, P_1, P_2, P_3, eos_crust):
    """
    The parameterized EOS.
    Args:
        rho (float): The density at which to evaluate the EOS in g/cm^3.
        P_1 (float): The first pressure parameter of the parameterization.
        P_2 (float): The second pressure parameter of the parameterization.
        P_3 (float): The third pressure parameter of the parameterization.
        eos_crust (function): The EOS for the low-density part, which inputs a mass density and returns a pressure.
    Returns:
        pres (float): The pressure in dyn/cm^2.
        epsilon (float): The energy density in g/cm^3
    """
    # NOTE: these locals shadow the module-level constants imported from
    # `constants`; keep the two definitions in sync.
    rho_ns = 2.7e14
    rho_1 = 1.85 * rho_ns
    rho_2 = 2. * rho_1
    rho_3 = 2. * rho_2
    # Polytropic indices fixed by requiring P(rho_i) = P_i on each segment.
    gamma_1 = numpy.log10(P_1/P_0) / numpy.log10(rho_1/rho_0)
    gamma_2 = numpy.log10(P_2/P_1) / numpy.log10(rho_2/rho_1)
    gamma_3 = numpy.log10(P_3/P_2) / numpy.log10(rho_3/rho_2)
    # Polytropic constants k_i chosen so pressure is continuous at rho_1, rho_2.
    k1 = P_0/(rho_0**gamma_1)
    pres1 = k1*rho_1**gamma_1
    k2 = pres1/(rho_1**gamma_2)
    pres2 = k2*rho_2**gamma_2
    k3 = pres2/(rho_2**gamma_3)
    #gamma0 = deriv_hpd(rho0) * rho0/P0
    gamma0 = 2.7
    # a_i make the energy density continuous across the segment boundaries.
    e0 = rho_0 + P_0/c**2.0 * 1./(gamma0 - 1.)
    a1 = e0/rho_0 - 1. - k1/((gamma_1-1.)*c**2.0) * rho_0**(gamma_1-1.)
    e1 = (1. + a1)*rho_1 + pres1/(c**2.0 * (gamma_1 -1.))
    a2 = e1/rho_1 - 1. - k2/((gamma_2-1.)*c**2.0) * rho_1**(gamma_2-1.)
    e2 = (1. + a2)*rho_2 + pres2/(c**2.0 * (gamma_2 -1.))
    a3 = e2/rho_2 - 1. - k3/((gamma_3-1.)*c**2.0) * rho_2**(gamma_3-1.)
    if rho <= rho_0:
        # Below the matching density: defer to the tabulated crust EOS.
        pres = eos_crust(rho)
        gamma_05 = 1.7
        epsilon = rho + pres/c**2. * 1./(gamma_05 - 1)
    if rho_0 < rho <= rho_1:
        pres = k1 * rho**gamma_1
        epsilon = (1. + a1)*rho + pres/(c**2.0 * (gamma_1 -1.))
    if rho_1 < rho <= rho_2:
        pres = k2 * rho**gamma_2
        epsilon = (1. + a2)*rho + pres/(c**2.0 * (gamma_2 -1.))
    if rho > rho_2:
        pres = k3 * rho**gamma_3
        epsilon = (1. + a3)*rho + pres/(c**2.0 * (gamma_3 -1.))
    return pres, epsilon
def inveos(pres, P_1, P_2, P_3, eos_crust, inveos_crust, P_min):
    """
    The inverse of the parameterized EOS.
    Args:
        pres (float)           : The pressure at which to evaluate the inverse EOS in dyn/cm^2.
        P_1 (float)            : The first pressure parameter of the parameterization.
        P_2 (float)            : The second pressure parameter of the parameterization.
        P_3 (float)            : The third pressure parameter of the parameterization.
        eos_crust (function)   : The EOS for the low-density part, which inputs a mass density and returns a pressure.
        inveos_crust (function): The inverse EOS for the low density part, which inputs a pressure and returns a mass density.
        P_min (float)          : The minimum pressure for which the low-density EOS function is defined.
    Returns:
        rho (float)            : The rest-mass density in g/cm^3.
        epsilon (float)        : The energy density in g/cm^3
    """
    # Same segment constants as eos(); see that function for the continuity logic.
    gamma_1 = numpy.log10(P_1/P_0) / numpy.log10(rho_1/rho_0)
    gamma_2 = numpy.log10(P_2/P_1) / numpy.log10(rho_2/rho_1)
    gamma_3 = numpy.log10(P_3/P_2) / numpy.log10(rho_3/rho_2)
    k1 = P_0/(rho_0**gamma_1)
    pres1 = k1*rho_1**gamma_1
    k2 = pres1/(rho_1**gamma_2)
    pres2 = k2*rho_2**gamma_2
    k3 = pres2/(rho_2**gamma_3)
    #gamma0 = deriv_hpd(rho0) * rho0/P0
    gamma0 = 2.7
    e0 = rho_0 + P_0/c**2.0 * 1./(gamma0 - 1.)
    a1 = e0/rho_0 - 1. - k1/((gamma_1-1.)*c**2.0) * rho_0**(gamma_1-1.)
    e1 = (1. + a1)*rho_1 + pres1/(c**2.0 * (gamma_1 -1.))
    a2 = e1/rho_1 - 1. - k2/((gamma_2-1.)*c**2.0) * rho_1**(gamma_2-1.)
    e2 = (1. + a2)*rho_2 + pres2/(c**2.0 * (gamma_2 -1.))
    a3 = e2/rho_2 - 1. - k3/((gamma_3-1.)*c**2.0) * rho_2**(gamma_3-1.)
    if pres <= P_0:
        # Below the matching pressure: invert via the crust spline.
        rho = inveos_crust(pres)
        #rho = 10**inveos_crust(numpy.log10(pres))
        gamma_05 = 1.7
        epsilon = rho + pres/c**2. * 1./(gamma_05 - 1)
    if P_0 < pres <= P_1:
        rho = (pres/k1)**(1./gamma_1)
        epsilon = (1. + a1)*rho + pres/(c**2.0 * (gamma_1 -1.))
    if P_1 < pres <= P_2:
        rho = (pres/k2)**(1./gamma_2)
        epsilon = (1. + a2)*rho + pres/(c**2.0 * (gamma_2 -1.))
    if pres > P_2:
        rho = (pres/k3)**(1./gamma_3)
        epsilon = (1. + a3)*rho + pres/(c**2.0 * (gamma_3 -1.))
    return rho, epsilon
### Define function to integrate
def f(initial, r, P_1, P_2, P_3, eos_crust, inveos_crust, P_min):
    """
    The TOV-equations to pass on to scipy's 'odeint'.
    Args:
        initial (array)        : Array of two values, the inital pressure and the initial mass.
        r (float)              : The radial coordinate of the neutron star (r = 0 is the center).
        P_1 (float)            : The first pressure parameter of the parameterization.
        P_2 (float)            : The second pressure parameter of the parameterization.
        P_3 (float)            : The third pressure parameter of the parameterization.
        eos_crust (function)   : The EOS for the low-density part, which inputs a mass density and returns a pressure.
        inveos_crust (function): The inverse EOS for the low density part, which inputs a pressure and returns a mass density.
        P_min (float)          : The minimum pressure for which the low-density EOS function is defined.
    Returns:
        dpdr (float)           : The derivative of the pressure with respect to the radial coordinate.
        dmdr (float)           : The derivative of the mass with respect to the radial coordinate.
    """
    pres, m = initial
    # Floor the pressure so the EOS inversion stays inside its tabulated range
    # once the integrator has passed the stellar surface.
    if pres < P_min:
        pres = P_min
    rho, eps = inveos(pres, P_1, P_2, P_3, eos_crust, inveos_crust, P_min)
    dmdr = 4.*pi*r**2.0 * eps
    # dp/dr is finite at the center; setting it to 0 avoids the 0/0 at r = 0.
    if r==0.0:
        dpdr = 0.0
    else:
        dpdr = -G * (eps + pres/c**2.) * (m + 4.*pi*r**3. * pres/c**2.)
        dpdr = dpdr/(r*(r - 2.*G*m/c**2.))
    return dpdr, dmdr
### Function to solve the TOV-equations
def tovsolve(rhocent, P_1, P_2, P_3, eos_crust, inveos_crust, P_min):
    """
    Solves the TOV-equations using scipy's 'odeint' package.
    Args:
        rhocent (float)        : The central density of the neutron star. This is the starting
                                 value of the differential integration.
        P_1 (float)            : The first pressure parameter of the parameterization.
        P_2 (float)            : The second pressure parameter of the parameterization.
        P_3 (float)            : The third pressure parameter of the parameterization.
        eos_crust (function)   : The EOS for the low-density part, which inputs a mass density and returns a pressure.
        inveos_crust (function): The inverse EOS for the low density part, which inputs a pressure and returns a mass density.
        P_min (float)          : The minimum pressure for which the low-density EOS function is defined.
    Returns:
        M (float)              : The mass of the neutron star in Msun.
        R (float)              : The radius of the neutron star in km.
        I (float)              : The moment of inertia of the star (see MomentInertia).
    """
    dr = 800.
    r = numpy.arange(0.0, 2500000., dr)
    pcent = eos(rhocent, P_1, P_2, P_3, eos_crust)[0]
    m0 = 0.0
    P0 = pcent
    y = P0, m0
    psol = odeint(f, y, r, args=(P_1, P_2, P_3, eos_crust, inveos_crust, P_min))
    # The stellar surface is the last grid point whose pressure still exceeds P_min.
    indices = numpy.where(psol[:,0]>P_min)
    index = indices[-1][-1]
    M_max = psol[index][1]/Msun   # g -> Msun
    R_max = r[index]/100000       # cm -> km
    I = MomentInertia(psol[:,0][indices[0]], psol[:,1][indices[0]], r[indices[0]], P_min)
    return M_max, R_max, I
### Calculate the Masses and Radii for different central pressures
def calculate_MR(logrhomin, logrhomax, n, P_1, P_2, P_3, eos_crust, inveos_crust, P_min):
    """
    Build a mass-radius-inertia curve by solving the TOV equations on a
    log-spaced grid of central densities.

    Args:
        logrhomin (float)      : Lower central density limit, log10(g/cm^3).
        logrhomax (float)      : Upper central density limit, log10(g/cm^3).
        n (int)                : Number of grid points.
        P_1, P_2, P_3 (float)  : Pressure parameters of the EOS.
        eos_crust (function)   : Crust EOS, density -> pressure.
        inveos_crust (function): Inverse crust EOS, pressure -> density.
        P_min (float)          : Minimum pressure covered by the crust EOS.

    Returns:
        tuple: (Masses, Radii, Inert, rhocent) arrays, each of length n.
    """
    rhocent = numpy.logspace(logrhomin, logrhomax, n)
    solutions = [tovsolve(rho_c, P_1, P_2, P_3, eos_crust, inveos_crust, P_min)
                 for rho_c in rhocent]
    Masses = numpy.array([sol[0] for sol in solutions])
    Radii = numpy.array([sol[1] for sol in solutions])
    Inert = numpy.array([sol[2] for sol in solutions])
    return Masses, Radii, Inert, rhocent
def MomentInertia(P, M, r, P_min):
    """
    Calculate the moment of inertia of a star given a central density and an EOS.
    Args:
        P (array)    : The pressure profile throughout the star as a function of the radial coordinate.
        M (array)    : The mass profile throughout the star as a function of the radial coordinate.
        r (array)    : The radial coordinates.
        P_min (float): The minimum pressure for which the EOS is defined.
    Returns:
        I (float)    : The moment of inertia of the star in (Msun km^2)
    """
    curveM = UnivariateSpline(r, M, k=3, s=0)
    curveP = UnivariateSpline(r, P, k=3, s=0)
    Mns = M[-1]
    Rns = r[-1]
    dr = 100
    rx = numpy.arange(min(r)+.1, max(r), dr)
    # Boundary value of the metric potential nu at the surface.
    nustart = numpy.log(1. - 2.*G*Mns/(c**2. * Rns))
    curvenu = UnivariateSpline(rx, nu(rx, curveM, curveP), k=3, s=0)
    nufunc = curvenu.antiderivative(1)
    # j(r) = exp(-(nu+lambda)/2), shifted so nu matches its surface value.
    js = numpy.exp(-.5*(nufunc(rx)-nufunc(Rns)+nustart))*numpy.sqrt((1. - 2.*G*curveM(rx)/(c**2. *rx)))
    curvej = UnivariateSpline(rx, js, k=3, s=0)
    derivj = curvej.derivative(1)
    # Integrate the frame-dragging ODE (see omega) outward from the center.
    sol = odeint(omega, [1., 0.], rx, args=(curvej, derivj))
    w = sol[:,0]
    dw = sol[:,1]
    curvew = UnivariateSpline(rx, w, k=3, s=0)
    curvedw = UnivariateSpline(rx, dw, k=3, s=0)
    J = 1./6. * Rns**4. * curvedw(Rns)
    W = curvew(Rns) + 2.*J/(Rns**3.)
    #I = quad(Inertia, 0.1, Rns, args=(derivj, curvew, W))[0] *2.*c**2. /(3.*G)
    # Surface formula for I from the frame-dragging solution.
    I = (1. - curvew(Rns)/W)*Rns**3. * c**2./(2.*G)
    return I*10**(-10)/Msun  # g cm^2 -> Msun km^2
def nu(r, curveM, curveP):
    """d(nu)/dr of the metric potential from the TOV structure; used by MomentInertia."""
    numerator = curveM(r) + 4.*numpy.pi*r**3. * curveP(r)/c**2.
    denominator = r**2. * (1. - 2.*G*curveM(r)/(r*c**2.))
    return 2*G/c**2. * numerator/denominator
def omega(initial, r, curvej, derivj):
    """
    Right-hand side of the second-order frame-dragging ODE, written as a
    first-order system (w, w') for odeint. Returns (0, 0) at the center to
    avoid the coordinate singularity.
    """
    w, dw = initial
    if r == 0.0:
        return 0., 0.
    ratio = derivj(r)/curvej(r)
    return dw, -4.*ratio/r * w - 4./r * dw - ratio*dw
def Inertia(r, derivj, curvew, W):
    """Integrand -r^3 j'(r) w(r)/W of the (currently unused) quadrature form of the moment of inertia."""
    return -(r**3.) * derivj(r) * curvew(r) / W
def calculate_MR_all(parameters, eos_crust, inveos_crust, task=0, logrhomin=14.4, logrhomax=16.5, n=100, P_min=0.0):
    """
    Calculate the masses, radii and moments of inertia for all input parameters by solving the TOV-equations
    for different central densities.
    Args:
        parameters                : The array of parameters for which to solve the TOV-equations.
        eos_crust (function)      : The EOS for the low-density part, which inputs a pressure and returns a mass density.
        inveos_crust (function)   : The inverse EOS for the low density part, which inputs a mass density and returns a pressure.
        task                      : At which line to print the progressbar, default is 0.
        logrhomin (float)         : The lower limit of the central density in log(g/cm^3), default is 14.4.
        logrhomax (float or array): The upper limits of the central density in log(g/cm^3), based on causality.
                                    If float, the same value for all EoSs is used, if array, every entry should
                                    correspond to the maximum central density of an EoS.
        n (int)                   : The number of points used in logspace to create the MR-curve.
        eos_crust (function)      : The EoS for the low density part of the star. The function should take density
                                    as input and outputs pressure.
        inveos_crust (function)   : The inverse of the EoS for the low density part of the star.
                                    The function should take pressure as input and output density.
        P_min (float)             : The lowest density for which eos_crust and inveos_crust is defined,
                                    default is 0.0.
    Returns:
        MRIcurves (ndarray)       : An array of length (EoS) with for each EoS an array of masses,
                                    radii, moments of inertia and the corresponding central densities.
        Parameters (ndarray)      : An array of length (EoS) with the parameters for which the TOV
                                    equations generated stable mass-radius curves.
    """
    MR_curves = []
    Error_params = []   # indices of EOSs rejected below; removed from the output
    with tqdm(total=len(parameters), position=task, desc='Process %d' %(task), leave=False) as pbar:
        for j in range(len(parameters)):
            P_1 = parameters[j,0]
            P_2 = parameters[j,1]
            P_3 = parameters[j,2]
            try:
                if isinstance(logrhomax, float):
                    masses, radii, inert, rhocent = calculate_MR(logrhomin, logrhomax, n, P_1, P_2, P_3, eos_crust, inveos_crust, P_min)
                else:
                    masses, radii, inert, rhocent = calculate_MR(logrhomin, logrhomax[j], n, P_1, P_2, P_3, eos_crust, inveos_crust, P_min)
                pbar.update(1)
            except UnboundLocalError:
                # Raised when a pressure falls outside every EOS branch; skip this EOS.
                Error_params.append(j)
                continue
            else:
                locmin = argrelextrema(masses, numpy.less)[0] #Check for EOS with local minima
                if not len(locmin)==0:
                    Error_params.append(j)
                    continue
                locmax = argrelextrema(masses, numpy.greater)[0]
                if not len(locmax)==0:
                    if locmax[0] < len(masses)-1: #check to see if there is a sharp kink in the MR-curve
                        right = locmax[0] + 1
                        left = locmax[0] - 1
                        if abs(radii[right]-radii[left]) < 0.12:
                            Error_params.append(j)
                            continue
                    # Truncate at the maximum mass: beyond it the branch is unstable.
                    MR_curves.append([masses[0:locmax[0]+1], radii[0:locmax[0]+1], inert[0:locmax[0]+1], rhocent[0:locmax[0]+1]])
                else:
                    MR_curves.append([masses, radii, inert, rhocent])
    MR_curves = numpy.array(MR_curves)
    Error_params = numpy.array(Error_params)
    Parameters = numpy.delete(parameters, Error_params, axis=0)
    return MR_curves, Parameters
#info = mp.get_logger().info
def main(parameters, maxrho, outputMR, outputPs):
    """
    Distribute the TOV-curve computation over all cores minus one and save
    the resulting MRI curves and the surviving parameter rows.

    Args:
        parameters (ndarray): (P1, P2, P3) rows.
        maxrho (array)      : log10 of the maximum causal central density per EOS.
        outputMR (str)      : numpy.save target for the MRI curves.
        outputPs (str)      : numpy.save target for the surviving parameters.
    """
    #logger = mp.log_to_stderr()
    #logger.setLevel(logging.INFO)
    nproc = mp.cpu_count() - 1
    nproc = max(1, nproc)
    print len(parameters)
    # One chunk of parameters (and matching density limits) per worker.
    div_par = numpy.array_split(parameters, nproc)
    div_maxrho = numpy.array_split(maxrho, nproc)
    ntasks = nproc
    inputs = [[div_par[t], div_maxrho[t], t] for t in xrange(ntasks)]
    input_q = mp.Queue()
    output_q = mp.Queue()
    procs = [ mp.Process(target=worker, args=(input_q,output_q)) for i in xrange(nproc)]
    for i in xrange(ntasks):
        input_q.put(inputs[i])
    # One 'STOP' sentinel per worker terminates its loop.
    for i in xrange(nproc):
        input_q.put('STOP')
    for p in procs:
        p.start()
    result = []
    # Drain the output queue before joining to avoid a queue-full deadlock.
    while ntasks > 0:
        result.append(output_q.get())
        ntasks -= 1
    for p in procs:
        p.join()
    # Restore submission order, drop the task-index column, then flatten the
    # per-worker chunks into flat MRcurves/Parameters lists.
    result = numpy.array(sorted(result, key=lambda x: x[2]))
    result = numpy.delete(result, 2, axis=1)
    MRcurves = []
    Parameters = []
    for i in xrange(nproc):
        for j in range(len(result[i][0])):
            Parameters.append(result[i][1][j])
            MRcurves.append([numpy.array(result[i][0][j][0]), numpy.array(result[i][0][j][1]),
                             numpy.array(result[i][0][j][2]), numpy.array(result[i][0][j][3])])
    Parameters = numpy.array(Parameters)
    MRcurves = numpy.array(MRcurves)
    numpy.save(outputPs, Parameters)
    numpy.save(outputMR, MRcurves)
def worker(input_q, output_q):
    """
    Worker-process loop: pull (parameters, maxrho, task_index) jobs from
    input_q, build the crust EOS, compute the MRI curves via
    calculate_MR_all, and push [MR_curves, Parameters, task_index] onto
    output_q. Stops on the 'STOP' sentinel.

    NOTE(review): exceptions are swallowed silently (the logging lines are
    commented out) and no result is pushed for a failed task, so main()'s
    result loop can block forever in that case.
    """
    start = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8.
    while True:
        try:
            tmp = input_q.get()
            if 'STOP' == tmp :
                break
            parameters, maxrho, task = tmp
            eos_crust, inveos_crust, P_min = crust_EOS()
            MR_curves, Parameters = calculate_MR_all(parameters, eos_crust, inveos_crust, task, logrhomax=maxrho, n=30, P_min=P_min)
            output_q.put([MR_curves, Parameters, task])
        except Exception as exception:
            trace = str(traceback.format_exc())
            #info(trace)
    #end = (time.clock() - start)
    #info(end)
    return
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("-f1", dest="OutputFileMR", help="write MRIcurves to FILE", metavar="FILE", required=True)
    parser.add_argument("-f2", dest="OutputFileParams", help="write parameters to FILE", metavar="FILE", required=True)
    args = parser.parse_args()
    # Inputs come from the fixed filenames written by the causality-scan script.
    parameters = numpy.load('input_parameters.npy')
    maxrho = numpy.load('max_rho.npy')
    # calculate_MR_all expects log10 of the maximum central densities.
    maxrho = numpy.log10(maxrho)
    main(parameters, maxrho, args.OutputFileMR, args.OutputFileParams)
| 21,060 | 35.755672 | 139 | py |
MORSE | MORSE-master/InputPosteriors.py | import numpy
def Create_Input_Posterior(percent, masses, radii, rho):
    """
    Assemble the three synthetic (M, R) observations as rows of
    [Mobs, Robs, sigmaM, sigmaR, rho_corr], with each sigma given as the
    fractional uncertainty `percent[i]` times the observed value.

    Args:
        percent: Three fractional uncertainties.
        masses : Three observed masses.
        radii  : Three observed radii.
        rho    : Three M-R correlation coefficients.

    Returns:
        ndarray of shape (3, 5).
    """
    rows = []
    for i in range(3):
        rows.append([masses[i], radii[i],
                     percent[i]*masses[i], percent[i]*radii[i], rho[i]])
    return numpy.array(rows)
def find_CI_level(array):
    """Return the (95.45%, 68.27%) highest-density threshold levels of *array*.

    NaN entries are zeroed in place before the levels are computed (this
    side effect on the caller's array is intentional).
    """
    array[numpy.isnan(array)] = 0.0
    total = sum(array)
    order = numpy.argsort(array)[::-1]       # indices, largest value first
    running = numpy.cumsum(array[order])     # cumulative mass, descending
    sel68 = order[numpy.where(running < total * 0.6827)[0]]
    sel95 = order[numpy.where(running < total * 0.9545)[0]]
    return min(array[sel95]), min(array[sel68])
def Pobs(M, R, distribution):
    """Evaluate the (unnormalized) correlated bivariate-Gaussian likelihood
    of the point (M, R) for each of the three observations in *distribution*.

    Each row of *distribution* is [Mobs, Robs, sigmaM, sigmaR, rho]; the
    return value is an object array holding the three likelihood values.
    """
    likelihoods = numpy.zeros(3, dtype=object)
    for idx in range(3):
        Mobs, Robs, sigmaM, sigmaR, corr = distribution[idx][:5]
        dR = (R - Robs) / sigmaR
        dM = (M - Mobs) / sigmaM
        # Exponent of the correlated 2-D Gaussian in standardized deviates.
        expo = (dR * dR + dM * dM - 2. * corr * dR * dM) / (2. * (1. - corr ** 2.))
        likelihoods[idx] = numpy.exp(-expo)
    return likelihoods
| 1,586 | 27.339286 | 95 | py |
MORSE | MORSE-master/JacobianRho.py | import sys
import os
import multiprocessing as mp
import traceback
import logging
import gc
import time
from argparse import ArgumentParser
import numpy
import pandas
from matplotlib import pyplot
from scipy.interpolate import UnivariateSpline
def UniqueParams(Parameters):
    """Return the rows of *Parameters* that survive pairwise deduplication.

    Each column pair (P1,P2), (P1,P3), (P2,P3) is deduplicated separately
    (keeping first occurrences); the three results are concatenated and
    duplicate full rows removed, then returned as a numpy array.
    """
    frame = pandas.DataFrame({'P1': Parameters[:, 0],
                              'P2': Parameters[:, 1],
                              'P3': Parameters[:, 2]})
    pairwise = [frame.drop_duplicates(cols)
                for cols in (['P1', 'P2'], ['P1', 'P3'], ['P2', 'P3'])]
    combined = pandas.concat(pairwise)
    return numpy.array(combined.drop_duplicates())
def calculate_deriv(t, MRcurves, changeParam, Params):
    """Evaluate M(rhoc), R(rhoc) and their derivatives at log-density *t*.

    For each curve index in *changeParam*, smoothing splines of R and M
    versus log10(central density) are built; curves whose density range does
    not contain *t* are dropped.  If at least 4 curves remain, splines of
    R and M versus the (log) parameter values *Params* give the parameter
    derivatives.  Returns (back[:,0:2], back[:,2], back[:,3]) where rows
    for dropped curves stay NaN; columns are [dM/drhoc, dR/drhoc,
    dM/dP, dR/dP].
    """
    radii = numpy.zeros(len(changeParam))
    masses = numpy.zeros(len(changeParam))
    rhocM = numpy.zeros(len(changeParam))
    rhocR = numpy.zeros(len(changeParam))
    #inert = numpy.zeros(len(changeParam))
    wrong = []   # indices whose curve does not cover t (left NaN)
    right = []   # indices successfully evaluated at t
    back = numpy.empty((len(changeParam), 4))
    back.fill(numpy.nan)
    for i, e in enumerate(changeParam):
        # Each MRcurves entry holds (mass, radius, moment of inertia,
        # central density) arrays along one curve.
        M, R, I, rc = MRcurves[e]
        rc = numpy.log10(rc)
        curveR = UnivariateSpline(rc, R, k=3, s=1e-4)
        curveM = UnivariateSpline(rc, M, k=3, s=1e-4)
        derivR = curveR.derivative(1)
        derivM = curveM.derivative(1)
        if min(rc) <= t <= max(rc):
            radii[i] = curveR(t)
            masses[i] = curveM(t)
            rhocM[i] = derivM(t)
            rhocR[i] = derivR(t)
            right.append(i)
        else:
            wrong.append(i)
    # Discard out-of-range curves everywhere before fitting over parameters.
    Params2 = numpy.delete(Params, wrong, axis=0)
    radii = numpy.delete(radii, wrong, axis=0)
    masses = numpy.delete(masses, wrong, axis=0)
    rhocM = numpy.delete(rhocM, wrong, axis=0)
    rhocR = numpy.delete(rhocR, wrong, axis=0)
    # A cubic spline needs at least 4 points; otherwise return all-NaN.
    if Params2.size==0 or len(Params2)<4:
        return back[:,0:2], back[:,2], back[:,3]
    else:
        curvePR = UnivariateSpline(Params2, radii, k=3, s=1e-3)
        curvePM = UnivariateSpline(Params2, masses, k=3, s=1e-3)
        derivPR = curvePR.derivative(1)
        derivPM = curvePM.derivative(1)
        back[right] = numpy.dstack([rhocM, rhocR, derivPM(Params2), derivPR(Params2)])
        return back[:,0:2], back[:,2], back[:,3]
def calculate_jac(Parameters, ParamUnique, MRIcurves, rhoc):
    """Assemble the per-parameter-set Jacobian row at central density *rhoc*.

    Returns a (len(Parameters), 9) array, NaN where no derivative could be
    computed.  Columns 1:3 hold dM/drhoc and dR/drhoc; columns 3+j / 6+j
    hold dM/dPj and dR/dPj for parameter j (column 0 is filled by the
    caller).  For every unique (Pa, Pb) pair, the rows sharing that pair
    form a one-parameter family along the remaining parameter Pj, along
    which calculate_deriv fits splines in log10(Pj).
    """
    Jac = numpy.zeros((len(Parameters), 9))
    Jac.fill(numpy.nan)
    for i in range(len(ParamUnique)):
        P1 = ParamUnique[i][0]
        P2 = ParamUnique[i][1]
        P3 = ParamUnique[i][2]
        Pvalues = [P1, P2, P3]
        # combis[j] = the two columns held fixed while parameter j varies.
        combis = [[1, 2], [0, 2], [0, 1]]
        for j in range(3):
            h1, h2 = combis[j]
            change = numpy.where((Parameters[:,h1]==Pvalues[h1]) & (Parameters[:,h2]==Pvalues[h2]))[0]
            if change.size==0:
                continue
            else:
                Ps = numpy.log10(Parameters[change][:,j])
                Jac[:,1:3][change], Jac[:,j+3][change], Jac[:,j+6][change] = calculate_deriv(rhoc, MRIcurves, change, Ps)
    return Jac
info = mp.get_logger().info
def main(Parameters, MRIcurves, rhoc, outputfile):
    """Compute the Jacobian on the *rhoc* grid in parallel and save it.

    The rhoc grid is split across (cpu_count - 1) worker processes; each
    worker returns the Jacobian slices for its sub-grid tagged with its
    task index, and results are re-sorted by task before assembly.

    NOTE: Python-2-era code (uses xrange).
    """
    logger = mp.log_to_stderr()
    logger.setLevel(logging.INFO)
    nproc = mp.cpu_count() - 1
    nproc = max(1, nproc)
    div_rhoc = numpy.array_split(rhoc, nproc)
    ParamUnique = UniqueParams(Parameters)
    ntasks = nproc
    inputs = [[Parameters, ParamUnique, MRIcurves, div_rhoc[t], t] for t in xrange(ntasks)]
    input_q = mp.Queue()
    output_q = mp.Queue()
    procs = [ mp.Process(target=worker, args=(input_q,output_q)) for i in xrange(nproc)]
    for i in xrange(ntasks):
        input_q.put(inputs[i])
    # One 'STOP' sentinel per worker so every process terminates.
    for i in xrange(nproc):
        input_q.put('STOP')
    for p in procs:
        p.start()
    result = []
    while ntasks > 0:
        result.append(output_q.get())
        ntasks -= 1
    for p in procs:
        p.join()
    # Results arrive in arbitrary order; sort by task index then drop it.
    result = numpy.array(sorted(result, key=lambda x: x[1]))
    result = numpy.delete(result, 1, axis=1)
    # Flatten the per-task lists back into one entry per rhoc value.
    new_jac = []
    for i in xrange(nproc):
        for j in range(len(result[i][0])):
            new_jac.append(result[i][0][j])
    new_jac = numpy.array(new_jac)
    # Reindex from (rhoc, parameter-set) to (parameter-set, rhoc); column 0
    # is filled with rhoc only where the remaining columns are all finite.
    Jacobian = numpy.zeros((len(Parameters), len(rhoc), 9))
    for i in range(len(Parameters)):
        for j in range(len(rhoc)):
            Jacobian[i][j] = new_jac[j][i]
        indices = numpy.invert(numpy.any(numpy.isnan(Jacobian[i][:,1:]), axis=1))
        Jacobian[i][:,0][indices] = rhoc[indices]
    numpy.save(outputfile, Jacobian)
def worker(input_q, output_q):
    """Multiprocessing worker: for each job, evaluate calculate_jac at every
    rhoc value of the assigned sub-grid and push [Jacobian, task] onto
    output_q.  Terminates on 'STOP'.

    NOTE: Python-2-era code (time.clock was removed in Python 3.8).
    """
    start = time.clock()
    while True:
        try:
            tmp = input_q.get()
            if 'STOP' == tmp :
                break
            Parameters, ParamUnique, MRIcurves, rhoc, task = tmp
            Jacobian = []
            for i, e in enumerate(rhoc):
                #info(e)
                jacpart = calculate_jac(Parameters, ParamUnique, MRIcurves, e)
                Jacobian.append(jacpart)
            output_q.put([Jacobian, task])
        except Exception as exception:
            # Log the traceback but keep the worker alive for the next job.
            # NOTE(review): a failed task produces no output_q entry, so
            # main() may block waiting for it -- confirm.
            trace = str(traceback.format_exc())
            info(trace)
    end = (time.clock() - start)
    info(end)
    return
if __name__ == '__main__':
    # Command-line entry point.  NOTE: Python 2 syntax (bare print below).
    parser = ArgumentParser()
    parser.add_argument("-f", dest="outputFile", help="write jacobian to FILE", metavar="FILE", required=True)
    parser.add_argument("-i1", dest="inputMRIcurves", help="use as input MRIcurves", required=True)
    parser.add_argument("-i2", dest="inputParams", help="use as input Parameters", required=True)
    args = parser.parse_args()
    Parameters = numpy.load(args.inputParams)
    MRIcurves = numpy.load(args.inputMRIcurves)
    # Central-density grid on which the Jacobian is evaluated (log10 values;
    # units not stated in this chunk).
    rhoc = numpy.linspace(14.3, 16., 40)
    print len(Parameters), len(MRIcurves)
    main(Parameters, MRIcurves, rhoc, args.outputFile)
| 6,006 | 27.334906 | 123 | py |
UEDGE | UEDGE-master/setup.py | #!/usr/bin/env python
# To use:
# python setup.py install
#
import sys
import os
import os.path
import string
import site
from Forthon.compilers import FCompiler
import getopt
import logging
version='8.0.4.1'
try:
os.environ['PATH'] += os.pathsep + site.USER_BASE + '/bin'
import setuptools
import distutils
from distutils.core import setup
from distutils.core import Extension
from distutils.dist import Distribution
from distutils.command.build import build
from distutils.command.install import install
from subprocess import call
import numpy
except:
raise SystemExit("Distutils problem")
optlist, args = getopt.getopt(sys.argv[1:], 'gt:F:', ['parallel', 'petsc', 'omp'])
machine = sys.platform
debug = 0
fcomp = None
parallel = 0
petsc = 0
for o in optlist:
if o[0] == '-g':
debug = 1
elif o[0] == '-t':
machine = o[1]
elif o[0] == '-F':
fcomp = o[1]
elif o[0] == '--parallel':
parallel = 1
elif o[0] == '--petsc':
petsc = 1
elif o[0] == '--omp':
os.putenv("OMP","1")
if petsc == 1 and os.getenv('PETSC_DIR') == None:
raise SystemExit("PETSc requested but PETSC_DIR not set")
if os.getenv('PETSC_DIR') != None:
petsc = 1
if petsc == 1 and os.getenv('PETSC_ARCH') == None:
raise SystemExit("PETSc requested but PETSC_ARCH not set")
sys.argv = ['setup2.py']+args
fcompiler = FCompiler(machine=machine,
debug=debug,
fcompname=fcomp)
class uedgeInstall(build):
    # NOTE(review): this subclasses distutils' `build` command but invokes
    # `install.run(self)` -- the build/install mismatch looks suspicious;
    # confirm whether `install` was the intended base class.
    def run(self):
        install.run(self)
        # Emit a log record to stderr after the install step.
        logging.basicConfig(stream=sys.stderr,level=logging.INFO)
        log = logging.getLogger()
        log.info("test")
class uedgeBuild(build):
    """Custom build command: run the Forthon (or PETSc) makefile before the
    regular distutils build."""
    def run(self):
        # with python2 everything is put into a single uedgeC.so file
        if sys.hexversion < 0x03000000:
            raise SystemExit("Python versions < 3 not supported")
        else:
            # `petsc` is a module-level flag set from --petsc / PETSC_DIR.
            if petsc == 0:
                call(['make', '-f','Makefile.Forthon'])
            else:
                call(['make', '-f', 'Makefile.PETSc'])
        build.run(self)
class uedgeClean(build):
    """Custom clean command: delegate to the 'clean' target of the
    appropriate makefile (Forthon or PETSc build)."""
    def run(self):
        if sys.hexversion < 0x03000000:
            raise SystemExit("Python versions < 3 not supported")
        else:
            if petsc == 0:
                call(['make', '-f', 'Makefile.Forthon', 'clean'])
            else:
                call(['make', '-f', 'Makefile.PETSc', 'clean'])
uedgepkgs = ['aph', 'api', 'bbb', 'com', 'flx', 'grd', 'svr', 'wdf', 'ncl']
def makeobjects(pkg):
    """Return the extra object-file names generated for Forthon package *pkg*."""
    return ['{0}_p.o'.format(pkg), '{0}pymodule.o'.format(pkg)]
uedgeobjects = []
# add here any extra dot o files other than pkg.o, pkg_p.o
if sys.hexversion < 0x03000000:
raise SystemExit("Python versions < 3 not supported")
else:
dummydist = Distribution()
dummydist.parse_command_line()
dummybuild = dummydist.get_command_obj('build')
dummybuild.finalize_options()
builddir = dummybuild.build_temp
uedgeobjects = map(lambda p: os.path.join(builddir, p), uedgeobjects)
if os.getenv('PACT_DIR') != None:
library_dirs = fcompiler.libdirs + [
os.path.join(os.getenv('PACT_DIR'), 'lib')]
libraries = ['pdb', 'pml', 'score', 'blas', 'm'] + fcompiler.libs
else:
library_dirs = fcompiler.libdirs
libraries = fcompiler.libs
if petsc:
# PETSC_DIR = '/homes/mccomic/petsc-uedge'
# PETSC_ARCH = 'linux-uedge'
PETSC_DIR = os.getenv('PETSC_DIR')
PETSC_ARCH = os.getenv('PETSC_ARCH')
library_dirs = fcompiler.libdirs + \
[os.path.join(PETSC_DIR, PETSC_ARCH, 'lib')]
libraries = ['petscts', 'petscsnes', 'petscksp', 'petscdm', 'petscmat',
'petscvec', 'petsc', 'HYPRE', 'mpich', 'lapack', 'blas', 'X11',
'pthread', 'rt', 'stdc++', 'm'] + fcompiler.libs
libraries = ['petsc'] + fcompiler.libs
if parallel:
library_dirs = fcompiler.libdirs + ['/usr/lpp/ppe.poe/lib']
libraries = fcompiler.libs + ['mpi']
# uedgeobjects = uedgeobjects + ['/usr/local/mpi/ifc_farg.o']
with open('pyscripts/__version__.py','w') as ff:
ff.write("__version__ = '%s'\n"%version)
with open('pyscripts/__src__.py','w') as ff:
ff.write("__src__ = '%s'\n"%os.getcwd())
define_macros=[("WITH_NUMERIC", "0"),
("FORTHON_PKGNAME", '\"uedgeC\"'),
("FORTHON","1")]
# check for readline
rlncom = "echo \"int main(){}\" | gcc -x c -lreadline - "
rln = os.system(rlncom)
if rln == 0:
define_macros = define_macros + [("HAS_READLINE","1")]
os.environ["READLINE"] = "-l readline"
libraries = ['readline'] + libraries
setup(name="uedge",
version=version,
author='Tom Rognlien',
author_email="trognlien@llnl.gov",
maintainer='Bill Meyer',
maintainer_email='meyer8@llnl.gov',
description="2D Fluid simulation of plasma and neutrals in magnetic fusion devices",
platforms="Unix, Windows (cygwin), Mac OSX",
packages=['uedge'],
package_dir={'uedge': 'pyscripts'},
# include_package_data=True,
scripts=['pyscripts/pdb2hdf5', 'pyscripts/bas2py', 'pyscripts/hdf52pdb'],
ext_modules=[Extension('uedge.uedgeC',
['uedgeC_Forthon.c',
os.path.join(builddir, 'Forthon.c'),
'com/handlers.c', 'com/vector.c','bbb/exmain.c'],
include_dirs=[builddir, numpy.get_include()],
library_dirs=library_dirs,
libraries=libraries,
define_macros=define_macros,
extra_objects=uedgeobjects,
extra_link_args=['-g','-DFORTHON'] +
fcompiler.extra_link_args,
extra_compile_args=fcompiler.extra_compile_args
)],
cmdclass={'build': uedgeBuild, 'clean': uedgeClean},
test_suite="pytests",
install_requires=['forthon', 'easygui'],
# note that include_dirs may have to be expanded in the line above
classifiers=['Programming Language :: Python',
'Programming Language :: Python :: 3']
)
| 6,228 | 30.780612 | 90 | py |
UEDGE | UEDGE-master/localrules.py | #rules for converting mppl to f90
# This is generic down to the UEDGE section
import copy
import os
def Use2use(s):
    """Translate an MPPL 'Use(Group)' line into a Fortran 'use Group' line.

    Lines recognized as comments (first column non-blank and not 'U') pass
    through untouched, as do lines where 'Use' only appears inside a
    trailing '!' comment.
    """
    # Comment line: anything in column 1 other than whitespace or 'U'.
    if not (s[0].isspace() or s[0] == "U"):
        return s
    use_pos = s.find("Use")
    bang_pos = s.find("!")
    # Substitute only when 'Use' exists and precedes any '!' comment.
    if use_pos != -1 and (bang_pos == -1 or use_pos < bang_pos):
        return s.replace("(", " ").replace(")", " ").replace("Use", " use")
    return s
def Allot(s):
    """Convert MPPL 'call allot' statements into F90 'allocate' calls.

    'call allot("name",n)' becomes 'allocate(name(n))'; a bare ', allot'
    attribute is simply stripped.  Lines without 'allot' pass through.
    """
    if "allot" not in s:
        return s
    if "call allot" in s:
        # call allot("x",n)  ->  allocate(x(n))
        converted = s.replace("call allot", "allocate")
        for quote in ('"', "'"):
            converted = converted.replace(quote, "")
        return converted.replace(",", "(").replace(")", "))")
    # Strip an 'allot' attribute from a declaration line.
    return s.replace(", allot", "").replace(",allot", "")
def Nopdb(s):
    """Activate 'c!nopdb'-guarded lines when PACT is not available.

    When the PACT_DIR environment variable is unset, a leading 'c!nopdb'
    marker is blanked out (uncommenting the line); otherwise the line is
    returned unchanged.
    """
    if os.getenv('PACT_DIR') is None and s.startswith("c!nopdb"):
        s = s.replace("c!nopdb", " ")
    return s
def Petsc(s):
    """Activate PETSc-guarded lines when building against PETSc.

    With PETSC_DIR set: 'cpetsc'-prefixed lines are always uncommented, and
    'cunipetsc'-prefixed lines only for serial builds (PARALLEL unset).
    The markers are removed entirely (not replaced by a blank).
    """
    if os.getenv('PETSC_DIR') != None:
        if os.getenv('PARALLEL') == None:
            if s.startswith("cunipetsc"):
                s=s.replace("cunipetsc","")
        if s.startswith("cpetsc"):
            s=s.replace("cpetsc","")
    return s
def Omp(s):
    """Uncomment 'c!omp'-guarded lines when the OMP build flag is set
    (OMP environment variable defined); otherwise return *s* unchanged."""
    if os.getenv('OMP') is not None and s.startswith("c!omp"):
        s = s.replace("c!omp", " ")
    return s
saved_dec=0
in_uses=0
savedlines=[]
def MoveDecs(s):
    """Buffer Fortran declaration lines so they can be re-emitted after the
    'use' statements.

    Stateful line filter (module globals saved_dec/in_uses/savedlines):
    declaration lines, and comments/continuations/blanks inside a
    declaration run, are appended to savedlines and suppressed (None
    returned).  The first executable statement flushes the buffer: a list of
    the saved lines plus the current line is returned.  Other lines pass
    through unchanged.
    """
    global saved_dec,in_uses
    # Return if this is a comment
    if (not s[0].isspace()) and (not saved_dec) and (not in_uses):
        return s
    # collect lines for declarations
    # if we find an "implicit none" statement, store it and remove the line
    sls=s.lstrip().lower()
    indfunc=sls.find("function")
    indcom=sls.find("!")
    functest= (indfunc == -1 or -1<indcom<indfunc)
    # tests to exclude "real function" but allow "real ! function as
    # part of declaration block
    if (sls[0:8]=="implicit") or (sls[0:4]=="real" and functest) \
       or (sls[0:7]=="integer" and functest) \
       or (sls[0:9]=="character") or (sls[0:9]=="parameter") or \
       (sls[0:8]=="external") or (sls[0:9] == "intrinsic") or \
       (sls[0:7]=="logical" and functest) or (sls[0:9]=="dimension") or \
       (sls[0:4] == "data"):
        savedlines.append(s)
        saved_dec=1
        in_uses=0
        return None
    # if we are in the midst of declarations, save also comments (except for
    # "Common block") and continuations and blank lines as part of
    # what is moved.)
    if (saved_dec==1) and (sls == "" or s[0].lower() == "c" or sls[0]=="!" \
       or s[0]=="*") and (in_uses == 0) \
       and (s.find("Common block")==-1):
        savedlines.append(s)
        return None
    # Check for continuation line in midst of declarations
    if (saved_dec==1) and (len(s)>6):
        if (s[5].isspace() == 0):
            savedlines.append(s)
            return None
    if (sls[0:3] == "use"):
        in_uses=1
    if (saved_dec==1) and (sls != "") and s[0] != "c" and sls[0] != "!" and \
       (sls[0:3] != "use"):
        #This is our first executable statement. Add it to our saved
        # declarations lines and return them now
        templines = copy.copy(savedlines)
        templines.append(s)
        #empty out savedlines
        del savedlines[0:]
        saved_dec = 0
        in_uses=0
        return templines
    return s
inelseif = 0
savedelselines=""
def Elseifthen(s):
    """Ensure every Fortran 'elseif' ends with 'then'.

    Stateful line filter (module globals inelseif/savedelselines): an
    'elseif' without 'then' is buffered (None returned) together with any
    continuation lines; on the first non-continuation line the buffered
    statement is emitted with ' then' inserted -- before a trailing '!'
    comment if one exists, otherwise at the end of the last line.
    """
    # put a "then" at the end of an elseif if it isn't already there
    # need to check to see if next line is a continue
    global inelseif,savedelselines
    # return s if this is a comment
    if (not s[0].isspace()):
        return s
    if s.find("elseif")+1:
        if s.find("then")+1:
            return s
        # set a flag that we are in an "in-else-if" block that needs work
        inelseif = 1
        # If there is no "then" we need to save it to test if the next
        # line is a continuation
        savedelselines = s
        return(None)
    if (inelseif and len(s)>6 and not s[5].isspace()):
        # This is a continue line, add it to savedelselines
        savedelselines += s
        return(None)
    if (inelseif and (len(s)<6 or s[5].isspace())):
        # No longer in a continue, so process lines
        if savedelselines.find("then")+1:
            savedelselines += s
            inelseif = 0
            return savedelselines
        if savedelselines.split("\n")[-2].find("!")+1:
            # if last line in saved lines has a comment,
            # find index of last comment sign
            last = savedelselines.rfind("!")
            savedelselines=savedelselines[0:last]+ \
                " then "+savedelselines[last:] + s
            inelseif=0
            return savedelselines
        #Otherwise the last line has no comment so insert "then" at end
        savedelselines = savedelselines[0:-1]+" then\n" + s
        inelseif=0
        return savedelselines
    return s
M2Fsubrules = [("#","!"),Use2use,
("c!ifdef","#ifdef"),
("c!else","#else"),
("c!endif","#endif"),
("(Size4)","(kind=4)::"),
(":: function"," function"),
(" break "," exit "),
(" break\n"," exit\n"),
("while (","do while ("),
("endwhile","end do"),
(" call ruthere","c call ruthere"),
("c!include ","#include "),
Nopdb,
Petsc,
Omp,
Allot,
Elseifthen,
MoveDecs
]
#-------------------------------------
# Special for UEDGE
wordsizectr=0
def grdproc(s):
    """Replace the grd-package 'ifelse([WORDSIZE]...' construct with an
    explicit preprocessor block.

    Stateful filter (module global wordsizectr): when the trigger line is
    seen, a 4-line #if/#else/#endif replacement is emitted and the next 4
    input lines (the original m4 construct body) are suppressed by the
    countdown; all other lines pass through unchanged.
    """
    # process to eliminate ifelse write construction
    global wordsizectr
    if (s.find("ifelse([WORDSIZE]")+1):
        s="#if WORDSIZE == 64\n 2001 format(1p3e23.15)\n#else\n 2001 format(1p3d23.15)\n#endif\n"
#        s="#ifndef WORDSIZE\n 2001 format(1p3e23.15)\n#else\n 2001 format(1p3d23.15)\n#endif\n"
        wordsizectr=4
        return s
    elif wordsizectr > 0:
        wordsizectr -= 1
        return None
    else:
        wordsizectr = 0
        return s
M2Fsubrules.insert(6,grdproc)
M2Fsubrules.insert(3,("do i1=","do_i1: do i1="))
M2Fsubrules.insert(4,("break (2) ! exit do_i1","exit do_i1"))
M2Fsubrules.insert(5,("enddo ! do_i1","enddo do_i1"))
M2Fsubrules.insert(6,("float","real"))
M2Fsubrules.insert(6,("dfloat","real"))
| 6,579 | 32.917526 | 97 | py |
UEDGE | UEDGE-master/pyexamples/d3dHsm/runcase.py | #-import uedge
from uedge import *
#-import hdf5 routines
from uedge.hdf5 import *
#-import graphics, math, etc.
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
#-import some utilities for using OS
###execfile(os.path.join(os.environ['HOME'], 'utils/python/osfun.py'))
#-in .bashrc: "export PYLIB=/home/umansky1/PyUEDGE/uedge/pylib"
execfile(os.environ['PYLIB']+"/plotmesh.py")
execfile(os.environ['PYLIB']+"/plotcontour.py")
execfile(os.environ['PYLIB']+"/plotvar.py")
execfile(os.environ['PYLIB']+"/paws.py")
execfile(os.environ['PYLIB']+"/osfun.py")
plt.ion()
#-read UEDGE settings
execfile("rd_d3dHsm_in.py")
#-do a quick preliminary run to set all internals
bbb.restart=0; bbb.ftol=1e10; bbb.dtreal = 1e-6; bbb.exmain()
#-show grid
plotmesh(iso=1)
wait = raw_input("PAUSING, PRESS ENTER TO CONTINUE...")
#-run to steady state
bbb.restart=1; bbb.ftol=1e-8;
bbb.isbcwdt=1
bbb.dtreal = 1e-14; bbb.itermx=30; bbb.exmain()
bbb.t_stop=1e0
bbb.rundt()
bbb.dtreal=1e20; bbb.isbcwdt=0; bbb.exmain()
#-show some results
plotvar(bbb.te/bbb.ev)
#-export the solution in hdf5 file
hdf5_save('mycase.h5')
#-can be imported with this command
#hdf5_restore('mycase.h5')
###-refine the grid, interpolate to new grid, and restart:
#com.nxleg[0,0]=20; bbb.newgeo=1; bbb.icntnunk=0
#bbb.dtreal = 1e-14; bbb.isbcwdt=1; bbb.itermx=30; bbb.exmain()
###-time advance by another second
#bbb.t_stop=2e0; bbb.rundt()
###-now to steady state (infinite time)
#bbb.dtreal=1e20; bbb.isbcwdt=0; bbb.exmain()
###-show some results
#plotvar(bbb.te/bbb.ev)
| 1,603 | 21.591549 | 70 | py |
UEDGE | UEDGE-master/pyexamples/d3dHsm/rd_d3dHsm_in.py | #
#
###########################################################################
# DESCRIPTION OF PROBLEM (d3dHsm) from FACETS test suite:
# DIII-D single-null geometry with 5 variables (ni,upi,te,ti,ng) and a
# (16+2)*(8+2)=18x10 [poloidal*radial] mesh yielding 900 variables.
# Solver used is Newton Krylov (svrpkg="nksol") and preconditioner uses a
# direct banded solver for the LU decomposition (premeth="banded"). Iterates
# to steady-state solution from an initial profile file (HF5).
###########################################################################
###import uedge
from uedge import *
# Set the geometry
bbb.mhdgeo = 1 #=1 use MHD equilibrium files
#flx.aeqdskfname = "aeqdskd3d" #name of EFIT 'a' file for flux-surface mesh
#flx.geqdskfname = "neqdskd3d" #name of EFIT 'g' or 'n' file for flux-sur mesh
flx.psi0min1 = 0.98 #normalized flux on core bndry
flx.psi0min2 = 0.98 #normalized flux on pf bndry
flx.psi0sep = 1.00001 #normalized flux at separatrix
flx.psi0max = 1.07 #normalized flux on outer wall bndry
bbb.ngrid = 1 #number of mesh sequenc. (always set to 1)
com.nxleg[0,0] = 4 #pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4 #pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4 #pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4 #pol. mesh pts from x-point to outer plate
com.nysol[0] = 6 #rad. mesh pts in SOL
com.nycore[0] = 2 #rad. mesh pts in core
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33 #ion continuty eqn
bbb.methu = 33 #ion parallel momentum eqn
bbb.methe = 33 #electron energy eqn
bbb.methi = 33 #ion energy eqn
bbb.methg = 33 #neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 2.5e19 #hydrogen ion density on core
## iflcore = 0 #flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 100. #core Te
bbb.tcorei = 100. #core Ti
bbb.tedge = 2. #fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.8 #hydrogen recycling coeff at plates
# Transport coefficients (m**2/s)
bbb.difni[0] = 1. #D for radial hydrogen diffusion
bbb.kye = 1. #chi_e for radial elec energy diffusion
bbb.kyi = 1. #chi_i for radial ion energy diffusion
bbb.travis[0] = 1. #eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21 #electron parallel thermal conduct. coeff
bbb.flalfi = 0.21 #ion parallel thermal conduct. coeff
bbb.flalfv = 1. #ion parallel viscosity coeff
bbb.flalfgx = 1.e20 #neut. gas in poloidal direction
bbb.flalfgy = 1.e20 #neut. gas in radial direction
# Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
bbb.premeth = "banded" #Solution method for precond. Jacobian matrix
# Restart from a HDF5 or PDB savefile
bbb.restart = 1 #Begin from savefile, not estimated profiles
bbb.allocate() #allocates storage for arrays
#from uedge.hdf5 import *
#hdf5_restore("d3dHsm.h5")
if (0):
ue.restore("d3dHsm.h5")
bbb.dtreal = 1e20; bbb.exmain()
else:
#-set up some initial state
###ev=1.6022e-19
bbb.ngs=1e14; bbb.ng=1e14
bbb.nis=1e20; bbb.ni=1e20
bbb.ups=0.0; bbb.up=0.0
bbb.tes=bbb.ev; bbb.te=bbb.ev
bbb.tis=bbb.ev; bbb.ti=bbb.ev
# Atomic data switches
com.istabon = 0 #-analytic rates
###com.istabon = 10 #=10 specifics hydrogen data file ehr2.dat
| 3,347 | 36.617978 | 80 | py |
UEDGE | UEDGE-master/pyexamples/box2/plotcontour.py | import matplotlib
import matplotlib.gridspec as gridspec
gs=gridspec.GridSpec(2, 2)
plt.figure(10)
plt.subplot(gs[0,0])
CS = plt.contour(com.zm[:,:,0], com.rm[:,:,0], bbb.te/ev)
plt.clabel(CS, inline=1, fontsize=10)
params = {'mathtext.default': 'regular' }
plt.rcParams.update(params)
plt.title('T$\mathregular{_e}$ [ev]')
plt.ylabel('R [m]')
plt.grid(True)
plt.subplot(gs[0,1])
CS = plt.contour(com.zm[:,:,0], com.rm[:,:,0], bbb.ti/ev)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('T$\mathregular{_i}$ [ev]')
plt.grid(True)
plt.subplot(gs[1,0])
CS = plt.contour(com.zm[:,:,0], com.rm[:,:,0], bbb.ni[:,:,0]/1e20)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('N$\mathregular{_i}$/1e20 [m-3]')
plt.xlabel('Z [m]')
plt.ylabel('R [m]')
plt.grid(True)
plt.subplot(gs[1,1])
CS = plt.contour(com.zm[:,:,0], com.rm[:,:,0], bbb.up[:,:,0]/1e3)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('U$\mathregular{_p}$/1e3 [m/s]')
plt.xlabel('Z [m]')
plt.grid(True)
plt.show()
| 987 | 23.097561 | 66 | py |
UEDGE | UEDGE-master/pyexamples/box2/runcase.py | #import uedge
from uedge import *
#-import hdf5 routines
from uedge.hdf5 import *
#-import graphics, math, etc.
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
#-import some utilities for using OS
###execfile(os.path.join(os.environ['HOME'], 'utils/python/osfun.py'))
##-how to do this better?
#-in .bashrc: "export PYLIB=/home/umansky1/PyUEDGE/uedge/pylib"
execfile(os.environ['PYLIB']+"/plotmesh.py")
execfile(os.environ['PYLIB']+"/plotcontour.py")
execfile(os.environ['PYLIB']+"/plotvar.py")
execfile(os.environ['PYLIB']+"/paws.py")
execfile(os.environ['PYLIB']+"/osfun.py")
#execfile("../../plotmesh.py")
#execfile("../../pylib/plotvar.py")
#execfile("../../pylib/plotr.py")
#execfile("../../pylib/showrange.py")
#execfile("../../pylib/paws.py")
plt.ion()
#-read UEDGE settings
execfile("box2_in.py")
#-do a quick preliminary run to set all internals
bbb.restart=0; bbb.ftol=1e10; bbb.dtreal = 1e-6; bbb.exmain()
#-show grid
plotmesh()
wait = raw_input("PAUSING, PRESS ENTER TO CONTINUE...")
#-this should be done in uefacets
#ev=1.6022e-19
if (0):
hdf5_restore('mycase.h5')
bbb.dtreal = 1e20; bbb.exmain()
else:
#-set up some initial state
bbb.ngs=1e14; bbb.ng=1e14
bbb.nis=1e20; bbb.ni=1e20
bbb.ups=0.0; bbb.up=0.0
bbb.tes=bbb.ev; bbb.te=bbb.ev
bbb.tis=bbb.ev; bbb.ti=bbb.ev
#
#-Note: if you make a gap here then it will change the logic of if-else!
#
#-run to steady state
bbb.restart=1; bbb.ftol=1e-8;
bbb.isbcwdt=1
bbb.dtreal = 1e-14; bbb.itermx=30; bbb.exmain()
bbb.t_stop=1e0
bbb.rundt()
bbb.dtreal=1e20; bbb.isbcwdt=0; bbb.exmain()
hdf5_save('mycase.h5')
###execfile('plotcontour.py')
plotcontour()
paws()
##-now refine the solution on a larger grid
#com.nycore[0]=2
#com.nysol[0]=6
#com.nxleg[0,1]=8
#com.nxcore[0,1]=8
#bbb.restart=1; bbb.newgeo=1; bbb.icntnunk=0
#bbb.dtreal = 1e-14; bbb.ftol=1e-10;
#bbb.isbcwdt=1; bbb.itermx=30; bbb.exmain()
#plotmesh()
#paws()
#bbb.t_stop=2e0; bbb.ftol=1e-8; bbb.rundt()
#bbb.dtreal=1e20; bbb.isbcwdt=0; bbb.exmain()
#execfile('plotcontour.py')
#paws()
#==========================================================================#
| 2,306 | 23.284211 | 76 | py |
UEDGE | UEDGE-master/pyexamples/box2/box2_in.py | ###########################################################################
# DESCRIPTION OF PROBLEM (box2):
#
# This is a Python version of the box case from Andreas Holm
###########################################################################
#-Geometry
bbb.mhdgeo=-1 #-set cartesian geometry
bbb.isfixlb=2 #left boundary as sym. plane; no flux at cut
grd.radx= 4.e-2 #-outer "radial" wall
grd.rad0=0.0 #-location of 'radial' separ'x for cylinder or slab
grd.radm=-1.e-2 #-minimum "radial" position
grd.za0 = 0. #-poloidal symmetry plane location
grd.zax=3.0 #-poloidal location of divertor plate
grd.zaxpt=2.25 #-poloidal location of x-point
grd.alfyt=-2.0 #radial nonuniformity factor; < 0 => expanding
grd.alfxt=2.76 #poliodal nonuniformity factor; to make smooth
#transition to exp. grid, alfxt should satisfy
#the eqn dzun = (zax-zaxpt+dzun)
# (1-exp(-alfxt/(nx-ixpt2+1))) /
# (1-exp(-alfxt))
#where dzun = (zaxpt-za0)/ixpt2 and
#ixpt2 = ncore(1,2).
grd.btfix = 2. #constant total B-field
grd.bpolfix = .2 #constant poloidal B-field
#-Grid
bbb.gengrid=1; #-Note: for slab the grid is not saved in gridue
bbb.ngrid=1
com.nycore[0]=2
com.nysol[0]=4
com.nxleg[0,1]=3
com.nxcore[0,1]=3
#-Boundary conditions
bbb.isnicore[0]=1 #-same density at all core points
bbb.ncore=1.1e19 #-density on core boundary
bbb.iflcore=1 #if=1, specify core power
bbb.tcoree=25.0 #-used if iflcore=0
bbb.tcorei=25.0 #-used if iflcore=0
bbb.pcoree = 2.5e4 #-used if iflcore=1
bbb.pcorei = 2.5e4 #-used if iflcore=1
bbb.recycp=0.98 #-recycling coef at plates if ndatlb,rb=0
bbb.albdsi=0.99 #-albedos at inner gas source locations
bbb.albdso=0.99 #-albedos at inner gas source locations
bbb.istepfc=0; bbb.istipfc=0 #-priv. flux has zero temp. deriv.
bbb.istewc=0; bbb.istiwc=0 #-wall has zero temp. deriv.
bbb.bcee = 4.; bbb.bcei = 2.5 #-energy transmission coeffs.
bbb.bcen = 0. #-energy transmission coefficint for neutrals
bbb.isupss = 0 #-parallel vel sonic
bbb.isupcore = 0 #-parallel vel =0 on core bndry
#-Transport coefficients
bbb.difni=0.5
bbb.kye=0.7
bbb.kyi=0.7
bbb.travis=1.0
bbb.parvis=1.0
#-Flux limits
bbb.flalfe=0.2
bbb.flalfi=0.2
bbb.flalfgx=1.e0
bbb.flalfgy=1.e0
bbb.flalfgxy=1.e0
bbb.flalfv=0.5
# Finite difference algorithms
bbb.methe=33;bbb.methu=33;bbb.methg=33
bbb.methn=33;bbb.methi=33
#-Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
bbb.mfnksol=-3
bbb.epscon1=0.005
bbb.ftol=1e-10
bbb.premeth = "ilut" #Solution method for precond. Jacobian matrix
bbb.runtim=1e-07
bbb.rlx=0.9
###bbb.del=1.e-8 #-this one causes syntax error!
#-Neutral gas propeties
bbb.tfcx=5.;bbb.tfcy=5. #Franck-Condon temperatures
bbb.cngfx=1.;bbb.cngfy=1. #turn-on grad(T_g) flux if =1
bbb.cngflox=1.;bbb.cngfloy=0. #turn-on drift with ions if =1
bbb.cngmom = 1. #ion-gas momentum transfer
bbb.eion = 5. #birth energy of ions
bbb.ediss = 10. #dissoc. energy lost from elecs (eion=2*ediss)
bbb.isrecmon = 1 #=1 turns on recombination
bbb.cfupcx=1.0 # factor multipling momentum cx
bbb.cfticx=1.0 # factor multipling cx terms in ion energy eqn
#-Parallel neutral momentum equation
bbb.isupgon[0]=1
if (bbb.isupgon[0] == 1):
bbb.isngon=0
com.ngsp=1
com.nhsp=2
###bbb.ziin[com.nhsp-1]=1
bbb.ziin[0]=1
bbb.ziin[1]=0
#-the following are probably default, set them anyway to be sure
bbb.cngmom=0
bbb.cmwall=0
bbb.cngtgx=0
bbb.cngtgy=0
bbb.kxn=0
bbb.kyn=0
#-Currents and potential parameters
bbb.isphion=0
bbb.rsigpl=1.e-8 #anomalous cross-field conductivity
bbb.cfjhf=0. #turn-on heat flow from current (fqp)
bbb.jhswitch=0 #Joule Heating switch
# Atomic physics packages
#com.istabon=10 #DEGAS rates
com.istabon=0 #-analytic rates
#-Misc
bbb.restart=0
| 4,169 | 27.758621 | 75 | py |
UEDGE | UEDGE-master/pyscripts/paws.py | def paws():
programPause = raw_input("Press the <ENTER> key to continue...")
| 81 | 26.333333 | 68 | py |
UEDGE | UEDGE-master/pyscripts/uedge_lists.py | """
This module uses some of the Forthon methods to provide routines for
listing and searching the Uedge compiled packages.
"""
import uedge
import re
packages = [uedge.com,uedge.aph,uedge.api,uedge.bbb,uedge.flx,uedge.grd,uedge.svr,uedge.wdf,uedge.ncl]
def packagename2object(package):
    """Return the compiled package object whose name matches *package*,
    or None when no such package exists."""
    return next((pkg for pkg in packages if pkg.name() == package), None)
def list_packages(objects=None):
    """
    Return list of package string names,
    or the package objects themselves if *objects* is set.
    """
    # Idiom fixes: `is not None` instead of `!= None`, and a list
    # comprehension instead of a manual append loop.  Behavior unchanged.
    if objects is not None:
        return packages
    return [pkg.name() for pkg in packages]
def list_package_variables(package, attribute='', vars=None):
    """
    Return list of variable string names from package.
    package - string name of a Uedge package, or the package object itself.
    attribute='search string' can be either the group name or an
        attribute.  Search is case sensitive and must be exact.
    vars=[varlist] selection limited to varlist
    """
    names = []
    if type(package) is str:
        obj = packagename2object(package)
        if obj is not None:
            names.extend(obj.varlist(attribute))
    else:
        names.extend(package.varlist(attribute))
    if vars is None:
        return names
    return list(set(names) & set(vars))
def list_variable(var):
    """
    Print variable information of the name passed as a string.
    Do not include the package in the variable name.
    """
    for pkg in packages:
        if var in pkg.varlist():
            print(pkg.listvar(var))
def list_variables_glob(s, verbose=False, veryverbose=False, vars=None):
    """
    Return (and optionally print) variables whose name contains string *s*.
    Case insensitive.
    verbose=True will cause the variable comment to print
    veryverbose=True will cause all variable info to print
    vars=[varlist] search limited to varlist
    """
    matched = []
    target = s.upper()
    for pkg in packages:
        for name in pkg.varlist():
            if target in name.upper():
                if verbose: print(name + ' : ' + pkg.getvardoc(name))
                if veryverbose: print(pkg.listvar(name))
                matched.append(name)
    if vars is None:
        return matched
    return list(set(matched) & set(vars))
def list_variables_apropos(s, verbose=False, veryverbose=False, vars=None):
    """
    Return (and optionally print) variables whose comment contains string *s*.
    Case insensitive.
    verbose=True will cause the variable comment to print
    veryverbose=True will cause all variable info to print
    vars=[varlist] search limited to varlist
    """
    matched = []
    target = s.upper()
    for pkg in packages:
        for name in pkg.varlist():
            if target in pkg.getvardoc(name).upper():
                if verbose: print(name + ' : ' + pkg.getvardoc(name))
                if veryverbose: print(pkg.listvar(name))
                matched.append(name)
    if vars is None:
        return matched
    return list(set(matched) & set(vars))
def list_variables_regex(r, verbose=False, veryverbose=False, vars=None):
    """
    Return (and optionally print) variables whose comment matches the
    regular expression *r*.
    verbose=True will cause the variable comment to print
    veryverbose=True will cause all variable info to print
    vars=[varlist] search limited to varlist
    """
    matched = []
    for pkg in packages:
        for name in pkg.varlist():
            if re.search(r, pkg.getvardoc(name)):
                if verbose: print(name + ' : ' + pkg.getvardoc(name))
                if veryverbose: print(pkg.listvar(name))
                matched.append(name)
    if vars is None:
        return matched
    return list(set(matched) & set(vars))
def varlistattr(a):
    """
    Return list of variables with the given attribute. Includes the package
    prefix for use in file save functions.
    """
    return [pkg.name() + '.' + var
            for pkg in packages
            for var in pkg.varlist()
            if a in pkg.getvarattr(var).split()]
| 4,004 | 30.289063 | 103 | py |
UEDGE | UEDGE-master/pyscripts/osfun.py | def date():
os.system("date")
# Thin convenience wrappers that shell out via os.system; *opts* strings are
# passed to the shell verbatim (no quoting), so these are for interactive use.
def ls(opts=""):
    os.system("ls " + opts)
def more(fname):
    os.system("more " + fname)
def pwd():
    os.system("pwd")
def cp(opts=""):
    os.system("cp " + opts)
def mv(opts=""):
    os.system("mv " + opts)
| 254 | 13.166667 | 30 | py |
UEDGE | UEDGE-master/pyscripts/convert1.py | #!/usr/bin/env python
#
# $Id: convert1.py,v 7.1 2019/11/01 22:38:19 meyer8 Exp $
#
# To try solving linear critical gradient
import sys
import os
import getopt
import string
from . import convert
from .convert import *
# define the mppl to f90 class
class M2F(generic):
    # MPPL-to-Fortran converter: translates .m sources into .F files.
    # The actual file-processing machinery lives in the `generic` base
    # class from the convert module; this subclass only configures it.
    suffixin = "m"    # input-file extension handled by this converter
    suffixout = "F"   # extension given to converted output files
    subrules = globalsubrules + M2Fsubrules  # substitution rules, applied in order
def usage():
    """Print the command-line synopsis for this converter script."""
    message = "Usage: convert1.py -i <indir> -o <outdir> <infile>"
    print(message)
r"""
main(argv: array of strings)
"""
def main(argv):
    """Command-line driver: convert one MPPL file to Fortran.

    argv -- full argument vector (argv[0] is the program name), e.g.
            sys.argv.  Options: -h/--help, -i/--indir <dir>,
            -o/--outdir <dir>; the first positional argument is the
            file to convert.  Returns 0 on success; exits with status 2
            on a bad option.
    """
    try:
        # Bug fix: parse the argv parameter itself.  Previously sys.argv
        # was parsed, so the argv argument was silently ignored and main()
        # could not be driven programmatically.
        opts, args = getopt.getopt(argv[1:], "hi:o:", ["help",
                                                       "indir=", "outdir="])
    except getopt.GetoptError:
        # print help information and exit:
        usage()
        sys.exit(2)
    indir = "."
    outdir = "."
    # Go through the parsed options
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        if o in ("-i", "--indir"):
            indir = a
        elif o in ("-o", "--outdir"):
            outdir = a
    # The first positional argument is the file to convert
    fn = args[0]
    # Do the conversion
    convert.M2F = M2F
    m2f = M2F(indir, outdir)
    m2f.processfile(fn)
    return 0
# Script entry point: pass the real argv and propagate main's exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 1,164 | 17.492063 | 62 | py |
UEDGE | UEDGE-master/pyscripts/bas2py_rules.py | from uedge import *
import uedge.uedge_lists as ul
# [regex, replacement] pairs applied in order to translate a BASIS input
# script into equivalent Python for the uedge package.
subrules = [
    ['\(','['],                              # BASIS () indexing -> Python []
    ['\)',']'],
    [';','\n'],                              # statement separator -> newline
    ['^!','#!'],                             # leading '!' comment marker
    ['^ *',''],                              # strip leading whitespace
    ['^\t*',''],
    [r'\ballocate\b','bbb.allocate()'],      # map BASIS commands onto
    [r'\bexmain\b','bbb.exmain()'],          # their uedge equivalents
    [r'\bexponseed\b','grd.exponseed()'],
    ]
# Rules that only produce warnings; populated from package metadata.
warnrules = []
def raw_string(s):
    """Return s with special characters backslash-escaped (unicode-escape)."""
    return s.encode('unicode-escape').decode()
# Extend the rules with one substitution per uedge package and variable:
# a BASIS 'package X' statement becomes an import, and bare variable names
# become package-qualified Python names (e.g. ni -> bbb.ni).
for p in ul.list_packages():
    subrules.append([r'\b'+raw_string('package '+ p)+r'\b','from uedge import '+p])
    po = ul.packagename2object(p)
    for v in ul.list_package_variables(p):
        subrules.append([r'\b'+raw_string(v)+r'\b',p+'.'+v])
        if "Dimension:" in po.listvar(v):
            d = po.listvar(v).split("Dimension:")[1].split("\n")
            # Warn for arrays declared with a 0-based first index, since
            # translated subscripts may be off by one relative to BASIS.
            if "0:" in d[0]:
                warnrules.append([r'\b'+raw_string(v)+r'\b','base 0, '+d[0]])
# 'del' is a Python keyword, so the bbb.del variable is exposed as delpy.
subrules.append([r'\bbbb.del\b','bbb.delpy'])
| 878 | 25.636364 | 82 | py |
UEDGE | UEDGE-master/pyscripts/rdinitdt.py | # Setup file to run time-dependently using dtreal
# Change dtreal for starting dt and savefname to change pfb file name
# Once variables are set, read rdrundt to execute a time-dependent run
from uedge import *
i_stor = 0
nfe_tot = 0
savefn = "savedt.hdf5" # name of hdf5 savefile written every timestep
bbb.rdtphidtr = 1e20 # ratio dtphi/dtreal
bbb.ismfnkauto = 1 # if =1, mfnksol=3 for dtreal<dtmfnk3, otherwise=-3
bbb.dtmfnk3 = 5.e-4 # dtreal for mfnksol sign change if ismfnkauto=1
bbb.mult_dt = 3.4 # factor expanding dtreal after ii2max steps
bbb.ii1max = 500 # number of changes to dtreal
bbb.ii2max = 5 # number of timesteps at current dtreal
bbb.itermxrdc = 7 # value of itermx used by rdcontdt
bbb.incpset = 7 # iterations until Jacobian is recomputed
bbb.ftol_dt = 1.e-5 # fnrm tolerance for the time-dependent steps
bbb.ftol_min = 1e-9 # value of fnrm where time advance will stop
bbb.dt_tot = 0. # tot time accumulated for run (output, not input)
bbb.t_stop = 100. # value of dt_tot (sec) where calculation will stop
bbb.dt_max = 100. # maximum time step for dtreal
bbb.dt_kill = 1e-14 # min allowed time step; rdcontdt stops if reached
bbb.deldt_min = 0.04 # minimum relative change allowed for model_dt > 0
bbb.initjac = 0 # if=1, calc initial Jac upon reading rdcontdt
bbb.numrevjmax = 2 # number of dt reductions before Jac recalculated
bbb.numfwdjmax = 1 # number of dt increases before Jac recalculated
###bbb.ismmaxuc = 1 # =1 for intern calc mmaxu; =0,set mmaxu & dont chng
bbb.irev = -1 # flag to allow reduced dt advance after cutback
bbb.rlx = 0.9 # max. change in variable at each linear iteration
bbb.itermx = 7 # max. number of linear iterations allowed
bbb.tstor_s = 1e-5 # beginning time for storing solution
bbb.tstor_e = 1e-3 # ending time for storing solution
bbb.n_stor = 0 # number of linearly spaced storage points
bbb.ipt = 1 # index of variable; value printed at step
# if ipt not reset from unity, ipt=idxte(nx,iysptrx+1)
| 2,050 | 52.973684 | 76 | py |
UEDGE | UEDGE-master/pyscripts/ruthere.py | from numpy import *
import time
import signal
import sys
#############################################################################
# From Dave Grote:
# --- Setup signal handler to capture Control-C
# --- To use, first call arminterrupt(). Then at the place where the interrupt
# --- is allowed, call ruthere(). This will raise a KeyboardInterrupt if
# --- Control-C had been pressed.
# --- When a interrupt request is received, all this handler does is set a
# --- flag to that effect. Then, a subsequent call to ruthere will check
# --- that flag, and if set, raise an exception. This allows a graceful
# --- stop with the current time step completed.
# --- Set the following two in case ruthere is called before arminterrupt.
_defaultcontrolC = signal.getsignal(signal.SIGINT)
_controlCrecieved = False
savetracebacklimit = 0
def _handlecontrolC(signum, frame):
    # SIGINT handler: just record that Ctrl-C was pressed; ruthere() will
    # raise KeyboardInterrupt at the next safe point.
    global _controlCrecieved
    _controlCrecieved = True
def ruthere(reset=True):
    """
    Checks if an interrupt was requested (usually control-C). If so, then raise
    an exception. If reset is True, restore the original interrupt handler so that the
    calling code does not have to, and so that, if there is an exception, it gets
    restored (since the calling code is not returned to).
    """
    global _controlCrecieved
    global _defaultcontrolC
    global savetracebacklimit
    # Guard clause: nothing to do unless Ctrl-C was recorded by the handler.
    if not _controlCrecieved:
        return
    if reset:
        signal.signal(signal.SIGINT, _defaultcontrolC)
    _controlCrecieved = False
    raise KeyboardInterrupt("Interrupt requested")
def arminterrupt():
    """Install a SIGINT handler that defers Ctrl-C handling to ruthere().

    Saves the current handler and sys.tracebacklimit so that
    disarminterrupt() / ruthere() can restore them later.
    """
    global _controlCrecieved
    global _defaultcontrolC
    global savetracebacklimit
    _controlCrecieved = False
    _defaultcontrolC = signal.getsignal(signal.SIGINT)
    try:
        savetracebacklimit = sys.tracebacklimit
    except AttributeError:
        # Narrowed from a bare 'except:': sys.tracebacklimit is simply
        # unset unless someone assigned it.
        savetracebacklimit = None
    signal.signal(signal.SIGINT, _handlecontrolC)
    # Suppress tracebacks so the KeyboardInterrupt from ruthere() is terse.
    sys.tracebacklimit = 0
def disarminterrupt():
    """Restore the SIGINT handler and sys.tracebacklimit saved by arminterrupt()."""
    global _defaultcontrolC
    global savetracebacklimit
    signal.signal(signal.SIGINT, _defaultcontrolC)
    sys.tracebacklimit = savetracebacklimit
#=========================================================================
arminterrupt()
| 2,199 | 31.835821 | 82 | py |
UEDGE | UEDGE-master/pyscripts/__version__.py | __version__ = '8.0.0'
| 22 | 10.5 | 21 | py |
UEDGE | UEDGE-master/pyscripts/checkver.py |
import json
pkg = 'uedge'


def _version_tuple(ver):
    """Parse a dotted version string into an int tuple for comparison.

    Plain string comparison mis-orders versions (e.g. '10.0.0' < '8.0.0'
    lexicographically), so each dotted component is compared numerically.
    Components that are not pure integers (e.g. 'rc1') compare as -1
    rather than raising.
    """
    parts = []
    for piece in ver.split('.'):
        try:
            parts.append(int(piece))
        except ValueError:
            parts.append(-1)
    return tuple(parts)


# Version of the locally installed package (importlib.metadata on py3.8+,
# pkg_resources as a fallback on older interpreters).
try:
    import importlib.metadata
    thisver = importlib.metadata.version(pkg)
except:
    import pkg_resources
    thisver = pkg_resources.get_distribution(pkg).version
# Latest version published on PyPI (python 3 urllib first, python 2 fallback).
try:
    import urllib.request
    contents = urllib.request.urlopen('https://pypi.org/pypi/'+pkg+'/json').read()
    data = json.loads(contents.decode())
    thatver = data['info']['version']
except:
    import urllib
    contents = urllib.urlopen('https://pypi.org/pypi/'+pkg+'/json').read()
    data = json.loads(contents)
    thatver = str(data['info']['version'])
# Bug fix: versions were compared as raw strings (lexicographic), which
# mis-orders e.g. '10.0.0' vs '8.0.0'.  Compare numeric tuples instead.
this_v = _version_tuple(thisver)
that_v = _version_tuple(thatver)
print()
if this_v > that_v:
    # Local build is ahead of PyPI; nothing to report.
    pass
elif this_v == that_v:
    # Up to date; stay quiet.
    pass
elif this_v < that_v:
    print('Uedge version '+thisver+', an update is available to '+thatver)
else:
    print('Some error checking pypi version')
print()
| 928 | 24.108108 | 85 | py |
UEDGE | UEDGE-master/pyscripts/uedge.py | try:
import IPython
from IPython.terminal.prompts import Prompts,Token
from IPython.terminal.embed import InteractiveShellEmbed
except:
pass
try:
from traitlets.config.loader import Config
except:
pass
import sys,os,__main__
import numpy as np
from numpy import array,tanh,exp,arange
ArrayType = np.ndarray
if sys.hexversion >= 0x03000000:
# --- With Python3, the so files of each Fortran package are imported
# --- separately. The dlopen flag needs to be set so that cross references
# --- among the packages can be satisfied.
sys.setdlopenflags(os.RTLD_LAZY | os.RTLD_GLOBAL)
from . import uedgeC
from .uedgeC import *
#from Forthon import *
if sys.hexversion >= 0x03000000:
from .compy import com
from .grdpy import grd
from .flxpy import flx
from .bbbpy import bbb
from .svrpy import svr
from .wdfpy import wdf
from .apipy import api
from .aphpy import aph
from .nclpy import ncl
else:
from wdfpy import wdf
from grdpy import grd
from flxpy import flx
from bbbpy import bbb
from svrpy import svr
from apipy import api
from aphpy import aph
from compy import com
from nclpy import ncl
import time
import os.path
import __main__
# import all of the neccesary packages
def gettypecode(x):
    """Return the single-character dtype code of array x (old typecode())."""
    dtype = x.dtype
    return dtype.char
def oldnonzero(a):
    """Return the first-axis indices of nonzero elements of a (old Numeric API)."""
    indices_per_axis = a.nonzero()
    return indices_per_axis[0]
# Import the uedgeC shared object which contains all of UEDGE
try:
import PyPDB
from PyPDB import PW, PR
from PyPDB.pypdb import *
except:
# print "Unable to import PyPDB or * from PyPDB.pypdb."
# print "Will proceed to try to import pypdb in case of old installation."
try:
from pypdb import *
except:
# print "pypdb not found."
pass
# --- The UEDGE modules must be imported in the order below because of
# --- linking dependencies.
# --- Set default runid to first filename in the command line, stripping off
# --- the .py suffix.
if sys.argv[0]:
if sys.argv[0][-3:] == '.py':
h, t = os.path.split(sys.argv[0][:-3])
runid = t
del h, t
else:
h, t = os.path.split(sys.argv[0])
runid = t
del h, t
# --- Check if the compiler was ifort - if so, set the stacksize unlimited
# --- The fcompname is not yet be available yet if Forthon is not up to date
try:
if fcompname == 'ifort':
import resource
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
except:
pass
try:
    class MyPrompt(Prompts):
        # Custom IPython prompt: both input and output prompts display
        # 'UEDGE>>> '.
        def in_prompt_tokens(self, cli=None):
            return [(Token.Prompt, 'UEDGE>>> ')]
        def out_prompt_tokens(self, cli=None):
            return [(Token.Prompt, 'UEDGE>>> ')]
    # Bare-name probe: raises NameError when not running inside IPython.
    get_ipython
except:
    # Plain interpreter (or IPython classes unavailable): fall back to
    # setting the standard sys.ps1 prompt string.
    sys.ps1='UEDGE>>> '
else:
    # Under IPython: install the custom prompt on the active shell.
    ip = get_ipython()
    ip.prompts = MyPrompt(ip)
##############################################################################
###### Don't put anything below this line!!! ################################
##############################################################################
| 3,031 | 23.650407 | 78 | py |
UEDGE | UEDGE-master/pyscripts/uexec.py |
import sys
try:
from importlib import reload,import_module
except:
from importlib import import_module
import builtins
def uexec(mname, returns=None):
    """(Re)import module mname and copy its public names into a namespace.

    mname   -- module name to import; reloaded if already imported.
    returns -- dict receiving the names; defaults to the *caller's*
               globals.  (The old default, ``globals()``, was a mutable
               default evaluated once at definition time, so it always
               targeted this module's namespace instead of the caller's.)
    """
    if returns is None:
        # Default to the calling frame's global namespace.
        returns = sys._getframe(1).f_globals
    if mname in sys.modules:
        _m = reload(sys.modules[mname])
    else:
        _m = import_module(mname)
    # Respect an explicit __all__ if the module declares one
    if "__all__" in _m.__dict__:
        names = _m.__dict__["__all__"]
    else:
        # otherwise we import all names that don't begin with _
        names = [x for x in _m.__dict__ if not x.startswith("_")]
    # now drag them in
    for k in names:
        returns[k] = getattr(_m, k)
| 652 | 20.064516 | 65 | py |
UEDGE | UEDGE-master/pyscripts/uedgeplots.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.axes as ax
import sys
from matplotlib.collections import PolyCollection
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import rcdefaults
from matplotlib import interactive
from uedge import *
from numpy import sin, cos
from scipy import spatial
from skimage.util import img_as_ubyte as bytescale
# This file defines a function to plot the UEDGE mesh, and # then calls the function to plot the entire mesh.
# To use this file in a simple way, give the following ands:
# read plotmesh
# nf
# The function could then be used in a more sophisticated way
# to plot portions of the mesh, possibly with customized plot limits
# (by resetting acom.ny of r_min, r_max, z_min, and z_max):
# call plotmesh(ixmn,ixmx,iymn,iymx)
# nf
# where ixmn, ixmx, iymn, and iymx are integer variables or
# expressions. Always give an "nf" and after reading the file
# plotmesh or calling the function plotmesh.
# DEFINE THE PLOT FUNCTION --
def plotmesh(ixmin=None, ixmax=None, iymin=None, iymax=None,
             r_min=None, r_max=None, z_min=None, z_max=None, title=None,
             block=False, figsize=(4.0, 8.0),xlabel=None,ylabel=None):
    """
    plotmesh(ixmin=<int>,ixmax=<int>,iymin=<int>,iymax=<int>
         title=<string>,r_min=<val>,r_max=<val>,z_min=<val>,z_max=<val>,
         block=<True|False>,xlabel=None,ylabel=None,zlabel=None)
       Plot the uedge grid.
       where ixmin, ixmax, iymin, and iymax are integer variables or
       expressions used to plot a portion of the grid. title is used as
       both the title and the figure name. Block default is True.
       The plot axis limits may be specified with r_rmin,r_max,z_min,z_max.
    """
    # For an upper single-null geometry, reflect Z about the midplane so
    # the plot has the conventional orientation.
    zrefl = com.zm
    zlim = com.ylim
    zreflbdry = com.zbdry
    if str(com.geometry) == str([b'uppersn ']):
        zrefl = 2.0 * com.zmid - com.zm
        zlim = 2.0 * com.zmid - com.ylim
        zreflbdry = 2.0 * com.zmid - com.zbdry
    # PEP 8: identity comparison ('is None') replaces '== None' throughout.
    if ixmin is None:
        ixmin = com.nxomit
    if ixmax is None:
        ixmax = (com.nxm-1)
    if iymin is None:
        iymin = 0
    if iymax is None:
        iymax = (com.ny-1)
    if r_min is None:
        r_min = com.rm.min()
    if r_max is None:
        r_max = com.rm.max()
    if z_min is None:
        z_min = zrefl.min()
    if z_max is None:
        z_max = zrefl.max()
    rcdefaults()
    if title is None:
        title = 'Uedge Grid'
    fig,ax = plt.subplots(figsize=figsize)
    ax.set_title(title)
    # Best effort: limiter/separatrix arrays may be absent for some grids.
    try:
        ax.plot(com.xlim, zlim, 'k-', label='Limiter', linewidth=3)
        ax.plot(com.xlim, zlim, 'y-', label='Limiter', linewidth=1)
        ax.plot(com.rbdry, zreflbdry, 'b-', label='Last Closed')
    except:
        pass
    # NOTE(review): the loops index from 0 rather than ixmin/iymin, so a
    # nonzero ixmin/iymin shifts which cells are drawn — confirm intent.
    for ix in range(ixmax-ixmin+1):
        for iy in range(iymax-iymin+1):
            r0 = [com.rm[ix, iy, 1], com.rm[ix, iy, 2],
                  com.rm[ix, iy, 4], com.rm[ix, iy, 3], com.rm[ix, iy, 1]]
            z0 = [zrefl[ix, iy, 1], zrefl[ix, iy, 2],
                  zrefl[ix, iy, 4], zrefl[ix, iy, 3], zrefl[ix, iy, 1]]
            ax.plot(r0, z0, 'k-', label='Grid', linewidth=1)
    if ylabel is None: ax.set_ylabel('Z (m)')
    else: ax.set_ylabel(ylabel)
    if xlabel is None: ax.set_xlabel('R (m)')
    else: ax.set_xlabel(xlabel)
    ax.set_ylim(z_min, z_max)
    ax.set_xlim(r_min, r_max)
    ax.set_aspect('equal')
    plt.ion()
    plt.show(block=block)
    plt.pause(0.001)
def plotanymesh(verts, r_min=None, r_max=None, z_min=None, z_max=None, title=None,
                block=False, figsize=(4.0, 8.0),xlabel=None,ylabel=None):
    """
    plotanymesh(verts, title=<string>,r_min=<val>,r_max=<val>,z_min=<val>,z_max=<val>,
         block=<True|False>,xlabel=None,ylabel=None)
       Plot any polynomial NxM grid. verts dimensions are [0:N,0:M,0:nverts,0:2].
       Last dim is [:,:,:,0] is R array, [:,:,:,1] is Z array
       title is used as both the title and the figure name. Block default is True.
       The plot axis limits may be specified with r_rmin,r_max,z_min,z_max.
    """
    # Reflection only affects the limiter/boundary overlay; the verts
    # themselves are plotted as supplied.
    zrefl = com.zm
    zlim = com.ylim
    zreflbdry = com.zbdry
    if str(com.geometry) == str([b'uppersn ']):
        zrefl = 2.0 * com.zmid - com.zm
        zlim = 2.0 * com.zmid - com.ylim
        zreflbdry = 2.0 * com.zmid - com.zbdry
    # PEP 8: identity comparison ('is None') replaces '== None' throughout.
    if r_min is None:
        r_min = np.min(verts[:,:,:,0])
    if r_max is None:
        r_max = np.max(verts[:,:,:,0])
    if z_min is None:
        z_min = np.min(verts[:,:,:,1])
    if z_max is None:
        z_max = np.max(verts[:,:,:,1])
    rcdefaults()
    if title is None:
        title = 'Grid'
    fig,ax = plt.subplots(figsize=figsize)
    ax.set_title(title)
    # Best effort: limiter/separatrix arrays may be absent for some grids.
    try:
        ax.plot(com.xlim, zlim, 'k-', label='Limiter', linewidth=3)
        ax.plot(com.xlim, zlim, 'y-', label='Limiter', linewidth=1)
        ax.plot(com.rbdry, zreflbdry, 'b-', label='Last Closed')
    except:
        pass
    s = verts.shape
    xlen = s[0]
    ylen = s[1]
    for ix in range(xlen):
        for iy in range(ylen):
            # Close each quadrilateral by repeating the first vertex.
            r0 = [verts[ix, iy, 0, 0], verts[ix, iy, 1, 0],
                  verts[ix, iy, 2, 0], verts[ix, iy, 3, 0],
                  verts[ix, iy, 0, 0]]
            z0 = [verts[ix, iy, 0, 1], verts[ix, iy, 1, 1],
                  verts[ix, iy, 2, 1], verts[ix, iy, 3, 1],
                  verts[ix, iy, 0, 1]]
            ax.plot(r0, z0, 'k-', label='Grid', linewidth=1)
    if ylabel is None: ax.set_ylabel('Z (m)')
    else: ax.set_ylabel(ylabel)
    if xlabel is None: ax.set_xlabel('R (m)')
    else: ax.set_xlabel(xlabel)
    ax.set_ylim(z_min, z_max)
    ax.set_xlim(r_min, r_max)
    ax.set_aspect('equal')
    plt.ion()
    plt.show(block=block)
    plt.pause(0.001)
def plotmeshval(val, ixmin=None, ixmax=None, iymin=None, iymax=None,
                r_min=None, r_max=None, z_min=None, z_max=None, title=None, units=None,
                block=False,xlabel=None,ylabel=None,zlabel=None,figsize=(5.0,8.0)):
    """
    plotmeshval(val,ixmin=<int>,ixmax=<int>,iymin=<int>,iymax=<int>
             title=<string>,units=<string>,block=<True|False>
             xlabel=None,ylabel=None,zlabel=None)
       Display Uedge 2-D quantity using polyfill.
       where ixmin, ixmax, iymin, and iymax are integer variables or
       expressions used to plot a portion of the grid. title is used as
       both the title and the figure name. Units are displayed in the
       side colorbar. Block default is True.
       The plot axis limits may be specified with r_rmin,r_max,z_min,z_max.
    """
    # For an upper single-null geometry, reflect Z about the midplane.
    zrefl = com.zm
    zlim = com.ylim
    zreflbdry = com.zbdry
    if str(com.geometry) == str([b'uppersn ']):
        zrefl = 2.0 * com.zmid - com.zm
        zlim = 2.0 * com.zmid - com.ylim
        zreflbdry = 2.0 * com.zmid - com.zbdry
    # PEP 8: identity comparison ('is None') replaces '== None' throughout.
    if ixmin is None:
        ixmin = com.nxomit
    if ixmax is None:
        ixmax = (com.nxm-1)
    if iymin is None:
        iymin = 0
    if iymax is None:
        iymax = (com.ny-1)
    if r_min is None:
        r_min = com.rm.min()
    if r_max is None:
        r_max = com.rm.max()
    if z_min is None:
        z_min = zrefl.min()
    if z_max is None:
        z_max = zrefl.max()
    rcdefaults()
    if title is None:
        title = 'Uedge'
    fig, ax = plt.subplots(figsize=figsize)
    # Build one quadrilateral per cell and the matching color value.
    verts = np.array([])
    z = np.array([])
    for ix in range(ixmax-ixmin+1):
        for iy in range(iymax-iymin+1):
            v = []
            v.append([com.rm[ix, iy, 1], zrefl[ix, iy, 1]])
            v.append([com.rm[ix, iy, 2], zrefl[ix, iy, 2]])
            v.append([com.rm[ix, iy, 4], zrefl[ix, iy, 4]])
            v.append([com.rm[ix, iy, 3], zrefl[ix, iy, 3]])
            verts = np.append(verts, v)
            z = np.append(z, val[ix, iy])
    verts = verts.reshape(len(z), 4, 2)
    ax.set_title(title)
    if ylabel is None: ax.set_ylabel('Z (m)')
    else: ax.set_ylabel(ylabel)
    if xlabel is None: ax.set_xlabel('R (m)')
    else: ax.set_xlabel(xlabel)
    # Best effort: limiter/separatrix arrays may be absent for some grids.
    try:
        ax.plot(com.xlim, zlim, 'k-', label='Limiter', linewidth=3)
        ax.plot(com.xlim, zlim, 'y-', label='Limiter', linewidth=1)
        ax.plot(com.rbdry, zreflbdry, 'b-', label='Last Closed')
    except:
        pass
    coll = PolyCollection(verts, array=z, cmap=cm.jet, edgecolors='face')
    ax.add_collection(coll)
    ax.autoscale_view()
    cbar = fig.colorbar(coll, ax=ax,label=zlabel)
    # if units != None: cbar.ax.set_ylabel(units,rotation=-90,va='bottom')
    if units is not None:
        cbar.ax.set_ylabel(units, va='bottom')
    ax.set_ylim(z_min, z_max)
    ax.set_xlim(r_min, r_max)
    ax.set_aspect('equal')
    plt.ion()
    plt.show(block=block)
    plt.pause(0.001)
def plotanymeshval(verts,z, r_min=None, r_max=None, z_min=None, z_max=None, title=None, units=None,
                   block=False,xlabel=None,ylabel=None,zlabel=None):
    """
    plotanymeshval(verts, val, title=<string>,units=<string>,block=<True|False>,
             xlabel=None,ylabel=None,zlabel=None)
       Display 2-D (NxM) quantity, val, using polyfill of NxM polynomial grid verts
       verts dimensions are [0:N,0:M,0:nverts,0:2].
       Last dim is [:,:,:,0] is R array, [:,:,:,1] is Z array
       title is used as both the title and the figure name. Units are displayed in the
       side colorbar. Block default is True.
       The plot axis limits may be specified with r_rmin,r_max,z_min,z_max.
    """
    # Reflection only affects the limiter/boundary overlay and the default
    # axis limits; verts/z are plotted as supplied.
    zrefl = com.zm
    zlim = com.ylim
    zreflbdry = com.zbdry
    if str(com.geometry) == str([b'uppersn ']):
        zrefl = 2.0 * com.zmid - com.zm
        zlim = 2.0 * com.zmid - com.ylim
        zreflbdry = 2.0 * com.zmid - com.zbdry
    # PEP 8: identity comparison ('is None') replaces '== None' throughout.
    if r_min is None:
        r_min = com.rm.min()
    if r_max is None:
        r_max = com.rm.max()
    if z_min is None:
        z_min = zrefl.min()
    if z_max is None:
        z_max = zrefl.max()
    rcdefaults()
    if title is None:
        title = 'Uedge'
    fig, ax = plt.subplots()
    ax.set_title(title)
    if ylabel is None: ax.set_ylabel('Z (m)')
    else: ax.set_ylabel(ylabel)
    if xlabel is None: ax.set_xlabel('R (m)')
    else: ax.set_xlabel(xlabel)
    # Best effort: limiter/separatrix arrays may be absent for some grids.
    try:
        ax.plot(com.xlim, zlim, 'k-', label='Limiter', linewidth=3)
        ax.plot(com.xlim, zlim, 'y-', label='Limiter', linewidth=1)
        ax.plot(com.rbdry, zreflbdry, 'b-', label='Last Closed')
    except:
        pass
    coll = PolyCollection(verts, array=z, cmap=cm.jet, edgecolors='face')
    ax.add_collection(coll)
    ax.autoscale_view()
    cbar = fig.colorbar(coll, ax=ax,label=zlabel)
    # if units != None: cbar.ax.set_ylabel(units,rotation=-90,va='bottom')
    if units is not None:
        cbar.ax.set_ylabel(units, va='bottom')
    ax.set_ylim(z_min, z_max)
    ax.set_xlim(r_min, r_max)
    ax.set_aspect('equal')
    plt.ion()
    plt.show(block=block)
    plt.pause(0.001)
def mkdensityfile(filename, ival, renmin=None, renmax=None, samples=[500, 500, 500],
                  xrange=[-2.4, 2.4], yrange=[-2.4, 2.4], zrange=[0, 3.2], tree=None):
    """
    mkdensityfile(filename, ival,renmin=<float>,renmax=<float>,
                  samples=[<xsamps>,<ysamps>,<zsamps>],
                  xrange=[xmin,xmax],yrange=[ymin,ymax],zrange=[zmin,zmax],
                  tree=<cKDTree object> )
       Output Povray include and density field file (df3) for rendering.
       where: renmin,renmax are the values scaled to 0,255 in the final
                  bytescaling
              samples is an array of three values giving the volume sampling
                  for the density file (def [500,500,500])
              xrange, yrange, zrange are the vessel dimensions of the sampled
                  volume (m) (def xrange[-2.4,2.4], yrange[-2.4,2.4],
                  zrange[0,3.2])
              tree is returned and may be reused for another call for efficiency
       The defaults are set for DIII-D and will sample the full torus at
       about 1cm r resolution and .6cm in z.
    """
    zrefl = com.zm
    zlim = com.ylim
    zreflbdry = com.zbdry
    if str(com.geometry) == str([b'uppersn ']):
        zrefl = 2.0 * com.zmid - com.zm
        zlim = 2.0 * com.zmid - com.ylim
        zreflbdry = 2.0 * com.zmid - com.zbdry
    # PEP 8: identity comparison ('is None') replaces '== None' throughout.
    if renmin is None:
        renmin = np.min(ival)
    if renmax is None:
        renmax = np.max(ival)
    nx, ny, nz = samples
    # df3 header: three big-endian int16 dimensions.
    dims = np.array([nx, ny, nz], dtype=np.int16)
    file = open(filename, 'wb')
    if sys.byteorder == 'little':
        file.write(dims.byteswap(True))
    else:
        file.write(dims)
    rrm = com.rm[:, :, 0].ravel()
    rzm = zrefl[:, :, 0].ravel()
    if tree is None:
        tree = spatial.cKDTree(list(zip(rrm, rzm)))
    # Sample the volume on a regular (z, x, y) lattice and look up the
    # nearest grid cell for each sample point.
    z, x, y = np.mgrid[
        zrange[0]:zrange[1]:complex(0, nz),
        xrange[0]:xrange[1]:complex(0, nx),
        yrange[0]:yrange[1]:complex(0, ny)
    ]
    r = (x*x + y*y)**0.5
    pts = list(zip(r.ravel(), z.ravel()))
    #d,i = tree.query(pts,k=1,distance_upper_bound=0.1)
    d, i = tree.query(pts, k=1)
    # Bug fix: work on a copy so the caller's ival array is not clobbered
    # by the guard-cell zeroing below (it was modified in place before).
    val = ival.copy()
    val[0, :] = 0
    val[-1, :] = 0
    val[:, 0] = 0
    val[:, -1] = 0
    vf = np.append(val.ravel(), [renmin])
    #dens = bytescale(np.average(vf[i],axis=1,weights=1./d),cmin=renmin,cmax=renmax)
    dens = bytescale((vf[i] - renmin)/(renmax - renmin))
    file.write(dens)
    file.close()
    return tree
def profile(rval, zval, title=None, style=None, linewidth=None, xlabel=None, ylabel=None, figsize=(4.0, 8.0), block=False,marker=None):
    """
    profile(xval,yval,title=<None>,style=<None>,linewidth=<None>,xlabel=<None>,ylabel=<None>,block=<True|False>,marker=<none>)
    title is used as both the title and the figure name.
    Interactive is turned on so subsequent calls go to the same plot
    Style encoded color, line, and marker. See matplotlib documention.
       examples: black solid line - style='k-'
                 red circle marks - style='ro'
                 green x marks and dotted line - style='gx--'
    """
    rcdefaults()
    interactive(True)
    if title is None:
        title = 'Uedge Profile'
    if style is None:
        style = 'k-'
    # Bug fix: lw was only assigned when linewidth was None, so passing an
    # explicit linewidth raised NameError at the plot call below.
    lw = 1 if linewidth is None else linewidth
    fig,ax = plt.subplots(figsize=figsize)
    ax.set_title(title)
    # Best effort: ignore plotting errors from mismatched inputs.
    try:
        ax.plot(rval, zval, style, linewidth=lw,marker=marker)
    except:
        pass
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    plt.ion()
    plt.show(block=block)
    plt.pause(0.001)
| 14,603 | 33.524823 | 135 | py |
UEDGE | UEDGE-master/pyscripts/rundt.py | # Holm10 Nov 5 2019, based on rdcontdt.py
# 191121 - Created hdf5-routines to read and save time-dependent data
# Writes and reads dictionary with multi-dimensional arrays
# containing all restore-parameters.
# 230210 - Updated old rundt function to RunData class, modularizing
# all functionalities. Added a comprehensive diagnostics suite
# plotting fnrm evolution as function of exmains(), plasma
# time, and wall-clock time. Still need to test time-slicing
# ` procedures
# 230327 - Removed old routine, created wrapper function rundt for
# object. Renamed Object to UeRun.
# 230522 - Fixed bug associated with itroub, improved itroub visualization
from matplotlib.pyplot import ion
ion()
class UeRun():
''' Class containing information on run '''
    def __init__(self, n_stor = False):
        """Record grid/equation metadata and create empty history buffers.

        n_stor -- currently unused placeholder for time-slice storage.
        """
        from time import time
        from numpy import array
        from uedge import bbb, com
        # TODO: Add restore/recover from timeslice
        # TODO: Add plot timeslice directly
        # NOTE: No -> Utilize direct I/O from file instead
        self.tstart = time()        # wall-clock start of the run
        self.numvar = bbb.numvar    # equations solved per cell
        # Grid topology, captured so saved runs are self-describing.
        self.nx = com.nx
        self.ny = com.ny
        self.ixpt1 = com.ixpt1[0]
        self.ixpt2 = com.ixpt2[0]
        self.iysptrx = com.iysptrx
        # Order matches the order of bbb.idx* arrays used in itroub().
        self.equationkey = array([b'te', b'ti', b'phi', b'up', b'ni', b'ng',
            b'tg'])
        self.classvars = ['slice_ni', 'slice_ng', 'slice_up', 'slice_te',
            'slice_ti', 'slice_tg', 'slice_phi', 'slice_dttot', 'time',
            'fnorm', 'nfe', 'dt_tot', 'dtreal', 'ii1', 'ii2', 'ii1fail',
            'ii2fail', 'dtrealfail', 'itrouble', 'troubleeq', 'troubleindex',
            'ylfail', 'isteon', 'istion', 'isupon', 'isphion', 'isupgon',
            'isngon', 'istgon', 'ishymol', 'nisp', 'ngsp', 'nhsp', 'nhgsp',
            'nzsp', 'b0', 'ncore', 'pcoree', 'pcorei', 'internaleq',
            'internalspecies', 'yldotsfscalfail']
        # Initialize every history variable to an empty list on the instance.
        for var in self.classvars:
            self.__setattr__(var, [])
    def itroub(self):
        ''' Function that displays information on the problematic equation '''
        from numpy import mod, argmax, where, array, argmin
        from uedge import bbb
        from copy import deepcopy
        # One idx array per equation type, in the same order as equationkey.
        self.equations = [bbb.idxte, bbb.idxti, bbb.idxphi,
            bbb.idxu, bbb.idxn, bbb.idxg, bbb.idxtg]
        equationsdescription = [ 'Electron energy', 'Ion energy', 'Potential',
            'Ion momentum', 'Ion density', 'Gas density', 'Gas temperature']
        # Find the fortran index of the troublemaking equation
        # (largest scaled residual; +1 converts to 1-based Fortran indexing).
        self.neq = bbb.neq
        self.itrouble.append(deepcopy(argmax(abs(bbb.yldot*\
            bbb.sfscal)[:bbb.neq])+1))
        print("** Fortran index of trouble making equation is:\n{}".format(\
            self.itrouble[-1]))
        # Print equation information
        print("** Number of equations solved per cell:\n numvar = {}\n"\
            .format(self.numvar))
        # Position of the equation within its cell's block of numvar equations.
        self.troubleeq.append(mod(self.itrouble[-1]-1, bbb.numvar)+1)
        species = ''
        # Which equation type contains the trouble index (exact match -> 0).
        self.internaleq.append([abs(x - self.itrouble[-1]).min() for x in \
            self.equations].index(0))
        if self.equations[self.internaleq[-1]].ndim == 3:
            # Multi-species equation: recover the (1-based) species index.
            self.internalspecies.append( where(\
                self.equations[self.internaleq[-1]] == self.itrouble[-1])\
                [-1][0] + 1)
            species = ' of species {}'.format(self.internalspecies[-1])
        else:
            self.internalspecies.append(0)
        print('** Troublemaker equation is:\n{} equation{}: iv_t={}\n'\
            .format(equationsdescription[self.internaleq[-1]], species,
            self.troubleeq[-1]))
        # Display additional information about troublemaker cell
        self.troubleindex.append(deepcopy(bbb.igyl[self.itrouble[-1]-1,]))
        self.dtrealfail.append(deepcopy(bbb.dtreal))
        self.ylfail.append(deepcopy(bbb.yl[self.itrouble[-1]-1]))
        self.yldotsfscalfail.append(deepcopy((bbb.yldot*bbb.sfscal)\
            [self.itrouble[-1]-1]))
        print('** Troublemaker cell (ix,iy) is:\n' + \
            '{}\n'.format(self.troubleindex[-1]))
        print('** Timestep for troublemaker equation:\n' + \
            '{:.4e}\n'.format(self.dtrealfail[-1]))
        print('** yl for troublemaker equation:\n' + \
            '{:.4e}\n'.format(self.ylfail[-1]))
        print('** yl*sfscal for troublemaker equation:\n' + \
            '{:.4e}\n'.format(self.yldotsfscalfail[-1]))
def savesuccess(self, ii1, ii2, savedir, savename, fnrm=None):
from time import time
from uedge import bbb
from copy import deepcopy
self.time.append(time())
if fnrm is None:
bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
self.fnorm.append(deepcopy((sum((bbb.yldot[:bbb.neq]*\
bbb.sfscal[:bbb.neq])**2))**0.5))
else:
self.fnorm.append(fnrm)
self.nfe.append(deepcopy(bbb.nfe))
self.dt_tot.append(deepcopy(bbb.dt_tot))
self.dtreal.append(deepcopy(bbb.dtreal))
self.ii1.append(ii1)
self.ii2.append(ii2)
self.neq = bbb.neq
try:
self.save('{}_UeCase.hdf5'.format(savefname.split('.')[0]))
except:
pass
self.save_intermediate(savedir, savename)
def store_timeslice(self):
from copy import deepcopy
from uedge import bbb
self.slice_ni.append(deepcopy(bbb.ni))
self.slice_ng.append(deepcopy(bbb.ng))
self.slice_up.append(deepcopy(bbb.up))
self.slice_te.append(deepcopy(bbb.te))
self.slice_ti.append(deepcopy(bbb.ti))
self.slice_tg.append(deepcopy(bbb.tg))
self.slice_phi.append(deepcopy(bbb.phi))
self.slice_dttot.append(deepcopy(bbb.dt_tot))
def save_intermediate(self, savedir, savename):
from uedge.hdf5 import hdf5_save
from uedge import bbb, com
from h5py import File
for var in [ 'isteon', 'istion', 'isupon', 'isphion', 'isupgon',
'isngon', 'istgon', 'ishymol']:
self.__setattr__(var, bbb.__getattribute__(var))
for var in [ 'nisp', 'ngsp', 'nhsp', 'nhgsp', 'nzsp']:
self.__setattr__(var, com.__getattribute__(var))
try:
hdf5_save('{}/{}_last_ii2.hdf5'.format(savedir,savename))
except:
print('Folder {} not found, saving output to cwd...'\
.format(savedir))
hdf5_save('{}_last_ii2.hdf5'.format(savename))
# Try to store ready-made Case-file, if possible
try:
self.save('{}/{}_last_ii2_Case.hdf5'.format(savedir,savename))
except:
pass
try:
file = File('{}/{}_last_ii2.hdf5'.format(savedir, savename), 'r+')
except:
file = File('{}_last_ii2.hdf5'.format(savename), 'r+')
file.require_group('convergence')
group = file['convergence']
group.create_dataset('t_start', data=self.tstart)
group.create_dataset('numvar', data=self.numvar)
group.create_dataset('neq', data=self.neq)
group.create_dataset('nx', data=self.nx)
group.create_dataset('ny', data=self.ny)
group.create_dataset('ixpt1', data=self.ixpt1)
group.create_dataset('ixpt2', data=self.ixpt2)
group.create_dataset('iysptrx', data=self.iysptrx)
group.create_dataset('equationkey', data=self.equationkey)
group.create_dataset('itermx', data=self.itermx)
group.create_dataset('incpset', data=self.incpset)
group.create_dataset('ii1max', data=self.ii1max)
group.create_dataset('ii2max', data=self.ii1max)
group.create_dataset('numrevjmax', data=self.numrevjmax)
group.create_dataset('numfwdjmax', data=self.numfwdjmax)
group.create_dataset('numtotjmax', data=self.numtotjmax)
group.create_dataset('rdtphidtr', data=self.rdtphidtr)
group.create_dataset('deldt_min', data=self.deldt_min)
group.create_dataset('rlx', data=self.rlx)
for var in self.classvars:
group.create_dataset(var, data=self.__getattribute__(var))
file.close()
def convergenceanalysis(savefname, savedir='../solutions', fig=None,
xaxis = 'exmain', logx = False, color='k', label=None,
ylim = (None, None)):
from h5py import File
from matplotlib.pyplot import subplots
from datetime import timedelta
from matplotlib.ticker import FuncFormatter
from numpy import cumsum, ones
if fig is None:
f, ax = subplots(1, 3, figsize=(15, 5))
else:
ax = fig.get_axes()
if len(ax) < 3:
print('Three subplots required for plots! Aborting...')
return
f = fig
try:
file = File('{}/{}'.format(savedir, savefname), 'r')
except:
print('File {}/{} not found. Aborting!'.format(savedir,
savefname))
return
data = file['convergence']
try:
data = file['convergence']
except:
print('Convergence data not found in {}/{}. Aborting!'.format(\
savedir, savefname))
return
if xaxis == 'exmain':
xlabel = 'Exmain calls'
xones = ones(data['ii2'][()].shape)
x = cumsum(xones)
elif xaxis == 'nfe':
xlabel = 'nfe internal iterations'
x = cumsum(data['nfe'][()][:, 0, 0])
elif xaxis == 'time':
xlabel = 'Total wall-clock time [HH:MM]'
x = [timedelta(t - data['t_start'][()]) for t in data['time'][()]]
x = data['time'][()] - data['t_start'][()]
if logx is True:
ax[0].loglog(x, data['fnorm'][()], '-', color=color, label=label)
ax[1].loglog(data['dt_tot'][()], data['fnorm'][()], '-',
color=color, label=label)
ax[2].loglog(x, data['dtreal'][()], '-', color=color, label=label)
else:
ax[0].semilogy(x, data['fnorm'][()], '-', color=color, label=label)
ax[1].semilogy(data['dt_tot'][()], data['fnorm'][()], '-',
color=color, label=label)
ax[2].semilogy(x, data['dtreal'][()], '-', color=color,
label=label)
ax[0].set_xlabel(xlabel)
ax[1].set_xlabel('Accumulated plasma simualtion time [s]')
ax[2].set_xlabel(xlabel)
ax[1].set_title('Total exmain evaluations: {}'.format\
(len(data['dtreal'][()])))
ax[0].set_ylabel('Initial fnorm')
ax[1].set_ylabel('Initial fnorm')
ax[2].set_ylabel('Time-step (dtreal) [s]')
ax[0].set_ylim(ylim)
ax[1].set_ylim(ylim)
if xaxis == 'time':
ax[0].xaxis.set_major_formatter(FuncFormatter(lambda t, pos :\
str(timedelta(seconds=t))[:-3]))
ax[2].xaxis.set_major_formatter(FuncFormatter(lambda t, pos :\
str(timedelta(seconds=t))[:-3]))
if label is not None:
ax[0].legend()
return f
def failureanalysis(savefname, savedir='../solutions', equation=None):
    """Plot statistics of UEDGE time-step failures stored in a save file.

    Builds a two-panel figure: a histogram of which equation (and species)
    caused each failure, and a poloidal/radial map of the cells where the
    failures occurred.

    savefname : str
        Name of the HDF5 file containing a 'convergence' group
    savedir : str ['../solutions']
        Directory containing savefname
    equation : str [None]
        If given, the location map only counts failures of this equation
        (must match an entry of the stored 'equationkey')

    Returns the Figure, or None if the file or its data is unavailable.
    """
    from h5py import File
    from matplotlib.pyplot import subplots
    from numpy import histogram, zeros
    from matplotlib.collections import PolyCollection
    f, ax = subplots(2, 1, figsize=(10, 7))
    try:
        # h5py raises OSError when the file cannot be opened
        savefile = File('{}/{}'.format(savedir, savefname), 'r')
    except OSError:
        print('File {}/{} not found. Aborting!'.format(savedir,
            savefname))
        return
    try:
        # Missing group raises KeyError
        data = savefile['convergence']
    except KeyError:
        print('Convergence data not found in {}/{}. Aborting!'.format(
            savedir, savefname))
        return
    if equation is not None:
        iequation = [x.decode('UTF-8') for x in data['equationkey']]\
            .index(equation)
    # Bin the equation errors; species are folded in as fractional offsets
    nspecies = 1/(data['nisp'][()] + 1)
    nbins = 7*data['nisp'][()]
    counts, bins = histogram((data['internaleq'][()] +
        data['internalspecies']*nspecies) - 0.5, bins=nbins,
        range=(-0.5, 6.5))
    h, e = histogram(data['internaleq'][()] - 0.5, bins=7,
        range=(-0.5, 6.5))
    ax[0].bar([x for x in range(7)], h, width=1, edgecolor='k',
        color=(0, 87/255, 183/255))
    ax[0].hist(bins[3*data['nisp'][()]:-1], bins[3*data['nisp'][()]:],
        weights=counts[3*data['nisp'][()]:], edgecolor='k',
        color=(255/255, 215/255, 0))
    ax[0].set_xticks(range(7))
    ax[0].set_xticklabels([x.decode('UTF-8') for x in
        data['equationkey'][()]])
    ax[0].grid(linestyle=':', linewidth=0.5, axis='y')
    ax[0].set_xlim((-0.5, 6.5))
    ax[0].set_ylabel('Counts')
    for i in range(7):
        ax[0].axvline(i-0.5, linewidth=1, color='k')
    # Visualize error locations on the (poloidal, radial) index plane
    nx = data['nx'][()]
    ny = data['ny'][()]
    ixpt1 = data['ixpt1'][()]
    ixpt2 = data['ixpt2'][()]
    iysptrx = data['iysptrx'][()]
    frequency = zeros((nx+2, ny+2))
    # One unit square per cell, created in (ix, iy) order to match the
    # reshape of 'frequency' below
    cells = []
    for i in range(nx+2):
        for j in range(ny+2):
            cells.append([[i-.5, j-.5], [i+.5, j-.5],
                [i+.5, j+.5], [i-.5, j+.5]])
    polys = PolyCollection(cells, edgecolors='k', linewidth=0.5,
        linestyle=':')
    # Count failures per cell (optionally only the requested equation)
    for i in range(len(data['itrouble'])):
        coord = data['troubleindex'][()][i]
        if equation is None:
            frequency[coord[0], coord[1]] += 1
        elif iequation == data['internaleq'][()][i]:
            frequency[coord[0], coord[1]] += 1
    polys.set_cmap('binary')
    polys.set_array(frequency.reshape(((nx+2)*(ny+2),)))
    cbar = f.colorbar(polys, ax=ax[1])
    cbar.ax.set_ylabel('N trouble' + ' for {}'.format(equation) *
        (equation is not None), va='bottom', labelpad=20)
    # Outline the domain and mark the separatrix and X-point columns
    ax[1].plot([.5, nx+.5, nx+.5, .5, .5], [.5, .5, ny+.5, ny+.5, .5],
        'k-', linewidth=1)
    ax[1].set_xlabel('Poloidal index')
    ax[1].set_ylabel('Radial index')
    ax[1].add_collection(polys)
    ax[1].plot([.5, nx+.5], [iysptrx+.5, iysptrx+.5], 'k-',
        linewidth=1)
    ax[1].plot([ixpt1+.5, ixpt1+.5], [.5, iysptrx+.5], 'k-',
        linewidth=1)
    ax[1].plot([ixpt2+.5, ixpt2+.5], [.5, iysptrx+.5], 'k-',
        linewidth=1)
    savefile.close()
    return f
def converge(self, dtreal=2e-9, ii1max=5000, ii2max=5, itermx=7, ftol=1e-5,
             dt_kill=1e-14, t_stop=100, dt_max=100, ftol_min=1e-9, incpset=7,
             n_stor=0, storedist='lin', numrevjmax=2, numfwdjmax=1,
             numtotjmax=0, tstor=(1e-3, 4e-2), ismfnkauto=True, dtmfnk3=5e-4,
             mult_dt=3.4, reset=True, initjac=False, rdtphidtr=1e20,
             deldt_min=0.04, rlx=0.9, tsnapshot=None, savedir='../solutions',
             ii2increase=0):
    ''' Converges the case by increasing dt

    dtreal : float [2e-9]
        Original time-step size
    ii1max : int [5000]
        Outer loop iterations, i.e. time-step changes
    ii2max : int [5]
        Inner loop iterations, i.e. time-steps per time-step change
    dt_kill : float [1e-14]
        Time-step limit for aborting simulation
    itermx : int [7]
        Maximum iterations per time-step used internally in routine
    ftol : float [1e-5]
        Internal fnrm tolerance for time-steps
    incpset : int [7]
        Passed through to bbb.incpset
    savedir : str ['../solutions']
        Directory where intermediate saves are written
    numtotjmax : int [0]
        Max cumulative time-step changes without recomputing the
        Jacobian; 0 means numrevjmax + numfwdjmax
    ii2increase : float [0]
        Extra inner-loop iterations added per consecutive successful
        outer iteration (experimental)
    ftol_min : float [1e-9]
        Value of fnrm where time-advance will stop
    t_stop : float [100.]
        Maximum total accumulated plasma-time before stopping if
        fnorm has not decreased below ftol_min
    dt_max : float [100.]
        Maximum allowable time-step size
    numrevjmax : int [2]
        Number of time-step reductions before Jacobian is recomputed
    numfwdjmax : int [1]
        Number of time-step increases before Jacobian is recomputed
    n_stor : int [0]
        Number of time-slices to be stored in interval tstor
    tstor : tuple of floats [(1e-3, 4e-2)]
        Time-interval in which time-slices are stored (lower, upper)
    storedist : str ['lin']
        Distribution of time-slices in tstor. Options are 'lin' and
        'log' for linear and logarithmic distributions, respectively
    reset : bool [True]
        Switch whether to reset the total time etc of the case
    initjac : bool [False]
        Switch to re-evaluate Jacobian on first convergence time-step
        or not
    ismfnkauto : bool [True]
        If True, sets mfnksol=3 for time-steps smaller that dtmfnk3,
        mfnksol=-3 for larger time-step sizes
    dtmfnk3 : float [5e-4]
        Time-step size for which ismfnkauto controls mfnksol if
        ismfnkauto is True
    mult_dt : float [3.4]
        Time-step size increase factor after successful inner loop
    rdtphidtr : float [1e20]
        Ratio of potential-equation time-step to plasma equation
        time-step size: dtphi/dtreal
    deldt_min : float [0.04]
        Minimum relative change allowed for model_dt>0
    rlx : float [0.9]
        Maximum change in variable at each internal linear iteration
    tsnapshot : list [None]
        If None, uses linear/logarithmic interpolation according to
        storedist in the interval tstor. Snapshot times can be defined
        in a list and supplied. Then, the tsnapshot list defines the
        time-slices
    '''
    from numpy import linspace, logspace, log10, append
    from copy import deepcopy
    from uedge import bbb
    from os.path import exists
    # Check if requested save-directory exists: if not, write to cwd
    if not exists(savedir):
        print('Requested save-path {} not found, writing to cwd!'.format(
            savedir))
        savedir = '.'
    # Record every UEDGE setting this routine overrides so it can be
    # restored on success, failure, or abort
    self.orig = {}
    self.orig['itermx'] = deepcopy(bbb.itermx)
    self.orig['dtreal'] = deepcopy(bbb.dtreal)
    self.orig['icntnunk'] = deepcopy(bbb.icntnunk)
    self.orig['ftol'] = deepcopy(bbb.ftol)
    self.orig['mfnksol'] = deepcopy(bbb.mfnksol)
    self.orig['rlx'] = deepcopy(bbb.rlx)
    self.orig['deldt'] = deepcopy(bbb.deldt)
    self.orig['isdtsfscal'] = deepcopy(bbb.isdtsfscal)
    self.orig['incpset'] = deepcopy(bbb.incpset)
    if numtotjmax == 0:
        numtotjmax = numrevjmax + numfwdjmax
    self.itermx = itermx
    self.incpset = incpset
    self.ii1max = ii1max
    self.ii2max = ii2max
    self.numrevjmax = numrevjmax
    self.numfwdjmax = numfwdjmax
    self.numtotjmax = numtotjmax
    self.rdtphidtr = rdtphidtr
    self.deldt_min = deldt_min
    self.rlx = rlx
    # TODO: Add variable to control reduction factor?
    # TODO: Should dtreal = min(x, t_stop) actually be t_stop or dt_max?

    def restorevalues(self):
        ''' Restores the original UEDGE values '''
        for key, value in self.orig.items():
            bbb.__setattr__(key, value)

    def message(string, separator='-', pad='', seppad='',
                nseparator=1):
        ''' Prints formatted message to stdout '''
        # TODO: add formatting for len>75 strings
        message = pad.strip() + ' ' + string.strip() + ' ' + pad.strip()
        for i in range(nseparator):
            print(seppad + separator*(len(message)-2*len(seppad)) + seppad)
        print(message)
        print(seppad + separator*(len(message)-2*len(seppad)) + seppad)

    def scale_timestep(scaling):
        ''' Increases/decreases time-step '''
        bbb.dtreal *= scaling

    def exmain_isaborted(self):
        ''' Runs one exmain call; returns True if abort was requested '''
        from uedge import bbb
        bbb.exmain()
        # Abort flag set, abort case
        if bbb.exmain_aborted == 1:
            # Reset flag — must be an assignment; the original '==' was a
            # no-op comparison that left the flag permanently set
            bbb.exmain_aborted = 0
            # Restore parameters modified by script
            restorevalues(self)
            return True

    def issuccess(self, t_stop, ftol_min):
        ''' Checks if case is converged '''
        from datetime import timedelta
        from time import time
        if (bbb.iterm == 1):
            bbb.ylodt = bbb.yl
            bbb.dt_tot += bbb.dtreal
            bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
            self.fnrm_old = sum((bbb.yldot[:bbb.neq-1] *
                bbb.sfscal[:bbb.neq-1])**2)**0.5
            self.savesuccess(ii1, ii2, savedir, bbb.label[0].strip(
                ).decode('UTF-8'), self.fnrm_old)
            if (bbb.dt_tot >= t_stop or self.fnrm_old < ftol_min):
                print('')
                message('SUCCESS: ' + 'fnrm < bbb.ftol'
                    *(self.fnrm_old < ftol_min) +
                    'dt_tot >= t_stop'*(bbb.dt_tot >= t_stop), pad='**',
                    separator='*')
                print('Total runtime: {}'.format(timedelta(
                    seconds=round(time()-self.tstart))))
                restorevalues(self)
                return True

    def isfail(dt_kill):
        ''' Checks whether to abandon case '''
        if (bbb.dtreal < dt_kill):
            message('FAILURE: time-step < dt_kill', pad='**',
                separator='*')
            restorevalues(self)
            return True

    def setmfnksol(ismfnkauto, dtmfnk3):
        ''' Sets mfnksol according to setup '''
        if ismfnkauto is True:
            bbb.mfnksol = 3*(-1)**(bbb.dtreal > dtmfnk3)

    def calc_fnrm():
        ''' Calculates the initial fnrm '''
        from uedge import bbb
        bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
        return sum((bbb.yldot[:bbb.neq-1]*bbb.sfscal[:bbb.neq-1])**2)**0.5

    ''' TIME-SLICING SETUP '''
    if tsnapshot is None:
        if storedist == 'lin':
            # Linearly spaced time slices for writing
            dt_stor = linspace(tstor[0], tstor[1], n_stor)
        elif storedist == 'log':
            # Logarithmically spaced time-slices
            dt_stor = logspace(log10(tstor[0]), log10(tstor[1]), n_stor)
    else:
        dt_stor = tsnapshot
    # Add end-point to avoid tripping on empty arrays
    dt_stor = append(dt_stor, 1e20)

    if reset is True:
        bbb.dt_tot = 0

    ''' TIME-STEP INITIALIZATION '''
    bbb.rlx = rlx
    bbb.dtreal = dtreal
    bbb.ftol = ftol
    if (bbb.iterm == 1) and (bbb.ijactot > 0):
        message('Initial successful time-step exists', separator='')
    else:
        message('Need to take initial step with Jacobian; ' +
            'trying to do here', seppad='*')
        # Ensure time-step is taken
        bbb.icntnunk = 0
        # Take timestep and see if abort requested
        if exmain_isaborted(self):
            return
    # Increase time
    # Verify time-step was successful
    if (bbb.iterm != 1):
        restorevalues(self)
        message('Error: converge an initial time-step first; then ' +
            're-execute command', seppad='*')
        return
    bbb.incpset = incpset
    bbb.itermx = itermx
    deldt_0 = deepcopy(bbb.deldt)
    isdtsf_sav = deepcopy(bbb.isdtsfscal)
    # TODO: Replace with some more useful information?
    # if (bbb.ipt==1 and bbb.isteon==1): # set ipt to te(nx,iysptrx+1)
    # #if no user value
    # ipt = bbb.idxte[nx-1,com.iysptrx] #note: ipt is local,
    # # bbb.ipt global
    bbb.dtphi = rdtphidtr*bbb.dtreal
    bbb.ylodt = bbb.yl
    self.fnrm_old = calc_fnrm()
    if initjac is True:
        # Force a Jacobian evaluation on the very first step
        self.fnrm_old = 1e20
    else:
        bbb.newgeo = 0
    # Initialize counters
    irev = -1
    numfwd = 0
    numrev = 0
    numrfcum = 0
    # Compensate for first time-step before entering loop
    scale_timestep(1/(3*(irev == 0) + mult_dt*(irev != 0)))

    ''' OUTER LOOP - MODIFY TIME-STEP SIZE'''
    # TODO: Add logic to always go back to last successful ii2 to
    # precondition the Jacobian, to avoid downwards cascades?
    # NOTE: experimental functionality
    successivesuccesses = 0
    # Predefine the inner-loop counter: issuccess() reads ii2 through its
    # closure and can be called before the inner loop has ever run
    ii2 = 0
    for ii1 in range(ii1max):
        setmfnksol(ismfnkauto, dtmfnk3)
        # adjust the time-step
        # dtmult=3 only used after a dt reduc. success. completes loop ii2
        # for fixed dt either increase or decrease dtreal; depends
        # on mult_dt
        scale_timestep(3*(irev == 0) + mult_dt*(irev != 0))
        bbb.dtreal = min([bbb.dtreal, dt_max])
        bbb.dtphi = rdtphidtr*bbb.dtreal
        bbb.deldt = min([bbb.deldt, deldt_0, deldt_min])
        message('Number of time-step changes = {} New time-step: {:.2E}\n'
            .format((ii1+1), bbb.dtreal), pad='***', nseparator=1)
        # Enter for every loop except first, unless initjac == True
        if ii1 > -int(initjac):
            # Main time-stepping switch: controls increase/decrease in
            # dtreal and Jacobian preconditioning
            if (irev == 1):  # decrease in bbb.dtreal
                if (numrev < numrevjmax and
                        numrfcum < numtotjmax):  # dont recomp bbb.jac
                    bbb.icntnunk = 1
                    numrfcum += 1
                else:  # force bbb.jac calc, reset numrev
                    bbb.icntnunk = 0
                    numrev = -1  # yields api.zero in next statement
                    numrfcum = 0
                numrev += 1
                numfwd = 0
            else:  # increase in bbb.dtreal
                if (numfwd < numfwdjmax and
                        numrfcum < numtotjmax):  # dont recomp bbb.jac
                    bbb.icntnunk = 1
                    numrfcum += 1
                else:
                    bbb.icntnunk = 0  # recompute jacobian for increase dt
                    numfwd = -1
                    numrfcum = 0
                numfwd += 1
                numrev = 0  # bbb.restart counter for dt reversals
            bbb.isdtsfscal = isdtsf_sav
            # Dynamically decrease ftol as the initial ftol decreases
            bbb.ftol = max(min(ftol, 0.01*self.fnrm_old), ftol_min)
            # Take timestep and see if abort requested
            if exmain_isaborted(self):
                return
            if issuccess(self, t_stop, ftol_min):
                return
        bbb.icntnunk = 2
        bbb.isdtsfscal = 0
        # NOTE: experimental functionality
        bbb.ii2max = ii2max + round(ii2increase*successivesuccesses)
        # Take ii2max time-steps at current time-step size while
        # time-steps converge: if not, drop through
        for ii2 in range(bbb.ii2max):
            if (bbb.iterm == 1):
                bbb.ftol = max(min(ftol, 0.01*self.fnrm_old), ftol_min)
                # Take timestep and see if abort requested
                message("Inner iteration #{}".format(ii2+1), nseparator=0,
                    separator='')
                if exmain_isaborted(self):
                    return
                if issuccess(self, t_stop, ftol_min):
                    return
                message("Total time = {:.4E}; Timestep = {:.4E}".format(
                    bbb.dt_tot-bbb.dtreal, bbb.dtreal), nseparator=0,
                    separator='')
                # TODO: replace with more useful information
                # print("variable index ipt = ",ipt, " bbb.yl[ipt] = ",
                # bbb.yl[ipt])
                # Store variable if threshold has been passed
                if (bbb.dt_tot >= dt_stor[0]):
                    # Remove storing time-points smaller than current
                    # simulation time
                    while bbb.dt_tot >= dt_stor[0]:
                        dt_stor = dt_stor[1:]
                    self.store_timeslice()
        irev -= 1
        # Output and store troublemaker info
        # NOTE: experimental functionality
        successivesuccesses += 1
        if (bbb.iterm != 1):
            # NOTE: experimental functionality
            successivesuccesses = 0
            self.itroub()
            ''' ISFAIL '''
            if isfail(dt_kill):
                self.save_intermediate(savedir, bbb.label[0].strip()
                    .decode('UTF-8'))
                break
            irev = 1
            message('Converg. fails for bbb.dtreal; reduce time-step by ' +
                '3, try again', pad='***', nseparator=0)
            scale_timestep(1/(3*mult_dt))
            bbb.dtphi = rdtphidtr*bbb.dtreal
            bbb.deldt *= 1/(3*mult_dt)
            setmfnksol(ismfnkauto, dtmfnk3)
            # bbb.iterm = 1
            # bbb.iterm = -1 # Ensure subsequent repetitions work as intended
def rundt(**kwargs):
    """Convenience wrapper: create a UeRun case and converge it.

    All keyword arguments are forwarded unchanged to UeRun.converge().
    """
    case = UeRun()
    case.converge(**kwargs)
| 30,206 | 40.83795 | 81 | py |
UEDGE | UEDGE-master/pyscripts/hdf5.py | import numpy as np
import h5py
import uedge
try:
import __version__ as pyv
pyver = pyv.__version__
except:
pyver = uedge.__version__
from .uedge import bbb
from .uedge import com
from .uedge_lists import *
import time
from Forthon import packageobject
def hdf5_restore(file):
    """
    Read a hdf5 file previously written from pyUedge. This reads the file
    and will attempt to restore all datasets. This will restore a file
    saved by either hdf5_save or hdf5_dump.

    Returns True on (possibly partial) success; raises if the file cannot
    be opened.
    """
    try:
        hf = h5py.File(file, 'r')
    except Exception:
        print("Couldn't open hdf5 file ", file)
        raise
    try:
        # New-style files keep the state arrays inside a 'bbb' group;
        # indexing raises KeyError for old-style files
        hf['bbb']
        for var in ['tes', 'tis', 'ups', 'nis', 'phis', 'ngs', 'tgs']:
            packageobject('bbb').__setattr__(var, hf['bbb'][var][()])
    except Exception:
        print("Old style hdf5 file")
        # Old-style files store each array as a top-level '<name>@bbb'
        # dataset; restore each one independently, best-effort
        for var in ['ngs', 'nis', 'phis', 'tes', 'tis', 'ups', 'tgs',
                    'tipers']:
            try:
                getattr(bbb, var)[...] = np.array(hf.get(var + '@bbb'))
            except ValueError as error:
                print("Couldn't read {} from ".format(var), file)
                print(error)
            except Exception:
                print("Couldn't read {} from ".format(var), file)
    hf.close()
    return True
def hdf5_save(file, varlist=['bbb.ngs', 'bbb.ng',
                             'bbb.ni', 'bbb.nis',
                             'bbb.phi', 'bbb.phis',
                             'bbb.te', 'bbb.tes',
                             'bbb.ti', 'bbb.tis',
                             'bbb.up', 'bbb.ups',
                             'bbb.tg', 'bbb.tgs',
                             'bbb.ev', 'bbb.prad',
                             'bbb.pradhyd', 'bbb.tipers', 'com.nx',
                             'com.ny', 'com.rm', 'com.zm'],
              addvarlist=[]):
    """
    Save HDF5 output for restarting and plotting.

    varlist=[] a list of variables to save specified as strings.
               package prefix required. Default list is usual
               variable list. Example use:
               varlist=['bbb.ni','bbb.te']
    addvarlist=[] a list of variables to save in addition to the ones
               in varlist. Syntax is the same as varlist parameter.
               Envisioned use is to add output in addition
               to the default list in varlist.

    Returns True; raises if the file cannot be created.
    """
    # Per-package bookkeeping: h5py group handle plus the variable names
    # already written to it (to skip duplicates)
    grps = {}
    try:
        hf = h5py.File(file, 'w')
        hfb = hf.create_group('bbb')
        grps['bbb'] = {'h5': hfb, 'vars': ['uedge_ver']}
        hfb.attrs['time'] = time.time()
        hfb.attrs['ctime'] = time.ctime()
        hfb.attrs['code'] = 'UEDGE'
        hfb.attrs['ver'] = bbb.uedge_ver
        try:
            hfb.attrs['pyver'] = pyver
            grps['bbb']['vars'].append('pyver')
        except Exception:
            print("couldn\'t write pyver to header")
    except ValueError as error:
        print("HDF5 file open failed to ", file)
        print(error)
        raise
    except Exception:
        print("HDF5 file open failed to ", file)
        raise
    # The default list and the user additions get identical handling
    for lvt in list(varlist) + list(addvarlist):
        vt = lvt.split('.')
        if len(vt) != 2:
            # Guard against names without a 'package.variable' shape; the
            # indexing below would otherwise fail inside its own handler
            print("Skipping malformed variable name ", lvt)
            continue
        try:
            if vt[0] not in grps:
                hfb = hf.create_group(vt[0])
                grps[vt[0]] = {'h5': hfb, 'vars': []}
            else:
                hfb = grps[vt[0]]['h5']
            pck = packagename2object(vt[0])
            po = pck.getpyobject(vt[1])
            if vt[1] in grps[vt[0]]['vars']:
                print(vt[1], " already written, skipping")
            else:
                grps[vt[0]]['vars'].append(vt[1])
                d = hfb.create_dataset(vt[1], data=po)
                d.attrs['units'] = pck.getvarunit(vt[1])
                d.attrs['comment'] = pck.getvardoc(vt[1])
        except ValueError as error:
            print("HDF5 write failed to ", file, ' var ', vt[1])
            print(error)
        except Exception:
            print("HDF5 write failed to ", file, ' var ', vt[1])
    hf.close()
    return True
def hdf5_dump(file, packages=list_packages(objects=1), vars=None, globals=None):
    """
    Dump all variables from a list of package objects into a file.

    Default packages are output of uedge.uedge_lists.list_packages()
    vars=[varlist] dump limited to intersection of varlist and packages
    globals=dict if given, its entries (intersected with vars when vars
        is given) are written to a 'globals' group

    NOTE: the default for 'packages' is evaluated once, at import time.
    Returns True; raises if the file cannot be created or read back.
    """
    try:
        hf = h5py.File(file, 'w')
    except ValueError as error:
        print("Couldn't open hdf5 file ", file)
        print(error)
        raise
    except Exception:
        print("Couldn't open hdf5 file ", file)
        raise

    def _stamp(grp):
        # Attach the provenance attributes shared by every group
        grp.attrs['time'] = time.time()
        grp.attrs['ctime'] = time.ctime()
        grp.attrs['code'] = 'UEDGE'
        grp.attrs['ver'] = bbb.uedge_ver
        try:
            grp.attrs['pyver'] = pyver
        except Exception:
            pass

    for p in packages:
        hfg = hf.create_group(p.name())
        _stamp(hfg)
        for v in list_package_variables(p, vars=vars):
            if p.allocated(v):
                try:
                    d = hfg.create_dataset(v, data=p.getpyobject(v))
                    d.attrs['units'] = p.getvarunit(v)
                    d.attrs['comment'] = p.getvardoc(v)
                except ValueError as error:
                    print("Couldn't write out: "+p.name()+'.'+v)
                    print(error)
                except Exception:
                    print("Couldn't write out: "+p.name()+'.'+v)
            else:
                print(p.name()+'.'+v+" is not allocated")
    if globals is not None:
        hfg = hf.create_group('globals')
        _stamp(hfg)
        # When no vars filter is given, dump every global (the original
        # crashed on set(None) in that case)
        if vars is not None:
            selected = set(globals) & set(vars)
        else:
            selected = set(globals)
        for v in selected:
            try:
                d = hfg.create_dataset(v, data=globals[v])
                d.attrs['units'] = 'none'
                d.attrs['comment'] = 'Global Variable'
            except ValueError as error:
                print("Couldn't write out: globals."+v)
                print(error)
            except Exception:
                print("Couldn't write out: globals."+v)
    hf.close()
    return True
def h5py_dataset_iterator(g, prefix=''):
    """Recursively yield (path, dataset) for every dataset below group *g*.

    Paths are '/'-joined key names relative to *prefix*.
    """
    for key in g.keys():
        node = g[key]
        subpath = '{}/{}'.format(prefix, key)
        if isinstance(node, h5py.Dataset):
            yield (subpath, node)
        elif isinstance(node, h5py.Group):
            # Recurse into sub-groups. Kept as an explicit loop rather
            # than 'yield from' to preserve the file's Python-2-friendly
            # style.
            for pair in h5py_dataset_iterator(node, subpath):
                yield pair
def hdf5_restore_dump(file, scope=None, hdffile=None, quiet=False):
    """
    Restore all variables from a previously saved HDF5 file.
    This is called by hdf5_restore and the recommended way
    to restore.

    scope : dict [None]   namespace receiving entries of the 'globals' group
    hdffile : [None]      already-open h5py File to read instead of 'file'
    quiet : bool [False]  suppress the file-attribute banner
    """
    if hdffile is None:
        try:
            hf = h5py.File(file, 'r')
        except Exception:
            print("Couldn't open hdf5 file ", file)
            raise
    else:
        hf = hdffile
    try:
        try:
            g = hf['bbb']
            if not quiet:
                print('File attributes:')
                print(' written on: ', g.attrs['ctime'])
                print(' by code: ', g.attrs['code'])
                print(' version: ', np.char.strip(g.attrs['ver']))
                print(' physics tag: ', np.char.strip(g.attrs['ver']))
                print(' python version: ', np.char.strip(g.attrs['pyver']))
        except Exception:
            if not quiet:
                print('No file attributes, trying to restore')
        for (path, dset) in h5py_dataset_iterator(hf):
            # path is '/<package-or-globals>/<variable>'
            vt = path.split('/')
            if vt[1] == 'globals':
                if scope is not None:
                    if dset.size > 1:
                        scope[vt[2]] = np.array(dset[()])
                    else:
                        scope[vt[2]] = dset[()]
            else:
                try:
                    pck = packagename2object(vt[1])
                    po = pck.getpyobject(vt[2])
                    if dset.size > 1:
                        # In-place fill keeps the Forthon array binding
                        po[...] = np.array(dset[()])
                    else:
                        setattr(pck, vt[2], dset[()])
                except ValueError as error:
                    # Report the dataset that actually failed (the
                    # original message always blamed 'tes')
                    print("Couldn't read ", path, " from ", file)
                    print(error)
                except Exception:
                    print('Couldn\'t read dataset ', path)
                    raise
    except Exception:
        print("Couldn't read hdf5 file ", file)
        raise
    if hdffile is None:
        hf.close()
    return True
| 11,545 | 33.363095 | 90 | py |
UEDGE | UEDGE-master/pyscripts/filelists.py | # Methods to create a list of files in a directory, and a sub-list containing sspecified suffixes.
import os
import string
def filelist(path):
    """Return the names of all entries in directory *path*.

    Thin wrapper around os.listdir; order is whatever the OS reports.
    """
    return os.listdir(path)
def filesublist(path, suffix):
    """Return the entries of *path* whose final dot-separated component
    equals *suffix* (a name without a dot only matches itself)."""
    return [name for name in os.listdir(path)
            if name.split(".")[-1] == suffix]
| 404 | 21.5 | 98 | py |
UEDGE | UEDGE-master/pyscripts/convert.py | # A Python translator that will convert files following a set
# of file-suffix-specific substitution rules
# Written by Ron Cohen, February 2007. Last revision March 15, 2007.
# import utilities to create filtered list of files
from filelists import *
import os
import filecmp
"""
create a list of classes to instantiate which will have conversion rules
for different language files.
Usage:
Create a file "localrules.py" that defines globalsubrules to apply to
all file types and any of cppsubrules, pythonsubrules,fortransubrules,
mpplsubrules,F90subrules that you want to apply to specific file
types. These rules are of the form:
globalsubrules = [("in1","out1"),("in2","out2"),
...] where string in1 will be converted to string out1, etc. It is
an ordered list which will be executed first to last.
Another option is that an entry in globalsubrules can be the name
of a function of one argument (the string being processed).
Note: currently, if the ordered list returns None (the Python None,
not the string "None") for a line,
no further processing of that line is done and no line is written.
This can be used to re-order lines, since a function entry used in
globalsubrules can be used to return a list of lines, or "None". But
no further processing is done of the returned list of lines, so any
such re-ordering must be done as the last of the rules..
Note: in addition to two-element tuples, one can also include in the subrules methods
which take as its sole argument the string the method operates on;
use this for more complicated operations than simple replacements
Also create a local script in the directory where you want to
convert files. Begin that script with "from convert import *",
making sure that convert.py and localrules.py are in your path.
Warning: don't end the local script with the suffix .py or the converter
will apply your rules to your local script (assuming you use the default
suffix as the suffixin for class Py).
Further notes
1.If desired, the default suffixes searched to designate files of
a specific type can be edited in your local script. to do so
import convert, edit the suffix for the sub-class (for example,
convert.py.suffix = "newsuffix"), then type "from convert import *".
2.Create sub-classes that inherit from class generic, for any additional
file suffix that will be converted. In each subclass, set suffixin
to a string that gives the suffix of files to be converted, e.g. ".cc"
or ".py". If the processed files are to have a different suffix,
set suffixout to the desired suffix.
3.Append processall.classlist, a list of sub-class names that will
be processed to do the conversion, to add the names of any classes
added in step 2.
Then, to process files:
1.Execute processall(indir,outdir) to process all the specified file
types. indir and outdir are optional arguments to specify paths
for the directory to be processed and the directory where processed
files will be written. By default indir = ".", the current directory,
and outdir = "./converted".
2.You can also create instances of specific classes created in step 2, and
just process those files. For example a = Py(indir,outdir). See
documentation for class generic to see available methods.
WARNING: If you've run the script before and already created the outdir,
the script will overwrite files in outdir with the same name
3.The script will only process files if the source file is newer than
the target (or the target doesn't exist).
4.You can also use processdirs(dirlist) to run processall in a list of
directory names specified in dirlist (names in quotes).
"""
# Global substitution dictionary, globalsubdict = {"instring1:outstring1",...}
globalsubdict = {}
# subrules of form [("in1","out1"),("in2","out2"), ...]
globalsubrules = []
# substitution rules to be appended to globalsubrules for each language
cppsubrules = []
# e.g. cppsubrules = [ ("hier::Box<NDIM>", "Box"),
# ("hier::IntVector<NDIM>", "IntVect"),
# ("hier::Index<NDIM>", "IntVect") ]
pythonsubrules = []
fortransubrules = []
F90subrules = []
F90_90subrules = []
TupleType = type((0,0))
ListType = type([0,0])
StringType = type("fubar")
MPPLsubrules = []
try:
from localrules import *
except:
print("No file or problem with 'localrules.py'; proceeding with default rules")
def fnconvert(name, suffixout):
    # Convert a file name "name" by substituting suffixout for the
    # existing suffix (the part after the last "."); if there is no
    # suffix, append "." + suffixout.
    root, dot, _old_suffix = name.rpartition(".")
    if dot:
        # Only the last suffix is replaced; everything before the final
        # dot is preserved untouched.
        return root + "." + suffixout
    return name + "." + suffixout
from stat import ST_MTIME
from numpy import greater
def newer(file0, file1):
    """Return 1 if file0 is newer than file1, 0 otherwise.

    If file0 does not exist the OSError from os.stat propagates (a
    standard Python error, as before); if file1 does not exist, return 2
    so a missing target always counts as out of date.
    """
    time0 = os.stat(file0)[ST_MTIME]
    try:
        time1 = os.stat(file1)[ST_MTIME]
    except OSError:
        # Only a stat failure on the target means "missing target";
        # other exceptions should surface instead of being swallowed
        return 2
    return 1 if time0 > time1 else 0
class processdirs:
    # Process a list of directories to do conversions with processall.
    # The default list of directories is ".", the current directory.
    # Establishes a set of subdirectories with processed files
    # in a root directory whose default is called "converted" and
    # is parallel to ".".
    def __init__(self, indirs=["."], outroot="../converted", clean=None):
        # (The mutable defaults are never modified, so sharing is safe.)
        # PWD may be unset when not launched from a shell; fall back to
        # the process working directory.
        curpath = os.getenv("PWD") or os.getcwd()
        # create the output root directory if it needs to be
        try:
            os.mkdir(outroot)
            print("Creating directory "+outroot)
        except OSError:
            try:
                os.chdir(outroot)
                print("Output directory already exists; proceeding")
                os.chdir(curpath)
            except OSError:
                # NOTE: the original raised a bare string, which is a
                # TypeError in Python 3; raise a real exception instead.
                raise OSError("Can't create output directory " + outroot)
        for direc in indirs:
            print("Entering directory "+direc)
            if direc == ".":
                curdir = curpath.split("/")[-1]
                outdir = outroot+"/"+curdir
            else:
                outdir = outroot + "/"+direc
            processall(direc, outdir, clean)
class processall:
    # Process all files in a directory with rules according to file type
    # as designated by suffix.
    # List of classes handling the distinct file types
    classlist = ["Py", "Cpp", "Cpp_h", "Cpp_hh", "Fortran", "F90",
                 "F90_90", "MPPL"]

    def __init__(self, indir=".", outdir="./converted", clean=None):
        for entry in self.classlist:
            # Look the handler class up by name at module scope instead
            # of building and eval()-ing a source string
            handler = globals()[entry]
            a = handler(indir, outdir, clean=clean)
            # Only process file types with non-empty substitution rules
            if (a.subrules != []):
                print("processing for file type "+entry)
                a.process()
class generic:
    """
    generic class for replacing strings in a series of files.
    Methods:
      process(): processes the list of files in the directory indir with
        suffix suffixin, and for each one creates a new file in directory
        outdir with the same root name appended by
        the string suffixout (which by default is suffixin).
      processfile(filename): processes a specific file
    Notes:
      indir is string with path. By default it is ".", the current directory
      outdir is by default "./converted". If the output directory does not
      exist it will be created. Can be overwritten after instance created
    """
    suffixin = ""
    suffixout = ""
    subrules = globalsubrules

    def __init__(self, indir=".", outdir="./converted", clean=None):
        self.indir = indir
        self.outdir = outdir
        self.clean = clean
        self.doclean = 0  # default is no removal of duplicate files
        if (self.suffixout == ""):
            self.suffixout = self.suffixin
        # create the output directory if it hasn't been created
        try:
            os.mkdir(self.outdir)
        except OSError:
            pass

    def process(self):
        """Convert every file in indir that carries suffixin."""
        # get the list of files to process
        self.filelist = filesublist(self.indir, self.suffixin)
        print("processing directory " + self.indir +
              " to directory " + self.outdir)
        # exclude the converter's own scripts from python processing
        if (self.suffixin == "py"):
            for special in ("convert.py", "localrules.py", "filelists.py"):
                try:
                    self.filelist.remove(special)
                except ValueError:
                    pass
        # Remove unchanged output files only when input and output
        # directories differ (otherwise we would delete the source)
        if self.clean and (self.outdir != self.indir):
            self.doclean = 1
        else:
            self.doclean = 0
        # Now process files remaining
        for file in self.filelist:
            self.processfile(file)

    def processfile(self, filename):
        """Convert an individual file, applying subrules line by line."""
        self.infile = self.indir+"/"+filename
        if (self.suffixout == self.suffixin):
            outfilename = filename
        else:
            outfilename = fnconvert(filename, self.suffixout)
        self.outfile = self.outdir+"/"+outfilename
        if (newer(self.infile, self.outfile)):
            # Only process infile if it is newer than outfile
            print("converting file "+self.infile+" to "+self.outfile)
            f = open(self.infile, "r")
            g = open(self.outfile, "w")
            lines = f.readlines()
            iline = 0  # diagnostic: counts lines
            for line in lines:
                iline = iline + 1
                for rule in self.subrules:
                    # A rule may return None to drop the line; all later
                    # rules are then skipped
                    if (line != None):
                        if isinstance(rule, (tuple, list)):
                            line = line.replace(rule[0], rule[1])
                        else:
                            # not a tuple or list: assume a callable that
                            # implements something more complicated than
                            # a simple replace
                            line = rule(line)
                if (line != None):
                    # A rule may also return a list of lines (e.g. for
                    # re-ordering); write whichever form we ended with
                    if isinstance(line, str):
                        g.write(line)
                    elif isinstance(line, list):
                        g.writelines(line)
            g.close()
            f.close()
            if self.doclean == 1:
                if filecmp.cmp(self.outfile, self.infile) and \
                        self.outfile != self.infile:
                    print(self.outfile + " is the same as " +
                          self.infile + ", removing")
                    os.remove(self.outfile)
class Py(generic):
    # Converter for Python sources (*.py): global rules plus the
    # Python-specific rules from localrules.py.
    suffixin="py"
    subrules = globalsubrules + pythonsubrules
class Cpp(generic):
    # Converter for C++ sources (*.C): global rules plus C++ rules.
    suffixin = "C"
    subrules = globalsubrules + cppsubrules
class Cpp_h(Cpp):
    # C/C++ headers (*.h); inherits the C++ substitution rules.
    suffixin = "h"
class Cpp_hh(Cpp):
    # C++ headers (*.hh); inherits the C++ substitution rules.
    suffixin = "hh"
class Fortran(generic):
    # Converter for fixed-form Fortran sources (*.f).
    suffixin = "f"
    subrules = globalsubrules + fortransubrules
class F90(generic):
    # Converter for preprocessed Fortran sources (*.F).
    suffixin = "F"
    subrules = globalsubrules + F90subrules
class F90_90(F90):
    # Free-form Fortran sources (*.F90); inherits the F90 rules.
    suffixin = "F90"
class MPPL(generic):
    # Converter for MPPL sources (*.m).
    suffixin = "m"
    subrules = globalsubrules + MPPLsubrules
| 11,542 | 37.734899 | 92 | py |
UEDGE | UEDGE-master/pyscripts/double.py | from uedge import *
def uedouble():
    """Double the UEDGE mesh resolution in every region and flag a restart.

    Doubles the poloidal (nxleg, nxcore) and radial (nycore, nysol) cell
    counts, rescales the omitted-cell count nxomit consistently, adjusts
    the exponential leg-mesh parameters when kxmesh == 4, and sets the
    restart/regeneration flags so the next run rebuilds the grid and
    interpolates the old solution onto it.
    """
    com.nxleg=2*com.nxleg
    com.nxcore=2*com.nxcore
    com.nycore=2*com.nycore
    com.nysol=2*com.nysol
    if com.nxomit > 0:
        # keep the number of omitted poloidal cells consistent with the
        # doubled mesh
        if com.geometry=="dnbot":
            com.nxomit = com.nxleg[0,0]+com.nxcore[0,0] + 2*com.nxxptxx + 1
        else:
            com.nxomit=2*(com.nxomit-2*com.nxxptxx) + 2*com.nxxpt   # assumes com.nxomit removes 1/2 SOL
    if com.nyomitmx == 1: com.nysol = 1
    if grd.kxmesh == 4:
        # exponential leg mesh: halve the decay rate alfx and rescale the
        # first-cell width dxgas so the leg length is preserved
        dxgasold=grd.dxgas
        alfxold=grd.alfx
        grd.alfx=alfxold/2.
        grd.dxgas=dxgasold*(exp(grd.alfx)-1)/(exp(alfxold)-1)
        grd.nxgas=2*grd.nxgas
    # request grid regeneration and interpolation of the previous solution
    bbb.restart=1
    bbb.newgeo=1
    bbb.gengrid=1
    bbb.isnintp=1
    grd.ixdstar = com.nxcore[0,1]+1
| 739 | 28.6 | 103 | py |
UEDGE | UEDGE-master/pyscripts/__init__.py | from .uedge import *
from os import path
from pathlib import Path
try:
    # installed-package layout: version metadata lives inside the uedge package
    from uedge.__version__ import __version__
    from uedge.__src__ import __src__
    import uedge.checkver
except:
    try:
        # in-tree layout: the metadata modules sit next to this file
        from __version__ import __version__
        from __src__ import __src__
        import checkver
    except:
        # no version metadata found in either layout
        __version__ = 'unknown'
        __src__ = 'unknown'
#
# Load the startup file .uedgerc.py from cwd or home (cwd takes precedence).
#
_homepath = path.expanduser('~')
_homefile = Path('{}/.uedgerc.py'.format(_homepath))
_localpath = path.expanduser('.')
_localfile = Path('{}/.uedgerc.py'.format(_localpath))
if path.exists(_localfile):
    # Read through the 'with'-managed handle; the original opened the file a
    # second time inside the with-block and leaked that second handle.
    with open(_localfile) as f:
        exec(f.read())
elif path.exists(_homefile):
    with open(_homefile) as f:
        exec(f.read())
| 805 | 24.1875 | 54 | py |
UEDGE | UEDGE-master/pyscripts/__src__.py | __src__ = '/Users/meyer8/gitstuff/UEDGE'
| 41 | 20 | 40 | py |
UEDGE | UEDGE-master/pyscripts/cdf4.py |
import numpy as np
import netCDF4 as nc
from .uedge import bbb
def cdf4_restore(file):
    """
    Read a cdf4 file previously written from Uedge. This reads the file and puts
    the standard state variables (ngs, nis, phis, tes, tis, ups, tgs) into the
    corresponding bbb arrays, reporting each variable that cannot be read.
    """
    try:
        cf = nc.Dataset(file)
    except:
        print("Couldn't open cdf4 file ", file)
        return
    # Each variable is restored independently so one missing entry does not
    # stop the others from being loaded.
    for name in ('ngs', 'nis', 'phis', 'tes', 'tis', 'ups', 'tgs'):
        try:
            getattr(bbb, name)[...] = np.array(cf.variables[name])
        except:
            print("Couldn't read " + name + " from ", file)
    cf.close()
def cdf4_save(file):
    """
    Write the standard state variables (ngs, nis, phis, tes, tis, ups, tgs)
    to a netCDF4 file that cdf4_restore() can read back.

    Bug fixes relative to the original: the body used the undefined name
    h5py (this module imports only netCDF4 as nc, so every call raised
    NameError); a failed open did not return, leaving the handle unbound;
    variables were written under a 'bbb' group that cdf4_restore (which
    reads root-level cf.variables) could never see; and the tgs error
    message wrongly said "ups".
    """
    try:
        # 'w' mode clobbers any existing file, matching the original intent.
        cf = nc.Dataset(file, 'w')
    except:
        print("Couldn't open cdf4 file ", file)
        return
    for name in ('ngs', 'nis', 'phis', 'tes', 'tis', 'ups', 'tgs'):
        try:
            data = np.asarray(getattr(bbb, name))
            # netCDF variables need named dimensions; create one per axis.
            dims = []
            for i, size in enumerate(data.shape):
                dimname = '%s_dim%d' % (name, i)
                cf.createDimension(dimname, size)
                dims.append(dimname)
            var = cf.createVariable(name, data.dtype, tuple(dims))
            var[...] = data
        except:
            print("Couldn't write " + name + " to ", file)
    cf.close()
| 2,293 | 22.649485 | 84 | py |
UEDGE | UEDGE-master/pyscripts/sources.py |
#
# Bill Meyer - 7/24/2019
# meyer8@llnl.gov
#
#
from __future__ import print_function
import sys
def sources():
    """
    Print every loaded module together with the path it was imported from.

    Modules without a usable __file__ attribute are reported as 'unknown'.
    This is purely a debugging aid: when a user reports a problem it can be
    used to verify the modules are coming from the expected locations.
    """
    for name in sorted(sys.modules):
        module = sys.modules[name]
        location = getattr(module, '__file__', 'unknown')
        print(name, '\t--\t', location)
| 670 | 20.645161 | 75 | py |
UEDGE | UEDGE-master/pyscripts/pdb2h5.py | #!/usr/bin/env python
# @description This script will convert a pdb data file into an hdf5
# data file
#
# @keywords Py-UEDGE
#
# @contact John Cary
#
# @version $id: $
# import sys for system commands
import sys, os
# Import uefacets for interface convenience
from . import uefacets
# import getopts for cmd line parsing
import getopt
# import uedge and all its structures
from .uedge import *
def usage(code):
    """Print the command-line usage message and exit with the given status."""
    for text in ("pdb2h5 [-h] -i <infile> -o <outfile> [-p <paramfile>]",
                 " -h: Print this help"):
        print(text)
    sys.exit(code)
# Parse the command line; any getopt error prints usage and exits non-zero.
try:
    olst, _ = getopt.getopt(sys.argv[1:], "hi:o:p:")
except:
    usage(1)
for o in olst:
    if o[0] == "-h":
        usage(0)
    if o[0] == "-i":
        infile = o[1]    # input PDB file
    if o[0] == "-o":
        outfile = o[1]   # output HDF5 file
    if o[0] == "-p":
        prmfile = o[1]   # optional parameter file
# NOTE(review): if -i or -o is omitted, infile/outfile are never bound and the
# ue.restore/ue.dump calls below raise NameError rather than printing usage --
# confirm whether that is intended.
# initialize the uedge object
uefacets.init()
ue = uefacets.Uedge()
# prmfile is only bound when -p was given; the NameError branch covers that.
try:
    ue.readParams(prmfile)
except NameError as _:
    print("No parameter file specified, continuing...")
ue.buildData()
ue.restore(infile)   # read the PDB data
ue.dump(outfile)     # write it back out as HDF5
uefacets.final()
sys.exit(0)
| 1,094 | 16.380952 | 68 | py |
UEDGE | UEDGE-master/pyscripts/rdcontdt.py | # This file runs a time-dependent case using dtreal. First, obtain a converged
# solution for a (usually small) dtreal; xuedge must report iterm=1 at the end.
# Then adjust control parameters in rdinitdt; read this file, which reads rdinitdt.
# If a mistake is made, to restart this file without a Jacobian evaluation,
# be sure to reset iterm=1 (=> last step was successful)
# IMPORT UEDGE (assuming starting from ipython before any imports)
from .uedge import *
from .ruthere import *
from .uexec import *
from numpy import zeros
# IMPORT HDF5 routines for saving solutions below
from .hdf5 import *
# INITIALIZE PARAMS -- SHOULD BE DONE IN MASTER SCRIPT OR TERMINAL SESSION
# BEFORE INVOKING THIS SCRIPT
uexec("uedge.rdinitdt",returns=globals())
no = 0;yes = 1
echo = no
# Set precisions of floating point output
###import print_options
###print_options.set_float_precision(4)
# Check if successful time-step exists (bbb.iterm=1)
if (bbb.iterm == 1):
print("Initial successful time-step exists")
bbb.dtreal = bbb.dtreal*bbb.mult_dt #compensates dtreal divided by mult_dt below
else:
print("*---------------------------------------------------------*")
print("Need to take initial step with Jacobian; trying to do here")
print("*---------------------------------------------------------*")
bbb.icntnunk = 0
bbb.exmain()
ruthere()
bbb.dtreal = bbb.dtreal*bbb.mult_dt #compensates dtreal divided by mult_dt below
if (bbb.iterm != 1):
print("*--------------------------------------------------------------*")
print("Error: converge an initial time-step first; then retry rdcontdt")
print("*--------------------------------------------------------------*")
exit()
nx=com.nx;ny=com.ny;nisp=com.nisp;ngsp=com.ngsp;numvar=bbb.numvar
isteon=bbb.isteon
if (i_stor==0):
ni_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1,nisp),"d") # set time storage arrays
up_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1,nisp),"d")
te_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1),"d")
ti_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1),"d")
ng_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1,ngsp),"d")
tg_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1,ngsp),"d")
phi_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1),"d")
tim_stor = zeros((bbb.n_stor),"d")
dtreal_stor = zeros((bbb.n_stor),"d")
nfe_stor = zeros((bbb.n_stor),"l")
dt_stor = (bbb.tstor_e - bbb.tstor_s)/(bbb.n_stor - 1)
i_stor = max(i_stor,1) # set counter for storage arrays
bbb.dt_tot = max(bbb.dt_tot,0.)
nfe_tot = max(nfe_tot,0)
deldt_0 = bbb.deldt
isdtsf_sav = bbb.isdtsfscal
if (bbb.ipt==1 and bbb.isteon==1): # set ipt to te(nx,iysptrx+1) if no user value
ipt = bbb.idxte[nx-1,com.iysptrx] #note: ipt is local, bbb.ipt global
bbb.irev = -1 # forces second branch of irev in ii1 loop below
if (bbb.iterm == 1): # successful initial run with dtreal
bbb.dtreal = bbb.dtreal/bbb.mult_dt # gives same dtreal after irev loop
else: # unsuccessful initial run; reduce dtreal
bbb.dtreal = bbb.dtreal/(3*bbb.mult_dt) # causes dt=dt/mult_dt after irev loop
if (bbb.initjac == 0): bbb.newgeo=0
dtreal_sav = bbb.dtreal
bbb.itermx = bbb.itermxrdc
bbb.dtreal = bbb.dtreal/bbb.mult_dt #adjust for mult. to follow; mult_dt in rdinitdt
bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
neq=bbb.neq
svrpkg=bbb.svrpkg.tostring().strip()
#
bbb.ylodt = bbb.yl
bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
fnrm_old = sqrt(sum((bbb.yldot[0:neq]*bbb.sfscal[0:neq])**2))
if (bbb.initjac == 1): fnrm_old=1.e20
print("initial fnrm =",fnrm_old)
for ii1 in range( 1, bbb.ii1max+1):
if (bbb.ismfnkauto==1): bbb.mfnksol = 3
# adjust the time-step
if (bbb.irev == 0):
# Only used after a dt reduc. success. completes loop ii2 for fixed dt
bbb.dtreal = min(3*bbb.dtreal,bbb.t_stop) #first move forward after reduction
bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
bbb.deldt = 3*bbb.deldt
else:
# either increase or decrease dtreal; depends on mult_dt
bbb.dtreal = min(bbb.mult_dt*bbb.dtreal,bbb.t_stop)
bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
bbb.deldt = bbb.mult_dt*bbb.deldt
bbb.dtreal = min(bbb.dtreal,bbb.dt_max)
bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
bbb.deldt = min(bbb.deldt,deldt_0)
bbb.deldt = max(bbb.deldt,bbb.deldt_min)
nsteps_nk=1
print('--------------------------------------------------------------------')
print('--------------------------------------------------------------------')
print(' ')
print('*** Number time-step changes = ',ii1,' New time-step = ', bbb.dtreal)
print('--------------------------------------------------------------------')
bbb.itermx = bbb.itermxrdc
if (ii1>1 or bbb.initjac==1): # first time calc Jac if initjac=1
if (bbb.irev == 1): # decrease in bbb.dtreal
if (bbb.numrev < bbb.numrevjmax and \
bbb.numrfcum < bbb.numrevjmax+bbb.numfwdjmax): #dont recom bbb.jac
bbb.icntnunk = 1
bbb.numrfcum = bbb.numrfcum + 1
else: # force bbb.jac calc, reset numrev
bbb.icntnunk = 0
bbb.numrev = -1 # yields api.zero in next statement
bbb.numrfcum = 0
bbb.numrev = bbb.numrev + 1
bbb.numfwd = 0
else: # increase in bbb.dtreal
if (bbb.numfwd < bbb.numfwdjmax and \
bbb.numrfcum < bbb.numrevjmax+bbb.numfwdjmax): #dont recomp bbb.jac
bbb.icntnunk = 1
bbb.numrfcum = bbb.numrfcum + 1
else:
bbb.icntnunk = 0 #recompute jacobian for increase dt
bbb.numfwd = -1
bbb.numrfcum = 0
bbb.numfwd = bbb.numfwd + 1
bbb.numrev = 0 #bbb.restart counter for dt reversals
bbb.isdtsfscal = isdtsf_sav
bbb.ftol = min(bbb.ftol_dt, 0.01*fnrm_old)
bbb.ftol = max(bbb.ftol, bbb.ftol_min)
# bug fix: was a bare exmain(), an undefined name here; every other call
# site in this script (initial step and inner ii2 loop) uses bbb.exmain().
bbb.exmain()  # take a single step at the present bbb.dtreal
ruthere()
if (bbb.iterm == 1):
bbb.dt_tot = bbb.dt_tot + bbb.dtreal
nfe_tot = nfe_tot + bbb.nfe[0,0]
bbb.ylodt = bbb.yl
bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
# bug fix: slice 0:neq-1 silently dropped the last equation from the norm;
# 0:neq covers all neq equations, matching the initial fnrm computation.
fnrm_old = sqrt(sum((bbb.yldot[0:neq]*bbb.sfscal[0:neq])**2))
if (bbb.dt_tot>=0.9999999*bbb.t_stop or fnrm_old<bbb.ftol_min):
print(' ')
print('*****************************************************')
print('** SUCCESS: frnm < bbb.ftol; or dt_tot >= t_stop **')
print('*****************************************************')
break
bbb.icntnunk = 1
bbb.isdtsfscal = 0
for ii2 in range( 1, bbb.ii2max+1): #take ii2max steps at the present time-step
if (bbb.iterm == 1):
bbb.itermx = bbb.itermxrdc
bbb.ftol = min(bbb.ftol_dt, 0.01*fnrm_old)
bbb.ftol = max(bbb.ftol, bbb.ftol_min)
bbb.exmain()
ruthere()
if (bbb.iterm == 1):
bbb.ylodt = bbb.yl
bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
# bug fix: slice 0:neq-1 silently dropped the last equation from the norm;
# 0:neq covers all neq equations, matching the initial fnrm computation.
fnrm_old = sqrt(sum((bbb.yldot[0:neq]*bbb.sfscal[0:neq])**2))
print("Total time = ",bbb.dt_tot,"; Timestep = ",bbb.dtreal)
print("variable index ipt = ",ipt, " bbb.yl[ipt] = ",bbb.yl[ipt])
dtreal_sav = bbb.dtreal
bbb.dt_tot = bbb.dt_tot + bbb.dtreal
nfe_tot = nfe_tot + bbb.nfe[0,0]
hdf5_save(savefn)
if (bbb.dt_tot>=0.999999999999*bbb.t_stop or fnrm_old<bbb.ftol_min):
print(' ')
print('*****************************************************')
print('** SUCCESS: frnm < bbb.ftol; or dt_tot >= t_stop **')
print('*****************************************************')
break
print(" ")
## Store variables if a storage time has been crossed
if (bbb.dt_tot >= dt_stor*i_stor and i_stor<=bbb.n_stor):
    i_stor1 = i_stor-1
    # NOTE(review): the bare names ni, up, te, ti, ng, phi are assumed to be
    # bound to the bbb state arrays by the rdinitdt globals -- confirm;
    # every other state access in this script is spelled bbb.*
    ni_stor[i_stor1,:,:,:] = ni
    up_stor[i_stor1,:,:,:] = up
    te_stor[i_stor1,:,:] = te
    ti_stor[i_stor1,:,:] = ti      # bug fix: was ti_stor1, an undefined name
    ng_stor[i_stor1,:,:,:] = ng
    phi_stor[i_stor1,:,:] = phi    # bug fix: was phi_stor1, an undefined name
    tim_stor[i_stor1] = bbb.dt_tot
    nfe_stor[i_stor1] = nfe_tot
    dtreal_stor[i_stor1] = bbb.dtreal
    # NOTE(review): tg_stor is allocated above but never filled here -- confirm
    i_stor = i_stor + 1
## End of storage section
if (bbb.dt_tot>=bbb.t_stop or fnrm_old<bbb.ftol_min): break # need for both loops
bbb.irev = bbb.irev-1
if (bbb.iterm != 1): #print bad eqn, cut dtreal by 3, set irev flag
####### a copy of idtroub script ########################
oldecho=echo
echo=no
# integer ii
# real8 ydmax
scalfac = bbb.sfscal
if (svrpkg != "nksol"): scalfac = 1/(bbb.yl + 1.e-30) # for time-dep calc.
ydmax = 0.999999999*max(abs(bbb.yldot*scalfac))
itrouble = 0
for ii in range(neq):
if (abs(bbb.yldot[ii]*scalfac[ii]) > ydmax):
itrouble=ii
print("** Fortran index of trouble making equation is:")
print(itrouble+1)
break
print("** Number of variables is:")
print("numvar = ", numvar)
print(" ")
iv_t = (itrouble).__mod__(numvar) + 1
print("** Troublemaker equation is:")
print("iv_t = ",iv_t)
print(" ")
print("** Troublemaker cell (ix,iy) is:")
print(bbb.igyl[itrouble,])
print(" ")
print("** Timestep for troublemaker equation:")
print(bbb.dtuse[itrouble])
print(" ")
print("** yl for troublemaker equation:")
print(bbb.yl[itrouble])
print(" ")
echo=oldecho
######## end of idtroub script ##############################
if (bbb.dtreal < bbb.dt_kill):
print(' ')
print('*************************************')
print('** FAILURE: time-step < dt_kill **')
print('*************************************')
break
bbb.irev = 1
print('*** Converg. fails for bbb.dtreal; reduce time-step by 3, try again')
print('----------------------------------------------------------------- ')
bbb.dtreal = bbb.dtreal/(3*bbb.mult_dt)
bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
bbb.deldt = bbb.deldt/(3*bbb.mult_dt)
bbb.iterm = 1
echo = yes
| 10,702 | 41.472222 | 88 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case3/rd_forthon_case3.py | #Typical mesh (nx=64, ny=32) for DIII-D MHD equilibrium
#Uses inertial neutrals, so six variables (ni,upi,Te,Ti,ng,upg)
#
##package flx;package grd;package bbb
# Initialize pyuedge
from uedge import *
# Set the geometry
bbb.mhdgeo = 1 #use MHD equilibrium
os.system('rm -f aeqdsk neqdsk') #change names of MHD eqil. files
os.system('cp aeqdskd3d aeqdsk') # (Cannot tab or indent these 3 lines)
os.system('cp neqdskd3d neqdsk')
flx.psi0min1 = 0.98 #normalized flux on core bndry
flx.psi0min2 = 0.98 #normalized flux on pf bndry
flx.psi0sep = 1.00001 #normalized flux at separatrix
flx.psi0max = 1.07 #normalized flux on outer wall bndry
flx.alfcy = 3.
bbb.ngrid = 1 #number of meshes (always set to 1)
com.nxleg[0,0] = 16 #pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 16 #pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 16 #pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 16 #pol. mesh pts from x-point to outer plate
com.nysol[0] = 24 #rad. mesh pts in SOL
com.nycore[0] =8 #rad. mesh pts in core
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33 #ion continuty eqn
bbb.methu = 33 #ion parallel momentum eqn
bbb.methe = 33 #electron energy eqn
bbb.methi = 33 #ion energy eqn
bbb.methg = 33 #neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 4.0e19 #hydrogen ion density on core
bbb.isnwcono = 3 #=3; use lyne
bbb.lyni = 0.05
## iflcore = 0 #flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 300. #core Te
bbb.tcorei = 300. #core Ti
bbb.istewc = 3 #=3 for gradient-length = lyte
bbb.istiwc = 3 #=3 for gradient-length = lyti
bbb.lyte = 0.05
bbb.lyti = 0.05
bbb.recycp[0] = 1.0 #hydrogen recycling coeff at plates
bbb.recycw[0] = 1.0
bbb.nwsor = 1
bbb.matwso[0] = 1
# Transport coefficients
bbb.difni[0] = 0.3 #D for radial hydrogen diffusion
bbb.kye = 0.5 #chi_e for radial elec energy diffusion
bbb.kyi = 0.5 #chi_i for radial ion energy diffusion
bbb.travis[0] = 1. #eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21 #electron parallel thermal conduct. coeff
bbb.flalfi = 0.21 #ion parallel thermal conduct. coeff
bbb.flalfv = 0.5 #ion parallel viscosity coeff
bbb.flalfgx = 1. #neut. gas in poloidal direction
bbb.flalfgy = 1. #neut. gas in radial direction
bbb.lgmax = 0.1
bbb.lgtmax = 0.1
bbb.lgvmax = 0.1
# Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
bbb.premeth = "ilut" #Solution method for precond. Jacobian matrix
# Neutral parallel momentum eqn
bbb.isupgon[0] = 1
bbb.isngon[0] = 0
com.ngsp = 1
com.nhsp = 2
bbb.ziin[com.nhsp-1] = 0
bbb.cfnidh = 0. #coeff. heating from charge-exchange
# Restart from a pfb savefile
bbb.restart = 1 #Begin from savefile, not estimated profiles
bbb.allocate() #allocate space for savevariables
restore('pfd3d_ex_upg.64x32m') #read in the solution from pfb file
###os.system('ln -s ~/Uedge/uedge/in/aph aph6')
aph.isaphdir = 0 #=0 if atomic data file in run directory
com.istabon = 10
# Execute uedge
bbb.exmain()
# Print out a few variables across outer midplane
print''
print'*** Printing variables versus radial index at outer midplane'
print''
print '************\nradial position relative to separatrix [m]'
print(com.yyc)
print '************\n ion density, ni [m**-3] = '
print(bbb.ni[bbb.ixmp,])
print '************\n parallel ion velocity, up [m/s] = '
print(bbb.up[bbb.ixmp,])
print '************\n electron temp, te [eV] = '
print(bbb.te[bbb.ixmp,]/bbb.ev)
print '************\n ion temp, ti [eV] = '
print(bbb.ti[bbb.ixmp,]/bbb.ev)
print '************\n gas density, ng [m**-3] = '
print(bbb.ng[bbb.ixmp,])
| 3,833 | 34.831776 | 74 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case2/rd_forthon_case2.py | #Coarse mesh (nx=16, ny=8) for DIII-D MHD equilibrium
#Uses diffusive neutrals, so five variables (ni,upi,Te,Ti,ng)
#
##package flx;package grd;package bbb # Initialize pyuedge
from uedge import *
#from uefacets import *
from uedge.pdb_restore import *
from uedge.hdf5 import *
#bbb.uedge_petscInit()
# Set the geometry
bbb.mhdgeo = 1 #use MHD equilibrium
os.system('rm -f aeqdsk neqdsk') #change names of MHD eqil. files
os.system('cp aeqdskd3d aeqdsk') # (Cannot tab or indent these 3 lines)
os.system('cp neqdskd3d neqdsk')
flx.psi0min1 = 0.98 #normalized flux on core bndry
flx.psi0min2 = 0.98 #normalized flux on pf bndry
flx.psi0sep = 1.00001 #normalized flux at separatrix
flx.psi0max = 1.07 #normalized flux on outer wall bndry
bbb.ngrid = 1 #number of meshes (always set to 1)
com.nxleg[0,0] = 4 #pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4 #pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4 #pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4 #pol. mesh pts from x-point to outer plate
com.nysol[0] = 6 #rad. mesh pts in SOL
com.nycore[0] =2 #rad. mesh pts in core
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33 #ion continuty eqn
bbb.methu = 33 #ion parallel momentum eqn
bbb.methe = 33 #electron energy eqn
bbb.methi = 33 #ion energy eqn
bbb.methg = 33 #neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 2.5e19 #hydrogen ion density on core
## iflcore = 0 #flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 100. #core Te
bbb.tcorei = 100. #core Ti
bbb.tedge = 2. #fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.8 #hydrogen recycling coeff at plates
# Transport coefficients
bbb.difni[0] = 1. #D for radial hydrogen diffusion
bbb.kye = 1. #chi_e for radial elec energy diffusion
bbb.kyi = 1. #chi_i for radial ion energy diffusion
bbb.travis[0] = 1. #eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21 #electron parallel thermal conduct. coeff
bbb.flalfi = 0.21 #ion parallel thermal conduct. coeff
bbb.flalfv = 1. #ion parallel viscosity coeff
bbb.flalfgx = 1.e20 #neut. gas in poloidal direction
bbb.flalfgy = 1.e20 #neut. gas in radial direction
# Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
#bbb.svrpkg = "petsc" #Newton solver using Krylov method
bbb.premeth = "banded" #Solution method for precond. Jacobian matrix
# Restart from a pfb savefile
bbb.restart = 1 #Begin from savefile, not estimated profiles
bbb.allocate() #allocate space for savevariables
bbb.nis.shape
#pdb_restore('./pfd3d_ex.16x8') #read in the solution from pfb file
hdf5_restore('./h5d3d_ex.16x8')
#hdf5_restore('./testhdf5')
#hdf5_save('testhdf5')
#restore('pfd3d_ex.16x8') #read in the solution from pfb file
os.system('ln -s ~/Uedge/uedge/in/aph aph6')
com.istabon = 10
aph.isaphdir = 0 #=0 specifies atomic data file is in run directory
# Execute uedge
bbb.exmain()
# Print out a few variables across outer midplane
print''
print'*** Printing variables versus radial index at outer midplane'
print''
print '************\nradial position relative to separatrix [m]'
print(com.yyc)
print '************\n ion density, ni [m**-3] = '
print(bbb.ni[bbb.ixmp,])
print '************\n parallel ion velocity, up [m/s] = '
print(bbb.up[bbb.ixmp,])
print '************\n electron temp, te [eV] = '
print(bbb.te[bbb.ixmp,]/bbb.ev)
print '************\n ion temp, ti [eV] = '
print(bbb.ti[bbb.ixmp,]/bbb.ev)
| 3,496 | 36.202128 | 74 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case6/rdtimedpd_1.py | #Coarse mesh (nx=16, ny=8) for DIII-D MHD equilibrium
#Uses diffusive neutrals, so five variables (ni,upi,Te,Ti,ng)
#
##package flx;package grd;package bbb
# Initialize pyuedge
from uedge import *
# Set the geometry
bbb.mhdgeo = 1 #use MHD equilibrium
os.system('rm -f aeqdsk neqdsk') #change names of MHD eqil. files
os.system('cp aeqdskd3d aeqdsk') # (Cannot tab or indent these 3 lines)
os.system('cp neqdskd3d neqdsk')
flx.psi0min1 = 0.98 #normalized flux on core bndry
flx.psi0min2 = 0.98 #normalized flux on pf bndry
flx.psi0sep = 1.00001 #normalized flux at separatrix
flx.psi0max = 1.07 #normalized flux on outer wall bndry
bbb.ngrid = 1 #number of meshes (always set to 1)
com.nxleg[0,0] = 4 #pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4 #pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4 #pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4 #pol. mesh pts from x-point to outer plate
com.nysol[0] = 6 #rad. mesh pts in SOL
com.nycore[0] =2 #rad. mesh pts in core
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33 #ion continuty eqn
bbb.methu = 33 #ion parallel momentum eqn
bbb.methe = 33 #electron energy eqn
bbb.methi = 33 #ion energy eqn
bbb.methg = 33 #neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 2.5e19 #hydrogen ion density on core
## iflcore = 0 #flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 100. #core Te
bbb.tcorei = 100. #core Ti
bbb.tedge = 2. #fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.8 #hydrogen recycling coeff at plates
# Transport coefficients
bbb.difni[0] = 1. #D for radial hydrogen diffusion
bbb.kye = 1. #chi_e for radial elec energy diffusion
bbb.kyi = 1. #chi_i for radial ion energy diffusion
bbb.travis[0] = 1. #eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21 #electron parallel thermal conduct. coeff
bbb.flalfi = 0.21 #ion parallel thermal conduct. coeff
bbb.flalfv = 1. #ion parallel viscosity coeff
bbb.flalfgx = 1.e20 #neut. gas in poloidal direction
bbb.flalfgy = 1.e20 #neut. gas in radial direction
# Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
bbb.premeth = "banded" #Solution method for precond. Jacobian matrix
# Restart from a pfb savefile
bbb.restart = 1 #Begin from savefile, not estimated profiles
bbb.allocate() #allocate space for savevariables
restore('pfd3d_ex.16x8') #read in the solution from pfb file
###os.system('ln -s ~/Uedge/uedge/in/aph aph6')
com.istabon = 10
aph.isaphdir = 0 #=0 specifies atomic data file is in run directory
# Set initial time-step and execute uedge
bbb.dtreal=1e-4
bbb.exmain()
# Read the initialization file for time-dependent parameters, and run to S.S.
execfile("rdinitdt.py")
execfile("rdcontdt.py")
| 2,813 | 35.545455 | 77 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case6/rdtimedpd_2.py | #Coarse mesh (nx=16, ny=8) for DIII-D MHD equilibrium
#Uses diffusive neutrals, so five variables (ni,upi,Te,Ti,ng)
#
##package flx;package grd;package bbb
# Initialize pyuedge
from uedge import *
# Set the geometry
bbb.mhdgeo = 1 #use MHD equilibrium
os.system('rm -f aeqdsk neqdsk') #change names of MHD eqil. files
os.system('cp aeqdskd3d aeqdsk') # (Cannot tab or indent these 3 lines)
os.system('cp neqdskd3d neqdsk')
flx.psi0min1 = 0.98 #normalized flux on core bndry
flx.psi0min2 = 0.98 #normalized flux on pf bndry
flx.psi0sep = 1.00001 #normalized flux at separatrix
flx.psi0max = 1.07 #normalized flux on outer wall bndry
bbb.ngrid = 1 #number of meshes (always set to 1)
com.nxleg[0,0] = 4 #pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4 #pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4 #pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4 #pol. mesh pts from x-point to outer plate
com.nysol[0] = 6 #rad. mesh pts in SOL
com.nycore[0] =2 #rad. mesh pts in core
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33 #ion continuty eqn
bbb.methu = 33 #ion parallel momentum eqn
bbb.methe = 33 #electron energy eqn
bbb.methi = 33 #ion energy eqn
bbb.methg = 33 #neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 10.e19 #hydrogen ion density on core
## iflcore = 0 #flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 100. #core Te
bbb.tcorei = 100. #core Ti
bbb.tedge = 2. #fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.8 #hydrogen recycling coeff at plates
# Transport coefficients
bbb.difni[0] = 1. #D for radial hydrogen diffusion
bbb.kye = 1. #chi_e for radial elec energy diffusion
bbb.kyi = 1. #chi_i for radial ion energy diffusion
bbb.travis[0] = 1. #eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21 #electron parallel thermal conduct. coeff
bbb.flalfi = 0.21 #ion parallel thermal conduct. coeff
bbb.flalfv = 1. #ion parallel viscosity coeff
bbb.flalfgx = 1.e20 #neut. gas in poloidal direction
bbb.flalfgy = 1.e20 #neut. gas in radial direction
# Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
bbb.premeth = "banded" #Solution method for precond. Jacobian matrix
# Restart from a pfb savefile
bbb.restart = 1 #Begin from savefile, not estimated profiles
bbb.allocate() #allocate space for savevariables
restore('pfd3d_ex.16x8') #read in the solution from pfb file
###os.system('ln -s ~/Uedge/uedge/in/aph aph6')
com.istabon = 10
aph.isaphdir = 0 #=0 specifies atomic data file is in run directory
# Set initial time-step and execute uedge
bbb.dtreal=1e-2
bbb.itermx=10
bbb.exmain()
# Read the initialization file for time-dependent parameters, and run to S.S.
execfile("rdinitdt.py")
execfile("rdcontdt.py")
| 2,827 | 35.25641 | 77 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case5/plate.iter-feat.py | # define divertor plate for ITER-FEAT
# derived from equilibrium from Kukushkin Jan 2002
###integer oldecho=echo
###echo=no
# for inboard half of mesh:
grd.nplate1=15
grd.gchange("Mmod",0)
grd.rplate1=[\
3.96500E+00, 4.06360E+00, 4.29140E+00, 4.44480E+00, 4.48730E+00, \
4.40860E+00, 4.23980E+00, 4.07090E+00, 4.34190E+00, 4.42400E+00, \
4.47660E+00, 4.52350E+00, 4.55570E+00, 4.76130E+00, 4.96790E+00]
grd.zplate1=[\
2.94670E+00, 2.93000E+00, 2.83000E+00, 2.63410E+00, 2.38890E+00, \
2.15280E+00, 1.88820E+00, 1.62250E+00, 1.37820E+00, 1.40330E+00, \
1.61430E+00, 1.80290E+00, 1.84040E+00, 1.88310E+00, 1.84590E+00]
# for outboard half of mesh:
grd.nplate2=14
grd.gchange("Mmod",0)
grd.rplate2=[\
4.96790E+00, 5.14560E+00, 5.26870E+00, 5.26710E+00, \
5.15840E+00, 5.04220E+00, 5.07300E+00, 5.56490E+00, 5.56450E+00, \
5.56350E+00, 5.61140E+00, 5.74950E+00, 5.95820E+00, 6.20770E+00]
grd.zplate2=[\
1.84590E+00, 1.73420E+00, 1.56400E+00, 1.51460E+00, \
1.31340E+00, 1.09830E+00, 1.02620E+00, 8.91300E-01, 1.24570E+00, \
1.59910E+00, 1.85160E+00, 2.06830E+00, 2.21830E+00, 2.28010E+00]
###echo=oldecho
| 1,132 | 31.371429 | 68 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case5/rd_forthon_case5.py | ###character*6 probname="itfa40"
#Case of full toroidal equilibrium for ITER-FEAT with multi-species Carbon
#
####package flx;package grd;package bbb
# Initial pyuedge
from uedge import *
from pdb_restore import *
bbb.mhdgeo=1
bbb.isfixlb=0
bbb.isfixrb=0
os.system('rm -f aeqdsk neqdsk')
os.system('cp aeq_iter-feat aeqdsk')
os.system('cp geq_iter-feat neqdsk')
###character*9 machine="bbb.iter-feat"
# Set the geometry
bbb.ngrid = 1
grd.kxmesh = 1 #=4 for exponential grid in leg regions
grd.dxgas[0] = 1.2e-03
grd.dxgas[1] = 1.2e-03
grd.nxgas[0] = 11
grd.nxgas[1] = 11
grd.alfx[0] = .64
grd.alfx[1] = .64
com.nxleg[0,0]=17
com.nxleg[0,1]=17
com.nxcore[0,0]=14
com.nxcore[0,1]=14
com.nycore[0]=10
com.nysol[0]=16
flx.psi0min1 = 0.95 #core minimum psi
flx.psi0min2 = 0.992 #private flux minimum psi
flx.psi0max = 1.035 #maximum flux at wall
flx.alfcy = 2.0 #nonuniformity factor for radial mesh
grd.slpxt=1.2
# Mesh construction--non orthogonal mesh
com.ismmon=3
grd.istream=0
grd.iplate=1
grd.nsmooth=3
grd.wtmesh1=0.75
grd.dmix0=1.0
execfile('plate.iter-feat.py')
com.isnonog=1 # non orthogonal differencing
# Boundary conditions
bbb.isnicore[0] = 1 #=1uses ncore for density BC
bbb.isngcore[0]=2 # use ionization scale length for gas
bbb.ncore[0] = 6.0e19 #value of core density if isnicore=1
bbb.curcore = 0. #core particle current if isnicore=0
bbb.iflcore = 1 #specify core power
bbb.pcoree = 5.0e7 #electron power across core
bbb.pcorei = 5.0e7 #ion power across core
bbb.recycp=1.0
bbb.istepfc=3;bbb.istipfc=3 #priv. flux has fixed temperature scale length.
bbb.istewc=3;bbb.istiwc=3 #wall has fixed temperature scale length.
bbb.isnwcono=3;bbb.isnwconi=3 #walls have fixed density scale length
bbb.lyni=0.05;bbb.lyte=0.05;bbb.lyti=0.05
# set walls into 7 zones
bbb.nwsor=7
bbb.xgaso[0:7]=[1.533E-01, 4.599E-01, 3.506E+00, 9.291E+00, 1.508E+01, 1.817E+01, 1.858E+01]
bbb.wgaso[0:7]=[3.066E-01, 3.066E-01, 5.790E+00, 5.790E+00, 5.790E+00, 5.000E-01, 4.089E-01]
bbb.albdso[0:7]=1.
bbb.matwso[0:7]=1.
bbb.xgasi[0:7]=[1.213E-01, 3.638E-01, 6.063E-01, 9.024E-01, 1.246E+00, 1.541E+00, 1.837E+00]
bbb.wgasi[0:7]=[2.425E-01, 2.425E-01, 2.425E-01, 3.705E-01, 2.955E-01, 2.955E-01, 2.955E-01]
bbb.albdsi[0:7]=[0.98,0.98,1.0,1.0,1.0,0.98,0.98]
bbb.matwsi[0:7]=1
bbb.recycw[0]=1.0
bbb.bcee = 5.; bbb.bcei = 3.5 #energy transmission coeffs.
bbb.isupss = 1 #parallel vel can be supersonic
# Transport coefficients
bbb.difni[0] = 0.3
bbb.kye = 1.; bbb.kyi = 1.
bbb.travis[0]=1.;bbb.parvis[0]=1.
# Flux limits
bbb.flalfe=0.21;bbb.flalfi=0.21;bbb.flalfgx=1.;bbb.flalfgy=1.;bbb.flalfgxy=1.;bbb.flalfv=0.5
bbb.lgmax=0.05
bbb.isplflxl=0
# Finite difference algorithms
bbb.methe=33;bbb.methu=33;bbb.methg=66
bbb.methn=33;bbb.methi=33
# Solver package
bbb.svrpkg="nksol"
bbb.mfnksol = 3
bbb.epscon1 = .005
bbb.ftol = 1.e-8
bbb.iscolnorm = 3 # set to 3 for nksol
bbb.premeth="ilut"
bbb.lfililut = 100
bbb.lenpfac=75
bbb.runtim=1.e-7
bbb.rlx=0.9
###bbb.del=1.e-8
# Neutral gas properties
bbb.tfcx=5.;bbb.tfcy=5. #Franck-Condon temperatures
bbb.eion = 5. #F-C energy to each born ion
bbb.ediss = 10. #diss. energy from elec. (ediss=2*eion)
bbb.isrecmon = 1 #e-i recombination (=1 is on)
bbb.ngbackg=1.e12 # minimum floor neutral density
bbb.ingb=4 # parameter used to force floor density
# Inertial neutral model
bbb.isupgon[0]=1;bbb.isngon[0]=0;com.ngsp=1;com.nhsp=2;bbb.ziin[com.nhsp-1]=0
bbb.cngmom=0;bbb.cmwall=0;bbb.cngtgx=0;bbb.cngtgy=0;bbb.cngflox=0;bbb.cngfloy=0;bbb.cfbgt=0
bbb.kxn=0;bbb.kyn=0
bbb.flalftgx=10.0;bbb.flalftgy=10.0
# Currents and potential parameters
bbb.isphion=0
bbb.rsigpl=1.e-8 #anomalous cross-field conductivity
bbb.cfjhf=0. #turn-on heat flow from current (fqp)
bbb.jhswitch=0 #Joule Heating switch
# Hydrogenic ions
bbb.minu[0:2] = 2.5
# Atomic physics packages
com.istabon=10 #Stotler's rates for istabon=10
aph.isaphdir = 0 #=0 specifies atomic data file is in run directory
## Impurity gas
com.ngsp = 2 #total number of gas species
bbb.isngon[1] = 1 #turns on impurity gas
bbb.ngbackg[1] = 1.e9
bbb.ingb = 2
bbb.istgcon[1] = 1 #=1 for constant tg(2) at tgas(2)
bbb.tgas[1] = 1. #value for tg when istgcon=1
bbb.rcxighg = 0. # best value; ratio of imp cx to hyd cx
bbb.kelighi[1] = 5.e-16 #elastic sig_v for imp_gas/h_ion
bbb.kelighg[1] = 5.e-16 #elastic sig_v for imp_gas/h_gas
bbb.n0g[1] = 1.e16 #imp. gas density normalization
# Impurity gas boundary conditions
bbb.recycp[1] = 0.01 #plate recycling of impurities
bbb.recycw[1] = 1e-4 #wall recycling; matwsi,o set above for hyd
bbb.isch_sput[1]=7;bbb.isph_sput[1]=3 # Haasz/Davis sputtering model
bbb.t_wall=300;bbb.t_plat=500 #wall and plate temperatures
bbb.crmb=bbb.minu[0] #mass of bombarding particles
bbb.allocate() #allocate chemywi,o etc
bbb.fchemywi=1.;bbb.fchemywo=1. #scaling factor for chem sputt walls
bbb.fchemylb=1.;bbb.fchemyrb=1. #scaling factor for chem sputt plates
## Impurity ions
bbb.isimpon = 6 #Use force-balance only
com.nzsp[0] = 6 #number chrg states impurity isotope #1
bbb.csfaclb[2:8] = 2.191 #at plate csout=sqrt(mi_imp/m_dt) for up=up_imp
bbb.csfacrb[2:8] = 2.191 #at plate csout=sqrt(mi_imp/m_dt) for up=up_imp
bbb.minu[2:8] = 12. #mass in AMU; python doesn't fill last place of [2:8]
bbb.ziin[2:8] = [1,2,3,4,5,6] #charge per ion
bbb.znuclin[0:2] = 1 #nuclear charge for hydrogen species (python fills 2-1)
bbb.znuclin[2:8] = 6 #nuclear charge for impurity (python fills 8-1)
bbb.n0[2:8] = 1.e17 #global density normalization
bbb.nzbackg = 1.e9 #background density for impurities
bbb.inzb = 2 #exponent for switching on nzbackg
bbb.ismctab = 2 # use Braams' rate tables
com.mcfilename[0] = "C" # Imp rate file name
com.mcfilename[1] = "_"
com.mcfilename[2] = "r"
com.mcfilename[3] = "a"
com.mcfilename[4] = "t"
com.mcfilename[5] = "e"
com.mcfilename[6] = "s"
com.mcfilename[7] = "."
com.mcfilename[8] = "a"
com.mcfilename[9] = "d"
com.mcfilename[10] = "a"
com.mcfilename[11] = "s"
com.mcfilename = 'C_rates_adas\0'
# Impurity ion boundary conditions
###bbb.isnicore(com.nhsp+com.nzsp[0])= 3 #=3 for flux=curcore & constant ni
###bbb.curcore(com.nhsp+com.nzsp[0]) = 0. #Rcurrent for isnicore=3
bbb.isnicore[com.nhsp+com.nzsp[0]-1]= 3 #=3 for flux=curcore & constant ni
bbb.curcore[com.nhsp+com.nzsp[0]-1] = 0. #Rcurrent for isnicore=3
bbb.isnwcono[2:8] = 3 #use lyni scale-length (set above); outer wall
bbb.isnwconi[2:8] = 3 #use lyni scale-length (set above); inner wall
bbb.nwomin[2:8] = 1e7 #minimum ni at outer sidewall
bbb.nwimin[2:8] = 1e7 #minimum ni at inner sidewall
# Restart from a save file
bbb.restart = 1
bbb.allocate()
pdb_restore ('pfiter_msC.15')
# Execute UEDGE for this case
bbb.exmain()
# Print out a few variables across outer midplane
print''
print'*** Printing variables versus radial index at outer midplane'
print''
print '************\nradial position relative to separatrix [m]'
print(com.yyc)
print '************\n ion densities, ni [m**-3] = '
print(bbb.ni[bbb.ixmp,])
print '************\n parallel ion velocity, up [m/s] = '
print(bbb.up[bbb.ixmp,])
print '************\n electron temp, te [eV] = '
print(bbb.te[bbb.ixmp,]/bbb.ev)
print '************\n ion temp, ti [eV] = '
print(bbb.ti[bbb.ixmp,]/bbb.ev)
print '************\n gas densities, ng [m**-3] = '
print(bbb.ng[bbb.ixmp,])
| 7,524 | 34.163551 | 92 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case1/rd_forthon_case1.py | # This input file sets the parameters for the parallel test case
#
from uedge import *
# flags also set in the workstation version.
bbb.mhdgeo=-1
bbb.isnion=1
bbb.isupon=1
bbb.isteon=1
bbb.istion=1
bbb.isngon=0
bbb.svrpkg="vodpk"
bbb.premeth="ilut"
bbb.epscon1=1.e-2
bbb.ireorder=0
bbb.ncore=2.e19
bbb.tcoree=100
bbb.tcorei=100
bbb.isfixlb=2
bbb.recycp=.9
bbb.runtim=1.e-7
##bbb.trange=4.e6
bbb.trange=4.e3
bbb.nsteps=30
bbb.n0g=1.e16
bbb.difni=1.
bbb.kye=1.
bbb.flalfe=0.21
bbb.flalfi=0.21
bbb.flalfgx=1.e10
bbb.flalfgy=1.e10
com.nycore=0
com.nysol=10
com.nxleg[0,0]=0
com.nxleg[0,1]=2
com.nxcore[0,0]=0
com.nxcore[0,1]=4
grd.zax=1.
grd.zaxpt=.75
grd.alfyt=-1.e-5
print "Finished setting variables"
print "Allocate Storage."
bbb.allocate ()
bbb.restart=0
bbb.exmain()
| 773 | 14.795918 | 64 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case4/plate.iter-feat.py | # define divertor plate for ITER-FEAT
# derived from equilibrium from Kukushkin Jan 2002
###integer oldecho=echo
###echo=no
# for inboard half of mesh:
grd.nplate1=15
grd.gchange("Mmod",0)
grd.rplate1=[\
3.96500E+00, 4.06360E+00, 4.29140E+00, 4.44480E+00, 4.48730E+00, \
4.40860E+00, 4.23980E+00, 4.07090E+00, 4.34190E+00, 4.42400E+00, \
4.47660E+00, 4.52350E+00, 4.55570E+00, 4.76130E+00, 4.96790E+00]
grd.zplate1=[\
2.94670E+00, 2.93000E+00, 2.83000E+00, 2.63410E+00, 2.38890E+00, \
2.15280E+00, 1.88820E+00, 1.62250E+00, 1.37820E+00, 1.40330E+00, \
1.61430E+00, 1.80290E+00, 1.84040E+00, 1.88310E+00, 1.84590E+00]
# for outboard half of mesh:
grd.nplate2=14
grd.gchange("Mmod",0)
grd.rplate2=[\
4.96790E+00, 5.14560E+00, 5.26870E+00, 5.26710E+00, \
5.15840E+00, 5.04220E+00, 5.07300E+00, 5.56490E+00, 5.56450E+00, \
5.56350E+00, 5.61140E+00, 5.74950E+00, 5.95820E+00, 6.20770E+00]
grd.zplate2=[\
1.84590E+00, 1.73420E+00, 1.56400E+00, 1.51460E+00, \
1.31340E+00, 1.09830E+00, 1.02620E+00, 8.91300E-01, 1.24570E+00, \
1.59910E+00, 1.85160E+00, 2.06830E+00, 2.21830E+00, 2.28010E+00]
###echo=oldecho
| 1,132 | 31.371429 | 68 | py |
UEDGE | UEDGE-master/test/Forthon_cases/Forthon_case4/rd_forthon_case4.py | ###character*6 probname="itfa40"
#Case of full toroidal equilibrium for ITER-FEAT
# (Python 2 script: uses print statements and execfile.)
#
####package flx;package grd;package bbb
# Initialize pyuedge
from uedge import *
bbb.mhdgeo=1
bbb.isfixlb=0
bbb.isfixrb=0
# copy EFIT equilibrium files to the fixed names UEDGE reads
os.system('rm -f aeqdsk neqdsk')
os.system('cp aeq_iter-feat aeqdsk')
os.system('cp geq_iter-feat neqdsk')
###character*9 machine="bbb.iter-feat"
# Set the geometry
bbb.ngrid = 1
grd.kxmesh = 1 #=4 for exponential grid in leg regions
grd.dxgas[0] = 1.2e-03
grd.dxgas[1] = 1.2e-03
grd.nxgas[0] = 11
grd.nxgas[1] = 11
grd.alfx[0] = .64
grd.alfx[1] = .64
com.nxleg[0,0]=17
com.nxleg[0,1]=17
com.nxcore[0,0]=14
com.nxcore[0,1]=14
com.nycore[0]=10
com.nysol[0]=16
flx.psi0min1 = 0.95 #core minimum psi
flx.psi0min2 = 0.992 #private flux minimum psi
flx.psi0max = 1.035 #maximum flux at wall
flx.alfcy = 2.0 #nonuniformity factor for radial mesh
grd.slpxt=1.2
# Mesh construction--non orthogonal mesh
com.ismmon=3
grd.istream=0
grd.iplate=1
grd.nsmooth=3
grd.wtmesh1=0.75
grd.dmix0=1.0
# executes the divertor-plate definition script in this directory
execfile('plate.iter-feat.py')
com.isnonog=1 # non orthogonal differencing
# Boundary conditions
bbb.isnicore[0] = 1 #=1uses ncore for density BC
bbb.isngcore[0]=2 # use ionization scale length for gas
bbb.ncore[0] = 6.0e19 #value of core density if isnicore=1
bbb.curcore = 0. #core particle current if isnicore=0
bbb.iflcore = 1 #specify core power
bbb.pcoree = 5.0e7 #electron power across core
bbb.pcorei = 5.0e7 #ion power across core
bbb.recycp=1.0
bbb.istepfc=3;bbb.istipfc=3 #priv. flux has fixed temperature scale length.
bbb.istewc=3;bbb.istiwc=3 #wall has fixed temperature scale length.
bbb.isnwcono=3;bbb.isnwconi=3 #walls have fixed density scale length
bbb.lyni=0.05;bbb.lyte=0.05;bbb.lyti=0.05
# set walls into 7 zones
bbb.nwsor=7
bbb.xgaso[0:7]=[1.533E-01, 4.599E-01, 3.506E+00, 9.291E+00, 1.508E+01, 1.817E+01, 1.858E+01]
bbb.wgaso[0:7]=[3.066E-01, 3.066E-01, 5.790E+00, 5.790E+00, 5.790E+00, 5.000E-01, 4.089E-01]
bbb.albdso[0:7]=1.
bbb.matwso[0:7]=1.
bbb.xgasi[0:7]=[1.213E-01, 3.638E-01, 6.063E-01, 9.024E-01, 1.246E+00, 1.541E+00, 1.837E+00]
bbb.wgasi[0:7]=[2.425E-01, 2.425E-01, 2.425E-01, 3.705E-01, 2.955E-01, 2.955E-01, 2.955E-01]
bbb.albdsi[0:7]=[0.98,0.98,1.0,1.0,1.0,0.98,0.98]
bbb.matwsi[0:7]=1
bbb.recycw[0]=1.0
bbb.bcee = 5.; bbb.bcei = 3.5 #energy transmission coeffs.
bbb.isupss = 1 #parallel vel can be supersonic
# Transport coefficients
bbb.difni[0] = 0.3
bbb.kye = 1.; bbb.kyi = 1.
bbb.travis[0]=1.;bbb.parvis[0]=1.
# Flux limits
bbb.flalfe=0.21;bbb.flalfi=0.21;bbb.flalfgx=1.;bbb.flalfgy=1.;bbb.flalfgxy=1.;bbb.flalfv=0.5
bbb.lgmax=0.05
bbb.isplflxl=0
# Finite difference algorithms
bbb.methe=33;bbb.methu=33;bbb.methg=66
bbb.methn=33;bbb.methi=33
# Solver package
bbb.svrpkg="nksol"
bbb.mfnksol = 3
bbb.epscon1 = .005
bbb.ftol = 1.e-8
bbb.iscolnorm = 3 # set to 3 for nksol
bbb.premeth="ilut"
bbb.lfililut = 100
bbb.lenpfac=75
bbb.runtim=1.e-7
bbb.rlx=0.9
###bbb.del=1.e-8
# Neutral gas properties
bbb.tfcx=5.;bbb.tfcy=5. #Franck-Condon temperatures
bbb.eion = 5. #F-C energy to each born ion
bbb.ediss = 10. #diss. energy from elec. (ediss=2*eion)
bbb.isrecmon = 1 #e-i recombination (=1 is on)
bbb.ngbackg=1.e12 # minimum floor neutral density
bbb.ingb=4 # parameter used to force floor density
# Inertial neutral model
bbb.isupgon[0]=1;bbb.isngon[0]=0;com.ngsp=1;com.nhsp=2;bbb.ziin[com.nhsp-1]=0
bbb.cngmom=0;bbb.cmwall=0;bbb.cngtgx=0;bbb.cngtgy=0;bbb.cngflox=0;bbb.cngfloy=0;bbb.cfbgt=0
bbb.kxn=0;bbb.kyn=0
bbb.flalftgx=10.0;bbb.flalftgy=10.0
# Currents and potential parameters
bbb.isphion=0
bbb.rsigpl=1.e-8 #anomalous cross-field conductivity
bbb.cfjhf=0. #turn-on heat flow from current (fqp)
bbb.jhswitch=0 #Joule Heating switch
# Hydrogenic ions
bbb.minu[0:2] = 2.5
# Atomic physics packages
aph.isaphdir = 0 #=0 specifies atomic data file is in run directory
com.istabon=10 #Stotler's rates for istabon=10
# turn on impurities
bbb.isimpon=2 #=2 for fixed fraction model
bbb.allocate()
# set impurity concentration
bbb.afracs = 0.03
# Restart from a save file
bbb.restart = 1
restore ('pfiter_ffC.5')
# Run this case
bbb.exmain()
# Print out a few variables across outer midplane
print''
print'*** Printing variables versus radial index at outer midplane'
print''
print '************\nradial position relative to separatrix [m]'
print(com.yyc)
print '************\n ion density, ni [m**-3] = '
print(bbb.ni[bbb.ixmp,])
print '************\n parallel ion velocity, up [m/s] = '
print(bbb.up[bbb.ixmp,])
print '************\n electron temp, te [eV] = '
print(bbb.te[bbb.ixmp,]/bbb.ev)
print '************\n ion temp, ti [eV] = '
print(bbb.ti[bbb.ixmp,]/bbb.ev)
print '************\n gas density, ng [m**-3] = '
print(bbb.ng[bbb.ixmp,])
| 4,740 | 28.08589 | 92 | py |
UEDGE | UEDGE-master/test/facets/example.py | # This is a simple example exercising the methods in uefacets.py
from uefacets import *
from Numeric import *
# Simple Python 2 driver exercising the Uedge FACETS wrapper API:
# create an instance, read input, set boundary data, advance, reset,
# re-advance, dump, and run a command string.
print "Creating UEDGE instance"
ue = Uedge()
print "Reading input file 'testin.py' which is in the working directory"
print "(Note: alternatively give the full path)"
ue.ReadParams("testin.py")
print "Set up MPI. This does nothing for now since UEDGE is running serial"
ue.SetComm()
print "Set some boundary conditions different from those in testin.py"
# single-species boundary values: density, gas density, temperature, velocity
ni = array([3.e19])
ng = array([1.e15])
ti = array([100.])
ui = array([0.])
print "Specifically, ni = ",ni[0], ", ng = ",ng[0],","
print " Te=Ti=", ti[0], ", ui = ",ui[0]
ue.SetData(ni,ui,ng,ti,ti,"val","val","val","val")
print "Take a step to time t=0.001"
niflux,uiflux,ngflux,tiflux,teflux = ue.Advance(.001)
print "The output fluxes are:"
print "niflux = ",niflux
print "uiflux = ",uiflux
print "ngflux = ",ngflux
print "tiflux = ",tiflux
print "teflux = ",teflux
print "Let's not accept this result and reset"
ue.Reset()
print "Change n_i to 2e19 and retake step"
ni = array([2.e19])
ue.SetData(ni,ui,ng,ti,ti,"val","val","val","val")
niflux,uiflux,ngflux,tiflux,teflux = ue.Advance(.001)
print "The output fluxes are:"
print "niflux = ",niflux
print "uiflux = ",uiflux
print "ngflux = ",ngflux
print "tiflux = ",tiflux
print "teflux = ",teflux
print "create dumpfiles dump.pdb and dump.hdf"
print "Note this requires that data to be dumped has been flagged in dot v files"
ue.Dump("dump")
print "Illustrating use of 'DoCommand' to print the sum of tiflux and teflux"
ue.DoCommand("print 'tiflux+teflux = ',tiflux+teflux")
| 1,598 | 31.632653 | 81 | py |
UEDGE | UEDGE-master/test/facets/testin.py | #Coarse mesh (nx=16, ny=8) for DIII-D MHD equilibrium
#Uses diffusive neutrals, so five variables (ni,upi,Te,Ti,ng)
# Coarse-mesh DIII-D input script (Python 2 syntax); note exmain() is
# deliberately commented out -- the FACETS wrapper drives the advance.
#
##package flx;package grd;package bbb
# Initialize pyuedge
from uedge import *
# Set the geometry
bbb.mhdgeo = 1 #use MHD equilibrium
os.system('rm -f aeqdsk neqdsk') #change names of MHD eqil. files
os.system('cp aeqdskd3d aeqdsk') # (Cannot tab or indent these 3 lines)
os.system('cp neqdskd3d neqdsk')
flx.psi0min1 = 0.98 #normalized flux on core bndry
flx.psi0min2 = 0.98 #normalized flux on pf bndry
flx.psi0sep = 1.00001 #normalized flux at separatrix
flx.psi0max = 1.07 #normalized flux on outer wall bndry
bbb.ngrid = 1 #number of meshes (always set to 1)
com.nxleg[0,0] = 4 #pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4 #pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4 #pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4 #pol. mesh pts from x-point to outer plate
com.nysol[0] = 6 #rad. mesh pts in SOL
com.nycore[0] =2 #rad. mesh pts in core
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33 #ion continuty eqn
bbb.methu = 33 #ion parallel momentum eqn
bbb.methe = 33 #electron energy eqn
bbb.methi = 33 #ion energy eqn
bbb.methg = 33 #neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 2.5e19 #hydrogen ion density on core
## iflcore = 0 #flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 100. #core Te
bbb.tcorei = 100. #core Ti
bbb.tedge = 2. #fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.8 #hydrogen recycling coeff at plates
# Transport coefficients
bbb.difni[0] = 1. #D for radial hydrogen diffusion
bbb.kye = 1. #chi_e for radial elec energy diffusion
bbb.kyi = 1. #chi_i for radial ion energy diffusion
bbb.travis[0] = 1. #eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21 #electron parallel thermal conduct. coeff
bbb.flalfi = 0.21 #ion parallel thermal conduct. coeff
bbb.flalfv = 1. #ion parallel viscosity coeff
bbb.flalfgx = 1.e20 #neut. gas in poloidal direction
bbb.flalfgy = 1.e20 #neut. gas in radial direction
# Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
bbb.premeth = "banded" #Solution method for precond. Jacobian matrix
# Restart from a pfb savefile
bbb.restart = 1 #Begin from savefile, not estimated profiles
bbb.allocate() #allocate space for savevariables
restore('pfd3d_ex.16x8') #read in the solution from pfb file
###os.system('ln -s ~/Uedge/uedge/in/aph aph6')
com.istabon = 10
aph.isaphdir = 0 #=0 specifies atomic data file is in run directory
# Execute uedge
#bbb.exmain()
# The triple-quoted section below is intentionally disabled (Python 2
# print statements kept for reference).
"""
# Print out a few variables across outer midplane
print''
print'*** Printing variables versus radial index at outer midplane'
print''
print '************\nradial position relative to separatrix [m]'
print(com.yyc)
print '************\n ion density, ni [m**-3] = '
print(bbb.ni[bbb.ixmp,])
print '************\n parallel ion velocity, up [m/s] = '
print(bbb.up[bbb.ixmp,])
print '************\n electron temp, te [eV] = '
print(bbb.te[bbb.ixmp,]/bbb.ev)
print '************\n ion temp, ti [eV] = '
print(bbb.ti[bbb.ixmp,]/bbb.ev)
"""
| 3,181 | 35.574713 | 74 | py |
UEDGE | UEDGE-master/pytests/test_template.py | import unittest
import os
import numpy as np
import sys,getopt
def prhelp():
    """Print command-line usage for this test template to stdout."""
    msg = """
    Usage: python test_slab.py [-r|--ref] [-h] or
           pytest --forked test_slab.py
          -r|--ref to produce reference files for future test runs
    """
    print(msg)
#
# Note the numbering of the tests. Purists will say that
# tests should be order independent. Because Uedge variables
# are in shared objects it is naturally stateful. Because of
# this stateful feature the order needs to be controlled.
#
class TestRun(unittest.TestCase):
def setUp(self):
"""
This is run pre-test.
"""
def test_one(self):
"""
Test that uedge will import
"""
global ftol
try:
import uedge as ue
assert True
except:
assert False
if __name__ == '__main__':
    # Parse -h (help) and -r/--ref (store reference data) before handing
    # control to unittest.
    try:
        opts, args = getopt.getopt(sys.argv[1:],'hr',['ref'])
    except getopt.GetoptError:
        prhelp()
        sys.exit(2)
    for opt,arg in opts:
        # fixed: ('-h') is a plain string, so 'opt in' did a substring test;
        # a one-element tuple gives true membership semantics
        if opt in ('-h',):
            prhelp()
            sys.exit(2)
        elif opt in ('-r','--ref'):
            # Placeholder: a real suite stores reference data here.
            sys.exit(2)
    unittest.main()
| 1,282 | 18.439394 | 72 | py |
UEDGE | UEDGE-master/pytests/testscripts/rdinitdt.py | # Setup file to run time-dependently using dtreal
# Change dtreal for starting dt and savefname to change pfb file name
# Once variables are set, read rdrundt to execute a time-dependent run
# IMPORT UEDGE (needed to define group name bbb)
from uedge import *
# Module-level counters consumed by rdcontdt: storage-array index and
# cumulative function-evaluation count.
i_stor = 0
nfe_tot = 0
savefn = "savedt.hdf5" # name of hdf5 savefile written every timestep
# Time-stepping control parameters read by rdcontdt:
bbb.rdtphidtr = 1e20 # ratio dtphi/dtreal
bbb.ismfnkauto = 1 # if =1, mfnksol=3 for dtreal<dtmfnk3, otherwise=-3
bbb.dtmfnk3 = 5.e-4 # dtreal for mfnksol sign change if ismfnkauto=1
bbb.mult_dt = 3.4 # factor expanding dtreal after ii2max steps
bbb.ii1max = 500 # number of changes to dtreal
bbb.ii2max = 5 # number of timesteps at current dtreal
bbb.itermxrdc = 7 # value of itermx used by rdcontdt
bbb.incpset = 7 # iterations until Jacobian is recomputed
bbb.ftol_dt = 1.e-5 # fnrm tolerance for the time-dependent steps
bbb.ftol_min = 1e-9 # value of fnrm where time advance will stop
bbb.dt_tot = 0. # tot time accumulated for run (output, not input)
bbb.t_stop = 100. # value of dt_tot (sec) where calculation will stop
bbb.dt_max = 100. # maximum time step for dtreal
bbb.dt_kill = 1e-14 # min allowed time step; rdcontdt stops if reached
bbb.deldt_min = 0.04 # minimum relative change allowed for model_dt > 0
bbb.initjac = 0 # if=1, calc initial Jac upon reading rdcontdt
bbb.numrevjmax = 2 # number of dt reductions before Jac recalculated
bbb.numfwdjmax = 1 # number of dt increases before Jac recalculated
###bbb.ismmaxuc = 1 # =1 for intern calc mmaxu; =0,set mmaxu & dont chng
bbb.irev = -1 # flag to allow reduced dt advance after cutback
bbb.rlx = 0.9 # max. change in variable at each linear iteration
bbb.itermx = 7 # max. number of linear iterations allowed
bbb.tstor_s = 1e-5 # beginning time for storing solution
bbb.tstor_e = 1e-3 # ending time for storing solution
bbb.n_stor = 0 # number of linearly spaced storage points
bbb.ipt = 1 # index of variable; value printed at step
# if ipt not reset from unity, ipt=idxte(nx,iysptrx+1)
| 2,100 | 51.525 | 76 | py |
UEDGE | UEDGE-master/pytests/testscripts/uetests.py | import unittest
import os
import numpy as np
import sys,getopt
import uuid
from importlib import import_module as im
from multiprocessing import Process
def saverefs(filename):
    """Write reference data to the HDF5 file *filename*.

    Captures fnrm, the Krylov function-evaluation count (nfe), the scaled
    residual vector, and the machine's node id from the current UEDGE
    state so later test runs can compare against them; hdf5_dump pulls
    the named variables out of locals().
    """
    import uedge as ue
    import uedge.hdf5 as h5
    fnrm = ue.bbb.get_fnrm(ue.bbb.dtreal)  # current residual norm
    nfe = ue.bbb.nfe[0][0]                 # function-evaluation count
    sclyl = ue.bbb.yldot * ue.bbb.sfscal   # scaled residual vector
    nodeid = uuid.getnode()                # which machine produced the refs
    h5.hdf5_dump(filename,vars=['nodeid','fnrm','nfe','sclyl'],globals=locals())
def prhelp():
    """Print command-line usage for the test drivers to stdout."""
    usage = """
    Usage: python test_slab.py [-r|--ref] [-h|--help] or
           pytest --forked test_slab.py
          -r|--ref to produce reference files for future test runs
    """
    print(usage)
def startup(case,ftol=1.e-9):
    """
    startup("<case>",ftol=1.e-9)
    Reconverge specified case. Done for every test.

    *case* is the module name of a UEDGE input script; importing it
    executes the script (setting up the case), then exmain() converges
    the solution to tolerance *ftol*.
    """
    import uedge as ue
    im(case)          # importlib.import_module: runs the input script
    ue.bbb.ftol = ftol
    ue.bbb.exmain()   # converge
def perturb(case,ftol=1.e-9):
    """
    perturb("<case>",ftol=1.e-9)
    Perturb solution and Reconverge specified case.

    Scales the saved plasma/gas profile arrays by 2% and sets ftol; the
    re-convergence itself is done by a subsequent steadystate() call.
    (*case* is currently unused here -- kept for signature symmetry
    with startup().)
    """
    import uedge as ue
    ue.bbb.ngs = ue.bbb.ngs*1.02
    ue.bbb.nis = ue.bbb.nis*1.02
    ue.bbb.phis = ue.bbb.phis*1.02
    ue.bbb.tes = ue.bbb.tes*1.02
    ue.bbb.tis = ue.bbb.tis*1.02
    ue.bbb.ups = ue.bbb.ups*1.02
    ue.bbb.ftol = ftol
def steadystate():
    """
    steadystate()
    Evolve current case to steady state with rdcontdt.

    Importing rdinitdt and rdcontdt executes those scripts for their
    side effects (time-step setup, then the dtreal advance loop).
    """
    import uedge as ue
    import rdinitdt
    import rdcontdt
def makeperturb(filename,case,ftol=1.e-9):
    """Build the perturbed-case reference file *filename* for *case*:
    converge, perturb by 2%, evolve to steady state, then save refs."""
    import uedge as ue
    startup(case,ftol=ftol)
    perturb(case,ftol=ftol)
    steadystate()
    saverefs(filename)
def makeref(filename,case,ftol=1.e-9):
    """
    Produce and save reference data for comparison in future test runs.
    """
    import uedge as ue
    startup(case,ftol=ftol)  # converge the unperturbed case
    saverefs(filename)
# Comparison thresholds used by check_fnorm/check_nfe:
identical=1.e-10  # rel. tolerance for "same build environment" fnrm match
close=0.5         # looseness threshold reported in check_fnorm's failure message
def check_fnorm(name,filename,case,doassert=True):
    """Compare the current fnrm against the reference stored in *filename*.

    Passes when fnrm is essentially identical to the reference (same
    os/compiler environment) or when fnrm <= bbb.ftol; otherwise prints
    troublemaker-equation diagnostics.  With doassert=True the result is
    signalled via assert; otherwise True/False is returned.
    (*case* and the local *nfe* are currently unused here.)
    """
    import uedge as ue
    import uedge.hdf5 as h5
    ref={}
    h5.hdf5_restore_dump(filename,scope=ref)
    fnrm = ue.bbb.get_fnrm(ue.bbb.dtreal)
    nfe = ue.bbb.nfe[0][0]
    if np.isclose(fnrm,ref['fnrm'],atol=0.0,rtol=identical):
        #
        # If it makes it here then fnorm is basically identical.
        # Most likely running on the same os/compiler versions
        # as when the reference files were produced.
        #
        if doassert:
            print()
            print(name,' fnorm identical.')
            assert True
        else:
            return True
    elif fnrm > ue.bbb.ftol:
        # NOTE(review): the message below reports the 'close' threshold,
        # but this branch actually triggers on fnrm > ftol -- confirm intent.
        if doassert:
            print('Relative change in Fnorm too large. Threshold is ',close)
            print('fnrm: ',fnrm,' ref: ',ref['fnrm'])
            print(' rel change: ',np.abs(fnrm - ref['fnrm'])/np.abs(ref['fnrm']))
            print(' abs change: ',np.abs(fnrm - ref['fnrm']))
            print()
        # Locate the equation with the largest scaled-residual discrepancy
        # relative to the reference.
        sclyl = ue.bbb.yldot * ue.bbb.sfscal
        rsclyl = ref['sclyl']
        iva = np.abs(rsclyl - sclyl) / (np.abs(rsclyl) + np.abs(sclyl) + 1e-20)
        ind = np.where(iva == np.max(iva))
        iv = ind[0][0]
        (ix,iy) = ue.bbb.igyl[iv,0:2]  # cell indices (unused below; igyl printed directly)
        loc_troub_eqn = np.mod(iv,ue.bbb.numvar)+1
        numvar = ue.bbb.numvar
        if doassert:
            print("** Number of variables is:")
            print("numvar = ", numvar)
            print(" ")
            print("** Troublemaker equation is:")
            print("loc_troub_eqn = ",loc_troub_eqn)
            print(" ")
            print("** Troublemaker cell (ix,iy) is:")
            print(ue.bbb.igyl[iv,:])
            print(" ")
            print("** Timestep for troublemaker equation:")
            print(ue.bbb.dtuse[iv])
            print(" ")
            print("** yl for troublemaker equation:")
            print(ue.bbb.yl[iv])
            print(" ")
            assert False
        else:
            return False
    else:
        if doassert:
            assert True
        else:
            return True
def check_nfe(name,filename,case,doassert=None):
    """Compare the Krylov function-evaluation count (nfe) with the reference.

    Passes when fnrm matches the reference essentially exactly (same
    build environment) or when nfe is within 2% of the reference value.
    With doassert set, failures raise via assert; otherwise True/False is
    returned.  Note: unlike check_fnorm, doassert defaults to None
    (falsy), so the default is the boolean-return mode.
    (*case* is currently unused here.)
    """
    import uedge as ue
    import uedge.hdf5 as h5
    ref={}
    h5.hdf5_restore_dump(filename,scope=ref)
    fnrm = ue.bbb.get_fnrm(ue.bbb.dtreal)
    nfe = ue.bbb.nfe[0][0]
    if np.isclose(fnrm,ref['fnrm'],atol=0.0,rtol=identical):
        #
        # If it makes it here then fnorm is basically identical.
        # Most likely running on the same os/compiler versions
        # as when the reference files were produced.
        #
        if doassert:
            print()
            print(name,' fnorm identical.')
            assert True
        else:
            return True
    elif not np.isclose(nfe,ref['nfe'],atol=0.0,rtol=0.02):
        if doassert:
            print("The number of Krylov iterations for a 2% perturbation is ",nfe/ref['nfe']," times the ref case")
            assert False
        else:
            return False
    else:
        if doassert:
            assert True
        else:
            return True
| 4,934 | 24.050761 | 114 | py |
UEDGE | UEDGE-master/pytests/testscripts/rdcontdt.py | # This file runs a time-dependent case using dtreal. First, obtain a converged
# solution for a (usually small) dtreal; xuedge must report iterm=1 at the end.
# Then adjust control parameters in rdinitdt; read this file, which reads rdinitdt.
# If a mistake is made, to restart this file without a Jacobian evaluation,
# be sure to reset iterm=1 (=> last step was successful)
#
# FIX: the storage section inside the ii2 loop previously assigned the
# undefined names ni, up, te, ti, ng, phi and misspelled ti_stor/phi_stor
# as ti_stor1/phi_stor1 (NameError whenever n_stor storage triggered);
# it now copies the bbb state arrays into the *_stor buffers.
# IMPORT UEDGE (assuming starting from ipython before any imports)
from uedge import *
from numpy import zeros,sqrt
# IMPORT HDF5 routines for saving solutions below
from uedge.hdf5 import *
i_stor = 0
nfe_tot = 0
savefn = "savedt.hdf5" # name of hdf5 savefile written every timestep
no = 0;yes = 1
echo = no
# Set precisions of floating point output
###import print_options
###print_options.set_float_precision(4)
nx=com.nx;ny=com.ny;nisp=com.nisp;ngsp=com.ngsp;numvar=bbb.numvar
isteon=bbb.isteon
if (i_stor==0):
    ni_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1,nisp),"d") # set time storage arrays
    up_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1,nisp),"d")
    te_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1),"d")
    ti_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1),"d")
    ng_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1,ngsp),"d")
    phi_stor = zeros((bbb.n_stor,nx+1+1,ny+1+1),"d")
    tim_stor = zeros((bbb.n_stor),"d")
    dtreal_stor = zeros((bbb.n_stor),"d")
    nfe_stor = zeros((bbb.n_stor),"l")
    # NOTE(review): divides by zero when bbb.n_stor == 1 -- confirm callers
    # always use n_stor == 0 or n_stor >= 2
    dt_stor = (bbb.tstor_e - bbb.tstor_s)/(bbb.n_stor - 1)
i_stor = max(i_stor,1) # set counter for storage arrays
bbb.dt_tot = max(bbb.dt_tot,0.)
nfe_tot = max(nfe_tot,0)
deldt_0 = bbb.deldt
isdtsf_sav = bbb.isdtsfscal
if (bbb.ipt==1 and bbb.isteon==1): # set ipt to te(nx,iysptrx+1) if no user value
    ipt = bbb.idxte[nx-1,com.iysptrx] #note: ipt is local, bbb.ipt global
bbb.irev = -1 # forces second branch of irev in ii1 loop below
if (bbb.iterm == 1): # successful initial run with dtreal
    bbb.dtreal = bbb.dtreal/bbb.mult_dt # gives same dtreal after irev loop
else: # unsuccessful initial run; reduce dtreal
    bbb.dtreal = bbb.dtreal/(3*bbb.mult_dt) # causes dt=dt/mult_dt after irev loop
if (bbb.initjac == 0): bbb.newgeo=0
dtreal_sav = bbb.dtreal
bbb.itermx = bbb.itermxrdc
bbb.dtreal = bbb.dtreal/bbb.mult_dt #adjust for mult. to follow; mult_dt in rdinitdt
bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
neq=bbb.neq
svrpkg=bbb.svrpkg.tostring().strip()
#
bbb.ylodt = bbb.yl
bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
# NOTE(review): this initial norm uses slice [0:neq] while the in-loop
# norms below use [0:neq-1] -- confirm which is intended.
fnrm_old = sqrt(sum((bbb.yldot[0:neq]*bbb.sfscal[0:neq])**2))
if (bbb.initjac == 1): fnrm_old=1.e20
print(( "initial fnrm =",fnrm_old))
for ii1 in range( 1, bbb.ii1max+1):
    if (bbb.ismfnkauto==1): bbb.mfnksol = 3
    # adjust the time-step
    if (bbb.irev == 0):
        # Only used after a dt reduc. success. completes loop ii2 for fixed dt
        bbb.dtreal = min(3*bbb.dtreal,bbb.t_stop) #first move forward after reduction
        bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
        if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
        bbb.deldt = 3*bbb.deldt
    else:
        # either increase or decrease dtreal; depends on mult_dt
        bbb.dtreal = min(bbb.mult_dt*bbb.dtreal,bbb.t_stop)
        bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
        if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
        bbb.deldt = bbb.mult_dt*bbb.deldt
    bbb.dtreal = min(bbb.dtreal,bbb.dt_max)
    bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
    if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
    bbb.deldt = min(bbb.deldt,deldt_0)
    bbb.deldt = max(bbb.deldt,bbb.deldt_min)
    nsteps_nk=1
    print('--------------------------------------------------------------------')
    print('--------------------------------------------------------------------')
    print(' ')
    print(('*** Number time-step changes = ',ii1,' New time-step = ', bbb.dtreal))
    print('--------------------------------------------------------------------')
    bbb.itermx = bbb.itermxrdc
    if (ii1>1 or bbb.initjac==1): # first time calc Jac if initjac=1
        if (bbb.irev == 1): # decrease in bbb.dtreal
            if (bbb.numrev < bbb.numrevjmax and \
                bbb.numrfcum < bbb.numrevjmax+bbb.numfwdjmax): #dont recom bbb.jac
                bbb.icntnunk = 1
                bbb.numrfcum = bbb.numrfcum + 1
            else: # force bbb.jac calc, reset numrev
                bbb.icntnunk = 0
                bbb.numrev = -1 # yields api.zero in next statement
                bbb.numrfcum = 0
            bbb.numrev = bbb.numrev + 1
            bbb.numfwd = 0
        else: # increase in bbb.dtreal
            if (bbb.numfwd < bbb.numfwdjmax and \
                bbb.numrfcum < bbb.numrevjmax+bbb.numfwdjmax): #dont recomp bbb.jac
                bbb.icntnunk = 1
                bbb.numrfcum = bbb.numrfcum + 1
            else:
                bbb.icntnunk = 0 #recompute jacobian for increase dt
                bbb.numfwd = -1
                bbb.numrfcum = 0
            bbb.numfwd = bbb.numfwd + 1
            bbb.numrev = 0 #bbb.restart counter for dt reversals
        bbb.isdtsfscal = isdtsf_sav
        bbb.ftol = min(bbb.ftol_dt, 0.01*fnrm_old)
        bbb.exmain() # take a single step at the present bbb.dtreal
        if (bbb.iterm == 1):
            bbb.dt_tot = bbb.dt_tot + bbb.dtreal
            nfe_tot = nfe_tot + bbb.nfe[0,0]
            bbb.ylodt = bbb.yl
            bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
            fnrm_old = sqrt(sum((bbb.yldot[0:neq-1]*bbb.sfscal[0:neq-1])**2))
            if (bbb.dt_tot>=0.9999999*bbb.t_stop or fnrm_old<bbb.ftol_min):
                print(' ')
                print('*****************************************************')
                print('** SUCCESS: frnm < bbb.ftol; or dt_tot >= t_stop **')
                print('*****************************************************')
                break
    bbb.icntnunk = 1
    bbb.isdtsfscal = 0
    for ii2 in range( 1, bbb.ii2max+1): #take ii2max steps at the present time-step
        if (bbb.iterm == 1):
            bbb.itermx = bbb.itermxrdc
            bbb.ftol = min(bbb.ftol_dt, 0.01*fnrm_old)
            bbb.exmain()
            if (bbb.iterm == 1):
                bbb.ylodt = bbb.yl
                bbb.pandf1 (-1, -1, 0, bbb.neq, 1., bbb.yl, bbb.yldot)
                fnrm_old = sqrt(sum((bbb.yldot[0:neq-1]*bbb.sfscal[0:neq-1])**2))
                print("Total time = ",bbb.dt_tot,"; Timestep = ",bbb.dtreal)
                # NOTE(review): ipt is only bound above when bbb.ipt==1 and
                # isteon==1; otherwise this print raises NameError -- confirm.
                print("variable index ipt = ",ipt, " bbb.yl[ipt] = ",bbb.yl[ipt])
                dtreal_sav = bbb.dtreal
                bbb.dt_tot = bbb.dt_tot + bbb.dtreal
                nfe_tot = nfe_tot + bbb.nfe[0,0]
                if (bbb.dt_tot>=0.999999999999*bbb.t_stop or fnrm_old<bbb.ftol_min):
                    break
            ## Store variables if a storage time has been crossed
            if (bbb.dt_tot >= dt_stor*i_stor and i_stor<=bbb.n_stor):
                i_stor1 = i_stor-1
                # fixed: store the bbb state arrays (the bare names ni, up,
                # te, ti, ng, phi were undefined) and use the correct
                # ti_stor/phi_stor buffers (ti_stor1/phi_stor1 were typos)
                ni_stor[i_stor1,:,:,:] = bbb.ni
                up_stor[i_stor1,:,:,:] = bbb.up
                te_stor[i_stor1,:,:] = bbb.te
                ti_stor[i_stor1,:,:] = bbb.ti
                ng_stor[i_stor1,:,:,:] = bbb.ng
                phi_stor[i_stor1,:,:] = bbb.phi
                tim_stor[i_stor1] = bbb.dt_tot
                nfe_stor[i_stor1] = nfe_tot
                dtreal_stor[i_stor1] = bbb.dtreal
                i_stor = i_stor + 1
            ## End of storage section
    if (bbb.dt_tot>=bbb.t_stop or fnrm_old<bbb.ftol_min): break # need for both loops
    bbb.irev = bbb.irev-1
    if (bbb.iterm != 1): #print bad eqn, cut dtreal by 3, set irev flag
        ####### a copy of idtroub script ########################
        oldecho=echo
        echo=no
        # integer ii
        # real8 ydmax
        scalfac = bbb.sfscal
        if (svrpkg != "nksol"): scalfac = 1/(bbb.yl + 1.e-30) # for time-dep calc.
        ydmax = 0.999999999*max(abs(bbb.yldot*scalfac))
        itrouble = 0
        for ii in range(neq):
            if (abs(bbb.yldot[ii]*scalfac[ii]) > ydmax):
                itrouble=ii
                print("** Fortran index of trouble making equation is:")
                print(itrouble+1)
                break
        print("** Number of variables is:")
        print("numvar = ", numvar)
        print(" ")
        iv_t = (itrouble).__mod__(numvar) + 1
        print("** Troublemaker equation is:")
        print("iv_t = ",iv_t)
        print(" ")
        print("** Troublemaker cell (ix,iy) is:")
        print(bbb.igyl[itrouble,])
        print(" ")
        print("** Timestep for troublemaker equation:")
        print(bbb.dtuse[itrouble])
        print(" ")
        print("** yl for troublemaker equation:")
        print(bbb.yl[itrouble])
        print(" ")
        echo=oldecho
        ######## end of idtroub script ##############################
        if (bbb.dtreal < bbb.dt_kill):
            print(' ')
            print('*************************************')
            print('** FAILURE: time-step < dt_kill **')
            print('*************************************')
            break
        bbb.irev = 1
        print('*** Converg. fails for bbb.dtreal; reduce time-step by 3, try again')
        print('----------------------------------------------------------------- ')
        bbb.dtreal = bbb.dtreal/(3*bbb.mult_dt)
        bbb.dtphi = bbb.rdtphidtr*bbb.dtreal
        if (bbb.ismfnkauto==1 and bbb.dtreal > bbb.dtmfnk3): bbb.mfnksol = -3
        bbb.deldt = bbb.deldt/(3*bbb.mult_dt)
        bbb.iterm = 1
echo = yes
| 9,290 | 41.040724 | 88 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D_only/j.py | import uedge
# Tiny driver: run the d3dHsm case and show its mesh.
import rd_d3dHsm_in            # importing the input script sets up the case
import uedge
uedge.bbb.exmain()             # converge the solution
import uedge.uedgeplots as up
up.plotmesh()                  # display the computational mesh
| 110 | 12.875 | 29 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D_only/rd_d3dHsm_in.py | #
#
###########################################################################
# DESCRIPTION OF PROBLEM (d3dHsm) from FACETS test suite:
# DIII-D single-null geometry with 5 variables (ni,upi,te,ti,ng) and a
# (16+2)*(8+2)=18x10 [poloidal*radial] mesh yielding 900 variables.
# Solver used is Newton Krylov (svrpkg="nksol") and preconditioner uses a
# direct banded solver for the LU decomposition (premeth="banded"). Iterates
# to steady-state solution from an initial profile file (HF5).
###########################################################################
import uedge
from uedge import *
# Set the geometry
bbb.mhdgeo = 1 #=1 use MHD equilibrium files
##flx.aeqdskfname = "aeqdskd3d" #name of EFIT 'a' file for flux-surface mesh
##flx.geqdskfname = "neqdskd3d" #name of EFIT 'g' or 'n' file for flux-sur mesh
flx.psi0min1 = 0.98 #normalized flux on core bndry
flx.psi0min2 = 0.98 #normalized flux on pf bndry
flx.psi0sep = 1.00001 #normalized flux at separatrix
flx.psi0max = 1.07 #normalized flux on outer wall bndry
bbb.ngrid = 1 #number of mesh sequenc. (always set to 1)
com.nxleg[0,0] = 4 #pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4 #pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4 #pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4 #pol. mesh pts from x-point to outer plate
com.nysol[0] = 6 #rad. mesh pts in SOL
com.nycore[0] = 2 #rad. mesh pts in core
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33 #ion continuty eqn
bbb.methu = 33 #ion parallel momentum eqn
bbb.methe = 33 #electron energy eqn
bbb.methi = 33 #ion energy eqn
bbb.methg = 33 #neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 2.5e19 #hydrogen ion density on core
## iflcore = 0 #flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 100. #core Te
bbb.tcorei = 100. #core Ti
bbb.tedge = 2. #fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.8 #hydrogen recycling coeff at plates
# Transport coefficients (m**2/s)
bbb.difni[0] = 1. #D for radial hydrogen diffusion
bbb.kye = 1. #chi_e for radial elec energy diffusion
bbb.kyi = 1. #chi_i for radial ion energy diffusion
bbb.travis[0] = 1. #eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21 #electron parallel thermal conduct. coeff
bbb.flalfi = 0.21 #ion parallel thermal conduct. coeff
bbb.flalfv = 1. #ion parallel viscosity coeff
bbb.flalfgx = 1.e20 #neut. gas in poloidal direction
bbb.flalfgy = 1.e20 #neut. gas in radial direction
# Solver package
bbb.svrpkg = "nksol" #Newton solver using Krylov method
bbb.premeth = "banded" #Solution method for precond. Jacobian matrix
# Restart from a HDF5 or PDB savefile
bbb.restart = 1 #Begin from savefile, not estimated profiles
bbb.allocate() #allocates storage for arrays
from uedge.hdf5 import *
hdf5_restore("d3dHsm.h5") #read the initial profiles from the HDF5 savefile
# Atomic data switches
com.istabon = 10 #=10 specifics hydrogen data file ehr2.dat
| 2,997 | 41.828571 | 81 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D_only/test_d3d_snull_D_only.py | import unittest
import os
import numpy as np
import sys,getopt
from multiprocessing import Process
# Make this test's directory and the shared testscripts directory importable,
# then load the shared helpers and define this case's parameters.
thisfile=os.path.realpath(__file__)
thispath=os.path.dirname(thisfile)
sys.path.insert(0,thispath)
sys.path.insert(0,os.path.dirname(os.path.dirname(thispath))+'/testscripts')
import uetests as uet
ftol = 1.e-9               # convergence tolerance passed to uet.startup/perturb
name = 'd3d_snull_D_only'  # label used in test report messages
restart = 'rd_d3dHsm_in'   # input-script module defining this case
def prhelp():
    """Write the command-line usage message for this test module to stdout."""
    sys.stdout.write("""
    Usage: python test_slab.py [-r|--ref] [-h|--help] or
           pytest --forked test_slab.py
          -r|--ref to produce reference files for future test runs
    """)
    sys.stdout.write("\n")
#
# Note the numbering of the tests. Purists will say that
# tests should be order independent. Because Uedge variables
# are in shared objects it is naturally stateful. Because of
# this stateful feature the order needs to be controlled.
#
class TestRun(unittest.TestCase):
    """Regression tests for the d3d_snull_D_only case.

    UEDGE state lives in shared objects, so the tests are stateful; each
    test rebuilds the state it needs via uet.startup().
    """
    def setUp(self):
        """
        Run before each test: change to this test's directory so the
        case's input and reference files are found.
        """
        import uedge as ue  # presumably imported to ensure the package loads; unused here
        os.chdir(thispath)
    def test_reconv(self):
        """
        Test that initial re-converged solution fnrm is low.
        """
        global ftol
        import uedge as ue
        uet.startup(restart,ftol=ftol)
        uet.check_fnorm(name+' Reconverge','ref_reconv.h5',restart,doassert=True)
    def test_perturb(self):
        """
        Perturb the converged solution by 2%, evolve back to steady
        state, and compare the function-evaluation count (nfe) with
        the stored reference via uet.check_nfe.
        """
        global ftol
        import uedge as ue
        uet.startup(restart,ftol=ftol)
        uet.perturb(restart,ftol=ftol)
        uet.steadystate()
        uet.check_nfe(name+' Perturb','ref_perturb.h5',restart,doassert=True)
if __name__ == '__main__':
    # Entry point: -h/--help prints usage; -r/--ref regenerates the
    # reference files; otherwise run the unittest suite.
    try:
        # fixed: longopts must be separate list entries; the previous
        # ['help,ref'] defined a single bogus long option, so --help and
        # --ref always raised GetoptError
        opts, args = getopt.getopt(sys.argv[1:],'hr',['help','ref'])
    except getopt.GetoptError:
        prhelp()
        sys.exit(2)
    for opt,arg in opts:
        if opt in ('-h','--help'):
            prhelp()
            sys.exit(2)
        elif opt in ('-r','--ref'):
            kargs = {'ftol':ftol}
            # run the two reference builds in separate processes so each
            # gets its own UEDGE shared-object state
            p1 = Process(target=uet.makeref,args=('ref_reconv.h5',restart),kwargs=kargs)
            p1.start()
            p2 = Process(target=uet.makeperturb,args=('ref_perturb.h5',restart),kwargs=kargs)
            p2.start()
            p1.join()
            p2.join()
            sys.exit(2)
    unittest.main()
| 2,339 | 25.292135 | 92 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D+C_orthog/test_d3d_snull_D+C_orthog.py | import unittest
import os
import numpy as np
import sys,getopt
from multiprocessing import Process
thisfile=os.path.realpath(__file__)
thispath=os.path.dirname(thisfile)
sys.path.insert(0,thispath)
sys.path.insert(0,os.path.dirname(os.path.dirname(thispath))+'/testscripts')
import uetests as uet
# Case parameters: convergence tolerance handed to uet.startup, the label
# used in report strings, and the restart input script (module name, no .py).
ftol = 1.e-9
name = 'd3d_snull_D+C_orthog'
restart = 'rd_d3dHmsCog_strahl_Rp95'
def prhelp():
    """Print command-line usage for this test script.

    NOTE(review): the usage text still says 'test_slab.py' -- it was
    copied from the slab-geometry test; this file drives the
    d3d_snull_D+C_orthog case.
    """
    print("""
    Usage: python test_slab.py [-r|--ref] [-h|--help] or
           pytest --forked test_slab.py
           -r|--ref to produce reference files for future test runs
    """)
#
# Note the numbering of the tests. Purists will say that
# tests should be order independent. Because Uedge variables
# are in shared objects it is naturally stateful. Because of
# this stateful feature the order needs to be controlled.
#
class TestRun(unittest.TestCase):
    """Regression tests for the d3d_snull_D+C_orthog case.

    UEDGE keeps its variables in shared objects, so these tests are
    stateful and their ordering matters (see the module comment above).
    """
    def setUp(self):
        """
        Run before each test: import uedge (loads the stateful shared
        objects) and chdir to this test's directory so the restart and
        reference files resolve by relative name.
        """
        import uedge as ue
        os.chdir(thispath)
    def test_reconv(self):
        """
        Re-converge the saved solution and check its fnrm against the
        stored reference ref_reconv.h5 (via uet.check_fnorm).
        """
        global ftol
        import uedge as ue
        uet.startup(restart,ftol=ftol)
        uet.check_fnorm(name+' Reconverge','ref_reconv.h5',restart,doassert=True)
    def test_perturb(self):
        """
        Perturb the re-converged solution, run to steady state, and
        compare solver nfe counts against ref_perturb.h5 (via
        uet.check_nfe).  (The original docstring was a copy-paste of
        test_reconv's.)
        """
        global ftol
        import uedge as ue
        uet.startup(restart,ftol=ftol)
        uet.perturb(restart,ftol=ftol)
        uet.steadystate()
        uet.check_nfe(name+' Perturb','ref_perturb.h5',restart,doassert=True)
if __name__ == '__main__':
    # Command-line entry point: -h/--help prints usage; -r/--ref rebuilds the
    # reference files in child processes (so each gets a fresh copy of the
    # stateful UEDGE shared objects); otherwise run the unittest suite.
    # Fix: long options must be separate list entries -- the original
    # ['help,ref'] registered one bogus long option 'help,ref', so '--help'
    # and '--ref' always raised GetoptError.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hr', ['help', 'ref'])
    except getopt.GetoptError:
        prhelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            prhelp()
            sys.exit(2)
        elif opt in ('-r', '--ref'):
            kargs = {'ftol': ftol}
            p1 = Process(target=uet.makeref, args=('ref_reconv.h5', restart), kwargs=kargs)
            p1.start()
            p2 = Process(target=uet.makeperturb, args=('ref_perturb.h5', restart), kwargs=kargs)
            p2.start()
            p1.join()
            p2.join()
            sys.exit(2)
    unittest.main()
| 2,355 | 25.47191 | 92 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D+C_orthog/rd_d3dHmsCog_strahl_Rp95.py | #
#
###########################################################################
# DESCRIPTION OF PROBLEM (d3dHmsCog) using Strahl imp data; from FACETS :
# DIII-D single-null geometry with 6 hydrogen variables (ni,ng,upi,upg,te,ti)
# and 7 carbon variables (six charge-state densities ni and one ng) on a
# (16+2)*(8+2)=18x10 [poloidal*radial] mesh yielding 2340 variables.
# Solver used is Newton Krylov (svrpkg="nksol") and preconditioner uses an
# iterative solver ILUT for Jacobian LU decomposition. Iterates to
# steady-state solution from an initial profile file (HF5).
# NOTE(review): this is the ORTHOGONAL-mesh variant -- the header used to
# carry the "Cnog" label and the tilted-plate/nonorthogonal-stencil text,
# but all nonorthogonal mesh-construction flags below are commented out.
###########################################################################
# Import uedge into python and make variables active
import uedge
from uedge import *
# Begin uedge parameter input
# Set the geometry
bbb.mhdgeo = 1 		#=1 use MHD equilibrium files
##flx.aeqdskfname = "a110465.03500" 	#EFIT "a" file for flux-surface mesh
##flx.geqdskfname = "g110465.03500" 	#EFIT "g" or "n" file for flux-sur mesh
flx.psi0min1 = 0.96		#normalized flux on core bndry
flx.psi0min2 = 0.98		#normalized flux on pf bndry
flx.psi0sep = 1.00001	#normalized flux at separatrix
flx.psi0max = 1.07		#normalized flux on outer wall bndry
bbb.ngrid = 1		#number of mesh sequenc. (always set to 1)
bbb.recycm = 0.1
com.nxleg[0,0] = 4		#pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4	#pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4	#pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4		#pol. mesh pts from x-point to outer plate
com.nysol[0] = 4		#rad. mesh pts in SOL
com.nycore[0] = 4		#rad. mesh pts in core
flx.alfcy = 2          #>0 concentrates y-mesh near separatrix
# Mesh construction--non orthogonal mesh
# (left disabled here: this input deck uses the orthogonal mesh)
##com.ismmon = 3		#controls transition from nonorthog to orthog mesh
##com.isnonog = 1          # non orthogonal differencing
##grd.istream = 0
##grd.iplate = 1
##grd.nsmooth = 3
##grd.wtmesh1 = 0.75
##grd.dmix0 = 1.0
# Set params for line-segments defining inner(1) and outer(2) plots
##grd.nplate1 = 2
##grd.nplate2 = 2
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33		#ion continuty eqn
bbb.methu = 33		#ion parallel momentum eqn
bbb.methe = 33		#electron energy eqn
bbb.methi = 33		#ion energy eqn
##bbb.methg = 66
bbb.methg = 33
# Boundary conditions
bbb.ncore[0] = 2.e19	#hydrogen ion density on core
bbb.iflcore = 0		#flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 600.		#core Te
bbb.tcorei = 600.		#core Ti
bbb.tedge = 2.		#fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.95	#hydrogen recycling coeff at plates
# Transport coefficients (m**2/s)
bbb.difni[0] = 1.		#D for radial hydrogen diffusion
bbb.kye = 1.		#chi_e for radial elec energy diffusion
bbb.kyi = 1.		#chi_i for radial ion energy diffusion
bbb.travis[0] = 1.		#eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21		#electron parallel thermal conduct. coeff
bbb.flalfi = 0.21		#ion parallel thermal conduct. coeff
bbb.flalfv = 0.5		#ion parallel viscosity coeff
bbb.flalfgx = 1.		#neut. gas part. flux in poloidal direction
bbb.flalfgy = 1.		#neut. gas part. flux in radial direction
bbb.flalfgxy = 1.		#neut. gas part. flux in mixed derivatives
bbb.flalftgx = 1.		#neut. gas thermal flux, poloidal direction
bbb.flalftgy = 1.		#neut. gas thermal flux, radial direction
bbb.lgmax = 0.1		#max scale length for flalfgx,y
bbb.lgtmax = 0.1		#max scale length for flalftgx,y
bbb.lgvmax = 0.1		#max scale length for flalfvgx,y
# Solver package
bbb.svrpkg = "nksol"	#Newton solver using Krylov method
bbb.premeth = "ilut"	#Solution method for precond. Jacobian matrix
# Parallel neutral momentum equation
bbb.isupgon[0] = 1
bbb.ineudif = 2		#=2 for evolving pg=ng*tg variable
bbb.isngon[0] = 0
com.ngsp = 1
com.nhsp = 2
bbb.ziin[1] = 0
## Impurity gas basics
com.ngsp = 2		#total number of gas species
bbb.isngon[1] = 1		#turns on impurity gas
bbb.ngbackg[1] = 1.e9	#neutral impurity background for added source
bbb.ingb = 2		#exponent for strength of ngbackg turn-on
bbb.istgcon[1] = 1		#=1 for constant tg(2) at tgas(2)
bbb.tgas[1] = 1.		#value for tg when istgcon=1
bbb.rcxighg = 0.		# best value; ratio of imp cx to hyd cx
bbb.kelighi[1] = 5.e-16	#elastic sig_v for imp_gas/h_ion
bbb.kelighg[1] = 5.e-16	#elastic sig_v for imp_gas/h_gas
bbb.n0g[1] = 1.e16		#imp. gas density normalization
# Impurity gas boundary conditions
bbb.recycp[1] = 0.01	#plate recycling of impurities
bbb.recycw[1] = 1e-4	#wall recycling; matwsi,o set above for hyd
bbb.isch_sput[1]=7		# Haasz/Davis chemical sputtering model
bbb.isph_sput[1]=3		# physical sputtering model
bbb.t_wall = 300.
bbb.t_plat = 500.
bbb.crmb = 2.
## Impurity ions
bbb.isimpon = 6		#Use force-balance only
com.nzsp[0] = 6		#number chrg states impurity isotope #1
# Parallel-flow BC factors at left/right plates for carbon species 2..7
bbb.csfaclb[2,0] = 2.191
bbb.csfaclb[3,0] = 2.191
bbb.csfaclb[4,0] = 2.191
bbb.csfaclb[5,0] = 2.191
bbb.csfaclb[6,0] = 2.191
bbb.csfaclb[7,0] = 2.191
bbb.csfacrb[2,0] = 2.191
bbb.csfacrb[3,0] = 2.191
bbb.csfacrb[4,0] = 2.191
bbb.csfacrb[5,0] = 2.191
bbb.csfacrb[6,0] = 2.191
bbb.csfacrb[7,0] = 2.191
bbb.csfaclb[2,1] = 2.191
bbb.csfaclb[3,1] = 2.191
bbb.csfaclb[4,1] = 2.191
bbb.csfaclb[5,1] = 2.191
bbb.csfaclb[6,1] = 2.191
bbb.csfaclb[7,1] = 2.191
bbb.csfacrb[2,1] = 2.191
bbb.csfacrb[3,1] = 2.191
bbb.csfacrb[4,1] = 2.191
bbb.csfacrb[5,1] = 2.191
bbb.csfacrb[6,1] = 2.191
bbb.csfacrb[7,1] = 2.191
# Carbon mass, charge, and nuclear charge for species indices 2..7
bbb.minu[2] = 12.
bbb.minu[3] = 12.
bbb.minu[4] = 12.
bbb.minu[5] = 12.
bbb.minu[6] = 12.
bbb.minu[7] = 12.
bbb.ziin[2] = 1
bbb.ziin[3] = 2
bbb.ziin[4] = 3
bbb.ziin[5] = 4
bbb.ziin[6] = 5
bbb.ziin[7] = 6
bbb.znuclin[0] = 1
bbb.znuclin[1] = 1
bbb.znuclin[2] = 6
bbb.znuclin[3] = 6
bbb.znuclin[4] = 6
bbb.znuclin[5] = 6
bbb.znuclin[6] = 6
bbb.znuclin[7] = 6
bbb.n0[2] = 1.e17
bbb.n0[3] = 1.e17
bbb.n0[4] = 1.e17
bbb.n0[5] = 1.e17
bbb.n0[6] = 1.e17
bbb.n0[7] = 1.e17
bbb.nzbackg = 1.e9		#background density for impurities
bbb.inzb = 2		#exponent for switching on nzbackg
bbb.ismctab = 2          # use Braams' rate tables
com.mcfilename[0] = "C_rates.strahl"   # Imp rate file name
bbb.isnicore[7] = 3
bbb.curcore[7] = 0.
bbb.isnwcono[2] = 3
bbb.isnwcono[3] = 3
bbb.isnwcono[4] = 3
bbb.isnwcono[5] = 3
bbb.isnwcono[6] = 3
bbb.isnwcono[7] = 3
bbb.isnwconi[2] = 3
bbb.isnwconi[3] = 3
bbb.isnwconi[4] = 3
bbb.isnwconi[5] = 3
bbb.isnwconi[6] = 3
bbb.isnwconi[7] = 3
bbb.nwomin[2] = 1.e7
bbb.nwomin[3] = 1.e7
bbb.nwomin[4] = 1.e7
bbb.nwomin[5] = 1.e7
bbb.nwomin[6] = 1.e7
bbb.nwomin[7] = 1.e7
bbb.nwimin[2] = 1.e7
bbb.nwimin[3] = 1.e7
bbb.nwimin[4] = 1.e7
bbb.nwimin[5] = 1.e7
bbb.nwimin[6] = 1.e7
bbb.nwimin[7] = 1.e7
bbb.restart = 1		#Begin from savefile, not estimated profiles
# Filling newly allocated arrays as desired
bbb.ftol = 1.e-8
bbb.allocate()		#allocates storage for arrays
from uedge.hdf5 import *
hdf5_restore("d3dHm_Cog_strahl_Rp95.h5")
# Atomic data switches
com.istabon = 10		#=10 specifics hydrogen data file ehr2.dat
# Scale factor converting (upi-upg)**2 energy to thermal energy
bbb.cfnidh = 0.2
| 7,614 | 32.995536 | 77 | py |
UEDGE | UEDGE-master/pytests/fast_tests/Slab_geometry/test_sgeom.py | import unittest
import os
import numpy as np
import sys,getopt
from multiprocessing import Process
thisfile=os.path.realpath(__file__)
thispath=os.path.dirname(thisfile)
sys.path.insert(0,thispath)
sys.path.insert(0,os.path.dirname(os.path.dirname(thispath))+'/testscripts')
import uetests as uet
ftol = 2.e-9   # fnrm convergence tolerance passed to uet.startup
def prhelp():
    """Print command-line usage for this test script."""
    usage = """
    Usage: python test_slab.py [-r|--ref] [-h|--help] or
           pytest --forked test_slab.py
           -r|--ref to produce reference files for future test runs
    """
    print(usage)
#
# Note the numbering of the tests. Purists will say that
# tests should be order independent. Because Uedge variables
# are in shared objects it is naturally stateful. Because of
# this stateful feature the order needs to be controlled.
#
class TestRun(unittest.TestCase):
    """Regression tests for the slab-geometry case.

    UEDGE keeps its variables in shared objects, so these tests are
    stateful and their ordering matters (see the module comment above).
    """
    def setUp(self):
        """
        Run before each test: import uedge (loads the stateful shared
        objects) and chdir to this test's directory so the restart and
        reference files resolve by relative name.
        """
        import uedge as ue
        os.chdir(thispath)
    def test_reconv(self):
        """
        Re-converge the saved solution and check its fnrm against the
        stored reference ref_reconv.h5 (via uet.check_fnorm).
        """
        global ftol
        import uedge as ue
        uet.startup('rd_slabH_in_w_h5',ftol=ftol)
        uet.check_fnorm('Slab Geometry Reconverge','ref_reconv.h5','rd_slabH_in_w_h5',doassert=True)
    def test_perturb(self):
        """
        Perturb the re-converged solution, run to steady state, and
        compare solver nfe counts against ref_perturb.h5 (via
        uet.check_nfe).  (The original docstring was a copy-paste of
        test_reconv's.)
        """
        global ftol
        import uedge as ue
        uet.startup('rd_slabH_in_w_h5',ftol=ftol)
        uet.perturb('rd_slabH_in_w_h5',ftol=ftol)
        uet.steadystate()
        uet.check_nfe('Slab Geometry Perturb','ref_perturb.h5','rd_slabH_in_w_h5',doassert=True)
if __name__ == '__main__':
    # Command-line entry point: -h/--help prints usage; -r/--ref rebuilds the
    # reference files in child processes (so each gets a fresh copy of the
    # stateful UEDGE shared objects); otherwise run the unittest suite.
    # Fix: long options must be separate list entries -- the original
    # ['help,ref'] registered one bogus long option 'help,ref', so '--help'
    # and '--ref' always raised GetoptError.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hr', ['help', 'ref'])
    except getopt.GetoptError:
        prhelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            prhelp()
            sys.exit(2)
        elif opt in ('-r', '--ref'):
            kargs = {'ftol': ftol}
            p1 = Process(target=uet.makeref, args=('ref_reconv.h5', 'rd_slabH_in_w_h5'), kwargs=kargs)
            p1.start()
            p2 = Process(target=uet.makeperturb, args=('ref_perturb.h5', 'rd_slabH_in_w_h5'), kwargs=kargs)
            p2.start()
            p1.join()
            p2.join()
            sys.exit(2)
    unittest.main()
| 2,382 | 26.079545 | 103 | py |
UEDGE | UEDGE-master/pytests/fast_tests/Slab_geometry/rd_slabH_in_w_h5.py | #
#
###########################################################################
# DESCRIPTION OF PROBLEM (slabH):
# Slab model with 4 hydrogen variables (ni,upi,te,ti) and a (6+2)*(10+2)
# = 8x12 [poloidal*radial] mesh yielding 384 variables. The +2 in the mesh
# size description arises from one guard-cell at each end of the domain used
# to set boundary conditions. This case has the neutrals frozen
# (bbb.isngon=0) and uses a simple internal hydrogen ionization function
# for any residual ionization from the frozen neutrals (small).
# NOTE(review): the original header said the run starts from generic
# internal profiles (bbb.restart=0) with the time-dependent VODPK solver;
# the script below actually sets svrpkg="nksol" (Newton-Krylov) and, after
# allocation, restores the saved profile slabH.h5 with bbb.restart=1.
# The preconditioner approximate LU decomposition is provided by the ILUT
# sparse solver (premeth="ilut").
###########################################################################
import uedge
from uedge import *
bbb.mhdgeo=-1		#slab geometry (no MHD equilibrium file read; cf. mhdgeo=1 cases)
bbb.isnion=1		#evolve the four hydrogen variables: ni, upi, Te, Ti
bbb.isupon=1
bbb.isteon=1
bbb.istion=1
bbb.isngon=0		#neutral gas density equation off (neutrals frozen; see header)
bbb.svrpkg="nksol"	#Newton solver using Krylov method
bbb.premeth="ilut"	#ILUT preconditioner for the Jacobian
bbb.epscon1=1.e-2
bbb.ireorder=0
bbb.ncore=2.e19
bbb.tcoree=100
bbb.tcorei=100
bbb.isfixlb=2
bbb.recycp=.9
bbb.trange=4.e3
bbb.nsteps=1
bbb.n0g=1.e16
bbb.difni=1.
bbb.kye=1.
bbb.flalfe=0.21
bbb.flalfi=0.21
bbb.flalfgx=1.e10
bbb.flalfgy=1.e10
com.nycore=0
com.nysol=10
com.nxleg[0,0]=0
com.nxleg[0,1]=2
com.nxcore[0,0]=0
com.nxcore[0,1]=4
grd.zax=1.
grd.zaxpt=.75
grd.alfyt=-1.e-5
bbb.restart=0
bbb.ftol = 1.e-5
bbb.dtreal = 1.e2
bbb.allocate()		#allocates storage for arrays
bbb.restart = 1		#use initial solution in slabH.h5
from uedge.hdf5 import *
hdf5_restore("slabH.h5")
# perform one time-step
bbb.exmain()
#now should have starting profile at dtreal=1.e-9; restart from that soln
bbb.restart = 1
# Increase bbb.dtreal for more time-steps or read bbb.rdinitdt and bbb.rdcontdt
| 1,982 | 26.929577 | 79 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D+C_nonorthog/plate_d3d_2.py | # Set params for inner (1) and outer (2) plate line-segments
from uedge import *   #needed to define variables within this file
# Inner plate (1): 5 line-segment points.
grd.nplate1 = 5
grd.gchange("Mmod",0)
grd.rplate1=[\
  1.600E+00,   1.27300E+00,
  1.15310E+00,   1.01600E+00, 1.01600E+00]
grd.zplate1=[\
  2.34100E-01,  2.34100E-01,
  2.34100E-01,  3.71200E-01, 1.0000]
# Outer plate (2): 10 line-segment points.
grd.nplate2 = 10
# Fix: this call was grd.gchange("Mmod,0") -- the group name and flag were
# fused into a single string argument; call with two arguments exactly as
# in the nplate1 case above.
grd.gchange("Mmod",0)
grd.rplate2=[\
  2.13690E+00,  1.78570E+00,  1.76800E+00,  1.76800E+00,
  1.68100E+00,  1.67500E+00,  1.67200E+00,  1.67200E+00,
  1.55500E+00,  1.21200E+00]
grd.zplate2=[\
  6.28600E-01,  4.25600E-01,  3.89300E-01,  3.46000E-01,
  3.46000E-01,  3.43000E-01,  3.37000E-01,  2.34100E-01,
  2.34100E-01,  2.34100E-01]
| 712 | 25.407407 | 66 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D+C_nonorthog/test_d3d_snull_D+C_nonorthog.py | import unittest
import os
import numpy as np
import sys,getopt
from multiprocessing import Process
thisfile=os.path.realpath(__file__)
thispath=os.path.dirname(thisfile)
sys.path.insert(0,thispath)
sys.path.insert(0,os.path.dirname(os.path.dirname(thispath))+'/testscripts')
import uetests as uet
# Case parameters: convergence tolerance handed to uet.startup, the label
# used in report strings, and the restart input script (module name, no .py).
ftol = 1.e-9
name = 'd3d_snull_D+C_nonorthog'
restart = 'rd_d3dHmsCnog_strahl_Rp95'
def prhelp():
    """Print command-line usage for this test script.

    NOTE(review): the usage text still says 'test_slab.py' -- it was
    copied from the slab-geometry test; this file drives the
    d3d_snull_D+C_nonorthog case.
    """
    print("""
    Usage: python test_slab.py [-r|--ref] [-h|--help] or
           pytest --forked test_slab.py
           -r|--ref to produce reference files for future test runs
    """)
#
# Note the numbering of the tests. Purists will say that
# tests should be order independent. Because Uedge variables
# are in shared objects it is naturally stateful. Because of
# this stateful feature the order needs to be controlled.
#
class TestRun(unittest.TestCase):
    """Regression tests for the d3d_snull_D+C_nonorthog case.

    UEDGE keeps its variables in shared objects, so these tests are
    stateful and their ordering matters (see the module comment above).
    """
    def setUp(self):
        """
        Run before each test: import uedge (loads the stateful shared
        objects) and chdir to this test's directory so the restart and
        reference files resolve by relative name.
        """
        import uedge as ue
        os.chdir(thispath)
    def test_reconv(self):
        """
        Re-converge the saved solution and check its fnrm against the
        stored reference ref_reconv.h5 (via uet.check_fnorm).
        """
        global ftol
        import uedge as ue
        uet.startup(restart,ftol=ftol)
        uet.check_fnorm(name+' Reconverge','ref_reconv.h5',restart,doassert=True)
    def test_perturb(self):
        """
        Perturb the re-converged solution, run to steady state, and
        compare solver nfe counts against ref_perturb.h5 (via
        uet.check_nfe).  (The original docstring was a copy-paste of
        test_reconv's.)
        """
        global ftol
        import uedge as ue
        uet.startup(restart,ftol=ftol)
        uet.perturb(restart,ftol=ftol)
        uet.steadystate()
        uet.check_nfe(name+' Perturb','ref_perturb.h5',restart,doassert=True)
if __name__ == '__main__':
    # Command-line entry point: -h/--help prints usage; -r/--ref rebuilds the
    # reference files in child processes (so each gets a fresh copy of the
    # stateful UEDGE shared objects); otherwise run the unittest suite.
    # Fix: long options must be separate list entries -- the original
    # ['help,ref'] registered one bogus long option 'help,ref', so '--help'
    # and '--ref' always raised GetoptError.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hr', ['help', 'ref'])
    except getopt.GetoptError:
        prhelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            prhelp()
            sys.exit(2)
        elif opt in ('-r', '--ref'):
            kargs = {'ftol': ftol}
            p1 = Process(target=uet.makeref, args=('ref_reconv.h5', restart), kwargs=kargs)
            p1.start()
            p2 = Process(target=uet.makeperturb, args=('ref_perturb.h5', restart), kwargs=kargs)
            p2.start()
            p1.join()
            p2.join()
            sys.exit(2)
    unittest.main()
| 2,359 | 25.516854 | 92 | py |
UEDGE | UEDGE-master/pytests/fast_tests/d3d_snull_D+C_nonorthog/rd_d3dHmsCnog_strahl_Rp95.py | #
#
###########################################################################
# DESCRIPTION OF PROBLEM (d3dHmsCnog) from FACETS test suite:
# DIII-D single-null geometry with 6 hydrogen variables (ni,ng,upi,upg,te,ti)
# and 7 carbon variables (six charge-state densities ni and one ng) on a
# (16+2)*(8+2)=18x10 [poloidal*radial] mesh yielding 2340 variables.
# Solver used is Newton Krylov (svrpkg="nksol") and preconditioner uses an
# iterative solver ILUT for Jacobian LU decomposition. Also includes tilted
# divertor plates wrt flux-surface normal, thus testing the nonorthogonal
# finite-volume difference stencil. Iterates to steady-state solution from
# an initial profile file (HF5).
###########################################################################
# Import uedge into python and make variables active
import uedge
from uedge import *
# Begin uedge parameter input
# Set the geometry
bbb.mhdgeo = 1 		#=1 use MHD equilibrium files
##flx.aeqdskfname = "a110465.03500" 	#EFIT "a" file for flux-surface mesh
##flx.geqdskfname = "g110465.03500" 	#EFIT "g" or "n" file for flux-sur mesh
flx.psi0min1 = 0.96		#normalized flux on core bndry
flx.psi0min2 = 0.98		#normalized flux on pf bndry
flx.psi0sep = 1.00001	#normalized flux at separatrix
flx.psi0max = 1.07		#normalized flux on outer wall bndry
bbb.ngrid = 1		#number of mesh sequenc. (always set to 1)
bbb.recycm = 0.1
com.nxleg[0,0] = 4		#pol. mesh pts from inner plate to x-point
com.nxcore[0,0] = 4	#pol. mesh pts from x-point to top on inside
com.nxcore[0,1] = 4	#pol. mesh pts from top to x-point on outside
com.nxleg[0,1] = 4		#pol. mesh pts from x-point to outer plate
com.nysol[0] = 4		#rad. mesh pts in SOL
com.nycore[0] = 4		#rad. mesh pts in core
flx.alfcy = 2          #>0 concentrates y-mesh near separatrix
# Mesh construction--non orthogonal mesh
com.ismmon = 3		#controls transition from nonorthog to orthog mesh
com.isnonog = 1          # non orthogonal differencing
grd.istream = 0
grd.iplate = 1
grd.nsmooth = 3
grd.wtmesh1 = 0.75
grd.dmix0 = 1.0
# Define divertor-plate geometry from line-segment file
# (importing executes plate_d3d_2.py, which sets grd.nplate1/2 and the
# r/zplate segment arrays)
from plate_d3d_2 import *
# Finite-difference algorithms (upwind, central diff, etc.)
bbb.methn = 33		#ion continuty eqn
bbb.methu = 33		#ion parallel momentum eqn
bbb.methe = 33		#electron energy eqn
bbb.methi = 33		#ion energy eqn
bbb.methg = 66
# Boundary conditions
bbb.ncore[0] = 2.e19	#hydrogen ion density on core
bbb.iflcore = 0		#flag; =0, fixed Te,i; =1, fixed power on core
bbb.tcoree = 401.		#core Te
bbb.tcorei = 401.		#core Ti
bbb.tedge = 2.		#fixed wall,pf Te,i if istewcon=1, etc
bbb.recycp[0] = 0.95	#hydrogen recycling coeff at plates
# Transport coefficients (m**2/s)
bbb.difni[0] = 1.		#D for radial hydrogen diffusion
bbb.kye = 1.		#chi_e for radial elec energy diffusion
bbb.kyi = 1.		#chi_i for radial ion energy diffusion
bbb.travis[0] = 1.		#eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21		#electron parallel thermal conduct. coeff
bbb.flalfi = 0.21		#ion parallel thermal conduct. coeff
bbb.flalfv = 0.5		#ion parallel viscosity coeff
bbb.flalfgx = 1.		#neut. gas part. flux in poloidal direction
bbb.flalfgy = 1.		#neut. gas part. flux in radial direction
bbb.flalfgxy = 1.		#neut. gas part. flux in mixed derivatives
bbb.flalftgx = 1.		#neut. gas thermal flux, poloidal direction
bbb.flalftgy = 1.		#neut. gas thermal flux, radial direction
bbb.lgmax = 0.1		#max scale length for flalfgx,y
bbb.lgtmax = 0.1		#max scale length for flalftgx,y
bbb.lgvmax = 0.1		#max scale length for flalfvgx,y
# Solver package
bbb.svrpkg = "nksol"	#Newton solver using Krylov method
bbb.premeth = "ilut"	#Solution method for precond. Jacobian matrix
# Parallel neutral momentum equation
bbb.isupgon[0] = 1
bbb.ineudif = 2		#=2 for evolving pg=ng*tg variable
bbb.isngon[0] = 0
com.ngsp = 1
com.nhsp = 2
bbb.ziin[1] = 0
## Impurity gas basics
com.ngsp = 2		#total number of gas species
bbb.isngon[1] = 1		#turns on impurity gas
bbb.ngbackg[1] = 1.e9	#neutral impurity background for added source
bbb.ingb = 2		#exponent for strength of ngbackg turn-on
bbb.istgcon[1] = 1		#=1 for constant tg(2) at tgas(2)
bbb.tgas[1] = 1.		#value for tg when istgcon=1
bbb.rcxighg = 0.		# best value; ratio of imp cx to hyd cx
bbb.kelighi[1] = 5.e-16	#elastic sig_v for imp_gas/h_ion
bbb.kelighg[1] = 5.e-16	#elastic sig_v for imp_gas/h_gas
bbb.n0g[1] = 1.e16		#imp. gas density normalization
# Impurity gas boundary conditions
bbb.recycp[1] = 0.01	#plate recycling of impurities
bbb.recycw[1] = 1e-4	#wall recycling; matwsi,o set above for hyd
bbb.isch_sput[1]=7		# Haasz/Davis chemical sputtering model
bbb.isph_sput[1]=3		# physical sputtering model
bbb.t_wall = 300.
bbb.t_plat = 500.
## Impurity ions
bbb.isimpon = 6		#Use force-balance only
com.nzsp[0] = 6		#number chrg states impurity isotope #1
# Parallel-flow BC factors at left/right plates for carbon species 2..7
bbb.csfaclb[2,0] = 2.191
bbb.csfaclb[3,0] = 2.191
bbb.csfaclb[4,0] = 2.191
bbb.csfaclb[5,0] = 2.191
bbb.csfaclb[6,0] = 2.191
bbb.csfaclb[7,0] = 2.191
bbb.csfacrb[2,0] = 2.191
bbb.csfacrb[3,0] = 2.191
bbb.csfacrb[4,0] = 2.191
bbb.csfacrb[5,0] = 2.191
bbb.csfacrb[6,0] = 2.191
bbb.csfacrb[7,0] = 2.191
bbb.csfaclb[2,1] = 2.191
bbb.csfaclb[3,1] = 2.191
bbb.csfaclb[4,1] = 2.191
bbb.csfaclb[5,1] = 2.191
bbb.csfaclb[6,1] = 2.191
bbb.csfaclb[7,1] = 2.191
bbb.csfacrb[2,1] = 2.191
bbb.csfacrb[3,1] = 2.191
bbb.csfacrb[4,1] = 2.191
bbb.csfacrb[5,1] = 2.191
bbb.csfacrb[6,1] = 2.191
bbb.csfacrb[7,1] = 2.191
# Carbon mass, charge, and nuclear charge for species indices 2..7
bbb.minu[2] = 12.
bbb.minu[3] = 12.
bbb.minu[4] = 12.
bbb.minu[5] = 12.
bbb.minu[6] = 12.
bbb.minu[7] = 12.
bbb.ziin[2] = 1
bbb.ziin[3] = 2
bbb.ziin[4] = 3
bbb.ziin[5] = 4
bbb.ziin[6] = 5
bbb.ziin[7] = 6
bbb.znuclin[0] = 1
bbb.znuclin[1] = 1
bbb.znuclin[2] = 6
bbb.znuclin[3] = 6
bbb.znuclin[4] = 6
bbb.znuclin[5] = 6
bbb.znuclin[6] = 6
bbb.znuclin[7] = 6
bbb.n0[2] = 1.e17
bbb.n0[3] = 1.e17
bbb.n0[4] = 1.e17
bbb.n0[5] = 1.e17
bbb.n0[6] = 1.e17
bbb.n0[7] = 1.e17
bbb.nzbackg = 1.e9		#background density for impurities
bbb.inzb = 2		#exponent for switching on nzbackg
bbb.ismctab = 2          # use Braams' rate tables
com.mcfilename[0] = "C_rates.strahl"   # Imp rate file name
bbb.isnicore[7] = 3
bbb.curcore[7] = 0.
bbb.isnwcono[2] = 3
bbb.isnwcono[3] = 3
bbb.isnwcono[4] = 3
bbb.isnwcono[5] = 3
bbb.isnwcono[6] = 3
bbb.isnwcono[7] = 3
bbb.isnwconi[2] = 3
bbb.isnwconi[3] = 3
bbb.isnwconi[4] = 3
bbb.isnwconi[5] = 3
bbb.isnwconi[6] = 3
bbb.isnwconi[7] = 3
bbb.nwomin[2] = 1.e7
bbb.nwomin[3] = 1.e7
bbb.nwomin[4] = 1.e7
bbb.nwomin[5] = 1.e7
bbb.nwomin[6] = 1.e7
bbb.nwomin[7] = 1.e7
bbb.nwimin[2] = 1.e7
bbb.nwimin[3] = 1.e7
bbb.nwimin[4] = 1.e7
bbb.nwimin[5] = 1.e7
bbb.nwimin[6] = 1.e7
bbb.nwimin[7] = 1.e7
bbb.restart = 1		#Begin from savefile, not estimated profiles
# Filling newly allocated arrays as desired
bbb.ftol = 1.e-8
bbb.allocate()		#allocates storage for arrays
from uedge.hdf5 import *
hdf5_restore("d3dHm_Cnog_strahl_Rp95.h5")
# Atomic data switches
com.istabon = 10		#=10 specifics hydrogen data file ehr2.dat
# Scale factor converting (upi-upg)**2 energy to thermal energy
bbb.cfnidh = 0.2
| 7,525 | 33.054299 | 77 | py |
UEDGE | UEDGE-master/jupyter/case_setup.py | #Coarse mesh [com.nx=8, com.ny=4] for DIII-D MHD equilibrium
#Uses diffusive neutrals, so five variables [bbb.ni,bbb.upi,Te,Ti,bbb.ng]
#
# NOTE(review): this deck sets up the case (mesh, BCs, transport, drifts,
# potential equation) and ends with bbb.restart=1 + allocate(); the actual
# savefile restore and solve are expected to be done by the caller.
import sys,os
from uedge import *
# Set the com.geometry
bbb.mhdgeo = 1 		#use MHD equilibrium
#com.rm -flx.f aeqdsk neqdsk 	#change names of MHD eqil. files
flx.psi0min1 = 0.98		#normalized flux on core bndry
flx.psi0min2 = 0.98		#normalized flux on pf bndry
flx.psi0sep = 1.00001	#normalized flux at separatrix
flx.psi0max = 1.07		#normalized flux on outer wall bndry
bbb.ngrid = 1		#number of meshes [always set to 1]
com.nxleg[0,0] = 4		#pol. mesh pts from inner plate to flx.x-point
com.nxcore[0,0] = 4		#pol. mesh pts from flx.x-point to top on inside
com.nxcore[0,1] = 4		#pol. mesh pts from top to flx.x-point on outside
com.nxleg[0,1] = 4		#pol. mesh pts from flx.x-point to outer plate
com.nysol[0] = 6		#rad. mesh pts in SOL
com.nycore[0] =2		#rad. mesh pts in core
# Finite-difference algorithms [upwind, central diff, etc.]
bbb.methn = 33		#ion continuty eqn
bbb.methu = 33		#ion parallel momentum eqn
bbb.methe = 33		#electron energy eqn
bbb.methi = 33		#ion energy eqn
bbb.methg = 33		#neutral gas continuity eqn
# Boundary conditions
bbb.ncore[0] = 2.e19	#hydrogen ion density on core
bbb.iflcore = 0		#flag =0, fixed Te,i =1, fixed power on core
bbb.tcoree = 100.		#core Te
bbb.tcorei = 100.		#core Ti
bbb.tedge = 2.		#fixed wall,pf Te,i if istewcon=1, etc
bbb.istepfc = 3		#=3 sets scale length bbb.lyte
bbb.lyte = 0.03		#radial scale-length for Te on PF boundary
bbb.recycp[0] = 0.98	#hydrogen recycling grd.coeff at plates
bbb.recycw[0] = 0.9		#wall recycling if bbb.matwso,i=1
bbb.matwso[0] = 1		#recycle on main-chamber wall
bbb.isnwcono = 1		#if=1, set bbb.ni[,,com.ny+1]=bbb.nwallo
bbb.isnwconi = 1		#if=1, set PF wall bbb.ni=bbb.nwalli
bbb.allocate()		#bbb.allocate() space of bbb.nwallo,i
bbb.nwallo = 1.e18
bbb.nwalli = 1.e18
# Transport coefficients
bbb.difni[0] = 1.		#D for radial hydrogen diffusion
bbb.kye = 1.		#chi_e for radial elec energy diffusion
bbb.kyi = 1.		#chi_i for radial ion energy diffusion
bbb.travis[0] = 1.		#eta_a for radial ion momentum diffusion
# Flux limits
bbb.flalfe = 0.21		#electron parallel thermal conduct. grd.coeff
bbb.flalfi = 0.21		#ion parallel thermal conduct. grd.coeff
bbb.flalfv = 1.		#ion parallel viscosity grd.coeff
bbb.flalfgx = 1.		#neut. density poloidal diffusion
bbb.flalfgy = 1.		#neut. density radial diffusion
bbb.flalfvgx = 1.		#neut. momentum poloidal viscosity
bbb.flalfvgy = 1.		#neut. momentum radial viscosity
bbb.flalftgx = 1.		#neut. particle poloidal heat diffusion
bbb.flalftgy = 1.		#neut. particle radial heat diffusion
# Solver package
bbb.svrpkg = "nksol"	#Newton solver using Krylov method
bbb.premeth = "ilut"	#Solution method for precond. Jacobian matrix
bbb.mfnksol = 3
# Parallel neutral momentum equation
bbb.ineudif = 2
bbb.isupgon[0]=1
if bbb.isupgon[0] == 1:
    bbb.isngon[0]=0
    com.ngsp=1
    com.nhsp=2
    bbb.ziin[com.nhsp-1]=0
    bbb.travis[1] = 0.	#shouldn't be used for neutrals - to be sure
    # The following are probably default, set them anyway to be sure
    bbb.cngmom=0
    bbb.cmwall=0
    bbb.cngtgx=0
    bbb.cngtgy=0
    bbb.kxn=0
    bbb.kyn=0
## bbb.ingb = 0
# Currents and potential parameters
bbb.isphion = 1
bbb.b0 = 1.			# =1 for normal direction of B-field
bbb.rsigpl=1.e-8		#anomalous cross-field conductivity
bbb.cfjhf=1.		#turn-on heat flow from current [bbb.fqp]
bbb.cfjve = 1.		#makes bbb.vex = vix - bbb.cfjve*bbb.fqx
bbb.jhswitch=1		#Joule Heating switch
bbb.cfjpy = 0.		#diamag. cur. in flx.y-direction
bbb.cfjp2 = 0.		#diamag. cur. in 2-direction
bbb.newbcl=1
bbb.newbcr=1		#Sheath BC [bee,i] from current equation
bbb.isfdiax =1.		#Factor to turn on diamag. contrib. to sheath
bbb.cfyef = 1.0		#ExB drift in flx.y-dir.
bbb.cf2ef = 1.0		#ExB drift in 2-dir.
bbb.cfydd = 0.		#Diamag. drift in flx.y-dir. [always=0]
bbb.cf2dd = 0.		#Diamag. drift in 2-dir. [always=0]
bbb.cfrd = 0.		#Resistive drift in flx.y and 2 dirs.
bbb.cfbgt = 0.		#Diamag. energy drift [always=0]
bbb.cfybf = 1.		#turns on bbb.vycb - radial grad_B drift
bbb.cf2bf = 1.		#turns on bbb.v2cb - perp grad_B drift [nearly pol]
bbb.cfqybf = 1.		#turns on bbb.vycb contrib to radial current
bbb.cfq2bf = 1.		#turns on bbb.v2cb contrib to perp["2"] current
bbb.isnewpot = 1
bbb.rnewpot = 1.
bbb.iphibcc = 3		#set bbb.phi[,1] uniform poloidally
# Restart from bbb.a pfb savefile
bbb.restart = 1		#Begin from savefile, not estimated profiles
bbb.allocate()		#bbb.allocate() space for savevariables
| 4,590 | 38.239316 | 79 | py |
StyleFusion | StyleFusion-master/src/main.py | from shared import *
from tf_lib import *
from dataset import *
from model import *
#from dialog_gui import *
from classifier import load_classifier
"""
AUTHOR: Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
def run_master(mode, args):
	"""Build, restore, and run a model according to `mode`.

	mode: 'train' (new model), 'continue' (resume training), 'vali'
	    (evaluate on validation data and print scores), 'summary'
	    (print Keras model summary), 'cmd' (interactive inference loop),
	    'test' (batch inference over args.path_test, writing *.hyp),
	    or 'vis'/'load' (just return the restored master object).
	args: parsed command-line namespace; fields used here include
	    restore, model_class, data_name, clf_name, debug, r_rand,
	    max_ctxt_len, max_resp_len, noisy_vocab, path_test.
	"""
	# Derive the model folder: when restoring outside train/continue, take
	# the directory part of args.restore (dropping the file component).
	if mode not in ['train','continue'] and args.restore != '':
		aa = args.restore.split('/')
		bb = []
		for a in aa:
			if len(a) > 0:
				bb.append(a)
		fld = '/'.join(bb[:-1])
		if mode in ['vali','vis']:
			vocab_only = False
			fld_data, _, _ = get_model_fld(args)
			path_bias_vocab = fld_data + '/vocab_bias.txt'
		else:
			vocab_only = True
			fld_data = fld
			path_bias_vocab = fld + '/vocab_bias.txt'
	else:
		vocab_only = False
		fld_data, fld_model, subfld = get_model_fld(args)
		fld = fld_model + '/' + subfld
		path_bias_vocab = fld_data + '/vocab_bias.txt'
	# Optional word whitelist for biased/style decoding.
	if os.path.exists(path_bias_vocab):
		allowed_words = [line.strip('\n').strip('\r') for line in open(path_bias_vocab, encoding='utf-8')]
	else:
		allowed_words = None
	# Pick the model class; 's2s+lm' is handled separately below because it
	# takes no Dataset object.
	model_class = args.model_class.lower()
	if model_class.startswith('fuse'):
		Master = StyleFusion
	elif model_class == 'mtask':
		Master = VanillaMTask
	elif model_class == 's2s':
		Master = Seq2Seq
	elif model_class == 'lm':
		Master = LanguageModel
	elif model_class == 's2s+lm':
		pass
	else:
		raise ValueError
	if model_class == 's2s+lm':
		master = Seq2SeqLM(args, allowed_words)
	else:
		dataset = Dataset(fld_data,
			max_ctxt_len=args.max_ctxt_len, max_resp_len=args.max_resp_len,
			vocab_only=vocab_only, noisy_vocab=args.noisy_vocab)
		master = Master(dataset, fld, args, new=(mode=='train'), allowed_words=allowed_words)
	if mode != 'train':
		if args.restore.endswith('.npz') or model_class == 's2s+lm':
			restore_path = args.restore
		else:
			restore_path = master.fld + '/models/%s.npz'%args.restore
		master.load_weights(restore_path)
	if mode in ['vis', 'load']:
		return master
	# Load the style classifiers used for ranking/evaluation.
	if args.clf_name.lower() == 'holmes':
		CLF_NAMES = ['classifier/Reddit_vs_Holmes/neural', 'classifier/Reddit_vs_Holmes/ngram']
	elif args.clf_name.lower() == 'arxiv':
		CLF_NAMES = ['classifier/Reddit_vs_arxiv/neural', 'classifier/Reddit_vs_arxiv/ngram']
	else:
		CLF_NAMES = [args.clf_name]
	print('loading classifiers '+str(CLF_NAMES))
	master.clf_names = CLF_NAMES
	master.classifiers = []
	for clf_name in CLF_NAMES:
		master.classifiers.append(load_classifier(clf_name))
	print('\n'+fld+'\n')
	if mode in ['continue', 'train']:
		# Append the run's arguments to args.txt for reproducibility.
		ss = ['', mode + ' @ %i'%time.time()]
		for k in sorted(args.__dict__.keys()):
			ss.append('%s = %s'%(k, args.__dict__[k]))
		with open(master.fld + '/args.txt', 'a') as f:
			f.write('\n'.join(ss)+'\n')
		if args.debug:
			batch_per_load = 1
		else:
			if PHILLY:
				n_sample = 1280	# philly unstable for large memory
			else:
				n_sample = 2560
			batch_per_load = int(n_sample/BATCH_SIZE)
		if mode == 'continue':
			master.vali()
		master.train(batch_per_load)
	elif 'summary' == mode:
		print(master.model.summary())
	elif mode in ['cmd', 'test', 'vali']:
		# NOTE(review): classifiers are loaded a second time here (they
		# were already attached to master above).
		classifiers = []
		for clf_name in CLF_NAMES:
			classifiers.append(load_classifier(clf_name))
		if 'vali' == mode:
			data = master.get_vali_data()
			s_decoded = eval_decoded(master, data,
				classifiers=classifiers, corr_by_tgt=True, r_rand=args.r_rand,
				calc_retrieval=('holmes' in args.data_name.lower())
				)[0]
			s_surrogate = eval_surrogate(master, data)[0]
			print(restore_path)
			print()
			print(s_decoded)
			print()
			print(s_surrogate)
			return
		"""
		if model_class != 's2s+lm':
			with tf.variable_scope('base_rankder', reuse=tf.AUTO_REUSE):
				fld_base_ranker = 'restore/%s/%s/pretrained/'%(args.model_class.replace('fuse1','fuse'), args.data_name)
				dataset_base_ranker = Dataset(fld_base_ranker,
					max_ctxt_len=args.max_ctxt_len, max_resp_len=args.max_resp_len,
					vocab_only=True, noisy_vocab=False)
				base_ranker = Master(dataset_base_ranker, fld_base_ranker, args, new=False, allowed_words=master.allowed_words)
				path = fld_base_ranker + '/' + open(fld_base_ranker+'/base_ranker.txt').readline().strip('\n')
				base_ranker.load_weights(path)
				print('*'*10 + ' base_ranker loaded from: '+path)
		else:
		"""
		base_ranker = None
		def print_results(results):
			# Tabular dump: header of score-term names, then one row per
			# (total score, response, per-term scores) tuple.
			ss = ['total', 'logP', 'logP_c', 'logP_b', 'rep', 'len',] + ['clf%i'%i for i in range(len(CLF_NAMES))]
			print('; '.join([' '*(6-len(s))+s for s in ss]))
			for score, resp, terms in results:
				print('%6.3f; '%score + '; '.join(['%6.3f'%x for x in terms]) + '; ' + resp)
		if 'cmd' == mode:
			# Interactive loop: empty input exits.
			while True:
				print('\n---- please input ----')
				inp = input()
				infer_args = parse_infer_args()
				if inp == '':
					break
				results = infer_rank(inp, master, infer_args, base_ranker=base_ranker)
				print_results(results)
		elif 'test' == mode:
			# Batch inference: first tab field of each line is the context;
			# up to 10 ranked hypotheses are appended per line to *.hyp.
			infer_args = parse_infer_args()
			path_out = args.path_test+'.hyp'
			open(path_out, 'w', encoding='utf-8')
			for line in open(args.path_test, encoding='utf-8'):
				line = line.strip('\n')
				inp = line.split('\t')[0]
				results = infer_rank(inp, master, infer_args, base_ranker=base_ranker)
				lines = []
				for _, hyp, _ in results[:min(10, len(results))]:
					lines.append(line + '\t' + hyp.replace(' _EOS_','').strip())
				with open(path_out, 'a', encoding='utf-8') as f:
					f.write('\n'.join(lines) + '\n')
		"""
		path_in = DATA_PATH + '/test/' + args.test_fname
		if not PHILLY:
			fld_out = master.fld + '/eval2/'
		else:
			fld_out = OUT_PATH
		makedirs(fld_out)
		npz_name = args.restore.split('/')[-1].replace('.npz','')
		path_out = fld_out + '/' + args.test_fname + '_' + npz_name
		test_master(master, path_in, path_out, max_n_src=args.test_n_max, base_ranker=base_ranker, baseline=args.baseline, r_rand=args.r_rand)
		"""
	else:
		raise ValueError
def get_model_fld(args):
	"""Derive output folder names from the parsed command-line args.

	Returns (fld_data, fld_model, subfld): the data folder, the model output
	folder (spaces stripped) and the run-specific subfolder name.
	"""
	data_name = args.data_name
	if PHILLY:
		# cluster paths dislike '+' and '_' in the data folder name
		data_name = data_name.replace('+', '').replace('_', '')
	fld_data = DATA_PATH + '/' + data_name

	# encode the network geometry in the folder name
	dims = (args.token_embed_dim, args.rnn_units)
	depths = (args.encoder_depth, args.decoder_depth)
	master_config = 'width%s_depth%s' % (dims, depths)
	if args.max_ctxt_len != 90 or args.max_resp_len != 30:
		# only record lengths when they differ from the defaults
		master_config += '_len' + str((args.max_ctxt_len, args.max_resp_len))
	master_config = master_config.replace("'", '')

	fld_model = OUT_PATH
	if args.debug:
		fld_model += '/debug'
	fld_model += '/' + args.data_name.replace('../', '') + '_' + master_config

	s_mix = 'mix'
	model_class = args.model_class.lower()
	if model_class == 's2s':
		parts = ['s2s_%s(%.2f)' % (s_mix, args.conv_mix_ratio)]  # no conv data
	else:
		parts = ['%s_%s(%.2f,%.2f)' % (model_class, s_mix, args.conv_mix_ratio, args.nonc_mix_ratio)]
	if args.noisy_vocab > 0:
		parts.append('unk%.1fk' % (args.noisy_vocab / 1000))
	if model_class.startswith('fuse'):
		parts.append('std%.1f' % args.stddev)
	if args.reld:
		parts.append('reld')
	parts.append('lr' + str(args.lr))
	if len(args.fld_suffix) > 0:
		parts.append(args.fld_suffix)
	subfld = '_'.join(parts)
	return fld_data, fld_model.replace(' ', ''), subfld.replace(' ', '')
if __name__ == '__main__':
	# command-line entry point; the remaining flags live on the module-level
	# `parser` created elsewhere in this file
	parser.add_argument('mode')
	parser.add_argument('--skip', type=float, default=0.0)  # million samples to skip (resume)
	parser.add_argument('--test_fname', default='')
	# r_rand: latent perturbation radius; presumably < 0 triggers a sweep (see infer) -- confirm
	parser.add_argument('--r_rand', '-r', type=float, default=-1)
	parser.add_argument('--test_n_max', '-n', type=int, default=2000)
	args = parser.parse_args()
	run_master(args.mode, args)
| 7,523 | 28.73913 | 137 | py |
StyleFusion | StyleFusion-master/src/decode.py | from shared import *
from nltk.translate.bleu_score import SmoothingFunction
"""
AUTHOR:
Sean Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
class Decoder:
	"""Token-level decoder over latent vectors.

	Wraps a Keras model mapping (previous token, RNN states) ->
	(next-token distribution, new states), and exposes autoregressive
	greedy/sampled decoding, teacher-forced evaluation, and beam search.
	`self.mask` zeroes out the probability of tokens that must not be
	generated (UNK and a few special tokens, plus everything outside
	`allowed_words` when that list is given).
	"""
	def __init__(self, dataset, model, decoder_depth, latent_dim, allowed_words=None):
		self.dataset = dataset
		self.model = model
		self.decoder_depth = decoder_depth
		self.latent_dim = latent_dim
		# mask[ix] == 1. iff token ix may be generated (vocab + 1 for mask_zero)
		if allowed_words is None:
			self.mask = np.array([1.] * (self.dataset.num_tokens + 1))
		else:
			self.mask = np.array([0.] * (self.dataset.num_tokens + 1))
			for word in allowed_words:
				ix = self._ix(word)
				if ix is not None:
					self.mask[ix] = 1.
			print('allowed words %i/%i'%(sum(self.mask), len(self.mask)))
		default_forbid = [UNK_token, '(', '__url__', ')', EQN_token, CITE_token, IX_token] #+ ['queer', 'holmes', 'sherlock', 'john', 'watson', 'bannister']
		for word in default_forbid:
			ix = self._ix(word)
			if ix is not None:
				self.mask[ix] = 0. # in either case, UNK is not allowed

	def _ix(self, token):
		# vocab index of `token`, or None if out of vocabulary
		return self.dataset.token2index.get(token, None)

	def predict(self, latents, sampling=False, softmax_temperature=1, lm_wt=None):
		# autoregressive in parallel, greedy or softmax sampling
		# NOTE(review): lm_wt is accepted but never used here -- confirm intended
		latents = np.reshape(latents, (-1, self.latent_dim))	# (n, dim)
		n = latents.shape[0]
		n_vocab = len(self.mask)
		# every sequence starts from SOS; all decoder layers start from the latent
		prev = np.zeros((n, 1)) + self._ix(SOS_token)
		states = [latents] * self.decoder_depth	# list of state, each is [n, dim]
		mask = np.repeat(np.reshape(self.mask, (1, -1)), n, axis=0)	# (n, vocab)
		logP = [0.] * n
		stop = [False] * n
		hyp = []
		for _ in range(n):
			hyp.append([])

		def sample_token_index_softmax(prob):
			# temperature-sharpened sampling from the renormalized distribution
			if softmax_temperature != 1:
				prob = np.exp(np.log(prob) * softmax_temperature)
			return np.random.choice(n_vocab, 1, p=prob/sum(prob))[0]

		def sample_token_index_greedy(prob):
			return np.argmax(prob)

		if sampling:
			sample_token_index = sample_token_index_softmax
		else:
			sample_token_index = sample_token_index_greedy

		for _ in range(self.dataset.max_resp_len):
			out = self.model.predict([prev] + states)
			states = out[1:]
			tokens_proba = np.squeeze(out[0]) * mask # squeeze: (n, 1, vocab) => (n, vocab)
			prev = [0] * n
			for i in range(n):
				if stop[i]:
					continue	# sequence already emitted EOS
				prob = tokens_proba[i,:].ravel()
				ix = sample_token_index(prob)
				logP[i] += np.log(prob[ix])
				hyp[i].append(ix)
				prev[i] = ix
				if ix == self._ix(EOS_token):
					stop[i] = True
			prev = np.reshape(prev, (n, 1))

		# length-normalized log-probabilities, and the raw token index sequences
		return [logP[i]/len(hyp[i]) for i in range(n)], hyp

	def evaluate(self, latents, tgt_seqs):
		# teacher-forcing in parallel: length-normalized logP of tgt_seqs given latents
		latents = np.reshape(latents, (-1, self.latent_dim))	# (n, dim)
		n = latents.shape[0]
		states = [latents] * self.decoder_depth	# list of state, each is [n, dim]
		logP = [0.] * n
		prev = np.zeros((n, 1)) + self._ix(SOS_token)
		lens = [len(seq) for seq in tgt_seqs]
		epsilon = 1e-6	# floor to avoid log(0)
		for t in range(self.dataset.max_resp_len):
			out = self.model.predict([prev] + states)
			states = out[1:]
			tokens_proba = np.reshape(out[0], (n, -1)) # squeeze: (n, 1, vocab) => (n, vocab)
			prev = [0] * n
			for i in range(n):
				if t < lens[i]:
					# feed the ground-truth token regardless of prediction
					ix = tgt_seqs[i][t]
					logP[i] += np.log(max(epsilon, tokens_proba[i, ix]))
					prev[i] = ix
			prev = np.reshape(prev, (n, 1))
		return [logP[i]/lens[i] for i in range(n)]
		#return [logP[i]/self.dataset.max_resp_len for i in range(n)]

	def predict_beam(self, latents, beam_width=10, n_child=3, max_n_hyp=100):
		# multi-head beam search, not yet parallel
		prev = np.atleast_2d([self._ix(SOS_token)])
		beam = []
		# one initial beam node per latent vector ("head")
		for latent in latents:
			latent = np.atleast_2d(latent)
			states = [latent] * self.decoder_depth
			node = {'states':states[:], 'prev':prev, 'logP':0, 'hyp':[]}
			beam.append(node)
		print('beam search initial n = %i'%len(beam))
		# results holds finished hypotheses; PriorityQueue pops the LOWEST score,
		# which is used below to evict the worst once over capacity
		results = queue.PriorityQueue()
		t = 0
		while True:
			t += 1
			if t > 20:#self.dataset.max_tgt_len:
				break
			if len(beam) == 0:
				break
			pq = queue.PriorityQueue()
			for node in beam:
				out = self.model.predict([node['prev']] + node['states'])
				tokens_proba = out[0].ravel()
				states = out[1:]
				tokens_proba = tokens_proba * self.mask
				tokens_proba = tokens_proba/sum(tokens_proba)	# renormalize after masking
				top_tokens = np.argsort(-tokens_proba)
				# expand only the n_child most likely continuations
				for ix in top_tokens[:n_child]:
					logP = node['logP'] + np.log(tokens_proba[ix])
					hyp = node['hyp'][:] + [ix]
					if ix == self._ix(EOS_token):
						# finished: store with length-normalized score
						results.put((logP/t, hyp))
						if results.qsize() > max_n_hyp:
							results.get() # pop the hyp of lowest logP/t
						continue
					pq.put((
						logP, # no need to normalize to logP/t as every node is at the same t
						np.random.random(), # to avoid the case logP is the same
						{
							'states':states,
							'prev':np.atleast_2d([ix]),
							'logP':logP,
							'hyp':hyp,
							}
						))
					if pq.qsize() > beam_width:
						pq.get() # pop the node of lowest logP to maintain at most beam_width nodes => but this will encourage bland response
			beam = []
			while not pq.empty():
				_, _, node = pq.get()
				beam.append(node)
		# drain results; returned in ascending score order
		logPs = []
		hyps = []
		while not results.empty():
			logP, hyp = results.get()
			logPs.append(logP)
			hyps.append(hyp)
		return logPs, hyps
def rank_nbest(hyps, logP, logP_center, master, inp, infer_args=dict(), base_ranker=None):
	"""Score and rank n-best hypotheses by a weighted sum of terms.

	Terms: decoder logP, center logP, repetition penalty, normalized length,
	base-ranker logP and per-classifier scores; weights come from infer_args.
	Returns a list of (score, hypothesis, term-tuple) sorted best-first.
	"""
	# make sure hyps are list of str, and inp is str
	# as base_ranker, master, and clf may not share the same vocab
	assert(isinstance(hyps, list))
	assert(isinstance(hyps[0], str))
	assert(isinstance(inp, str))
	# strip tokenized "i.e." artifacts before scoring
	hyps_no_ie = []
	for hyp in hyps[:]:
		hyps_no_ie.append((' '+hyp+' ').replace(' i . e . ,',' ').replace(' i . e. ',' ').strip())
	hyps = hyps_no_ie[:]

	# ranking weights; absent keys default to 0 so the term is ignored.
	# the classifier weight is shared evenly across all classifiers
	wt_clf = infer_args.get('wt_clf', 0) / len(master.classifiers)
	wt_rep = infer_args.get('wt_rep', 0)
	wt_len = infer_args.get('wt_len', 0)
	wt_center = infer_args.get('wt_center', 0)
	wt_base = infer_args.get('wt_base', 0)

	n = len(logP)
	clf_score = []
	max_tgt_len = 30	# cap for length normalization
	for clf in master.classifiers:
		clf_score.append(clf.predict(hyps).ravel())
	if base_ranker is not None:
		# re-score each hypothesis under the base ranker, using ITS vocab
		hyp_seqs_base = [base_ranker.dataset.txt2seq(hyp) for hyp in hyps]
		inp_seq_base = base_ranker.dataset.txt2seq(inp)
		latent_base = base_ranker.model_encoder['S2S'].predict(np.atleast_2d(inp_seq_base))
		logP_base = base_ranker.decoder.evaluate([latent_base]*n, hyp_seqs_base)
	else:
		logP_base = [0] * n

	pq = queue.PriorityQueue()
	for i in range(n):
		hyp = hyps[i]
		rep = repetition_penalty(hyp)
		l = min(max_tgt_len, len(hyp.split()))/max_tgt_len	# normalized length in (0, 1]
		score = logP[i] + wt_center * logP_center[i] + wt_rep * rep + wt_len * l + wt_base * logP_base[i]
		clf_score_ = []
		for k in range(len(master.classifiers)):
			s = clf_score[k][i]
			score += wt_clf * s
			clf_score_.append(s)
		# negate the score so the PriorityQueue yields the best hypothesis first
		pq.put((-score, hyp, (logP[i], logP_center[i], logP_base[i], rep, l) + tuple(clf_score_)))

	results = []
	while not pq.empty():
		neg_score, hyp, terms = pq.get()
		#if len(set(['queer', 'holmes', 'sherlock', 'john', 'watson', 'bannister']) & set(hyp.split())) > 0:
		#	continue
		# merge tokenized archaic spellings back into modern words
		hyp = (' ' + hyp + ' ').replace(' to day ',' today ').replace(' to morrow ',' tomorrow ')#.replace('mr barker','')
		results.append((-neg_score, hyp, terms))
	return results
def repetition_penalty(hyp):
	"""Log-ratio of unique unigrams in `hyp` (a space-tokenized string).

	Returns 0.0 for no repetition (or an empty string) and a negative value
	that grows in magnitude with repetition.
	"""
	# simplified from https://sunlamp.visualstudio.com/sunlamp/_git/sunlamp?path=%2Fsunlamp%2Fpython%2Fdynamic_decoder_custom.py&version=GBmaster
	# ratio of unique 1-gram
	ww = hyp.split()
	if not ww:
		# empty hypothesis: nothing can repeat; avoid ZeroDivisionError
		return 0.0
	return np.log(min(1.0, len(set(ww)) / len(ww)))
def infer(latent, master, method='greedy', beam_width=10, n_rand=20, r_rand=1.5, softmax_temperature=1, lm_wt=0.5):
	"""Dispatch decoding of `latent` according to `method`.

	Methods: 'greedy', 'softmax', 'beam', or 'latent*' (perturbed latents,
	optionally combined with 'beam'/'softmax'). Returns (logP list, hyp list).
	"""
	decoder = master.decoder
	if method == 'greedy':
		return decoder.predict(latent, lm_wt=lm_wt)
	if method == 'softmax':
		# n_rand independent samples from the same latent
		return decoder.predict([latent] * n_rand, sampling=True, lm_wt=lm_wt)
	if method == 'beam':
		return decoder.predict_beam([latent], beam_width=beam_width)
	if method.startswith('latent'):
		# perturb the latent n_rand times: fixed radius if r_rand >= 0,
		# otherwise sweep radii evenly over [0, 5]
		if r_rand >= 0:
			radii = [r_rand] * n_rand
		else:
			radii = np.linspace(0, 5, n_rand)
		latents = [rand_latent(latent, r, limit=True) for r in radii]
		if 'beam' in method:
			return decoder.predict_beam(latents, beam_width=beam_width)
		return decoder.predict(latents, sampling=('softmax' in method),
			softmax_temperature=softmax_temperature, lm_wt=lm_wt)
	raise ValueError
def infer_comb(inp, master):
	"""Decode `inp` via latent perturbation and format the top-5 hypotheses.

	Returns a printable string: a separator, the input, then up to five
	'logP<TAB>hypothesis' lines, closed by a short separator.
	"""
	seq = master.dataset.txt2seq(inp)
	latent = master.model_encoder['S2S'].predict(np.atleast_2d(seq))
	reset_rand()	# deterministic latent perturbation
	logP, hyp_seqs = infer(latent, master, method='latent', n_rand=10, r_rand=-1)
	logP, hyp_seqs = remove_duplicate_unfished(logP, hyp_seqs, master.dataset.token2index[EOS_token])
	ranked = sorted(zip(logP, hyp_seqs), reverse=True)
	lines = ['-' * 10, inp]
	for lp, hyp_seq in ranked[:5]:
		lines.append('%.3f' % lp + '\t' + master.dataset.seq2txt(hyp_seq))
	return '\n'.join(lines) + '\n' + '-' * 5 + '\n'
def remove_duplicate_unfished(logP, hyp_seqs, ix_EOS):
	"""Drop unfinished hypotheses and deduplicate, keeping each one's best logP.

	A hypothesis is finished iff its last token is ix_EOS. If nothing
	survives, fall back to the first candidate (copied) so the caller
	always gets at least one hypothesis.
	"""
	best = dict()	# tuple(seq) -> best logP; insertion order preserved
	for score, seq in zip(logP, hyp_seqs):
		key = tuple(seq)
		if key[-1] != ix_EOS:
			continue	# unfinished: discard
		if key not in best or score > best[key]:
			best[key] = score
	if not best:
		return [logP[0]], [hyp_seqs[0][:]]
	return list(best.values()), [list(key) for key in best]
def parse_infer_args():
	"""Read inference hyper-parameters from src/infer_args.csv.

	Returns a dict with default {'prefix': 'S2S'}. Lines starting with '#'
	or lacking a comma are skipped; 'beam_width' and 'n_rand' are parsed as
	int, 'method' stays a str, everything else is parsed as float.
	"""
	arg = {'prefix':'S2S'}
	# use a context manager so the file handle is closed deterministically
	# (the old code leaked the handle returned by open())
	with open('src/infer_args.csv') as f:
		for line in f:
			if line.startswith('#'):
				continue	# comment line
			if ',' not in line:
				continue	# blank / malformed line
			k, v = line.strip('\n').split(',')
			if k != 'method':
				if k in ['beam_width', 'n_rand']:
					v = int(v)
				else:
					v = float(v)
			arg[k] = v
	return arg
def infer_rank(inp, master, infer_args, base_ranker=None, unique=True, verbose=True):
	"""Decode responses to `inp` with `master`, then rank them via rank_nbest.

	Returns a list of (score, hypothesis, term-tuple) sorted best-first,
	or [] if no hypothesis survives deduplication.
	NOTE(review): parameter `unique` is accepted but never used -- confirm.
	"""
	if verbose:
		print('infer_args = '+str(infer_args))
	inp_seq = master.dataset.txt2seq(inp)
	latent = master.model_encoder['S2S'].predict(np.atleast_2d(inp_seq))
	reset_rand()	# fixed seed so latent perturbation is reproducible
	if verbose:
		print('infering...')
	t0 = datetime.datetime.now()
	logP, hyp_seqs = infer(latent, master, method=infer_args['method'],
		beam_width=infer_args.get('beam_width'), n_rand=infer_args.get('n_rand'), r_rand=infer_args.get('r_rand'),
		softmax_temperature=infer_args.get('softmax_temperature'), lm_wt=infer_args.get('lm_wt'))
	t1 = datetime.datetime.now()
	if verbose:
		print('*'*10 + ' infer spent: '+str(t1-t0))
	n_raw = len(logP)
	logP, hyp_seqs = remove_duplicate_unfished(logP, hyp_seqs, master.dataset.token2index[EOS_token])
	if verbose:
		print('kept %i/%i after remove deuplication/unfisihed'%(len(logP), n_raw))
	hyps = [master.dataset.seq2txt(seq) for seq in hyp_seqs]
	if len(hyps) == 0:
		return []
	n_results = len(logP)
	if infer_args['method'] == 'latent' and infer_args['r_rand'] > 0:
		# hypotheses came from perturbed latents: re-score them at the
		# unperturbed ("center") latent with teacher forcing
		if verbose:
			print('calculating tf_logP...')
		logP_center = master.decoder.evaluate([latent]*n_results, hyp_seqs)
	else:
		logP_center = logP
	t2 = datetime.datetime.now()
	if verbose:
		print('*'*10 + ' logP_center spent: '+str(t2-t1))
	# NOTE(review): wts_classifier is computed but never used below -- dead code?
	wts_classifier = []
	for clf_name in master.clf_names:
		wts_classifier.append(infer_args.get(clf_name, 0))
	if verbose:
		print('ranking...')
	results = rank_nbest(hyps, logP, logP_center, master, inp, infer_args, base_ranker)
	t3 = datetime.datetime.now()
	if verbose:
		print('*'*10 + ' ranking spent: '+str(t3-t2))
	return results
| 11,359 | 29.455764 | 150 | py |
StyleFusion | StyleFusion-master/src/vis.py | from shared import *
from tf_lib import *
from main import run_master, get_model_fld
from scipy.optimize import fmin_powell as fmin
from mpl_toolkits.mplot3d import Axes3D
from sklearn import manifold
import scipy
"""
AUTHOR:
Sean Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
def dist_mat(coord):
	"""Pairwise distance matrix (via euc_dist) between the rows of `coord`.

	Returns a symmetric (n, n) array with a zero diagonal.
	"""
	n = coord.shape[0]
	out = np.zeros((n, n))
	for row in range(n):
		for col in range(row + 1, n):
			# distance is symmetric, so compute each pair only once
			out[row, col] = out[col, row] = euc_dist(coord[row, :], coord[col, :])
	return out
def interp(master, model_name, fld_save, type_='resp'):
	"""Plot decoder NLL along the latent interpolation S + u(T - S).

	S and T are encodings of the two endpoints (chosen by `type_`); NLL is
	evaluated with teacher forcing at 11 evenly spaced u in [0, 1]. Saves
	interp_<type_>.png and .tsv under fld_save and shows the figure.
	"""
	n = 1000
	print('building data...')
	_, d_inp_enc, d_inp_dec, d_out_dec, _ = master.dataset.feed_data('test', max_n=n, check_src=True)
	# choose the interpolation endpoints
	if type_ == 'resp':
		vec_u0 = master.model_encoder['S2S'].predict(d_inp_enc['ctxt'])
		vec_u1 = master.model_encoder['AE'].predict(d_inp_enc['resp'])
	elif type_ == 'stry':
		vec_u0 = master.model_encoder['AE'].predict(d_inp_enc['resp'])
		vec_u1 = master.model_encoder['AE'].predict(d_inp_enc['stry'])
	else:
		raise ValueError
	print('evaluating...')
	uu = np.linspace(0, 1, 11)
	NLL = []
	for u in uu:
		latent = vec_u0 + u * np.ones(vec_u0.shape) * (vec_u1 - vec_u0)
		NLL_resp = master.model_decoder_tf.evaluate(
			[latent, d_inp_dec['resp']],
			d_out_dec['resp'],
			verbose=0)
		if type_ == 'resp':
			NLL_ = NLL_resp
		else:
			# for 'stry', blend the two target losses linearly with u
			NLL_stry = master.model_decoder_tf.evaluate(
				[latent, d_inp_dec['stry']],
				d_out_dec['stry'],
				verbose=0)
			NLL_ = NLL_resp * (1. - u) + u * NLL_stry
		print('u = %.3f, NLL = %.3f'%(u, NLL_))
		NLL.append(NLL_)
	# plot the NLL curve with the endpoints highlighted
	fig = plt.figure(figsize=(6,3))
	ax = fig.add_subplot(111)
	ax.plot(uu, NLL,'k.-')
	print(uu)
	print(NLL)
	ax.plot(0, NLL[0], 'ro')
	ax.plot(1, NLL[-1], 'bo')
	ax.text(0, NLL[0] + 0.5, ' '+r'$S$', color='r')
	ax.text(1, NLL[-1], ' '+r'$T$', color='b')
	plt.xlabel(r'$u$')
	plt.ylabel('NLL')
	plt.title(model_name+'\nNLL of interpolation: '+r'$S+u(T-S)$')
	plt.subplots_adjust(top=0.8)
	plt.subplots_adjust(bottom=0.2)
	plt.savefig(fld_save+'/interp_%s.png'%type_)
	with open(fld_save+'/interp_%s.tsv'%type_,'w') as f:
		f.write('\t'.join(['u'] + ['%.3f'%u for u in uu])+'\n')
		f.write('\t'.join(['NLL'] + ['%.3f'%l for l in NLL])+'\n')
	plt.show()
def clusters(master, model_name, fld_save, D=2, use_bias=True, n_batch=1):
	"""Visualize latent-space clusters of different data sources.

	Produces three figures under fld_save: a per-bit value histogram, a
	pairwise distance matrix, and a low-dimensional embedding (MDS by
	default) of the concatenated latents.
	"""
	n_sample = BATCH_SIZE * n_batch
	method = 'MDS'
	#method = 'tSNE'
	#method = 'isomap'
	latent_d = dict()
	# fixed plot color per data source
	colors = {
		'base_conv': 'y',
		'base_resp': 'r',
		'bias_conv': 'k',
		'bias_nonc': 'b',
		}
	print('building data...')
	d_inp_enc = master.dataset.feed_data('test', max_n=n_sample, check_src=True, mix_ratio=(0.,1.))['inp_enc']
	latent_d['base_conv'] = master.model_encoder['S2S'].predict(d_inp_enc['ctxt'])
	if use_bias and 'AE' in master.prefix:
		latent_d['bias_nonc'] = master.model_encoder['AE'].predict(d_inp_enc['nonc'])
	#if use_bias and 'bias_conv' in master.dataset.files['test']:
	#	d_inp_enc = master.dataset.feed_data('test', max_n=n_sample, check_src=True, mix_ratio=(1.,0.))['inp_enc']
	#	latent_d['bias_conv'] = master.model_encoder['S2S'].predict(d_inp_enc['ctxt'])
	#else:
	d_inp_enc = master.dataset.feed_data('test', max_n=n_sample, check_src=True, mix_ratio=(0.,0.))['inp_enc']
	if 'AE' in master.prefix:
		#latent_d['base_nonc'] = master.model_encoder['AE'].predict(d_inp_enc['nonc'])
		latent_d['base_resp'] = master.model_encoder['AE'].predict(d_inp_enc['resp'])
	labels = list(sorted(latent_d.keys()))
	# NOTE(review): relies on the module-level `args` (args.restore) rather
	# than a parameter -- only usable from the __main__ path of this file
	fname_suffix = args.restore.split('/')[-1].replace('.npz','')
	if use_bias:
		fname_suffix += '_wbias'
	n_labels = len(labels)
	latent = np.concatenate([latent_d[k] for k in labels], axis=0)
	print('latent.shape',latent.shape)
	print('plotting bit hist...')
	# histogram of latent values per source, as percentage per bin
	bins = np.linspace(-1,1,31)
	for k in latent_d:
		l = latent_d[k].ravel()
		freq, _, _ = plt.hist(l, bins=bins, color='w')
		plt.plot(bins[:-1], 100.*freq/sum(freq), colors[k]+'.-')
	plt.ylim([0,50])
	plt.savefig(fld_save+'/hist_%s.png'%fname_suffix)
	plt.close()
	print('plotting dist mat...')
	# normalize distances by sqrt(dim) so they are comparable across models
	d_norm = np.sqrt(latent.shape[1])
	f, ax = plt.subplots()
	cax = ax.imshow(dist_mat(latent)/d_norm, cmap='bwr')
	#ax.set_title(model_name)
	f.colorbar(cax)
	ticks = []
	ticklabels = []
	n_prev = 0
	for i in range(n_labels):
		# label the middle of each source's band and mark its end index
		ticks.append(n_prev + n_sample/2)
		ticklabels.append(labels[i]+'\n')
		ticks.append(n_prev + n_sample)
		ticklabels.append('%i'%(n_sample * (i+1)))
		n_prev = n_prev + n_sample
	ax.set_xticks(ticks)
	ax.set_xticklabels(ticklabels)
	ax.xaxis.tick_top()
	ax.set_yticks(ticks)
	ax.set_yticklabels([s.strip('\n') for s in ticklabels])
	plt.savefig(fld_save+'/dist_%s.png'%fname_suffix)
	plt.close()
	# 2-D (or D-dim) embedding of the latents for visual inspection
	if method == 'tSNE':
		approx = manifold.TSNE(init='pca', verbose=1).fit_transform(latent)
	elif method == 'MDS':
		approx = manifold.MDS(D, verbose=1, max_iter=500, n_init=1).fit_transform(latent)
	elif method == 'isomap':
		approx = manifold.Isomap().fit_transform(latent)
	else:
		raise ValueError
	f, ax = plt.subplots()
	for k in labels:
		ax.plot(np.nan, np.nan, colors[k]+'.', label=k)
	# draw points in random order so no source systematically overpaints another
	jj = list(range(approx.shape[0]))
	np.random.shuffle(jj)
	for j in jj:
		i_label = int(j/n_sample)
		ax.plot(approx[j, 0], approx[j, 1], colors[labels[i_label]]+'.')
	#plt.legend(loc='best')
	plt.title(model_name)
	#ax.set_xticks([])
	#ax.set_yticks([])
	plt.savefig(fld_save+'/%s_%s.png'%(method, fname_suffix))
	plt.show()
def cos_sim(a, b):
	"""Cosine similarity between vectors a and b."""
	denom = np.linalg.norm(a) * np.linalg.norm(b)
	return np.inner(a, b) / denom
def angel_hist(master, model_name, fld_save):
	"""Histogram of angles between latent-difference vectors of multi-reference data.

	For each source utterance, forms (ref_latent - src_latent) for every
	reference and records the pairwise cosine similarity among these
	difference vectors. Writes angel.txt / angel_extra.txt / angel.png
	under fld_save.
	"""
	from rand_decode import load_1toN_data
	data = load_1toN_data(master.dataset.generator['test'])
	angel = []
	n_sample = 1000
	extra_info = []
	for i in range(n_sample):
		if i % 10 == 0:
			print(i)	# progress
		d = data[i]
		src_seq = np.reshape(d['src_seq'], [1, -1])
		latent_src = np.ravel(master.model_encoder['dial'].predict(src_seq))
		diff = []
		for ref_seq in d['ref_seqs']:
			ref_seq = np.reshape(ref_seq, [1, -1])
			latent_ref = np.ravel(master.model_encoder['auto'].predict(ref_seq))
			diff.append(latent_ref - latent_src)
		# BUG FIX: the pair loops used to reuse `i` as the loop variable,
		# shadowing the sample index, so extra_info recorded the pair index
		# instead of the sample index. Use distinct names (a, b).
		for a in range(len(diff) - 1):
			for b in range(a + 1, len(diff)):
				if str(d['ref_seqs'][a]) == str(d['ref_seqs'][b]):
					continue	# identical references carry no angle information
				angel.append(cos_sim(diff[a], diff[b]))
				extra_info.append('%i\t%i'%(i, len(d['ref_seqs'])))
	with open(fld_save + '/angel.txt', 'w') as f:
		f.write('\n'.join([str(v) for v in angel]))
	with open(fld_save + '/angel_extra.txt', 'w') as f:
		f.write('\n'.join(extra_info))
	plt.hist(angel, bins=30)
	plt.title(model_name)
	plt.savefig(fld_save + '/angel.png')
	plt.show()
def plot_history(paths, labels, k, ix=-1, ax=None):
	"""Plot the training curve of metric `k` (value index `ix`) from log file(s).

	Each log line starting with `k` holds comma-separated values; `ix` selects
	which value to plot. Raw values are drawn dotted, a moving average solid.
	"""
	if isinstance(paths, str):
		paths = [paths]
	import matplotlib.pyplot as plt
	def MA(y):
		# trailing moving average over a 30-point window (NaN until filled)
		window = 30
		ret = [np.nan] * len(y)
		for i in range(window, len(y)):
			ret[i] = np.mean(y[max(0, i - window + 1): i + 1])
		return ret
	def read_log(path, k):
		# parse (x = samples trained, y = metric values) pairs from a train log.
		# NOTE(review): open(path) here is never closed explicitly
		trained = np.nan
		xx = []
		yy = [[] for _ in range(4)]	# up to 4 values per metric line
		m = None	# number of values expected per line, set by the first match
		for line in open(path):
			if line.startswith('***** trained '):
				trained = float(line.split(',')[0].split()[-2])
			if line.startswith(k):
				vv = [float(v) for v in line.replace(':','=').split('=')[-1].split(',')]
				if m is None:
					m = len(vv)
					print('expecting %i values'%m)
				else:
					if m!=len(vv):
						continue	# malformed line: skip
				xx.append(trained)
				for i in range(len(vv)):
					yy[i].append(vv[i])
		return xx, yy[:m]
	if ax is None:
		_, ax = plt.subplots()
	color = ['r','b','k','m']
	# NOTE(review): paths always has >= 1 element after the str wrap above,
	# so the else branch below is unreachable (and would raise IndexError
	# on an empty list) -- condition probably meant `len(paths) > 1`; confirm
	if len(paths) > 0:
		for i, path in enumerate(paths):
			xx, yy = read_log(path, k)
			ss = path.split('/')
			label = ss[-1].replace('.txt','')	# NOTE(review): unused
			ax.plot(xx, yy[ix], color=color[i], linestyle=':', alpha=0.5)
			ax.plot(xx, MA(yy[ix]), color=color[i], label=labels[i])
		ax.set_title(k + '[%i]'%ix)
	else:
		xx, yy = read_log(paths[0], k)
		for i in range(len(yy)):
			ax.plot(xx, yy[i], color=color[i], linestyle=':')
			ax.plot(xx, MA(yy[i]), color=color[i], label=str(i + 1))
def plot_multiple(kk, paths, labels):
	"""Plot training curves for every metric key in `kk` on a 4-column grid."""
	n_col = 4
	# ceil division so a partial last row still gets its own subplots
	# (a truncating duplicate assignment was removed -- it was dead code)
	n_row = int(np.ceil(len(kk) / n_col))
	print('n_row = %i'%n_row)
	# NOTE(review): plt.subplots returns a 1-D axes array when n_row == 1,
	# so axs[row][col] would fail for fewer than 5 keys -- confirm callers
	# always pass >= 5 keys
	_, axs = plt.subplots(n_row, n_col, sharex=True)
	for i in range(len(kk)):
		k = kk[i]
		col = i % n_col
		row = int(i / n_col)
		ax = axs[row][col]
		# bleu/corr logs keep the headline value at index 2; others at the end
		if k.startswith('bleu') or k.startswith('corr'):
			ix = 2
		else:
			ix = -1
		plot_history(paths, labels, k, ix, ax=ax)
		ax.grid(True)
	plt.show()
if __name__ == '__main__':
	# visualization entry point; base flags come from the shared `parser`
	parser.add_argument('--vis_tp', default='clusters')	# which visualization to run
	parser.add_argument('--use_bias', type=int, default=1)
	parser.add_argument('--n_batch', type=int, default=5)
	args = parser.parse_args()
	print('>>>>> Not using GPU')
	os.environ["CUDA_VISIBLE_DEVICES"]="-1"	# force CPU-only TensorFlow
	master = run_master('vis', args)
	#if args.cpu_only:
	#fld = os.path.join(fld_model, model_name, 'vis')
	model_name = ''
	fld = master.fld + '/vis'
	print(fld)
	makedirs(fld)
	# dispatch on the requested visualization type
	if args.vis_tp.startswith('interp'):
		if 'stry' in args.vis_tp:
			interp(master, model_name, fld, type_='stry')
		else:
			interp(master, model_name, fld, type_='resp')
	elif args.vis_tp == 'clusters':
		clusters(master, model_name, fld, use_bias=bool(args.use_bias), n_batch=args.n_batch)
	elif args.vis_tp == 'angel':
		angel_hist(master, model_name, fld)
	else:
raise ValueError | 9,316 | 25.850144 | 109 | py |
StyleFusion | StyleFusion-master/src/model.py | from shared import *
from tf_lib import *
from dataset import *
from decode import *
from evaluate import *
"""
AUTHOR: Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
class ModelBase:
	"""Common scaffolding for trainable models: logging, the outer train
	loop, and weight (de)serialization to .npz checkpoints.

	Subclasses must implement build_model / build_model_test / train_a_load
	and may extend `self.extra` with attribute names to persist.
	"""
	def __init__(self):
		# populated by subclasses; listed here for documentation
		self.fld = None			# str: output folder
		self.n_trained = None		# int: samples trained so far
		self.max_n_trained = None	# int: training budget in samples
		self.dataset = None		# Dataset obj
		self.extra = None		# list of str: extra attrs saved in checkpoints
		self.vali_data = None		# dict of list
		self.layers = None

	def init_log(self, new, args):
		"""Prepare the output folder and train log; dump args for reproducibility."""
		# deal with existing fld
		if new and os.path.exists(self.fld):
			if PHILLY:
				# on the cluster never delete: pick the first unused fld_<i>.
				# BUG FIX: suffix was never incremented, so this looped
				# forever whenever fld_0 already existed
				suffix = 0
				while True:
					fld = self.fld + '_%i'%suffix
					if not os.path.exists(fld):
						self.fld = fld
						break
					suffix += 1
			else:
				if not PHILLY and not self.debug:
					# interactive safety check before wiping an existing run
					print('%s\nalready exists, do you want to delete the folder? (y/n)'%self.fld)
					ans = input()
					if not ans.lower() == 'y':
						exit()
				print('deleting fld: '+self.fld)
				shutil.rmtree(self.fld)
				time.sleep(0.1)
				print('fld deleted')
		self.log_train = self.fld + '/train.txt'
		if new or PHILLY or hostname != 'MININT-3LHNLKS':
			makedirs(os.path.join(self.fld, 'models'))
			open(self.log_train, 'w')	# truncate/create the log
			if not os.path.exists(self.fld + '/vocab.txt'):
				shutil.copyfile(self.dataset.path_vocab, self.fld + '/vocab.txt')
		# record all args for reproducibility
		ss = []
		for k in sorted(args.__dict__.keys()):
			ss.append('%s = %s'%(k, args.__dict__[k]))
		with open(self.fld + '/args.txt', 'w') as f:
			f.write('\n'.join(ss))
		if PHILLY:
			with open(self.log_train, 'a') as f:
				f.write('hostname: %s\n'%hostname)
				f.write('data_path: %s\n'%DATA_PATH)
				f.write('out_path: %s\n'%OUT_PATH)

	def train(self, batch_per_load=100):
		"""Outer training loop: validate, then train load-by-load until budget."""
		self.vali()
		while self.n_trained < self.max_n_trained:
			s = '\n***** trained %.3f M'%(self.n_trained/1e6)
			for tp in self.dataset.n_reset['train']:
				s += ', %s = %i'%(tp, self.dataset.n_reset['train'][tp])
			s += ' *****'
			write_log(self.log_train, s)
			self.train_a_load(batch_per_load)
			if self.debug:
				exit()	# single load only in debug mode

	def load_weights(self, path):
		"""Restore layer weights and persisted extra attributes from an .npz checkpoint."""
		self.prev_wt_fuse = None
		print('loading weights from %s'%path)
		npz = np.load(path, encoding='latin1', allow_pickle=True)
		print(npz.files)
		weights = npz['layers'].item()
		# log each layer's parameter shapes for sanity checking
		for k in weights:
			s = ' '*(20-len(k)) + k + ': %i params: '%len(weights[k])
			for wt in weights[k]:
				s += str(wt.shape) + ', '
			print(s)
		for attr in self.extra:
			if attr in npz:
				if attr not in ['name']:	# 'name' is fixed per subclass
					setattr(self, attr, npz[attr])
			else:
				print('WARNING! attr %s not in npz'%attr)
		self.build_model(weights)
		self.build_model_test()

	def extract_weights(self):
		"""Collect current layer weights as {layer name: list of arrays}."""
		weights = dict()
		if self.layers is None:
			return weights
		for k in self.layers:
			weights[k] = self.layers[k].get_weights()
		return weights

	def save_weights(self):
		"""Save weights + extra attributes to fld/models/<n_trained>M.npz; returns the path."""
		path = self.fld + '/models/%.1fM.npz'%(self.n_trained/1e6)
		weights = self.extract_weights()
		to_save = {'layers':weights}
		for attr in self.extra:
			to_save[attr] = getattr(self, attr)
		# retry a few times: transient filesystem errors happen on the cluster.
		# (was a bare `except:`, which also swallowed KeyboardInterrupt)
		n_try = 0
		while n_try < 3:
			try:
				np.savez(path, **to_save)
				print('saved to: '+path)
				break
			except Exception:
				n_try += 1
				print('cannot save, try %i'%n_try)
		return path

	# hooks implemented by subclasses -------------------------------------
	def build_model_test(self):
		pass

	def build_model(self, weights=dict()):
		pass

	def train_a_load(self, batch_per_load):
		pass

	def set_extra(self, npz):
		pass
class Seq2SeqBase(ModelBase):
def __init__(self, dataset, fld, args, new=False, allowed_words=None):
self.dataset = dataset
self.fld = fld
self.allowed_words = allowed_words
self.layers = None
self.history = LossHistory()
self.vali_data = None
self.classifiers = []
self.n_batch = 0
self.prev_n_batch = 0
self.dn_batch_vali = 100
self.bias_conv = False # hasattr(self.dataset, 'files') and ('bias_conv' in self.dataset.files['train'])
self.debug = args.debug
self.token_embed_dim = args.token_embed_dim
self.rnn_units = args.rnn_units
self.encoder_depth = args.encoder_depth
self.decoder_depth = args.decoder_depth
self.lr = args.lr
self.max_n_trained = args.max_n_trained
self.randmix = False
self.mix_ratio = (args.conv_mix_ratio, args.nonc_mix_ratio)
if not self.bias_conv:
assert(args.conv_mix_ratio == 0.)
self.extra = ['name']
self.init_extra(args)
if hasattr(args, 'skip'):
skip = int(1e6*args.skip)
else:
skip = 0
self.dataset.skip(skip, self.mix_ratio, conv_only=(self.name=='s2s'))
self.n_trained = skip
self.init_log(new, args)
self.build_model()
def get_mix_ratio(self):
if self.randmix:
ret = []
for ratio in self.mix_ratio:
p = [1. - ratio, ratio]
ret.append(np.random.choice([0.,1.], 1, p=p)[0])
return tuple(ret)
else:
return self.mix_ratio
def fit(self, inputs, outputs):
n_try = 0
if self.debug:
self.model.fit(
inputs,
outputs,
batch_size=BATCH_SIZE,
callbacks=[self.history],
verbose=FIT_VERBOSE)
return
while n_try < 3:
try:
self.model.fit(
inputs,
outputs,
batch_size=BATCH_SIZE,
callbacks=[self.history],
verbose=FIT_VERBOSE)
return
except Exception as e:
print('got error, sleeping')
print('E'*20)
print(e)
print('E'*20)
time.sleep(1)
n_try += 1
def _stacked_rnn(self, rnns, inputs, initial_states=None):
if initial_states is None:
initial_states = [None] * len(rnns)
outputs, state = rnns[0](inputs, initial_state=initial_states[0])
states = [state]
for i in range(1, len(rnns)):
outputs, state = rnns[i](outputs, initial_state=initial_states[i])
states.append(state)
return outputs, states
def _build_encoder(self, inputs, prefix):
_, encoder_states = self._stacked_rnn(
[self.layers['%s_encoder_rnn_%i'%(prefix, i)] for i in range(self.encoder_depth)],
self.layers['embedding'](inputs))
latent = encoder_states[-1]
return latent
def _build_decoder(self, input_seqs, input_states):
"""
for auto-regressive, states are returned and used as input for the generation of the next token
for teacher-forcing, token already given, so only need init states
"""
decoder_outputs, decoder_states = self._stacked_rnn(
[self.layers['decoder_rnn_%i'%i] for i in range(self.decoder_depth)],
self.layers['embedding'](input_seqs),
input_states)
decoder_outputs = self.layers['decoder_softmax'](decoder_outputs)
return decoder_outputs, decoder_states
def _create_layers(self, weights=dict()):
layers = dict()
name = 'embedding'
params = _params(name, weights, {'mask_zero':True})
layers[name] = Embedding(
self.dataset.num_tokens + 1, # +1 as mask_zero
self.token_embed_dim,
**params)
for i in range(self.decoder_depth):
name = 'decoder_rnn_%i'%i
params = _params(name, weights, {'return_state':True, 'return_sequences':True})
layers[name] = GRU(
self.rnn_units,
**params)
for prefix in self.prefix:
for i in range(self.encoder_depth):
name = '%s_encoder_rnn_%i'%(prefix, i)
params = _params(name, weights, {'return_state':True, 'return_sequences':True})
layers[name] = GRU(
self.rnn_units,
**params)
name = 'decoder_softmax'
params = _params(name, weights, {'activation':'softmax'})
layers[name] = Dense(
self.dataset.num_tokens + 1, # +1 as mask_zero
**params)
return layers
def build_model_test(self):
#self.refresh_session()
decoder_inputs = Input(shape=(None,), name='decoder_inputs')
# encoder
self.model_encoder = dict()
self.model_tf = dict()
self.tf_history = dict()
for prefix in self.prefix:
encoder_inputs = Input(shape=(None,), name=prefix+'_encoder_inputs')
latent = self._build_encoder(encoder_inputs, prefix=prefix)
self.model_encoder[prefix] = Model(encoder_inputs, latent)
self.model_encoder[prefix]._make_predict_function()
decoder_outputs, _ = self._build_decoder(decoder_inputs, [latent]*self.decoder_depth)
self.model_tf[prefix] = Model([encoder_inputs, decoder_inputs], decoder_outputs)
for layer in self.model_tf[prefix].layers:
layer.trainable = False
self.model_tf[prefix].compile(Adam(lr=0.), loss=_dec_loss) # lr = 0 to use '.fit', which has callbacks, as '.evaluate'
self.tf_history[prefix] = LossHistory()
# decoder: autoregressive
decoder_inital_states = []
for i in range(self.decoder_depth):
decoder_inital_states.append(Input(shape=(self.rnn_units,), name="decoder_inital_state_%i"%i))
decoder_outputs, decoder_states = self._build_decoder(decoder_inputs, decoder_inital_states)
model_decoder = Model(
[decoder_inputs] + decoder_inital_states,
[decoder_outputs] + decoder_states)
model_decoder._make_predict_function()
self.decoder = Decoder(self.dataset, model_decoder,
self.decoder_depth, self.rnn_units, allowed_words=self.allowed_words)
def get_vali_data(self):
if self.vali_data is not None:
#print('returning self.vali_data', self.vali_data)
return self.vali_data
print('getting vali data...')
def _feed_vali(k):
self.dataset.reset('vali')
d = self.dataset.feed_data('vali', max_n=vali_size, check_src=True, mix_ratio=k, conv_only=(self.name=='s2s'))
self.dataset.reset('vali')
return d
if self.debug:
vali_size = BATCH_SIZE
else:
vali_size = 1000
self.vali_data = _feed_vali((0, 1))
"""
self.vali_data['base'] = _feed_vali((0, 0))
self.vali_data['mix'] = _feed_vali(self.mix_ratio)
if self.bias_conv:
self.vali_data['bias'] = _feed_vali((1, 1))
else:
self.vali_data['bias'] = _feed_vali((0, 1))
"""
return self.vali_data
def vali(self):
self.build_model_test()
ss = []
for inp in ['who is he ?', 'do you like this game ?', 'good morning .']:
ss.append(infer_comb(inp, self))
write_log(self.log_train, '\n'.join(ss))
"""
data = self.get_vali_data()
if self.name.startswith('fuse'):
r_rand = 0.1 * np.sqrt(self.rnn_units)
else:
r_rand = 0.
#s_decoded = ''#eval_decoded(self, data, self.classifiers, r_rand=r_rand)[0]
#s_surrogate = eval_surrogate(self, data)[0]
#write_log(self.log_train, '\n' + s_decoded + '\n\n' + s_surrogate + '\n')
"""
self.prev_n_batch = self.n_batch
# save --------------------
self.save_weights()
def init_extra(self, args):
pass
	def train_a_load(self, batch_per_load):
		"""Train on one 'load' (batch_per_load batches) of data, then maybe validate.

		Pulls fresh training data at the current mix ratio, fits the model,
		updates sample/batch counters, and calls self.vali() once every
		self.dn_batch_vali batches (always skipped-check in debug mode).
		"""
		mix_ratio = self.get_mix_ratio()
		data = self.dataset.feed_data('train', BATCH_SIZE * batch_per_load, mix_ratio=mix_ratio, conv_only=(self.name == 's2s'))
		n_sample, inputs, outputs = self._inp_out_data(data)
		t0 = datetime.datetime.now()
		t0_str = str(t0).split('.')[0]
		write_log(self.log_train, 'start: %s'%t0_str + ', mix_ratio = '+str(mix_ratio))
		print('fitting...')
		self.fit(inputs, outputs)
		self.n_trained += n_sample
		self.n_batch += batch_per_load
		dt = (datetime.datetime.now() - t0).seconds
		loss = np.mean(self.history.losses)
		write_log(self.log_train, 'n_batch: %i, prev %i'%(self.n_batch, self.prev_n_batch))
		ss = ['spent: %i sec'%dt, 'train: %.4f'%loss]
		write_log(self.log_train, '\n'.join(ss))
		# only validate every dn_batch_vali batches; in debug mode always validate
		if not self.debug and (self.n_batch - self.prev_n_batch < self.dn_batch_vali):
			return
		# vali --------------------
		self.vali()
def print_loss(self, loss_weights):
s = 'loss: '+'-'*20 + '\n'
for i in range(len(self.loss)):
loss_name = str(self.loss[i])
if loss_name.startswith('<func'):
loss_name = loss_name.split()[1]
s += '%6.2f '%loss_weights[i] + loss_name + '\n'
s += '-'*20 + '\n'
write_log(self.log_train, s)
class Seq2Seq(Seq2SeqBase):
	"""Plain seq2seq baseline: encode the context, decode the response.

	Trained with teacher forcing; a single decoder-cross-entropy loss.
	"""
	def init_extra(self, args):
		self.name = 's2s'
		self.prefix = ['S2S']
	def build_model(self, weights=dict()):
		"""Build and compile the trainable encoder-decoder graph."""
		self.layers = self._create_layers(weights) # create new
		encoder_inputs = Input(shape=(None,), name='encoder_inputs')
		decoder_inputs = Input(shape=(None,), name='decoder_inputs')
		# connections: teacher forcing
		latent = self._build_encoder(encoder_inputs, self.prefix[0])
		# every decoder layer is initialized from the same encoder latent
		decoder_outputs, _ = self._build_decoder(decoder_inputs, [latent]*self.decoder_depth)
		# models
		self.model = Model(
			[encoder_inputs, decoder_inputs], # [input sentences, ground-truth target sentences],
			decoder_outputs) # shifted ground-truth sentences
		self.model.compile(Adam(lr=self.lr), loss=_dec_loss)
	def _inp_out_data(self, data):
		"""Map a Dataset.feed_data dict to (n_sample, model inputs, model outputs)."""
		inputs = [data['inp_enc']['ctxt'], data['inp_dec']['resp']]
		outputs = data['out_dec']['resp']
		return data['n_sample'], inputs, outputs
class VanillaMTask(Seq2SeqBase):
	"""Multi-task baseline: seq2seq loss plus two autoencoder (AE) losses,
	sharing one decoder, without the fusion/distance terms of StyleFusion."""
	def init_extra(self, args):
		self.name = 'mtask'
		self.loss = [
			_dec_loss, # logP(resp | S2S), just the seq2seq loss
			_dec_loss, # logP(resp | AE_resp)
			_dec_loss, # logP(resp | AE_nonc)
			]
		self.prefix = ['AE','S2S']
	def build_model(self, weights=dict()):
		"""Build the 3-headed training graph: one S2S head and two AE heads."""
		loss_weights = [1., 0.5, 0.5]
		self.layers = self._create_layers(weights) # create new
		# inputs
		inp_enc_ctxt = Input(shape=(None,), name='inp_enc_ctxt')
		inp_enc_resp = Input(shape=(None,), name='inp_enc_resp')
		inp_dec_resp = Input(shape=(None,), name='inp_dec_resp')
		inp_enc_nonc = Input(shape=(None,), name='inp_enc_nonc')
		inp_dec_nonc = Input(shape=(None,), name='inp_dec_nonc')
		inps_enc = [inp_enc_ctxt, inp_enc_resp, inp_enc_nonc]
		inps_dec = [inp_dec_resp, inp_dec_nonc]
		inputs = inps_enc + inps_dec
		# hiddens
		vec_s2s = self._build_encoder(inp_enc_ctxt, prefix='S2S')
		vec_ae_resp = self._build_encoder(inp_enc_resp, prefix='AE')
		vec_ae_nonc = self._build_encoder(inp_enc_nonc, prefix='AE')
		# outputs
		out_s2s, _ = self._build_decoder(inp_dec_resp, [vec_s2s]*self.decoder_depth)
		# NOTE(review): this head is fed inp_dec_nonc but its target (see
		# _inp_out_data) is out_dec['resp'] — looks like it should be
		# inp_dec_resp; confirm against original intent before changing.
		out_ae_resp, _ = self._build_decoder(inp_dec_nonc, [vec_ae_resp]*self.decoder_depth)
		out_ae_nonc, _ = self._build_decoder(inp_dec_nonc, [vec_ae_nonc]*self.decoder_depth)
		outputs = [out_s2s, out_ae_resp, out_ae_nonc]
		# compile
		self.print_loss(loss_weights)
		self.model = Model(inputs, outputs)
		self.model.compile(Adam(lr=self.lr), loss=self.loss, loss_weights=loss_weights)
	def _inp_out_data(self, data, u=None):
		"""Map a feed_data dict to (n_sample, inputs, outputs); u is unused here."""
		n_sample = data['n_sample']
		if n_sample == 0:
			return n_sample, [], []
		inps_enc = [data['inp_enc']['ctxt'], data['inp_enc']['resp'], data['inp_enc']['nonc']]
		inps_dec = [data['inp_dec']['resp'], data['inp_dec']['nonc']]
		outs_dec = [data['out_dec']['resp'], data['out_dec']['resp'], data['out_dec']['nonc']]
		return n_sample, inps_enc + inps_dec, outs_dec
class StyleFusion(Seq2SeqBase):
	"""StyleFusion model: fuses the S2S latent space with AE latent spaces of
	response and non-conversational (style) data via interpolation plus a
	latent-distance loss. 'fuse1' roughly follows SpaceFusion v1."""
	def init_extra(self, args):
		"""Pick the loss terms depending on the variant (fuse vs fuse1)."""
		self.name = args.model_class.lower()
		assert(self.name in ['fuse','fuse1'])
		self.max_wt_dist = args.wt_dist
		self.stddev = args.stddev
		self.v1 = (self.name == 'fuse1')
		self.ablation = args.ablation
		if self.v1:
			# roughly, not exactly, follow SpaceFusion v1, as in https://arxiv.org/abs/1902.11205
			_dec_loss_ae = _dec_loss
			_dist_loss = _absdiff_dist_v1
		else:
			# v2, consider fuse with nonc
			_dec_loss_ae = _dec_loss_u # interp(ae_resp, ae_nonc)
			if args.reld:
				_dist_loss = _relative_dist # consider all these terms d(s2s,resp), d(s2s,nonc), d(resp), d(nonc), d(s2s)
			else:
				_dist_loss = _absdiff_dist
		self.randmix = True # binary batch mix
		self.loss = [
			_dec_loss, # logP(resp | S2S), just the seq2seq loss
			_dec_loss, # logP(resp | interp), interp is between ctxt and resp, i.e. the 3rd term in Eq.3 in NAACL
			_dec_loss_ae,
			_dist_loss]
		self.prefix = ['AE','S2S']
	"""
	def refresh_session(self):
		K.clear_session() # avoid building graph over and over to slow down everything
		config = tf.ConfigProto()
		config.gpu_options.allow_growth = True
		K.set_session(tf.Session(config=config))
		for clf in self.classifiers:
			clf.load()
	"""
	def build_model(self, weights=dict()):
		"""Build the 4-output training graph: S2S, interp, AE, and latent distances."""
		loss_weights = [1., 1., 1., 1.]
		if self.ablation:
			loss_weights = [1., 1., 0., 1.] # disable L_{smooth,style}
		self.layers = self._create_layers(weights) # create new
		# Gaussian noise layer applied to every interpolated latent
		noisy = Lambda(_add_noise,
			arguments={'stddev':self.stddev},
			name='noisy')
		concat = Concatenate(name='concat_1', axis=-1)
		# inputs
		inp_enc_ctxt = Input(shape=(None,), name='inp_enc_ctxt')
		inp_enc_resp = Input(shape=(None,), name='inp_enc_resp')
		inp_dec_resp = Input(shape=(None,), name='inp_dec_resp')
		inps_enc = [inp_enc_ctxt, inp_enc_resp]
		inps_dec = [inp_dec_resp]
		inp_enc_nonc = Input(shape=(None,), name='inp_enc_nonc')
		inp_dec_nonc = Input(shape=(None,), name='inp_dec_nonc')
		inps_enc.append(inp_enc_nonc)
		inps_dec.append(inp_dec_nonc)
		inp_u = [Input(shape=(None,), name='inp_u')] # rand drawn from U(0,1). each batch has the same value, see _inp_out_data
		inputs = inps_enc + inps_dec + inp_u # match _inp_out_data
		# hiddens
		vec_s2s = self._build_encoder(inp_enc_ctxt, prefix='S2S')
		vec_ae_resp = self._build_encoder(inp_enc_resp, prefix='AE')
		vec_ae_nonc = self._build_encoder(inp_enc_nonc, prefix='AE')
		vec_interp_resp = noisy(Lambda(_interp, name='interp_resp')([vec_s2s, vec_ae_resp] + inp_u))
		# outputs
		out_s2s, _ = self._build_decoder(inp_dec_resp, [vec_s2s]*self.decoder_depth)
		out_interp_resp, _ = self._build_decoder(inp_dec_resp, [vec_interp_resp]*self.decoder_depth)
		if self.v1:
			out_ae, _ = self._build_decoder(inp_dec_nonc, [vec_ae_nonc]*self.decoder_depth)
		else:
			# v2: decode both resp and nonc from the SAME interpolated AE latent;
			# _dec_loss_u weighs the two heads by u at loss time
			vec_interp_ae = noisy(Lambda(_interp, name='interp_ae')([vec_ae_resp, vec_ae_nonc] + inp_u))
			out_interp_ae_resp, _ = self._build_decoder(inp_dec_resp, [vec_interp_ae]*self.decoder_depth)
			out_interp_ae_nonc, _ = self._build_decoder(inp_dec_nonc, [vec_interp_ae]*self.decoder_depth)
			out_ae = concat([out_interp_ae_resp, out_interp_ae_nonc])
		outs_dec = [out_s2s, out_interp_resp, out_ae]
		# the three latents concatenated feed the distance loss (_cross_inner splits them back)
		outs_dist = concat([vec_s2s, vec_ae_resp, vec_ae_nonc])
		outputs = outs_dec + [outs_dist]
		# compile
		self.print_loss(loss_weights)
		self.model = Model(inputs, outputs)
		self.model.compile(Adam(lr=self.lr), loss=self.loss, loss_weights=loss_weights)
	def _inp_out_data(self, data, u=None):
		"""Map a feed_data dict to (n_sample, inputs, outputs).

		u: interpolation weight in [0, 1]; random per-sample when None,
		otherwise broadcast to every sample.
		"""
		n_sample = data['n_sample']
		if n_sample == 0:
			return n_sample, [], []
		if u is None:
			u = np.random.random(n_sample)
		else:
			u = np.array([u] * n_sample)
		inps_enc = [data['inp_enc']['ctxt'], data['inp_enc']['resp']]
		inps_dec = [data['inp_dec']['resp']]
		outs_dec = [data['out_dec']['resp'], data['out_dec']['resp']]
		inps_enc.append(data['inp_enc']['nonc'])
		inps_dec.append(data['inp_dec']['nonc'])
		inputs = inps_enc + inps_dec + [u]
		if self.v1:
			outs_dec.append(data['out_dec']['nonc'])
		else:
			# pack [resp one-hots | nonc one-hots | u] along the last axis so
			# _dec_loss_u can unpack them (see its docstring comments)
			_, l, v = data['out_dec']['resp'].shape
			out_interp_nonc = np.zeros([n_sample, l, v*2+1])
			out_interp_nonc[:,:,:v] = data['out_dec']['resp']
			out_interp_nonc[:,:,v:v*2] = data['out_dec']['nonc']
			for t in range(l):
				out_interp_nonc[:,t,-1] = u
			outs_dec.append(out_interp_nonc)
		# dummy target for the distance output: the loss ignores y_true
		outputs = outs_dec + [np.zeros((n_sample, 1))]
		return n_sample, inputs, outputs
class LossHistory(Callback):
	"""Keras callback that records the per-batch training loss in self.losses."""
	def reset(self):
		self.losses = []
	def on_train_begin(self, logs={}):
		self.reset()
	def on_batch_end(self, batch, logs={}):
		self.losses.append(logs.get('loss'))
def _params(name, weights, extra=dict()):
params = {'name':name}
if name in weights:
params['weights'] = weights[name]
for k in extra:
params[k] = extra[k]
return params
def write_log(path, s, PRINT=True, mode='a'):
	"""Append `s` to the log file at `path`, optionally echoing to stdout.

	On PHILLY the shared filesystem can transiently refuse writes
	(PermissionError), so retry up to 3 times with a short sleep, then give
	up silently.
	"""
	if PRINT:
		print(s)
		sys.stdout.flush()
	if not s.endswith('\n'):
		s += '\n'
	if PHILLY:
		n_try = 0
		while n_try < 3:
			try:
				with open(path, mode) as f:
					f.write(s)
				break
			except OSError:
				# was a bare `except:`, which also swallowed KeyboardInterrupt /
				# SystemExit; PermissionError (the expected failure) is a
				# subclass of OSError, so this keeps the intended retry behavior
				print('cannot write_log, sleeping...')
				time.sleep(2)
				n_try += 1
	else:
		with open(path, mode) as f:
			f.write(s)
# ------------------- customized loss --------------------
def _dist_1nn(a, b=None):
	"""Soft 1-nearest-neighbor distance between rows of `a` and `b`.

	a, b: [BATCH_SIZE, dim] tensors; b defaults to a (within-set distance).
	Returns a scalar: mean over rows of the inverse-distance-weighted average
	of pairwise RMS distances (a smooth proxy for nearest-neighbor distance).
	"""
	n = BATCH_SIZE
	expanded_a = tf.expand_dims(a, 1)
	if b is None:
		b = a
	expanded_b = tf.expand_dims(b, 0)
	# pairwise RMS distance matrix, shape [n, n]
	d_squared = tf.reduce_mean(tf.squared_difference(expanded_a, expanded_b), 2)
	mat = tf.sqrt(tf.maximum(0., d_squared))
	# inverse-distance weights; the +1000 on the diagonal effectively excludes
	# the self-pair. NOTE(review): the diagonal is also suppressed when b != a,
	# where i==i is not a self-pair — confirm that is intended.
	wt = 1./(mat + tf.eye(n) * 1000 + 1e-6)
	sum_wt = tf.reshape(tf.reduce_sum(wt, axis=1), [n, 1])
	sum_wt = tf.tile(sum_wt, [1,n])
	wt = wt/sum_wt
	d1nn = tf.reduce_sum(mat * wt, axis=1)
	d1nn = tf.reduce_mean(d1nn)
	return d1nn
def _cross_inner(vecs, v1=False):
	"""Split the concatenated latents and compute (cross, inner) distance terms.

	vecs: concat([vec_s2s, vec_ae_resp, vec_ae_nonc]) along the last axis
	      (see outs_dist in StyleFusion.build_model).
	Returns (cross, inner): cross-space distance(s) to minimize and
	inner-space spread to maximize; v1 uses the SpaceFusion-v1 formulation.
	"""
	def sqrt_mse(a, b=None, shuffle=True, cap=None):
		# RMS difference between rows of a and b; with shuffle=True, compares
		# each row of a against the previous row of b (a cheap cross-pairing)
		if b is None:
			b = a
		if shuffle:
			#diff = a - tf.random_shuffle(b)
			_, d = a.shape
			n = BATCH_SIZE - 1
			diff = tf.slice(a, [1,0], [n,d]) - tf.slice(b, [0,0], [n,d])
		else:
			diff = a - b
		squared = tf.pow(diff, 2)
		if cap is not None:
			squared = tf.minimum(cap**2, squared)
		return tf.sqrt(tf.reduce_mean(squared))
	vec_s2s, vec_ae_resp, vec_ae_nonc = tf.split(vecs, 3, axis=-1)
	cross_resp = sqrt_mse(vec_s2s, vec_ae_resp, shuffle=False)
	inner_s2s_resp = _dist_1nn(vec_s2s)
	inner_ae_nonc = _dist_1nn(vec_ae_nonc)
	if v1:
		print('*'*10 + ' [WARNING] Using v1 cross_inner ' + '*'*10)
		return cross_resp, inner_s2s_resp + inner_ae_nonc
	else:
		cross_s2s_nonc = _dist_1nn(vec_s2s, vec_ae_nonc)
		inner_ae_resp = _dist_1nn(vec_ae_resp)
		cross = 0.5 * (cross_resp + cross_s2s_nonc)
		# the weakest inner spread is the binding constraint
		inner = tf.minimum(tf.minimum(inner_s2s_resp, inner_ae_resp), inner_ae_nonc)
		return cross, inner
def _relative_dist(_, y_pred):
	"""Latent distance loss: ratio of cross-space to inner-space distance."""
	dist_cross, dist_inner = _cross_inner(y_pred)
	return dist_cross / dist_inner
def _absdiff_dist(_, y_pred):
	"""Latent distance loss: cross-space distance minus inner-space spread."""
	dists = _cross_inner(y_pred)
	return dists[0] - dists[1]
def _absdiff_dist_v1(_, y_pred):
	"""v1 variant of _absdiff_dist: uses the SpaceFusion-v1 cross/inner terms."""
	dist_cross, dist_inner = _cross_inner(y_pred, v1=True)
	return dist_cross - dist_inner
def _dec_loss(y_true, y_pred):
	"""Decoder loss: mean categorical cross-entropy over batch and time steps."""
	# to compute - logP(resp|vec_interp_resp)
	return tf.reduce_mean(keras.losses.categorical_crossentropy(y_true, y_pred))
def _dec_loss_u(y_true, y_pred):
	"""u-weighted mixture of resp and nonc decoder cross-entropies."""
	# to compute u * logP(resp|vec_interp_ae) + (1-u) * logP(nonc|vec_interp_ae)
	# where vec_interp_ae = u * vec_resp_ae + (1-u) * vec_nonc_ae
	# y_true = concat([y_resp, y_nonc, u]), shape = [BATCH_SIZE, seq_len, 2 * vocab_size + 1], see out_interp_nonc in _in_out_data
	# y_pred = concat([y_resp_pred, y_nonc_pred])
	y_resp_pred, y_nonc_pred = tf.split(y_pred, 2, axis=-1)
	vocab_size = tf.cast(y_resp_pred.shape[2], tf.int32)
	y_resp, y_nonc, u = tf.split(y_true, [vocab_size, vocab_size, 1], axis=-1)
	u = u[:,:,0] # like tf.squeeze, so [BATCH_SIZE, seq_len]
	loss_resp = keras.losses.categorical_crossentropy(y_resp, y_resp_pred) # [BATCH_SIZE, seq_len]
	loss_nonc = keras.losses.categorical_crossentropy(y_nonc, y_nonc_pred)
	loss = u * loss_resp + (1. - u) * loss_nonc # [BATCH_SIZE, seq_len]
	return tf.reduce_mean(loss)
# ------------------- customized layers --------------------
def _add_noise(mu, stddev):
	"""Add isotropic Gaussian noise with the given stddev to tensor `mu`."""
	eps = K.random_normal(shape=K.shape(mu))
	return mu + tf.multiply(eps, stddev)
def _interp(inp):
	"""Interpolate between two latent tensors: u*a + (1-u)*b.

	inp: [a, b] (u drawn uniformly per sample) or [a, b, u] (u supplied,
	shape [batch] or [batch, 1], tiled across the latent dimension).
	"""
	if len(inp) == 2:
		a, b = inp
		u = K.random_uniform(shape=(K.shape(a)[0], 1))
	else:
		a, b, u = inp
		u = K.tile(K.reshape(u, [-1,1]), [1, K.shape(a)[1]]) # repeat along axis=1
	#return a + tf.multiply(b - a, u)
	return tf.multiply(a, u) + tf.multiply(b, 1 - u)
def convert_model_vocab(path_npz_old, path_npz_new, path_vocab_old, path_vocab_new):
if os.path.exists(path_npz_new):
print('already exists: '+path_npz_new)
return
_, token2index_old = load_vocab(path_vocab_old)
index2token_new, _ = load_vocab(path_vocab_new)
n_old = max(token2index_old.values()) + 1
n_new = max(index2token_new.keys()) + 1
print('vocab: %i => %i'%(n_old, n_new))
new2old = dict()
ix_unk_old = token2index_old[UNK_token]
for ix in index2token_new:
token = index2token_new[ix]
new2old[ix] = token2index_old.get(token, ix_unk_old)
print('loading from: '+str(path_npz_old))
npz = np.load(path_npz_old, encoding='latin1')
weights = npz['layers'].item()
embedding_old = weights['embedding'][0]
softmax_wt_old = weights['decoder_softmax'][0]
softmax_bias_old = weights['decoder_softmax'][1]
n_old_loaded, dim = embedding_old.shape
assert(n_old_loaded == n_old)
embedding_new = np.zeros((n_new, dim))
softmax_wt_new = np.zeros((dim, n_new))
softmax_bias_new = np.zeros((n_new,))
print(' embedding: ' + str(embedding_old.shape) + ' => ' + str(embedding_new.shape))
print(' softmax_wt: ' + str(softmax_wt_old.shape) + ' => ' + str(softmax_wt_new.shape))
print('softmax_bias: ' + str(softmax_bias_old.shape) + ' => ' + str(softmax_bias_new.shape))
# PAD
embedding_new[0,:] = embedding_old[0, :]
softmax_wt_new[:, 0] = softmax_wt_old[:, 0]
softmax_bias_new[0] = softmax_bias_old[0]
for ix in index2token_new:
embedding_new[ix, :] = embedding_old[new2old[ix], :]
softmax_wt_new[:, ix] = softmax_wt_old[:, new2old[ix]]
softmax_bias_new[ix] = softmax_bias_old[new2old[ix]]
weights['embedding'] = [embedding_new]
weights['decoder_softmax'] = [softmax_wt_new, softmax_bias_new]
print('saving to: '+str(path_npz_new))
to_save = {'layers':weights}
for k in npz.files:
if k != 'layers' and 'mix' not in k:
to_save[k] = npz[k]
np.savez(path_npz_new, **to_save)
| 24,831 | 28.632458 | 127 | py |
StyleFusion | StyleFusion-master/src/tf_lib.py |
from keras.models import Model, load_model, model_from_yaml
from keras.layers import Input, GRU, Dense, Embedding, Dropout, Concatenate, Lambda, Add, Subtract, Multiply, GaussianNoise
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam, RMSprop
from keras.callbacks import Callback
from keras import backend as K
import tensorflow as tf
from keras.activations import hard_sigmoid
import keras
| 455 | 37 | 123 | py |
StyleFusion | StyleFusion-master/src/classifier.py | from shared import *
from tf_lib import *
import json
from dataset import load_vocab
from sklearn import linear_model
import pickle
"""
AUTHOR:
Sean Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
class ClassifierNeural():
	"""RNN style classifier: GRU encoder(s) + MLP with sigmoid output.

	Hyperparameters and vocab are loaded from the model folder `fld`
	(args.json, vocab.txt, model.h5).
	"""
	def __init__(self, fld):
		params = json.load(open(fld + '/args.json'))
		# tgt_only: score the target sentence alone; otherwise src+tgt pair
		if params['tgt_only']:
			self.prefix = ['tgt']
		else:
			self.prefix = ['src','tgt']
		self.encoder_depth = params['encoder_depth']
		self.rnn_units = params['rnn_units']
		self.mlp_depth = params['mlp_depth']
		self.mlp_units = params['mlp_units']
		self.include_punc = params['include_punc']
		self.index2token, self.token2index = load_vocab(fld + '/vocab.txt')
		self.fld = fld
		self.load()
	def load(self):
		"""(Re)build the graph and load the trained weights."""
		self.build_model()
		self.model.load_weights(self.fld+'/model.h5')
	def _create_layers(self):
		"""Create all layers (shared embedding, per-prefix GRU stacks, MLP)."""
		layers = dict()
		layers['embedding'] = Embedding(
			max(self.index2token.keys()) + 1, # +1 as mask_zero
			self.rnn_units, mask_zero=True,
			name='embedding')
		for prefix in self.prefix:
			for i in range(self.encoder_depth):
				name = '%s_encoder_rnn_%i'%(prefix, i)
				layers[name] = GRU(
					self.rnn_units,
					return_state=True,
					return_sequences=True,
					name=name)
		for i in range(self.mlp_depth - 1):
			name = 'mlp_%i'%i
			layers[name] = Dense(
				self.mlp_units,
				activation='tanh', name=name)
		# final layer: single sigmoid unit (binary style probability)
		name = 'mlp_%i'%(self.mlp_depth - 1)
		layers[name] = Dense(1, activation='sigmoid', name=name)
		return layers
	def _stacked_rnn(self, rnns, inputs, initial_states=None):
		"""Chain RNN layers; return (last outputs, list of final states)."""
		if initial_states is None:
			initial_states = [None] * len(rnns)
		outputs, state = rnns[0](inputs, initial_state=initial_states[0])
		states = [state]
		for i in range(1, len(rnns)):
			outputs, state = rnns[i](outputs, initial_state=initial_states[i])
			states.append(state)
		return outputs, states
	def _build_encoder(self, inputs, layers, prefix):
		"""Encode a token-id sequence; the latent is the deepest final state."""
		_, encoder_states = self._stacked_rnn(
			[layers['%s_encoder_rnn_%i'%(prefix, i)] for i in range(self.encoder_depth)],
			layers['embedding'](inputs))
		latent = encoder_states[-1]
		return latent
	def build_model(self):
		"""Wire encoder latent(s) through the MLP into self.model (not trainable: lr=0)."""
		layers = self._create_layers()
		encoder_inputs = dict()
		latents = []
		for prefix in self.prefix:
			encoder_inputs[prefix] = Input(shape=(None,), name=prefix+'_encoder_inputs')
			latents.append(self._build_encoder(encoder_inputs[prefix], layers, prefix=prefix))
		if len(self.prefix) > 1:
			out = Concatenate()(latents)
			inp = [encoder_inputs['src'], encoder_inputs['tgt']]
		else:
			out = latents[0]
			inp = encoder_inputs[self.prefix[0]]
		for i in range(self.mlp_depth):
			out = layers['mlp_%i'%i](out)
		self.model = Model(inp, out)
		self.model.compile(optimizer=Adam(lr=0), loss='binary_crossentropy')
	def txt2seq(self, txt):
		"""Tokenize a space-separated string into token ids (unknowns -> UNK)."""
		tokens = txt.strip().split(' ')
		seq = []
		ix_unk = self.token2index[UNK_token]
		for token in tokens:
			if self.include_punc or is_word(token): # skip punctuation if necessary
				seq.append(self.token2index.get(token, ix_unk))
		return seq
	def seq2txt(self, seq):
		"""Inverse of txt2seq: token ids back to a space-joined string."""
		return ' '.join([self.index2token[i] for i in seq])
	def txts2mat(self, txts, max_len=30):
		"""Convert text(s) to a zero-padded [n, max_len] id matrix."""
		if isinstance(txts, str):
			txts = [txts]
		data = np.zeros((len(txts), max_len))
		for j, txt in enumerate(txts):
			seq = self.txt2seq(txt.strip(EOS_token).strip()) # stripped EOS_token here
			for t in range(min(max_len, len(seq))):
				data[j, t] = seq[t]
		return data
	def predict(self, txts):
		"""Return a 1-D array of style probabilities for the given texts."""
		mat = self.txts2mat(txts)
		return self.model.predict(mat).ravel()
class ClassifierNgram:
	"""Logistic-regression style classifier over binary n-gram features.

	The n-gram vocabulary is read from `<fld>/<n>gram[.include_punc].txt`
	(one n-gram per line); the fitted sklearn model is pickled next to it.
	"""
	def __init__(self, fld, ngram, include_punc=False):
		self.fld = fld
		self.ngram2ix = dict()
		self.ngram = ngram
		self.include_punc = include_punc
		fname = '%igram'%ngram
		if include_punc:
			fname += '.include_punc'
		self.path_prefix = fld + '/' + fname
		# line number = feature index
		for i, line in enumerate(open(self.path_prefix + '.txt', encoding='utf-8')):
			ngram = line.strip('\n')
			self.ngram2ix[ngram] = i
			assert(self.ngram == len(ngram.split()))
		self.vocab_size = i + 1
		print('loaded %i %igram'%(self.vocab_size, self.ngram))
		#self.model = LogisticRegression(solver='sag')#, max_iter=10)
		self.model = linear_model.SGDClassifier(loss='log', random_state=9, max_iter=1, tol=1e-3)
	def txts2mat(self, txts):
		"""Binary feature matrix [n_txts, vocab_size]: 1 if the n-gram occurs."""
		X = np.zeros((len(txts), self.vocab_size))
		for i, txt in enumerate(txts):
			ww = txt2ww(txt, self.include_punc)
			for t in range(self.ngram, len(ww) + 1):
				ngram = ' '.join(ww[t - self.ngram: t])
				j = self.ngram2ix.get(ngram, None)
				if j is not None:
					X[i, j] = 1.
		return X
	def load(self):
		"""Load the pickled fitted model, replacing the fresh SGDClassifier."""
		self.model = pickle.load(open(self.path_prefix + '.p', 'rb'))
	def predict(self, txts):
		"""Return P(class 1) for each text."""
		data = self.txts2mat(txts)
		prob = self.model.predict_proba(data)
		return prob[:,1]
class ClassifierNgramEnsemble:
	"""Accuracy-weighted ensemble of 1..max_ngram ClassifierNgram models."""
	def __init__(self, fld, include_punc=False, max_ngram=4):
		self.fld = fld
		self.children = dict()
		self.wt = dict()
		for ngram in range(1, max_ngram + 1):
			self.children[ngram] = ClassifierNgram(fld, ngram, include_punc)
			self.children[ngram].load()
			# weight each child by how much better than chance it is:
			# acc 0.5 -> weight 0, acc 1.0 -> weight 1
			acc = float(open(self.children[ngram].path_prefix + '.acc').readline().strip('\n'))
			self.wt[ngram] = 2. * max(0, acc - 0.5)
	def predict(self, txts):
		"""Weighted-average probability over all child classifiers."""
		avg_scores = np.array([0.] * len(txts))
		for ngram in self.children:
			scores = self.children[ngram].predict(txts)
			avg_scores += scores * self.wt[ngram]
		return avg_scores / sum(self.wt.values())
def is_word(token):
	"""Return True iff `token` contains at least one alphabetic character.

	Used to distinguish words from pure punctuation/number tokens.
	"""
	# idiomatic replacement for the original manual loop-with-early-return
	return any(c.isalpha() for c in token)
def load_classifier(fld, args=None):
	"""Instantiate the classifier matching the folder-name suffix.

	'*ngram' -> ClassifierNgramEnsemble, '*neural' -> ClassifierNeural;
	anything else raises ValueError. `args` is accepted but unused.
	"""
	if fld.endswith('ngram'):
		return ClassifierNgramEnsemble(fld)
	if fld.endswith('neural'):
		return ClassifierNeural(fld)
	raise ValueError
def clf_interact(fld):
	"""Interactive loop: read a line from stdin, print its style score.

	An empty input line exits the loop.
	"""
	clf = load_classifier(fld)
	while True:
		print('\n---- please input ----')
		txt = input()
		if txt == '':
			break
		score = clf.predict([txt])[0]
		print('%.4f'%score)
def clf_eval(clf_fld, path):
	"""Score every hypothesis in a TSV file and print the average style score."""
	# path is a tsv, last col is hyp
	clf = load_classifier(clf_fld)
	sum_score = 0
	n = 0
	for line in open(path, encoding='utf-8'):
		txt = line.strip('\n').split('\t')[-1].lower()
		sum_score += clf.predict([txt])[0]
		n += 1
		if n % 100 == 0:
			print('eval %i lines'%n)
	print('finally %i samples'%n)
	print('avg style score: %.4f'%(sum_score/n))
def txt2ww(txt, include_punc):
	"""Split `txt` into tokens wrapped in SOS/EOS markers.

	When include_punc is False, non-word tokens (no alphabetic char) are dropped.
	"""
	kept = [w for w in txt.split() if include_punc or is_word(w)]
	return [SOS_token] + kept + [EOS_token]
def score_file(path, name, col=1):
	"""Print the mean style score of column `col` over (up to 1500 lines of) a TSV file."""
	clf = load_classifier(name)
	txts = []
	for line in open(path, encoding='utf-8'):
		txts.append(line.strip('\n').split('\t')[col])
		# cap the sample for speed
		if len(txts) == 1500:
			break
	print('scoring...')
	print(np.mean(clf.predict(txts)))
class Classifier1gramCount:
	"""Corpus-level style metric based on per-word average human scores.

	fit() aggregates word -> mean score from `<fld>/all.txt`;
	corpus_score() measures keyword coverage against the top-scoring words.
	"""
	def __init__(self, fld):
		self.fld = fld
	def fit(self, min_freq=60, max_n=1e5):
		"""Compute per-word mean/se/count from all.txt and write count.tsv."""
		scores = dict()
		n = 0
		for line in open(self.fld + '/all.txt', encoding='utf-8'):
			n += 1
			cells = line.strip('\n').split('\t')
			if len(cells) != 2:
				print(cells)
				exit()
			txt, score = cells
			# count each word at most once per sentence
			for w in set(txt.strip().split()):
				if is_word(w):
					if w not in scores:
						scores[w] = []
					scores[w].append(float(score))
			if n == max_n:
				break
		lines = ['\t'.join(['word', 'avg', 'se', 'count'])]
		for w in scores:
			count = len(scores[w])
			# drop rare words: their mean score is too noisy
			if count < min_freq:
				continue
			avg = np.mean(scores[w])
			se = np.std(scores[w])/np.sqrt(count)
			lines.append('\t'.join([w, '%.4f'%avg, '%.4f'%se, '%i'%count]))
		with open(self.fld + '/count.tsv', 'w', encoding='utf-8') as f:
			f.write('\n'.join(lines))
	def load(self):
		"""Load word -> mean score coefficients from count.tsv."""
		self.coef = dict()
		f = open(self.fld + '/count.tsv', encoding='utf-8')
		header = f.readline()
		for line in f:
			w, avg = line.strip('\n').split('\t')[:2]
			self.coef[w] = float(avg)
	def corpus_score(self, txts, kw=100):
		"""Return (mean, se) of the per-sentence fraction of top-kw keywords."""
		scores = []
		coef_w = []
		for w in self.coef:
			coef_w.append((self.coef[w], w))
		# keywords = the kw words with the highest mean human score
		coef_w = sorted(coef_w, reverse=True)[:kw]
		print('last:',coef_w[-1])
		keywords = set([w for _, w in coef_w])
		#total_joint = 0
		#total = 0
		for txt in txts:
			words = set()
			for w in txt.strip().split():
				if is_word(w):
					words.add(w)
			joint = words & keywords
			scores.append(len(joint)/len(words))
			#total_joint += len(joint)
			#total += len(words)
		return np.mean(scores), np.std(scores)/np.sqrt(len(scores))
		#return total_joint/total
	def test(self, kw=100):
		"""Plot metric vs human score over 100-sentence buckets of sorted_avg.tsv."""
		import matplotlib.pyplot as plt
		txts = []
		labels = []
		for line in open(self.fld + '/sorted_avg.tsv', encoding='utf-8'):
			txt, label = line.strip('\n').split('\t')
			txts.append(txt)
			labels.append(float(label))
		i0 = 0
		human = []
		pred = []
		while True:
			i1 = i0 + 100
			if i1 >= len(txts):
				break
			human.append(np.mean(labels[i0:i1]))
			# NOTE(review): corpus_score returns a (mean, se) tuple; appending the
			# tuple here feeds 2-tuples into plot/corrcoef — possibly should be [0]
			pred.append(self.corpus_score(txts[i0:i1], kw=kw))
			i0 = i1
		plt.plot(human, pred, '.')
		plt.xlabel('human')
		# NOTE(review): second xlabel overwrites the first; likely meant ylabel
		plt.xlabel('metric (ratio of keywords)')
		plt.title('corr = %.4f'%np.corrcoef(human, pred)[0][1])
		plt.savefig(self.fld + '/test_corr_kw%i.png'%kw)
if __name__ == '__main__':
	# e.g. `python src/classifier.py classifier/Reddit_vs_arXiv/neural' for interaction
	# e.g. `python src/classifier.py classifier/Reddit_vs_arXiv/neural path/to/hyp/file.tsv' for evaluating a file
	fld_model = sys.argv[1] # e.g.
	if len(sys.argv) == 2:
		# no extra arg: interactive scoring loop
		clf_interact(fld_model)
	elif len(sys.argv) == 3:
		# extra arg: batch-evaluate the given TSV of hypotheses
		path_hyp = sys.argv[2]
		clf_eval(fld_model, path_hyp)
StyleFusion | StyleFusion-master/src/dataset.py | from shared import *
"""
AUTHOR:
Sean Xiang Gao (xiag@microsoft.com) at Microsoft Research
"""
def load_vocab(path):
	"""Load a vocab file (one token per line) into id<->token maps.

	Ids start at 1 because 0 is reserved for PAD. The special tokens
	SOS/EOS/UNK must be present.
	"""
	index2token = dict()
	token2index = dict()
	with io.open(path, encoding='utf-8') as f:
		for ix, line in enumerate(f, 1):	# start from 1, as 0 reserved for PAD
			token = line.strip('\n').strip()
			index2token[ix] = token
			token2index[token] = ix
	assert(SOS_token in token2index)
	assert(EOS_token in token2index)
	assert(UNK_token in token2index)
	return index2token, token2index
class Dataset:
	"""Streams numeric training data from `<fld_data>/{base,bias}_{conv,nonc}_{sub}.num`.

	conv files: tab-separated (context ids, response ids) pairs;
	nonc files: one id sequence per line. File handles are kept open and
	rewound (reset) when exhausted, so feeding is an endless stream.
	"""
	def __init__(self,
		fld_data,
		max_ctxt_len=93,
		max_resp_len=30,
		vocab_only=False,
		noisy_vocab=-1,
		noisy_AE_src=True,
		noisy_bias=True, # whether add UNK noise to bias data (conv and nonc, src and tgt)
		):
		self.max_ctxt_len = max_ctxt_len
		self.max_resp_len = max_resp_len
		self.noisy_vocab = noisy_vocab
		self.noisy_AE_src = noisy_AE_src
		self.noisy_bias = noisy_bias
		types = ['base_conv','bias_conv', 'base_nonc', 'bias_nonc']
		self.fld_data = fld_data
		self.path_vocab = fld_data + '/vocab.txt'
		self.index2token, self.token2index = load_vocab(self.path_vocab)
		self.num_tokens = len(self.token2index) # not including 0-th
		if self.noisy_vocab > 0:
			# keep-probability decays with token id (rarer ids more likely -> UNK);
			# assumes ids are frequency-ordered — see add_unk_noise
			self.prob_keep = dict()
			for ix in self.index2token:
				self.prob_keep[ix] = np.exp(-ix/self.noisy_vocab)
		if vocab_only:
			return
		self.paths = dict()
		self.files = dict()
		self.n_reset = dict()
		for sub in ['train', 'vali', 'test']:
			self.paths[sub] = dict()
			self.files[sub] = dict()
			self.n_reset[sub] = dict()
			for tp in types:
				self.n_reset[sub][tp] = -1
				self.paths[sub][tp] = fld_data + '/%s_%s.num'%(tp, sub)
				self.reset(sub, tp)
		for k in self.files:
			print(k, self.files[k].keys())
	def reset(self, sub, tp=None):
		"""Reopen the file(s) for split `sub` (all types if tp is None).

		Files that are missing or start with an empty line are skipped,
		so self.files[sub] only contains usable data sources.
		"""
		if tp is None:
			types = self.files[sub].keys()
		else:
			types = [tp]
		for tp in types:
			if os.path.exists(self.paths[sub][tp]):
				line = open(self.paths[sub][tp]).readline().strip('\n')
				if len(line) > 0:
					self.files[sub][tp] = open(self.paths[sub][tp])
					self.n_reset[sub][tp] += 1
	def seq2txt(self, seq):
		"""Token-id sequence -> space-joined text (PAD/0 entries skipped)."""
		words = []
		for j in seq:
			if j == 0: # skip PAD
				continue
			words.append(self.index2token[int(j)])
		return ' '.join(words)
	def txt2seq(self, text):
		"""Space-separated text -> token-id list (unknown tokens -> UNK id)."""
		tokens = text.strip().split()
		seq = []
		for token in tokens:
			seq.append(self.token2index.get(token, self.token2index[UNK_token]))
		return seq
	def seqs2enc(self, seqs, max_len):
		"""Encoder input: zero-padded [n, max_len] id matrix (truncates long seqs)."""
		inp = np.zeros((len(seqs), max_len))
		for i, seq in enumerate(seqs):
			for t in range(min(max_len, len(seq))):
				inp[i, t] = seq[t]
		return inp
	def seqs2dec(self, seqs, max_len):
		"""Decoder tensors: (teacher-forcing input ids, one-hot shifted targets)."""
		# len: +2 as will 1) add EOS and 2) shift to right by 1 time step
		# vocab: +1 as mask_zero (token_id == 0 means PAD)
		ix_SOS = self.token2index[SOS_token]
		ix_EOS = self.token2index[EOS_token]
		inp = np.zeros((len(seqs), max_len + 2))
		out = np.zeros((len(seqs), max_len + 2, self.num_tokens + 1))
		for i, seq in enumerate(seqs):
			seq = seq[:min(max_len, len(seq))]
			for t, token_index in enumerate(seq):
				inp[i, t + 1] = token_index # shift 1 time step
				out[i, t, token_index] = 1.
			inp[i, 0] = ix_SOS # inp starts with EOS
			out[i, len(seq), ix_EOS] = 1. # out ends with EOS
		return inp, out
	def skip(self, max_n, mix_ratio, conv_only=False):
		"""Advance the train-file cursors past ~max_n samples per data kind,
		split between base and bias streams according to mix_ratio."""
		sub = 'train'
		if isinstance(mix_ratio, int) or isinstance(mix_ratio, float):
			mix_ratio = (mix_ratio,)
		def _read(tp, n, m):
			# consume lines from file tp until m reaches n or EOF
			for _ in self.files[sub][tp]:
				if m >= n:
					break
				m += 1
				if m%1e5 == 0:
					print('%s skipped %.2f M'%(tp, m/1e6))
			return m
		m = dict()
		suffix = ['conv']
		if not conv_only:
			suffix.append('nonc')
		for i in range(len(suffix)):
			suf = suffix[i]
			for tp, n in [
				('base_'+suf, max_n * (1. - mix_ratio[i])),
				('bias_'+suf, max_n * mix_ratio[i])
				]:
				m[tp] = 0
				if n < 1 or tp not in self.files[sub]:
					continue
				while m[tp] < n:
					m_ = _read(tp, n, m[tp])
					if m_ == m[tp]:
						# no progress -> hit EOF; rewind and continue
						self.reset(sub, tp)
					m[tp] = m_
					if m_ >= n:
						break
		print('conv skipped %.2f M'%((m['base_conv'] + m['bias_conv'])/1e6))
		if not conv_only:
			print('nonc skipped %.2f M'%((m['base_nonc'] + m['bias_nonc'])/1e6))
	def add_unk_noise(self, seqs):
		"""Randomly replace tokens by UNK with probability 1 - prob_keep[ix].

		No-op when noisy_vocab < 0 or seqs is empty. Prints the before/after
		UNK rate for monitoring.
		"""
		if self.noisy_vocab < 0 or len(seqs) == 0:
			return seqs
		ix_unk = self.token2index[UNK_token]
		ret = []
		n = 0
		old_n_unk = 0
		new_n_unk = 0
		for seq in seqs:
			noisy = []
			n += len(seq)
			for ix in seq:
				old_n_unk += (ix == ix_unk)
				if np.random.random() > self.prob_keep[ix]:
					noisy.append(ix_unk)
				else:
					noisy.append(ix)
				new_n_unk += (noisy[-1] == ix_unk)
			ret.append(noisy)
		print('unk increased from %.2f to %.2f'%(old_n_unk/n, new_n_unk/n))
		return ret
	def feed_data(self, sub, max_n, check_src=False, mix_ratio=(0.,0.), conv_only=False):
		"""Read ~max_n conv (and nonc) samples, mix base/bias streams, and
		return a dict of encoder/decoder tensors trimmed to a multiple of
		BATCH_SIZE.

		check_src: skip consecutive samples sharing the same context.
		mix_ratio: (conv bias fraction, nonc bias fraction).
		"""
		if isinstance(mix_ratio, int) or isinstance(mix_ratio, float):
			mix_ratio = (mix_ratio,)
		print('loading data, check_src = %s, mix_ratio = %s'%(check_src, mix_ratio))
		# load conversation data -------------
		def _read_conv(tp, n, prev_ctxt, seqs):
			for line in self.files[sub][tp]:
				if len(seqs) >= n:
					break
				tt = line.strip('\n').split('\t')
				if len(tt) != 2:
					continue
				seq_ctxt, seq_resp = tt
				if check_src and (seq_ctxt == prev_ctxt):
					continue
				prev_ctxt = seq_ctxt
				seq_ctxt = [int(k) for k in seq_ctxt.split()]
				seq_resp = [int(k) for k in seq_resp.split()]
				# keep the TAIL of the context but the HEAD of the response
				seq_ctxt = seq_ctxt[-min(len(seq_ctxt), self.max_ctxt_len):]
				seq_resp = seq_resp[:min(len(seq_resp), self.max_resp_len)]
				seqs.append((seq_ctxt, seq_resp))
			return seqs, prev_ctxt
		# get conv from different tp
		seqs = dict()
		for tp, n in [('base_conv', max_n * (1. - mix_ratio[0])), ('bias_conv', max_n * mix_ratio[0])]:
			seqs[tp] = []
			if n < 1 or tp not in self.files[sub]:
				continue
			prev_ctxt = ''
			while True:
				m = len(seqs[tp])
				seqs[tp], prev_ctxt = _read_conv(tp, n, prev_ctxt, seqs[tp])
				if len(seqs[tp]) >= n:
					break
				if len(seqs[tp]) == m:
					# no progress -> EOF; rewind and keep reading
					self.reset(sub, tp)
			print('conv from %s: %i/%i'%(tp, len(seqs[tp]), n))
		if 'bias_conv' in seqs and self.noisy_bias:
			seqs_ctxt = self.add_unk_noise([seq for seq, _ in seqs['bias_conv']])
			seqs_resp = self.add_unk_noise([seq for _, seq in seqs['bias_conv']])
			seqs['bias_conv'] = [(seqs_ctxt[i], seqs_resp[i]) for i in range(len(seqs['bias_conv']))]
		# then mix them
		ids = []
		for tp in seqs:
			ids += [(tp, i) for i in range(len(seqs[tp]))]
		np.random.shuffle(ids)
		seqs_ctxt = []
		seqs_resp = []
		for tp, i in ids:
			seqs_ctxt.append(seqs[tp][i][0])
			seqs_resp.append(seqs[tp][i][1])
		inp_enc_ctxt = self.seqs2enc(seqs_ctxt, self.max_ctxt_len)
		if self.noisy_AE_src:
			inp_enc_resp = self.seqs2enc(self.add_unk_noise(seqs_resp), self.max_resp_len)
		else:
			inp_enc_resp = self.seqs2enc(seqs_resp, self.max_resp_len)
		inp_dec_resp, out_dec_resp = self.seqs2dec(seqs_resp, self.max_resp_len)
		n_sample_conv = len(ids)
		d_inp_enc = {'ctxt':inp_enc_ctxt, 'resp':inp_enc_resp}
		d_inp_dec = {'resp':inp_dec_resp}
		d_out_dec = {'resp':out_dec_resp}
		def get_ret(n, dd):
			# trim everything to a whole number of batches
			n = BATCH_SIZE * int(n/BATCH_SIZE)
			ret = {'n_sample':n}
			for d_name in dd:
				d = dd[d_name]
				for k in d:
					if isinstance(d[k], list):
						d[k] = d[k][:n]
					else:
						d[k] = d[k][:n, :]
				ret[d_name] = d
			return ret
		if conv_only:
			return get_ret(n_sample_conv, {
				'inp_enc':d_inp_enc,
				'inp_dec':d_inp_dec,
				'out_dec':d_out_dec,
				'seqs':{'resp':seqs_resp},
				})
		# load non-conversation (nonc) data -------------
		def _read_nonc(tp, n, seqs):
			for line in self.files[sub][tp]:
				if len(seqs) >= n:
					break
				seq = [int(k) for k in line.strip('\n').split()]
				seq = seq[:min(len(seq), self.max_resp_len)]
				seqs.append(seq)
			return seqs
		# get nonc from different tp
		seqs = dict()
		for tp, n in [('base_nonc', max_n * (1. - mix_ratio[1])), ('bias_nonc', max_n * mix_ratio[1])]:
			seqs[tp] = []
			if n < 1 or tp not in self.files[sub]:
				continue
			while True:
				m = len(seqs[tp])
				seqs[tp] = _read_nonc(tp, n, seqs[tp])
				if len(seqs[tp]) >= n:
					break
				if len(seqs[tp]) == m:
					self.reset(sub, tp)
			print('nonc from %s: %i/%i'%(tp, len(seqs[tp]), n))
		if 'bias_nonc' in seqs and self.noisy_bias:
			seqs['bias_nonc'] = self.add_unk_noise(seqs['bias_nonc'])
		seqs_nonc = seqs['base_nonc'] + seqs['bias_nonc']
		np.random.shuffle(seqs_nonc)
		if self.noisy_AE_src:
			inp_enc_nonc = self.seqs2enc(self.add_unk_noise(seqs_nonc), self.max_resp_len)
		else:
			inp_enc_nonc = self.seqs2enc(seqs_nonc, self.max_resp_len)
		inp_dec_nonc, out_dec_nonc = self.seqs2dec(seqs_nonc, self.max_resp_len)
		d_inp_enc['nonc'] = inp_enc_nonc
		d_inp_dec['nonc'] = inp_dec_nonc
		d_out_dec['nonc'] = out_dec_nonc
		n_sample = min(n_sample_conv, len(seqs_nonc))
		return get_ret(n_sample, {
			'inp_enc':d_inp_enc,
			'inp_dec':d_inp_dec,
			'out_dec':d_out_dec,
			'seqs':{'resp':seqs_resp, 'nonc':seqs_nonc},
			})
| 9,117 | 26.299401 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.