| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q20000
|
modflow_hob_to_instruction_file
|
train
|
def modflow_hob_to_instruction_file(hob_file):
"""write an instruction file for a modflow head observation file
Parameters
----------
hob_file : str
modflow hob file
Returns
-------
df : pandas.DataFrame
pandas DataFrame with control file observation information
"""
hob_df = pd.read_csv(hob_file,delim_whitespace=True,skiprows=1,
header=None,names=["simval","obsval","obsnme"])
hob_df.loc[:,"obsnme"] = hob_df.obsnme.apply(str.lower)
hob_df.loc[:,"ins_line"] = hob_df.obsnme.apply(lambda x:"l1 !{0:s}!".format(x))
hob_df.loc[0,"ins_line"] = hob_df.loc[0,"ins_line"].replace('l1','l2')
ins_file = hob_file + ".ins"
f_ins = open(ins_file, 'w')
f_ins.write("pif ~\n")
f_ins.write(hob_df.loc[:,["ins_line"]].to_string(col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT]) + '\n')
hob_df.loc[:,"weight"] = 1.0
hob_df.loc[:,"obgnme"] = "obgnme"
f_ins.close()
return hob_df
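# a minimal usage sketch (hypothetical file name): assumes a MODFLOW HOB output
# file "model.hob.out" exists in the cwd; the call writes "model.hob.out.ins" and
# returns a dataframe ready to seed the observation section of a PEST control file
# >>> hob_df = modflow_hob_to_instruction_file("model.hob.out")
# >>> hob_df.loc[:, ["obsnme", "obsval", "weight", "obgnme"]].head()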
|
python
|
{
"resource": ""
}
|
q20001
|
modflow_hydmod_to_instruction_file
|
train
|
def modflow_hydmod_to_instruction_file(hydmod_file):
"""write an instruction file for a modflow hydmod file
Parameters
----------
hydmod_file : str
modflow hydmod file
Returns
-------
df : pandas.DataFrame
pandas DataFrame with control file observation information
Note
----
calls modflow_read_hydmod_file()
"""
hydmod_df, hydmod_outfile = modflow_read_hydmod_file(hydmod_file)
hydmod_df.loc[:,"ins_line"] = hydmod_df.obsnme.apply(lambda x:"l1 w !{0:s}!".format(x))
ins_file = hydmod_outfile + ".ins"
with open(ins_file, 'w') as f_ins:
f_ins.write("pif ~\nl1\n")
f_ins.write(hydmod_df.loc[:,["ins_line"]].to_string(col_space=0,
columns=["ins_line"],
header=False,
index=False,
formatters=[SFMT]) + '\n')
hydmod_df.loc[:,"weight"] = 1.0
hydmod_df.loc[:,"obgnme"] = "obgnme"
try:
os.system("inschek {0}.ins {0}".format(hydmod_outfile))
except:
print("error running inschek")
obs_obf = hydmod_outfile + ".obf"
if os.path.exists(obs_obf):
df = pd.read_csv(obs_obf,delim_whitespace=True,header=None,names=["obsnme","obsval"])
df.loc[:,"obgnme"] = df.obsnme.apply(lambda x: x[:-9])
df.to_csv("_setup_"+os.path.split(hydmod_outfile)[-1]+'.csv',index=False)
df.index = df.obsnme
return df
return hydmod_df
|
python
|
{
"resource": ""
}
|
q20002
|
modflow_read_hydmod_file
|
train
|
def modflow_read_hydmod_file(hydmod_file, hydmod_outfile=None):
""" read in a binary hydmod file and return a dataframe of the results
Parameters
----------
hydmod_file : str
modflow hydmod binary file
hydmod_outfile : str
output file to write. If None, use <hydmod_file>.dat.
Default is None
Returns
-------
df : pandas.DataFrame
pandas DataFrame with hydmod_file values
Note
----
requires flopy
"""
try:
import flopy.utils as fu
except Exception as e:
print('flopy is not installed - cannot read {0}\n{1}'.format(hydmod_file, e))
return
#print('Starting to read HYDMOD data from {0}'.format(hydmod_file))
obs = fu.HydmodObs(hydmod_file)
hyd_df = obs.get_dataframe()
hyd_df.columns = [i[2:] if i.lower() != 'totim' else i for i in hyd_df.columns]
#hyd_df.loc[:,"datetime"] = hyd_df.index
hyd_df['totim'] = hyd_df.index.map(lambda x: x.strftime("%Y%m%d"))
hyd_df.rename(columns={'totim': 'datestamp'}, inplace=True)
# reshape into a single column
hyd_df = pd.melt(hyd_df, id_vars='datestamp')
hyd_df.rename(columns={'value': 'obsval'}, inplace=True)
hyd_df['obsnme'] = [i.lower() + '_' + j.lower() for i, j in zip(hyd_df.variable, hyd_df.datestamp)]
vc = hyd_df.obsnme.value_counts().sort_values()
vc = list(vc.loc[vc>1].index.values)
if len(vc) > 0:
hyd_df.to_csv("hyd_df.duplciates.csv")
obs.get_dataframe().to_csv("hyd_org.duplicates.csv")
raise Exception("duplicates in obsnme:{0}".format(vc))
#assert hyd_df.obsnme.value_counts().max() == 1,"duplicates in obsnme"
if not hydmod_outfile:
hydmod_outfile = hydmod_file + '.dat'
hyd_df.to_csv(hydmod_outfile, columns=['obsnme','obsval'], sep=' ',index=False)
#hyd_df = hyd_df[['obsnme','obsval']]
return hyd_df[['obsnme','obsval']], hydmod_outfile
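# a minimal usage sketch (hypothetical file name): assumes a binary hydmod file
# "model.hyd.bin" exists and flopy is installed; writes "model.hyd.bin.dat" and
# returns the obsnme/obsval frame plus the output file name
# >>> hyd_df, hyd_out = modflow_read_hydmod_file("model.hyd.bin")
# >>> hyd_df.head()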
|
python
|
{
"resource": ""
}
|
q20003
|
apply_mtlist_budget_obs
|
train
|
def apply_mtlist_budget_obs(list_filename,gw_filename="mtlist_gw.dat",
sw_filename="mtlist_sw.dat",
start_datetime="1-1-1970"):
""" process an MT3D list file to extract mass budget entries.
Parameters
----------
list_filename : str
the mt3d list file
gw_filename : str
the name of the output file with gw mass budget information.
Default is "mtlist_gw.dat"
sw_filename : str
the name of the output file with sw mass budget information.
Default is "mtlist_sw.dat"
start_datetime : str
a str that can be cast to a pandas.Timestamp. Used to give
observations a meaningful name
Returns
-------
gw : pandas.DataFrame
the gw mass dataframe
sw : pandas.DataFrame (optional)
the sw mass dataframe
Note
----
requires flopy
if SFT is not active, no SW mass budget will be returned
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mt = flopy.utils.MtListBudget(list_filename)
gw, sw = mt.parse(start_datetime=start_datetime, diff=True)
gw = gw.drop([col for col in gw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))], axis=1)
gw.to_csv(gw_filename, sep=' ', index_label="datetime", date_format="%Y%m%d")
if sw is not None:
sw = sw.drop([col for col in sw.columns
for drop_col in ["kper", "kstp", "tkstp"]
if (col.lower().startswith(drop_col))], axis=1)
sw.to_csv(sw_filename, sep=' ', index_label="datetime", date_format="%Y%m%d")
return gw, sw
|
python
|
{
"resource": ""
}
|
q20004
|
setup_mflist_budget_obs
|
train
|
def setup_mflist_budget_obs(list_filename,flx_filename="flux.dat",
vol_filename="vol.dat",start_datetime="1-1'1970",prefix='',
save_setup_file=False):
""" setup observations of budget volume and flux from modflow list file. writes
an instruction file and also a _setup_.csv to use when constructing a pest
control file
Parameters
----------
list_filename : str
modflow list file
flx_filename : str
output filename that will contain the budget flux observations. Default is
"flux.dat"
vol_filename : str
output filename that will contain the budget volume observations. Default
is "vol.dat"
start_datetime : str
an str that can be parsed into a pandas.TimeStamp. used to give budget
observations meaningful names
prefix : str
a prefix to add to the water budget observations. Useful if processing
more than one list file as part of the forward run process. Default is ''.
save_setup_file : bool
a flag to save _setup_<list_filename>.csv file that contains useful
control file information
Returns
-------
df : pandas.DataFrame
a dataframe with information for constructing a control file. If INSCHEK fails
to run, returns None
Note
----
This function uses INSCHEK to get observation values; the observation values are
the values of the list file list_filename. If INSCHEK fails to run, the observation
values are set to 1.0E+10
the instruction files are named <flx_filename>.ins and <vol_filename>.ins, respectively
It is recommended to use the default values for flx_filename and vol_filename.
"""
flx,vol = apply_mflist_budget_obs(list_filename,flx_filename,vol_filename,
start_datetime)
_write_mflist_ins(flx_filename+".ins",flx,prefix+"flx")
_write_mflist_ins(vol_filename+".ins",vol, prefix+"vol")
#run("inschek {0}.ins {0}".format(flx_filename))
#run("inschek {0}.ins {0}".format(vol_filename))
try:
#os.system("inschek {0}.ins {0}".format(flx_filename))
#os.system("inschek {0}.ins {0}".format(vol_filename))
run("inschek {0}.ins {0}".format(flx_filename))
run("inschek {0}.ins {0}".format(vol_filename))
except:
print("error running inschek")
return None
flx_obf = flx_filename+".obf"
vol_obf = vol_filename + ".obf"
if os.path.exists(flx_obf) and os.path.exists(vol_obf):
df = pd.read_csv(flx_obf,delim_whitespace=True,header=None,names=["obsnme","obsval"])
df.loc[:,"obgnme"] = df.obsnme.apply(lambda x: x[:-9])
df2 = pd.read_csv(vol_obf, delim_whitespace=True, header=None, names=["obsnme", "obsval"])
df2.loc[:, "obgnme"] = df2.obsnme.apply(lambda x: x[:-9])
df = df.append(df2)
if save_setup_file:
df.to_csv("_setup_"+os.path.split(list_filename)[-1]+'.csv',index=False)
df.index = df.obsnme
return df
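# a minimal usage sketch (hypothetical file name): assumes "model.list" is a
# MODFLOW list file and inschek is available on the system path; writes
# flux.dat/vol.dat and their .ins files and returns control file information
# >>> df = setup_mflist_budget_obs("model.list", start_datetime="1-1-1970",
# ...                              save_setup_file=True)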
|
python
|
{
"resource": ""
}
|
q20005
|
apply_mflist_budget_obs
|
train
|
def apply_mflist_budget_obs(list_filename,flx_filename="flux.dat",
vol_filename="vol.dat",
start_datetime="1-1-1970"):
""" process a MODFLOW list file to extract flux and volume water budget entries.
Parameters
----------
list_filename : str
the modflow list file
flx_filename : str
the name of the output file with water budget flux information.
Default is "flux.dat"
vol_filename : str
the name of the output file with water budget volume information.
Default is "vol.dat"
start_datetime : str
a str that can be cast to a pandas.Timestamp. Used to give
observations a meaningful name
Returns
-------
flx : pandas.DataFrame
the flux dataframe
vol : pandas.DataFrame
the volume dataframe
Note
----
requires flopy
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
mlf = flopy.utils.MfListBudget(list_filename)
flx,vol = mlf.get_dataframes(start_datetime=start_datetime,diff=True)
flx.to_csv(flx_filename,sep=' ',index_label="datetime",date_format="%Y%m%d")
vol.to_csv(vol_filename,sep=' ',index_label="datetime",date_format="%Y%m%d")
return flx,vol
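# a minimal usage sketch (hypothetical file name): typically called from
# forward_run.py after the MODFLOW run completes; assumes "model.list" exists
# and flopy is installed
# >>> flx, vol = apply_mflist_budget_obs("model.list")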
|
python
|
{
"resource": ""
}
|
q20006
|
apply_hds_obs
|
train
|
def apply_hds_obs(hds_file):
""" process a modflow head save file. A companion function to
setup_hds_obs that is called during the forward run process
Parameters
----------
hds_file : str
a modflow head save filename. if hds_file ends with 'ucn',
then the file is treated as a UcnFile type.
Note
----
requires flopy
writes <hds_file>.dat
expects <hds_file>.dat.ins to exist
uses pyemu.pst_utils.parse_ins_file to get observation names
"""
try:
import flopy
except Exception as e:
raise Exception("apply_hds_obs(): error importing flopy: {0}".\
format(str(e)))
from .. import pst_utils
assert os.path.exists(hds_file)
out_file = hds_file+".dat"
ins_file = out_file + ".ins"
assert os.path.exists(ins_file)
df = pd.DataFrame({"obsnme":pst_utils.parse_ins_file(ins_file)})
df.index = df.obsnme
# populate metadata
items = ["k","i","j","kper"]
for i,item in enumerate(items):
df.loc[:,item] = df.obsnme.apply(lambda x: int(x.split('_')[i+1]))
if hds_file.lower().endswith('ucn'):
hds = flopy.utils.UcnFile(hds_file)
else:
hds = flopy.utils.HeadFile(hds_file)
kpers = df.kper.unique()
df.loc[:,"obsval"] = np.NaN
for kper in kpers:
kstp = last_kstp_from_kper(hds,kper)
data = hds.get_data(kstpkper=(kstp,kper))
#jwhite 15jan2018 fix for really large values that are getting some
#trash added to them...
data[np.isnan(data)] = 0.0
data[data>1.0e+20] = 1.0e+20
data[data<-1.0e+20] = -1.0e+20
df_kper = df.loc[df.kper==kper,:]
df.loc[df_kper.index,"obsval"] = data[df_kper.k,df_kper.i,df_kper.j]
assert df.dropna().shape[0] == df.shape[0]
df.loc[:,["obsnme","obsval"]].to_csv(out_file,index=False,sep=" ")
|
python
|
{
"resource": ""
}
|
q20007
|
modflow_sfr_gag_to_instruction_file
|
train
|
def modflow_sfr_gag_to_instruction_file(gage_output_file, ins_file=None, parse_filename=False):
"""writes an instruction file for an SFR gage output file to read Flow only at all times
Parameters
----------
gage_output_file : str
the gage output filename (ASCII).
ins_file : str
the name of the instruction file to create. If None, the name
is <gage_output_file>.ins. Default is None
parse_filename : bool
if True, get the gage_num parameter by parsing the gage output file filename
if False, get the gage number from the file itself
Returns
-------
df : pandas.DataFrame
a dataframe with obsnme and obsval for the sfr simulated flows.
If inschek was not successfully run, then returns None
ins_file : str
file name of instructions file relating to gage output.
obs_file : str
file name of processed gage output for all times
Note
----
sets up observations for gage outputs only for the Flow column.
if parse_filename is True, only text up to the first '.' is used as the gage_num
TODO : allow other observation types and align explicitly with times - now returns all values
"""
if ins_file is None:
ins_file = gage_output_file + '.ins'
# navigate the file to be sure the header makes sense
indat = [line.strip() for line in open(gage_output_file, 'r').readlines()]
header = [i for i in indat if i.startswith('"')]
# yank out the gage number to identify the observation names
if parse_filename:
gage_num = os.path.basename(gage_output_file).split('.')[0]
else:
gage_num = re.sub("[^0-9]", "", indat[0].lower().split("gage no.")[-1].strip().split()[0])
# get the column names
cols = [i.lower() for i in header if 'data' in i.lower()][0].lower().replace('"', '').replace('data:', '').split()
# make sure "Flow" is included in the columns
if 'flow' not in cols:
raise Exception('Requested field "Flow" not in gage output columns')
# find which column is for "Flow"
flowidx = np.where(np.array(cols) == 'flow')[0][0]
# write out the instruction file lines
inslines = ['l1 ' + (flowidx + 1) * 'w ' + '!g{0}_{1:d}!'.format(gage_num, j)
for j in range(len(indat) - len(header))]
inslines[0] = inslines[0].replace('l1', 'l{0:d}'.format(len(header) + 1))
# write the instruction file
with open(ins_file, 'w') as ofp:
ofp.write('pif ~\n')
[ofp.write('{0}\n'.format(line)) for line in inslines]
df = _try_run_inschek(ins_file, gage_output_file)
if df is not None:
return df, ins_file, gage_output_file
else:
print("Inschek didn't run so nothing returned")
return None
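# a minimal usage sketch (hypothetical file name): assumes an SFR gage output
# file "sfr_gage_1.out" exists and that inschek runs successfully, so three
# values are returned
# >>> df, ins_file, obs_file = modflow_sfr_gag_to_instruction_file("sfr_gage_1.out")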
|
python
|
{
"resource": ""
}
|
q20008
|
Schur.pandas
|
train
|
def pandas(self):
"""get a pandas dataframe of prior and posterior for all predictions
Returns:
pandas.DataFrame : pandas.DataFrame
a dataframe with prior and posterior uncertainty estimates
for all forecasts (predictions)
"""
names,prior,posterior = [],[],[]
for iname,name in enumerate(self.posterior_parameter.row_names):
names.append(name)
posterior.append(np.sqrt(float(
self.posterior_parameter[iname, iname].x)))
iprior = self.parcov.row_names.index(name)
prior.append(np.sqrt(float(self.parcov[iprior, iprior].x)))
for pred_name, pred_var in self.posterior_prediction.items():
names.append(pred_name)
posterior.append(np.sqrt(pred_var))
prior.append(self.prior_prediction[pred_name])
return pd.DataFrame({"posterior": posterior, "prior": prior},
index=names)
|
python
|
{
"resource": ""
}
|
q20009
|
Schur.map_parameter_estimate
|
train
|
def map_parameter_estimate(self):
""" get the posterior expectation for parameters using Bayes linear
estimation
Returns
-------
post_expt : pandas.DataFrame
a dataframe with prior and posterior parameter expectations
"""
res = self.pst.res
assert res is not None
# build the prior expectation parameter vector
prior_expt = self.pst.parameter_data.loc[:,["parval1"]].copy()
islog = self.pst.parameter_data.partrans == "log"
prior_expt.loc[islog] = prior_expt.loc[islog].apply(np.log10)
prior_expt = Matrix.from_dataframe(prior_expt)
prior_expt.col_names = ["prior_expt"]
# build the residual vector
res_vec = Matrix.from_dataframe(res.loc[:,["residual"]])
# form the terms of Schur's complement
b = self.parcov * self.jco.T
c = ((self.jco * self.parcov * self.jco.T) + self.obscov).inv
bc = Matrix((b * c).x, row_names=b.row_names, col_names=c.col_names)
# calc posterior expectation
upgrade = bc * res_vec
upgrade.col_names = ["prior_expt"]
post_expt = prior_expt + upgrade
# post processing - back log transform
post_expt = pd.DataFrame(data=post_expt.x,index=post_expt.row_names,
columns=["post_expt"])
post_expt.loc[:,"prior_expt"] = prior_expt.x.flatten()
post_expt.loc[islog,:] = 10.0**post_expt.loc[islog,:]
# unc_sum = self.get_parameter_summary()
# post_expt.loc[:,"standard_deviation"] = unc_sum.post_var.apply(np.sqrt)
post_expt.sort_index(inplace=True)
return post_expt
|
python
|
{
"resource": ""
}
|
q20010
|
Schur.get_parameter_summary
|
train
|
def get_parameter_summary(self,include_map=False):
"""get a summary of the parameter uncertainty
Parameters
----------
include_map : bool
if True, add the prior and posterior expectations
and report standard deviation instead of variance
Returns
-------
pandas.DataFrame : pandas.DataFrame
dataframe of prior,posterior variances and percent
uncertainty reduction of each parameter
Note
----
this is the primary method for accessing parameter uncertainty
estimates - use this!
Example
-------
``>>>import matplotlib.pyplot as plt``
``>>>import pyemu``
``>>>sc = pyemu.Schur(jco="pest.jcb")``
``>>>sc = pyemu.Schur(jco="pest.jcb",forecasts=["fore1","fore2"])``
``>>>par_sum = sc.get_parameter_summary()``
``>>>par_sum.plot(kind="bar")``
``>>>plt.show()``
"""
prior_mat = self.parcov.get(self.posterior_parameter.col_names)
if prior_mat.isdiagonal:
prior = prior_mat.x.flatten()
else:
prior = np.diag(prior_mat.x)
post = np.diag(self.posterior_parameter.x)
if include_map:
par_data = self.map_parameter_estimate
prior = pd.DataFrame(data=prior,index=prior_mat.col_names)
islog = self.pst.parameter_data.partrans == "log"
par_data.loc[islog,:] = np.log10(par_data.loc[islog,:])
par_data.loc[:,"prior_stdev"] = prior
post = pd.DataFrame(data=post,index=prior.index)
par_data.loc[:,"post_stdev"] = post
par_data.loc[:,"is_log"] = islog
return par_data
else:
ureduce = 100.0 * (1.0 - (post / prior))
return pd.DataFrame({"prior_var":prior,"post_var":post,
"percent_reduction":ureduce},
index=self.posterior_parameter.col_names)
|
python
|
{
"resource": ""
}
|
q20011
|
Schur.get_forecast_summary
|
train
|
def get_forecast_summary(self, include_map=False):
"""get a summary of the forecast uncertainty
Parameters
----------
include_map : bool
if True, add the prior and posterior expectations
and report standard deviation instead of variance
Returns
-------
pandas.DataFrame : pandas.DataFrame
dataframe of prior,posterior variances and percent
uncertainty reduction of each forecast
Note
----
this is the primary method for accessing forecast uncertainty
estimates - use this!
Example
-------
``>>>import matplotlib.pyplot as plt``
``>>>import pyemu``
This usage assumes you have set the ``++forecasts()`` argument in the
control file:
``>>>sc = pyemu.Schur(jco="pest.jcb")``
or, you can pass the forecasts directly, assuming the forecasts are
names of zero-weight observations:
``>>>sc = pyemu.Schur(jco="pest.jcb",forecasts=["fore1","fore2"])``
``>>>fore_sum = sc.get_forecast_summary()``
``>>>fore_sum.plot(kind="bar")``
``>>>plt.show()``
"""
sum = {"prior_var":[], "post_var":[], "percent_reduction":[]}
for forecast in self.prior_forecast.keys():
pr = self.prior_forecast[forecast]
pt = self.posterior_forecast[forecast]
ur = 100.0 * (1.0 - (pt/pr))
sum["prior_var"].append(pr)
sum["post_var"].append(pt)
sum["percent_reduction"].append(ur)
df = pd.DataFrame(sum,index=self.prior_forecast.keys())
if include_map:
df.loc[:,"prior_stdev"] = df.pop("prior_var").apply(np.sqrt)
df.loc[:,"post_stdev"] = df.pop("post_var").apply(np.sqrt)
df.pop("percent_reduction")
forecast_map = self.map_forecast_estimate
df.loc[:,"prior_expt"] = forecast_map.prior_expt
df.loc[:,"post_expt"] = forecast_map.post_expt
return df
return pd.DataFrame(sum,index=self.prior_forecast.keys())
|
python
|
{
"resource": ""
}
|
q20012
|
Schur.__contribution_from_parameters
|
train
|
def __contribution_from_parameters(self, parameter_names):
"""private method get the prior and posterior uncertainty reduction as a result of
some parameter becoming perfectly known
Parameters
----------
parameter_names : list
parameter that are perfectly known
Returns
-------
dict : dict
dictionary of forecast name, [prior uncertainty w/o parameter_names,
% posterior uncertainty w/o parameter names]
Note
----
this method is used by the get_par_contribution() method - don't
call this method directly
"""
#get the prior and posterior for the base case
bprior,bpost = self.prior_prediction, self.posterior_prediction
#get the prior and posterior for the conditioned case
la_cond = self.get_conditional_instance(parameter_names)
cprior,cpost = la_cond.prior_prediction, la_cond.posterior_prediction
return cprior,cpost
|
python
|
{
"resource": ""
}
|
q20013
|
Schur.get_conditional_instance
|
train
|
def get_conditional_instance(self, parameter_names):
""" get a new Schur instance that includes conditional update from
some parameters becoming known perfectly
Parameters
----------
parameter_names : list
parameters that are to be treated as notionally perfectly
known
Returns
-------
la_cond : Schur
a new Schur instance conditional on perfect knowledge
of some parameters
Note
----
this method is used by the get_par_contribution() method -
don't call this method directly
"""
if not isinstance(parameter_names, list):
parameter_names = [parameter_names]
for iname, name in enumerate(parameter_names):
name = str(name).lower()
parameter_names[iname] = name
assert name in self.jco.col_names,\
"contribution parameter " + name + " not found jco"
keep_names = []
for name in self.jco.col_names:
if name not in parameter_names:
keep_names.append(name)
if len(keep_names) == 0:
raise Exception("Schur.contribution_from_Parameters " +
"atleast one parameter must remain uncertain")
#get the reduced predictions
if self.predictions is None:
raise Exception("Schur.contribution_from_Parameters " +
"no predictions have been set")
# cond_preds = []
# for pred in self.predictions:
# cond_preds.append(pred.get(keep_names, pred.col_names))
cond_preds = self.predictions.get(row_names=keep_names)
la_cond = Schur(jco=self.jco.get(self.jco.row_names, keep_names),
parcov=self.parcov.condition_on(parameter_names),
obscov=self.obscov, predictions=cond_preds,verbose=False)
return la_cond
|
python
|
{
"resource": ""
}
|
q20014
|
Schur.get_par_contribution
|
train
|
def get_par_contribution(self,parlist_dict=None,include_prior_results=False):
"""get a dataframe the prior and posterior uncertainty
reduction as a result of some parameter becoming perfectly known
Parameters
----------
parlist_dict : dict
a nested dictionary-list of groups of parameters
that are to be treated as perfectly known. key values become
row labels in returned dataframe. If None, each adjustable parameter
is sequentially treated as known and the returned dataframe
has row labels for each adjustable parameter
include_prior_results : bool
flag to return a multi-indexed dataframe with both conditional
prior and posterior forecast uncertainty estimates. Default is False
Returns
-------
pandas.DataFrame : pandas.DataFrame
a dataframe that summarizes the parameter contribution analysis.
The dataframe has index (row labels) of the keys in parlist_dict
and a column labels of forecast names. The values in the dataframe
are the posterior variance of the forecast conditional on perfect
knowledge of the parameters in the values of parlist_dict. Varies
depending on `include_prior_results`.
Example
-------
``>>>import pyemu``
``>>>sc = pyemu.Schur(jco="pest.jcb")``
``>>>df = sc.get_par_contribution()``
"""
self.log("calculating contribution from parameters")
if parlist_dict is None:
parlist_dict = {}#dict(zip(self.pst.adj_par_names,self.pst.adj_par_names))
# make sure all of the adjustable pars are in the jco
for pname in self.pst.adj_par_names:
if pname in self.jco.col_names:
parlist_dict[pname] = pname
else:
if type(parlist_dict) == list:
parlist_dict = dict(zip(parlist_dict,parlist_dict))
results = {}
names = ["base"]
for forecast in self.prior_forecast.keys():
pr = self.prior_forecast[forecast]
pt = self.posterior_forecast[forecast]
#reduce = 100.0 * ((pr - pt) / pr)
results[(forecast,"prior")] = [pr]
results[(forecast,"post")] = [pt]
#results[(forecast,"percent_reduce")] = [reduce]
for case_name,par_list in parlist_dict.items():
if len(par_list) == 0:
continue
names.append(case_name)
self.log("calculating contribution from: " + str(par_list))
case_prior,case_post = self.__contribution_from_parameters(par_list)
self.log("calculating contribution from: " + str(par_list))
for forecast in case_prior.keys():
pr = case_prior[forecast]
pt = case_post[forecast]
#reduce = 100.0 * ((pr - pt) / pr)
results[(forecast, "prior")].append(pr)
results[(forecast, "post")].append(pt)
#results[(forecast, "percent_reduce")].append(reduce)
df = pd.DataFrame(results,index=names)
#base = df.loc["base",df.columns.get_level_values(1)=="post"]
#df = 1.0 - (df.loc[:,df.columns.get_level_values(1)=="post"] / base)
self.log("calculating contribution from parameters")
if include_prior_results:
return df
else:
df = df.xs("post", level=1, drop_level=True, axis=1)
return df
|
python
|
{
"resource": ""
}
|
q20015
|
ErrVar.omitted_jco
|
train
|
def omitted_jco(self):
"""get the omitted jco
Returns
-------
omitted_jco : pyemu.Jco
Note
----
returns a reference
if ErrorVariance.__omitted_jco is None,
then dynamically load the attribute before returning
"""
if self.__omitted_jco is None:
self.log("loading omitted_jco")
self.__load_omitted_jco()
self.log("loading omitted_jco")
return self.__omitted_jco
|
python
|
{
"resource": ""
}
|
q20016
|
ErrVar.omitted_parcov
|
train
|
def omitted_parcov(self):
"""get the omitted prior parameter covariance matrix
Returns
-------
omitted_parcov : pyemu.Cov
Note
----
returns a reference
If ErrorVariance.__omitted_parcov is None,
attribute is dynamically loaded
"""
if self.__omitted_parcov is None:
self.log("loading omitted_parcov")
self.__load_omitted_parcov()
self.log("loading omitted_parcov")
return self.__omitted_parcov
|
python
|
{
"resource": ""
}
|
q20017
|
ErrVar.get_identifiability_dataframe
|
train
|
def get_identifiability_dataframe(self,singular_value=None,precondition=False):
"""get the parameter identifiability as a pandas dataframe
Parameters
----------
singular_value : int
the singular spectrum truncation point. Defaults to minimum of
non-zero-weighted observations and adjustable parameters
precondition : bool
flag to use the preconditioned hessian (xtqx + sigma_theta^-1).
Default is False
Returns
-------
pandas.DataFrame : pandas.DataFrame
A pandas dataframe of the V_1**2 Matrix with the
identifiability in the column labeled "ident"
"""
if singular_value is None:
singular_value = int(min(self.pst.nnz_obs, self.pst.npar_adj))
#v1_df = self.qhalfx.v[:, :singular_value].to_dataframe() ** 2
xtqx = self.xtqx
if precondition:
xtqx = xtqx + self.parcov.inv
#v1_df = self.xtqx.v[:, :singular_value].to_dataframe() ** 2
v1_df = xtqx.v[:, :singular_value].to_dataframe() ** 2
v1_df["ident"] = v1_df.sum(axis=1)
return v1_df
|
python
|
{
"resource": ""
}
|
q20018
|
ErrVar.variance_at
|
train
|
def variance_at(self, singular_value):
"""get the error variance of all three terms at a singluar value
Parameters
----------
singular_value : int
singular value to test
Returns
-------
dict : dict
dictionary of (err var term,prediction_name), standard_deviation pairs
"""
results = {}
results.update(self.first_prediction(singular_value))
results.update(self.second_prediction(singular_value))
results.update(self.third_prediction(singular_value))
return results
|
python
|
{
"resource": ""
}
|
q20019
|
ErrVar.I_minus_R
|
train
|
def I_minus_R(self,singular_value):
"""get I - R at singular value
Parameters
----------
singular_value : int
singular value to calc R at
Returns
-------
I - R : pyemu.Matrix
identity matrix minus resolution matrix at singular_value
"""
if self.__I_R is not None and singular_value == self.__I_R_sv:
return self.__I_R
else:
if singular_value > self.jco.ncol:
return self.parcov.zero
else:
#v2 = self.qhalfx.v[:, singular_value:]
v2 = self.xtqx.v[:, singular_value:]
self.__I_R = v2 * v2.T
self.__I_R_sv = singular_value
return self.__I_R
|
python
|
{
"resource": ""
}
|
q20020
|
ErrVar.third_prediction
|
train
|
def third_prediction(self,singular_value):
"""get the omitted parameter contribution to prediction error variance
at a singular value. used to construct error variance dataframe
Parameters
----------
singular_value : int
singular value to calc third term at
Returns
-------
dict : dict
dictionary of ("third",prediction_names),error variance
"""
if not self.predictions:
raise Exception("ErrVar.third(): not predictions are set")
if self.__need_omitted is False:
zero_preds = {}
for pred in self.predictions_iter:
zero_preds[("third", pred.col_names[0])] = 0.0
return zero_preds
self.log("calc third term prediction @" + str(singular_value))
mn = min(self.jco.shape)
try:
mn = min(self.pst.npar_adj, self.pst.nnz_obs)
except:
pass
if singular_value > mn:
inf_pred = {}
for pred in self.predictions_iter:
inf_pred[("third",pred.col_names[0])] = 1.0E+35
return inf_pred
else:
results = {}
for prediction,omitted_prediction in \
zip(self.predictions_iter, self.omitted_predictions):
# comes out as row vector, but needs to be a column vector
p = ((prediction.T * self.G(singular_value) * self.omitted_jco)
- omitted_prediction.T).T
result = float((p.T * self.omitted_parcov * p).x)
results[("third", prediction.col_names[0])] = result
self.log("calc third term prediction @" + str(singular_value))
return results
|
python
|
{
"resource": ""
}
|
q20021
|
pp_file_to_dataframe
|
train
|
def pp_file_to_dataframe(pp_filename):
""" read a pilot point file to a pandas Dataframe
Parameters
----------
pp_filename : str
pilot point file
Returns
-------
df : pandas.DataFrame
a dataframe with pp_utils.PP_NAMES for columns
"""
df = pd.read_csv(pp_filename, delim_whitespace=True,
header=None, names=PP_NAMES,usecols=[0,1,2,3,4])
df.loc[:,"name"] = df.name.apply(str).apply(str.lower)
return df
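# a minimal usage sketch (hypothetical file name): assumes "hk_pp.dat" is an
# existing pilot point file with the standard name/x/y/zone/value columns
# >>> pp_df = pp_file_to_dataframe("hk_pp.dat")
# >>> pp_df.columns.tolist()  # the PP_NAMES columns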
|
python
|
{
"resource": ""
}
|
q20022
|
pp_tpl_to_dataframe
|
train
|
def pp_tpl_to_dataframe(tpl_filename):
""" read a pilot points template file to a pandas dataframe
Parameters
----------
tpl_filename : str
pilot points template file
Returns
-------
df : pandas.DataFrame
a dataframe with "parnme" included
"""
inlines = open(tpl_filename, 'r').readlines()
header = inlines.pop(0)
marker = header.strip().split()[1]
assert len(marker) == 1
usecols = [0,1,2,3]
df = pd.read_csv(tpl_filename, delim_whitespace=True,skiprows=1,
header=None, names=PP_NAMES[:-1],usecols=usecols)
df.loc[:,"name"] = df.name.apply(str).apply(str.lower)
df["parnme"] = [i.split(marker)[1].strip() for i in inlines]
return df
|
python
|
{
"resource": ""
}
|
q20023
|
write_pp_shapfile
|
train
|
def write_pp_shapfile(pp_df,shapename=None):
"""write pilot points dataframe to a shapefile
Parameters
----------
pp_df : pandas.DataFrame or str
pilot point dataframe or a pilot point filename. Dataframe
must include "x" and "y"
shapename : str
shapefile name. If None, pp_df must be str and shapefile
is saved as <pp_df>.shp
Note
----
requires pyshp
"""
try:
import shapefile
except Exception as e:
raise Exception("error importing shapefile: {0}, \ntry pip install pyshp...".format(str(e)))
if not isinstance(pp_df,list):
pp_df = [pp_df]
dfs = []
for pp in pp_df:
if isinstance(pp,pd.DataFrame):
dfs.append(pp)
elif isinstance(pp,str):
dfs.append(pp_file_to_dataframe(pp))
else:
raise Exception("unsupported arg type:{0}".format(type(pp)))
if shapename is None:
shapename = "pp_locs.shp"
try:
shp = shapefile.Writer(shapeType=shapefile.POINT)
except:
shp = shapefile.Writer(target=shapename, shapeType=shapefile.POINT)
for name, dtype in dfs[0].dtypes.iteritems():
if dtype == object:
shp.field(name=name, fieldType='C', size=50)
elif dtype in [int, np.int, np.int64, np.int32]:
shp.field(name=name, fieldType='N', size=50, decimal=0)
elif dtype in [float, np.float, np.float64, np.float32]:
shp.field(name=name, fieldType='N', size=50, decimal=8)
else:
raise Exception("unrecognized field type in pp_df:{0}:{1}".format(name, dtype))
# some pandas awesomeness..
for df in dfs:
#df.apply(lambda x: shp.poly([[[x.x, x.y]]]), axis=1)
df.apply(lambda x: shp.point(x.x, x.y), axis=1)
df.apply(lambda x: shp.record(*x), axis=1)
try:
shp.save(shapename)
except:
shp.close()
|
python
|
{
"resource": ""
}
|
q20024
|
write_pp_file
|
train
|
def write_pp_file(filename,pp_df):
"""write a pilot points dataframe to a pilot points file
Parameters
----------
filename : str
pilot points file to write
pp_df : pandas.DataFrame
a dataframe that has columns "x","y","zone", and "value"
"""
with open(filename,'w') as f:
f.write(pp_df.to_string(col_space=0,
columns=PP_NAMES,
formatters=PP_FMT,
justify="right",
header=False,
index=False) + '\n')
|
python
|
{
"resource": ""
}
|
q20025
|
pilot_points_to_tpl
|
train
|
def pilot_points_to_tpl(pp_file,tpl_file=None,name_prefix=None):
"""write a template file for a pilot points file
Parameters
----------
pp_file : str
pilot points file
tpl_file : str
template file name to write. If None, append ".tpl" to
the pp_file arg. Default is None
name_prefix : str
name to prepend to parameter names for each pilot point. For example,
if ``name_prefix = "hk_"``, then each pilot point parameter will be named
"hk_0001","hk_0002", etc. If None, parameter names from pp_df.name
are used. Default is None.
Returns
-------
pp_df : pandas.DataFrame
a dataframe with pilot point information (name,x,y,zone,parval1)
with the parameter information (parnme,tpl_str)
"""
if isinstance(pp_file,pd.DataFrame):
pp_df = pp_file
assert tpl_file is not None
else:
assert os.path.exists(pp_file)
pp_df = pd.read_csv(pp_file, delim_whitespace=True,
header=None, names=PP_NAMES)
if tpl_file is None:
tpl_file = pp_file + ".tpl"
if name_prefix is not None:
digits = str(len(str(pp_df.shape[0])))
fmt = "{0:0"+digits+"d}"
names = [name_prefix+fmt.format(i) for i in range(pp_df.shape[0])]
else:
names = pp_df.name.copy()
too_long = []
for name in names:
if len(name) > 12:
too_long.append(name)
if len(too_long) > 0:
raise Exception("the following parameter names are too long:" +\
",".join(too_long))
tpl_entries = ["~ {0} ~".format(name) for name in names]
pp_df.loc[:,"tpl"] = tpl_entries
pp_df.loc[:,"parnme"] = names
f_tpl = open(tpl_file,'w')
f_tpl.write("ptf ~\n")
f_tpl.write(pp_df.to_string(col_space=0,
columns=["name","x","y","zone","tpl"],
formatters=PP_FMT,
justify="left",
header=False,
index=False) + '\n')
f_tpl.close()
return pp_df
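# a minimal usage sketch (hypothetical file name): assumes "hk_pp.dat" exists;
# writes "hk_pp.dat.tpl" with parameters named <name_prefix><zero-padded index>
# and returns the augmented pilot point dataframe
# >>> pp_df = pilot_points_to_tpl("hk_pp.dat", name_prefix="hk_")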
|
python
|
{
"resource": ""
}
|
q20026
|
run
|
train
|
def run(cmd_str,cwd='.',verbose=False):
""" an OS agnostic function to execute command
Parameters
----------
cmd_str : str
the str to execute with os.system()
cwd : str
the directory to execute the command in
verbose : bool
flag to echo to stdout complete cmd str
Note
----
uses platform to detect OS and adds .exe or ./ as appropriate
for Windows, if os.system returns non-zero, raises exception
Example
-------
``>>>import pyemu``
``>>>pyemu.helpers.run("pestpp pest.pst")``
"""
warnings.warn("run() has moved to pyemu.os_utils",PyemuWarning)
pyemu.os_utils.run(cmd_str=cmd_str,cwd=cwd,verbose=verbose)
|
python
|
{
"resource": ""
}
|
q20027
|
condition_on_par_knowledge
|
train
|
def condition_on_par_knowledge(cov,par_knowledge_dict):
""" experimental function to include conditional prior information
for one or more parameters in a full covariance matrix
"""
missing = []
for parnme in par_knowledge_dict.keys():
if parnme not in cov.row_names:
missing.append(parnme)
if len(missing):
raise Exception("par knowledge dict parameters not found: {0}".\
format(','.join(missing)))
# build the selection matrix and sigma epsilon
#sel = cov.zero2d
#sel = pyemu.Matrix(x=np.zeros((cov.shape[0],1)),row_names=cov.row_names,col_names=['sel'])
sel = cov.zero2d
sigma_ep = cov.zero2d
for parnme,var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
#sel.x[idx,:] = 1.0
sel.x[idx,idx] = 1.0
sigma_ep.x[idx,idx] = var
#print(sigma_ep.x)
#q = sigma_ep.inv
#cov_inv = cov.inv
print(sel)
term2 = sel * cov * sel.T
#term2 += sigma_ep
#term2 = cov
print(term2)
term2 = term2.inv
term2 *= sel
term2 *= cov
new_cov = cov - term2
return new_cov
|
python
|
{
"resource": ""
}
|
q20028
|
kl_setup
|
train
|
def kl_setup(num_eig,sr,struct,prefixes,
factors_file="kl_factors.dat",islog=True, basis_file=None,
tpl_dir="."):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Parameters
----------
num_eig : int
number of basis vectors to retain in the reduced basis
sr : flopy.reference.SpatialReference
struct : str or pyemu.geostats.Geostruct
geostatistical structure (or file containing one)
array_dict : dict
a dict of arrays to setup as KL-based parameters. The key becomes the
parameter name prefix. The total number of parameters is
len(array_dict) * num_eig
basis_file : str
the name of the PEST-format binary file where the reduced basis will be saved
tpl_file : str
the name of the template file to make. The template
file is a csv file with the parameter names, the
original factor values,and the template entries.
The original values can be used to set the parval1
entries in the control file
Returns
-------
back_array_dict : dict
a dictionary of back transformed arrays. This is useful to see
how much "smoothing" is taking place compared to the original
arrays.
Note
----
requires flopy
Example
-------
``>>>import flopy``
``>>>import pyemu``
``>>>m = flopy.modflow.Modflow.load("mymodel.nam")``
``>>>a_dict = {"hk":m.lpf.hk[0].array}``
``>>>ba_dict = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",a_dict)``
"""
try:
import flopy
except Exception as e:
raise Exception("error import flopy: {0}".format(str(e)))
assert isinstance(sr,flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct,str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i,j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
sr.ycentergrid.flatten(),
names=names)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
#trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:,:num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name":eig_names},index=eig_names)
pp_df.loc[:,"x"] = -1.0 * sr.ncol
pp_df.loc[:,"y"] = -1.0 * sr.nrow
pp_df.loc[:,"zone"] = -999
pp_df.loc[:,"parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"),pp_df)
eigen_basis_to_factor_file(sr.nrow,sr.ncol,trunc_basis,factors_file=factors_file,islog=islog)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir,"{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat",tpl_file,prefix)
shutil.copy2("temp.dat",tpl_file.replace(".tpl",""))
df.loc[:,"tpl_file"] = tpl_file
df.loc[:,"in_file"] = tpl_file.replace(".tpl","")
df.loc[:,"prefix"] = prefix
df.loc[:,"pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
#arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:,"parubnd"] = 10.0
df.loc[:,"parlbnd"] = 0.1
return df
|
python
|
{
"resource": ""
}
|
q20029
|
zero_order_tikhonov
|
train
|
def zero_order_tikhonov(pst, parbounds=True,par_groups=None,
reset=True):
"""setup preferred-value regularization
Parameters
----------
pst : pyemu.Pst
the control file instance
parbounds : bool
flag to weight the prior information equations according
to parameter bound width - approx the KL transform. Default
is True
par_groups : list
parameter groups to build PI equations for. If None, all
adjustable parameters are used. Default is None
reset : bool
flag to reset the prior_information attribute of the pst
instance. Default is True
Example
-------
``>>>import pyemu``
``>>>pst = pyemu.Pst("pest.pst")``
``>>>pyemu.helpers.zero_order_tikhonov(pst)``
"""
if par_groups is None:
par_groups = pst.par_groups
pilbl, obgnme, weight, equation = [], [], [], []
for idx, row in pst.parameter_data.iterrows():
pt = row["partrans"].lower()
try:
pt = pt.decode()
except:
pass
if pt not in ["tied", "fixed"] and\
row["pargp"] in par_groups:
pilbl.append(row["parnme"])
weight.append(1.0)
ogp_name = "regul"+row["pargp"]
obgnme.append(ogp_name[:12])
parnme = row["parnme"]
parval1 = row["parval1"]
if pt == "log":
parnme = "log(" + parnme + ")"
parval1 = np.log10(parval1)
eq = "1.0 * " + parnme + " ={0:15.6E}".format(parval1)
equation.append(eq)
if reset:
pst.prior_information = pd.DataFrame({"pilbl": pilbl,
"equation": equation,
"obgnme": obgnme,
"weight": weight})
else:
pi = pd.DataFrame({"pilbl": pilbl,
"equation": equation,
"obgnme": obgnme,
"weight": weight})
pst.prior_information = pst.prior_information.append(pi)
if parbounds:
regweight_from_parbound(pst)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
|
python
|
{
"resource": ""
}
|
q20030
|
first_order_pearson_tikhonov
|
train
|
def first_order_pearson_tikhonov(pst,cov,reset=True,abs_drop_tol=1.0e-3):
"""setup preferred-difference regularization from a covariance matrix.
The weights on the prior information equations are the Pearson
correlation coefficients implied by the covariance matrix.
Parameters
----------
pst : pyemu.Pst
pst instance
cov : pyemu.Cov
covariance matrix instance
reset : bool
drop all other pi equations. If False, append to
existing pi equations
abs_drop_tol : float
tolerance to control how many pi equations are written.
If the Pearson C is less than abs_drop_tol, the prior information
equation will not be included in the control file
Example
-------
``>>>import pyemu``
``>>>pst = pyemu.Pst("pest.pst")``
``>>>cov = pyemu.Cov.from_ascii("prior.cov")``
``>>>pyemu.helpers.first_order_pearson_tikhonov(pst,cov,abs_drop_tol=0.25)``
"""
assert isinstance(cov,pyemu.Cov)
print("getting CC matrix")
cc_mat = cov.get(pst.adj_par_names).to_pearson()
#print(pst.parameter_data.dtypes)
try:
ptrans = pst.parameter_data.partrans.apply(lambda x:x.decode()).to_dict()
except:
ptrans = pst.parameter_data.partrans.to_dict()
pi_num = pst.prior_information.shape[0] + 1
pilbl, obgnme, weight, equation = [], [], [], []
sadj_names = set(pst.adj_par_names)
print("processing")
for i,iname in enumerate(cc_mat.row_names):
if iname not in sadj_names:
continue
for j,jname in enumerate(cc_mat.row_names[i+1:]):
if jname not in sadj_names:
continue
#print(i,iname,i+j+1,jname)
cc = cc_mat.x[i,j+i+1]
if cc < abs_drop_tol:
continue
pilbl.append("pcc_{0}".format(pi_num))
iiname = str(iname)
if str(ptrans[iname]) == "log":
iiname = "log("+iname+")"
jjname = str(jname)
if str(ptrans[jname]) == "log":
jjname = "log("+jname+")"
equation.append("1.0 * {0} - 1.0 * {1} = 0.0".\
format(iiname,jjname))
weight.append(cc)
obgnme.append("regul_cc")
pi_num += 1
df = pd.DataFrame({"pilbl": pilbl,"equation": equation,
"obgnme": obgnme,"weight": weight})
df.index = df.pilbl
if reset:
pst.prior_information = df
else:
pst.prior_information = pst.prior_information.append(df)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
|
python
|
{
"resource": ""
}
|
q20031
|
apply_array_pars
|
train
|
def apply_array_pars(arr_par_file="arr_pars.csv"):
""" a function to apply array-based multipler parameters. Used to implement
the parameterization constructed by PstFromFlopyModel during a forward run
Parameters
----------
arr_par_file : str
path to csv file detailing parameter array multipliers
Note
----
"arr_pars.csv" - is written by PstFromFlopy
the function should be added to the forward_run.py script but can be called on any correctly formatted csv
"""
df = pd.read_csv(arr_par_file)
# for fname in df.model_file:
# try:
# os.remove(fname)
# except:
# print("error removing mult array:{0}".format(fname))
if 'pp_file' in df.columns:
for pp_file,fac_file,mlt_file in zip(df.pp_file,df.fac_file,df.mlt_file):
if pd.isnull(pp_file):
continue
pyemu.geostats.fac2real(pp_file=pp_file,factors_file=fac_file,
out_file=mlt_file,lower_lim=1.0e-10)
for model_file in df.model_file.unique():
# find all mults that need to be applied to this array
df_mf = df.loc[df.model_file==model_file,:]
results = []
org_file = df_mf.org_file.unique()
if org_file.shape[0] != 1:
raise Exception("wrong number of org_files for {0}".
format(model_file))
org_arr = np.loadtxt(org_file[0])
for mlt in df_mf.mlt_file:
org_arr *= np.loadtxt(mlt)
if "upper_bound" in df.columns:
ub_vals = df_mf.upper_bound.value_counts().dropna().to_dict()
if len(ub_vals) == 0:
pass
elif len(ub_vals) > 1:
raise Exception("different upper bound values for {0}".format(org_file))
else:
ub = list(ub_vals.keys())[0]
org_arr[org_arr>ub] = ub
if "lower_bound" in df.columns:
lb_vals = df_mf.lower_bound.value_counts().dropna().to_dict()
if len(lb_vals) == 0:
pass
elif len(lb_vals) > 1:
raise Exception("different lower bound values for {0}".format(org_file))
else:
lb = list(lb_vals.keys())[0]
org_arr[org_arr < lb] = lb
np.savetxt(model_file,np.atleast_2d(org_arr),fmt="%15.6E",delimiter='')
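# a minimal usage sketch: typically added to forward_run.py by PstFromFlopyModel;
# assumes "arr_pars.csv" lists org_file, mlt_file and model_file columns (plus
# optional pp_file/fac_file and upper_bound/lower_bound columns)
# >>> apply_array_pars("arr_pars.csv")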
|
python
|
{
"resource": ""
}
|
q20032
|
PstFromFlopyModel.setup_sfr_obs
|
train
|
def setup_sfr_obs(self):
"""setup sfr ASCII observations"""
if not self.sfr_obs:
return
if self.m.sfr is None:
self.logger.lraise("no sfr package found...")
org_sfr_out_file = os.path.join(self.org_model_ws,"{0}.sfr.out".format(self.m.name))
if not os.path.exists(org_sfr_out_file):
self.logger.lraise("setup_sfr_obs() error: could not locate existing sfr out file: {0}".
format(org_sfr_out_file))
new_sfr_out_file = os.path.join(self.m.model_ws,os.path.split(org_sfr_out_file)[-1])
shutil.copy2(org_sfr_out_file,new_sfr_out_file)
seg_group_dict = None
if isinstance(self.sfr_obs,dict):
seg_group_dict = self.sfr_obs
df = pyemu.gw_utils.setup_sfr_obs(new_sfr_out_file,seg_group_dict=seg_group_dict,
model=self.m,include_path=True)
if df is not None:
self.obs_dfs["sfr"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_sfr_obs()")
|
python
|
{
"resource": ""
}
|
q20033
|
PstFromFlopyModel.setup_mult_dirs
|
train
|
def setup_mult_dirs(self):
""" setup the directories to use for multiplier parameterization. Directories
are made within the PstFromFlopyModel.m.model_ws directory
"""
# setup dirs to hold the original and multiplier model input quantities
set_dirs = []
# if len(self.pp_props) > 0 or len(self.zone_props) > 0 or \
# len(self.grid_props) > 0:
if self.pp_props is not None or \
self.zone_props is not None or \
self.grid_props is not None or\
self.const_props is not None or \
self.kl_props is not None:
set_dirs.append(self.arr_org)
set_dirs.append(self.arr_mlt)
# if len(self.bc_props) > 0:
if len(self.temporal_list_props) > 0 or len(self.spatial_list_props) > 0:
set_dirs.append(self.list_org)
if len(self.spatial_list_props):
set_dirs.append(self.list_mlt)
for d in set_dirs:
d = os.path.join(self.m.model_ws,d)
self.log("setting up '{0}' dir".format(d))
if os.path.exists(d):
if self.remove_existing:
shutil.rmtree(d,onerror=remove_readonly)
else:
raise Exception("dir '{0}' already exists".
format(d))
os.mkdir(d)
self.log("setting up '{0}' dir".format(d))
|
python
|
{
"resource": ""
}
|
q20034
|
PstFromFlopyModel.setup_model
|
train
|
def setup_model(self,model,org_model_ws,new_model_ws):
""" setup the flopy.mbase instance for use with multipler parameters.
Changes model_ws, sets external_path and writes new MODFLOW input
files
Parameters
----------
model : flopy.mbase
flopy model instance
org_model_ws : str
the original model working space
new_model_ws : str
the new model working space
"""
split_new_mws = [i for i in os.path.split(new_model_ws) if len(i) > 0]
if len(split_new_mws) != 1:
self.logger.lraise("new_model_ws can only be 1 folder-level deep:{0}".
format(str(split_new_mws)))
if isinstance(model,str):
self.log("loading flopy model")
try:
import flopy
except:
raise Exception("from_flopy_model() requires flopy")
# prepare the flopy model
self.org_model_ws = org_model_ws
self.new_model_ws = new_model_ws
self.m = flopy.modflow.Modflow.load(model,model_ws=org_model_ws,
check=False,verbose=True,forgive=False)
self.log("loading flopy model")
else:
self.m = model
self.org_model_ws = str(self.m.model_ws)
self.new_model_ws = new_model_ws
self.log("updating model attributes")
self.m.array_free_format = True
self.m.free_format_input = True
self.m.external_path = '.'
self.log("updating model attributes")
if os.path.exists(new_model_ws):
if not self.remove_existing:
self.logger.lraise("'new_model_ws' already exists")
else:
self.logger.warn("removing existing 'new_model_ws")
shutil.rmtree(new_model_ws,onerror=pyemu.os_utils.remove_readonly)
time.sleep(1)
self.m.change_model_ws(new_model_ws,reset_external=True)
self.m.exe_name = self.m.exe_name.replace(".exe",'')
self.m.exe = self.m.version
self.log("writing new modflow input files")
self.m.write_input()
self.log("writing new modflow input files")
|
python
|
{
"resource": ""
}
|
q20035
|
PstFromFlopyModel.get_count
|
train
|
def get_count(self,name):
""" get the latest counter for a certain parameter type.
Parameters
----------
name : str
the parameter type
Returns
-------
count : int
the latest count for a parameter type
Note
----
calling this function increments the counter for the passed
parameter type
"""
if name not in self.mlt_counter:
self.mlt_counter[name] = 1
c = 0
else:
c = self.mlt_counter[name]
self.mlt_counter[name] += 1
#print(name,c)
return c
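# a minimal usage sketch: the counter for a parameter type starts at zero and
# increments on each later call
# >>> self.get_count("hk")  # 0 on the first call
# >>> self.get_count("hk")  # 1
# >>> self.get_count("hk")  # 2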
|
python
|
{
"resource": ""
}
|
q20036
|
PstFromFlopyModel.write_u2d
|
train
|
def write_u2d(self, u2d):
""" write a flopy.utils.Util2D instance to an ASCII text file using the
Util2D filename
Parameters
----------
u2d : flopy.utils.Util2D
Returns
-------
filename : str
the name of the file written (without path)
"""
filename = os.path.split(u2d.filename)[-1]
np.savetxt(os.path.join(self.m.model_ws,self.arr_org,filename),
u2d.array,fmt="%15.6E")
return filename
|
python
|
{
"resource": ""
}
|
q20037
|
PstFromFlopyModel.write_grid_tpl
|
train
|
def write_grid_tpl(self,name,tpl_file,zn_array):
""" write a template file a for grid-based multiplier parameters
Parameters
----------
name : str
the base parameter name
tpl_file : str
the template file to write
zn_array : numpy.ndarray
an array used to skip inactive cells
Returns
-------
df : pandas.DataFrame
a dataframe with parameter information
"""
parnme,x,y = [],[],[]
with open(os.path.join(self.m.model_ws,tpl_file),'w') as f:
f.write("ptf ~\n")
for i in range(self.m.nrow):
for j in range(self.m.ncol):
if zn_array[i,j] < 1:
pname = ' 1.0 '
else:
pname = "{0}{1:03d}{2:03d}".format(name,i,j)
if len(pname) > 12:
self.logger.lraise("grid pname too long:{0}".\
format(pname))
parnme.append(pname)
pname = ' ~ {0} ~ '.format(pname)
x.append(self.m.sr.xcentergrid[i,j])
y.append(self.m.sr.ycentergrid[i,j])
f.write(pname)
f.write("\n")
df = pd.DataFrame({"parnme":parnme,"x":x,"y":y},index=parnme)
df.loc[:,"pargp"] = "{0}{1}".format(self.gr_suffix.replace('_',''),name)
df.loc[:,"tpl"] = tpl_file
return df
|
python
|
{
"resource": ""
}
|
q20038
|
PstFromFlopyModel.grid_prep
|
train
|
def grid_prep(self):
""" prepare grid-based parameterizations
"""
if len(self.grid_props) == 0:
return
if self.grid_geostruct is None:
self.logger.warn("grid_geostruct is None,"\
" using ExpVario with contribution=1 and a=(max(delc,delr)*10")
dist = 10 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0,a=dist)
self.grid_geostruct = pyemu.geostats.GeoStruct(variograms=v)
|
python
|
{
"resource": ""
}
|
q20039
|
PstFromFlopyModel.kl_prep
|
train
|
def kl_prep(self,mlt_df):
""" prepare KL based parameterizations
Parameters
----------
mlt_df : pandas.DataFrame
a dataframe with multiplier array information
Note
----
calls pyemu.helpers.setup_kl()
"""
if len(self.kl_props) == 0:
return
if self.kl_geostruct is None:
self.logger.warn("kl_geostruct is None,"\
" using ExpVario with contribution=1 and a=(10.0*max(delr,delc))")
kl_dist = 10.0 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0,a=kl_dist)
self.kl_geostruct = pyemu.geostats.GeoStruct(variograms=v)
kl_df = mlt_df.loc[mlt_df.suffix==self.kl_suffix,:]
layers = kl_df.layer.unique()
#kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers}
# big assumption here - if prefix is listed more than once, use the lowest layer index
#for i,l in enumerate(layers):
# p = set(kl_dict[l])
# for ll in layers[i+1:]:
# pp = set(kl_dict[ll])
# d = pp - p
# kl_dict[ll] = list(d)
kl_prefix = list(kl_df.loc[:,"prefix"])
kl_array_file = {p:m for p,m in zip(kl_df.prefix,kl_df.mlt_file)}
self.logger.statement("kl_prefix: {0}".format(str(kl_prefix)))
fac_file = os.path.join(self.m.model_ws, "kl.fac")
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_df = kl_setup(self.kl_num_eig,self.m.sr,self.kl_geostruct,kl_prefix,
factors_file=fac_file,basis_file=fac_file+".basis.jcb",
tpl_dir=self.m.model_ws)
self.logger.statement("{0} kl parameters created".
format(kl_df.shape[0]))
self.logger.statement("kl 'pargp':{0}".
format(','.join(kl_df.pargp.unique())))
self.log("calling kl_setup() with factors file {0}".format(fac_file))
kl_mlt_df = mlt_df.loc[mlt_df.suffix==self.kl_suffix]
for prefix in kl_df.prefix.unique():
prefix_df = kl_df.loc[kl_df.prefix==prefix,:]
in_file = os.path.split(prefix_df.loc[:,"in_file"].iloc[0])[-1]
assert prefix in mlt_df.prefix.values,"{0}:{1}".format(prefix,mlt_df.prefix)
mlt_df.loc[mlt_df.prefix==prefix,"pp_file"] = in_file
mlt_df.loc[mlt_df.prefix==prefix,"fac_file"] = os.path.split(fac_file)[-1]
print(kl_mlt_df)
mlt_df.loc[mlt_df.suffix == self.kl_suffix, "tpl_file"] = np.NaN
self.par_dfs[self.kl_suffix] = kl_df
|
python
|
{
"resource": ""
}
|
q20040
|
PstFromFlopyModel.setup_observations
|
train
|
def setup_observations(self):
""" main entry point for setting up observations
"""
obs_methods = [self.setup_water_budget_obs,self.setup_hyd,
self.setup_smp,self.setup_hob,self.setup_hds,
self.setup_sfr_obs]
obs_types = ["mflist water budget obs","hyd file",
"external obs-sim smp files","hob","hds","sfr"]
self.obs_dfs = {}
for obs_method, obs_type in zip(obs_methods,obs_types):
self.log("processing obs type {0}".format(obs_type))
obs_method()
self.log("processing obs type {0}".format(obs_type))
|
python
|
{
"resource": ""
}
|
q20041
|
PstFromFlopyModel.draw
|
train
|
def draw(self, num_reals=100, sigma_range=6):
""" draw like a boss!
Parameters
----------
num_reals : int
number of realizations to generate. Default is 100
sigma_range : float
number of standard deviations represented by the parameter bounds. Default
is 6.
Returns
-------
pe : pyemu.ParameterEnsemble
a parameter ensemble drawn from the geostatistical prior
"""
self.log("drawing realizations")
struct_dict = {}
if self.pp_suffix in self.par_dfs.keys():
pp_df = self.par_dfs[self.pp_suffix]
pp_dfs = []
for pargp in pp_df.pargp.unique():
gp_df = pp_df.loc[pp_df.pargp==pargp,:]
p_df = gp_df.drop_duplicates(subset="parnme")
pp_dfs.append(p_df)
#pp_dfs = [pp_df.loc[pp_df.pargp==pargp,:].copy() for pargp in pp_df.pargp.unique()]
struct_dict[self.pp_geostruct] = pp_dfs
if self.gr_suffix in self.par_dfs.keys():
gr_df = self.par_dfs[self.gr_suffix]
gr_dfs = []
for pargp in gr_df.pargp.unique():
gp_df = gr_df.loc[gr_df.pargp==pargp,:]
p_df = gp_df.drop_duplicates(subset="parnme")
gr_dfs.append(p_df)
#gr_dfs = [gr_df.loc[gr_df.pargp==pargp,:].copy() for pargp in gr_df.pargp.unique()]
struct_dict[self.grid_geostruct] = gr_dfs
if "temporal_list" in self.par_dfs.keys():
bc_df = self.par_dfs["temporal_list"]
bc_df.loc[:,"y"] = 0
bc_df.loc[:,"x"] = bc_df.timedelta.apply(lambda x: x.days)
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp==pargp,:]
p_df = gp_df.drop_duplicates(subset="parnme")
#print(p_df)
bc_dfs.append(p_df)
#bc_dfs = [bc_df.loc[bc_df.pargp==pargp,:].copy() for pargp in bc_df.pargp.unique()]
struct_dict[self.temporal_list_geostruct] = bc_dfs
if "spatial_list" in self.par_dfs.keys():
bc_df = self.par_dfs["spatial_list"]
bc_dfs = []
for pargp in bc_df.pargp.unique():
gp_df = bc_df.loc[bc_df.pargp==pargp,:]
#p_df = gp_df.drop_duplicates(subset="parnme")
#print(p_df)
bc_dfs.append(gp_df)
struct_dict[self.spatial_list_geostruct] = bc_dfs
pe = geostatistical_draws(self.pst,struct_dict=struct_dict,num_reals=num_reals,
sigma_range=sigma_range)
self.log("drawing realizations")
return pe
|
python
|
{
"resource": ""
}
|
q20042
|
PstFromFlopyModel.write_forward_run
|
train
|
def write_forward_run(self):
""" write the forward run script forward_run.py
"""
with open(os.path.join(self.m.model_ws,self.forward_run_file),'w') as f:
f.write("import os\nimport numpy as np\nimport pandas as pd\nimport flopy\n")
f.write("import pyemu\n")
for ex_imp in self.extra_forward_imports:
f.write('import {0}\n'.format(ex_imp))
for tmp_file in self.tmp_files:
f.write("try:\n")
f.write(" os.remove('{0}')\n".format(tmp_file))
f.write("except Exception as e:\n")
f.write(" print('error removing tmp file:{0}')\n".format(tmp_file))
for line in self.frun_pre_lines:
f.write(line+'\n')
for line in self.frun_model_lines:
f.write(line+'\n')
for line in self.frun_post_lines:
f.write(line+'\n')
|
python
|
{
"resource": ""
}
|
q20043
|
PstFromFlopyModel.parse_k
|
train
|
def parse_k(self,k,vals):
""" parse the iterable from a property or boundary condition argument
Parameters
----------
k : int or iterable int
the iterable
vals : iterable of ints
the acceptable values that k may contain
Returns
-------
k_vals : iterable of int
parsed k values
"""
try:
k = int(k)
except:
pass
else:
assert k in vals,"k {0} not in vals".format(k)
return [k]
if k is None:
return vals
else:
try:
k_vals = vals[k]
except Exception as e:
raise Exception("error slicing vals with {0}:{1}".
format(k,str(e)))
return k_vals
|
python
|
{
"resource": ""
}
|
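A small sketch of how parse_k() behaves for the three kinds of input it accepts; `helper` is again a hypothetical PstFromFlopyModel instance and the layer count of 4 is assumed.
# assumed: `helper` is a PstFromFlopyModel instance for a 4-layer model
layers = list(range(4))
print(helper.parse_k(2, layers))            # -> [2]            (scalar k must be in vals)
print(helper.parse_k(None, layers))         # -> [0, 1, 2, 3]   (None means "all")
print(helper.parse_k(slice(1, 3), layers))  # -> [1, 2]         (anything else slices vals)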
q20044
|
PstFromFlopyModel.parse_pakattr
|
train
|
def parse_pakattr(self,pakattr):
""" parse package-iterable pairs from a property or boundary condition
argument
Parameters
----------
pakattr : iterable len 2
Returns
-------
pak : flopy.PakBase
the flopy package from the model instance
attr : (varies)
the flopy attribute from pak. Could be Util2D, Util3D,
Transient2D, or MfList
attrname : (str)
the name of the attribute for MfList type. Only returned if
attr is MfList. For example, if attr is MfList and pak is
flopy.modflow.ModflowWel, then attrname can only be "flux"
"""
raw = pakattr.lower().split('.')
if len(raw) != 2:
self.logger.lraise("pakattr is wrong:{0}".format(pakattr))
pakname = raw[0]
attrname = raw[1]
pak = self.m.get_package(pakname)
if pak is None:
if pakname == "extra":
self.logger.statement("'extra' pak detected:{0}".format(pakattr))
ud = flopy.utils.Util3d(self.m,(self.m.nlay,self.m.nrow,self.m.ncol),np.float32,1.0,attrname)
return "extra",ud
self.logger.lraise("pak {0} not found".format(pakname))
if hasattr(pak,attrname):
attr = getattr(pak,attrname)
return pak,attr
elif hasattr(pak,"stress_period_data"):
dtype = pak.stress_period_data.dtype
if attrname not in dtype.names:
self.logger.lraise("attr {0} not found in dtype.names for {1}.stress_period_data".\
format(attrname,pakname))
attr = pak.stress_period_data
return pak,attr,attrname
# elif hasattr(pak,'hfb_data'):
# dtype = pak.hfb_data.dtype
# if attrname not in dtype.names:
# self.logger.lraise('attr {0} not found in dtypes.names for {1}.hfb_data. Thanks for playing.'.\
# format(attrname,pakname))
# attr = pak.hfb_data
# return pak, attr, attrname
else:
self.logger.lraise("unrecognized attr:{0}".format(attrname))
|
python
|
{
"resource": ""
}
|
q20045
|
PstFromFlopyModel.setup_list_pars
|
train
|
def setup_list_pars(self):
""" main entry point for setting up list multiplier
parameters
"""
tdf = self.setup_temporal_list_pars()
sdf = self.setup_spatial_list_pars()
if tdf is None and sdf is None:
return
os.chdir(self.m.model_ws)
try:
apply_list_pars()
except Exception as e:
os.chdir("..")
self.logger.lraise("error test running apply_list_pars():{0}".format(str(e)))
os.chdir('..')
line = "pyemu.helpers.apply_list_pars()\n"
self.logger.statement("forward_run line:{0}".format(line))
self.frun_pre_lines.append(line)
|
python
|
{
"resource": ""
}
|
q20046
|
PstFromFlopyModel.list_helper
|
train
|
def list_helper(self,k,pak,attr,col):
""" helper to setup list multiplier parameters for a given
k, pak, attr set.
Parameters
----------
k : int or iterable of int
the zero-based stress period indices
    pak : flopy.PakBase
the MODFLOW package
attr : MfList
the MfList instance
col : str
        the column name in the MfList recarray to parameterize
    Returns
    -------
    filename_model : str
        the external-path file name of the copied list file to be parameterized
"""
# special case for horrible HFB6 exception
# if type(pak) == flopy.modflow.mfhfb.ModflowHfb:
# filename = pak.file_name[0]
# else:
filename = attr.get_filename(k)
filename_model = os.path.join(self.m.external_path,filename)
shutil.copy2(os.path.join(self.m.model_ws,filename_model),
os.path.join(self.m.model_ws,self.list_org,filename))
return filename_model
|
python
|
{
"resource": ""
}
|
q20047
|
PstFromFlopyModel.setup_smp
|
train
|
def setup_smp(self):
""" setup observations from PEST-style SMP file pairs
"""
if self.obssim_smp_pairs is None:
return
if len(self.obssim_smp_pairs) == 2:
if isinstance(self.obssim_smp_pairs[0],str):
self.obssim_smp_pairs = [self.obssim_smp_pairs]
for obs_smp,sim_smp in self.obssim_smp_pairs:
self.log("processing {0} and {1} smp files".format(obs_smp,sim_smp))
if not os.path.exists(obs_smp):
self.logger.lraise("couldn't find obs smp: {0}".format(obs_smp))
if not os.path.exists(sim_smp):
self.logger.lraise("couldn't find sim smp: {0}".format(sim_smp))
new_obs_smp = os.path.join(self.m.model_ws,
os.path.split(obs_smp)[-1])
shutil.copy2(obs_smp,new_obs_smp)
new_sim_smp = os.path.join(self.m.model_ws,
os.path.split(sim_smp)[-1])
shutil.copy2(sim_smp,new_sim_smp)
pyemu.smp_utils.smp_to_ins(new_sim_smp)
|
python
|
{
"resource": ""
}
|
q20048
|
PstFromFlopyModel.setup_hob
|
train
|
def setup_hob(self):
""" setup observations from the MODFLOW HOB package
"""
if self.m.hob is None:
return
hob_out_unit = self.m.hob.iuhobsv
new_hob_out_fname = os.path.join(self.m.model_ws,self.m.get_output_attribute(unit=hob_out_unit))
org_hob_out_fname = os.path.join(self.org_model_ws,self.m.get_output_attribute(unit=hob_out_unit))
if not os.path.exists(org_hob_out_fname):
self.logger.warn("could not find hob out file: {0}...skipping".format(hob_out_fname))
return
shutil.copy2(org_hob_out_fname,new_hob_out_fname)
hob_df = pyemu.gw_utils.modflow_hob_to_instruction_file(new_hob_out_fname)
self.obs_dfs["hob"] = hob_df
    self.tmp_files.append(os.path.split(new_hob_out_fname)[-1])
|
python
|
{
"resource": ""
}
|
q20049
|
PstFromFlopyModel.setup_hyd
|
train
|
def setup_hyd(self):
""" setup observations from the MODFLOW HYDMOD package
"""
if self.m.hyd is None:
return
if self.mfhyd:
org_hyd_out = os.path.join(self.org_model_ws,self.m.name+".hyd.bin")
if not os.path.exists(org_hyd_out):
self.logger.warn("can't find existing hyd out file:{0}...skipping".
format(org_hyd_out))
return
new_hyd_out = os.path.join(self.m.model_ws,os.path.split(org_hyd_out)[-1])
shutil.copy2(org_hyd_out,new_hyd_out)
df = pyemu.gw_utils.modflow_hydmod_to_instruction_file(new_hyd_out)
df.loc[:,"obgnme"] = df.obsnme.apply(lambda x: '_'.join(x.split('_')[:-1]))
line = "pyemu.gw_utils.modflow_read_hydmod_file('{0}')".\
format(os.path.split(new_hyd_out)[-1])
self.logger.statement("forward_run line: {0}".format(line))
self.frun_post_lines.append(line)
self.obs_dfs["hyd"] = df
self.tmp_files.append(os.path.split(new_hyd_out)[-1])
|
python
|
{
"resource": ""
}
|
q20050
|
PstFromFlopyModel.setup_water_budget_obs
|
train
|
def setup_water_budget_obs(self):
""" setup observations from the MODFLOW list file for
    volume and flux water budget information
"""
if self.mflist_waterbudget:
org_listfile = os.path.join(self.org_model_ws,self.m.lst.file_name[0])
if os.path.exists(org_listfile):
shutil.copy2(org_listfile,os.path.join(self.m.model_ws,
self.m.lst.file_name[0]))
else:
self.logger.warn("can't find existing list file:{0}...skipping".
format(org_listfile))
return
list_file = os.path.join(self.m.model_ws,self.m.lst.file_name[0])
flx_file = os.path.join(self.m.model_ws,"flux.dat")
vol_file = os.path.join(self.m.model_ws,"vol.dat")
df = pyemu.gw_utils.setup_mflist_budget_obs(list_file,
flx_filename=flx_file,
vol_filename=vol_file,
start_datetime=self.m.start_datetime)
if df is not None:
self.obs_dfs["wb"] = df
#line = "try:\n os.remove('{0}')\nexcept:\n pass".format(os.path.split(list_file)[-1])
#self.logger.statement("forward_run line:{0}".format(line))
#self.frun_pre_lines.append(line)
self.tmp_files.append(os.path.split(list_file)[-1])
line = "pyemu.gw_utils.apply_mflist_budget_obs('{0}',flx_filename='{1}',vol_filename='{2}',start_datetime='{3}')".\
format(os.path.split(list_file)[-1],
os.path.split(flx_file)[-1],
os.path.split(vol_file)[-1],
self.m.start_datetime)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_post_lines.append(line)
|
python
|
{
"resource": ""
}
|
q20051
|
read_resfile
|
train
|
def read_resfile(resfile):
"""load a residual file into a pandas.DataFrame
Parameters
----------
resfile : str
residual file name
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
assert os.path.exists(resfile),"read_resfile() error: resfile " +\
"{0} not found".format(resfile)
converters = {"name": str_con, "group": str_con}
f = open(resfile, 'r')
while True:
line = f.readline()
if line == '':
raise Exception("Pst.get_residuals: EOF before finding "+
"header in resfile: " + resfile)
if "name" in line.lower():
header = line.lower().strip().split()
break
    res_df = pd.read_csv(f, header=None, names=header, sep=r"\s+",
                         converters=converters)
res_df.index = res_df.name
f.close()
return res_df
|
python
|
{
"resource": ""
}
|
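A hedged usage sketch for read_resfile(): 'case.res' is a placeholder for a PEST-written residual file, and the column names used below follow the standard residual-file header parsed by the function.
res_df = read_resfile("case.res")                    # 'case.res' is a placeholder path
res_df["swr"] = (res_df.residual * res_df.weight) ** 2
print(res_df.groupby("group")["swr"].sum())          # phi contribution by observation group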
q20052
|
res_from_en
|
train
|
def res_from_en(pst,enfile):
"""load ensemble file for residual into a pandas.DataFrame
Parameters
----------
    pst : pyemu.Pst
        control file instance supplying the observation data
    enfile : str or ensemble
        ensemble file name, or an already-loaded (DataFrame-like) observation ensemble
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
converters = {"name": str_con, "group": str_con}
try: #substitute ensemble for res, 'base' if there, otherwise mean
obs=pst.observation_data
if isinstance(enfile,str):
df=pd.read_csv(enfile,converters=converters)
df.columns=df.columns.str.lower()
            df = df.set_index('real_name').T.rename_axis('name').rename_axis(None, axis=1)
else:
df = enfile.T
if 'base' in df.columns:
df['modelled']=df['base']
df['std']=df.std(axis=1)
else:
df['modelled']=df.mean(axis=1)
df['std']=df.std(axis=1)
#probably a more pandastic way to do this
res_df=df[['modelled','std']].copy()
res_df['group']=obs.loc[:,'obgnme'].copy()
res_df['measured']=obs['obsval'].copy()
res_df['weight']=obs['weight'].copy()
res_df['residual']=res_df['measured']-res_df['modelled']
except Exception as e:
raise Exception("Pst.res_from_en:{0}".format(str(e)))
return res_df
|
python
|
{
"resource": ""
}
|
q20053
|
read_parfile
|
train
|
def read_parfile(parfile):
"""load a pest-compatible .par file into a pandas.DataFrame
Parameters
----------
parfile : str
pest parameter file name
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
assert os.path.exists(parfile), "Pst.parrep(): parfile not found: " +\
str(parfile)
f = open(parfile, 'r')
header = f.readline()
    par_df = pd.read_csv(f, header=None,
                         names=["parnme", "parval1", "scale", "offset"],
                         sep=r"\s+")
par_df.index = par_df.parnme
    f.close()
    return par_df
|
python
|
{
"resource": ""
}
|
q20054
|
write_parfile
|
train
|
def write_parfile(df,parfile):
""" write a pest parameter file from a dataframe
Parameters
----------
df : (pandas.DataFrame)
dataframe with column names that correspond to the entries
in the parameter data section of a pest control file
parfile : str
name of the parameter file to write
"""
columns = ["parnme","parval1","scale","offset"]
formatters = {"parnme":lambda x:"{0:20s}".format(x),
"parval1":lambda x:"{0:20.7E}".format(x),
"scale":lambda x:"{0:20.7E}".format(x),
"offset":lambda x:"{0:20.7E}".format(x)}
for col in columns:
assert col in df.columns,"write_parfile() error: " +\
"{0} not found in df".format(col)
with open(parfile,'w') as f:
f.write("single point\n")
f.write(df.to_string(col_space=0,
columns=columns,
formatters=formatters,
justify="right",
header=False,
index=False,
index_names=False) + '\n')
|
python
|
{
"resource": ""
}
|
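A round-trip sketch combining read_parfile() and write_parfile(); 'case.par' is a placeholder parameter file and the 10 percent perturbation is purely illustrative.
par_df = read_parfile("case.par")           # columns: parnme, parval1, scale, offset
par_df.loc[:, "parval1"] *= 1.1             # illustrative tweak to every parameter value
write_parfile(par_df, "case_perturbed.par")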
q20055
|
parse_tpl_file
|
train
|
def parse_tpl_file(tpl_file):
""" parse a pest template file to get the parameter names
Parameters
----------
tpl_file : str
template file name
Returns
-------
par_names : list
list of parameter names
"""
par_names = set()
with open(tpl_file,'r') as f:
try:
header = f.readline().strip().split()
assert header[0].lower() in ["ptf","jtf"],\
"template file error: must start with [ptf,jtf], not:" +\
str(header[0])
assert len(header) == 2,\
"template file error: header line must have two entries: " +\
str(header)
marker = header[1]
assert len(marker) == 1,\
"template file error: marker must be a single character, not:" +\
str(marker)
for line in f:
par_line = set(line.lower().strip().split(marker)[1::2])
par_names.update(par_line)
#par_names.extend(par_line)
#for p in par_line:
# if p not in par_names:
# par_names.append(p)
except Exception as e:
raise Exception("error processing template file " +\
tpl_file+" :\n" + str(e))
#par_names = [pn.strip().lower() for pn in par_names]
#seen = set()
#seen_add = seen.add
#return [x for x in par_names if not (x in seen or seen_add(x))]
return [p.strip() for p in list(par_names)]
|
python
|
{
"resource": ""
}
|
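A self-contained sketch: write a tiny two-parameter template file and parse it back; the file name and parameter names are illustrative.
with open("demo.tpl", "w") as f:
    f.write("ptf ~\n")                       # header: template file with '~' marker
    f.write("recharge  ~  rch_1   ~\n")
    f.write("hk        ~   hk_1   ~\n")
print(sorted(parse_tpl_file("demo.tpl")))    # -> ['hk_1', 'rch_1']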
q20056
|
write_to_template
|
train
|
def write_to_template(parvals,tpl_file,in_file):
""" write parameter values to model input files using template files
Parameters
----------
parvals : dict or pandas.Series
a way to look up parameter values using parameter names
tpl_file : str
template file
in_file : str
input file
"""
f_in = open(in_file,'w')
f_tpl = open(tpl_file,'r')
header = f_tpl.readline().strip().split()
assert header[0].lower() in ["ptf", "jtf"], \
"template file error: must start with [ptf,jtf], not:" + \
str(header[0])
assert len(header) == 2, \
"template file error: header line must have two entries: " + \
str(header)
marker = header[1]
assert len(marker) == 1, \
"template file error: marker must be a single character, not:" + \
str(marker)
for line in f_tpl:
if marker not in line:
f_in.write(line)
else:
line = line.rstrip()
par_names = line.lower().split(marker)[1::2]
par_names = [name.strip() for name in par_names]
start,end = get_marker_indices(marker,line)
assert len(par_names) == len(start)
new_line = line[:start[0]]
between = [line[e:s] for s,e in zip(start[1:],end[:-1])]
for i,name in enumerate(par_names):
s,e = start[i],end[i]
w = e - s
if w > 15:
d = 6
else:
d = 3
fmt = "{0:" + str(w)+"."+str(d)+"E}"
val_str = fmt.format(parvals[name])
new_line += val_str
if i != len(par_names) - 1:
new_line += between[i]
new_line += line[end[-1]:]
f_in.write(new_line+'\n')
f_tpl.close()
f_in.close()
|
python
|
{
"resource": ""
}
|
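Continuing the sketch above, write_to_template() fills the marker fields with formatted values; this assumes the companion helper get_marker_indices() from the same module is available alongside it.
parvals = {"rch_1": 1.0e-4, "hk_1": 12.5}          # parameter name -> value lookup
write_to_template(parvals, "demo.tpl", "demo.in")  # writes the model input file demo.in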
q20057
|
parse_ins_file
|
train
|
def parse_ins_file(ins_file):
"""parse a pest instruction file to get observation names
Parameters
----------
ins_file : str
instruction file name
Returns
-------
list of observation names
"""
obs_names = []
with open(ins_file,'r') as f:
header = f.readline().strip().split()
assert header[0].lower() in ["pif","jif"],\
"instruction file error: must start with [pif,jif], not:" +\
str(header[0])
marker = header[1]
assert len(marker) == 1,\
"instruction file error: marker must be a single character, not:" +\
str(marker)
for line in f:
line = line.lower()
if marker in line:
raw = line.lower().strip().split(marker)
for item in raw[::2]:
obs_names.extend(parse_ins_string(item))
else:
obs_names.extend(parse_ins_string(line.strip()))
#obs_names = [on.strip().lower() for on in obs_names]
return obs_names
|
python
|
{
"resource": ""
}
|
q20058
|
parse_ins_string
|
train
|
def parse_ins_string(string):
""" split up an instruction file line to get the observation names
Parameters
----------
string : str
instruction file line
Returns
-------
obs_names : list
list of observation names
"""
istart_markers = ["[","(","!"]
iend_markers = ["]",")","!"]
obs_names = []
idx = 0
while True:
if idx >= len(string) - 1:
break
char = string[idx]
if char in istart_markers:
em = iend_markers[istart_markers.index(char)]
# print("\n",idx)
# print(string)
# print(string[idx+1:])
# print(string[idx+1:].index(em))
# print(string[idx+1:].index(em)+idx+1)
eidx = min(len(string),string[idx+1:].index(em)+idx+1)
obs_name = string[idx+1:eidx]
if obs_name.lower() != "dum":
obs_names.append(obs_name)
idx = eidx + 1
else:
idx += 1
return obs_names
|
python
|
{
"resource": ""
}
|
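A quick illustration of parse_ins_string() on a typical instruction line; note that 'dum' placeholders are skipped by design.
print(parse_ins_string("l1 w !obs1! w !dum! w !obs2!"))   # -> ['obs1', 'obs2']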
q20059
|
populate_dataframe
|
train
|
def populate_dataframe(index,columns, default_dict, dtype):
""" helper function to populate a generic Pst dataframe attribute. This
function is called as part of constructing a generic Pst instance
Parameters
----------
index : (varies)
something to use as the dataframe index
columns: (varies)
something to use as the dataframe columns
default_dict : (dict)
dictionary of default values for columns
dtype : numpy.dtype
dtype used to cast dataframe columns
Returns
-------
new_df : pandas.DataFrame
"""
new_df = pd.DataFrame(index=index,columns=columns)
for fieldname,dt in zip(columns,dtype.descr):
default = default_dict[fieldname]
new_df.loc[:,fieldname] = default
new_df.loc[:,fieldname] = new_df.loc[:,fieldname].astype(dt[1])
return new_df
|
python
|
{
"resource": ""
}
|
q20060
|
generic_pst
|
train
|
def generic_pst(par_names=["par1"],obs_names=["obs1"],addreg=False):
"""generate a generic pst instance. This can used to later fill in
the Pst parts programatically.
Parameters
----------
par_names : (list)
parameter names to setup
    obs_names : (list)
        observation names to setup
    addreg : bool
        if True, add zero-order Tikhonov prior information equations. Default is False
Returns
-------
new_pst : pyemu.Pst
"""
if not isinstance(par_names,list):
par_names = list(par_names)
if not isinstance(obs_names,list):
obs_names = list(obs_names)
new_pst = pyemu.Pst("pest.pst",load=False)
pargp_data = populate_dataframe(["pargp"], new_pst.pargp_fieldnames,
new_pst.pargp_defaults, new_pst.pargp_dtype)
new_pst.parameter_groups = pargp_data
par_data = populate_dataframe(par_names,new_pst.par_fieldnames,
new_pst.par_defaults,new_pst.par_dtype)
par_data.loc[:,"parnme"] = par_names
par_data.index = par_names
par_data.sort_index(inplace=True)
new_pst.parameter_data = par_data
obs_data = populate_dataframe(obs_names,new_pst.obs_fieldnames,
new_pst.obs_defaults,new_pst.obs_dtype)
obs_data.loc[:,"obsnme"] = obs_names
obs_data.index = obs_names
obs_data.sort_index(inplace=True)
new_pst.observation_data = obs_data
new_pst.template_files = ["file.tpl"]
new_pst.input_files = ["file.in"]
new_pst.instruction_files = ["file.ins"]
new_pst.output_files = ["file.out"]
new_pst.model_command = ["model.bat"]
new_pst.prior_information = new_pst.null_prior
#new_pst.other_lines = ["* singular value decomposition\n","1\n",
# "{0:d} {1:15.6E}\n".format(new_pst.npar_adj,1.0E-6),
# "1 1 1\n"]
if addreg:
new_pst.zero_order_tikhonov()
return new_pst
|
python
|
{
"resource": ""
}
|
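A hedged sketch of building a skeleton control file with generic_pst() and then filling in the pieces that make it usable; all file names here are placeholders.
pst = generic_pst(par_names=["hk_1", "rch_1"], obs_names=["h_01", "h_02"])
pst.parameter_data.loc[:, "parlbnd"] = 0.1        # tighten the generic bounds
pst.parameter_data.loc[:, "parubnd"] = 10.0
pst.template_files = ["model.tpl"]                # point at real interface files
pst.input_files = ["model.in"]
pst.instruction_files = ["model.ins"]
pst.output_files = ["model.out"]
pst.model_command = ["python forward_run.py"]
pst.write("skeleton.pst")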
q20061
|
try_run_inschek
|
train
|
def try_run_inschek(pst):
""" attempt to run INSCHEK for each instruction file, model output
file pair in a pyemu.Pst. If the run is successful, the INSCHEK written
.obf file is used to populate the pst.observation_data.obsval attribute
Parameters
----------
pst : (pyemu.Pst)
"""
for ins_file,out_file in zip(pst.instruction_files,pst.output_files):
df = _try_run_inschek(ins_file,out_file)
if df is not None:
pst.observation_data.loc[df.index, "obsval"] = df.obsval
|
python
|
{
"resource": ""
}
|
q20062
|
get_phi_comps_from_recfile
|
train
|
def get_phi_comps_from_recfile(recfile):
"""read the phi components from a record file by iteration
Parameters
----------
recfile : str
pest record file name
Returns
-------
iters : dict
nested dictionary of iteration number, {group,contribution}
"""
iiter = 1
iters = {}
f = open(recfile,'r')
while True:
line = f.readline()
if line == '':
break
if "starting phi for this iteration" in line.lower() or \
"final phi" in line.lower():
contributions = {}
while True:
line = f.readline()
if line == '':
break
if "contribution to phi" not in line.lower():
iters[iiter] = contributions
iiter += 1
break
raw = line.strip().split()
val = float(raw[-1])
group = raw[-3].lower().replace('\"', '')
contributions[group] = val
    f.close()
    return iters
|
python
|
{
"resource": ""
}
|
q20063
|
res_from_obseravtion_data
|
train
|
def res_from_obseravtion_data(observation_data):
"""create a generic residual dataframe filled with np.NaN for
missing information
Parameters
----------
observation_data : pandas.DataFrame
pyemu.Pst.observation_data
Returns
-------
res_df : pandas.DataFrame
"""
res_df = observation_data.copy()
res_df.loc[:, "name"] = res_df.pop("obsnme")
res_df.loc[:, "measured"] = res_df.pop("obsval")
res_df.loc[:, "group"] = res_df.pop("obgnme")
res_df.loc[:, "modelled"] = np.NaN
res_df.loc[:, "residual"] = np.NaN
return res_df
|
python
|
{
"resource": ""
}
|
q20064
|
clean_missing_exponent
|
train
|
def clean_missing_exponent(pst_filename,clean_filename="clean.pst"):
"""fixes the issue where some terrible fortran program may have
written a floating point format without the 'e' - like 1.0-3, really?!
Parameters
----------
pst_filename : str
the pest control file
clean_filename : str
the new pest control file to write. Default is "clean.pst"
Returns
-------
None
"""
lines = []
with open(pst_filename,'r') as f:
for line in f:
line = line.lower().strip()
if '+' in line:
raw = line.split('+')
for i,r in enumerate(raw[:-1]):
if r[-1] != 'e':
r = r + 'e'
raw[i] = r
lines.append('+'.join(raw))
else:
lines.append(line)
with open(clean_filename,'w') as f:
for line in lines:
f.write(line+'\n')
|
python
|
{
"resource": ""
}
|
q20065
|
Pst.phi
|
train
|
def phi(self):
"""get the weighted total objective function
Returns
-------
phi : float
sum of squared residuals
"""
    phi = 0.0
    for grp, contrib in self.phi_components.items():
        phi += contrib
    return phi
|
python
|
{
"resource": ""
}
|
q20066
|
Pst.phi_components
|
train
|
def phi_components(self):
""" get the individual components of the total objective function
Returns
-------
dict : dict
dictionary of observation group, contribution to total phi
Raises
------
Assertion error if Pst.observation_data groups don't match
Pst.res groups
"""
# calculate phi components for each obs group
components = {}
ogroups = self.observation_data.groupby("obgnme").groups
rgroups = self.res.groupby("group").groups
self.res.index = self.res.name
for og,onames in ogroups.items():
#assert og in rgroups.keys(),"Pst.phi_componentw obs group " +\
# "not found: " + str(og)
#og_res_df = self.res.ix[rgroups[og]]
og_res_df = self.res.loc[onames,:].dropna()
#og_res_df.index = og_res_df.name
        og_df = self.observation_data.loc[ogroups[og],:]
og_df.index = og_df.obsnme
#og_res_df = og_res_df.loc[og_df.index,:]
assert og_df.shape[0] == og_res_df.shape[0],\
" Pst.phi_components error: group residual dataframe row length" +\
"doesn't match observation data group dataframe row length" + \
str(og_df.shape) + " vs. " + str(og_res_df.shape)
components[og] = np.sum((og_res_df["residual"] *
og_df["weight"]) ** 2)
if not self.control_data.pestmode.startswith("reg") and \
self.prior_information.shape[0] > 0:
ogroups = self.prior_information.groupby("obgnme").groups
for og in ogroups.keys():
assert og in rgroups.keys(),"Pst.adjust_weights_res() obs group " +\
"not found: " + str(og)
            og_res_df = self.res.loc[rgroups[og],:]
og_res_df.index = og_res_df.name
            og_df = self.prior_information.loc[ogroups[og],:]
og_df.index = og_df.pilbl
og_res_df = og_res_df.loc[og_df.index,:]
assert og_df.shape[0] == og_res_df.shape[0],\
" Pst.phi_components error: group residual dataframe row length" +\
"doesn't match observation data group dataframe row length" + \
str(og_df.shape) + " vs. " + str(og_res_df.shape)
components[og] = np.sum((og_res_df["residual"] *
og_df["weight"]) ** 2)
return components
|
python
|
{
"resource": ""
}
|
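A short usage sketch: phi and phi_components are properties of a loaded Pst, so they only require that a residual (.res/.rei) file can be found next to the control file; 'case.pst' is a placeholder.
import pyemu
pst = pyemu.Pst("case.pst")      # placeholder control file with a matching residual file
print(pst.phi)                   # total weighted sum of squared residuals
print(pst.phi_components)        # {observation group: contribution to phi}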
q20067
|
Pst.phi_components_normalized
|
train
|
def phi_components_normalized(self):
""" get the individual components of the total objective function
normalized to the total PHI being 1.0
Returns
-------
dict : dict
dictionary of observation group, normalized contribution to total phi
Raises
------
Assertion error if self.observation_data groups don't match
self.res groups
"""
# use a dictionary comprehension to go through and normalize each component of phi to the total
phi_components_normalized = {i: self.phi_components[i]/self.phi for i in self.phi_components}
return phi_components_normalized
|
python
|
{
"resource": ""
}
|
q20068
|
Pst.set_res
|
train
|
def set_res(self,res):
""" reset the private Pst.res attribute
Parameters
----------
res : (varies)
something to use as Pst.res attribute
"""
if isinstance(res,str):
res = pst_utils.read_resfile(res)
self.__res = res
|
python
|
{
"resource": ""
}
|
q20069
|
Pst.res
|
train
|
def res(self):
"""get the residuals dataframe attribute
Returns
-------
res : pandas.DataFrame
Note
----
if the Pst.__res attribute has not been loaded,
this call loads the res dataframe from a file
"""
if self.__res is not None:
return self.__res
else:
if self.resfile is not None:
assert os.path.exists(self.resfile),"Pst.res: self.resfile " +\
str(self.resfile) + " does not exist"
else:
self.resfile = self.filename.replace(".pst", ".res")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".res", ".rei")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".rei", ".base.rei")
if not os.path.exists(self.resfile):
if self.new_filename is not None:
self.resfile = self.new_filename.replace(".pst",".res")
if not os.path.exists(self.resfile):
self.resfile = self.resfile.replace(".res","rei")
if not os.path.exists(self.resfile):
raise Exception("Pst.res: " +
"could not residual file case.res" +
" or case.rei" + " or case.base.rei" +
" or case.obs.csv")
res = pst_utils.read_resfile(self.resfile)
missing_bool = self.observation_data.obsnme.apply\
(lambda x: x not in res.name)
missing = self.observation_data.obsnme[missing_bool]
if missing.shape[0] > 0:
raise Exception("Pst.res: the following observations " +
"were not found in " +
"{0}:{1}".format(self.resfile,','.join(missing)))
self.__res = res
return self.__res
|
python
|
{
"resource": ""
}
|
q20070
|
Pst.nprior
|
train
|
def nprior(self):
"""number of prior information equations
Returns
-------
nprior : int
the number of prior info equations
"""
self.control_data.nprior = self.prior_information.shape[0]
return self.control_data.nprior
|
python
|
{
"resource": ""
}
|
q20071
|
Pst.nnz_obs
|
train
|
def nnz_obs(self):
""" get the number of non-zero weighted observations
Returns
-------
nnz_obs : int
the number of non-zeros weighted observations
"""
nnz = 0
for w in self.observation_data.weight:
if w > 0.0:
nnz += 1
return nnz
|
python
|
{
"resource": ""
}
|
q20072
|
Pst.nobs
|
train
|
def nobs(self):
"""get the number of observations
Returns
-------
nobs : int
the number of observations
"""
self.control_data.nobs = self.observation_data.shape[0]
return self.control_data.nobs
|
python
|
{
"resource": ""
}
|
q20073
|
Pst.npar
|
train
|
def npar(self):
"""get number of parameters
Returns
-------
npar : int
the number of parameters
"""
self.control_data.npar = self.parameter_data.shape[0]
return self.control_data.npar
|
python
|
{
"resource": ""
}
|
q20074
|
Pst.pars_in_groups
|
train
|
def pars_in_groups(self):
"""
return a dictionary of parameter names in each parameter group.
Returns:
dictionary
"""
pargp = self.par_groups
allpars = dict()
for cpg in pargp:
allpars[cpg] = [i for i in self.parameter_data.loc[self.parameter_data.pargp == cpg, 'parnme']]
return allpars
|
python
|
{
"resource": ""
}
|
q20075
|
Pst.obs_groups
|
train
|
def obs_groups(self):
"""get the observation groups
Returns
-------
obs_groups : list
a list of unique observation groups
"""
og = list(self.observation_data.groupby("obgnme").groups.keys())
#og = list(map(pst_utils.SFMT, og))
return og
|
python
|
{
"resource": ""
}
|
q20076
|
Pst.nnz_obs_groups
|
train
|
def nnz_obs_groups(self):
""" get the observation groups that contain at least one non-zero weighted
observation
Returns
-------
nnz_obs_groups : list
a list of observation groups that contain at
least one non-zero weighted observation
"""
og = []
obs = self.observation_data
for g in self.obs_groups:
if obs.loc[obs.obgnme==g,"weight"].sum() > 0.0:
og.append(g)
return og
|
python
|
{
"resource": ""
}
|
q20077
|
Pst.adj_par_groups
|
train
|
def adj_par_groups(self):
"""get the parameter groups with atleast one adjustable parameter
Returns
-------
adj_par_groups : list
a list of parameter groups with at least one adjustable parameter
"""
adj_pargp = []
for pargp in self.par_groups:
ptrans = self.parameter_data.loc[self.parameter_data.pargp==pargp,"partrans"].values
if "log" in ptrans or "none" in ptrans:
adj_pargp.append(pargp)
return adj_pargp
|
python
|
{
"resource": ""
}
|
q20078
|
Pst.prior_groups
|
train
|
def prior_groups(self):
"""get the prior info groups
Returns
-------
prior_groups : list
a list of prior information groups
"""
og = list(self.prior_information.groupby("obgnme").groups.keys())
#og = list(map(pst_utils.SFMT, og))
return og
|
python
|
{
"resource": ""
}
|
q20079
|
Pst.prior_names
|
train
|
def prior_names(self):
""" get the prior information names
Returns
-------
prior_names : list
a list of prior information names
"""
return list(self.prior_information.groupby(
self.prior_information.index).groups.keys())
|
python
|
{
"resource": ""
}
|
q20080
|
Pst.nnz_obs_names
|
train
|
def nnz_obs_names(self):
"""get the non-zero weight observation names
Returns
-------
nnz_obs_names : list
a list of non-zero weighted observation names
"""
# nz_names = []
# for w,n in zip(self.observation_data.weight,
# self.observation_data.obsnme):
# if w > 0.0:
# nz_names.append(n)
obs = self.observation_data
nz_names = list(obs.loc[obs.weight>0.0,"obsnme"])
return nz_names
|
python
|
{
"resource": ""
}
|
q20081
|
Pst.zero_weight_obs_names
|
train
|
def zero_weight_obs_names(self):
""" get the zero-weighted observation names
Returns
-------
zero_weight_obs_names : list
a list of zero-weighted observation names
"""
self.observation_data.index = self.observation_data.obsnme
groups = self.observation_data.groupby(
self.observation_data.weight.apply(lambda x: x==0.0)).groups
if True in groups:
return list(self.observation_data.loc[groups[True],"obsnme"])
else:
return []
|
python
|
{
"resource": ""
}
|
q20082
|
Pst._read_df
|
train
|
def _read_df(f,nrows,names,converters,defaults=None):
""" a private method to read part of an open file into a pandas.DataFrame.
Parameters
----------
f : file object
nrows : int
number of rows to read
names : list
names to set the columns of the dataframe with
converters : dict
dictionary of lambda functions to convert strings
to numerical format
defaults : dict
dictionary of default values to assign columns.
Default is None
Returns
-------
pandas.DataFrame : pandas.DataFrame
"""
seek_point = f.tell()
line = f.readline()
raw = line.strip().split()
if raw[0].lower() == "external":
filename = raw[1]
assert os.path.exists(filename),"Pst._read_df() error: external file '{0}' not found".format(filename)
df = pd.read_csv(filename,index_col=False,comment='#')
df.columns = df.columns.str.lower()
for name in names:
assert name in df.columns,"Pst._read_df() error: name" +\
"'{0}' not in external file '{1}' columns".format(name,filename)
if name in converters:
df.loc[:,name] = df.loc[:,name].apply(converters[name])
if defaults is not None:
for name in names:
df.loc[:, name] = df.loc[:, name].fillna(defaults[name])
else:
if nrows is None:
raise Exception("Pst._read_df() error: non-external sections require nrows")
f.seek(seek_point)
df = pd.read_csv(f, header=None,names=names,
nrows=nrows,delim_whitespace=True,
converters=converters, index_col=False,
comment='#')
# in case there was some extra junk at the end of the lines
if df.shape[1] > len(names):
            df = df.iloc[:, :len(names)]
df.columns = names
isnull = pd.isnull(df)
if defaults is not None:
for name in names:
df.loc[:,name] = df.loc[:,name].fillna(defaults[name])
elif np.any(pd.isnull(df).values.flatten()):
raise Exception("NANs found")
f.seek(seek_point)
extras = []
for i in range(nrows):
line = f.readline()
extra = np.NaN
if '#' in line:
raw = line.strip().split('#')
extra = ' # '.join(raw[1:])
extras.append(extra)
df.loc[:,"extra"] = extras
return df
|
python
|
{
"resource": ""
}
|
q20083
|
Pst.rectify_pgroups
|
train
|
def rectify_pgroups(self):
""" private method to synchronize parameter groups section with
the parameter data section
"""
# add any parameters groups
pdata_groups = list(self.parameter_data.loc[:,"pargp"].\
value_counts().keys())
#print(pdata_groups)
need_groups = []
existing_groups = list(self.parameter_groups.pargpnme)
for pg in pdata_groups:
if pg not in existing_groups:
need_groups.append(pg)
if len(need_groups) > 0:
#print(need_groups)
defaults = copy.copy(pst_utils.pst_config["pargp_defaults"])
for grp in need_groups:
defaults["pargpnme"] = grp
self.parameter_groups = \
self.parameter_groups.append(defaults,ignore_index=True)
# now drop any left over groups that aren't needed
for gp in self.parameter_groups.loc[:,"pargpnme"]:
if gp in pdata_groups and gp not in need_groups:
need_groups.append(gp)
self.parameter_groups.index = self.parameter_groups.pargpnme
self.parameter_groups = self.parameter_groups.loc[need_groups,:]
|
python
|
{
"resource": ""
}
|
q20084
|
Pst._parse_pi_par_names
|
train
|
def _parse_pi_par_names(self):
""" private method to get the parameter names from prior information
equations. Sets a 'names' column in Pst.prior_information that is a list
of parameter names
"""
if self.prior_information.shape[0] == 0:
return
if "names" in self.prior_information.columns:
self.prior_information.pop("names")
if "rhs" in self.prior_information.columns:
self.prior_information.pop("rhs")
def parse(eqs):
raw = eqs.split('=')
rhs = float(raw[1])
raw = [i for i in re.split('[###]',
raw[0].lower().strip().replace(' + ','###').replace(' - ','###')) if i != '']
# in case of a leading '-' or '+'
if len(raw[0]) == 0:
raw = raw[1:]
# pnames = []
# for r in raw:
# if '*' not in r:
# continue
# pname = r.split('*')[1].replace("log(", '').replace(')', '').strip()
# pnames.append(pname)
# return pnames
return [r.split('*')[1].replace("log(",'').replace(')','').strip() for r in raw if '*' in r]
self.prior_information.loc[:,"names"] =\
self.prior_information.equation.apply(lambda x: parse(x))
|
python
|
{
"resource": ""
}
|
q20085
|
Pst.add_pi_equation
|
train
|
def add_pi_equation(self,par_names,pilbl=None,rhs=0.0,weight=1.0,
obs_group="pi_obgnme",coef_dict={}):
""" a helper to construct a new prior information equation.
Parameters
----------
par_names : list
parameter names in the equation
pilbl : str
name to assign the prior information equation. If None,
a generic equation name is formed. Default is None
rhs : (float)
the right-hand side of the equation
weight : (float)
the weight of the equation
obs_group : str
the observation group for the equation. Default is 'pi_obgnme'
coef_dict : dict
a dictionary of parameter name, coefficient pairs to assign
leading coefficients for one or more parameters in the equation.
        If a parameter is not listed, 1.0 is used for its coefficient.
Default is {}
"""
if pilbl is None:
pilbl = "pilbl_{0}".format(self.__pi_count)
self.__pi_count += 1
missing,fixed = [],[]
for par_name in par_names:
if par_name not in self.parameter_data.parnme:
missing.append(par_name)
elif self.parameter_data.loc[par_name,"partrans"] in ["fixed","tied"]:
fixed.append(par_name)
if len(missing) > 0:
raise Exception("Pst.add_pi_equation(): the following pars "+\
" were not found: {0}".format(','.join(missing)))
if len(fixed) > 0:
raise Exception("Pst.add_pi_equation(): the following pars "+\
" were are fixed/tied: {0}".format(','.join(missing)))
eqs_str = ''
sign = ''
for i,par_name in enumerate(par_names):
coef = coef_dict.get(par_name,1.0)
if coef < 0.0:
sign = '-'
coef = np.abs(coef)
elif i > 0: sign = '+'
if self.parameter_data.loc[par_name,"partrans"] == "log":
par_name = "log({})".format(par_name)
eqs_str += " {0} {1} * {2} ".format(sign,coef,par_name)
eqs_str += " = {0}".format(rhs)
self.prior_information.loc[pilbl,"pilbl"] = pilbl
self.prior_information.loc[pilbl,"equation"] = eqs_str
self.prior_information.loc[pilbl,"weight"] = weight
self.prior_information.loc[pilbl,"obgnme"] = obs_group
|
python
|
{
"resource": ""
}
|
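A sketch of adding a simple difference-style prior information equation; the parameter names are placeholders and must exist (and not be fixed or tied) in pst.parameter_data.
pst.add_pi_equation(["hk_1", "hk_2"], pilbl="hk_diff", rhs=0.0, weight=2.0,
                    obs_group="regul_hk", coef_dict={"hk_2": -1.0})
print(pst.prior_information.loc["hk_diff", "equation"])   # e.g. " 1.0 * hk_1  - 1.0 * hk_2  = 0.0"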
q20086
|
Pst.write
|
train
|
def write(self,new_filename,update_regul=True,version=None):
"""main entry point to write a pest control file.
Parameters
----------
new_filename : str
name of the new pest control file
update_regul : (boolean)
flag to update zero-order Tikhonov prior information
equations to prefer the current parameter values
version : int
flag for which version of control file to write (must be 1 or 2).
        if None, uses Pst._version, which is set in the constructor and may be
        modified during the load
"""
if version is None:
version = self._version
if version == 1:
return self._write_version1(new_filename=new_filename,update_regul=update_regul)
elif version == 2:
return self._write_version2(new_filename=new_filename, update_regul=update_regul)
else:
raise Exception("Pst.write() error: version must be 1 or 2, not '{0}'".format(version))
|
python
|
{
"resource": ""
}
|
q20087
|
Pst.parrep
|
train
|
def parrep(self, parfile=None,enforce_bounds=True):
"""replicates the pest parrep util. replaces the parval1 field in the
parameter data section dataframe
Parameters
----------
parfile : str
parameter file to use. If None, try to use
a parameter file that corresponds to the case name.
Default is None
    enforce_bounds : bool
flag to enforce parameter bounds after parameter values are updated.
This is useful because PEST and PEST++ round the parameter values in the
par file, which may cause slight bound violations
"""
if parfile is None:
parfile = self.filename.replace(".pst", ".par")
par_df = pst_utils.read_parfile(parfile)
self.parameter_data.index = self.parameter_data.parnme
par_df.index = par_df.parnme
self.parameter_data.parval1 = par_df.parval1
self.parameter_data.scale = par_df.scale
self.parameter_data.offset = par_df.offset
if enforce_bounds:
par = self.parameter_data
idx = par.loc[par.parval1 > par.parubnd,"parnme"]
par.loc[idx,"parval1"] = par.loc[idx,"parubnd"]
idx = par.loc[par.parval1 < par.parlbnd,"parnme"]
par.loc[idx, "parval1"] = par.loc[idx, "parlbnd"]
|
python
|
{
"resource": ""
}
|
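A usage sketch for parrep(): pull values from a PEST-written .par file back into the parameter data and re-write the control file; file names are placeholders.
pst.parrep("case.par")            # update parval1 from the .par file (bounds enforced by default)
pst.write("case_updated.pst")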
q20088
|
Pst.adjust_weights_recfile
|
train
|
def adjust_weights_recfile(self, recfile=None,original_ceiling=True):
"""adjusts the weights by group of the observations based on the phi components
in a pest record file so that total phi is equal to the number of
non-zero weighted observations
Parameters
----------
recfile : str
record file name. If None, try to use a record file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True
"""
if recfile is None:
recfile = self.filename.replace(".pst", ".rec")
assert os.path.exists(recfile), \
"Pst.adjust_weights_recfile(): recfile not found: " +\
str(recfile)
iter_components = pst_utils.get_phi_comps_from_recfile(recfile)
    iters = sorted(iter_components.keys())
obs = self.observation_data
ogroups = obs.groupby("obgnme").groups
last_complete_iter = None
    for ogroup, idxs in ogroups.items():
for iiter in iters[::-1]:
incomplete = False
if ogroup not in iter_components[iiter]:
incomplete = True
break
if not incomplete:
last_complete_iter = iiter
break
if last_complete_iter is None:
raise Exception("Pst.pwtadj2(): no complete phi component" +
" records found in recfile")
self._adjust_weights_by_phi_components(
iter_components[last_complete_iter],original_ceiling)
|
python
|
{
"resource": ""
}
|
q20089
|
Pst.adjust_weights_resfile
|
train
|
def adjust_weights_resfile(self, resfile=None,original_ceiling=True):
"""adjusts the weights by group of the observations based on the phi components
in a pest residual file so that total phi is equal to the number of
non-zero weighted observations
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True
"""
if resfile is not None:
self.resfile = resfile
self.__res = None
phi_comps = self.phi_components
self._adjust_weights_by_phi_components(phi_comps,original_ceiling)
|
python
|
{
"resource": ""
}
|
q20090
|
Pst.adjust_weights_discrepancy
|
train
|
def adjust_weights_discrepancy(self, resfile=None,original_ceiling=True):
"""adjusts the weights of each non-zero weight observation based
on the residual in the pest residual file so each observations contribution
to phi is 1.0
Parameters
----------
resfile : str
residual file name. If None, try to use a residual file
with the Pst case name. Default is None
original_ceiling : bool
flag to keep weights from increasing - this is generally a good idea.
Default is True
"""
if resfile is not None:
self.resfile = resfile
self.__res = None
obs = self.observation_data.loc[self.nnz_obs_names,:]
swr = (self.res.loc[self.nnz_obs_names,:].residual * obs.weight)**2
factors = (1.0/swr).apply(np.sqrt)
if original_ceiling:
factors = factors.apply(lambda x: 1.0 if x > 1.0 else x)
self.observation_data.loc[self.nnz_obs_names,"weight"] *= factors
|
python
|
{
"resource": ""
}
|
q20091
|
Pst._adjust_weights_by_phi_components
|
train
|
def _adjust_weights_by_phi_components(self, components,original_ceiling):
"""resets the weights of observations by group to account for
residual phi components.
Parameters
----------
components : dict
a dictionary of obs group:phi contribution pairs
original_ceiling : bool
flag to keep weights from increasing
"""
obs = self.observation_data
nz_groups = obs.groupby(obs["weight"].map(lambda x: x == 0)).groups
ogroups = obs.groupby("obgnme").groups
for ogroup, idxs in ogroups.items():
if self.control_data.pestmode.startswith("regul") \
and "regul" in ogroup.lower():
continue
og_phi = components[ogroup]
nz_groups = obs.loc[idxs,:].groupby(obs.loc[idxs,"weight"].\
map(lambda x: x == 0)).groups
og_nzobs = 0
if False in nz_groups.keys():
og_nzobs = len(nz_groups[False])
if og_nzobs == 0 and og_phi > 0:
raise Exception("Pst.adjust_weights_by_phi_components():"
" no obs with nonzero weight," +
" but phi > 0 for group:" + str(ogroup))
if og_phi > 0:
factor = np.sqrt(float(og_nzobs) / float(og_phi))
if original_ceiling:
factor = min(factor,1.0)
obs.loc[idxs,"weight"] = obs.weight[idxs] * factor
self.observation_data = obs
|
python
|
{
"resource": ""
}
|
q20092
|
Pst.__reset_weights
|
train
|
def __reset_weights(self, target_phis, res_idxs, obs_idxs):
"""private method to reset weights based on target phi values
for each group. This method should not be called directly
Parameters
----------
target_phis : dict
target phi contribution for groups to reweight
res_idxs : dict
the index positions of each group of interest
in the res dataframe
obs_idxs : dict
the index positions of each group of interest
in the observation data dataframe
"""
for item in target_phis.keys():
assert item in res_idxs.keys(),\
"Pst.__reset_weights(): " + str(item) +\
" not in residual group indices"
assert item in obs_idxs.keys(), \
"Pst.__reset_weights(): " + str(item) +\
" not in observation group indices"
actual_phi = ((self.res.loc[res_idxs[item], "residual"] *
self.observation_data.loc
[obs_idxs[item], "weight"])**2).sum()
if actual_phi > 0.0:
weight_mult = np.sqrt(target_phis[item] / actual_phi)
self.observation_data.loc[obs_idxs[item], "weight"] *= weight_mult
else:
("Pst.__reset_weights() warning: phi group {0} has zero phi, skipping...".format(item))
|
python
|
{
"resource": ""
}
|
q20093
|
Pst.adjust_weights
|
train
|
def adjust_weights(self,obs_dict=None,
obsgrp_dict=None):
"""reset the weights of observation groups to contribute a specified
amount to the composite objective function
Parameters
----------
obs_dict : dict
dictionary of obs name,new contribution pairs
obsgrp_dict : dict
dictionary of obs group name,contribution pairs
Note
----
if all observations in a named obs group have zero weight, they will be
assigned a non-zero weight so that the request phi contribution
can be met. Similarly, any observations listed in obs_dict with zero
weight will also be reset
"""
self.observation_data.index = self.observation_data.obsnme
self.res.index = self.res.name
if obsgrp_dict is not None:
# reset groups with all zero weights
obs = self.observation_data
for grp in obsgrp_dict.keys():
if obs.loc[obs.obgnme==grp,"weight"].sum() == 0.0:
obs.loc[obs.obgnme==grp,"weight"] = 1.0
res_groups = self.res.groupby("group").groups
obs_groups = self.observation_data.groupby("obgnme").groups
self.__reset_weights(obsgrp_dict, res_groups, obs_groups)
if obs_dict is not None:
# reset obs with zero weight
obs = self.observation_data
for oname in obs_dict.keys():
if obs.loc[oname,"weight"] == 0.0:
obs.loc[oname,"weight"] = 1.0
#res_groups = self.res.groupby("name").groups
res_groups = self.res.groupby(self.res.index).groups
#obs_groups = self.observation_data.groupby("obsnme").groups
obs_groups = self.observation_data.groupby(self.observation_data.index).groups
self.__reset_weights(obs_dict, res_groups, obs_groups)
|
python
|
{
"resource": ""
}
|
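A sketch of re-balancing the objective function with adjust_weights(); the group names and target phi contributions are illustrative, and a residual file must be available so pst.res can be loaded.
pst.adjust_weights(obsgrp_dict={"heads": 100.0, "flux": 50.0})
print(pst.phi_components)         # contributions should now be near the requested targets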
q20094
|
Pst.proportional_weights
|
train
|
def proportional_weights(self, fraction_stdev=1.0, wmax=100.0,
leave_zero=True):
"""setup weights inversely proportional to the observation value
Parameters
----------
fraction_stdev : float
the fraction portion of the observation
val to treat as the standard deviation. set to 1.0 for
inversely proportional
wmax : float
maximum weight to allow
leave_zero : bool
flag to leave existing zero weights
"""
new_weights = []
for oval, ow in zip(self.observation_data.obsval,
self.observation_data.weight):
if leave_zero and ow == 0.0:
ow = 0.0
elif oval == 0.0:
ow = wmax
else:
nw = 1.0 / (np.abs(oval) * fraction_stdev)
ow = min(wmax, nw)
new_weights.append(ow)
self.observation_data.weight = new_weights
|
python
|
{
"resource": ""
}
|
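A sketch of inverse-proportional weighting: with fraction_stdev=0.1 each non-zero weight becomes 1.0/(0.1*|obsval|), capped at wmax.
pst.proportional_weights(fraction_stdev=0.1, wmax=50.0, leave_zero=True)
print(pst.observation_data.weight.describe())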
q20095
|
Pst.calculate_pertubations
|
train
|
def calculate_pertubations(self):
""" experimental method to calculate finite difference parameter
pertubations. The pertubation values are added to the
Pst.parameter_data attribute
Note
----
user beware!
"""
self.build_increments()
self.parameter_data.loc[:,"pertubation"] = \
self.parameter_data.parval1 + \
self.parameter_data.increment
self.parameter_data.loc[:,"out_forward"] = \
self.parameter_data.loc[:,"pertubation"] > \
self.parameter_data.loc[:,"parubnd"]
out_forward = self.parameter_data.groupby("out_forward").groups
if True in out_forward:
self.parameter_data.loc[out_forward[True],"pertubation"] = \
self.parameter_data.loc[out_forward[True],"parval1"] - \
self.parameter_data.loc[out_forward[True],"increment"]
self.parameter_data.loc[:,"out_back"] = \
self.parameter_data.loc[:,"pertubation"] < \
self.parameter_data.loc[:,"parlbnd"]
out_back = self.parameter_data.groupby("out_back").groups
if True in out_back:
still_out = out_back[True]
print(self.parameter_data.loc[still_out,:],flush=True)
raise Exception("Pst.calculate_pertubations(): " +\
"can't calc pertubations for the following "+\
"Parameters {0}".format(','.join(still_out)))
|
python
|
{
"resource": ""
}
|
q20096
|
Pst.build_increments
|
train
|
def build_increments(self):
""" experimental method to calculate parameter increments for use
in the finite difference pertubation calculations
Note
----
user beware!
"""
self.enforce_bounds()
self.add_transform_columns()
par_groups = self.parameter_data.groupby("pargp").groups
inctype = self.parameter_groups.groupby("inctyp").groups
for itype,inc_groups in inctype.items():
pnames = []
for group in inc_groups:
pnames.extend(par_groups[group])
derinc = self.parameter_groups.loc[group,"derinc"]
self.parameter_data.loc[par_groups[group],"derinc"] = derinc
if itype == "absolute":
self.parameter_data.loc[pnames,"increment"] = \
self.parameter_data.loc[pnames,"derinc"]
elif itype == "relative":
self.parameter_data.loc[pnames,"increment"] = \
self.parameter_data.loc[pnames,"derinc"] * \
self.parameter_data.loc[pnames,"parval1"]
elif itype == "rel_to_max":
mx = self.parameter_data.loc[pnames,"parval1"].max()
self.parameter_data.loc[pnames,"increment"] = \
self.parameter_data.loc[pnames,"derinc"] * mx
else:
raise Exception('Pst.get_derivative_increments(): '+\
'unrecognized increment type:{0}'.format(itype))
#account for fixed pars
isfixed = self.parameter_data.partrans=="fixed"
self.parameter_data.loc[isfixed,"increment"] = \
self.parameter_data.loc[isfixed,"parval1"]
|
python
|
{
"resource": ""
}
|
q20097
|
Pst.add_transform_columns
|
train
|
def add_transform_columns(self):
""" add transformed values to the Pst.parameter_data attribute
"""
for col in ["parval1","parlbnd","parubnd","increment"]:
if col not in self.parameter_data.columns:
continue
self.parameter_data.loc[:,col+"_trans"] = (self.parameter_data.loc[:,col] *
self.parameter_data.scale) +\
self.parameter_data.offset
#isnotfixed = self.parameter_data.partrans != "fixed"
islog = self.parameter_data.partrans == "log"
self.parameter_data.loc[islog,col+"_trans"] = \
self.parameter_data.loc[islog,col+"_trans"].\
apply(lambda x:np.log10(x))
|
python
|
{
"resource": ""
}
|
q20098
|
Pst.enforce_bounds
|
train
|
def enforce_bounds(self):
""" enforce bounds violation resulting from the
parameter pertubation calculations
"""
too_big = self.parameter_data.loc[:,"parval1"] > \
self.parameter_data.loc[:,"parubnd"]
self.parameter_data.loc[too_big,"parval1"] = \
self.parameter_data.loc[too_big,"parubnd"]
too_small = self.parameter_data.loc[:,"parval1"] < \
self.parameter_data.loc[:,"parlbnd"]
self.parameter_data.loc[too_small,"parval1"] = \
self.parameter_data.loc[too_small,"parlbnd"]
|
python
|
{
"resource": ""
}
|
q20099
|
Pst.from_io_files
|
train
|
def from_io_files(cls,tpl_files,in_files,ins_files,out_files,pst_filename=None):
""" create a Pst instance from model interface files. Assigns generic values for
parameter info. Tries to use INSCHEK to set somewhat meaningful observation
values
Parameters
----------
tpl_files : list
list of template file names
in_files : list
list of model input file names (pairs with template files)
ins_files : list
list of instruction file names
out_files : list
list of model output file names (pairs with instruction files)
pst_filename : str
name of control file to write. If None, no file is written.
Default is None
Returns
-------
Pst : Pst
Note
----
calls pyemu.helpers.pst_from_io_files()
"""
from pyemu import helpers
return helpers.pst_from_io_files(tpl_files=tpl_files,in_files=in_files,
ins_files=ins_files,out_files=out_files,
pst_filename=pst_filename)
|
python
|
{
"resource": ""
}
|