| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q20100
|
Pst.less_than_obs_constraints
|
train
|
def less_than_obs_constraints(self):
"""get the names of the observations that
are listed as less than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
less than constraints
"""
obs = self.observation_data
lt_obs = obs.loc[obs.apply(lambda x: self._is_less_const(x.obgnme) \
and x.weight != 0.0,axis=1),"obsnme"]
return lt_obs
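# Usage sketch (editor's addition, not part of the pyemu source): assumes a
# control file "pest.pst" exists; in pyemu this getter (and the three parallel
# ones in the rows below) is exposed as a property on Pst.
import pyemu
pst = pyemu.Pst("pest.pst")
print(pst.less_than_obs_constraints)  # obsnmes of non-zero weighted "<" constraints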
|
python
|
{
"resource": ""
}
|
q20101
|
Pst.less_than_pi_constraints
|
train
|
def less_than_pi_constraints(self):
"""get the names of the prior information eqs that
are listed as less than inequality constraints. Zero-
weighted pi are skipped
Returns
-------
pandas.Series : pilbl of prior information that are non-zero weighted
less than constraints
"""
pi = self.prior_information
lt_pi = pi.loc[pi.apply(lambda x: self._is_less_const(x.obgnme) \
and x.weight != 0.0, axis=1), "pilbl"]
return lt_pi
|
python
|
{
"resource": ""
}
|
q20102
|
Pst.greater_than_obs_constraints
|
train
|
def greater_than_obs_constraints(self):
"""get the names of the observations that
are listed as greater than inequality constraints. Zero-
weighted obs are skipped
Returns
-------
pandas.Series : obsnme of observations that are non-zero weighted
greater than constraints
"""
obs = self.observation_data
gt_obs = obs.loc[obs.apply(lambda x: self._is_greater_const(x.obgnme) \
and x.weight != 0.0,axis=1),"obsnme"]
return gt_obs
|
python
|
{
"resource": ""
}
|
q20103
|
Pst.greater_than_pi_constraints
|
train
|
def greater_than_pi_constraints(self):
"""get the names of the prior information eqs that
are listed as greater than inequality constraints. Zero-
weighted pi are skipped
Returns
-------
pandas.Series : pilbl of prior information that are non-zero weighted
greater than constraints
"""
pi = self.prior_information
gt_pi = pi.loc[pi.apply(lambda x: self._is_greater_const(x.obgnme) \
and x.weight != 0.0, axis=1), "pilbl"]
return gt_pi
|
python
|
{
"resource": ""
}
|
q20104
|
LinearAnalysis.__load_pst
|
train
|
def __load_pst(self):
"""private method set the pst attribute
"""
if self.pst_arg is None:
return None
if isinstance(self.pst_arg, Pst):
self.__pst = self.pst_arg
return self.pst
else:
try:
self.log("loading pst: " + str(self.pst_arg))
self.__pst = Pst(self.pst_arg)
self.log("loading pst: " + str(self.pst_arg))
return self.pst
except Exception as e:
raise Exception("linear_analysis.__load_pst(): error loading"+\
" pest control from argument: " +
str(self.pst_arg) + '\n->' + str(e))
|
python
|
{
"resource": ""
}
|
q20105
|
LinearAnalysis.__load_jco
|
train
|
def __load_jco(self):
"""private method to set the jco attribute from a file or a matrix object
"""
if self.jco_arg is None:
return None
#raise Exception("linear_analysis.__load_jco(): jco_arg is None")
if isinstance(self.jco_arg, Matrix):
self.__jco = self.jco_arg
elif isinstance(self.jco_arg, str):
self.__jco = self.__fromfile(self.jco_arg,astype=Jco)
else:
raise Exception("linear_analysis.__load_jco(): jco_arg must " +
"be a matrix object or a file name: " +
str(self.jco_arg))
|
python
|
{
"resource": ""
}
|
q20106
|
LinearAnalysis.nnz_obs_names
|
train
|
def nnz_obs_names(self):
""" wrapper around pyemu.Pst.nnz_obs_names for listing non-zero
observation names
Returns
-------
nnz_obs_names : list
pyemu.Pst.nnz_obs_names
"""
if self.__pst is not None:
return self.pst.nnz_obs_names
else:
return self.jco.obs_names
|
python
|
{
"resource": ""
}
|
q20107
|
LinearAnalysis.adj_par_names
|
train
|
def adj_par_names(self):
""" wrapper around pyemu.Pst.adj_par_names for list adjustable parameter
names
Returns
-------
adj_par_names : list
pyemu.Pst.adj_par_names
"""
if self.__pst is not None:
return self.pst.adj_par_names
else:
return self.jco.par_names
|
python
|
{
"resource": ""
}
|
q20108
|
LinearAnalysis.predictions_iter
|
train
|
def predictions_iter(self):
""" property decorated prediction iterator
Returns
-------
iterator : iterator
iterator on prediction sensitivity vectors (matrix)
"""
for fname in self.forecast_names:
yield self.predictions.get(col_names=fname)
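# Usage sketch (editor's addition): assumes a LinearAnalysis built from a
# hypothetical jacobian file and forecast names; yields one sensitivity
# vector (a one-column Matrix) per forecast.
import pyemu
la = pyemu.LinearAnalysis(jco="pest.jcb", forecasts=["fore1", "fore2"])
for pred in la.predictions_iter:
    print(pred.col_names)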
|
python
|
{
"resource": ""
}
|
q20109
|
LinearAnalysis.pst
|
train
|
def pst(self):
""" get the pyemu.Pst attribute
Returns
-------
pst : pyemu.Pst
Note
----
returns a reference
If LinearAnalysis.__pst is None, then the pst attribute is
dynamically loaded before returning
"""
if self.__pst is None and self.pst_arg is None:
raise Exception("linear_analysis.pst: can't access self.pst:" +
"no pest control argument passed")
elif self.__pst:
return self.__pst
else:
self.__load_pst()
return self.__pst
|
python
|
{
"resource": ""
}
|
q20110
|
LinearAnalysis.fehalf
|
train
|
def fehalf(self):
"""get the KL parcov scaling matrix attribute. Create the attribute if
it has not yet been created
Returns
-------
fehalf : pyemu.Matrix
"""
if self.__fehalf is not None:
return self.__fehalf
self.log("fehalf")
self.__fehalf = self.parcov.u * (self.parcov.s ** (0.5))
self.log("fehalf")
return self.__fehalf
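# Minimal numeric sketch (editor's addition): fehalf = U * S^0.5 is a square
# root of parcov, since parcov = U S U^T for a symmetric covariance matrix.
import numpy as np
parcov = np.array([[2.0, 0.5], [0.5, 1.0]])      # made-up prior covariance
U, s, _ = np.linalg.svd(parcov)
fehalf = U * np.sqrt(s)                          # scales column j of U by sqrt(s_j)
print(np.allclose(fehalf @ fehalf.T, parcov))    # True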
|
python
|
{
"resource": ""
}
|
q20111
|
LinearAnalysis.qhalf
|
train
|
def qhalf(self):
"""get the square root of the cofactor matrix attribute. Create the attribute if
it has not yet been created
Returns
-------
qhalf : pyemu.Matrix
"""
if self.__qhalf is not None:
return self.__qhalf
self.log("qhalf")
self.__qhalf = self.obscov ** (-0.5)
self.log("qhalf")
return self.__qhalf
|
python
|
{
"resource": ""
}
|
q20112
|
LinearAnalysis.qhalfx
|
train
|
def qhalfx(self):
"""get the half normal matrix attribute. Create the attribute if
it has not yet been created
Returns
-------
qhalfx : pyemu.Matrix
"""
if self.__qhalfx is None:
self.log("qhalfx")
self.__qhalfx = self.qhalf * self.jco
self.log("qhalfx")
return self.__qhalfx
|
python
|
{
"resource": ""
}
|
q20113
|
LinearAnalysis.xtqx
|
train
|
def xtqx(self):
"""get the normal matrix attribute. Create the attribute if
it has not yet been created
Returns
-------
xtqx : pyemu.Matrix
"""
if self.__xtqx is None:
self.log("xtqx")
self.__xtqx = self.jco.T * (self.obscov ** -1) * self.jco
self.log("xtqx")
return self.__xtqx
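# Minimal numeric sketch (editor's addition) of the normal matrix X^T Q X,
# with Q the inverse of the observation covariance, in plain numpy.
import numpy as np
jco = np.array([[1.0, 2.0], [0.0, 1.0], [3.0, 0.0]])  # 3 obs x 2 pars (made up)
obscov = np.diag([0.5, 1.0, 2.0])                     # made-up diagonal obs covariance
xtqx = jco.T @ np.linalg.inv(obscov) @ jco
print(xtqx.shape)                                     # (2, 2) normal matrix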
|
python
|
{
"resource": ""
}
|
q20114
|
LinearAnalysis.mle_parameter_estimate
|
train
|
def mle_parameter_estimate(self):
""" get the maximum likelihood parameter estimate.
Returns
-------
post_expt : pandas.Series
the maximum likelihood parameter estimates
"""
res = self.pst.res
assert res is not None
# build the prior expectation parameter vector
prior_expt = self.pst.parameter_data.loc[:,["parval1"]].copy()
islog = self.pst.parameter_data.partrans == "log"
prior_expt.loc[islog] = prior_expt.loc[islog].apply(np.log10)
prior_expt = Matrix.from_dataframe(prior_expt)
prior_expt.col_names = ["prior_expt"]
# build the residual vector
res_vec = Matrix.from_dataframe(res.loc[:,["residual"]])
# calc posterior expectation
upgrade = self.mle_covariance * self.jco.T * res_vec
upgrade.col_names = ["prior_expt"]
post_expt = prior_expt + upgrade
# post processing - back log transform
post_expt = pd.DataFrame(data=post_expt.x,index=post_expt.row_names,
columns=["post_expt"])
post_expt.loc[islog,:] = 10.0**post_expt.loc[islog,:]
return post_expt
|
python
|
{
"resource": ""
}
|
q20115
|
LinearAnalysis.prior_prediction
|
train
|
def prior_prediction(self):
"""get a dict of prior prediction variances
Returns
-------
prior_prediction : dict
dictionary of prediction name, prior variance pairs
"""
if self.__prior_prediction is not None:
return self.__prior_prediction
else:
if self.predictions is not None:
self.log("propagating prior to predictions")
prior_cov = self.predictions.T *\
self.parcov * self.predictions
self.__prior_prediction = {n:v for n,v in
zip(prior_cov.row_names,
np.diag(prior_cov.x))}
self.log("propagating prior to predictions")
else:
self.__prior_prediction = {}
return self.__prior_prediction
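# Minimal numeric sketch (editor's addition) of the FOSM identity used above:
# for a forecast sensitivity vector y, prior variance = y^T * parcov * y.
import numpy as np
y = np.array([[0.5], [2.0]])                  # made-up sensitivity vector
parcov = np.array([[1.0, 0.1], [0.1, 4.0]])   # made-up prior covariance
print(float(y.T @ parcov @ y))                # scalar prior forecast variance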
|
python
|
{
"resource": ""
}
|
q20116
|
LinearAnalysis.apply_karhunen_loeve_scaling
|
train
|
def apply_karhunen_loeve_scaling(self):
"""apply karhuene-loeve scaling to the jacobian matrix.
Note
----
This scaling is not necessary for analyses using Schur's
complement, but can be very important for error variance
analyses. This operation effectively transfers prior knowledge
specified in the parcov to the jacobian and resets parcov to the
identity matrix.
"""
cnames = copy.deepcopy(self.jco.col_names)
self.__jco *= self.fehalf
self.__jco.col_names = cnames
self.__parcov = self.parcov.identity
|
python
|
{
"resource": ""
}
|
q20117
|
LinearAnalysis.clean
|
train
|
def clean(self):
"""drop regularization and prior information observation from the jco
"""
if self.pst_arg is None:
self.logger.statement("linear_analysis.clean(): not pst object")
return
if not self.pst.estimation and self.pst.nprior > 0:
self.drop_prior_information()
|
python
|
{
"resource": ""
}
|
q20118
|
LinearAnalysis.reset_pst
|
train
|
def reset_pst(self,arg):
""" reset the LinearAnalysis.pst attribute
Parameters
----------
arg : str or pyemu.Pst
the value to assign to the pst attribute
"""
self.logger.statement("resetting pst")
self.__pst = None
self.pst_arg = arg
|
python
|
{
"resource": ""
}
|
q20119
|
LinearAnalysis.reset_parcov
|
train
|
def reset_parcov(self,arg=None):
"""reset the parcov attribute to None
Parameters
----------
arg : str or pyemu.Matrix
the value to assign to the parcov attribute. If None,
the private __parcov attribute is cleared but not reset
"""
self.logger.statement("resetting parcov")
self.__parcov = None
if arg is not None:
self.parcov_arg = arg
|
python
|
{
"resource": ""
}
|
q20120
|
LinearAnalysis.reset_obscov
|
train
|
def reset_obscov(self,arg=None):
"""reset the obscov attribute to None
Parameters
----------
arg : str or pyemu.Matrix
the value to assign to the obscov attribute. If None,
the private __obscov attribute is cleared but not reset
"""
self.logger.statement("resetting obscov")
self.__obscov = None
if arg is not None:
self.obscov_arg = arg
|
python
|
{
"resource": ""
}
|
q20121
|
LinearAnalysis.drop_prior_information
|
train
|
def drop_prior_information(self):
"""drop the prior information from the jco and pst attributes
"""
if self.jco is None:
self.logger.statement("can't drop prior info, LinearAnalysis.jco is None")
return
nprior_str = str(self.pst.nprior)
self.log("removing " + nprior_str + " prior info from jco, pst, and " +
"obs cov")
#pi_names = list(self.pst.prior_information.pilbl.values)
pi_names = list(self.pst.prior_names)
missing = [name for name in pi_names if name not in self.jco.obs_names]
if len(missing) > 0:
raise Exception("LinearAnalysis.drop_prior_information(): "+
" prior info not found: {0}".format(missing))
if self.jco is not None:
self.__jco.drop(pi_names, axis=0)
self.__pst.prior_information = self.pst.null_prior
self.__pst.control_data.pestmode = "estimation"
#self.__obscov.drop(pi_names,axis=0)
self.log("removing " + nprior_str + " prior info from jco, pst, and " +
"obs cov")
|
python
|
{
"resource": ""
}
|
q20122
|
LinearAnalysis.adjust_obscov_resfile
|
train
|
def adjust_obscov_resfile(self, resfile=None):
"""reset the elements of obscov by scaling the implied weights
based on the phi components in res_file so that the total phi
is equal to the number of non-zero weights.
Parameters
----------
resfile : str
residual file to use. If None, residual file with case name is
sought. default is None
Note
----
calls pyemu.Pst.adjust_weights_resfile()
"""
self.pst.adjust_weights_resfile(resfile)
self.__obscov.from_observation_data(self.pst)
|
python
|
{
"resource": ""
}
|
q20123
|
LinearAnalysis.get_par_css_dataframe
|
train
|
def get_par_css_dataframe(self):
""" get a dataframe of composite scaled sensitivities. Includes both
PEST-style and Hill-style.
Returns
-------
css : pandas.DataFrame
"""
assert self.jco is not None
assert self.pst is not None
jco = self.jco.to_dataframe()
weights = self.pst.observation_data.loc[jco.index,"weight"].copy().values
jco = (jco.T * weights).T
dss_sum = jco.apply(np.linalg.norm)
css = (dss_sum / float(self.pst.nnz_obs)).to_frame()
css.columns = ["pest_css"]
# log transform stuff
self.pst.add_transform_columns()
parval1 = self.pst.parameter_data.loc[dss_sum.index,"parval1_trans"].values
css.loc[:,"hill_css"] = (dss_sum * parval1) / (float(self.pst.nnz_obs)**2)
return css
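# Usage sketch (editor's addition): assumes hypothetical "pest.jcb"/"pest.pst"
# files; the returned frame has one row per parameter with "pest_css" and
# "hill_css" columns.
import pyemu
sc = pyemu.Schur(jco="pest.jcb", pst="pest.pst")
css = sc.get_par_css_dataframe()
print(css.sort_values("pest_css", ascending=False).head())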
|
python
|
{
"resource": ""
}
|
q20124
|
LinearAnalysis.get_cso_dataframe
|
train
|
def get_cso_dataframe(self):
"""
get a dataframe of composite observation sensitivity, as returned by PEST in the
seo file.
Note that this formulation deviates slightly from the PEST documentation in that the
values are divided by (npar-1) rather than by (npar).
The equation is cso_j = ((Q^1/2*J*J^T*Q^1/2)^1/2)_jj/(NPAR-1)
Returns:
cso : pandas.DataFrame
"""
assert self.jco is not None
assert self.pst is not None
weights = self.pst.observation_data.loc[self.jco.to_dataframe().index,"weight"].copy().values
cso = np.diag(np.sqrt((self.qhalfx.x.dot(self.qhalfx.x.T))))/(float(self.pst.npar-1))
cso_df = pd.DataFrame.from_dict({'obnme':self.jco.to_dataframe().index,'cso':cso})
cso_df.index=cso_df['obnme']
cso_df.drop('obnme', axis=1, inplace=True)
return cso_df
|
python
|
{
"resource": ""
}
|
q20125
|
plot_id_bar
|
train
|
def plot_id_bar(id_df, nsv=None, logger=None, **kwargs):
"""
Plot a stacked bar chart of identifiability based on a identifiability dataframe
Parameters
----------
id_df : pandas dataframe of identifiability
nsv : number of singular values to consider
logger : pyemu.Logger
kwargs : dict of keyword arguments
Returns
-------
ax : matplotlib.Axis
Example
-------
``>>> import pyemu``
``>>> pest_obj = pyemu.Pst(pest_control_file)``
``>>> ev = pyemu.ErrVar(jco='freyberg_jac.jcb')``
``>>> id_df = ev.get_identifiability_dataframe(singular_value=48)``
``>>> pyemu.plot_id_bar(id_df, nsv=12, figsize=(12,4))``
"""
if logger is None:
logger=Logger('Default_Logger.log',echo=False)
logger.log("plot id bar")
df = id_df.copy()
# drop the final `ident` column
if 'ident' in df.columns:
df.drop('ident', inplace=True, axis=1)
if nsv is None or nsv > len(df.columns):
nsv = len(df.columns)
logger.log('set number of SVs and number in the dataframe')
df = df[df.columns[:nsv]]
df['ident'] = df.sum(axis=1)
df.sort_values(by='ident', inplace=True, ascending=False)
df.drop('ident', inplace=True, axis=1)
if 'figsize' in kwargs:
figsize=kwargs['figsize']
else:
figsize = (8, 10.5)
if "ax" in kwargs:
ax = kwargs["ax"]
else:
fig = plt.figure(figsize=figsize)
ax = plt.subplot(1,1,1)
# plot the stacked bar chart (the easy part!)
df.plot.bar(stacked=True, cmap='jet_r', legend=False, ax=ax)
#
# horrible shenanigans to make a colorbar rather than a legend
#
# special case colormap just dark red if one SV
if nsv == 1:
tcm = matplotlib.colors.LinearSegmentedColormap.from_list('one_sv', [plt.get_cmap('jet_r')(0)] * 2, N=2)
sm = plt.cm.ScalarMappable(cmap=tcm, norm=matplotlib.colors.Normalize(vmin=0, vmax=nsv + 1))
# or typically just rock the jet_r colormap over the range of SVs
else:
sm = plt.cm.ScalarMappable(cmap=plt.get_cmap('jet_r'), norm=matplotlib.colors.Normalize(vmin=1, vmax=nsv))
sm._A = []
# now, if too many ticks for the colorbar, summarize them
if nsv < 20:
ticks = range(1, nsv + 1)
else:
ticks = np.arange(1, nsv + 1, int((nsv + 1) / 30))
cb = plt.colorbar(sm)
cb.set_ticks(ticks)
logger.log('plot id bar')
return ax
|
python
|
{
"resource": ""
}
|
q20126
|
res_phi_pie
|
train
|
def res_phi_pie(pst,logger=None, **kwargs):
"""plot current phi components as a pie chart.
Parameters
----------
pst : pyemu.Pst
logger : pyemu.Logger
kwargs : dict
accepts 'include_zero' as a flag to include phi groups with
only zero-weight obs (not sure why anyone would do this, but
whatevs).
Returns
-------
ax : matplotlib.Axis
"""
if logger is None:
logger=Logger('Default_Logger.log',echo=False)
logger.log("plot res_phi_pie")
if "ensemble" in kwargs:
try:
res=pst_utils.res_from_en(pst,kwargs['ensemble'])
except:
logger.statement("res_1to1: could not find ensemble file {0}".format(kwargs['ensemble']))
else:
try:
res = pst.res
except:
logger.lraise("res_phi_pie: pst.res is None, couldn't find residuals file")
obs = pst.observation_data
phi = pst.phi
phi_comps = pst.phi_components
norm_phi_comps = pst.phi_components_normalized
keys = list(phi_comps.keys())
if "include_zero" not in kwargs or kwargs["include_zero"] is True:
phi_comps = {k:phi_comps[k] for k in keys if phi_comps[k] > 0.0}
keys = list(phi_comps.keys())
norm_phi_comps = {k:norm_phi_comps[k] for k in keys}
if "ax" in kwargs:
ax = kwargs["ax"]
else:
figsize = kwargs.get("figsize", (8, 8))  # assumed default; `figsize` was previously undefined here
fig = plt.figure(figsize=figsize)
ax = plt.subplot(1,1,1,aspect="equal")
labels = ["{0}\n{1:4G}\n({2:3.1f}%)".format(k,phi_comps[k],100. * (phi_comps[k] / phi)) for k in keys]
ax.pie([float(norm_phi_comps[k]) for k in keys],labels=labels)
logger.log("plot res_phi_pie")
if "filename" in kwargs:
plt.savefig(kwargs["filename"])
return ax
|
python
|
{
"resource": ""
}
|
q20127
|
smp_to_ins
|
train
|
def smp_to_ins(smp_filename,ins_filename=None,use_generic_names=False,
gwutils_compliant=False, datetime_format=None,prefix=''):
""" create an instruction file for an smp file
Parameters
----------
smp_filename : str
existing smp file
ins_filename: str
instruction file to create. If None, create
an instruction file using the smp filename
with the ".ins" suffix
use_generic_names : bool
flag to force observations names to use a generic
int counter instead of trying to use a datetime str
gwutils_compliant : bool
flag to use instruction set that is compliant with the
pest gw utils (fixed format instructions). If false,
use free format (with whitespace) instruction set
datetime_format : str
str to pass to datetime.strptime in the smp_to_dataframe() function
prefix : str
a prefix to add to the front of the obsnmes. Default is ''
Returns
-------
df : pandas.DataFrame
dataframe instance of the smp file with the observation names and
instruction lines as additional columns
"""
if ins_filename is None:
ins_filename = smp_filename+".ins"
df = smp_to_dataframe(smp_filename,datetime_format=datetime_format)
df.loc[:,"ins_strings"] = None
df.loc[:,"observation_names"] = None
name_groups = df.groupby("name").groups
for name,idxs in name_groups.items():
if not use_generic_names and len(name) <= 11:
onames = df.loc[idxs,"datetime"].apply(lambda x: prefix+name+'_'+x.strftime("%d%m%Y")).values
else:
onames = [prefix+name+"_{0:d}".format(i) for i in range(len(idxs))]
if any(len(on) > 20 for on in onames):
long_names = [oname for oname in onames if len(oname) > 20]
raise Exception("observation names longer than 20 chars:\n{0}".format(str(long_names)))
if gwutils_compliant:
ins_strs = ["l1 ({0:s})39:46".format(on) for on in onames]
else:
ins_strs = ["l1 w w w !{0:s}!".format(on) for on in onames]
df.loc[idxs,"observation_names"] = onames
df.loc[idxs,"ins_strings"] = ins_strs
counts = df.observation_names.value_counts()
dup_sites = [name for name in counts.index if counts[name] > 1]
if len(dup_sites) > 0:
raise Exception("duplicate observation names found:{0}"\
.format(','.join(dup_sites)))
with open(ins_filename,'w') as f:
f.write("pif ~\n")
[f.write(ins_str+"\n") for ins_str in df.loc[:,"ins_strings"]]
return df
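# Usage sketch (editor's addition): assumes a hypothetical smp file "obs.smp";
# writes "obs.smp.ins" and returns the dataframe with names and instructions.
df = smp_to_ins("obs.smp", prefix="h_")
print(df.loc[:, ["observation_names", "ins_strings"]].head())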
|
python
|
{
"resource": ""
}
|
q20128
|
dataframe_to_smp
|
train
|
def dataframe_to_smp(dataframe,smp_filename,name_col="name",
datetime_col="datetime",value_col="value",
datetime_format="dd/mm/yyyy",
value_format="{0:15.6E}",
max_name_len=12):
""" write a dataframe as an smp file
Parameters
----------
dataframe : pandas.DataFrame
smp_filename : str
smp file to write
name_col: str
the column in the dataframe that marks the site name
datetime_col: str
the column in the dataframe that is a datetime instance
value_col: str
the column in the dataframe that is the values
datetime_format: str
either 'dd/mm/yyyy' or 'mm/dd/yyyy'
value_format: str
a python float-compatible format
"""
formatters = {"name":lambda x:"{0:<20s}".format(str(x)[:max_name_len]),
"value":lambda x:value_format.format(x)}
if datetime_format.lower().startswith("d"):
dt_fmt = "%d/%m/%Y %H:%M:%S"
elif datetime_format.lower().startswith("m"):
dt_fmt = "%m/%d/%Y %H:%M:%S"
else:
raise Exception("unrecognized datetime_format: " +\
"{0}".format(str(datetime_format)))
for col in [name_col,datetime_col,value_col]:
assert col in dataframe.columns
dataframe.loc[:,"datetime_str"] = dataframe.loc[:,"datetime"].\
apply(lambda x:x.strftime(dt_fmt))
if isinstance(smp_filename,str):
smp_filename = open(smp_filename,'w')
# need this to remove the leading space that pandas puts in front
s = dataframe.loc[:,[name_col,"datetime_str",value_col]].\
to_string(col_space=0,
formatters=formatters,
justify=None,
header=False,
index=False)
for ss in s.split('\n'):
smp_filename.write("{0:<s}\n".format(ss.strip()))
dataframe.pop("datetime_str")
|
python
|
{
"resource": ""
}
|
q20129
|
date_parser
|
train
|
def date_parser(items):
""" datetime parser to help load smp files
Parameters
----------
items : str
a datetime string to parse
Returns
-------
dt : datetime.datetime
the parsed datetime
"""
try:
dt = datetime.strptime(items,"%d/%m/%Y %H:%M:%S")
except Exception as e:
try:
dt = datetime.strptime(items,"%m/%d/%Y %H:%M:%S")
except Exception as ee:
raise Exception("error parsing datetime string" +\
" {0}: \n{1}\n{2}".format(str(items),str(e),str(ee)))
return dt
|
python
|
{
"resource": ""
}
|
q20130
|
MonteCarlo.get_nsing
|
train
|
def get_nsing(self,epsilon=1.0e-4):
""" get the number of solution space dimensions given
a ratio between the largest and smallest singular values
Parameters
----------
epsilon: float
singular value ratio
Returns
-------
nsing : int
number of singular components above the epsilon ratio threshold
Note
-----
If nsing == nadj_par, then None is returned
"""
mx = self.xtqx.shape[0]
nsing = mx - np.searchsorted(
np.sort((self.xtqx.s.x / self.xtqx.s.x.max())[:,0]),epsilon)
if nsing == mx:
self.logger.warn("optimal nsing=npar")
nsing = None
return nsing
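# Usage sketch (editor's addition): assumes a MonteCarlo built from
# hypothetical files; nsing counts singular values whose ratio to the largest
# stays above epsilon.
import pyemu
mc = pyemu.MonteCarlo(jco="pest.jcb", pst="pest.pst")
print(mc.get_nsing(epsilon=1.0e-4))  # None means nsing equals the number of adjustable pars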
|
python
|
{
"resource": ""
}
|
q20131
|
MonteCarlo.get_null_proj
|
train
|
def get_null_proj(self,nsing=None):
""" get a null-space projection matrix of XTQX
Parameters
----------
nsing: int
optional number of singular components to use
If None, then nsing is determined from
call to MonteCarlo.get_nsing()
Returns
-------
v2_proj : pyemu.Matrix
the null-space projection matrix (V2V2^T)
"""
if nsing is None:
nsing = self.get_nsing()
if nsing is None:
raise Exception("nsing is None")
print("using {0} singular components".format(nsing))
self.log("forming null space projection matrix with " +\
"{0} of {1} singular components".format(nsing,self.jco.shape[1]))
v2_proj = (self.xtqx.v[:,nsing:] * self.xtqx.v[:,nsing:].T)
self.log("forming null space projection matrix with " +\
"{0} of {1} singular components".format(nsing,self.jco.shape[1]))
return v2_proj
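# Minimal numeric sketch (editor's addition) of the projector V2 V2^T built
# from the trailing singular vectors of a made-up, rank-deficient matrix.
import numpy as np
X = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])  # rank 1, 3 parameters
_, _, Vt = np.linalg.svd(X)
nsing = 1                              # dimensions resolved by X
V2 = Vt[nsing:, :].T                   # trailing (null-space) vectors
proj = V2 @ V2.T
print(np.allclose(X @ proj, 0.0))      # projected vectors are unseen by X -> True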
|
python
|
{
"resource": ""
}
|
q20132
|
MonteCarlo.draw
|
train
|
def draw(self, num_reals=1, par_file = None, obs=False,
enforce_bounds=None, cov=None, how="gaussian"):
"""draw stochastic realizations of parameters and
optionally observations, filling MonteCarlo.parensemble and
optionally MonteCarlo.obsensemble.
Parameters
----------
num_reals : int
number of realization to generate
par_file : str
parameter file to use as mean values. If None,
use MonteCarlo.pst.parameter_data.parval1.
Default is None
obs : bool
add a realization of measurement noise to observation values,
forming MonteCarlo.obsensemble.Default is False
enforce_bounds : str
enforce parameter bounds based on control file information.
options are 'reset', 'drop' or None. Default is None
cov : pyemu.Cov
parameter covariance matrix to draw from. If None, MonteCarlo.parcov
is used. Must be None if how == "uniform". Default is None
how : str
type of distribution to draw from. Must be in ["gaussian","uniform"]
default is "gaussian".
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(pst="pest.pst")``
``>>>mc.draw(1000)``
"""
if par_file is not None:
self.pst.parrep(par_file)
how = how.lower().strip()
assert how in ["gaussian","uniform"]
if cov is not None:
assert isinstance(cov,Cov)
if how == "uniform":
raise Exception("MonteCarlo.draw() error: 'how'='uniform'," +\
" 'cov' arg cannot be passed")
else:
cov = self.parcov
self.log("generating {0:d} parameter realizations".format(num_reals))
if how == "gaussian":
self.parensemble = ParameterEnsemble.from_gaussian_draw(pst=self.pst,cov=cov,
num_reals=num_reals,
use_homegrown=True,
enforce_bounds=False)
elif how == "uniform":
self.parensemble = ParameterEnsemble.from_uniform_draw(pst=self.pst,num_reals=num_reals)
else:
raise Exception("MonteCarlo.draw(): unrecognized 'how' arg: {0}".format(how))
#self.parensemble = ParameterEnsemble(pst=self.pst)
#self.obsensemble = ObservationEnsemble(pst=self.pst)
#self.parensemble.draw(cov,num_reals=num_reals, how=how,
# enforce_bounds=enforce_bounds)
if enforce_bounds is not None:
self.parensemble.enforce(enforce_bounds)
self.log("generating {0:d} parameter realizations".format(num_reals))
if obs:
self.log("generating {0:d} observation realizations".format(num_reals))
self.obsensemble = ObservationEnsemble.from_id_gaussian_draw(pst=self.pst,num_reals=num_reals)
self.log("generating {0:d} observation realizations".format(num_reals))
|
python
|
{
"resource": ""
}
|
q20133
|
MonteCarlo.project_parensemble
|
train
|
def project_parensemble(self,par_file=None,nsing=None,
inplace=True,enforce_bounds='reset'):
""" perform the null-space projection operations for null-space monte carlo
Parameters
----------
par_file: str
an optional file of parameter values to use
nsing: int
number of singular values to in forming null subspace matrix
inplace: bool
overwrite the existing parameter ensemble with the
projected values
enforce_bounds: str
how to enforce parameter bounds. can be None, 'reset', or 'drop'.
Default is 'reset'
Returns
-------
par_en : pyemu.ParameterEnsemble
if inplace is False, otherwise None
Note
----
to use this method, the MonteCarlo instance must have been constructed
with the ``jco`` argument.
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(jco="pest.jcb")``
``>>>mc.draw(1000)``
``>>>mc.project_parensemble(par_file="final.par",nsing=100)``
"""
assert self.jco is not None,"MonteCarlo.project_parensemble()" +\
"requires a jacobian attribute"
if par_file is not None:
assert os.path.exists(par_file),"monte_carlo.draw() error: par_file not found:" +\
par_file
self.parensemble.pst.parrep(par_file)
# project the ensemble
self.log("projecting parameter ensemble")
en = self.parensemble.project(self.get_null_proj(nsing),inplace=inplace,log=self.log)
self.log("projecting parameter ensemble")
return en
|
python
|
{
"resource": ""
}
|
q20134
|
MonteCarlo.write_psts
|
train
|
def write_psts(self,prefix,existing_jco=None,noptmax=None):
""" write parameter and optionally observation realizations
to a series of pest control files
Parameters
----------
prefix: str
pest control file prefix
existing_jco: str
filename of an existing jacobian matrix to add to the
pest++ options in the control file. This is useful for
NSMC since this jco can be used to get the first set of
parameter upgrades for free! Needs to be the path the jco
file as seen from the location where pest++ will be run
noptmax: int
value of NOPTMAX to set in new pest control files
Example
-------
``>>>import pyemu``
``>>>mc = pyemu.MonteCarlo(jco="pest.jcb")``
``>>>mc.draw(1000, obs=True)``
``>>>mc.write_psts("mc_", existing_jco="pest.jcb", noptmax=1)``
"""
self.log("writing realized pest control files")
# get a copy of the pest control file
pst = self.pst.get(par_names=self.pst.par_names,obs_names=self.pst.obs_names)
if noptmax is not None:
pst.control_data.noptmax = noptmax
if existing_jco is not None:
pst.pestpp_options["BASE_JACOBIAN"] = existing_jco
# set the indices
pst.parameter_data.index = pst.parameter_data.parnme
pst.observation_data.index = pst.observation_data.obsnme
if self.parensemble.istransformed:
par_en = self.parensemble._back_transform(inplace=False)
else:
par_en = self.parensemble
for i in range(self.num_reals):
pst_name = prefix + "{0:d}.pst".format(i)
self.log("writing realized pest control file " + pst_name)
pst.parameter_data.loc[par_en.columns,"parval1"] = par_en.iloc[i, :].T
# reset the regularization
#if pst.control_data.pestmode == "regularization":
#pst.zero_order_tikhonov(parbounds=True)
#zero_order_tikhonov(pst,parbounds=True)
# add the obs noise realization if needed
if self.obsensemble.shape[0] == self.num_reals:
pst.observation_data.loc[self.obsensemble.columns,"obsval"] = \
self.obsensemble.iloc[i, :].T
# write
pst.write(pst_name)
self.log("writing realized pest control file " + pst_name)
self.log("writing realized pest control files")
|
python
|
{
"resource": ""
}
|
q20135
|
ParetoObjFunc.is_nondominated_pathetic
|
train
|
def is_nondominated_pathetic(self, obs_df):
"""identify which candidate solutions are pareto non-dominated -
super pathetically slow...
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
is_nondominated : pandas.Series
boolean series indexed like obs_df, True where the solution is non-dominated
"""
obj_df = obs_df.loc[:,self.obs_obj_names]
is_nondom = []
for i,iidx in enumerate(obj_df.index):
ind = True
for jidx in obj_df.index:
if iidx == jidx:
continue
# if dominates(jidx,iidx):
# ind = False
# break
if self.dominates(obj_df.loc[jidx,:], obj_df.loc[iidx,:]):
ind = False
break
is_nondom.append(ind)
is_nondom = pd.Series(data=is_nondom,index=obs_df.index,dtype=bool)
return is_nondom
|
python
|
{
"resource": ""
}
|
q20136
|
ParetoObjFunc.is_nondominated_continuous
|
train
|
def is_nondominated_continuous(self, obs_df):
"""identify which candidate solutions are pareto non-dominated continuously updated,
but still slow
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
is_nondominated : pandas.Series
boolean series indexed like obs_df, True where the solution is non-dominated
"""
obj_df = obs_df.loc[:,self.obs_obj_names]
P = list(obj_df.index)
PP = set()
PP.add(P[0])
#iidx = 1
#while iidx < len(P):
for iidx in P:
jidx = 0
drop = []
keep = True
for jidx in PP:
# if dominates(iidx,jidx):
# drop.append(jidx)
# elif dominates(jidx,iidx):
# keep = False
# break
if jidx == iidx:
continue
if self.dominates(obj_df.loc[iidx, :], obj_df.loc[jidx, :]):
drop.append(jidx)
elif self.dominates(obj_df.loc[jidx, :], obj_df.loc[iidx, :]):
keep = False
break
for d in drop:
PP.remove(d)
if keep:
PP.add(iidx)
#iidx += 1
is_nondom = pd.Series(data=False,index=obs_df.index,dtype=bool)
is_nondom.loc[PP] = True
return is_nondom
|
python
|
{
"resource": ""
}
|
q20137
|
ParetoObjFunc.is_nondominated_kung
|
train
|
def is_nondominated_kung(self, obs_df):
"""identify which candidate solutions are pareto non-dominated using Kungs algorithm
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
is_nondominated : pandas.Series
boolean series indexed like obs_df, True where the solution is non-dominated
"""
obj_df = obs_df.loc[:,self.obs_obj_names]
obj_names = self.obs_obj_names
ascending = False
if self.obs_dict[obj_names[0]] == "min":
ascending = True
obj_df.sort_values(by=obj_names[0],ascending=ascending,inplace=True)
P = list(obj_df.index)
def front(p):
if len(p) == 1:
return p
p = list(obj_df.loc[p,:].sort_values(by=obj_names[0],ascending=ascending).index)
half = int(len(p) / 2)
T = front(p[:half])
B = front(p[half:])
M = []
i = 0
while i < len(B):
j = 0
while j < len(T):
#if dominates(T[j],B[i]):
if self.dominates(obj_df.loc[T[j],:], obj_df.loc[B[i],:]):
break
j += 1
if (j == len(T)):
M.append(B[i])
i += 1
T.extend(M)
return T
PP = front(P)
is_nondom = pd.Series(data=False,index=obs_df.index,dtype=bool)
is_nondom.loc[PP] = True
return is_nondom
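# Editor's note: front() is the divide-and-conquer merge of Kung et al. (1975):
# sort by the first objective, recursively reduce each half, then keep a
# bottom-half member only if no member of the top-half front dominates it.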
|
python
|
{
"resource": ""
}
|
q20138
|
ParetoObjFunc.crowd_distance
|
train
|
def crowd_distance(self,obs_df):
"""determine the crowding distance for each candidate solution
Parameters
----------
obs_df : pandas.DataFrame
dataframe with columns of observation names and rows of realizations
Returns
-------
crowd_distance : pandas.Series
series with index of obs_df and values of crowd distance
"""
# initialize the distance container
crowd_distance = pd.Series(data=0.0,index=obs_df.index)
for name,direction in self.obs_dict.items():
# make a copy - wasteful, but easier
obj_df = obs_df.loc[:,name].copy()
# sort so that largest values are first
obj_df.sort_values(ascending=False,inplace=True)
# set the ends so they are always retained
crowd_distance.loc[obj_df.index[0]] += self.max_distance
crowd_distance.loc[obj_df.index[-1]] += self.max_distance
# process the vector
i = 1
for idx in obj_df.index[1:-1]:
crowd_distance.loc[idx] += obj_df.iloc[i-1] - obj_df.iloc[i+1]
i += 1
return crowd_distance
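# Editor's note: this follows the NSGA-II crowding-distance idea - the extreme
# solutions for each objective get max_distance so they are always retained,
# while interior solutions accumulate the spread between their two neighbors;
# a larger value means a less crowded (more diverse) solution.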
|
python
|
{
"resource": ""
}
|
q20139
|
BaseGlyph._set_unicode
|
train
|
def _set_unicode(self, value):
"""
Assign the primary unicode to the glyph.
This will be an integer or None.
Subclasses may override this method.
"""
values = list(self.unicodes)
if value in values:
values.remove(value)
values.insert(0, value)
self.unicodes = values
|
python
|
{
"resource": ""
}
|
q20140
|
BaseGlyph.clear
|
train
|
def clear(self, contours=True, components=True, anchors=True,
guidelines=True, image=True):
"""
Clear the glyph.
>>> glyph.clear()
This clears:
- contours
- components
- anchors
- guidelines
- image
It's possible to turn off the clearing of portions of
the glyph with the listed arguments.
>>> glyph.clear(guidelines=False)
"""
self._clear(contours=contours, components=components,
anchors=anchors, guidelines=guidelines, image=image)
|
python
|
{
"resource": ""
}
|
q20141
|
BaseGlyph._iterContours
|
train
|
def _iterContours(self, **kwargs):
"""
This must return an iterator that returns wrapped contours.
Subclasses may override this method.
"""
count = len(self)
index = 0
while count:
yield self[index]
count -= 1
index += 1
|
python
|
{
"resource": ""
}
|
q20142
|
BaseGlyph.appendContour
|
train
|
def appendContour(self, contour, offset=None):
"""
Append a contour containing the same data as ``contour``
to this glyph.
>>> contour = glyph.appendContour(contour)
This will return a :class:`BaseContour` object representing
the new contour in the glyph. ``offset`` indicates the x and
y shift values that should be applied to the appended data.
It must be a :ref:`type-coordinate` value or ``None``. If
``None`` is given, the offset will be ``(0, 0)``.
>>> contour = glyph.appendContour(contour, (100, 0))
"""
contour = normalizers.normalizeContour(contour)
if offset is None:
offset = (0, 0)
offset = normalizers.normalizeTransformationOffset(offset)
return self._appendContour(contour, offset)
|
python
|
{
"resource": ""
}
|
q20143
|
BaseGlyph._appendContour
|
train
|
def _appendContour(self, contour, offset=None, **kwargs):
"""
contour will be an object with a drawPoints method.
offset will be a valid offset (x, y).
This must return the new contour.
Subclasses may override this method.
"""
copy = contour.copy()
if offset != (0, 0):
copy.moveBy(offset)
pointPen = self.getPointPen()
copy.drawPoints(pointPen)  # draw the shifted copy, not the original contour
return self[-1]
|
python
|
{
"resource": ""
}
|
q20144
|
BaseGlyph.removeContour
|
train
|
def removeContour(self, contour):
"""
Remove ``contour`` from the glyph.
>>> glyph.removeContour(contour)
``contour`` may be a :ref:`BaseContour` or an :ref:`type-int`
representing a contour index.
"""
if isinstance(contour, int):
index = contour
else:
index = self._getContourIndex(contour)
index = normalizers.normalizeIndex(index)
if index >= len(self):
raise ValueError("No contour located at index %d." % index)
self._removeContour(index)
|
python
|
{
"resource": ""
}
|
q20145
|
BaseGlyph.appendComponent
|
train
|
def appendComponent(self, baseGlyph=None, offset=None, scale=None, component=None):
"""
Append a component to this glyph.
>>> component = glyph.appendComponent("A")
This will return a :class:`BaseComponent` object representing
the new component in the glyph. ``offset`` indicates the x and
y shift values that should be applied to the appended component.
It must be a :ref:`type-coordinate` value or ``None``. If
``None`` is given, the offset will be ``(0, 0)``.
>>> component = glyph.appendComponent("A", offset=(10, 20))
``scale`` indicates the x and y scale values that should be
applied to the appended component. It must be a
:ref:`type-scale` value or ``None``. If ``None`` is given,
the scale will be ``(1.0, 1.0)``.
>>> component = glyph.appendComponent("A", scale=(1.0, 2.0))
``component`` may be a :class:`BaseComponent` object from which
attribute values will be copied. If ``baseGlyph``, ``offset``
or ``scale`` are specified as arguments, those values will be used
instead of the values in the given component object.
"""
identifier = None
sxy = 0
syx = 0
if component is not None:
component = normalizers.normalizeComponent(component)
if baseGlyph is None:
baseGlyph = component.baseGlyph
sx, sxy, syx, sy, ox, oy = component.transformation
if offset is None:
offset = (ox, oy)
if scale is None:
scale = (sx, sy)
if component.identifier is not None:
existing = set([c.identifier for c in self.components if c.identifier is not None])
if component.identifier not in existing:
identifier = component.identifier
baseGlyph = normalizers.normalizeGlyphName(baseGlyph)
if self.name == baseGlyph:
raise FontPartsError(("A glyph cannot contain a component referencing itself."))
if offset is None:
offset = (0, 0)
if scale is None:
scale = (1, 1)
offset = normalizers.normalizeTransformationOffset(offset)
scale = normalizers.normalizeTransformationScale(scale)
ox, oy = offset
sx, sy = scale
transformation = (sx, sxy, syx, sy, ox, oy)
identifier = normalizers.normalizeIdentifier(identifier)
return self._appendComponent(baseGlyph, transformation=transformation, identifier=identifier)
|
python
|
{
"resource": ""
}
|
q20146
|
BaseGlyph._appendComponent
|
train
|
def _appendComponent(self, baseGlyph, transformation=None, identifier=None, **kwargs):
"""
baseGlyph will be a valid glyph name.
The baseGlyph may or may not be in the layer.
offset will be a valid offset (x, y).
scale will be a valid scale (x, y).
identifier will be a valid, nonconflicting identifier.
This must return the new component.
Subclasses may override this method.
"""
pointPen = self.getPointPen()
pointPen.addComponent(baseGlyph, transformation=transformation, identifier=identifier)
return self.components[-1]
|
python
|
{
"resource": ""
}
|
q20147
|
BaseGlyph.removeComponent
|
train
|
def removeComponent(self, component):
"""
Remove ``component`` from the glyph.
>>> glyph.removeComponent(component)
``component`` may be a :ref:`BaseComponent` or an
:ref:`type-int` representing a component index.
"""
if isinstance(component, int):
index = component
else:
index = self._getComponentIndex(component)
index = normalizers.normalizeIndex(index)
if index >= self._len__components():
raise ValueError("No component located at index %d." % index)
self._removeComponent(index)
|
python
|
{
"resource": ""
}
|
q20148
|
BaseGlyph.appendAnchor
|
train
|
def appendAnchor(self, name=None, position=None, color=None, anchor=None):
"""
Append an anchor to this glyph.
>>> anchor = glyph.appendAnchor("top", (10, 20))
This will return a :class:`BaseAnchor` object representing
the new anchor in the glyph. ``name`` indicates the name to
be assigned to the anchor. It must be a :ref:`type-string`
or ``None``. ``position`` indicates the x and y location
to be applied to the anchor. It must be a
:ref:`type-coordinate` value. ``color`` indicates the color
to be applied to the anchor. It must be a :ref:`type-color`
or ``None``.
>>> anchor = glyph.appendAnchor("top", (10, 20), color=(1, 0, 0, 1))
``anchor`` may be a :class:`BaseAnchor` object from which
attribute values will be copied. If ``name``, ``position``
or ``color`` are specified as arguments, those values will
be used instead of the values in the given anchor object.
"""
identifier = None
if anchor is not None:
anchor = normalizers.normalizeAnchor(anchor)
if name is None:
name = anchor.name
if position is None:
position = anchor.position
if color is None:
color = anchor.color
if anchor.identifier is not None:
existing = set([a.identifier for a in self.anchors if a.identifier is not None])
if anchor.identifier not in existing:
identifier = anchor.identifier
name = normalizers.normalizeAnchorName(name)
position = normalizers.normalizeCoordinateTuple(position)
if color is not None:
color = normalizers.normalizeColor(color)
identifier = normalizers.normalizeIdentifier(identifier)
return self._appendAnchor(name, position=position, color=color, identifier=identifier)
|
python
|
{
"resource": ""
}
|
q20149
|
BaseGlyph.removeAnchor
|
train
|
def removeAnchor(self, anchor):
"""
Remove ``anchor`` from the glyph.
>>> glyph.removeAnchor(anchor)
``anchor`` may be an :ref:`BaseAnchor` or an
:ref:`type-int` representing an anchor index.
"""
if isinstance(anchor, int):
index = anchor
else:
index = self._getAnchorIndex(anchor)
index = normalizers.normalizeIndex(index)
if index >= self._len__anchors():
raise ValueError("No anchor located at index %d." % index)
self._removeAnchor(index)
|
python
|
{
"resource": ""
}
|
q20150
|
BaseGlyph.removeGuideline
|
train
|
def removeGuideline(self, guideline):
"""
Remove ``guideline`` from the glyph.
>>> glyph.removeGuideline(guideline)
``guideline`` may be a :ref:`BaseGuideline` or an
:ref:`type-int` representing a guideline index.
"""
if isinstance(guideline, int):
index = guideline
else:
index = self._getGuidelineIndex(guideline)
index = normalizers.normalizeIndex(index)
if index >= self._len__guidelines():
raise ValueError("No guideline located at index %d." % index)
self._removeGuideline(index)
|
python
|
{
"resource": ""
}
|
q20151
|
BaseGlyph.interpolate
|
train
|
def interpolate(self, factor, minGlyph, maxGlyph,
round=True, suppressError=True):
"""
Interpolate the contents of this glyph at location ``factor``
in a linear interpolation between ``minGlyph`` and ``maxGlyph``.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2)
``factor`` may be a :ref:`type-int-float` or a tuple containing
two :ref:`type-int-float` values representing x and y factors.
>>> glyph.interpolate((0.5, 1.0), otherGlyph1, otherGlyph2)
``minGlyph`` must be a :class:`BaseGlyph` and will be located at 0.0
in the interpolation range. ``maxGlyph`` must be a :class:`BaseGlyph`
and will be located at 1.0 in the interpolation range. If ``round``
is ``True``, the contents of the glyph will be rounded to integers
after the interpolation is performed.
>>> glyph.interpolate(0.5, otherGlyph1, otherGlyph2, round=True)
This method assumes that ``minGlyph`` and ``maxGlyph`` are completely
compatible with each other for interpolation. If not, any errors
encountered will raise a :class:`FontPartsError`. If ``suppressError``
is ``True``, no exception will be raised and errors will be silently
ignored.
"""
factor = normalizers.normalizeInterpolationFactor(factor)
if not isinstance(minGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
minGlyph.__class__.__name__))
if not isinstance(maxGlyph, BaseGlyph):
raise TypeError(("Interpolation to an instance of %r can not be "
"performed from an instance of %r.")
% (self.__class__.__name__,
maxGlyph.__class__.__name__))
round = normalizers.normalizeBoolean(round)
suppressError = normalizers.normalizeBoolean(suppressError)
self._interpolate(factor, minGlyph, maxGlyph,
round=round, suppressError=suppressError)
|
python
|
{
"resource": ""
}
|
q20152
|
BaseGlyph.pointInside
|
train
|
def pointInside(self, point):
"""
Determine if ``point`` is in the black or white of the glyph.
>>> glyph.pointInside((40, 65))
True
``point`` must be a :ref:`type-coordinate`.
"""
point = normalizers.normalizeCoordinateTuple(point)
return self._pointInside(point)
|
python
|
{
"resource": ""
}
|
q20153
|
BaseGlyph._getLayer
|
train
|
def _getLayer(self, name, **kwargs):
"""
name will be a string, but there may not be a
layer with a name matching the string. If not,
a ``ValueError`` must be raised.
Subclasses may override this method.
"""
for glyph in self.layers:
if glyph.layer.name == name:
return glyph
raise ValueError("No layer named '%s' in glyph '%s'."
% (name, self.name))
|
python
|
{
"resource": ""
}
|
q20154
|
BaseGlyph.newLayer
|
train
|
def newLayer(self, name):
"""
Make a new layer with ``name`` in this glyph.
>>> glyphLayer = glyph.newLayer("background")
This will return the new :ref:`type-glyph-layer`.
If the layer already exists in this glyph, it
will be cleared.
"""
layerName = name
glyphName = self.name
layerName = normalizers.normalizeLayerName(layerName)
for glyph in self.layers:
if glyph.layer.name == layerName:
layer = glyph.layer
layer.removeGlyph(glyphName)
break
glyph = self._newLayer(name=layerName)
layer = self.font.getLayer(layerName)
# layer._setLayerInGlyph(glyph)
return glyph
|
python
|
{
"resource": ""
}
|
q20155
|
BaseGlyph.removeLayer
|
train
|
def removeLayer(self, layer):
"""
Remove ``layer`` from this glyph.
>>> glyph.removeLayer("background")
Layer can be a :ref:`type-glyph-layer` or a :ref:`type-string`
representing a layer name.
"""
if isinstance(layer, BaseGlyph):
layer = layer.layer.name
layerName = layer
layerName = normalizers.normalizeLayerName(layerName)
if self._getLayer(layerName).layer.name == layerName:
self._removeLayer(layerName)
|
python
|
{
"resource": ""
}
|
q20156
|
BaseAnchor._get_index
|
train
|
def _get_index(self):
"""
Get the anchor's index.
This must return an ``int``.
Subclasses may override this method.
"""
glyph = self.glyph
if glyph is None:
return None
return glyph.anchors.index(self)
|
python
|
{
"resource": ""
}
|
q20157
|
BaseLib.asDict
|
train
|
def asDict(self):
"""
Return the Lib as a ``dict``.
This is a backwards compatibility method.
"""
d = {}
for k, v in self.items():
d[k] = v
return d
|
python
|
{
"resource": ""
}
|
q20158
|
AutosummaryMethodList.get_items
|
train
|
def get_items(self, names):
"""
Subclass get_items
to add support for all methods in a given object
"""
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
methodNames = []
for name in names:
methodNames.append(name)
_, obj, _, _ = import_by_name(name, prefixes=prefixes)
methodNames.extend(["%s.%s" % (name, method) for method in dir(obj) if not method.startswith("_")])
return super(AutosummaryMethodList, self).get_items(methodNames)
|
python
|
{
"resource": ""
}
|
q20159
|
BaseBPoint.round
|
train
|
def round(self):
"""
Round coordinates.
"""
x, y = self.anchor
self.anchor = (normalizers.normalizeRounding(x),
normalizers.normalizeRounding(y))
x, y = self.bcpIn
self.bcpIn = (normalizers.normalizeRounding(x),
normalizers.normalizeRounding(y))
x, y = self.bcpOut
self.bcpOut = (normalizers.normalizeRounding(x),
normalizers.normalizeRounding(y))
|
python
|
{
"resource": ""
}
|
q20160
|
_sortValue_isItalic
|
train
|
def _sortValue_isItalic(font):
"""
Returns 0 if the font is italic.
Returns 1 if the font is not italic.
"""
info = font.info
styleMapStyleName = info.styleMapStyleName
if styleMapStyleName is not None and "italic" in styleMapStyleName:
return 0
if info.italicAngle not in (None, 0):
return 0
return 1
|
python
|
{
"resource": ""
}
|
q20161
|
_sortValue_isMonospace
|
train
|
def _sortValue_isMonospace(font):
"""
Returns 0 if the font is monospace.
Returns 1 if the font is not monospace.
"""
if font.info.postscriptIsFixedPitch:
return 0
if not len(font):
return 1
testWidth = None
for glyph in font:
if testWidth is None:
testWidth = glyph.width
else:
if testWidth != glyph.width:
return 1
return 0
|
python
|
{
"resource": ""
}
|
q20162
|
BaseGuideline._get_index
|
train
|
def _get_index(self):
"""
Get the guideline's index.
This must return an ``int``.
Subclasses may override this method.
"""
glyph = self.glyph
if glyph is not None:
parent = glyph
else:
parent = self.font
if parent is None:
return None
return parent.guidelines.index(self)
|
python
|
{
"resource": ""
}
|
q20163
|
normalizeFileFormatVersion
|
train
|
def normalizeFileFormatVersion(value):
"""
Normalizes a font's file format version.
* **value** must be a :ref:`type-int`.
* Returned value will be a ``int``.
"""
if not isinstance(value, int):
raise TypeError("File format versions must be instances of "
":ref:`type-int`, not %s."
% type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20164
|
normalizeLayerOrder
|
train
|
def normalizeLayerOrder(value, font):
"""
Normalizes layer order.
* **value** must be a ``tuple`` or ``list``.
* **value** items must normalize as layer names with
:func:`normalizeLayerName`.
* **value** must contain layers that exist in **font**.
* **value** must not contain duplicate layers.
* Returned ``tuple`` will be unencoded ``unicode`` strings
for each layer name.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Layer order must be a list, not %s."
% type(value).__name__)
for v in value:
normalizeLayerName(v)
fontLayers = [layer.name for layer in font.layers]
for name in value:
if name not in fontLayers:
raise ValueError("Layer must exist in font. %s does not exist "
"in font.layers." % name)
duplicates = [v for v, count in Counter(value).items() if count > 1]
if len(duplicates) != 0:
raise ValueError("Duplicate layers are not allowed. Layer name(s) "
"'%s' are duplicate(s)." % ", ".join(duplicates))
return tuple([unicode(v) for v in value])
|
python
|
{
"resource": ""
}
|
q20165
|
normalizeDefaultLayerName
|
train
|
def normalizeDefaultLayerName(value, font):
"""
Normalizes default layer name.
* **value** must normalize as layer name with
:func:`normalizeLayerName`.
* **value** must be a layer in **font**.
* Returned value will be an unencoded ``unicode`` string.
"""
value = normalizeLayerName(value)
if value not in font.layerOrder:
raise ValueError("No layer with the name '%s' exists." % value)
return unicode(value)
|
python
|
{
"resource": ""
}
|
q20166
|
normalizeGlyphOrder
|
train
|
def normalizeGlyphOrder(value):
"""
Normalizes glyph order.
* **value** must be a ``tuple`` or ``list``.
* **value** items must normalize as glyph names with
:func:`normalizeGlyphName`.
* **value** must not repeat glyph names.
* Returned value will be a ``tuple`` of unencoded ``unicode`` strings.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Glyph order must be a list, not %s."
% type(value).__name__)
for v in value:
normalizeGlyphName(v)
duplicates = sorted(v for v, count in Counter(value).items() if count > 1)
if len(duplicates) != 0:
raise ValueError("Duplicate glyph names are not allowed. Glyph "
"name(s) '%s' are duplicate." % ", ".join(duplicates))
return tuple([unicode(v) for v in value])
|
python
|
{
"resource": ""
}
|
q20167
|
normalizeKerningKey
|
train
|
def normalizeKerningKey(value):
"""
Normalizes kerning key.
* **value** must be a ``tuple`` or ``list``.
* **value** must contain only two members.
* **value** items must be :ref:`type-string`.
* **value** items must be at least one character long.
* Returned value will be a two member ``tuple`` of unencoded
``unicode`` strings.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Kerning key must be a tuple instance, not %s."
% type(value).__name__)
if len(value) != 2:
raise ValueError("Kerning key must be a tuple containing two items, "
"not %d." % len(value))
for v in value:
if not isinstance(v, basestring):
raise TypeError("Kerning key items must be strings, not %s."
% type(v).__name__)
if len(v) < 1:
raise ValueError("Kerning key items must be one character long")
if value[0].startswith("public.") and not value[0].startswith(
"public.kern1."):
raise ValueError("Left Kerning key group must start with "
"public.kern1.")
if value[1].startswith("public.") and not value[1].startswith(
"public.kern2."):
raise ValueError("Right Kerning key group must start with "
"public.kern2.")
return tuple([unicode(v) for v in value])
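# Usage sketch (editor's addition): plain glyph pairs and correctly prefixed
# group names both pass; a mismatched "public." prefix raises ValueError.
print(normalizeKerningKey(("A", "V")))
print(normalizeKerningKey(["public.kern1.O", "public.kern2.O"]))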
|
python
|
{
"resource": ""
}
|
q20168
|
normalizeKerningValue
|
train
|
def normalizeKerningValue(value):
"""
Normalizes kerning value.
* **value** must be an :ref:`type-int-float`.
* Returned value is the same type as input value.
"""
if not isinstance(value, (int, float)):
raise TypeError("Kerning value must be a int or a float, not %s."
% type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20169
|
normalizeGroupValue
|
train
|
def normalizeGroupValue(value):
"""
Normalizes group value.
* **value** must be a ``list``.
* **value** items must normalize as glyph names with
:func:`normalizeGlyphName`.
* Returned value will be a ``tuple`` of unencoded ``unicode`` strings.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Group value must be a list, not %s."
% type(value).__name__)
value = [normalizeGlyphName(v) for v in value]
return tuple([unicode(v) for v in value])
|
python
|
{
"resource": ""
}
|
q20170
|
normalizeFeatureText
|
train
|
def normalizeFeatureText(value):
"""
Normalizes feature text.
* **value** must be a :ref:`type-string`.
* Returned value will be an unencoded ``unicode`` string.
"""
if not isinstance(value, basestring):
raise TypeError("Feature text must be a string, not %s."
% type(value).__name__)
return unicode(value)
|
python
|
{
"resource": ""
}
|
q20171
|
normalizeLibValue
|
train
|
def normalizeLibValue(value):
"""
Normalizes lib value.
* **value** must not be ``None``.
* Returned value is the same type as the input value.
"""
if value is None:
raise ValueError("Lib value must not be None.")
if isinstance(value, (list, tuple)):
for v in value:
normalizeLibValue(v)
elif isinstance(value, dict):
for k, v in value.items():
normalizeLibKey(k)
normalizeLibValue(v)
elif isinstance(value, basestring):
value = unicode(value)
return value
|
python
|
{
"resource": ""
}
|
q20172
|
normalizeGlyphUnicodes
|
train
|
def normalizeGlyphUnicodes(value):
"""
Normalizes glyph unicodes.
* **value** must be a ``list``.
* **value** items must normalize as glyph unicodes with
:func:`normalizeGlyphUnicode`.
* **value** must not repeat unicode values.
* Returned value will be a ``tuple`` of ints.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Glyph unicodes must be a list, not %s."
% type(value).__name__)
values = [normalizeGlyphUnicode(v) for v in value]
duplicates = [v for v, count in Counter(values).items() if count > 1]  # check the normalized values
if len(duplicates) != 0:
raise ValueError("Duplicate unicode values are not allowed.")
return tuple(values)
|
python
|
{
"resource": ""
}
|
q20173
|
normalizeGlyphUnicode
|
train
|
def normalizeGlyphUnicode(value):
"""
Normalizes glyph unicode.
* **value** must be an int or hex (represented as a string).
* **value** must be in a unicode range.
* Returned value will be an ``int``.
"""
if not isinstance(value, (int, basestring)) or isinstance(value, bool):
raise TypeError("Glyph unicode must be a int or hex string, not %s."
% type(value).__name__)
if isinstance(value, basestring):
try:
value = int(value, 16)
except ValueError:
raise ValueError("Glyph unicode hex must be a valid hex string.")
if value < 0 or value > 1114111:
raise ValueError("Glyph unicode must be in the Unicode range.")
return value
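# Usage sketch (editor's addition): ints pass through, hex strings are parsed
# base-16, and bools or out-of-range values raise.
print(normalizeGlyphUnicode(65))      # 65
print(normalizeGlyphUnicode("0041"))  # 65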
|
python
|
{
"resource": ""
}
|
q20174
|
normalizeGlyphWidth
|
train
|
def normalizeGlyphWidth(value):
"""
Normalizes glyph width.
* **value** must be a :ref:`type-int-float`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)):
raise TypeError("Glyph width must be an :ref:`type-int-float`, not %s."
% type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20175
|
normalizeGlyphLeftMargin
|
train
|
def normalizeGlyphLeftMargin(value):
"""
Normalizes glyph left margin.
* **value** must be a :ref:`type-int-float` or `None`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)) and value is not None:
raise TypeError("Glyph left margin must be an :ref:`type-int-float`, "
"not %s." % type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20176
|
normalizeGlyphRightMargin
|
train
|
def normalizeGlyphRightMargin(value):
"""
Normalizes glyph right margin.
* **value** must be a :ref:`type-int-float` or `None`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)) and value is not None:
raise TypeError("Glyph right margin must be an :ref:`type-int-float`, "
"not %s." % type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20177
|
normalizeGlyphHeight
|
train
|
def normalizeGlyphHeight(value):
"""
Normalizes glyph height.
* **value** must be a :ref:`type-int-float`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)):
raise TypeError("Glyph height must be an :ref:`type-int-float`, not "
"%s." % type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20178
|
normalizeGlyphBottomMargin
|
train
|
def normalizeGlyphBottomMargin(value):
"""
Normalizes glyph bottom margin.
* **value** must be a :ref:`type-int-float` or `None`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)) and value is not None:
raise TypeError("Glyph bottom margin must be an "
":ref:`type-int-float`, not %s."
% type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20179
|
normalizeGlyphTopMargin
|
train
|
def normalizeGlyphTopMargin(value):
"""
Normalizes glyph top margin.
* **value** must be a :ref:`type-int-float` or `None`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)) and value is not None:
raise TypeError("Glyph top margin must be an :ref:`type-int-float`, "
"not %s." % type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20180
|
normalizeGlyphFormatVersion
|
train
|
def normalizeGlyphFormatVersion(value):
"""
Normalizes glyph format version for saving to XML string.
* **value** must be a :ref:`type-int-float` of either 1 or 2.
* Returned value will be an int.
"""
if not isinstance(value, (int, float)):
raise TypeError("Glyph Format Version must be an "
":ref:`type-int-float`, not %s."
% type(value).__name__)
value = int(value)
if value not in (1, 2):
raise ValueError("Glyph Format Version must be either 1 or 2, not %s."
% value)
return value
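
# Hedged usage sketch (illustrative only, not part of the library source):
#   normalizeGlyphFormatVersion(2.0)  # -> 2 (floats are truncated to int)
#   normalizeGlyphFormatVersion(3)    # raises ValueError (only 1 or 2)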
|
python
|
{
"resource": ""
}
|
q20181
|
normalizePointType
|
train
|
def normalizePointType(value):
"""
Normalizes point type.
    * **value** must be a string.
* **value** must be one of the following:
+----------+
| move |
+----------+
| line |
+----------+
| offcurve |
+----------+
| curve |
+----------+
| qcurve |
+----------+
* Returned value will be an unencoded ``unicode`` string.
"""
allowedTypes = ['move', 'line', 'offcurve', 'curve', 'qcurve']
if not isinstance(value, basestring):
raise TypeError("Point type must be a string, not %s."
% type(value).__name__)
if value not in allowedTypes:
raise ValueError("Point type must be '%s'; not %r."
% ("', '".join(allowedTypes), value))
return unicode(value)
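
# Hedged usage sketch (illustrative only, not part of the library source;
# assumes the module's `unicode` alias is in scope):
#   normalizePointType("curve")  # -> u"curve"
#   normalizePointType("cubic")  # raises ValueError (not an allowed type)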
|
python
|
{
"resource": ""
}
|
q20182
|
normalizePointName
|
train
|
def normalizePointName(value):
"""
Normalizes point name.
* **value** must be a :ref:`type-string`.
* **value** must be at least one character long.
* Returned value will be an unencoded ``unicode`` string.
"""
if not isinstance(value, basestring):
raise TypeError("Point names must be strings, not %s."
% type(value).__name__)
if len(value) < 1:
raise ValueError("Point names must be at least one character long.")
return unicode(value)
|
python
|
{
"resource": ""
}
|
q20183
|
normalizeBPointType
|
train
|
def normalizeBPointType(value):
"""
Normalizes bPoint type.
    * **value** must be a string.
* **value** must be one of the following:
+--------+
| corner |
+--------+
| curve |
+--------+
* Returned value will be an unencoded ``unicode`` string.
"""
allowedTypes = ['corner', 'curve']
if not isinstance(value, basestring):
raise TypeError("bPoint type must be a string, not %s."
% type(value).__name__)
if value not in allowedTypes:
raise ValueError("bPoint type must be 'corner' or 'curve', not %r."
% value)
return unicode(value)
|
python
|
{
"resource": ""
}
|
q20184
|
normalizeInternalObjectType
|
train
|
def normalizeInternalObjectType(value, cls, name):
"""
Normalizes an internal object type.
    * **value** must be an instance of **cls**.
* Returned value is the same type as the input value.
"""
if not isinstance(value, cls):
raise TypeError("%s must be a %s instance, not %s."
% (name, name, type(value).__name__))
return value
|
python
|
{
"resource": ""
}
|
q20185
|
normalizeBoolean
|
train
|
def normalizeBoolean(value):
"""
Normalizes a boolean.
* **value** must be an ``int`` with value of 0 or 1, or a ``bool``.
* Returned value will be a boolean.
"""
if isinstance(value, int) and value in (0, 1):
value = bool(value)
if not isinstance(value, bool):
raise ValueError("Boolean values must be True or False, not '%s'."
% value)
return value
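
# Hedged usage sketch (illustrative only, not part of the library source):
#   normalizeBoolean(1)      # -> True (0 and 1 are coerced to bool)
#   normalizeBoolean(True)   # -> True
#   normalizeBoolean("yes")  # raises ValueError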
|
python
|
{
"resource": ""
}
|
q20186
|
normalizeIndex
|
train
|
def normalizeIndex(value):
"""
Normalizes index.
* **value** must be an ``int`` or ``None``.
* Returned value is the same type as the input value.
"""
if value is not None:
if not isinstance(value, int):
raise TypeError("Indexes must be None or integers, not %s."
% type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20187
|
normalizeIdentifier
|
train
|
def normalizeIdentifier(value):
"""
Normalizes identifier.
    * **value** must be a :ref:`type-string` or `None`.
    * **value** must not be longer than 100 characters.
    * **value** must not contain a character outside the range of 0x20 - 0x7E.
* Returned value is an unencoded ``unicode`` string.
"""
if value is None:
return value
if not isinstance(value, basestring):
raise TypeError("Identifiers must be strings, not %s."
% type(value).__name__)
if len(value) == 0:
raise ValueError("The identifier string is empty.")
if len(value) > 100:
raise ValueError("The identifier string has a length (%d) greater "
"than the maximum allowed (100)." % len(value))
for c in value:
v = ord(c)
if v < 0x20 or v > 0x7E:
raise ValueError("The identifier string ('%s') contains a "
"character out size of the range 0x20 - 0x7E."
% value)
return unicode(value)
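
# Hedged usage sketch (illustrative only, not part of the library source):
#   normalizeIdentifier("anchor.001")  # -> u"anchor.001"
#   normalizeIdentifier(None)          # -> None (passed through)
#   normalizeIdentifier("caf\xe9")     # raises ValueError (outside 0x20-0x7E)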
|
python
|
{
"resource": ""
}
|
q20188
|
normalizeX
|
train
|
def normalizeX(value):
"""
Normalizes x coordinate.
* **value** must be an :ref:`type-int-float`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)):
raise TypeError("X coordinates must be instances of "
":ref:`type-int-float`, not %s."
% type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20189
|
normalizeY
|
train
|
def normalizeY(value):
"""
Normalizes y coordinate.
* **value** must be an :ref:`type-int-float`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)):
raise TypeError("Y coordinates must be instances of "
":ref:`type-int-float`, not %s."
% type(value).__name__)
return value
|
python
|
{
"resource": ""
}
|
q20190
|
normalizeCoordinateTuple
|
train
|
def normalizeCoordinateTuple(value):
"""
Normalizes coordinate tuple.
* **value** must be a ``tuple`` or ``list``.
* **value** must have exactly two items.
* **value** items must be an :ref:`type-int-float`.
* Returned value is a ``tuple`` of two values of the same type as
the input values.
"""
if not isinstance(value, (tuple, list)):
raise TypeError("Coordinates must be tuple instances, not %s."
% type(value).__name__)
if len(value) != 2:
raise ValueError("Coordinates must be tuples containing two items, "
"not %d." % len(value))
x, y = value
x = normalizeX(x)
y = normalizeY(y)
return (x, y)
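
# Hedged usage sketch (illustrative only, not part of the library source):
#   normalizeCoordinateTuple([100, 25.5])  # -> (100, 25.5)
#   normalizeCoordinateTuple((1, 2, 3))    # raises ValueError (needs 2 items)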
|
python
|
{
"resource": ""
}
|
q20191
|
normalizeArea
|
train
|
def normalizeArea(value):
"""
Normalizes area.
    * **value** must be a positive :ref:`type-int-float`.
    * Returned value is a ``float``.
"""
if not isinstance(value, (int, float)):
raise TypeError("Area must be an instance of :ref:`type-int-float`, "
"not %s." % type(value).__name__)
if value < 0:
raise ValueError("Area must be a positive :ref:`type-int-float`, "
"not %s." % repr(value))
return float(value)
|
python
|
{
"resource": ""
}
|
q20192
|
normalizeRotationAngle
|
train
|
def normalizeRotationAngle(value):
"""
Normalizes an angle.
    * **value** must be a :ref:`type-int-float`.
    * **value** must be between -360 and 360.
    * If **value** is negative, it is normalized by adding it to 360.
* Returned value is a ``float`` between 0 and 360.
"""
if not isinstance(value, (int, float)):
raise TypeError("Angle must be instances of "
":ref:`type-int-float`, not %s."
% type(value).__name__)
if abs(value) > 360:
raise ValueError("Angle must be between -360 and 360.")
if value < 0:
value = value + 360
return float(value)
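
# Hedged usage sketch (illustrative only, not part of the library source):
#   normalizeRotationAngle(-90)  # -> 270.0 (negatives wrap by adding 360)
#   normalizeRotationAngle(365)  # raises ValueError (outside -360..360)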
|
python
|
{
"resource": ""
}
|
q20193
|
normalizeGlyphNote
|
train
|
def normalizeGlyphNote(value):
"""
    Normalizes glyph note.
    * **value** must be a :ref:`type-string`.
    * Returned value is an unencoded ``unicode`` string.
"""
if not isinstance(value, basestring):
raise TypeError("Note must be a string, not %s."
% type(value).__name__)
return unicode(value)
|
python
|
{
"resource": ""
}
|
q20194
|
normalizeFilePath
|
train
|
def normalizeFilePath(value):
"""
Normalizes file path.
* **value** must be a :ref:`type-string`.
    * Returned value is an unencoded ``unicode`` string.
"""
if not isinstance(value, basestring):
raise TypeError("File paths must be strings, not %s."
% type(value).__name__)
return unicode(value)
|
python
|
{
"resource": ""
}
|
q20195
|
normalizeInterpolationFactor
|
train
|
def normalizeInterpolationFactor(value):
"""
Normalizes interpolation factor.
* **value** must be an :ref:`type-int-float`, ``tuple`` or ``list``.
* If **value** is a ``tuple`` or ``list``, it must have exactly two items.
These items must be instances of :ref:`type-int-float`.
* Returned value is a ``tuple`` of two ``float``.
"""
if not isinstance(value, (int, float, list, tuple)):
raise TypeError("Interpolation factor must be an int, float, or tuple "
"instances, not %s." % type(value).__name__)
if isinstance(value, (int, float)):
value = (float(value), float(value))
else:
if not len(value) == 2:
raise ValueError("Interpolation factor tuple must contain two "
"values, not %d." % len(value))
for v in value:
if not isinstance(v, (int, float)):
raise TypeError("Interpolation factor tuple values must be an "
":ref:`type-int-float`, not %s."
% type(value).__name__)
value = tuple([float(v) for v in value])
return value
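
# Hedged usage sketch (illustrative only, not part of the library source):
#   normalizeInterpolationFactor(2)         # -> (2.0, 2.0)
#   normalizeInterpolationFactor([1, 0.5])  # -> (1.0, 0.5)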
|
python
|
{
"resource": ""
}
|
q20196
|
normalizeRounding
|
train
|
def normalizeRounding(value):
"""
Normalizes rounding.
    Python 2 and Python 3 handle the rounding of halves (0.5, 1.5, etc.)
    differently. This normalizes rounding to be the same (Python 3 style)
    in both environments.
    * **value** must be an :ref:`type-int-float`.
    * Returned value is an ``int``.
"""
if not isinstance(value, (int, float)):
raise TypeError("Value to round must be an int or float, not %s."
% type(value).__name__)
return round3(value)
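
# Hedged usage sketch (illustrative only, not part of the library source;
# assumes `round3` implements Python 3 round-half-to-even semantics in
# both environments, e.g. as provided by fontTools.misc.py23):
#   normalizeRounding(0.5)  # -> 0
#   normalizeRounding(1.5)  # -> 2
#   normalizeRounding(2.3)  # -> 2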
|
python
|
{
"resource": ""
}
|
q20197
|
AskString
|
train
|
def AskString(message, value='', title='FontParts'):
"""
    An ask-a-string dialog; a `message` is required.
Optionally a `value` and `title` can be provided.
::
from fontParts.ui import AskString
print(AskString("who are you?"))
"""
return dispatcher["AskString"](message=message, value=value, title=title)
|
python
|
{
"resource": ""
}
|
q20198
|
AskYesNoCancel
|
train
|
def AskYesNoCancel(message, title='FontParts', default=0, informativeText=""):
"""
    An ask yes, no, or cancel dialog; a `message` is required.
    Optionally a `title`, `default` and `informativeText` can be provided.
    The `default` option indicates which button is the default button.
::
from fontParts.ui import AskYesNoCancel
print(AskYesNoCancel("who are you?"))
"""
return dispatcher["AskYesNoCancel"](message=message, title=title,
default=default, informativeText=informativeText)
|
python
|
{
"resource": ""
}
|
q20199
|
FindGlyph
|
train
|
def FindGlyph(aFont, message="Search for a glyph:", title='FontParts'):
"""
    A dialog to search for a glyph in a provided font.
    Optionally a `message` and `title` can be provided.
    ::
        from fontParts.ui import FindGlyph
        from fontParts.world import CurrentFont
        glyph = FindGlyph(CurrentFont())
        print(glyph)
"""
return dispatcher["FindGlyph"](aFont=aFont, message=message, title=title)
|
python
|
{
"resource": ""
}
|