sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def from_file(filename):
    """
    Load an nparray object from a json file.

    @parameter str filename: path to the file

    Returns: the nparray object reconstructed via from_dict.
    """
    # use a context manager so the file handle is closed even when
    # json.load raises (the original leaked the handle on error):
    with open(filename, 'r') as f:
        j = json.load(f)
    return from_dict(j)
load an nparray object from a json filename @parameter str filename: path to the file
entailment
def monkeypatch():
    """
    Monkeypatch built-in numpy factory functions so that they call the
    equivalents provided by nparray instead.
    """
    replacements = {
        'array': array, 'arange': arange, 'linspace': linspace,
        'logspace': logspace, 'geomspace': geomspace, 'full': full,
        'full_like': full_like, 'zeros': zeros, 'zeros_like': zeros_like,
        'ones': ones, 'ones_like': ones_like, 'eye': eye,
    }
    for attr, replacement in replacements.items():
        setattr(np, attr, replacement)
monkeypatch built-in numpy functions to call those provided by nparray instead.
entailment
def init_passbands(refresh=False):
    """
    This function should be called only once, at import time. It
    traverses the passbands directory and builds a lookup table of
    passband names qualified as 'pbset:pbname' and corresponding files
    and atmosphere content within.

    @refresh: if True, rebuild the table even if already initialized.
    """
    global _initialized

    if _initialized and not refresh:
        return

    # Load information from online passbands first so that any that
    # are available locally will override the online entries.
    online_passbands = list_online_passbands(full_dict=True, refresh=refresh)
    for pb, info in online_passbands.items():
        _pbtable[pb] = {'fname': None, 'atms': info['atms'], 'pb': None}

    # Load global passbands (install directory) first and local
    # passbands (.phoebe directory) second, so that local passbands
    # override global passbands on a name conflict. A user-provided
    # directory (environment variable), if set, is scanned last.
    dirs = [_pbdir_global, _pbdir_local]
    if _pbdir_env is not None:  # 'is not None' instead of '== None' comparison
        dirs.append(_pbdir_env)

    for path in dirs:
        for f in os.listdir(path):
            if f == 'README':
                continue
            # os.path.join is robust to a missing trailing separator,
            # unlike the plain 'path+f' concatenation it replaces:
            init_passband(os.path.join(path, f))

    _initialized = True
This function should be called only once, at import time. It traverses the passbands directory and builds a lookup table of passband names qualified as 'pbset:pbname' and corresponding files and atmosphere content within.
entailment
def install_passband(fname, local=True):
    """
    Install a passband from a local file. This simply copies the file
    into the install path - but beware that clearing the installation
    will clear the passband as well.

    @fname: path to the passband file to install
    @local: if True, install into the local (.phoebe) directory;
    if False, you must have permissions to access the installation
    directory.
    """
    pbdir = _pbdir_local if local else _pbdir_global
    shutil.copy(fname, pbdir)
    # shutil.copy places the file at pbdir/basename(fname); joining
    # with the full source path (as before) pointed at a non-existent
    # location whenever fname contained a directory component.
    init_passband(os.path.join(pbdir, os.path.basename(fname)))
Install a passband from a local file. This simply copies the file into the install path - but beware that clearing the installation will clear the passband as well If local=False, you must have permissions to access the installation directory
entailment
def uninstall_all_passbands(local=True):
    """
    Uninstall every passband, either locally or globally (call twice,
    once per scope, to delete ALL passbands).

    If local=False, you must have permission to access the
    installation directory.
    """
    pbdir = _pbdir_local if local else _pbdir_global
    for entry in os.listdir(pbdir):
        target = os.path.join(pbdir, entry)
        logger.warning("deleting file: {}".format(target))
        os.remove(target)
Uninstall all passbands, either globally or locally (need to call twice to delete ALL passbands) If local=False, you must have permission to access the installation directory
entailment
def download_passband(passband, local=True):
    """
    Download and install a given passband from the repository.

    @passband: qualified name of the passband to download
    @local: if True, install into the local (.phoebe) directory;
    if False, you must have permission to access the installation
    directory.

    Raises ValueError when the passband is not available online and
    IOError when the download fails.
    """
    # urllib.urlretrieve exists only on python 2; import the py3
    # location first and fall back for py2 compatibility:
    try:
        from urllib.request import urlretrieve
    except ImportError:
        from urllib import urlretrieve

    if passband not in list_online_passbands():
        raise ValueError("passband '{}' not available".format(passband))

    pbdir = _pbdir_local if local else _pbdir_global
    passband_fname = _online_passbands[passband]['fname']
    passband_fname_local = os.path.join(pbdir, passband_fname)
    url = 'http://github.com/phoebe-project/phoebe2-tables/raw/master/passbands/{}'.format(passband_fname)
    logger.info("downloading from {} and installing to {}...".format(url, passband_fname_local))
    try:
        urlretrieve(url, passband_fname_local)
    except IOError:
        raise IOError("unable to download {} passband - check connection".format(passband))
    else:
        init_passband(passband_fname_local)
Download and install a given passband from the repository. If local=False, you must have permission to access the installation directory
entailment
def Inorm_bol_bb(Teff=5772., logg=4.43, abun=0.0, atm='blackbody', photon_weighted=False):
    """
    Computes normal bolometric intensity using the Stefan-Boltzmann
    law, Inorm_bol_bb = 1/\pi \sigma T^4. If photon-weighted intensity
    is requested, Inorm_bol_bb is multiplied by a conversion factor
    that comes from integrating lambda/hc P(lambda) over all lambda.

    Input parameters mimic the Passband class Inorm method for calling
    convenience.

    @Teff: value or array of effective temperatures
    @logg: surface gravity; not used, for class compatibility only
    @abun: abundances; not used, for class compatibility only
    @atm: atmosphere model; must be 'blackbody', otherwise ValueError
    is raised
    @photon_weighted: intensity weighting scheme; if True, the
    photon-weighting conversion factor is applied (the original
    docstring incorrectly claimed this raises an exception)

    Returns: normal bolometric blackbody intensity as an array.
    """
    if atm != 'blackbody':
        raise ValueError('atmosphere must be set to blackbody for Inorm_bol_bb.')

    # convert scalars to vectors first so that the photon-weighting
    # factor below is computed consistently for scalar input:
    if not hasattr(Teff, '__iter__'):
        Teff = np.array((Teff,))

    if photon_weighted:
        # precomputed conversion constant from energy- to
        # photon-weighted intensity (see docstring):
        factor = 2.6814126821264836e22/Teff
    else:
        factor = 1.0

    return factor * sigma_sb.value * Teff**4 / np.pi
@Teff: value or array of effective temperatures @logg: surface gravity; not used, for class compatibility only @abun: abundances; not used, for class compatibility only @atm: atmosphere model, must be blackbody, otherwise exception is raised @photon_weighted: intensity weighting scheme; if True, a photon-weighting conversion factor is applied Computes normal bolometric intensity using the Stefan-Boltzmann law, Inorm_bol_bb = 1/\pi \sigma T^4. If photon-weighted intensity is requested, Inorm_bol_bb is multiplied by a conversion factor that comes from integrating lambda/hc P(lambda) over all lambda. Input parameters mimic the Passband class Inorm method for calling convenience.
entailment
def _planck(self, lam, Teff):
    """
    Monochromatic blackbody intensity in W/m^3, computed from the
    Planck function.

    @lam: wavelength in m
    @Teff: effective temperature in K

    Returns: monochromatic blackbody intensity
    """
    exponent = self.h*self.c/lam/self.k/Teff
    numerator = 2*self.h*self.c*self.c/lam**5
    return numerator/(np.exp(exponent)-1)
Computes monochromatic blackbody intensity in W/m^3 using the Planck function. @lam: wavelength in m @Teff: effective temperature in K Returns: monochromatic blackbody intensity
entailment
def _planck_deriv(self, lam, Teff):
    """
    Derivative of the monochromatic blackbody intensity, computed
    from the Planck function.

    @lam: wavelength in m
    @Teff: effective temperature in K

    Returns: the derivative of monochromatic blackbody intensity
    """
    ex = np.exp(self.h*self.c/lam/self.k/Teff)
    prefactor = 2*self.h*self.c*self.c/self.k/Teff/lam**7
    return prefactor * (self.h*self.c*ex - 5*lam*self.k*Teff*(ex-1))/(ex-1)**2
Computes the derivative of the monochromatic blackbody intensity using the Planck function. @lam: wavelength in m @Teff: effective temperature in K Returns: the derivative of monochromatic blackbody intensity
entailment
def _planck_spi(self, lam, Teff):
    """
    Spectral index of the monochromatic blackbody intensity, computed
    from the Planck function. The spectral index is defined as:

        B(lambda) = 5 + d(log I)/d(log lambda),

    where I is the Planck function.

    @lam: wavelength in m
    @Teff: effective temperature in K

    Returns: the spectral index of monochromatic blackbody intensity
    """
    u = self.h*self.c/lam/self.k/Teff
    eu = np.exp(u)
    return u*eu/(eu-1)
Computes the spectral index of the monochromatic blackbody intensity using the Planck function. The spectral index is defined as: B(lambda) = 5 + d(log I)/d(log lambda), where I is the Planck function. @lam: wavelength in m @Teff: effective temperature in K Returns: the spectral index of monochromatic blackbody intensity
entailment
def _bb_intensity(self, Teff, photon_weighted=False):
    """
    Mean passband intensity for a blackbody atmosphere:

    I_pb^E = \int_\lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda P(\lambda) d\lambda
    I_pb^P = \int_\lambda \lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda \lambda P(\lambda) d\lambda

    Superscripts E and P stand for energy and photon, respectively.

    @Teff: effective temperature in K
    @photon_weighted: photon/energy switch

    Returns: mean passband intensity using blackbody atmosphere.
    """
    if photon_weighted:
        integrand = lambda w: w*self._planck(w, Teff)*self.ptf(w)
        norm = self.ptf_photon_area
    else:
        integrand = lambda w: self._planck(w, Teff)*self.ptf(w)
        norm = self.ptf_area
    return integrate.quad(integrand, self.wl[0], self.wl[-1])[0]/norm
Computes mean passband intensity using blackbody atmosphere: I_pb^E = \int_\lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda P(\lambda) d\lambda I_pb^P = \int_\lambda \lambda I(\lambda) P(\lambda) d\lambda / \int_\lambda \lambda P(\lambda) d\lambda Superscripts E and P stand for energy and photon, respectively. @Teff: effective temperature in K @photon_weighted: photon/energy switch Returns: mean passband intensity using blackbody atmosphere.
entailment
def _bindex_blackbody(self, Teff, photon_weighted=False):
    """
    Mean boosting index for a blackbody atmosphere:

    B_pb^E = \int_\lambda I(\lambda) P(\lambda) B(\lambda) d\lambda / \int_\lambda I(\lambda) P(\lambda) d\lambda
    B_pb^P = \int_\lambda \lambda I(\lambda) P(\lambda) B(\lambda) d\lambda / \int_\lambda \lambda I(\lambda) P(\lambda) d\lambda

    Superscripts E and P stand for energy and photon, respectively.

    @Teff: effective temperature in K
    @photon_weighted: photon/energy switch

    Returns: mean boosting index using blackbody atmosphere.
    """
    if photon_weighted:
        weight = lambda w: w*self._planck(w, Teff)*self.ptf(w)
    else:
        weight = lambda w: self._planck(w, Teff)*self.ptf(w)
    weighted_spi = lambda w: weight(w)*self._planck_spi(w, Teff)

    # NOTE(review): epsabs=1e10 looks like it may have been meant to
    # be 1e-10; kept as-is to preserve behavior -- confirm upstream.
    numerator = integrate.quad(weighted_spi, self.wl[0], self.wl[-1], epsabs=1e10, epsrel=1e-8)[0]
    denominator = integrate.quad(weight, self.wl[0], self.wl[-1], epsabs=1e10, epsrel=1e-6)[0]
    return numerator/denominator
Computes the mean boosting index using blackbody atmosphere: B_pb^E = \int_\lambda I(\lambda) P(\lambda) B(\lambda) d\lambda / \int_\lambda I(\lambda) P(\lambda) d\lambda B_pb^P = \int_\lambda \lambda I(\lambda) P(\lambda) B(\lambda) d\lambda / \int_\lambda \lambda I(\lambda) P(\lambda) d\lambda Superscripts E and P stand for energy and photon, respectively. @Teff: effective temperature in K @photon_weighted: photon/energy switch Returns: mean boosting index using blackbody atmosphere.
entailment
def compute_blackbody_response(self, Teffs=None):
    """
    Computes blackbody intensities across the entire range of
    effective temperatures, in both the energy-weighted and the
    photon-weighted regime. A cubic spline is then fit to the
    log10(I) vs. Teff values and the interpolators are exported as
    _log10_Inorm_bb_energy and _log10_Inorm_bb_photon.

    @Teffs: an array of effective temperatures. If None, a default
    array of 97 values, uniform in log10 between ~316K and ~501187K,
    is used.

    Returns: n/a
    """
    if Teffs is None:
        # 97 log-uniform nodes covering the 316K-501187K range:
        Teffs = 10**np.linspace(2.5, 5.7, 97)

    # Energy-weighted intensities:
    logI_energy = np.array([np.log10(self._bb_intensity(T, photon_weighted=False)) for T in Teffs])
    self._bb_func_energy = interpolate.splrep(Teffs, logI_energy, s=0)
    self._log10_Inorm_bb_energy = lambda Teff: interpolate.splev(Teff, self._bb_func_energy)

    # Photon-weighted intensities:
    logI_photon = np.array([np.log10(self._bb_intensity(T, photon_weighted=True)) for T in Teffs])
    self._bb_func_photon = interpolate.splrep(Teffs, logI_photon, s=0)
    self._log10_Inorm_bb_photon = lambda Teff: interpolate.splev(Teff, self._bb_func_photon)

    self.content.append('blackbody')
    self.atmlist.append('blackbody')
Computes blackbody intensities across the entire range of effective temperatures. It does this for two regimes, energy-weighted and photon-weighted. It then fits a cubic spline to the log(I)-Teff values and exports the interpolation functions _log10_Inorm_bb_energy and _log10_Inorm_bb_photon. @Teffs: an array of effective temperatures. If None, a default array from ~300K to ~500000K with 97 steps is used. The default array is uniform in log10 scale. Returns: n/a
entailment
def compute_ck2004_response(self, path, verbose=False):
    """
    Computes Castelli & Kurucz (2004) intensities across the entire
    range of model atmospheres.

    @path: path to the directory containing ck2004 SEDs
    @verbose: switch to determine whether computing progress should
    be printed on screen

    Returns: n/a
    """
    models = glob.glob(path+'/*M1.000*')
    Nmodels = len(models)

    # Store the length of the filename extensions for parsing:
    offset = len(models[0])-models[0].rfind('.')

    Teff, logg, abun = np.empty(Nmodels), np.empty(Nmodels), np.empty(Nmodels)
    InormE, InormP = np.empty(Nmodels), np.empty(Nmodels)

    if verbose:
        print('Computing Castelli & Kurucz (2004) passband intensities for %s:%s. This will take a while.' % (self.pbset, self.pbname))

    for i, model in enumerate(models):
        #~ spc = np.loadtxt(model).T -- waaay slower
        spc = np.fromfile(model, sep=' ').reshape(-1,2).T

        # Atmosphere parameters are encoded in the filename; slice
        # them out relative to the extension offset computed above:
        Teff[i] = float(model[-17-offset:-12-offset])
        logg[i] = float(model[-11-offset:-9-offset])/10
        sign = 1. if model[-9-offset]=='P' else -1.
        abun[i] = sign*float(model[-8-offset:-6-offset])/10

        spc[0] /= 1e10 # AA -> m
        spc[1] *= 1e7 # erg/s/cm^2/A -> W/m^3

        # Trim the SED to the passband support:
        wl = spc[0][(spc[0] >= self.ptf_table['wl'][0]) & (spc[0] <= self.ptf_table['wl'][-1])]
        fl = spc[1][(spc[0] >= self.ptf_table['wl'][0]) & (spc[0] <= self.ptf_table['wl'][-1])]
        fl *= self.ptf(wl)
        flP = fl*wl

        # NOTE(review): the Riemann sums below assume a uniform
        # wavelength step (wl[1]-wl[0]) across the trimmed SED --
        # confirm against the SED file format.
        InormE[i] = np.log10(fl.sum()/self.ptf_area*(wl[1]-wl[0]))          # energy-weighted intensity
        InormP[i] = np.log10(flP.sum()/self.ptf_photon_area*(wl[1]-wl[0]))  # photon-weighted intensity
        if verbose:
            if 100*i % (len(models)) == 0:
                print('%d%% done.' % (100*i/(len(models)-1)))

    # Store axes (Teff, logg, abun) and the full grid of Inorm, with
    # nans where the grid isn't complete.
    self._ck2004_axes = (np.unique(Teff), np.unique(logg), np.unique(abun))

    self._ck2004_energy_grid = np.nan*np.ones((len(self._ck2004_axes[0]), len(self._ck2004_axes[1]), len(self._ck2004_axes[2]), 1))
    self._ck2004_photon_grid = np.nan*np.ones((len(self._ck2004_axes[0]), len(self._ck2004_axes[1]), len(self._ck2004_axes[2]), 1))

    # Scatter the computed intensities into the grids; boolean
    # axis-equality indexing places each value at its node:
    for i, I0 in enumerate(InormE):
        self._ck2004_energy_grid[Teff[i] == self._ck2004_axes[0], logg[i] == self._ck2004_axes[1], abun[i] == self._ck2004_axes[2], 0] = I0
    for i, I0 in enumerate(InormP):
        self._ck2004_photon_grid[Teff[i] == self._ck2004_axes[0], logg[i] == self._ck2004_axes[1], abun[i] == self._ck2004_axes[2], 0] = I0

    # Tried radial basis functions but they were just terrible.
    #~ self._log10_Inorm_ck2004 = interpolate.Rbf(self._ck2004_Teff, self._ck2004_logg, self._ck2004_met, self._ck2004_Inorm, function='linear')

    self.content.append('ck2004')
    self.atmlist.append('ck2004')
Computes Castelli & Kurucz (2004) intensities across the entire range of model atmospheres. @path: path to the directory containing ck2004 SEDs @verbose: switch to determine whether computing progress should be printed on screen Returns: n/a
entailment
def compute_ck2004_intensities(self, path, particular=None, verbose=False):
    """
    Computes direction-dependent passband intensities using Castelli
    & Kurucz (2004) model atmospheres.

    @path: path to the directory with SEDs
    @particular: particular file in @path to be processed; if None,
    all files in the directory are processed.
    @verbose: set to True to display progress in the terminal.
    """
    models = os.listdir(path)
    if particular != None:
        models = [particular]
    Nmodels = len(models)

    # Store the length of the filename extensions for parsing:
    offset = len(models[0])-models[0].rfind('.')

    Teff, logg, abun, mu = np.empty(Nmodels), np.empty(Nmodels), np.empty(Nmodels), np.empty(Nmodels)
    ImuE, ImuP = np.empty(Nmodels), np.empty(Nmodels)
    boostingE, boostingP = np.empty(Nmodels), np.empty(Nmodels)

    if verbose:
        print('Computing Castelli-Kurucz intensities for %s:%s. This will take a long while.' % (self.pbset, self.pbname))

    for i, model in enumerate(models):
        #spc = np.loadtxt(path+'/'+model).T -- waaay slower
        spc = np.fromfile(path+'/'+model, sep=' ').reshape(-1,2).T

        spc[0] /= 1e10 # AA -> m
        spc[1] *= 1e7 # erg/s/cm^2/A -> W/m^3

        # Atmosphere parameters (and mu) are encoded in the filename;
        # slice them out relative to the extension offset:
        Teff[i] = float(model[-17-offset:-12-offset])
        logg[i] = float(model[-11-offset:-9-offset])/10
        sign = 1. if model[-9-offset]=='P' else -1.
        abun[i] = sign*float(model[-8-offset:-6-offset])/10
        mu[i] = float(model[-5-offset:-offset])

        # trim the spectrum at passband limits:
        keep = (spc[0] >= self.ptf_table['wl'][0]) & (spc[0] <= self.ptf_table['wl'][-1])
        wl = spc[0][keep]
        fl = spc[1][keep]

        # make a log-scale copy for boosting and fit a Legendre
        # polynomial to the Imu envelope by way of sigma clipping;
        # then compute a Legendre series derivative to get the
        # boosting index; we only take positive fluxes to keep the
        # log well defined.
        lnwl = np.log(wl[fl > 0])
        lnfl = np.log(fl[fl > 0]) + 5*lnwl

        # First Legendre fit to the data:
        envelope = np.polynomial.legendre.legfit(lnwl, lnfl, 5)
        continuum = np.polynomial.legendre.legval(lnwl, envelope)
        diff = lnfl-continuum
        sigma = np.std(diff)
        clipped = (diff > -sigma)

        # Sigma clip to get the continuum:
        while True:
            Npts = clipped.sum()
            envelope = np.polynomial.legendre.legfit(lnwl[clipped], lnfl[clipped], 5)
            continuum = np.polynomial.legendre.legval(lnwl, envelope)
            diff = lnfl-continuum
            # clipping will sometimes unclip already clipped points
            # because the fit is slightly different, which can lead
            # to infinite loops. To prevent that, we never allow
            # clipped points to be resurrected, which is achieved
            # by the following bitwise condition (array comparison):
            clipped = clipped & (diff > -sigma)
            if clipped.sum() == Npts:
                break

        derivative = np.polynomial.legendre.legder(envelope, 1)
        boosting_index = np.polynomial.legendre.legval(lnwl, derivative)

        # calculate energy (E) and photon (P) weighted fluxes and
        # their integrals.
        flE = self.ptf(wl)*fl
        flP = wl*flE
        flEint = flE.sum()
        flPint = flP.sum()

        # calculate mean boosting coefficient and use it to get
        # boosting factors for energy (E) and photon (P) weighted
        # fluxes.
        boostE = (flE[fl > 0]*boosting_index).sum()/flEint
        boostP = (flP[fl > 0]*boosting_index).sum()/flPint
        boostingE[i] = boostE
        boostingP[i] = boostP

        # NOTE(review): the Riemann sums assume a uniform wavelength
        # step (wl[1]-wl[0]) -- confirm against the SED file format.
        ImuE[i] = np.log10(flEint/self.ptf_area*(wl[1]-wl[0]))          # energy-weighted intensity
        ImuP[i] = np.log10(flPint/self.ptf_photon_area*(wl[1]-wl[0]))  # photon-weighted intensity

        if verbose:
            if 100*i % (len(models)) == 0:
                print('%d%% done.' % (100*i/(len(models)-1)))

    # Store axes (Teff, logg, abun, mu) and the full grid of Imu,
    # with nans where the grid isn't complete. Imu-s come in two
    # flavors: energy-weighted intensities and photon-weighted
    # intensities, based on the detector used.
    self._ck2004_intensity_axes = (np.unique(Teff), np.unique(logg), np.unique(abun), np.append(np.array(0.0,), np.unique(mu)))
    self._ck2004_Imu_energy_grid = np.nan*np.ones((len(self._ck2004_intensity_axes[0]), len(self._ck2004_intensity_axes[1]), len(self._ck2004_intensity_axes[2]), len(self._ck2004_intensity_axes[3]), 1))
    self._ck2004_Imu_photon_grid = np.nan*np.ones((len(self._ck2004_intensity_axes[0]), len(self._ck2004_intensity_axes[1]), len(self._ck2004_intensity_axes[2]), len(self._ck2004_intensity_axes[3]), 1))
    self._ck2004_boosting_energy_grid = np.nan*np.ones((len(self._ck2004_intensity_axes[0]), len(self._ck2004_intensity_axes[1]), len(self._ck2004_intensity_axes[2]), len(self._ck2004_intensity_axes[3]), 1))
    self._ck2004_boosting_photon_grid = np.nan*np.ones((len(self._ck2004_intensity_axes[0]), len(self._ck2004_intensity_axes[1]), len(self._ck2004_intensity_axes[2]), len(self._ck2004_intensity_axes[3]), 1))

    # Set the limb (mu=0) to 0; in log this actually means
    # flux=1W/m2, but for all practical purposes that is still 0.
    self._ck2004_Imu_energy_grid[:,:,:,0,:] = 0.0
    self._ck2004_Imu_photon_grid[:,:,:,0,:] = 0.0
    self._ck2004_boosting_energy_grid[:,:,:,0,:] = 0.0
    self._ck2004_boosting_photon_grid[:,:,:,0,:] = 0.0

    # Scatter the computed values into the grids; boolean
    # axis-equality indexing places each value at its node:
    for i, Imu in enumerate(ImuE):
        self._ck2004_Imu_energy_grid[Teff[i] == self._ck2004_intensity_axes[0], logg[i] == self._ck2004_intensity_axes[1], abun[i] == self._ck2004_intensity_axes[2], mu[i] == self._ck2004_intensity_axes[3], 0] = Imu
    for i, Imu in enumerate(ImuP):
        self._ck2004_Imu_photon_grid[Teff[i] == self._ck2004_intensity_axes[0], logg[i] == self._ck2004_intensity_axes[1], abun[i] == self._ck2004_intensity_axes[2], mu[i] == self._ck2004_intensity_axes[3], 0] = Imu
    for i, Bavg in enumerate(boostingE):
        self._ck2004_boosting_energy_grid[Teff[i] == self._ck2004_intensity_axes[0], logg[i] == self._ck2004_intensity_axes[1], abun[i] == self._ck2004_intensity_axes[2], mu[i] == self._ck2004_intensity_axes[3], 0] = Bavg
    for i, Bavg in enumerate(boostingP):
        self._ck2004_boosting_photon_grid[Teff[i] == self._ck2004_intensity_axes[0], logg[i] == self._ck2004_intensity_axes[1], abun[i] == self._ck2004_intensity_axes[2], mu[i] == self._ck2004_intensity_axes[3], 0] = Bavg

    self.content.append('ck2004_all')
Computes direction-dependent passband intensities using Castelli & Kurucz (2004) model atmospheres. @path: path to the directory with SEDs @particular: particular file in @path to be processed; if None, all files in the directory are processed. @verbose: set to True to display progress in the terminal.
entailment
def export_legacy_ldcoeffs(self, models, filename=None, photon_weighted=True):
    """
    Exports CK2004 limb darkening coefficients to a PHOEBE legacy
    compatible format.

    @models: the path (including the filename) of legacy's models.list
    @filename: output filename for storing the table; if None, the
    rows are printed to stdout instead
    @photon_weighted: selects the photon- or energy-weighted grid
    """
    if photon_weighted:
        grid = self._ck2004_ld_photon_grid
    else:
        grid = self._ck2004_ld_energy_grid

    if filename is not None:
        import time
        f = open(filename, 'w')
        f.write('# PASS_SET %s\n' % self.pbset)
        f.write('# PASSBAND %s\n' % self.pbname)
        f.write('# VERSION 1.0\n\n')
        f.write('# Exported from PHOEBE-2 passband on %s\n' % (time.ctime()))
        f.write('# The coefficients are computed for the %s-weighted regime.\n\n' % ('photon' if photon_weighted else 'energy'))

    mods = np.loadtxt(models)
    for mod in mods:
        # locate the (Teff, logg, abun) node for this model; legacy
        # stores logg and abun multiplied by 10:
        Tindex = np.argwhere(self._ck2004_intensity_axes[0] == mod[0])[0][0]
        lindex = np.argwhere(self._ck2004_intensity_axes[1] == mod[1]/10)[0][0]
        mindex = np.argwhere(self._ck2004_intensity_axes[2] == mod[2]/10)[0][0]
        if filename is None:
            print('%6.3f '*11 % tuple(grid[Tindex, lindex, mindex].tolist()))
        else:
            # bug fix: this branch previously always wrote
            # self._ck2004_ld_photon_grid, ignoring photon_weighted:
            f.write(('%6.3f '*11+'\n') % tuple(grid[Tindex, lindex, mindex].tolist()))

    if filename is not None:
        f.close()
@models: the path (including the filename) of legacy's models.list @filename: output filename for storing the table Exports CK2004 limb darkening coefficients to a PHOEBE legacy compatible format.
entailment
def compute_ck2004_ldints(self):
    """
    Computes integrated limb darkening profiles for ck2004
    atmospheres. These are used for intensity-to-flux
    transformations. The evaluated integral is:

    ldint = 2 \pi \int_0^1 Imu mu dmu
    """
    if 'ck2004_all' not in self.content:
        print('Castelli & Kurucz (2004) intensities are not computed yet. Please compute those first.')
        return None

    axes = self._ck2004_intensity_axes
    egrid = self._ck2004_Imu_energy_grid
    pgrid = self._ck2004_Imu_photon_grid

    self._ck2004_ldint_energy_grid = np.nan*np.ones((len(axes[0]), len(axes[1]), len(axes[2]), 1))
    self._ck2004_ldint_photon_grid = np.nan*np.ones((len(axes[0]), len(axes[1]), len(axes[2]), 1))

    mu = axes[3]
    # normalize the (log10) intensities to the disk-center (mu=1) value:
    Imu = 10**egrid[:,:,:,:]/10**egrid[:,:,:,-1:]
    pImu = 10**pgrid[:,:,:,:]/10**pgrid[:,:,:,-1:]

    # Evaluate \int_0^1 Imu mu dmu segment by segment, treating Imu
    # as piecewise linear in mu between nodes; each segment integral
    # is computed in closed form from its slope and intercept.
    for ti in range(len(axes[0])):
        for gi in range(len(axes[1])):
            for ai in range(len(axes[2])):
                acc_e = 0.0
                acc_p = 0.0
                for k in range(len(mu)-1):
                    dmu = mu[k+1]-mu[k]
                    slope_e = (Imu[ti,gi,ai,k+1]-Imu[ti,gi,ai,k])/dmu
                    icept_e = Imu[ti,gi,ai,k]-slope_e*mu[k]
                    acc_e += slope_e/3*(mu[k+1]**3-mu[k]**3) + icept_e/2*(mu[k+1]**2-mu[k]**2)
                    slope_p = (pImu[ti,gi,ai,k+1]-pImu[ti,gi,ai,k])/dmu
                    icept_p = pImu[ti,gi,ai,k]-slope_p*mu[k]
                    acc_p += slope_p/3*(mu[k+1]**3-mu[k]**3) + icept_p/2*(mu[k+1]**2-mu[k]**2)
                self._ck2004_ldint_energy_grid[ti,gi,ai] = 2*acc_e
                self._ck2004_ldint_photon_grid[ti,gi,ai] = 2*acc_p

    self.content.append('ck2004_ldint')
Computes integrated limb darkening profiles for ck2004 atmospheres. These are used for intensity-to-flux transformations. The evaluated integral is: ldint = 2 \pi \int_0^1 Imu mu dmu
entailment
def interpolate_ck2004_ldcoeffs(self, Teff=5772., logg=4.43, abun=0.0, atm='ck2004', ld_func='power', photon_weighted=False):
    """
    Interpolate the passband-stored table of limb darkening model
    coefficients.

    @Teff: effective temperature (scalar or array)
    @logg: surface gravity (scalar or array)
    @abun: metallicity (scalar or array)
    @atm: atmosphere model; only 'ck2004' tables are stored
    @ld_func: limb darkening law; one of 'linear', 'logarithmic',
    'square_root', 'quadratic', 'power' or 'all'
    @photon_weighted: photon/energy switch

    Returns: interpolated limb darkening coefficients, or None when
    the tables are missing or ld_func is invalid.
    """
    if 'ck2004_ld' not in self.content:
        print('Castelli & Kurucz (2004) limb darkening coefficients are not computed yet. Please compute those first.')
        return None

    table = self._ck2004_ld_photon_grid if photon_weighted else self._ck2004_ld_energy_grid

    if not hasattr(Teff, '__iter__'):
        req = np.array(((Teff, logg, abun),))
        ld_coeffs = libphoebe.interp(req, self._ck2004_intensity_axes[0:3], table)[0]
    else:
        req = np.vstack((Teff, logg, abun)).T
        ld_coeffs = libphoebe.interp(req, self._ck2004_intensity_axes[0:3], table).T

    # slices into the 11-coefficient vector, one per supported law:
    slices = {
        'linear': slice(0, 1),
        'logarithmic': slice(1, 3),
        'square_root': slice(3, 5),
        'quadratic': slice(5, 7),
        'power': slice(7, 11),
        'all': slice(None),
    }
    if ld_func not in slices:
        # bug fix: the original format string was missing its
        # '% ld_func' argument and printed a literal '%s':
        print('ld_func=%s is invalid; please choose from [linear, logarithmic, square_root, quadratic, power, all].' % ld_func)
        return None
    return ld_coeffs[slices[ld_func]]
Interpolate the passband-stored table of LD model coefficients.
entailment
def import_wd_atmcof(self, plfile, atmfile, wdidx, Nabun=19, Nlogg=11, Npb=25, Nints=4): """ Parses WD's atmcof and reads in all Legendre polynomials for the given passband. @plfile: path and filename of atmcofplanck.dat @atmfile: path and filename of atmcof.dat @wdidx: WD index of the passed passband. This can be automated but it's not a high priority. @Nabun: number of metallicity nodes in atmcof.dat. For the 2003 version the number of nodes is 19. @Nlogg: number of logg nodes in atmcof.dat. For the 2003 version the number of nodes is 11. @Npb: number of passbands in atmcof.dat. For the 2003 version the number of passbands is 25. @Nints: number of temperature intervals (input lines) per entry. For the 2003 version the number of lines is 4. """ # Initialize the external atmcof module if necessary: # PERHAPS WD_DATA SHOULD BE GLOBAL?? self.wd_data = libphoebe.wd_readdata(plfile, atmfile) # That is all that was necessary for *_extern_planckint() and # *_extern_atmx() functions. However, we also want to support # circumventing WD subroutines and use WD tables directly. For # that, we need to do a bit more work. # Store the passband index for use in planckint() and atmx(): self.extern_wd_idx = wdidx # Break up the table along axes and extract a single passband data: atmtab = np.reshape(self.wd_data['atm_table'], (Nabun, Npb, Nlogg, Nints, -1)) atmtab = atmtab[:, wdidx, :, :, :] # Finally, reverse the metallicity axis because it is sorted in # reverse order in atmcof: self.extern_wd_atmx = atmtab[::-1, :, :, :] self.content += ['extern_planckint', 'extern_atmx'] self.atmlist += ['extern_planckint', 'extern_atmx']
Parses WD's atmcof and reads in all Legendre polynomials for the given passband. @plfile: path and filename of atmcofplanck.dat @atmfile: path and filename of atmcof.dat @wdidx: WD index of the passed passband. This can be automated but it's not a high priority. @Nabun: number of metallicity nodes in atmcof.dat. For the 2003 version the number of nodes is 19. @Nlogg: number of logg nodes in atmcof.dat. For the 2003 version the number of nodes is 11. @Npb: number of passbands in atmcof.dat. For the 2003 version the number of passbands is 25. @Nints: number of temperature intervals (input lines) per entry. For the 2003 version the number of lines is 4.
entailment
def _log10_Inorm_extern_planckint(self, Teff):
    """
    Computes normal passband intensities by delegating to the
    external WD machinery, which employs the blackbody approximation.

    @Teff: effective temperature in K

    Returns: log10(Inorm)
    """
    return libphoebe.wd_planckint(Teff, self.extern_wd_idx, self.wd_data["planck_table"])
Internal function to compute normal passband intensities using the external WD machinery that employs blackbody approximation. @Teff: effective temperature in K Returns: log10(Inorm)
entailment
def _log10_Inorm_extern_atmx(self, Teff, logg, abun):
    """
    Computes normal passband intensities by delegating to the
    external WD machinery, which employs model atmospheres and ramps.

    @Teff: effective temperature in K
    @logg: surface gravity in cgs
    @abun: metallicity in dex, Solar=0.0

    Returns: log10(Inorm)
    """
    return libphoebe.wd_atmint(Teff, logg, abun, self.extern_wd_idx, self.wd_data["planck_table"], self.wd_data["atm_table"])
Internal function to compute normal passband intensities using the external WD machinery that employs model atmospheres and ramps. @Teff: effective temperature in K @logg: surface gravity in cgs @abun: metallicity in dex, Solar=0.0 Returns: log10(Inorm)
entailment
def set_keyspace(self, keyspace):
    """
    Switch all connections to another keyspace.

    Returns a DeferredList that fires when every connection has
    acknowledged the change.
    """
    self.keyspace = keyspace
    pending = [proto.submitRequest(ManagedThriftRequest('set_keyspace', keyspace))
               for proto in self._protos]
    return defer.gatherResults(pending)
switch all connections to another keyspace
entailment
def login(self, credentials):
    """
    Authenticate all connections with the supplied credentials.

    Returns a DeferredList that fires when every connection has
    completed authentication.
    """
    pending = [proto.submitRequest(ManagedThriftRequest(
                   'login', ttypes.AuthenticationRequest(credentials=credentials)))
               for proto in self._protos]
    return defer.gatherResults(pending)
authenticate all connections
entailment
def solve(self,problem):
    """
    Solves optimization problem.

    The routine is an interior-point quadratic-programming solver
    (banner: "Solver: IQP"): it keeps the primal iterate strictly
    inside the box l < x < u together with positive bound multipliers
    mu (upper bounds) and pi (lower bounds), and drives the KKT
    residuals and the complementarity products toward zero.

    Parameters
    ----------
    problem : Object
        A QuadProblem, or any problem accepted by ``cast_problem``
        (it is then wrapped into a QuadProblem).

    Raises
    ------
    OptSolverError_NoInterior : if the box [l,u] is empty.
    OptSolverError_Infeasibility : if an iterate leaves the interior.
    OptSolverError_MaxIters : if ``maxiter`` iterations are exceeded.
    OptSolverError_BadLinSystem : if the KKT system cannot be solved.
    """

    # Local vars
    norm2 = self.norm2          # NOTE(review): unused below
    norminf = self.norminf
    parameters = self.parameters

    # Parameters
    tol = parameters['tol']
    maxiter = parameters['maxiter']
    quiet = parameters['quiet']
    sigma = parameters['sigma']        # weight for the inner-loop target tau
    eps = parameters['eps']
    eps_cold = parameters['eps_cold']  # cold-start value for the multipliers

    # Problem
    if not isinstance(problem,QuadProblem):
        problem = cast_problem(problem)
        quad_problem = QuadProblem(None,None,None,None,None,None,problem=problem)
    else:
        quad_problem = problem
    self.problem = problem
    self.quad_problem = quad_problem

    # Linsolver
    self.linsolver = new_linsolver(parameters['linsolver'],'symmetric')

    # Reset
    self.reset()

    # Checks: the box must be non-empty
    if not np.all(problem.l <= problem.u):
        raise OptSolverError_NoInterior(self)

    # Data (bounds are relaxed by tol/10 so strict interiority is attainable)
    self.H = quad_problem.H
    self.g = quad_problem.g
    self.A = quad_problem.A
    self.AT = quad_problem.A.T
    self.b = quad_problem.b
    self.l = quad_problem.l-tol/10.
    self.u = quad_problem.u+tol/10.
    self.n = quad_problem.H.shape[0]
    self.m = quad_problem.A.shape[0]
    self.e = np.ones(self.n)
    self.I = eye(self.n,format='coo')
    self.Onm = coo_matrix((self.n,self.m))
    self.Omm = coo_matrix((self.m,self.m))

    # Initial primal: midpoint of the box, or the user's guess clipped to it
    if quad_problem.x is None:
        self.x = (self.u + self.l)/2.
    else:
        self.x = np.maximum(np.minimum(quad_problem.x,problem.u),problem.l)

    # Initial duals: warm-start values are floored at eps to stay positive
    if quad_problem.lam is None:
        self.lam = np.zeros(self.m)
    else:
        self.lam = quad_problem.lam.copy()
    if quad_problem.mu is None:
        self.mu = np.ones(self.x.size)*eps_cold
    else:
        self.mu = np.maximum(quad_problem.mu,eps)
    if quad_problem.pi is None:
        self.pi = np.ones(self.x.size)*eps_cold
    else:
        self.pi = np.maximum(quad_problem.pi,eps)

    # Check interior
    try:
        assert(np.all(self.l < self.x))
        assert(np.all(self.x < self.u))
        assert(np.all(self.mu > 0))
        assert(np.all(self.pi > 0))
    except AssertionError:
        raise OptSolverError_Infeasibility(self)

    # Init vector: stacked primal-dual iterate y = (x, lam, mu, pi)
    self.y = np.hstack((self.x,self.lam,self.mu,self.pi))

    # Complementarity measures (average complementarity products)
    self.eta_mu = np.dot(self.mu,self.u-self.x)/self.x.size
    self.eta_pi = np.dot(self.pi,self.x-self.l)/self.x.size

    # Objective scaling: normalizes the initial gradient magnitude
    fdata = self.func(self.y)
    self.obj_sca = np.maximum(norminf(self.g+self.H*self.x)/10.,1.)
    self.H = self.H/self.obj_sca
    self.g = self.g/self.obj_sca
    fdata = self.func(self.y)

    # Header
    if not quiet:
        print('\nSolver: IQP')
        print('-----------')

    # Outer
    s = 0.
    self.k = 0
    while True:

        # Complementarity measures
        self.eta_mu = np.dot(self.mu,self.u-self.x)/self.x.size
        self.eta_pi = np.dot(self.pi,self.x-self.l)/self.x.size

        # Init eval
        fdata = self.func(self.y)
        fmax = norminf(fdata.f)
        gmax = norminf(fdata.GradF)

        # Done: KKT residual and complementarity both below tolerance
        if fmax < tol and sigma*np.maximum(self.eta_mu,self.eta_pi) < tol:
            self.set_status(self.STATUS_SOLVED)
            self.set_error_msg('')
            return

        # Target: the inner loop reduces the merit gradient below tau
        tau = sigma*norminf(fdata.GradF)

        # Header
        if not quiet:
            if self.k > 0:
                print('')
            print('{0:^3s}'.format('iter'), end=' ')
            print('{0:^9s}'.format('phi'), end=' ')
            print('{0:^9s}'.format('fmax'), end=' ')
            print('{0:^9s}'.format('gmax'), end=' ')
            print('{0:^8s}'.format('cu'), end=' ')
            print('{0:^8s}'.format('cl'), end=' ')
            print('{0:^8s}'.format('s'))

        # Inner
        while True:

            # Eval
            fdata = self.func(self.y)
            fmax = norminf(fdata.f)
            gmax = norminf(fdata.GradF)
            compu = norminf(self.mu*(self.u-self.x))
            compl = norminf(self.pi*(self.x-self.l))
            # phi: original (unscaled) quadratic objective value
            phi = (0.5*np.dot(self.x,self.H*self.x)+np.dot(self.g,self.x))*self.obj_sca

            # Show progress
            if not quiet:
                print('{0:^3d}'.format(self.k), end=' ')
                print('{0:^9.2e}'.format(phi), end=' ')
                print('{0:^9.2e}'.format(fmax), end=' ')
                print('{0:^9.2e}'.format(gmax), end=' ')
                print('{0:^8.1e}'.format(compu), end=' ')
                print('{0:^8.1e}'.format(compl), end=' ')
                print('{0:^8.1e}'.format(s))

            # Done
            if gmax < tau:
                break

            # Done
            if fmax < tol and np.maximum(compu,compl) < tol:
                break

            # Maxiters
            if self.k >= maxiter:
                raise OptSolverError_MaxIters(self)

            # Search direction from the reduced (condensed) KKT system:
            # the mu/pi rows are eliminated into the diagonal terms D1, D2.
            ux = self.u-self.x
            xl = self.x-self.l
            D1 = spdiags(self.mu/ux,0,self.n,self.n,format='coo')
            D2 = spdiags(self.pi/xl,0,self.n,self.n,format='coo')
            fbar = np.hstack((-fdata.rd+fdata.ru/ux-fdata.rl/xl,fdata.rp))
            if self.A.shape[0] > 0:
                Jbar = bmat([[tril(self.H)+D1+D2,None],
                             [-self.A,self.Omm]],format='coo')
            else:
                Jbar = bmat([[tril(self.H)+D1+D2]], format='coo')
            try:
                if not self.linsolver.is_analyzed():
                    self.linsolver.analyze(Jbar)
                pbar = self.linsolver.factorize_and_solve(Jbar,fbar)
            except RuntimeError:
                raise OptSolverError_BadLinSystem(self)
            # Recover the eliminated multiplier directions
            px = pbar[:self.n]
            pmu = (-fdata.ru + self.mu*px)/ux
            ppi = (-fdata.rl - self.pi*px)/xl
            p = np.hstack((pbar,pmu,ppi))

            # Steplength bounds: largest step keeping the iterate strictly
            # interior (fraction-to-boundary with factor 1-eps)
            indices = px > 0
            s1 = np.min(np.hstack(((1.-eps)*(self.u-self.x)[indices]/px[indices],np.inf)))
            indices = px < 0
            s2 = np.min(np.hstack(((eps-1.)*(self.x-self.l)[indices]/px[indices],np.inf)))
            indices = pmu < 0
            s3 = np.min(np.hstack(((eps-1.)*self.mu[indices]/pmu[indices],np.inf)))
            indices = ppi < 0
            s4 = np.min(np.hstack(((eps-1.)*self.pi[indices]/ppi[indices],np.inf)))
            smax = np.min([s1,s2,s3,s4])

            # Line search
            s,fdata = self.line_search(self.y,p,fdata.F,fdata.GradF,self.func,smax)

            # Update x
            self.y += s*p
            self.k += 1
            self.x,self.lam,self.mu,self.pi = self.extract_components(self.y)

            # Check
            try:
                assert(np.all(self.x < self.u))
                assert(np.all(self.x > self.l))
                assert(np.all(self.mu > 0))
                assert(np.all(self.pi > 0))
            except AssertionError:
                raise OptSolverError_Infeasibility(self)
Solves optimization problem. Parameters ---------- problem : Object
entailment
def factorize(self, A): """ Factorizes A. Parameters ---------- A : matrix For symmetric systems, should contain only lower diagonal part. """ A = csc_matrix(A) if self.prop == self.SYMMETRIC: A = (A + A.T) - triu(A) self.lu = self.umfpack.splu(A)
Factorizes A. Parameters ---------- A : matrix For symmetric systems, should contain only lower diagonal part.
entailment
def retry(self):
    """
    Re-attempt this factory's connection. It is assumed that a
    previous connection was attempted and failed, either before or
    after succeeding.
    """
    connector = self.connector
    if connector is None:
        raise ValueError("No connector to retry")
    # A torn-down service means we should not reconnect.
    if self.service is not None:
        connector.connect()
Retry this factory's connection. It is assumed that a previous connection was attempted and failed- either before or after a successful connection.
entailment
def prep_connection(self, creds=None, keyspace=None, node_auto_discovery=True):
    """
    Run login and set_keyspace as necessary, then (optionally) ask this
    node for its view of the Cassandra ring. Expects that our connection
    is alive. Returns a Deferred that fires with the ring information,
    or errbacks if something goes wrong.
    """
    steps = []
    if creds is not None:
        steps.append(lambda _: self.my_login(creds))
    if keyspace is not None:
        steps.append(lambda _: self.my_set_keyspace(keyspace))
    if node_auto_discovery:
        steps.append(lambda _: self.my_describe_ring(keyspace))
    chain = defer.succeed(None)
    for step in steps:
        chain.addCallback(step)
    return chain
Do login and set_keyspace tasks as necessary, and also check this node's idea of the Cassandra ring. Expects that our connection is alive. Return a Deferred that will fire with the ring information, or be errbacked if something goes wrong.
entailment
def my_pick_non_system_keyspace(self):
    """
    Find a keyspace in the cluster which is not 'system', for the
    purpose of getting a valid ring view. Can't use 'system' or null.
    """
    d = self.my_describe_keyspaces()
    _nothing = object()

    def pick_non_system(klist):
        found = next((ks.name for ks in klist
                      if ks.name not in SYSTEM_KEYSPACES), _nothing)
        if found is not _nothing:
            return found
        err = NoKeyspacesAvailable("Can't gather information about the "
                                   "Cassandra ring; no non-system "
                                   "keyspaces available")
        warn(err)
        raise err

    d.addCallback(pick_non_system)
    return d
Find a keyspace in the cluster which is not 'system', for the purpose of getting a valid ring view. Can't use 'system' or null.
entailment
def finish_and_die(self):
    """
    If there is a request pending, let it finish and be handled, then
    disconnect and die. If not, cancel any pending queue requests and
    just die.
    """
    self.logstate('finish_and_die')
    self.stop_working_on_queue()
    # A pending request gets to complete; its completion path will shut
    # us down. Otherwise stop the factory right away.
    busy = self.jobphase == 'pending_request'
    if not busy:
        self.stopFactory()
If there is a request pending, let it finish and be handled, then disconnect and die. If not, cancel any pending queue requests and just die.
entailment
def add_connection_score(self, node):
    """
    Return a numeric value scoring this node as a target for a new
    connection. A negative value means: make no connections to this
    node for at least that many seconds; -inf means never (for the
    foreseeable future). Nodes with fewer current connections score
    higher, so new connections spread out.
    """
    # TODO: this should ideally take node history into account
    blackout = node.seconds_until_connect_ok()
    if blackout > 0:
        self.log("not considering %r for new connection; has %r left on "
                 "connect blackout" % (node, blackout))
        return -blackout
    existing = self.num_connectors_to(node)
    if existing >= self.max_connections_per_node:
        return float('-Inf')
    # fewer existing connections -> higher score
    return sys.maxint - existing
Return a numeric value that determines this node's score for adding a new connection. A negative value indicates that no connections should be made to this node for at least that number of seconds. A value of -inf indicates no connections should be made to this node for the foreseeable future. This score should ideally take into account the connectedness of available nodes, so that those with less current connections will get more.
entailment
def adjustPoolSize(self, newsize):
    """
    Change the target pool size. With too many connections, some are
    asked to finish what they're doing and die (preferring connections
    to the node that already has the most). With too few, more are
    created.
    """
    if newsize < 0:
        raise ValueError("pool size must be nonnegative")
    oldsize = self.target_pool_size
    self.log("Adjust pool size from %d to %d." % (oldsize, newsize))
    self.target_pool_size = newsize
    # Trim first, then top up.
    for rebalance in (self.kill_excess_pending_conns,
                      self.kill_excess_conns,
                      self.fill_pool):
        rebalance()
Change the target pool size. If we have too many connections already, ask some to finish what they're doing and die (preferring to kill connections to the node that already has the most connections). If we have too few, create more.
entailment
def fill_pool(self):
    """
    Add connections as necessary to meet the target pool size. If there
    are no nodes to connect to (because we maxed out
    connections-per-node on all active connections and any unconnected
    nodes have pending reconnect timers), call the on_insufficient_nodes
    callback.

    Calls are throttled: at most one real fill per
    ``fill_pool_throttle`` seconds; a too-early call schedules a retry
    instead.
    """
    # Default lets a first-ever call (last_called is None) pass the
    # throttle check below.
    time_since_last_called = self.fill_pool_throttle
    if self.fill_pool_last_called is not None:
        time_since_last_called = time() - self.fill_pool_last_called
    need = self.target_pool_size - self.num_connectors()
    # Nothing to do, or a throttle timer is already pending.
    if need <= 0 or (self.throttle_timer is not None and self.throttle_timer.active()):
        return
    elif time_since_last_called < self.fill_pool_throttle:
        self.log("Filling pool too quickly, calling again in %.1f seconds"
                 % self.fill_pool_throttle)
        self._set_fill_pool_timer()
        return
    else:
        try:
            # Open up to `need` connections, one per chosen node.
            for num, node in izip(xrange(need), self.choose_nodes_to_connect()):
                self.make_conn(node)
            self.fill_pool_last_called = time()
        except NoNodesAvailable, e:
            # e.args[0] is the suggested wait time (may be +Inf).
            waittime = e.args[0]
            pending_requests = len(self.request_queue.pending)
            if self.on_insufficient_nodes:
                self.on_insufficient_nodes(self.num_active_conns(),
                                           self.target_pool_size,
                                           pending_requests,
                                           waittime if waittime != float('Inf') else None)
            self.schedule_future_fill_pool(e.args[0])
            # No connections at all but work queued: alert separately.
            if self.num_connectors() == 0 and pending_requests > 0:
                if self.on_insufficient_conns:
                    self.on_insufficient_conns(self.num_connectors(),
                                               pending_requests)
Add connections as necessary to meet the target pool size. If there are no nodes to connect to (because we maxed out connections-per-node on all active connections and any unconnected nodes have pending reconnect timers), call the on_insufficient_nodes callback.
entailment
def resubmit(self, req, keyspace, req_d, retries):
    """
    Push this request to the front of the line, just to be a jerk.
    """
    self.log('resubmitting %s request' % (req.method,))
    self.pushRequest_really(req, keyspace, req_d, retries)
    entry = (req, keyspace, req_d, retries)
    try:
        self.request_queue.pending.remove(entry)
    except ValueError:
        # already scooped up by a worker; nothing to reorder
        pass
    else:
        self.request_queue.pending.insert(0, entry)
Push this request to the front of the line, just to be a jerk.
entailment
def set_keyspace(self, keyspace):
    """
    Change the keyspace used for subsequent requests to this
    CassandraClusterPool. Returns a Deferred that fires once a
    connection has verifiably switched; on failure it errbacks and the
    pool's keyspace is left unchanged. Requests made before the
    Deferred fires may run in either the old or the new keyspace. If
    you need several keyspaces at once, see keyspaceConnection.
    """
    # Try a real set_keyspace on a single (arbitrary) connection: if it
    # succeeds there it will very likely succeed everywhere, and vice
    # versa. We deliberately don't wait for every connection to switch;
    # some may be busy with long requests, and the keyspace could be
    # changed again before they finish anyway.
    d = self.pushRequest(ManagedThriftRequest('set_keyspace', keyspace))
    d.addCallback(lambda _: setattr(self, 'keyspace', keyspace))
    return d
Change the keyspace which will be used for subsequent requests to this CassandraClusterPool, and return a Deferred that will fire once it can be verified that connections can successfully use that keyspace. If something goes wrong trying to change a connection to that keyspace, the Deferred will errback, and the keyspace to be used for future requests will not be changed. Requests made between the time this method is called and the time that the returned Deferred is fired may be made in either the previous keyspace or the new keyspace. If you may need to make use of multiple keyspaces at the same time in the same app, consider using the specialized CassandraKeyspaceConnection interface provided by the keyspaceConnection method.
entailment
def keyspaceConnection(self, keyspace, consistency=ConsistencyLevel.ONE):
    """
    Return a CassandraClient backed by this CassandraClusterPool via a
    CassandraKeyspaceConnection, so that every request made through it
    is guaranteed to run against the given keyspace regardless of what
    other consumers of the pool do.
    """
    return CassandraClient(CassandraKeyspaceConnection(self, keyspace),
                           consistency=consistency)
Return a CassandraClient instance which uses this CassandraClusterPool by way of a CassandraKeyspaceConnection, so that all requests made through it are guaranteed to go to the given keyspace, no matter what other consumers of this pool may do.
entailment
def split_by_commas(maybe_s: str) -> Tuple[str, ...]:
    """Split a string by commas, but allow escaped commas.

    - If maybe_s is falsey, returns an empty tuple
    - Ignore backslashed commas
    """
    if not maybe_s:
        return ()

    segments: List[str] = []
    for chunk in maybe_s.split(r'\,'):
        pieces = chunk.split(',')
        if segments:
            # We are just past an escaped comma: glue the first piece
            # onto the previous segment with a literal ','.
            segments[-1] += ',' + pieces[0]
            segments.extend(pieces[1:])
        else:
            segments = pieces
    return tuple(segments)
Split a string by commas, but allow escaped commas. - If maybe_s is falsey, returns an empty tuple - Ignore backslashed commas
entailment
def login(self, auth_request):
    """
    Send an asynchronous login request; returns a Deferred for the reply.

    Parameters:
     - auth_request
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_login(auth_request)
    return reply
Parameters: - auth_request
entailment
def set_keyspace(self, keyspace):
    """
    Send an asynchronous set_keyspace request; returns a Deferred for
    the reply.

    Parameters:
     - keyspace
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_set_keyspace(keyspace)
    return reply
Parameters: - keyspace
entailment
def get(self, key, column_path, consistency_level):
    """
    Get the Column or SuperColumn at the given column_path. If no value
    is present, NotFoundException is thrown. (This is the only method
    that can throw an exception under non-failure conditions.)
    Asynchronous; returns a Deferred for the reply.

    Parameters:
     - key
     - column_path
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_get(key, column_path, consistency_level)
    return reply
Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is the only method that can throw an exception under non-failure conditions.) Parameters: - key - column_path - consistency_level
entailment
def get_slice(self, key, column_parent, predicate, consistency_level):
    """
    Get the group of columns contained by column_parent (either a
    ColumnFamily name or a ColumnFamily/SuperColumn name pair) matching
    the given SlicePredicate; an empty list results if nothing matches.
    Asynchronous; returns a Deferred for the reply.

    Parameters:
     - key
     - column_parent
     - predicate
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_get_slice(key, column_parent, predicate, consistency_level)
    return reply
Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned. Parameters: - key - column_parent - predicate - consistency_level
entailment
def get_count(self, key, column_parent, predicate, consistency_level):
    """
    Count the columns matching <code>predicate</code> for a particular
    <code>key</code>, <code>ColumnFamily</code> and optionally
    <code>SuperColumn</code>. Asynchronous; returns a Deferred for the
    reply.

    Parameters:
     - key
     - column_parent
     - predicate
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_get_count(key, column_parent, predicate, consistency_level)
    return reply
returns the number of columns matching <code>predicate</code> for a particular <code>key</code>, <code>ColumnFamily</code> and optionally <code>SuperColumn</code>. Parameters: - key - column_parent - predicate - consistency_level
entailment
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
    """
    Perform a get_slice for column_parent and predicate for the given
    keys in parallel. Asynchronous; returns a Deferred for the reply.

    Parameters:
     - keys
     - column_parent
     - predicate
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_multiget_slice(keys, column_parent, predicate, consistency_level)
    return reply
Performs a get_slice for column_parent and predicate for the given keys in parallel. Parameters: - keys - column_parent - predicate - consistency_level
entailment
def multiget_count(self, keys, column_parent, predicate, consistency_level):
    """
    Perform a get_count in parallel on the given list<binary> keys; the
    result maps each key to the count found. Asynchronous; returns a
    Deferred for the reply.

    Parameters:
     - keys
     - column_parent
     - predicate
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_multiget_count(keys, column_parent, predicate, consistency_level)
    return reply
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found. Parameters: - keys - column_parent - predicate - consistency_level
entailment
def get_range_slices(self, column_parent, predicate, range, consistency_level):
    """
    Return a subset of columns for a contiguous range of keys.
    Asynchronous; returns a Deferred for the reply.

    Parameters:
     - column_parent
     - predicate
     - range
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_get_range_slices(column_parent, predicate, range, consistency_level)
    return reply
returns a subset of columns for a contiguous range of keys. Parameters: - column_parent - predicate - range - consistency_level
entailment
def get_paged_slice(self, column_family, range, start_column, consistency_level):
    """
    Return a range of columns, wrapping to the next rows if necessary
    to collect max_results. Asynchronous; returns a Deferred for the
    reply.

    Parameters:
     - column_family
     - range
     - start_column
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_get_paged_slice(column_family, range, start_column, consistency_level)
    return reply
returns a range of columns, wrapping to the next rows if necessary to collect max_results. Parameters: - column_family - range - start_column - consistency_level
entailment
def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
    """
    Return the subset of columns specified in SlicePredicate for the
    rows matching the IndexClause. Asynchronous; returns a Deferred for
    the reply.
    @deprecated use get_range_slices instead with range.row_filter specified

    Parameters:
     - column_parent
     - index_clause
     - column_predicate
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_get_indexed_slices(column_parent, index_clause, column_predicate, consistency_level)
    return reply
Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause @deprecated use get_range_slices instead with range.row_filter specified Parameters: - column_parent - index_clause - column_predicate - consistency_level
entailment
def insert(self, key, column_parent, column, consistency_level):
    """
    Insert a Column at the given column_parent.column_family and
    optional column_parent.super_column. Asynchronous; returns a
    Deferred for the reply.

    Parameters:
     - key
     - column_parent
     - column
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_insert(key, column_parent, column, consistency_level)
    return reply
Insert a Column at the given column_parent.column_family and optional column_parent.super_column. Parameters: - key - column_parent - column - consistency_level
entailment
def add(self, key, column_parent, column, consistency_level):
    """
    Increment or decrement a counter. Asynchronous; returns a Deferred
    for the reply.

    Parameters:
     - key
     - column_parent
     - column
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_add(key, column_parent, column, consistency_level)
    return reply
Increment or decrement a counter. Parameters: - key - column_parent - column - consistency_level
entailment
def remove(self, key, column_path, timestamp, consistency_level):
    """
    Remove data from the row specified by key at the granularity
    specified by column_path, and the given timestamp. Everything in
    column_path beyond column_path.column_family is truly optional:
    specify only the ColumnFamily to remove the entire row, or add the
    SuperColumn or Column levels to remove just those. Asynchronous;
    returns a Deferred for the reply.

    Parameters:
     - key
     - column_path
     - timestamp
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_remove(key, column_path, timestamp, consistency_level)
    return reply
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too. Parameters: - key - column_path - timestamp - consistency_level
entailment
def remove_counter(self, key, path, consistency_level):
    """
    Remove a counter at the specified location. Note that counters have
    limited support for deletes: after removing a counter, wait to
    issue any following update until the delete has reached all nodes
    and all of them have been fully compacted. Asynchronous; returns a
    Deferred for the reply.

    Parameters:
     - key
     - path
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_remove_counter(key, path, consistency_level)
    return reply
Remove a counter at the specified location. Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update until the delete has reached all the nodes and all of them have been fully compacted. Parameters: - key - path - consistency_level
entailment
def batch_mutate(self, mutation_map, consistency_level):
    """
    Mutate many columns or super columns for many row keys. See also:
    Mutation. mutation_map maps key to column family to a list of
    Mutation objects to take place at that scope. Asynchronous; returns
    a Deferred for the reply.

    Parameters:
     - mutation_map
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_batch_mutate(mutation_map, consistency_level)
    return reply
Mutate many columns or super columns for many row keys. See also: Mutation. mutation_map maps key to column family to a list of Mutation objects to take place at that scope. * Parameters: - mutation_map - consistency_level
entailment
def atomic_batch_mutate(self, mutation_map, consistency_level):
    """
    Atomically mutate many columns or super columns for many row keys.
    See also: Mutation. mutation_map maps key to column family to a
    list of Mutation objects to take place at that scope. Asynchronous;
    returns a Deferred for the reply.

    Parameters:
     - mutation_map
     - consistency_level
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_atomic_batch_mutate(mutation_map, consistency_level)
    return reply
Atomically mutate many columns or super columns for many row keys. See also: Mutation. mutation_map maps key to column family to a list of Mutation objects to take place at that scope. * Parameters: - mutation_map - consistency_level
entailment
def truncate(self, cfname):
    """
    Mark an entire column family as deleted. From the user's
    perspective a successful truncate means complete data deletion from
    cfname; internally disk space is not released immediately — as with
    all Cassandra deletes, the data is only marked deleted. Succeeds
    only if every host in the cluster is available; raises
    UnavailableException if some hosts are down. Asynchronous; returns
    a Deferred for the reply.

    Parameters:
     - cfname
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_truncate(cfname)
    return reply
Truncate will mark and entire column family as deleted. From the user's perspective a successful call to truncate will result complete data deletion from cfname. Internally, however, disk space will not be immediatily released, as with all deletes in cassandra, this one only marks the data as deleted. The operation succeeds only if all hosts in the cluster at available and will throw an UnavailableException if some hosts are down. Parameters: - cfname
entailment
def describe_schema_versions(self):
    """
    For each schema version present in the cluster, return the list of
    nodes at that version; unresponsive hosts appear under the key
    DatabaseDescriptor.INITIAL_VERSION. The cluster agrees on one
    schema iff the map has size 1. Asynchronous; returns a Deferred for
    the reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_schema_versions()
    return reply
for each schema version present in the cluster, returns a list of nodes at that version. hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION. the cluster is all on the same version if the size of the map is 1.
entailment
def describe_keyspaces(self):
    """
    List the defined keyspaces in this cluster. Asynchronous; returns a
    Deferred for the reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_keyspaces()
    return reply
list the defined keyspaces in this cluster
entailment
def describe_cluster_name(self):
    """
    Get the cluster name. Asynchronous; returns a Deferred for the
    reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_cluster_name()
    return reply
get the cluster name
entailment
def describe_version(self):
    """
    Get the thrift api version. Asynchronous; returns a Deferred for
    the reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_version()
    return reply
get the thrift api version
entailment
def describe_ring(self, keyspace):
    """
    Get the token ring: a map of ranges to host addresses, represented
    as a set of TokenRange rather than a map from range to endpoint
    list, because Thrift structs cannot be map keys
    (https://issues.apache.org/jira/browse/THRIFT-162); for the same
    reason a list, not a set, is returned even though order is neither
    important nor predictable. Asynchronous; returns a Deferred for the
    reply.

    Parameters:
     - keyspace
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_ring(keyspace)
    return reply
get the token ring: a map of ranges to host addresses, represented as a set of TokenRange instead of a map from range to list of endpoints, because you can't use Thrift structs as map keys: https://issues.apache.org/jira/browse/THRIFT-162 for the same reason, we can't return a set here, even though order is neither important nor predictable. Parameters: - keyspace
entailment
def describe_token_map(self):
    """
    Get the token->node-ip mapping without taking replication into
    consideration (https://issues.apache.org/jira/browse/CASSANDRA-4092).
    Asynchronous; returns a Deferred for the reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_token_map()
    return reply
get the mapping between token->node ip without taking replication into consideration https://issues.apache.org/jira/browse/CASSANDRA-4092
entailment
def describe_partitioner(self):
    """
    Get the partitioner used by this cluster. Asynchronous; returns a
    Deferred for the reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_partitioner()
    return reply
returns the partitioner used by this cluster
entailment
def describe_snitch(self):
    """
    Get the snitch used by this cluster. Asynchronous; returns a
    Deferred for the reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_snitch()
    return reply
returns the snitch used by this cluster
entailment
def describe_keyspace(self, keyspace):
    """
    Describe the specified keyspace. Asynchronous; returns a Deferred
    for the reply.

    Parameters:
     - keyspace
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_keyspace(keyspace)
    return reply
describe specified keyspace Parameters: - keyspace
entailment
def describe_splits(self, cfName, start_token, end_token, keys_per_split):
    """
    Experimental API for hadoop/parallel query support; may change
    violently and without warning. The result is a list of token
    strings such that the first subrange is (list[0], list[1]], the
    next is (list[1], list[2]], etc. Asynchronous; returns a Deferred
    for the reply.

    Parameters:
     - cfName
     - start_token
     - end_token
     - keys_per_split
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_splits(cfName, start_token, end_token, keys_per_split)
    return reply
experimental API for hadoop/parallel query support. may change violently and without warning. returns list of token strings such that first subrange is (list[0], list[1]], next is (list[1], list[2]], etc. Parameters: - cfName - start_token - end_token - keys_per_split
entailment
def trace_next_query(self):
    """
    Enable tracing for the next query on this connection; the result is
    the UUID of the trace session. The next query is traced
    independently of the trace probability, and the UUID can be used to
    query the trace keyspace. Asynchronous; returns a Deferred for the
    reply.
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_trace_next_query()
    return reply
Enables tracing for the next query in this connection and returns the UUID for that trace session The next query will be traced idependently of trace probability and the returned UUID can be used to query the trace keyspace
entailment
def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
    """
    Extended describe_splits. Asynchronous; returns a Deferred for the
    reply.

    Parameters:
     - cfName
     - start_token
     - end_token
     - keys_per_split
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_describe_splits_ex(cfName, start_token, end_token, keys_per_split)
    return reply
Parameters: - cfName - start_token - end_token - keys_per_split
entailment
def system_add_column_family(self, cf_def):
    """
    Add a column family; the result is the new schema id. Asynchronous;
    returns a Deferred for the reply.

    Parameters:
     - cf_def
    """
    self._seqid += 1
    reply = defer.Deferred()
    self._reqs[self._seqid] = reply
    self.send_system_add_column_family(cf_def)
    return reply
adds a column family. returns the new schema id. Parameters: - cf_def
entailment
def system_drop_column_family(self, column_family):
    """
    Drops a column family; the returned Deferred fires with the new
    schema id.

    Parameters:
     - column_family
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_system_drop_column_family(column_family)
    return pending
drops a column family. returns the new schema id. Parameters: - column_family
entailment
def system_add_keyspace(self, ks_def):
    """
    Adds a keyspace and any column families that are part of it; the
    returned Deferred fires with the new schema id.

    Parameters:
     - ks_def
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_system_add_keyspace(ks_def)
    return pending
adds a keyspace and any column families that are part of it. returns the new schema id. Parameters: - ks_def
entailment
def system_drop_keyspace(self, keyspace):
    """
    Drops a keyspace and any column families that are part of it; the
    returned Deferred fires with the new schema id.

    Parameters:
     - keyspace
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_system_drop_keyspace(keyspace)
    return pending
drops a keyspace and any column families that are part of it. returns the new schema id. Parameters: - keyspace
entailment
def system_update_keyspace(self, ks_def):
    """
    Updates properties of a keyspace; the returned Deferred fires with
    the new schema id.

    Parameters:
     - ks_def
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_system_update_keyspace(ks_def)
    return pending
updates properties of a keyspace. returns the new schema id. Parameters: - ks_def
entailment
def system_update_column_family(self, cf_def):
    """
    Updates properties of a column family; the returned Deferred fires
    with the new schema id.

    Parameters:
     - cf_def
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_system_update_column_family(cf_def)
    return pending
updates properties of a column family. returns the new schema id. Parameters: - cf_def
entailment
def execute_cql_query(self, query, compression):
    """
    Executes a CQL (Cassandra Query Language) statement; the returned
    Deferred fires with a CqlResult containing the results.

    Parameters:
     - query
     - compression
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_execute_cql_query(query, compression)
    return pending
Executes a CQL (Cassandra Query Language) statement and returns a CqlResult containing the results. Parameters: - query - compression
entailment
def execute_cql3_query(self, query, compression, consistency):
    """
    Asynchronously executes a CQL3 statement; returns a Deferred with
    the result.

    Parameters:
     - query
     - compression
     - consistency
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_execute_cql3_query(query, compression, consistency)
    return pending
Parameters: - query - compression - consistency
entailment
def prepare_cql_query(self, query, compression):
    """
    Prepares a CQL (Cassandra Query Language) statement by compiling it
    server-side. The returned Deferred fires with:
     - the type of CQL statement
     - an id token of the compiled CQL stored on the server side
     - a count of the discovered bound markers in the statement

    Parameters:
     - query
     - compression
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_prepare_cql_query(query, compression)
    return pending
Prepare a CQL (Cassandra Query Language) statement by compiling and returning - the type of CQL statement - an id token of the compiled CQL stored on the server side. - a count of the discovered bound markers in the statement Parameters: - query - compression
entailment
def prepare_cql3_query(self, query, compression):
    """
    Asynchronously prepares a CQL3 statement; returns a Deferred with
    the result.

    Parameters:
     - query
     - compression
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_prepare_cql3_query(query, compression)
    return pending
Parameters: - query - compression
entailment
def execute_prepared_cql_query(self, itemId, values):
    """
    Executes a prepared CQL (Cassandra Query Language) statement given
    its id token and a list of variables to bind; the returned Deferred
    fires with a CqlResult containing the results.

    Parameters:
     - itemId
     - values
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_execute_prepared_cql_query(itemId, values)
    return pending
Executes a prepared CQL (Cassandra Query Language) statement by passing an id token and a list of variables to bind and returns a CqlResult containing the results. Parameters: - itemId - values
entailment
def execute_prepared_cql3_query(self, itemId, values, consistency):
    """
    Asynchronously executes a prepared CQL3 statement; returns a
    Deferred with the result.

    Parameters:
     - itemId
     - values
     - consistency
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_execute_prepared_cql3_query(itemId, values, consistency)
    return pending
Parameters: - itemId - values - consistency
entailment
def set_cql_version(self, version):
    """
    @deprecated This is now a no-op. Please use the CQL3 specific
    methods instead.

    Parameters:
     - version
    """
    self._seqid += 1
    pending = defer.Deferred()
    self._reqs[self._seqid] = pending
    self.send_set_cql_version(version)
    return pending
@deprecated This is now a no-op. Please use the CQL3 specific methods instead. Parameters: - version
entailment
def new_linsolver(name, prop):
    """
    Creates a linear solver.

    Parameters
    ----------
    name : string
        One of 'mumps', 'superlu', 'umfpack' or 'default'.
    prop : string

    Returns
    -------
    solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`

    Raises
    ------
    ValueError
        If ``name`` is not a recognized solver name.
    """
    if name == 'default':
        # Prefer MUMPS; fall back to SuperLU when MUMPS is unavailable.
        try:
            return new_linsolver('mumps', prop)
        except ImportError:
            return new_linsolver('superlu', prop)
    if name == 'mumps':
        return LinSolverMUMPS(prop)
    if name == 'superlu':
        return LinSolverSUPERLU(prop)
    if name == 'umfpack':
        return LinSolverUMFPACK(prop)
    raise ValueError('invalid linear solver name')
Creates a linear solver. Parameters ---------- name : string prop : string Returns ------- solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`
entailment
def cast_problem(problem):
    """
    Casts problem object with known interface as OptProblem.

    Parameters
    ----------
    problem : Object
    """
    # Already the right type: nothing to do.
    if isinstance(problem, OptProblem):
        return problem

    # No general linear constraints: treat as a type-base problem.
    if not hasattr(problem, 'G'):
        return create_problem_from_type_base(problem)

    # A G that is a square identity (in COO form) adds nothing beyond
    # simple bounds, so the problem is still effectively type-base.
    G = problem.G
    identity_G = (G.shape[0] == G.shape[1] and
                  G.shape[0] == G.nnz and
                  np.all(G.row == G.col) and
                  np.all(G.data == 1.))
    if identity_G:
        return create_problem_from_type_base(problem)
    return create_problem_from_type_A(problem)
Casts problem object with known interface as OptProblem. Parameters ---------- problem : Object
entailment
def create_problem_from_type_base(problem):
    """
    Creates OptProblem from type-base problem.

    A type-base problem already has the attribute layout OptProblem
    expects, so its data is referenced directly and its ``eval`` /
    ``combine_H`` methods are forwarded to the wrapped object.

    Parameters
    ----------
    problem : Object

    Returns
    -------
    p : OptProblem
    """

    p = OptProblem()

    # Init attributes
    # Objective, constraint and bound data are shared (not copied) with
    # the wrapped problem.
    p.phi = problem.phi
    p.gphi = problem.gphi
    p.Hphi = problem.Hphi
    p.A = problem.A
    p.b = problem.b
    p.f = problem.f
    p.J = problem.J
    p.H_combined = problem.H_combined
    p.u = problem.u
    p.l = problem.l
    p.x = problem.x
    # Dual-variable slots start unset; solvers fill them in.
    p.P = None
    p.lam = None
    p.nu = None
    p.mu = None
    p.pi = None
    p.wrapped_problem = problem

    # Methods
    def eval(cls, x):
        # Delegate evaluation, then mirror the refreshed quantities.
        cls.wrapped_problem.eval(x)
        cls.phi = cls.wrapped_problem.phi
        cls.gphi = cls.wrapped_problem.gphi
        cls.Hphi = cls.wrapped_problem.Hphi
        cls.f = cls.wrapped_problem.f
        cls.J = cls.wrapped_problem.J
    def combine_H(cls, coeff, ensure_psd=False):
        # Delegate Hessian combination, then mirror the result.
        cls.wrapped_problem.combine_H(coeff, ensure_psd)
        cls.H_combined = cls.wrapped_problem.H_combined
    # Bind the closures as instance methods of p.
    p.eval = MethodType(eval, p)
    p.combine_H = MethodType(combine_H, p)

    # Return
    return p
Creates OptProblem from type-base problem. Parameters ---------- problem : Object
entailment
def create_problem_from_type_A(problem):
    """
    Creates OptProblem from type-A problem.

    A type-A problem carries general linear constraints through a matrix
    ``G``. Slack variables ``z`` are introduced (``Gx - z = 0``) so the
    result fits the OptProblem form whose variables are ``(x, z)`` with
    simple bounds only.

    Parameters
    ----------
    problem : Object

    Returns
    -------
    p : OptProblem
    """

    p = OptProblem()

    nx = problem.get_num_primal_variables()
    nz = problem.G.shape[0]  # number of slack variables

    p.phi = problem.phi
    # Objective gradient/Hessian padded with zeros for the slacks.
    p.gphi = np.hstack((problem.gphi,np.zeros(nz)))
    p.Hphi = coo_matrix((problem.Hphi.data,(problem.Hphi.row,problem.Hphi.col)),shape=(nx+nz,nx+nz))
    # Equality constraints: original Ax=b plus Gx - z = 0.
    p.A = bmat([[problem.A,None],[problem.G,-eye(nz)]],format='coo')
    p.b = np.hstack((problem.b,np.zeros(nz)))
    p.f = problem.f
    p.J = coo_matrix((problem.J.data,(problem.J.row,problem.J.col)),shape=(problem.J.shape[0],nx+nz))
    p.H_combined = coo_matrix((problem.H_combined.data,(problem.H_combined.row,problem.H_combined.col)),shape=(nx+nz,nx+nz))
    # Bounds on (x, z): x keeps its limits, z inherits the G-row bounds.
    # NOTE(review): ordering here presumes get_upper_limits()/get_lower_limits()
    # return the x-part limits — confirm against the type-A interface.
    p.u = np.hstack((problem.get_upper_limits(),problem.u))
    p.l = np.hstack((problem.get_lower_limits(),problem.l))
    # Slacks start at zero.
    p.x = np.hstack((problem.x,np.zeros(nz)))
    p.P = None
    p.lam = None
    p.nu = None
    p.mu = None
    p.pi = None
    p.wrapped_problem = problem

    def eval(cls, xz):
        # Split the augmented vector, evaluate on the x-part only, and
        # re-pad the results to the augmented dimensions.
        x = xz[:nx]
        z = xz[nx:]
        prob = cls.wrapped_problem
        prob.eval(x)
        cls.phi = prob.phi
        cls.gphi = np.hstack((prob.gphi,np.zeros(nz)))
        cls.Hphi = coo_matrix((prob.Hphi.data,(prob.Hphi.row,prob.Hphi.col)),shape=(nx+nz,nx+nz))
        cls.f = prob.f
        cls.J = coo_matrix((prob.J.data,(prob.J.row,prob.J.col)),shape=(prob.J.shape[0],nx+nz))
    def combine_H(cls, coeff, ensure_psd=False):
        prob = cls.wrapped_problem
        prob.combine_H(coeff,ensure_psd=ensure_psd)
        cls.H_combined = coo_matrix((prob.H_combined.data,(prob.H_combined.row,prob.H_combined.col)),shape=(nx+nz,nx+nz))
    def recover_primal_variables(cls, x):
        # Drop the slack components.
        return x[:nx]
    def recover_dual_variables(cls, lam, nu, mu, pi):
        # Keep multipliers of the original equalities; bound multipliers
        # of the slacks map back to the G-row constraints.
        prob = cls.wrapped_problem
        return lam[:prob.A.shape[0]],nu,mu[nx:],pi[nx:]
    p.eval = MethodType(eval,p)
    p.combine_H = MethodType(combine_H,p)
    p.recover_primal_variables = MethodType(recover_primal_variables,p)
    p.recover_dual_variables = MethodType(recover_dual_variables,p)

    # Return
    return p
Creates OptProblem from type-A problem. Parameters ---------- problem : Object
entailment
def recover_dual_variables(self, lam, nu, mu, pi):
    """
    Recovers dual variables for original problem.

    The base problem needs no transformation, so this is the identity
    mapping.

    Parameters
    ----------
    lam : ndarray
    nu : ndarray
    mu : ndarray
    pi : ndarray

    Returns
    -------
    (lam, nu, mu, pi) unchanged.
    """
    return (lam, nu, mu, pi)
Recovers dual variables for original problem. Parameters ---------- lam : ndarray nu : ndarray mu : ndarray pi : ndarray
entailment
def get_num_primal_variables(self):
    """
    Gets number of primal variables.

    The size is inferred from the first available attribute, checked in
    a fixed order; 0 is returned when nothing is set.

    Returns
    -------
    num : int
    """
    x = self.x
    if x is not None:
        return x.size
    gphi = self.gphi
    if gphi is not None:
        return gphi.size
    Hphi = self.Hphi
    if Hphi is not None:
        return Hphi.shape[0]
    A = self.A
    if A is not None:
        return A.shape[1]
    J = self.J
    if J is not None:
        return J.shape[1]
    u = self.u
    if u is not None:
        return u.size
    l = self.l
    if l is not None:
        return l.size
    return 0
Gets number of primal variables. Returns ------- num : int
entailment
def get_dual_variables(self):
    """
    Gets dual variables.

    The stored multipliers are rescaled by the objective scaling factor
    and mapped back through the problem's own recovery routine.

    Returns
    -------
    lam : vector
    nu : vector
    mu : vector
    pi : vector
    """
    if not self.problem:
        return None, None, None, None
    scale = self.obj_sca
    return self.problem.recover_dual_variables(self.lam*scale,
                                               self.nu*scale,
                                               self.mu*scale,
                                               self.pi*scale)
Gets dual variables. Returns ------- lam : vector nu : vector mu : vector pi : vector
entailment
def get_results(self):
    """
    Gets results.

    Dual quantities are rescaled by the objective scaling factor before
    being reported.

    Returns
    -------
    results : dictionary
    """
    scale = self.obj_sca
    results = {'status': self.status,
               'error_msg': self.error_msg,
               'k': self.k,
               'x': self.x}
    results.update({'lam': self.lam*scale,
                    'nu': self.nu*scale,
                    'mu': self.mu*scale,
                    'pi': self.pi*scale})
    return results
Gets results. Returns ------- results : dictionary
entailment
def line_search(self,x,p,F,GradF,func,smax=np.inf,maxiter=40): """ Finds steplength along search direction p that satisfies the strong Wolfe conditions. Parameters ---------- x : current point (ndarray) p : search direction (ndarray) F : function value at `x` (float) GradF : gradient of function at `x` (ndarray) func : function of `x` that returns function object with attributes `F` and `GradF` (function) smax : maximum allowed steplength (float) Returns ------- s : stephlength that satisfies the Wolfe conditions (float). """ # Parameters of line search c1 = 1e-4 c2 = 5e-1 # Initialize lower bound, upper bound and step l = 0. if 1. < smax: s = 1. else: s = smax u = np.NaN phi = F dphi = np.dot(GradF,p) # Check that p is descent direction if dphi >= 0: raise OptSolverError_BadSearchDir(self) # Bisection for i in range(0,maxiter): xsp = x+s*p fdata = func(xsp) phis = fdata.F dphis = np.dot(fdata.GradF,p) if phis > phi + c1*s*dphi: u = s elif dphis > 0 and dphis > -c2*dphi: u = s elif dphis < 0 and -dphis > -c2*dphi: l = s if s >= smax: return s,fdata else: return s,fdata if np.isnan(u): s = np.min([2.*s,smax]) else: s = (l + u)/2. raise OptSolverError_LineSearch(self)
Finds steplength along search direction p that satisfies the strong Wolfe conditions. Parameters ---------- x : current point (ndarray) p : search direction (ndarray) F : function value at `x` (float) GradF : gradient of function at `x` (ndarray) func : function of `x` that returns function object with attributes `F` and `GradF` (function) smax : maximum allowed steplength (float) Returns ------- s : stephlength that satisfies the Wolfe conditions (float).
entailment
def reset(self):
    """
    Resets solver data.

    Clears the iteration counter, primal/dual iterates, status, error
    message and objective scaling.
    """
    # Empty out all iterate vectors.
    for attr in ('x', 'lam', 'nu', 'mu', 'pi'):
        setattr(self, attr, np.zeros(0))
    self.k = 0.
    self.status = self.STATUS_UNKNOWN
    self.error_msg = ''
    self.obj_sca = 1.
Resets solver data.
entailment
def set_parameters(self, parameters):
    """
    Sets solver parameters.

    Only keys already present in ``self.parameters`` are applied;
    unknown keys are silently ignored.

    Parameters
    ----------
    parameters : dict
    """
    current = self.parameters
    for key, value in parameters.items():
        if key in current:
            current[key] = value
Sets solver parameters. Parameters ---------- parameters : dict
entailment
def factorize_and_solve(self, A, b):
    """
    Factorizes A and solves Ax=b.

    Parameters
    ----------
    A : matrix
    b : vector

    Returns
    -------
    x : vector
    """
    self.factorize(A)
    solution = self.solve(b)
    return solution
Factorizes A and solves Ax=b. Returns ------- x : vector
entailment
def solve(self,problem): """ Solves optimization problem. Parameters ---------- problem : Object """ # Local vars norm2 = self.norm2 norminf = self.norminf parameters = self.parameters # Parameters feastol = parameters['feastol'] optol = parameters['optol'] maxiter = parameters['maxiter'] quiet = parameters['quiet'] sigma = parameters['sigma'] eps = parameters['eps'] ls_maxiter = parameters['line_search_maxiter'] # Problem problem = cast_problem(problem) self.problem = problem # Linsolver self.linsolver = new_linsolver(parameters['linsolver'],'symmetric') # Reset self.reset() # Checks if not np.all(problem.l <= problem.u): raise OptSolverError_NoInterior(self) # Constants self.A = problem.A self.AT = problem.A.T self.b = problem.b self.u = problem.u+feastol/10. self.l = problem.l-feastol/10. self.n = problem.get_num_primal_variables() self.m1 = problem.get_num_linear_equality_constraints() self.m2 = problem.get_num_nonlinear_equality_constraints() self.e = np.ones(self.n) self.I = eye(self.n,format='coo') self.Omm1 = coo_matrix((self.m1,self.m1)) self.Omm2 = coo_matrix((self.m2,self.m2)) # Initial primal if problem.x is None: self.x = (self.u + self.l)/2. else: self.x = np.maximum(np.minimum(problem.x,problem.u),problem.l) # Initial duals if problem.lam is None: self.lam = np.zeros(problem.get_num_linear_equality_constraints()) else: self.lam = problem.lam.copy() if problem.nu is None: self.nu = np.zeros(problem.get_num_nonlinear_equality_constraints()) else: self.nu = problem.nu.copy() self.mu = np.minimum(1./(self.u-self.x), 1.) self.pi = np.minimum(1./(self.x-self.l), 1.) # Init vector self.y = np.hstack((self.x,self.lam,self.nu,self.mu,self.pi)) # Average violation of complementarity slackness self.eta_mu = (np.dot(self.mu,self.u-self.x)/self.x.size) if self.x.size else 0. self.eta_pi = (np.dot(self.pi,self.x-self.l)/self.x.size) if self.x.size else 0. # Objective scaling fdata = self.func(self.y) self.obj_sca = np.maximum(norminf(problem.gphi)/10.,1.) 
fdata = self.func(self.y) # Header if not quiet: print('\nSolver: inlp') print('------------') # Outer s = 0. self.k = 0 while True: # Average violation of complementarity slackness self.eta_mu = (np.dot(self.mu,self.u-self.x)/self.x.size) if self.x.size else 0. self.eta_pi = (np.dot(self.pi,self.x-self.l)/self.x.size) if self.x.size else 0. # Init eval fdata = self.func(self.y) # Target tau = sigma*norminf(fdata.GradF) # Header if not quiet: if self.k > 0: print('') print('{0:^3s}'.format('iter'),end=' ') print('{0:^9s}'.format('phi'),end=' ') print('{0:^9s}'.format('pres'),end=' ') print('{0:^9s}'.format('dres'),end=' ') print('{0:^9s}'.format('gmax'),end=' ') print('{0:^8s}'.format('cu'),end=' ') print('{0:^8s}'.format('cl'),end=' ') print('{0:^8s}'.format('alpha')) # Inner while True: # Eval fdata = self.func(self.y) pres = norminf(np.hstack((fdata.rp1,fdata.rp2))) dres = norminf(np.hstack((fdata.rd,fdata.ru,fdata.rl))) gmax = norminf(fdata.GradF) compu = norminf(self.mu*(self.u-self.x)) compl = norminf(self.pi*(self.x-self.l)) phi = problem.phi # Show progress if not quiet: print('{0:^3d}'.format(self.k),end=' ') print('{0:^9.2e}'.format(phi),end=' ') print('{0:^9.2e}'.format(pres),end=' ') print('{0:^9.2e}'.format(dres),end=' ') print('{0:^9.2e}'.format(gmax),end=' ') print('{0:^8.1e}'.format(compu),end=' ') print('{0:^8.1e}'.format(compl),end=' ') print('{0:^8.1e}'.format(s)) # Done if self.k > 0 and pres < feastol and dres < optol and sigma*np.maximum(self.eta_mu,self.eta_pi) < optol: self.set_status(self.STATUS_SOLVED) self.set_error_msg('') return # Done if gmax < tau: break # Done if pres < feastol and dres < optol and np.maximum(compu,compl) < optol: break # Maxiters if self.k >= maxiter: raise OptSolverError_MaxIters(self) # Search direction ux = self.u-self.x xl = self.x-self.l D1 = spdiags(self.mu/ux,0,self.n,self.n,format='coo') D2 = spdiags(self.pi/xl,0,self.n,self.n,format='coo') fbar = 
np.hstack((-fdata.rd+fdata.ru/ux-fdata.rl/xl,fdata.rp1,fdata.rp2)) Hbar = coo_matrix((np.concatenate((problem.Hphi.data/self.obj_sca, problem.H_combined.data, D1.data, D2.data)), (np.concatenate((problem.Hphi.row, problem.H_combined.row, D1.row, D2.row)), np.concatenate((problem.Hphi.col, problem.H_combined.col, D1.col, D2.col))))) Jbar = bmat([[Hbar,None,None], [-self.A,self.Omm1,None], [-problem.J,None,self.Omm2]], format='coo') try: if not self.linsolver.is_analyzed(): self.linsolver.analyze(Jbar) pbar = self.linsolver.factorize_and_solve(Jbar,fbar) except RuntimeError: raise OptSolverError_BadLinSystem(self) px = pbar[:self.n] pmu = (-fdata.ru + self.mu*px)/ux ppi = (-fdata.rl - self.pi*px)/xl p = np.hstack((pbar,pmu,ppi)) # Steplength bounds indices = px > 0 s1 = np.min(np.hstack(((self.u-self.x)[indices]/px[indices],np.inf))) indices = px < 0 s2 = np.min(np.hstack(((self.l-self.x)[indices]/px[indices],np.inf))) indices = pmu < 0 s3 = np.min(np.hstack((-self.mu[indices]/pmu[indices],np.inf))) indices = ppi < 0 s4 = np.min(np.hstack((-self.pi[indices]/ppi[indices],np.inf))) smax = (1.-eps)*np.min([s1,s2,s3,s4]) spmax = (1.-eps)*np.min([s1,s2]) sdmax = (1.-eps)*np.min([s3,s4]) # Line search try: s, fdata = self.line_search(self.y, p, fdata.F, fdata.GradF, self.func, smax=smax, maxiter=ls_maxiter) # Update point self.y += s*p self.x, self.lam, self.nu, self.mu, self.pi = self.extract_components(self.y) except OptSolverError_LineSearch: sp = np.minimum(1., spmax) sd = np.minimum(1., sdmax) s = np.minimum(sp,sd) # Update point self.x += sp*px self.lam += sd*pbar[self.x.size:self.x.size+self.lam.size] self.nu += sd*pbar[self.x.size+self.lam.size:] self.mu += sd*pmu self.pi += sd*ppi self.y = np.hstack((self.x,self.lam,self.nu,self.mu,self.pi)) # Update iters self.k += 1 # Check try: assert(np.all(self.x < self.u)) assert(np.all(self.x > self.l)) assert(np.all(self.mu > 0)) assert(np.all(self.pi > 0)) except AssertionError: raise OptSolverError_Infeasibility(self) # 
Update iters self.k += 1
Solves optimization problem. Parameters ---------- problem : Object
entailment
def format_files(src: str, dest: str, **fmt_vars: str) -> None:
    """Copies all files inside src into dest while formatting the contents
    of the files into the output.

    For example, a file with the contents::

        {foo} bar {baz}

    and the vars ``{'foo': 'herp', 'baz': 'derp'}`` ends up in the output
    as ``herp bar derp``.

    :param text src: Source directory.
    :param text dest: Destination directory.
    :param dict fmt_vars: Vars to format into the files.
    """
    assert os.path.exists(src)
    assert os.path.exists(dest)
    # Only at the root; the directory structure is assumed flat.
    for entry in os.listdir(src):
        if entry.endswith(EXCLUDED_EXTENSIONS):
            continue
        src_path = os.path.join(src, entry)
        if not os.path.isfile(src_path):
            # Skip subdirectories.
            continue
        with open(src_path) as handle:
            rendered = handle.read().format(**fmt_vars)
        with open(os.path.join(dest, entry), 'w') as handle:
            handle.write(rendered)
Copies all files inside src into dest while formatting the contents of the files into the output. For example, a file with the following contents: {foo} bar {baz} and the vars {'foo': 'herp', 'baz': 'derp'} will end up in the output as herp bar derp :param text src: Source directory. :param text dest: Destination directory. :param dict fmt_vars: Vars to format into the files.
entailment
def analyze(self, A):
    """
    Analyzes structure of A (MUMPS job 1).

    Parameters
    ----------
    A : matrix
       For symmetric systems, should contain only lower diagonal part.
    """
    coo = coo_matrix(A)
    self.mumps.set_shape(coo.shape[0])
    # MUMPS uses 1-based (Fortran) indexing.
    self.mumps.set_centralized_assembled_rows_cols(coo.row+1, coo.col+1)
    self.mumps.run(job=1)
    self.analyzed = True
Analyzes structure of A. Parameters ---------- A : matrix For symmetric systems, should contain only lower diagonal part.
entailment
def factorize(self, A):
    """
    Factorizes A (MUMPS job 2); `analyze` must have been run on a
    matrix with the same sparsity pattern.

    Parameters
    ----------
    A : matrix
       For symmetric systems, should contain only lower diagonal part.
    """
    coo = coo_matrix(A)
    # Only the numerical values change between factorizations.
    self.mumps.set_centralized_assembled_values(coo.data)
    self.mumps.run(job=2)
Factorizes A. Parameters ---------- A : matrix For symmetric systems, should contain only lower diagonal part.
entailment
def solve(self, b):
    """
    Solves system Ax=b using the existing factorization (MUMPS job 3).

    Parameters
    ----------
    b : ndarray

    Returns
    -------
    x : ndarray
    """
    # MUMPS overwrites the right-hand side in place, so work on a copy.
    rhs = b.copy()
    self.mumps.set_rhs(rhs)
    self.mumps.run(job=3)
    return rhs
Solves system Ax=b. Parameters ---------- b : ndarray Returns ------- x : ndarray
entailment
def factorize_and_solve(self, A, b):
    """
    Factorizes A and solves Ax=b in one pass (MUMPS job 5).

    Parameters
    ----------
    A : matrix
    b : ndarray

    Returns
    -------
    x : ndarray
    """
    coo = coo_matrix(A)
    # MUMPS overwrites the right-hand side in place, so work on a copy.
    rhs = b.copy()
    self.mumps.set_centralized_assembled_values(coo.data)
    self.mumps.set_rhs(rhs)
    self.mumps.run(job=5)
    return rhs
Factorizes A and solves Ax=b. Parameters ---------- A : matrix b : ndarray Returns ------- x : ndarray
entailment
def spsolve(A, b, comm=None):
    """Sparse solve A\b."""
    assert A.dtype == 'd' and b.dtype == 'd', "Only double precision supported."
    with DMumpsContext(par=1, sym=0, comm=comm) as ctx:
        root = ctx.myid == 0
        x = None
        if root:
            # The assembled matrix and right-hand side live on rank 0 only.
            ctx.set_centralized_sparse(A.tocoo())
            x = b.copy()
            ctx.set_rhs(x)
        # Silence most messages.
        ctx.set_silent()
        # Analysis + Factorization + Solve.
        ctx.run(job=6)
        if root:
            return x
Sparse solve A\b.
entailment
def set_centralized_sparse(self, A):
    """Set assembled matrix on processor 0.

    Parameters
    ----------
    A : `scipy.sparse.coo_matrix`
        Sparse matrices of other formats will be converted to
        COOrdinate form.
    """
    # Only rank 0 holds the centralized matrix.
    if self.myid != 0:
        return
    coo = A.tocoo()
    n = coo.shape[0]
    assert coo.shape == (n, n), "Expected a square matrix."
    self.set_shape(n)
    # MUMPS expects 1-based indices.
    self.set_centralized_assembled(coo.row+1, coo.col+1, coo.data)
Set assembled matrix on processor 0. Parameters ---------- A : `scipy.sparse.coo_matrix` Sparse matrices of other formats will be converted to COOrdinate form.
entailment
def set_centralized_assembled(self, irn, jcn, a):
    """Set assembled matrix on processor 0.

    The row and column indices (irn & jcn) should be one based.
    """
    # Indices first, then the numerical values.
    self.set_centralized_assembled_rows_cols(irn, jcn)
    self.set_centralized_assembled_values(a)
Set assembled matrix on processor 0. The row and column indices (irn & jcn) should be one based.
entailment
def set_centralized_assembled_rows_cols(self, irn, jcn):
    """Set assembled matrix indices on processor 0.

    The row and column indices (irn & jcn) should be one based.
    """
    # Only rank 0 holds the centralized matrix.
    if self.myid != 0:
        return
    assert irn.size == jcn.size
    # Keep Python references so the arrays outlive this call while the
    # native side still points at them.
    self._refs.update({'irn': irn, 'jcn': jcn})
    self.id.nz = irn.size
    self.id.irn = self.cast_array(irn)
    self.id.jcn = self.cast_array(jcn)
Set assembled matrix indices on processor 0. The row and column indices (irn & jcn) should be one based.
entailment