| signature (string, 8 to 3.44k chars) | body (string, 0 to 1.41M chars) | docstring (string, 1 to 122k chars) | id (string, 5 to 17 chars) |
|---|---|---|---|
def which(program, mode=os.F_OK | os.X_OK, path=None):
|
try:<EOL><INDENT>from shutil import which as shwhich<EOL>return shwhich(program, mode, path)<EOL><DEDENT>except ImportError:<EOL><INDENT>def is_exe(fpath):<EOL><INDENT>return os.path.isfile(fpath) and os.access(fpath, os.X_OK)<EOL><DEDENT>fpath, _ = os.path.split(program)<EOL>if fpath:<EOL><INDENT>if is_exe(program):<EOL><INDENT>return program<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if path is None:<EOL><INDENT>path = os.environ.get("<STR_LIT>", os.defpath)<EOL><DEDENT>if not path:<EOL><INDENT>return None<EOL><DEDENT>path = path.split(os.pathsep)<EOL>for pathdir in path:<EOL><INDENT>pathdir = pathdir.strip('<STR_LIT:">')<EOL>exe_file = os.path.join(pathdir, program)<EOL>if is_exe(exe_file):<EOL><INDENT>return exe_file<EOL><DEDENT><DEDENT><DEDENT>return None<EOL><DEDENT>
|
Mimics the Unix utility which.
For Python 3.3+, shutil.which provides all of the required functionality;
a fallback implementation is provided in case shutil.which does
not exist.
:param program: (required) string
Name of program (can be fully-qualified path as well)
:param mode: (optional) integer flag bits
Permissions to check for in the executable
Default: os.F_OK (file exists) | os.X_OK (executable file)
:param path: (optional) string
A custom path list to check against. Implementation taken from
shutil.py.
Returns:
A fully qualified path to program as resolved by path or
user environment.
Returns None when the program cannot be resolved.
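Example (a hedged sketch; the resolved path depends on the local system, so
the output shown is illustrative only):
>>> which('ls')                                  # doctest: +SKIP
'/bin/ls'
>>> which('some-nonexistent-program') is None    # doctest: +SKIP
True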
|
f10137:m2
|
def convert_3d_counts_to_cf(ND1, ND2, NR1, NR2,<EOL>D1D2, D1R2, D2R1, R1R2,<EOL>estimator='<STR_LIT>'):
|
import numpy as np<EOL>pair_counts = dict()<EOL>fields = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>arrays = [D1D2, D1R2, D2R1, R1R2]<EOL>for (field, array) in zip(fields, arrays):<EOL><INDENT>try:<EOL><INDENT>npairs = array['<STR_LIT>']<EOL>pair_counts[field] = npairs<EOL><DEDENT>except IndexError:<EOL><INDENT>pair_counts[field] = array<EOL><DEDENT><DEDENT>nbins = len(pair_counts['<STR_LIT>'])<EOL>if (nbins != len(pair_counts['<STR_LIT>'])) or(nbins != len(pair_counts['<STR_LIT>'])) or(nbins != len(pair_counts['<STR_LIT>'])):<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise ValueError(msg)<EOL><DEDENT>nonzero = pair_counts['<STR_LIT>'] > <NUM_LIT:0><EOL>if '<STR_LIT>' in estimator or '<STR_LIT>' in estimator:<EOL><INDENT>fN1 = np.float(NR1) / np.float(ND1)<EOL>fN2 = np.float(NR2) / np.float(ND2)<EOL>cf = np.zeros(nbins)<EOL>cf[:] = np.nan<EOL>cf[nonzero] = (fN1 * fN2 * pair_counts['<STR_LIT>'][nonzero] -<EOL>fN1 * pair_counts['<STR_LIT>'][nonzero] -<EOL>fN2 * pair_counts['<STR_LIT>'][nonzero] +<EOL>pair_counts['<STR_LIT>'][nonzero]) / pair_counts['<STR_LIT>'][nonzero]<EOL>if len(cf) != nbins:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>''<STR_LIT>'.format(<EOL>nbins, len(cf))<EOL>raise RuntimeError(msg)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>""<STR_LIT>".format(estimator)<EOL>raise ValueError(msg)<EOL><DEDENT>return cf<EOL>
|
Converts raw pair counts to a correlation function.
Parameters
----------
ND1 : integer
Number of points in the first dataset
ND2 : integer
Number of points in the second dataset
NR1 : integer
Number of points in the randoms for first dataset
NR2 : integer
Number of points in the randoms for second dataset
D1D2 : array-like, integer
Pair-counts for the cross-correlation between D1 and D2
D1R2 : array-like, integer
Pair-counts for the cross-correlation between D1 and R2
D2R1 : array-like, integer
Pair-counts for the cross-correlation between D2 and R1
R1R2 : array-like, integer
Pair-counts for the cross-correlation between R1 and R2
For all of these pair-counts arrays, the corresponding ``numpy``
struct returned by the theory/mocks modules can also be passed
estimator: string, default='LS' (Landy-Szalay)
The kind of estimator to use for computing the correlation
function. Currently, only supports Landy-Szalay
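For reference, the Landy-Szalay estimator used here amounts to, per bin
(with fN1 = NR1/ND1 and fN2 = NR2/ND2):
cf = (fN1*fN2*D1D2 - fN1*D1R2 - fN2*D2R1 + R1R2) / R1R2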
Returns
---------
cf : A numpy array
The correlation function, calculated using the chosen estimator,
is returned. NAN is returned for the bins where the ``RR`` count
is 0.
Example
--------
>>> from __future__ import print_function
>>> import numpy as np
>>> from Corrfunc.theory.DD import DD
>>> from Corrfunc.io import read_catalog
>>> from Corrfunc.utils import convert_3d_counts_to_cf
>>> X, Y, Z = read_catalog()
>>> N = len(X)
>>> boxsize = 420.0
>>> rand_N = 3*N
>>> seed = 42
>>> np.random.seed(seed)
>>> rand_X = np.random.uniform(0, boxsize, rand_N)
>>> rand_Y = np.random.uniform(0, boxsize, rand_N)
>>> rand_Z = np.random.uniform(0, boxsize, rand_N)
>>> nthreads = 2
>>> rmin = 0.1
>>> rmax = 15.0
>>> nbins = 10
>>> bins = np.linspace(rmin, rmax, nbins + 1)
>>> autocorr = 1
>>> DD_counts = DD(autocorr, nthreads, bins, X, Y, Z)
>>> autocorr = 0
>>> DR_counts = DD(autocorr, nthreads, bins,
... X, Y, Z,
... X2=rand_X, Y2=rand_Y, Z2=rand_Z)
>>> autocorr = 1
>>> RR_counts = DD(autocorr, nthreads, bins, rand_X, rand_Y, rand_Z)
>>> cf = convert_3d_counts_to_cf(N, N, rand_N, rand_N,
... DD_counts, DR_counts,
... DR_counts, RR_counts)
>>> for xi in cf: print("{0:10.6f}".format(xi))
... # doctest: +NORMALIZE_WHITESPACE
22.769019
3.612709
1.621372
1.000969
0.691646
0.511819
0.398872
0.318815
0.255643
0.207759
|
f10139:m0
|
def convert_rp_pi_counts_to_wp(ND1, ND2, NR1, NR2,<EOL>D1D2, D1R2, D2R1, R1R2,<EOL>nrpbins, pimax, dpi=<NUM_LIT:1.0>,<EOL>estimator='<STR_LIT>'):
|
import numpy as np<EOL>if dpi <= <NUM_LIT:0.0>:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>'.format(dpi)<EOL>raise ValueError(msg)<EOL><DEDENT>xirppi = convert_3d_counts_to_cf(ND1, ND2, NR1, NR2,<EOL>D1D2, D1R2, D2R1, R1R2,<EOL>estimator=estimator)<EOL>wp = np.empty(nrpbins)<EOL>npibins = len(xirppi) // nrpbins<EOL>if ((npibins * nrpbins) != len(xirppi)):<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>'.format(len(xirppi),<EOL>npibins,<EOL>nrpbins)<EOL>raise ValueError(msg)<EOL><DEDENT>if (dpi*npibins != pimax):<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>'.format(pimax, npibins, dpi)<EOL>raise ValueError(msg)<EOL><DEDENT>for i in range(nrpbins):<EOL><INDENT>wp[i] = <NUM_LIT> * dpi * np.sum(xirppi[i * npibins:(i + <NUM_LIT:1>) * npibins])<EOL><DEDENT>return wp<EOL>
|
Converts raw pair counts to the projected correlation function, wp(rp).
Parameters
----------
ND1 : integer
Number of points in the first dataset
ND2 : integer
Number of points in the second dataset
NR1 : integer
Number of points in the randoms for first dataset
NR2 : integer
Number of points in the randoms for second dataset
D1D2 : array-like, integer
Pair-counts for the cross-correlation between D1 and D2
D1R2 : array-like, integer
Pair-counts for the cross-correlation between D1 and R2
D2R1 : array-like, integer
Pair-counts for the cross-correlation between D2 and R1
R1R2 : array-like, integer
Pair-counts for the cross-correlation between R1 and R2
For all of these pair-counts arrays, the corresponding ``numpy``
struct returned by the theory/mocks modules can also be passed
nrpbins : integer
Number of bins in ``rp``
pimax : float
Integration distance along the line of sight direction
dpi : float, default=1.0 Mpc/h
Binsize in the line of sight direction
estimator: string, default='LS' (Landy-Szalay)
The kind of estimator to use for computing the correlation
function. Currently, only supports Landy-Szalay
Returns
---------
wp : A numpy array
The projected correlation function, calculated using the chosen
estimator, is returned. If *any* of the ``pi`` bins (in an ``rp``
bin) contains 0 for the ``RR`` counts, then ``NAN`` is returned
for that ``rp`` bin.
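For reference, each ``wp`` value is the sum of ``xi(rp, pi)`` over the ``pi``
bins in that ``rp`` bin, scaled by ``dpi`` and the conventional factor of two:
wp(rp) = 2 * dpi * sum_i xi(rp, pi_i)
where ``xi(rp, pi)`` is computed via ``convert_3d_counts_to_cf``.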
Example
--------
>>> from __future__ import print_function
>>> import numpy as np
>>> from Corrfunc.theory.DDrppi import DDrppi
>>> from Corrfunc.io import read_catalog
>>> from Corrfunc.utils import convert_rp_pi_counts_to_wp
>>> X, Y, Z = read_catalog()
>>> N = len(X)
>>> boxsize = 420.0
>>> rand_N = 3*N
>>> seed = 42
>>> np.random.seed(seed)
>>> rand_X = np.random.uniform(0, boxsize, rand_N)
>>> rand_Y = np.random.uniform(0, boxsize, rand_N)
>>> rand_Z = np.random.uniform(0, boxsize, rand_N)
>>> nthreads = 4
>>> pimax = 40.0
>>> nrpbins = 20
>>> rpmin = 0.1
>>> rpmax = 10.0
>>> bins = np.linspace(rpmin, rpmax, nrpbins + 1)
>>> autocorr = 1
>>> DD_counts = DDrppi(autocorr, nthreads, pimax, bins,
... X, Y, Z)
>>> autocorr = 0
>>> DR_counts = DDrppi(autocorr, nthreads, pimax, bins,
... X, Y, Z,
... X2=rand_X, Y2=rand_Y, Z2=rand_Z)
>>> autocorr = 1
>>> RR_counts = DDrppi(autocorr, nthreads, pimax, bins,
... rand_X, rand_Y, rand_Z)
>>> wp = convert_rp_pi_counts_to_wp(N, N, rand_N, rand_N,
... DD_counts, DR_counts,
... DR_counts, RR_counts,
... nrpbins, pimax)
>>> for w in wp: print("{0:10.6f}".format(w))
... # doctest: +NORMALIZE_WHITESPACE
187.592199
83.059181
53.200599
40.389354
33.356371
29.045476
26.088133
23.628340
21.703961
20.153125
18.724781
17.433235
16.287183
15.443230
14.436193
13.592727
12.921226
12.330074
11.696364
11.208365
|
f10139:m1
|
def return_file_with_rbins(rbins):
|
is_string = False<EOL>delete_after_use = False<EOL>try:<EOL><INDENT>if isinstance(rbins, basestring):<EOL><INDENT>is_string = True<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>if isinstance(rbins, str):<EOL><INDENT>is_string = True<EOL><DEDENT><DEDENT>if is_string:<EOL><INDENT>if file_exists(rbins):<EOL><INDENT>delete_after_use = False<EOL>return rbins, delete_after_use<EOL><DEDENT>else:<EOL><INDENT>msg = "<STR_LIT>".format(rbins)<EOL>raise IOError(msg)<EOL><DEDENT><DEDENT>if len(rbins) >= <NUM_LIT:1>:<EOL><INDENT>import tempfile<EOL>rbins = sorted(rbins)<EOL>with tempfile.NamedTemporaryFile(delete=False, mode='<STR_LIT:w>') as f:<EOL><INDENT>for i in range(len(rbins) - <NUM_LIT:1>):<EOL><INDENT>f.write("<STR_LIT>".format(rbins[i], rbins[i + <NUM_LIT:1>]))<EOL><DEDENT>tmpfilename = f.name<EOL><DEDENT>delete_after_use = True<EOL>return tmpfilename, delete_after_use<EOL><DEDENT>msg = "<STR_LIT>""<STR_LIT>".format(len(rbins))<EOL>raise TypeError(msg)<EOL>
|
Helper function to ensure that the ``binfile`` required by the Corrfunc
extensions is actually a string.
Checks if the input is a string naming an existing file; if so, it is
returned as-is. If the input is an array instead, a temporary file is
created and the contents of rbins are written out.
Parameters
-----------
rbins: string or array-like
Expected to be a string or an array containing the bins
Returns
---------
binfile: string, filename
If the input ``rbins`` was a valid filename, then returns the same
string. If ``rbins`` was an array, then this function creates a
temporary file with the contents of the ``rbins`` array and returns
that temporary filename.
delete_after_use: boolean
Flag indicating whether the caller should delete the file after use;
True only when a temporary file was created.
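Example
--------
A hedged sketch (the filename shown is hypothetical and the format written
into the temporary file is implementation-defined):
>>> binfile, delete_after_use = return_file_with_rbins('bins.txt')        # doctest: +SKIP
>>> binfile, delete_after_use = return_file_with_rbins([0.1, 1.0, 10.0])  # doctest: +SKIP
>>> import os
>>> if delete_after_use:                                                   # doctest: +SKIP
...     os.remove(binfile)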
|
f10139:m2
|
def fix_cz(cz):
|
<EOL>max_cz_threshold = <NUM_LIT><EOL>try:<EOL><INDENT>input_dtype = cz.dtype<EOL><DEDENT>except:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise TypeError(msg)<EOL><DEDENT>if max(cz) < max_cz_threshold:<EOL><INDENT>speed_of_light = <NUM_LIT><EOL>cz *= speed_of_light<EOL><DEDENT>return cz.astype(input_dtype)<EOL>
|
Multiplies the input array by the speed of light, if the input values are
too small.
Essentially, converts redshift into `cz`, if the user passed
redshifts instead of `cz`.
Parameters
-----------
cz: array-like, reals
An array containing ``[Speed of Light *] redshift`` values.
Returns
---------
cz: array-like
Actual ``cz`` values, obtained by multiplying the input array by the
speed of light if ``redshift`` values were passed as input ``cz``.
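Example
---------
A hedged sketch (the threshold used to decide whether the input looks like
a redshift is anonymized above, and the printed values are illustrative):
>>> import numpy as np
>>> cz = np.array([0.02, 0.05])    # redshift-like values
>>> fix_cz(cz)                     # doctest: +SKIP
array([  5995.84916,  14989.6229 ])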
|
f10139:m3
|
def fix_ra_dec(ra, dec):
|
try:<EOL><INDENT>input_dtype = ra.dtype<EOL><DEDENT>except:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise TypeError(msg)<EOL><DEDENT>if ra is None or dec is None:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ValueError(msg)<EOL><DEDENT>if min(ra) < <NUM_LIT:0.0>:<EOL><INDENT>print("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>ra += <NUM_LIT><EOL><DEDENT>if max(dec) > <NUM_LIT>:<EOL><INDENT>print("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>dec += <NUM_LIT><EOL><DEDENT>return ra.astype(input_dtype), dec.astype(input_dtype)<EOL>
|
Wraps input RA and DEC values into range expected by the extensions.
Parameters
------------
RA: array-like, units must be degrees
Right Ascension values (astronomical longitude)
DEC: array-like, units must be degrees
Declination values (astronomical latitude)
Returns
--------
Tuple (RA, DEC): array-like
RA is wrapped into range [0.0, 360.0]
Declination is wrapped into range [-90.0, 90.0]
|
f10139:m4
|
def translate_isa_string_to_enum(isa):
|
msg = "<STR_LIT>""<STR_LIT>".format(type(isa))<EOL>try:<EOL><INDENT>if not isinstance(isa, basestring):<EOL><INDENT>raise TypeError(msg)<EOL><DEDENT><DEDENT>except NameError:<EOL><INDENT>if not isinstance(isa, str):<EOL><INDENT>raise TypeError(msg)<EOL><DEDENT><DEDENT>valid_isa = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>isa_upper = isa.upper()<EOL>if isa_upper not in valid_isa:<EOL><INDENT>msg = "<STR_LIT>""<STR_LIT>".format(isa, valid_isa)<EOL>raise ValueError(msg)<EOL><DEDENT>enums = {'<STR_LIT>': -<NUM_LIT:1>,<EOL>'<STR_LIT>': <NUM_LIT:0>,<EOL>'<STR_LIT>': <NUM_LIT:1>,<EOL>'<STR_LIT>': <NUM_LIT:2>,<EOL>'<STR_LIT>': <NUM_LIT:3>,<EOL>'<STR_LIT>': <NUM_LIT:4>,<EOL>'<STR_LIT>': <NUM_LIT:5>,<EOL>'<STR_LIT>': <NUM_LIT:6>,<EOL>'<STR_LIT>': <NUM_LIT:7>,<EOL>'<STR_LIT>': <NUM_LIT:8>,<EOL>'<STR_LIT>': <NUM_LIT:9><EOL>}<EOL>try:<EOL><INDENT>return enums[isa_upper]<EOL><DEDENT>except KeyError:<EOL><INDENT>print("<STR_LIT>".format(isa))<EOL>print("<STR_LIT>".format(enums.keys()))<EOL>raise<EOL><DEDENT>
|
Helper function to convert a user-supplied string to the
underlying enum in the C-API. The extensions only have specific
implementations for AVX, SSE42 and FALLBACK. Any other value
will raise a ValueError.
Parameters
------------
isa: string
A string containing the desired instruction set. Valid values are
['AVX', 'SSE42', 'FALLBACK', 'FASTEST']
Returns
--------
instruction_set: integer
An integer corresponding to the desired instruction set, as used in the
underlying C API. The enum used here should be defined *exactly* the
same way as the enum in ``utils/defs.h``.
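Example
--------
A hedged sketch (the returned integer depends on the anonymized enum values
above, so no specific value is shown; matching is case-insensitive):
>>> isa_enum = translate_isa_string_to_enum('avx')   # doctest: +SKIP
>>> translate_isa_string_to_enum('not-an-isa')       # raises ValueError; doctest: +SKIP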
|
f10139:m5
|
def compute_nbins(max_diff, binsize,<EOL>refine_factor=<NUM_LIT:1>,<EOL>max_nbins=None):
|
if max_diff <= <NUM_LIT:0> or binsize <= <NUM_LIT:0>:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>'.format(max_diff, binsize)<EOL>raise ValueError(msg)<EOL><DEDENT>if max_nbins is not None and max_nbins < <NUM_LIT:1>:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>'.format(max_nbins)<EOL>raise ValueError(msg)<EOL><DEDENT>if refine_factor < <NUM_LIT:1>:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>'.format(refine_factor)<EOL>raise ValueError(msg)<EOL><DEDENT>ngrid = max(int(<NUM_LIT:1>), int(max_diff/binsize))<EOL>ngrid *= refine_factor<EOL>if max_nbins:<EOL><INDENT>ngrid = min(int(max_nbins), ngrid)<EOL><DEDENT>return ngrid<EOL>
|
Helper utility to find the number of bins that satisfies
the constraints of (binsize, refine_factor, and max_nbins).
Parameters
------------
max_diff : double
Max. difference (spatial or angular) to be spanned,
(i.e., range of allowed domain values)
binsize : double
Min. allowed binsize (spatial or angular)
refine_factor : integer, default 1
How many times to refine the bins. The refinement is applied after
``nbins`` has already been determined for the base case of
``refine_factor=1``. Thus, the number of bins will be **exactly**
``refine_factor`` times that base case.
max_nbins : integer, default None
Max number of allowed cells
Returns
---------
nbins: integer, >= 1
Number of bins that satisfies the constraints of
bin size >= ``binsize``, the refinement factor
and nbins <= ``max_nbins``.
Example
---------
>>> from Corrfunc.utils import compute_nbins
>>> max_diff = 180
>>> binsize = 10
>>> compute_nbins(max_diff, binsize)
18
>>> refine_factor=2
>>> max_nbins = 20
>>> compute_nbins(max_diff, binsize, refine_factor=refine_factor,
... max_nbins=max_nbins)
20
|
f10139:m6
|
def gridlink_sphere(thetamax,<EOL>ra_limits=None,<EOL>dec_limits=None,<EOL>link_in_ra=True,<EOL>ra_refine_factor=<NUM_LIT:1>, dec_refine_factor=<NUM_LIT:1>,<EOL>max_ra_cells=<NUM_LIT:100>, max_dec_cells=<NUM_LIT:200>,<EOL>return_num_ra_cells=False,<EOL>input_in_degrees=True):
|
from math import radians, pi<EOL>import numpy as np<EOL>if input_in_degrees:<EOL><INDENT>thetamax = radians(thetamax)<EOL>if ra_limits:<EOL><INDENT>ra_limits = [radians(x) for x in ra_limits]<EOL><DEDENT>if dec_limits:<EOL><INDENT>dec_limits = [radians(x) for x in dec_limits]<EOL><DEDENT><DEDENT>if not ra_limits:<EOL><INDENT>ra_limits = [<NUM_LIT:0.0>, <NUM_LIT>*pi]<EOL><DEDENT>if not dec_limits:<EOL><INDENT>dec_limits = [-<NUM_LIT:0.5>*pi, <NUM_LIT:0.5>*pi]<EOL><DEDENT>if dec_limits[<NUM_LIT:0>] >= dec_limits[<NUM_LIT:1>]:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>'.format(dec_limits[<NUM_LIT:0>], dec_limits[<NUM_LIT:1>])<EOL>raise ValueError(msg)<EOL><DEDENT>if ra_limits[<NUM_LIT:0>] >= ra_limits[<NUM_LIT:1>]:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>'.format(ra_limits[<NUM_LIT:0>], ra_limits[<NUM_LIT:1>])<EOL>raise ValueError(msg)<EOL><DEDENT>if dec_limits[<NUM_LIT:0>] < -<NUM_LIT:0.5>*pi or dec_limits[<NUM_LIT:1>] > <NUM_LIT:0.5>*pi:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>''<STR_LIT>'.format(dec_limits[<NUM_LIT:0>], dec_limits[<NUM_LIT:1>])<EOL>raise ValueError(msg)<EOL><DEDENT>if ra_limits[<NUM_LIT:0>] < <NUM_LIT:0.0> or ra_limits[<NUM_LIT:1>] > <NUM_LIT>*pi:<EOL><INDENT>msg = '<STR_LIT>''<STR_LIT>''<STR_LIT>'.format(ra_limits[<NUM_LIT:0>], ra_limits[<NUM_LIT:1>])<EOL>raise ValueError(msg)<EOL><DEDENT>dec_diff = abs(dec_limits[<NUM_LIT:1>] - dec_limits[<NUM_LIT:0>])<EOL>ngrid_dec = compute_nbins(dec_diff, thetamax,<EOL>refine_factor=dec_refine_factor,<EOL>max_nbins=max_dec_cells)<EOL>dec_binsize = dec_diff/ngrid_dec<EOL>grid_dtype= np.dtype({'<STR_LIT>':['<STR_LIT>','<STR_LIT>'],<EOL>'<STR_LIT>':[(np.float, (<NUM_LIT:2>, )), (np.float, (<NUM_LIT:2>, ))]<EOL>})<EOL>if not link_in_ra:<EOL><INDENT>sphere_grid = np.zeros(ngrid_dec, dtype=grid_dtype)<EOL>for i, r in enumerate(sphere_grid['<STR_LIT>']):<EOL><INDENT>r[<NUM_LIT:0>] = dec_limits[<NUM_LIT:0>] + i*dec_binsize<EOL>r[<NUM_LIT:1>] = dec_limits[<NUM_LIT:0>] + (i+<NUM_LIT:1>)*dec_binsize<EOL><DEDENT>for r in sphere_grid['<STR_LIT>']:<EOL><INDENT>r[<NUM_LIT:0>] = ra_limits[<NUM_LIT:0>]<EOL>r[<NUM_LIT:1>] = ra_limits[<NUM_LIT:1>]<EOL><DEDENT>return sphere_grid<EOL><DEDENT>ra_diff = ra_limits[<NUM_LIT:1>] - ra_limits[<NUM_LIT:0>]<EOL>sin_half_thetamax = np.sin(thetamax)<EOL>costhetamax = np.cos(thetamax)<EOL>max_nmesh_ra = <NUM_LIT:1><EOL>totncells = <NUM_LIT:0><EOL>num_ra_cells = np.zeros(ngrid_dec, dtype=np.int64)<EOL>num_ra_cells[:] = ra_refine_factor<EOL>for idec in xrange(ngrid_dec):<EOL><INDENT>dec_min = dec_limits[<NUM_LIT:0>] + idec*dec_binsize<EOL>dec_max = dec_min + dec_binsize<EOL>cos_dec_min = np.cos(dec_min)<EOL>cos_dec_max = np.cos(dec_max)<EOL>if cos_dec_min < cos_dec_max:<EOL><INDENT>min_cos = cos_dec_min<EOL><DEDENT>else:<EOL><INDENT>min_cos = cos_dec_max<EOL><DEDENT>if min_cos > <NUM_LIT:0>:<EOL><INDENT>_tmp = sin_half_thetamax/min_cos<EOL>_tmp = max(min(_tmp, <NUM_LIT:1.0>), <NUM_LIT:0.0>)<EOL>ra_binsize = min(<NUM_LIT> * np.arcsin(_tmp), ra_diff)<EOL>num_ra_cells[idec] = compute_nbins(ra_diff, ra_binsize,<EOL>refine_factor=ra_refine_factor,<EOL>max_nbins=max_ra_cells)<EOL><DEDENT><DEDENT>totncells = num_ra_cells.sum()<EOL>sphere_grid = np.zeros(totncells, dtype=grid_dtype)<EOL>ra_binsizes = ra_diff/num_ra_cells<EOL>start = <NUM_LIT:0><EOL>for idec in xrange(ngrid_dec):<EOL><INDENT>assert start + num_ra_cells[idec] <= totncells<EOL>source_sel = np.s_[start:start+num_ra_cells[idec]]<EOL>for ira, r in enumerate(sphere_grid[source_sel]):<EOL><INDENT>r['<STR_LIT>'][<NUM_LIT:0>] = dec_limits[<NUM_LIT:0>] + 
dec_binsize*idec<EOL>r['<STR_LIT>'][<NUM_LIT:1>] = dec_limits[<NUM_LIT:0>] + dec_binsize*(idec + <NUM_LIT:1>)<EOL>r['<STR_LIT>'][<NUM_LIT:0>] = ra_limits[<NUM_LIT:0>] + ra_binsizes[idec] * ira<EOL>r['<STR_LIT>'][<NUM_LIT:1>] = ra_limits[<NUM_LIT:0>] + ra_binsizes[idec] * (ira + <NUM_LIT:1>)<EOL><DEDENT>start += num_ra_cells[idec]<EOL><DEDENT>if return_num_ra_cells:<EOL><INDENT>return sphere_grid, num_ra_cells<EOL><DEDENT>else:<EOL><INDENT>return sphere_grid<EOL><DEDENT>
|
A method to optimally partition spherical regions such that pairs of
points within a certain angular separation, ``thetamax``, can be quickly
computed.
Generates the binning scheme used in :py:mod:`Corrfunc.mocks.DDtheta_mocks`
for a spherical region in Right Ascension (RA), Declination (DEC)
and a maximum angular separation.
For a given ``thetamax``, regions on the sphere are divided into bands
in DEC, with the width in DEC equal to ``thetamax``. If
``link_in_ra`` is set, then these DEC bands are further sub-divided
into RA cells.
Parameters
----------
thetamax : double
Max. angular separation of pairs. Expected to be in degrees
unless ``input_in_degrees`` is set to ``False``.
ra_limits : array of 2 doubles. Default [0.0, 2*pi]
Range of Right Ascension (longitude) for the spherical region
dec_limits : array of 2 doubles. Default [-pi/2, pi/2]
Range of Declination (latitude) values for the spherical region
link_in_ra : Boolean. Default True
Whether linking in RA is done (in addition to linking in DEC)
ra_refine_factor : integer, >= 1. Default 1
Controls the sub-division of the RA cells. For a large number of
particles, higher `ra_refine_factor` typically results in a faster
runtime
dec_refine_factor : integer, >= 1. Default 1
Controls the sub-division of the DEC cells. For a large number of
particles, higher `dec_refine_factor` typically results in a faster
runtime
max_ra_cells : integer, >= 1. Default 100
The max. number of RA cells **per DEC band**.
max_dec_cells : integer >= 1. Default 200
The max. number of total DEC bands
return_num_ra_cells: bool, default False
Flag to return the number of RA cells per DEC band
input_in_degrees : Boolean. Default True
Flag to show if the input quantities are in degrees. If set to
False, all angle inputs will be taken to be in radians.
Returns
---------
sphere_grid : A numpy compound array, shape (ncells, 2)
A numpy compound array with fields ``dec_limit`` and ``ra_limit`` of
size 2 each. These arrays contain the beginning and end of DEC
and RA regions for the cell.
num_ra_cells: numpy array, returned if ``return_num_ra_cells`` is set
A numpy array containing the number of RA cells per declination band
.. note:: If ``link_in_ra=False``, then there is effectively one RA bin
per DEC band. The 'ra_limit' field will show the range of allowed
RA values.
.. seealso:: :py:mod:`Corrfunc.mocks.DDtheta_mocks`
Example
--------
>>> from Corrfunc.utils import gridlink_sphere
>>> import numpy as np
>>> np.set_printoptions(precision=8)
>>> thetamax=30
>>> grid = gridlink_sphere(thetamax) # doctest: +NORMALIZE_WHITESPACE
>>> print(grid)
[([-1.57079633, -1.04719755], [ 0. , 3.14159265])
([-1.57079633, -1.04719755], [ 3.14159265, 6.28318531])
([-1.04719755, -0.52359878], [ 0. , 3.14159265])
([-1.04719755, -0.52359878], [ 3.14159265, 6.28318531])
([-0.52359878, 0. ], [ 0. , 1.25663706])
([-0.52359878, 0. ], [ 1.25663706, 2.51327412])
([-0.52359878, 0. ], [ 2.51327412, 3.76991118])
([-0.52359878, 0. ], [ 3.76991118, 5.02654825])
([-0.52359878, 0. ], [ 5.02654825, 6.28318531])
([ 0. , 0.52359878], [ 0. , 1.25663706])
([ 0. , 0.52359878], [ 1.25663706, 2.51327412])
([ 0. , 0.52359878], [ 2.51327412, 3.76991118])
([ 0. , 0.52359878], [ 3.76991118, 5.02654825])
([ 0. , 0.52359878], [ 5.02654825, 6.28318531])
([ 0.52359878, 1.04719755], [ 0. , 3.14159265])
([ 0.52359878, 1.04719755], [ 3.14159265, 6.28318531])
([ 1.04719755, 1.57079633], [ 0. , 3.14159265])
([ 1.04719755, 1.57079633], [ 3.14159265, 6.28318531])]
>>> grid = gridlink_sphere(60, dec_refine_factor=3, ra_refine_factor=2) # doctest: +NORMALIZE_WHITESPACE
>>> print(grid)
[([-1.57079633, -1.22173048], [ 0. , 1.57079633])
([-1.57079633, -1.22173048], [ 1.57079633, 3.14159265])
([-1.57079633, -1.22173048], [ 3.14159265, 4.71238898])
([-1.57079633, -1.22173048], [ 4.71238898, 6.28318531])
([-1.22173048, -0.87266463], [ 0. , 1.57079633])
([-1.22173048, -0.87266463], [ 1.57079633, 3.14159265])
([-1.22173048, -0.87266463], [ 3.14159265, 4.71238898])
([-1.22173048, -0.87266463], [ 4.71238898, 6.28318531])
([-0.87266463, -0.52359878], [ 0. , 1.57079633])
([-0.87266463, -0.52359878], [ 1.57079633, 3.14159265])
([-0.87266463, -0.52359878], [ 3.14159265, 4.71238898])
([-0.87266463, -0.52359878], [ 4.71238898, 6.28318531])
([-0.52359878, -0.17453293], [ 0. , 1.57079633])
([-0.52359878, -0.17453293], [ 1.57079633, 3.14159265])
([-0.52359878, -0.17453293], [ 3.14159265, 4.71238898])
([-0.52359878, -0.17453293], [ 4.71238898, 6.28318531])
([-0.17453293, 0.17453293], [ 0. , 1.57079633])
([-0.17453293, 0.17453293], [ 1.57079633, 3.14159265])
([-0.17453293, 0.17453293], [ 3.14159265, 4.71238898])
([-0.17453293, 0.17453293], [ 4.71238898, 6.28318531])
([ 0.17453293, 0.52359878], [ 0. , 1.57079633])
([ 0.17453293, 0.52359878], [ 1.57079633, 3.14159265])
([ 0.17453293, 0.52359878], [ 3.14159265, 4.71238898])
([ 0.17453293, 0.52359878], [ 4.71238898, 6.28318531])
([ 0.52359878, 0.87266463], [ 0. , 1.57079633])
([ 0.52359878, 0.87266463], [ 1.57079633, 3.14159265])
([ 0.52359878, 0.87266463], [ 3.14159265, 4.71238898])
([ 0.52359878, 0.87266463], [ 4.71238898, 6.28318531])
([ 0.87266463, 1.22173048], [ 0. , 1.57079633])
([ 0.87266463, 1.22173048], [ 1.57079633, 3.14159265])
([ 0.87266463, 1.22173048], [ 3.14159265, 4.71238898])
([ 0.87266463, 1.22173048], [ 4.71238898, 6.28318531])
([ 1.22173048, 1.57079633], [ 0. , 1.57079633])
([ 1.22173048, 1.57079633], [ 1.57079633, 3.14159265])
([ 1.22173048, 1.57079633], [ 3.14159265, 4.71238898])
([ 1.22173048, 1.57079633], [ 4.71238898, 6.28318531])]
|
f10139:m7
|
def convert_to_native_endian(array):
|
if array is None:<EOL><INDENT>return array<EOL><DEDENT>import numpy as np<EOL>array = np.asanyarray(array)<EOL>system_is_little_endian = (sys.byteorder == '<STR_LIT>') <EOL>array_is_little_endian = (array.dtype.byteorder == '<STR_LIT:<>')<EOL>if (array_is_little_endian != system_is_little_endian) and not (array.dtype.byteorder == '<STR_LIT:=>'):<EOL><INDENT>return array.byteswap().newbyteorder()<EOL><DEDENT>else:<EOL><INDENT>return array<EOL><DEDENT>
|
Returns the supplied array in native endian byte-order.
If the array already has native endianness, then the
same array is returned.
Parameters
----------
array: np.ndarray
The array to convert
Returns
-------
new_array: np.ndarray
The array in native-endian byte-order.
Example
-------
>>> import numpy as np
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code + 'i4')
>>> swapped_dt = np.dtype(swapped_code + 'i4')
>>> arr = np.arange(10, dtype=native_dt)
>>> new_arr = convert_to_native_endian(arr)
>>> arr is new_arr
True
>>> arr = np.arange(10, dtype=swapped_dt)
>>> new_arr = convert_to_native_endian(arr)
>>> new_arr.dtype.byteorder == '=' or new_arr.dtype.byteorder == native_code
True
>>> convert_to_native_endian(None) is None
True
|
f10139:m8
|
def is_native_endian(array):
|
if array is None:<EOL><INDENT>return True<EOL><DEDENT>import numpy as np<EOL>array = np.asanyarray(array)<EOL>system_is_little_endian = (sys.byteorder == '<STR_LIT>')<EOL>array_is_little_endian = (array.dtype.byteorder == '<STR_LIT:<>')<EOL>return (array_is_little_endian == system_is_little_endian) or (array.dtype.byteorder == '<STR_LIT:=>')<EOL>
|
Checks whether the given array is native-endian.
None evaluates to True.
Parameters
----------
array: np.ndarray
The array to check
Returns
-------
is_native: bool
Whether the endianness is native
Example
-------
>>> import numpy as np
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code + 'i4')
>>> swapped_dt = np.dtype(swapped_code + 'i4')
>>> arr = np.arange(10, dtype=native_dt)
>>> is_native_endian(arr)
True
>>> arr = np.arange(10, dtype=swapped_dt)
>>> is_native_endian(arr)
False
|
f10139:m9
|
def sys_pipes():
|
kwargs = {'<STR_LIT>':None if sys.stdout.isatty() else sys.stdout,<EOL>'<STR_LIT>':None if sys.stderr.isatty() else sys.stderr }<EOL>return wurlitzer.pipes(**kwargs)<EOL>
|
We can use the Wurlitzer package to redirect stdout and stderr
from the command line into a Jupyter notebook. But if we're not
in a notebook, this isn't safe because we can't redirect stdout
to itself. This function is a thin wrapper that checks if the
stdout/err streams are TTYs and enables output redirection
based on that.
Basic usage is:
>>> with sys_pipes():
...     call_some_c_function()
See the Wurlitzer package for usage of `wurlitzer.pipes()`;
see also https://github.com/manodeep/Corrfunc/issues/157.
|
f10139:m10
|
def _convert_cell_timer(cell_time_lst):
|
import numpy as np<EOL>from future.utils import bytes_to_native_str<EOL>dtype = np.dtype([(bytes_to_native_str(b'<STR_LIT>'), np.int64),<EOL>(bytes_to_native_str(b'<STR_LIT>'), np.int64),<EOL>(bytes_to_native_str(b'<STR_LIT>'), np.int64),<EOL>(bytes_to_native_str(b'<STR_LIT>'), np.int32),<EOL>(bytes_to_native_str(b'<STR_LIT>'), np.int32),<EOL>(bytes_to_native_str(b'<STR_LIT>'), np.int32)])<EOL>cell_times = np.array(cell_time_lst, dtype=dtype)<EOL>return cell_times<EOL>
|
Converts the cell timings list returned by the python extensions
into a more user-friendly numpy structured array.
The fields correspond to the C ``struct api_cell_timings`` defined
in ``utils/defs.h``.
Returns:
--------
cell_times : numpy structured array
The following fields are present in the ``cell_times``:
N1 -> number of particles in cell 1
N2 -> number of particles in cell 2
time_in_ns -> time taken to compute all pairs between two cells
(cellidx1, cellidx2)
cellidx1, cellidx2 -> the 1-D index for the two cells
tid -> thread-id that computed the pairs (identically 0 for
serial/single-threaded runs)
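A hedged usage sketch, assuming the extension returned a cell-timings list
and using the ``time_in_ns`` field documented above:
>>> cell_times = _convert_cell_timer(cell_time_lst)               # doctest: +SKIP
>>> total_ns = cell_times['time_in_ns'].sum()                     # doctest: +SKIP
>>> slowest_pair = cell_times[cell_times['time_in_ns'].argmax()]  # doctest: +SKIP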
|
f10145:m1
|
def read_fastfood_catalog(filename, return_dtype=None, need_header=None):
|
if return_dtype is None:<EOL><INDENT>return_dtype = np.float<EOL><DEDENT>if return_dtype not in [np.float32, np.float]:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ValueError(msg)<EOL><DEDENT>if not file_exists(filename):<EOL><INDENT>msg = "<STR_LIT>".format(filename)<EOL>raise IOError(msg)<EOL><DEDENT>import struct<EOL>try:<EOL><INDENT>from future.utils import bytes_to_native_str<EOL><DEDENT>except ImportError:<EOL><INDENT>print("<STR_LIT>"<EOL>"<STR_LIT>")<EOL>raise<EOL><DEDENT>with open(filename, "<STR_LIT:rb>") as f:<EOL><INDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>idat = struct.unpack(bytes_to_native_str(b'<STR_LIT>'),<EOL>f.read(<NUM_LIT:20>))[<NUM_LIT:0>:<NUM_LIT:5>]<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == <NUM_LIT:20> and skip2 == <NUM_LIT:20>,"<STR_LIT>"<EOL>ngal = idat[<NUM_LIT:1>]<EOL>if need_header is not None:<EOL><INDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>fdat = struct.unpack(bytes_to_native_str(b'<STR_LIT>'),<EOL>f.read(<NUM_LIT>))[<NUM_LIT:0>:<NUM_LIT:9>]<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == <NUM_LIT> and skip2 == <NUM_LIT>,"<STR_LIT>"<EOL>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>znow = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == <NUM_LIT:4> and skip2 == <NUM_LIT:4>,"<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>fdat_bytes = <NUM_LIT:4> + <NUM_LIT> + <NUM_LIT:4><EOL>znow_bytes = <NUM_LIT:4> + <NUM_LIT:4> + <NUM_LIT:4><EOL>f.seek(fdat_bytes + znow_bytes, <NUM_LIT:1>)<EOL><DEDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == ngal * <NUM_LIT:4> or skip1 == ngal * <NUM_LIT:8>,"<STR_LIT>"<EOL>f.seek(-<NUM_LIT:4>, <NUM_LIT:1>)<EOL>pos = {}<EOL>for field in '<STR_LIT>':<EOL><INDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == ngal * <NUM_LIT:4> or skip1 == ngal * <NUM_LIT:8>,"<STR_LIT>"<EOL>input_dtype = np.float32 if skip1 // ngal == <NUM_LIT:4> else np.float<EOL>array = np.fromfile(f, input_dtype, ngal)<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>if return_dtype == input_dtype:<EOL><INDENT>pos[field] = array<EOL><DEDENT>else:<EOL><INDENT>pos[field] = [return_dtype(a) for a in array]<EOL><DEDENT><DEDENT><DEDENT>x = np.array(pos['<STR_LIT:x>'])<EOL>y = np.array(pos['<STR_LIT:y>'])<EOL>z = np.array(pos['<STR_LIT:z>'])<EOL>if need_header is not None:<EOL><INDENT>return idat, fdat, znow, x, y, z<EOL><DEDENT>else:<EOL><INDENT>return x, y, z<EOL><DEDENT>
|
Read a galaxy catalog from a fast-food binary file.
Parameters
-----------
filename: string
Filename containing the galaxy positions
return_dtype: numpy dtype for returned arrays. Default ``numpy.float``
Specifies the datatype for the returned arrays. Must be in
{np.float, np.float32}
need_header: boolean, default None.
Returns the header found in the fast-food file in addition to the
X/Y/Z arrays.
Returns
--------
X, Y, Z: numpy arrays
Returns the triplet of X/Y/Z positions as separate numpy arrays.
If need_header is set, then the header is also returned
Example
--------
>>> import numpy as np
>>> from os.path import dirname, abspath, join as pjoin
>>> import Corrfunc
>>> from Corrfunc.io import read_fastfood_catalog
>>> filename = pjoin(dirname(abspath(Corrfunc.__file__)),
... "../theory/tests/data/",
... "gals_Mr19.ff")
>>> X, Y, Z = read_fastfood_catalog(filename)
>>> N = 20
>>> for x,y,z in zip(X[0:N], Y[0:N], Z[0:]):
... print("{0:10.5f} {1:10.5f} {2:10.5f}".format(x, y, z))
... # doctest: +NORMALIZE_WHITESPACE
419.94550 1.96340 0.01610
419.88272 1.79736 0.11960
0.32880 10.63620 4.16550
0.15314 10.68723 4.06529
0.46400 8.91150 6.97090
6.30690 9.77090 8.61080
5.87160 9.65870 9.29810
8.06210 0.42350 4.89410
11.92830 4.38660 4.54410
11.95543 4.32622 4.51485
11.65676 4.34665 4.53181
11.75739 4.26262 4.31666
11.81329 4.27530 4.49183
11.80406 4.54737 4.26824
12.61570 4.14470 3.70140
13.23640 4.34750 5.26450
13.19833 4.33196 5.29435
13.21249 4.35695 5.37418
13.06805 4.24275 5.35126
13.19693 4.37618 5.28772
|
f10148:m0
|
def read_ascii_catalog(filename, return_dtype=None):
|
if return_dtype is None:<EOL><INDENT>return_dtype = np.float<EOL><DEDENT>if not file_exists(filename):<EOL><INDENT>msg = "<STR_LIT>".format(filename)<EOL>raise IOError(msg)<EOL><DEDENT>if pd is not None:<EOL><INDENT>df = pd.read_csv(filename, header=None,<EOL>engine="<STR_LIT:c>",<EOL>dtype={"<STR_LIT:x>": return_dtype,<EOL>"<STR_LIT:y>": return_dtype,<EOL>"<STR_LIT:z>": return_dtype},<EOL>delim_whitespace=True)<EOL>x = np.asarray(df[<NUM_LIT:0>], dtype=return_dtype)<EOL>y = np.asarray(df[<NUM_LIT:1>], dtype=return_dtype)<EOL>z = np.asarray(df[<NUM_LIT:2>], dtype=return_dtype)<EOL><DEDENT>else:<EOL><INDENT>x, y, z, _ = np.genfromtxt(filename, dtype=return_dtype, unpack=True)<EOL><DEDENT>return x, y, z<EOL>
|
Read a galaxy catalog from an ascii file.
Parameters
-----------
filename: string
Filename containing the galaxy positions
return_dtype: numpy dtype for returned arrays. Default ``numpy.float``
Specifies the datatype for the returned arrays. Must be in
{np.float, np.float32}
Returns
--------
X, Y, Z: numpy arrays
Returns the triplet of X/Y/Z positions as separate numpy arrays.
Example
--------
>>> from __future__ import print_function
>>> from os.path import dirname, abspath, join as pjoin
>>> import Corrfunc
>>> from Corrfunc.io import read_ascii_catalog
>>> filename = pjoin(dirname(abspath(Corrfunc.__file__)),
... "../mocks/tests/data/", "Mr19_mock_northonly.rdcz.dat")
>>> ra, dec, cz = read_ascii_catalog(filename)
>>> N = 20
>>> for r,d,c in zip(ra[0:N], dec[0:N], cz[0:]):
... print("{0:10.5f} {1:10.5f} {2:10.5f}".format(r, d, c))
... # doctest: +NORMALIZE_WHITESPACE
178.45087 67.01112 19905.28514
178.83495 67.72519 19824.02285
179.50132 67.67628 19831.21553
182.75497 67.13004 19659.79825
186.29853 68.64099 20030.64412
186.32346 68.65879 19763.38137
187.36173 68.15151 19942.66996
187.20613 68.56189 19996.36607
185.56358 67.97724 19729.32308
183.27930 67.11318 19609.71345
183.86498 67.82823 19500.44130
184.07771 67.43429 19440.53790
185.13370 67.15382 19390.60304
189.15907 68.28252 19858.85853
190.12209 68.55062 20044.29744
193.65245 68.36878 19445.62469
194.93514 68.34870 19158.93155
180.36897 67.50058 18671.40780
179.63278 67.51318 18657.59191
180.75742 67.95530 18586.88913
|
f10148:m1
|
def read_catalog(filebase=None, return_dtype=np.float):
|
if filebase is None:<EOL><INDENT>filename = pjoin(dirname(abspath(__file__)),<EOL>"<STR_LIT>", "<STR_LIT>")<EOL>allowed_exts = {'<STR_LIT>': read_fastfood_catalog,<EOL>'<STR_LIT>': read_ascii_catalog,<EOL>'<STR_LIT>': read_ascii_catalog,<EOL>'<STR_LIT>': read_ascii_catalog<EOL>}<EOL>for e in allowed_exts:<EOL><INDENT>if file_exists(filename + e):<EOL><INDENT>f = allowed_exts[e]<EOL>x, y, z = f(filename + e, return_dtype)<EOL>return x, y, z<EOL><DEDENT><DEDENT>raise IOError("<STR_LIT>".format(filename, allowed_exts.keys()))<EOL><DEDENT>else:<EOL><INDENT>if file_exists(filebase):<EOL><INDENT>extension = splitext(filebase)[<NUM_LIT:1>]<EOL>f = read_fastfood_catalog if '<STR_LIT>' in extension else read_ascii_catalog<EOL>x, y, z = f(filebase, return_dtype)<EOL>return x, y, z<EOL><DEDENT>raise IOError("<STR_LIT>".format(filebase))<EOL><DEDENT>
|
Reads a galaxy/randoms catalog and returns 3 XYZ arrays.
Parameters
-----------
filebase: string (optional)
The fully qualified path to the file. If omitted, reads the
theory galaxy catalog under ../theory/tests/data/
return_dtype: numpy dtype for returned arrays. Default ``numpy.float``
Specifies the datatype for the returned arrays. Must be in
{np.float, np.float32}
Returns
--------
``x y z`` - Unpacked numpy arrays compatible with the installed
version of ``Corrfunc``.
.. note:: If the filename is omitted, then first the fast-food file
is searched for, and then the ascii file. End-users should always
supply the full filename.
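Example
--------
The explicit filename below is hypothetical; the no-argument call mirrors
the usage shown in the ``convert_3d_counts_to_cf`` docstring above:
>>> from Corrfunc.io import read_catalog
>>> X, Y, Z = read_catalog()                        # default theory catalog
>>> X, Y, Z = read_catalog('/path/to/galaxies.ff')  # doctest: +SKIP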
|
f10148:m2
|
def strip_line(line, sep=os.linesep):
|
try:<EOL><INDENT>return line.strip(sep)<EOL><DEDENT>except TypeError:<EOL><INDENT>return line.decode('<STR_LIT:utf-8>').strip(sep)<EOL><DEDENT>
|
Strips the given separator character (sep) from the ends of a line of text,
decoding bytes to UTF-8 first if necessary
|
f10149:m0
|
def get_dict_from_buffer(buf, keys=['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']):
|
pairs = dict()<EOL>if keys is None:<EOL><INDENT>keys = "<STR_LIT>"<EOL>regex = re.compile(r'''<STR_LIT>'''.format(keys), re.VERBOSE)<EOL>validate = False<EOL><DEDENT>else:<EOL><INDENT>keys = [k.strip() for k in keys]<EOL>regex = re.compile(r'''<STR_LIT>'''.format('<STR_LIT:|>'.join(keys)), re.VERBOSE)<EOL>validate = True<EOL>for k in keys:<EOL><INDENT>pairs[k] = []<EOL><DEDENT><DEDENT>matches = regex.findall(buf)<EOL>for match in matches:<EOL><INDENT>key, val = match.split('<STR_LIT:=>', <NUM_LIT:1>)<EOL>key = (strip_line(key, '<STR_LIT::>')).strip()<EOL>val = (strip_line(val)).strip()<EOL>if validate and key not in keys:<EOL><INDENT>msg = "<STR_LIT>""<STR_LIT>""<STR_LIT>".format(regex.pattern, key, '<STR_LIT:|>'.join(keys))<EOL>raise AssertionError(msg)<EOL><DEDENT>pairs.setdefault(key, []).append(val)<EOL><DEDENT>return pairs<EOL>
|
Parses a string buffer for key-val pairs for the supplied keys.
Returns: Python dictionary with all the supplied keys (or all keys
found in the buffer if None is passed for keys), with the value for
each key being a list of the values found for it.
Note: Return dict will contain all keys supplied (if not None).
If any key was not found in the buffer, then the value for
that key will be [] such that dict[key] does not produce
a KeyError.
Slightly modified from:
http://stackoverflow.com/questions/5323703/regex-how-to-match-sequence-of-key-value-pairs-at-end-of-string
|
f10149:m2
|
def replace_first_key_in_makefile(buf, key, replacement, outfile=None):
|
regexp = re.compile(r'''<STR_LIT>'''.format(key), re.VERBOSE)<EOL>matches = regexp.findall(buf)<EOL>if matches is None:<EOL><INDENT>msg = "<STR_LIT>""<STR_LIT>".format(key, regexp.pattern)<EOL>raise ValueError(msg)<EOL><DEDENT>newbuf = regexp.sub(replacement, buf, count=<NUM_LIT:1>)<EOL>if outfile is not None:<EOL><INDENT>write_text_file(outfile, newbuf)<EOL><DEDENT>return newbuf<EOL>
|
Replaces first line in 'buf' matching 'key' with 'replacement'.
Optionally, writes out this new buffer into 'outfile'.
Returns: Buffer after replacement has been done
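A hedged usage sketch (the matching regex is anonymized above; the buffer,
key, and replacement text are hypothetical):
>>> makefile_text = read_text_file('common.mk')                        # doctest: +SKIP
>>> newbuf = replace_first_key_in_makefile(makefile_text, 'CC',
...                                        'CC := clang',
...                                        outfile='common.mk')        # doctest: +SKIP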
|
f10149:m3
|
def setup_packages():
|
<EOL>src_path = dirname(abspath(sys.argv[<NUM_LIT:0>]))<EOL>old_path = os.getcwd()<EOL>os.chdir(src_path)<EOL>sys.path.insert(<NUM_LIT:0>, src_path)<EOL>python_dirs = ["<STR_LIT>",<EOL>"<STR_LIT>"]<EOL>extensions = generate_extensions(python_dirs)<EOL>common_dict = requirements_check()<EOL>if install_required():<EOL><INDENT>from distutils.sysconfig import get_config_var<EOL>if get_config_var('<STR_LIT>') != '<STR_LIT>' and version_info[<NUM_LIT:0>] == <NUM_LIT:2>:<EOL><INDENT>msg = "<STR_LIT>""<STR_LIT>".format(get_config_var('<STR_LIT>'))<EOL>raise ValueError(msg)<EOL><DEDENT>extra_string = '<STR_LIT>'<EOL>if compiler != '<STR_LIT>':<EOL><INDENT>extra_string = '<STR_LIT>'.format(compiler)<EOL><DEDENT>command = "<STR_LIT>".format(extra_string)<EOL>run_command(command)<EOL><DEDENT>else:<EOL><INDENT>if '<STR_LIT>' in sys.argv:<EOL><INDENT>command = "<STR_LIT>"<EOL>run_command(command)<EOL><DEDENT><DEDENT>dirs_patterns = {'<STR_LIT>': ['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'],<EOL>'<STR_LIT>': ['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'],<EOL>'<STR_LIT>': ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],<EOL>'<STR_LIT>': ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'],<EOL>'<STR_LIT>': ['<STR_LIT>'],<EOL>'<STR_LIT>': ['<STR_LIT>']<EOL>}<EOL>data_files = []<EOL>for d in dirs_patterns:<EOL><INDENT>patterns = dirs_patterns[d]<EOL>f = recursive_glob(d, patterns)<EOL>data_files.extend(f)<EOL><DEDENT>data_files = ["<STR_LIT>".format(d) for d in data_files]<EOL>long_description = read_text_file('<STR_LIT>')<EOL>min_np_major = int(common_dict['<STR_LIT>'][<NUM_LIT:0>])<EOL>min_np_minor = int(common_dict['<STR_LIT>'][<NUM_LIT:0>])<EOL>classifiers = ['<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>']<EOL>metadata = dict(<EOL>name=projectname,<EOL>version=version,<EOL>author='<STR_LIT>',<EOL>author_email='<STR_LIT>',<EOL>maintainer='<STR_LIT>',<EOL>maintainer_email='<STR_LIT>',<EOL>url=base_url,<EOL>download_url='<STR_LIT>'.format(<EOL>base_url, projectname, version),<EOL>description='<STR_LIT>',<EOL>long_description=long_description,<EOL>classifiers=classifiers,<EOL>license='<STR_LIT>',<EOL>platforms=["<STR_LIT>", "<STR_LIT>", "<STR_LIT>"],<EOL>keywords=['<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>'],<EOL>provides=[projectname],<EOL>packages=find_packages(),<EOL>ext_package=projectname,<EOL>ext_modules=extensions,<EOL>package_data={'<STR_LIT>': data_files},<EOL>include_package_data=True,<EOL>setup_requires=['<STR_LIT>',<EOL>'<STR_LIT>'.format(min_np_major,<EOL>min_np_minor)],<EOL>install_requires=['<STR_LIT>'.format(min_np_major,<EOL>min_np_minor),<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'],<EOL>python_requires='<STR_LIT>',<EOL>zip_safe=False,<EOL>cmdclass={'<STR_LIT>': BuildExtSubclass})<EOL>try:<EOL><INDENT>setup(**metadata)<EOL><DEDENT>finally:<EOL><INDENT>del sys.path[<NUM_LIT:0>]<EOL>os.chdir(old_path)<EOL><DEDENT>return<EOL>
|
Custom setup for Corrfunc package.
Optional: Set compiler via 'CC=/path/to/compiler' or
'CC /path/to/compiler' or 'CC = /path/to/compiler'.
All the CC options are removed from sys.argv after
being parsed.
|
f10149:m8
|
@contextmanager<EOL>def stderr_redirected(to=os.devnull):
|
fd = sys.stderr.fileno()<EOL>def _redirect_stderr(to):<EOL><INDENT>sys.stderr.close() <EOL>os.dup2(to.fileno(), fd) <EOL>sys.stderr = os.fdopen(fd, '<STR_LIT:w>') <EOL><DEDENT>with os.fdopen(os.dup(fd), '<STR_LIT:w>') as old_stderr:<EOL><INDENT>with open(to, '<STR_LIT:w>') as file:<EOL><INDENT>_redirect_stderr(to=file)<EOL><DEDENT>try:<EOL><INDENT>yield <EOL><DEDENT>finally:<EOL><INDENT>_redirect_stderr(to=old_stderr) <EOL><DEDENT><DEDENT>
|
import os
with stderr_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
|
f10151:m0
|
@contextmanager<EOL>def stderr_redirected(to=os.devnull):
|
fd = sys.stderr.fileno()<EOL>def _redirect_stderr(to):<EOL><INDENT>sys.stderr.close() <EOL>os.dup2(to.fileno(), fd) <EOL>sys.stderr = os.fdopen(fd, '<STR_LIT:w>') <EOL><DEDENT>with os.fdopen(os.dup(fd), '<STR_LIT:w>') as old_stderr:<EOL><INDENT>with open(to, '<STR_LIT:w>') as file:<EOL><INDENT>_redirect_stderr(to=file)<EOL><DEDENT>try:<EOL><INDENT>yield <EOL><DEDENT>finally:<EOL><INDENT>_redirect_stderr(to=old_stderr) <EOL><DEDENT><DEDENT>
|
import os
with stderr_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
|
f10152:m0
|
def read_file(filename):
|
dtype = np.dtype([('<STR_LIT>', np.int32),<EOL>('<STR_LIT>', np.int),<EOL>('<STR_LIT>', np.int),<EOL>('<STR_LIT:time>', np.float)<EOL>])<EOL>if pd is not None:<EOL><INDENT>timings = pd.read_csv(filename, header=None,<EOL>engine="<STR_LIT:c>",<EOL>dtype={'<STR_LIT>': np.int32,<EOL>'<STR_LIT>': np.int,<EOL>'<STR_LIT>': np.int,<EOL>'<STR_LIT:time>': np.float},<EOL>index_col=None,<EOL>names=['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT:time>'],<EOL>delim_whitespace=True)<EOL><DEDENT>else:<EOL><INDENT>timings = np.loadtxt(filename, dtype=dtype)<EOL><DEDENT>return timings<EOL>
|
Reads in the file I created manually (by recompiling and adding timers)
Not used any more but left for historical reasons (the first 'speedup'
plots were generated with this function)
|
f10154:m0
|
@contextmanager<EOL>def stderr_redirected(to=os.devnull):
|
fd = sys.stderr.fileno()<EOL>def _redirect_stderr(to):<EOL><INDENT>sys.stderr.close() <EOL>os.dup2(to.fileno(), fd) <EOL>sys.stderr = os.fdopen(fd, '<STR_LIT:w>') <EOL><DEDENT>with os.fdopen(os.dup(fd), '<STR_LIT:w>') as old_stderr:<EOL><INDENT>with open(to, '<STR_LIT:w>') as file:<EOL><INDENT>_redirect_stderr(to=file)<EOL><DEDENT>try:<EOL><INDENT>yield <EOL><DEDENT>finally:<EOL><INDENT>_redirect_stderr(to=old_stderr) <EOL><DEDENT><DEDENT>
|
import os
with stderr_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
|
f10155:m0
|
def read_text_file(filename, encoding="<STR_LIT:utf-8>"):
|
try:<EOL><INDENT>with open(filename, '<STR_LIT:r>', encoding) as f:<EOL><INDENT>r = f.read()<EOL><DEDENT><DEDENT>except TypeError:<EOL><INDENT>with open(filename, '<STR_LIT:r>') as f:<EOL><INDENT>r = f.read()<EOL><DEDENT><DEDENT>return r<EOL>
|
Reads a file under python3 with encoding (default UTF-8).
Also works under python2, without encoding.
Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
principle.
|
f10156:m0
|
def read_catalog(filebase=None):
|
def read_ascii_catalog(filename, return_dtype=None):<EOL><INDENT>if return_dtype is None:<EOL><INDENT>msg = '<STR_LIT>'<EOL>raise ValueError(msg)<EOL><DEDENT>print("<STR_LIT>")<EOL>try:<EOL><INDENT>import pandas as pd<EOL><DEDENT>except ImportError:<EOL><INDENT>pd = None<EOL><DEDENT>if pd is not None:<EOL><INDENT>df = pd.read_csv(filename, header=None,<EOL>engine="<STR_LIT:c>",<EOL>dtype={"<STR_LIT:x>": return_dtype,<EOL>"<STR_LIT:y>": return_dtype,<EOL>"<STR_LIT:z>": return_dtype},<EOL>delim_whitespace=True)<EOL>x = np.asarray(df[<NUM_LIT:0>], dtype=return_dtype)<EOL>y = np.asarray(df[<NUM_LIT:1>], dtype=return_dtype)<EOL>z = np.asarray(df[<NUM_LIT:2>], dtype=return_dtype)<EOL><DEDENT>else:<EOL><INDENT>x, y, z, _ = np.genfromtxt(filename, dtype=return_dtype,<EOL>unpack=True)<EOL><DEDENT>return x, y, z<EOL><DEDENT>def read_fastfood_catalog(filename, return_dtype=None, need_header=None):<EOL><INDENT>if return_dtype is None:<EOL><INDENT>msg = "<STR_LIT>"<EOL>raise ValueError(msg)<EOL><DEDENT>import struct<EOL>with open(filename, "<STR_LIT:rb>") as f:<EOL><INDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>idat = struct.unpack(bytes_to_native_str(b'<STR_LIT>'),<EOL>f.read(<NUM_LIT:20>))[<NUM_LIT:0>:<NUM_LIT:5>]<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == <NUM_LIT:20> and skip2 == <NUM_LIT:20>,"<STR_LIT>"<EOL>ngal = idat[<NUM_LIT:1>]<EOL>if need_header is not None:<EOL><INDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>fdat = struct.unpack(bytes_to_native_str(b'<STR_LIT>'),<EOL>f.read(<NUM_LIT>))[<NUM_LIT:0>:<NUM_LIT:9>]<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == <NUM_LIT> and skip2 == <NUM_LIT>,"<STR_LIT>"<EOL>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>znow = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == <NUM_LIT:4> and skip2 == <NUM_LIT:4>,"<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>fdat_bytes = <NUM_LIT:4> + <NUM_LIT> + <NUM_LIT:4><EOL>znow_bytes = <NUM_LIT:4> + <NUM_LIT:4> + <NUM_LIT:4><EOL>f.seek(fdat_bytes + znow_bytes, <NUM_LIT:1>)<EOL><DEDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == ngal * <NUM_LIT:4> or skip1 == ngal * <NUM_LIT:8>,"<STR_LIT>"<EOL>f.seek(-<NUM_LIT:4>, <NUM_LIT:1>)<EOL>pos = {}<EOL>for field in '<STR_LIT>':<EOL><INDENT>skip1 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>assert skip1 == ngal * <NUM_LIT:4> or skip1 == ngal * <NUM_LIT:8>,"<STR_LIT>"<EOL>input_dtype = np.float32 if skip1 // ngal == <NUM_LIT:4> else np.float<EOL>array = np.fromfile(f, input_dtype, ngal)<EOL>skip2 = struct.unpack(bytes_to_native_str(b'<STR_LIT>'), f.read(<NUM_LIT:4>))[<NUM_LIT:0>]<EOL>pos[field] = array if return_dtype == input_dtypeelse [return_dtype(a) for a in array]<EOL><DEDENT><DEDENT>x = pos['<STR_LIT:x>']<EOL>y = pos['<STR_LIT:y>']<EOL>z = pos['<STR_LIT:z>']<EOL>if need_header is not None:<EOL><INDENT>return idat, fdat, znow, x, y, z<EOL><DEDENT>else:<EOL><INDENT>return x, y, z<EOL><DEDENT><DEDENT>if filebase is None:<EOL><INDENT>filename = pjoin(dirname(abspath(_countpairs.__file__)),<EOL>"<STR_LIT>", "<STR_LIT>")<EOL>dtype = 
np.float32<EOL>allowed_exts = {'<STR_LIT>': read_fastfood_catalog,<EOL>'<STR_LIT>': read_ascii_catalog,<EOL>'<STR_LIT>': read_ascii_catalog,<EOL>'<STR_LIT>': read_ascii_catalog<EOL>}<EOL>for e in allowed_exts:<EOL><INDENT>if exists(filename + e):<EOL><INDENT>f = allowed_exts[e]<EOL>x, y, z = f(filename + e, dtype)<EOL>return x, y, z<EOL><DEDENT><DEDENT>raise IOError("<STR_LIT>".format(filename, allowed_exts.keys()))<EOL><DEDENT>else:<EOL><INDENT>if exists(filebase):<EOL><INDENT>extension = splitext(filebase)[<NUM_LIT:1>]<EOL>f = read_fastfood_catalog if '<STR_LIT>' in extension else read_ascii_catalog<EOL>x, y, z = f(filebase, np.float)<EOL>return x, y, z<EOL><DEDENT>raise IOError("<STR_LIT>", filebase)<EOL><DEDENT>
|
Reads a galaxy/randoms catalog.
:param filebase: (optional)
The fully qualified path to the file. If omitted, reads the
theory galaxy catalog under ../tests/data/
Returns:
* ``x y z`` - Unpacked numpy arrays compatible with the installed
version of ``Corrfunc``.
**Note** If the filename is omitted, then first the fast-food file
is searched for, and then the ascii file. End-users should always
supply the full filename.
|
f10156:m1
|
def distance(self, loc):
|
assert type(loc) == type(self)<EOL>lon1, lat1, lon2, lat2 = map(radians, [<EOL>self.lon,<EOL>self.lat,<EOL>loc.lon,<EOL>loc.lat,<EOL>])<EOL>dlon = lon2 - lon1<EOL>dlat = lat2 - lat1<EOL>a = sin(dlat/<NUM_LIT:2>)**<NUM_LIT:2> + cos(lat1) * cos(lat2) * sin(dlon/<NUM_LIT:2>)**<NUM_LIT:2><EOL>c = <NUM_LIT:2> * asin(sqrt(a))<EOL>r = <NUM_LIT> <EOL>return c * r<EOL>
|
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
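For reference, the body implements the haversine formula: after converting
to radians,
a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2)
c = 2 * asin(sqrt(a))
and the distance is c * R, with R the Earth's radius (roughly 6371 km; the
exact constant is anonymized above).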
|
f10158:c0:m1
|
def allow_key(self):
|
return '<STR_LIT>'.format(self.cls_key(), self.id)<EOL>
|
Gets the key associated with this user where we store permission
information
|
f10175:c0:m0
|
def value_or_default(self, value):
|
if value is None:<EOL><INDENT>if callable(self.default):<EOL><INDENT>return self.default()<EOL><DEDENT>else:<EOL><INDENT>return self.default<EOL><DEDENT><DEDENT>return value<EOL>
|
Returns the given value or the specified default value for this
field
|
f10176:c0:m1
|
def validate_required(self, value):
|
if self.required and (value is None or value=='<STR_LIT>'):<EOL><INDENT>raise MissingFieldError(self.name)<EOL><DEDENT>
|
Validates the given value against this field's 'required' property
|
f10176:c0:m2
|
def init(self, value):
|
return self.value_or_default(value)<EOL>
|
Returns the value that will be set in the model when it is passed
as an __init__ attribute
|
f10176:c0:m3
|
def recover(self, data, redis=None):
|
value = data.get(self.name)<EOL>if value is None or value == '<STR_LIT:None>':<EOL><INDENT>return None<EOL><DEDENT>return str(value)<EOL>
|
Retrieve this field's value from the database
|
f10176:c0:m4
|
def prepare(self, value):
|
if value is None: return None<EOL>return str(value)<EOL>
|
Prepare this field's value to insert in database
|
f10176:c0:m5
|
def to_json(self, value):
|
return value<EOL>
|
Format the value to be presented in JSON
|
f10176:c0:m6
|
def save(self, value, redis, *, commit=True):
|
value = self.prepare(value)<EOL>if value is not None:<EOL><INDENT>redis.hset(self.obj.key(), self.name, value)<EOL><DEDENT>else:<EOL><INDENT>redis.hdel(self.obj.key(), self.name)<EOL><DEDENT>if self.index:<EOL><INDENT>key = self.key()<EOL>if self.name in self.obj._old:<EOL><INDENT>redis.hdel(key, self.obj._old[self.name])<EOL><DEDENT>if value is not None:<EOL><INDENT>redis.hset(key, value, self.obj.id)<EOL><DEDENT><DEDENT>
|
Sets this field's value in the database
|
f10176:c0:m7
|
def delete(self, redis):
|
if self.index:<EOL><INDENT>redis.hdel(self.key(), getattr(self.obj, self.name))<EOL><DEDENT>
|
Deletes this field's value from the database. Should be implemented
in special cases
|
f10176:c0:m8
|
def validate(self, value, redis):
|
<EOL>if type(value) == str:<EOL><INDENT>value = value.strip()<EOL><DEDENT>value = self.value_or_default(value)<EOL>self.validate_required(value)<EOL>if self.regex and not re.match(self.regex, value, flags=re.ASCII):<EOL><INDENT>raise InvalidFieldError(self.name)<EOL><DEDENT>if self.forbidden and value in self.forbidden:<EOL><INDENT>raise ReservedFieldError(self.name)<EOL><DEDENT>if self.allowed and value not in self.allowed:<EOL><INDENT>raise InvalidFieldError(self.name)<EOL><DEDENT>if self.index:<EOL><INDENT>key = self.key()<EOL>old = debyte_string(redis.hget(key, value))<EOL>old_value = getattr(self.obj, self.name)<EOL>if old is not None and old != self.obj.id:<EOL><INDENT>raise NotUniqueFieldError(self.name)<EOL><DEDENT>elif old_value != value:<EOL><INDENT>self.obj._old[self.name] = old_value<EOL><DEDENT><DEDENT>return value<EOL>
|
Validates data obtained from a request and returns it in the appropriate
format
|
f10176:c0:m9
|
def save(self, value, redis, *, commit=True):
|
value = self.prepare(value)<EOL>if value is not None:<EOL><INDENT>redis.hset(self.obj.key(), self.name, value)<EOL><DEDENT>else:<EOL><INDENT>redis.hdel(self.obj.key(), self.name)<EOL><DEDENT>key = self.key()<EOL>if self.name in self.obj._old:<EOL><INDENT>redis.hdel(key, self.obj._old[self.name])<EOL><DEDENT>redis.sadd(key + '<STR_LIT::>' + value, self.obj.id)<EOL>
|
Sets this field's value in the database
|
f10176:c2:m0
|
def delete(self, redis):
|
value = getattr(self.obj, self.name)<EOL>redis.srem(self.key() + '<STR_LIT::>' + value, self.obj.id)<EOL>
|
Deletes this field's value from the database. Should be implemented
in special cases
|
f10176:c2:m1
|
def init(self, value):
|
value = self.value_or_default(value)<EOL>if value is None: return None<EOL>if is_hashed(value):<EOL><INDENT>return value<EOL><DEDENT>return make_password(value)<EOL>
|
hash passwords given in the constructor
|
f10176:c3:m0
|
def prepare(self, value):
|
if value is None:<EOL><INDENT>return None<EOL><DEDENT>if is_hashed(value):<EOL><INDENT>return value<EOL><DEDENT>return make_password(value)<EOL>
|
Prepare this field's value to be inserted into the database
|
f10176:c3:m1
|
def validate(self, value, redis):
|
value = super().validate(value, redis)<EOL>if is_hashed(value):<EOL><INDENT>return value<EOL><DEDENT>return make_password(value)<EOL>
|
hash passwords given via HTTP
|
f10176:c3:m2
|
def validate(self, value, redis):
|
value = self.value_or_default(value)<EOL>self.validate_required(value)<EOL>if value is None:<EOL><INDENT>return None<EOL><DEDENT>if type(value) == str:<EOL><INDENT>try:<EOL><INDENT>value = datetime.datetime.strptime(value, '<STR_LIT>')<EOL><DEDENT>except ValueError:<EOL><INDENT>raise InvalidFieldError(self.name)<EOL><DEDENT><DEDENT>return value<EOL>
|
Validates data obtained from a request in ISO 8601 format and returns it as a datetime object
|
f10176:c7:m0
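The format literal in the body above is masked in this dump; as a rough sketch, assuming the field expects an ISO 8601 timestamp parsed with the hypothetical format '%Y-%m-%dT%H:%M:%S', the parsing step looks like this:

    import datetime

    def parse_iso8601(value):
        # '%Y-%m-%dT%H:%M:%S' is an assumed format; the dataset masks the real literal.
        if value is None:
            return None
        if isinstance(value, str):
            try:
                return datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
            except ValueError:
                raise ValueError('not a valid ISO 8601 timestamp: %r' % value)
        return value

    print(parse_iso8601('2020-01-31T12:00:00'))   # 2020-01-31 12:00:00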
|
def recover(self, data, redis=None):
|
return []<EOL>
|
Don't read the database by default
|
f10176:c12:m3
|
def fill(self, **kwargs):
|
setattr(self.obj, self.name, self.get(**kwargs))<EOL>
|
Loads the relationships into this model. They are not loaded by
default
|
f10176:c12:m6
|
def get(self, **kwargs):
|
redis = type(self.obj).get_redis()<EOL>related = list(map(<EOL>lambda id : self.model().get(debyte_string(id)),<EOL>self.get_related_ids(redis, **kwargs)<EOL>))<EOL>return related<EOL>
|
Returns this relation
|
f10176:c12:m7
|
def make_filter(self, fieldname, query_func, expct_value):
|
def actual_filter(item):<EOL><INDENT>value = getattr(item, fieldname)<EOL>if query_func in NULL_AFFECTED_FILTERS and value is None:<EOL><INDENT>return False<EOL><DEDENT>if query_func == '<STR_LIT>':<EOL><INDENT>return value == expct_value<EOL><DEDENT>elif query_func == '<STR_LIT>':<EOL><INDENT>return value != expct_value<EOL><DEDENT>elif query_func == '<STR_LIT>':<EOL><INDENT>return value < expct_value<EOL><DEDENT>elif query_func == '<STR_LIT>':<EOL><INDENT>return value <= expct_value<EOL><DEDENT>elif query_func == '<STR_LIT>':<EOL><INDENT>return value > expct_value<EOL><DEDENT>elif query_func == '<STR_LIT>':<EOL><INDENT>return value >= expct_value<EOL><DEDENT>elif query_func == '<STR_LIT>':<EOL><INDENT>return value.startswith(expct_value)<EOL><DEDENT>elif query_func == '<STR_LIT>':<EOL><INDENT>return value.endswith(expct_value)<EOL><DEDENT><DEDENT>actual_filter.__doc__ = '<STR_LIT>'.format('<STR_LIT>', query_func, expct_value)<EOL>return actual_filter<EOL>
|
makes a filter that will be applied to an object's property based
on query_func
|
f10178:c0:m4
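The operator names compared against query_func are masked above; a minimal sketch of the same closure pattern, assuming Django-style lookup names ('eq', 'lt', 'startswith', ...), which are hypothetical here:

    def make_filter(fieldname, query_func, expct_value):
        # Hypothetical operator names; the dump masks the real string literals.
        # Unlike the original, this sketch short-circuits on None for every operator.
        ops = {
            'eq':         lambda v: v == expct_value,
            'ne':         lambda v: v != expct_value,
            'lt':         lambda v: v < expct_value,
            'lte':        lambda v: v <= expct_value,
            'gt':         lambda v: v > expct_value,
            'gte':        lambda v: v >= expct_value,
            'startswith': lambda v: v.startswith(expct_value),
            'endswith':   lambda v: v.endswith(expct_value),
        }
        def actual_filter(item):
            value = getattr(item, fieldname)
            if value is None:
                return False
            return ops[query_func](value)
        return actual_filter

    class Item:
        def __init__(self, name):
            self.name = name

    flt = make_filter('name', 'startswith', 'foo')
    items = [Item('foobar'), Item('bar'), Item(None)]
    print([i.name for i in items if flt(i)])   # ['foobar']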
|
@classmethod<EOL><INDENT>def validate(cls, **kwargs):<DEDENT>
|
<EOL>errors = ValidationErrors()<EOL>obj = cls()<EOL>redis = cls.get_redis()<EOL>for fieldname, field in obj.proxy:<EOL><INDENT>if not field.fillable:<EOL><INDENT>value = field.default<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>value = field.validate(kwargs.get(fieldname), redis)<EOL><DEDENT>except BadField as e:<EOL><INDENT>errors.append(e)<EOL>continue<EOL><DEDENT><DEDENT>setattr(<EOL>obj,<EOL>fieldname,<EOL>value<EOL>)<EOL><DEDENT>for fieldname in dir(cls):<EOL><INDENT>rule = getattr(cls, fieldname)<EOL>if hasattr(rule, '<STR_LIT>') and rule._is_validation_rule:<EOL><INDENT>try:<EOL><INDENT>rule(obj)<EOL><DEDENT>except BadField as e:<EOL><INDENT>errors.append(e)<EOL><DEDENT><DEDENT><DEDENT>if errors.has_errors():<EOL><INDENT>raise errors<EOL><DEDENT>return obj<EOL>
|
Validates the data received as keyword arguments whose names match
this class's attributes.
|
f10179:c1:m1
|
@classmethod<EOL><INDENT>def set_engine(cls, neweng):<DEDENT>
|
assert isinstance(neweng, Engine), '<STR_LIT>'<EOL>if hasattr(cls, '<STR_LIT:Meta>'):<EOL><INDENT>cls.Meta.engine = neweng<EOL><DEDENT>else:<EOL><INDENT>class Meta:<EOL><INDENT>engine = neweng<EOL><DEDENT>cls.Meta = Meta<EOL><DEDENT>
|
Sets the given coralillo engine so the model uses it to communicate
with the redis database
|
f10179:c1:m4
|
def save(self):
|
redis = type(self).get_redis()<EOL>pipe = to_pipeline(redis)<EOL>pipe.hset(self.key(), '<STR_LIT:id>', self.id)<EOL>for fieldname, field in self.proxy:<EOL><INDENT>if not isinstance(field, Relation):<EOL><INDENT>field.save(getattr(self, fieldname), pipe, commit=False)<EOL><DEDENT><DEDENT>pipe.sadd(type(self).members_key(), self.id)<EOL>pipe.execute()<EOL>if self.notify:<EOL><INDENT>data = json.dumps({<EOL>'<STR_LIT>': '<STR_LIT>' if not self._persisted else '<STR_LIT>',<EOL>'<STR_LIT:data>': self.to_json(),<EOL>})<EOL>redis.publish(type(self).cls_key(), data)<EOL>redis.publish(self.key(), data)<EOL><DEDENT>self._persisted = True<EOL>return self<EOL>
|
Persists this object to the database. Each field knows how to store
itself so we don't have to worry about it
|
f10179:c2:m1
|
def update(self, **kwargs):
|
redis = type(self).get_redis()<EOL>errors = ValidationErrors()<EOL>for fieldname, field in self.proxy:<EOL><INDENT>if not field.fillable:<EOL><INDENT>continue<EOL><DEDENT>given = kwargs.get(fieldname)<EOL>if given is None:<EOL><INDENT>continue<EOL><DEDENT>try:<EOL><INDENT>value = field.validate(kwargs.get(fieldname), redis)<EOL><DEDENT>except BadField as e:<EOL><INDENT>errors.append(e)<EOL>continue<EOL><DEDENT>setattr(<EOL>self,<EOL>fieldname,<EOL>value<EOL>)<EOL><DEDENT>if errors.has_errors():<EOL><INDENT>raise errors<EOL><DEDENT>return self.save()<EOL>
|
validates the given data against this object's rules and then
updates and saves this object
|
f10179:c2:m2
|
@staticmethod<EOL><INDENT>def is_object_key(key):<DEDENT>
|
return re.match('<STR_LIT>', key)<EOL>
|
checks if the given key belongs to an object. It's easy since it
depends on the key ending with ':obj'
|
f10179:c2:m3
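The regular expression is masked above; per the docstring, object keys end in ':obj', so a plausible (hypothetical) pattern is:

    import re

    def is_object_key(key):
        # Hypothetical pattern '<prefix>:<id>:obj'; the real literal is masked in the dump.
        return re.match(r'^[a-z_]+:[A-Za-z0-9]+:obj$', key) is not None

    print(is_object_key('user:av1xb:obj'))   # True
    print(is_object_key('user:members'))     # False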
|
@classmethod<EOL><INDENT>def get(cls, id):<DEDENT>
|
if not id:<EOL><INDENT>return None<EOL><DEDENT>redis = cls.get_redis()<EOL>key = '<STR_LIT>'.format(cls.cls_key(), id)<EOL>if not redis.exists(key):<EOL><INDENT>return None<EOL><DEDENT>obj = cls(id=id)<EOL>obj._persisted = True<EOL>data = debyte_hash(redis.hgetall(key))<EOL>for fieldname, field in obj.proxy:<EOL><INDENT>value = field.recover(data, redis)<EOL>setattr(<EOL>obj,<EOL>fieldname,<EOL>value<EOL>)<EOL><DEDENT>return obj<EOL>
|
Retrieves an object by id. Returns None in case of failure
|
f10179:c2:m4
|
@classmethod<EOL><INDENT>def q(cls, **kwargs):<DEDENT>
|
redis = cls.get_redis()<EOL>return QuerySet(cls, redis.sscan_iter(cls.members_key()))<EOL>
|
Creates an iterator over the members of this class that applies the
given filters and returns only the elements matching them
|
f10179:c2:m5
|
@classmethod<EOL><INDENT>def count(cls):<DEDENT>
|
redis = cls.get_redis()<EOL>return redis.scard(cls.members_key())<EOL>
|
returns object count for this model
|
f10179:c2:m6
|
def reload(self):
|
key = self.key()<EOL>redis = type(self).get_redis()<EOL>if not redis.exists(key):<EOL><INDENT>raise ModelNotFoundError('<STR_LIT>')<EOL><DEDENT>data = debyte_hash(redis.hgetall(key))<EOL>for fieldname, field in self.proxy:<EOL><INDENT>value = field.recover(data, redis)<EOL>setattr(<EOL>self,<EOL>fieldname,<EOL>value<EOL>)<EOL><DEDENT>return self<EOL>
|
reloads this object so that, if it was updated in the database, it now
contains the new values
|
f10179:c2:m7
|
@classmethod<EOL><INDENT>def get_or_exception(cls, id):<DEDENT>
|
obj = cls.get(id)<EOL>if obj is None:<EOL><INDENT>raise ModelNotFoundError('<STR_LIT>')<EOL><DEDENT>return obj<EOL>
|
Tries to retrieve an instance of this model from the database or
raises an exception in case of failure
|
f10179:c2:m8
|
@classmethod<EOL><INDENT>def get_by(cls, field, value):<DEDENT>
|
redis = cls.get_redis()<EOL>key = cls.cls_key()+'<STR_LIT>'+field<EOL>id = redis.hget(key, value)<EOL>if id:<EOL><INDENT>return cls.get(debyte_string(id))<EOL><DEDENT>return None<EOL>
|
Tries to retrieve an instance of this model from the database
given a value for a defined index. Returns None in case of failure
|
f10179:c2:m9
|
@classmethod<EOL><INDENT>def get_all(cls):<DEDENT>
|
redis = cls.get_redis()<EOL>return list(map(<EOL>lambda id: cls.get(id),<EOL>map(<EOL>debyte_string,<EOL>redis.smembers(cls.members_key())<EOL>)<EOL>))<EOL>
|
Gets all available instances of this model from the database
|
f10179:c2:m11
|
@classmethod<EOL><INDENT>def tree_match(cls, field, string):<DEDENT>
|
if not string:<EOL><INDENT>return set()<EOL><DEDENT>redis = cls.get_redis()<EOL>prefix = '<STR_LIT>'.format(cls.cls_key(), field)<EOL>pieces = string.split('<STR_LIT::>')<EOL>ans = redis.sunion(<EOL>prefix + '<STR_LIT::>' + '<STR_LIT::>'.join(pieces[<NUM_LIT:0>:i+<NUM_LIT:1>])<EOL>for i in range(len(pieces))<EOL>)<EOL>return sorted(map(<EOL>lambda id: cls.get(id),<EOL>map(<EOL>debyte_string,<EOL>ans<EOL>)<EOL>), key=lambda x:x.id)<EOL>
|
Given a tree index, retrieves the ids attached to the given prefix.
Think of it as a mechanism for pattern subscription, where two models
attached to `a` and `a:b` respectively are both found by the `a:b` string,
because both models' subscription keys match it.
|
f10179:c2:m12
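Before the SUNION, the body enumerates one index key per prefix of the colon-separated string; a sketch of just that enumeration (the key layout, including the 'tree_' infix, is an assumption), with no Redis connection required:

    def tree_prefixes(cls_key, field, string):
        # Key layout '<cls_key>:tree_<field>:<prefix>' is hypothetical; the literals are masked.
        pieces = string.split(':')
        prefix = '{}:tree_{}'.format(cls_key, field)
        return [prefix + ':' + ':'.join(pieces[:i + 1]) for i in range(len(pieces))]

    print(tree_prefixes('message', 'topic', 'a:b:c'))
    # ['message:tree_topic:a', 'message:tree_topic:a:b', 'message:tree_topic:a:b:c']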
|
@classmethod<EOL><INDENT>def cls_key(cls):<DEDENT>
|
return snake_case(cls.__name__)<EOL>
|
Returns the redis key prefix assigned to this model
|
f10179:c2:m13
|
@classmethod<EOL><INDENT>def members_key(cls):<DEDENT>
|
return cls.cls_key() + '<STR_LIT>'<EOL>
|
This key holds a set whose members are the ids that exist of objects
from this class
|
f10179:c2:m14
|
def key(self):
|
prefix = type(self).cls_key()<EOL>return '<STR_LIT>'.format(prefix, self.id)<EOL>
|
Returns the redis key to access this object's values
|
f10179:c2:m15
|
def fqn(self):
|
prefix = type(self).cls_key()<EOL>return '<STR_LIT>'.format(prefix, self.id)<EOL>
|
Returns a fully qualified name for this object
|
f10179:c2:m16
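The key helpers above (cls_key, members_key, key, fqn) build the Redis key layout, but their format strings are masked in this dump. A sketch of one plausible layout, where every concrete suffix and separator is an assumption chosen to stay consistent with the ':obj' ending mentioned earlier:

    def cls_key():
        return 'user'                                    # snake_case(cls.__name__)

    def members_key():
        return cls_key() + ':members'                    # hypothetical suffix for the id set

    def key(obj_id):
        return '{}:{}:obj'.format(cls_key(), obj_id)     # hypothetical hash-key layout

    def fqn(obj_id):
        return '{}:{}'.format(cls_key(), obj_id)         # hypothetical fully qualified name

    print(members_key(), key('av1xb'), fqn('av1xb'))
    # user:members user:av1xb:obj user:av1xb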
|
def permission(self, restrict=None):
|
if restrict is None:<EOL><INDENT>return self.fqn()<EOL><DEDENT>return self.fqn() + '<STR_LIT:/>' + restrict<EOL>
|
Returns a fully qualified key name to a permission over this object
|
f10179:c2:m17
|
def to_json(self, *, include=None):
|
json = dict()<EOL>if include is None or '<STR_LIT:id>' in include or '<STR_LIT:*>' in include:<EOL><INDENT>json['<STR_LIT:id>'] = self.id<EOL><DEDENT>if include is None or '<STR_LIT>' in include or '<STR_LIT:*>' in include:<EOL><INDENT>json['<STR_LIT>'] = type(self).cls_key()<EOL><DEDENT>def fieldfilter(fieldtuple):<EOL><INDENT>returnnot fieldtuple[<NUM_LIT:1>].private andnot isinstance(fieldtuple[<NUM_LIT:1>], Relation) and (<EOL>include is None or fieldtuple[<NUM_LIT:0>] in include or '<STR_LIT:*>' in include<EOL>)<EOL><DEDENT>json.update(dict(starmap(<EOL>lambda fn, f: (fn, f.to_json(getattr(self, fn))),<EOL>filter(<EOL>fieldfilter,<EOL>self.proxy<EOL>)<EOL>)))<EOL>for requested_relation in parse_embed(include):<EOL><INDENT>relation_name, subfields = requested_relation<EOL>if not hasattr(self.proxy, relation_name):<EOL><INDENT>continue<EOL><DEDENT>relation = getattr(self.proxy, relation_name)<EOL>if isinstance(relation, ForeignIdRelation):<EOL><INDENT>item = relation.get()<EOL>if item is not None:<EOL><INDENT>json[relation_name] = item.to_json(include=subfields)<EOL><DEDENT>else:<EOL><INDENT>json[relation_name] = None<EOL><DEDENT><DEDENT>elif isinstance(relation, MultipleRelation):<EOL><INDENT>json[relation_name] = list(map(lambda o: o.to_json(include=subfields), relation.get()))<EOL><DEDENT><DEDENT>return json<EOL>
|
Serializes this model to a JSON representation so it can be sent
via an HTTP REST API
|
f10179:c2:m18
|
def __eq__(self, other):
|
if type(other) == str:<EOL><INDENT>return self.id == other<EOL><DEDENT>if type(self) != type(other):<EOL><INDENT>return False<EOL><DEDENT>return self.id == other.id<EOL>
|
Compares this object to another. Returns true if both are of the
same class and have the same id, or if the other value is a string equal
to this object's id. Returns false otherwise
|
f10179:c2:m19
|
def delete(self):
|
redis = type(self).get_redis()<EOL>for fieldname, field in self.proxy:<EOL><INDENT>field.delete(redis)<EOL><DEDENT>redis.delete(self.key())<EOL>redis.srem(type(self).members_key(), self.id)<EOL>if isinstance(self, PermissionHolder):<EOL><INDENT>redis.delete(self.allow_key())<EOL><DEDENT>if self.notify:<EOL><INDENT>data = json.dumps({<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT:data>': self.to_json(),<EOL>})<EOL>redis.publish(type(self).cls_key(), data)<EOL>redis.publish(self.key(), data)<EOL><DEDENT>return self<EOL>
|
Deletes this model from the database, calling delete in each field
to properly delete special cases
|
f10179:c2:m20
|
def to_pipeline(redis):
|
if isinstance(redis, BasePipeline):<EOL><INDENT>return redis<EOL><DEDENT>return redis.pipeline()<EOL>
|
If the argument is a redis connection this function makes a redis
pipeline from it. Otherwise it returns the object passed, so it ensures
it is a pipeline
|
f10180:m0
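A stub-based sketch of the idempotent wrapper (the real check is against redis-py's pipeline class, which is not required here):

    class FakePipeline:
        pass

    class FakeConnection:
        def pipeline(self):
            return FakePipeline()

    def to_pipeline(conn):
        # Return the argument untouched if it is already a pipeline,
        # otherwise open a new pipeline on the connection.
        if isinstance(conn, FakePipeline):
            return conn
        return conn.pipeline()

    pipe = to_pipeline(FakeConnection())
    assert to_pipeline(pipe) is pipe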
|
def snake_case(string):
|
s1 = re.sub('<STR_LIT>', r'<STR_LIT>', string)<EOL>return re.sub('<STR_LIT>', r'<STR_LIT>', s1).lower()<EOL>
|
Takes a string that represents for example a class name and returns
the snake case version of it. It is used for model-to-key conversion
|
f10180:m1
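The two regex literals are masked above; the classic two-pass CamelCase-to-snake_case patterns are a reasonable guess (hypothetical, but they match the described behaviour):

    import re

    def snake_case(string):
        # Patterns are assumed; the dump masks the real literals.
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    print(snake_case('UserAccount'))   # user_account
    print(snake_case('HTTPRequest'))   # http_request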
|
def camelCase(string):
|
return '<STR_LIT>'.join(s[<NUM_LIT:0>].upper()+s[<NUM_LIT:1>:] for s in string.split('<STR_LIT:_>'))<EOL>
|
Takes a string that represents the redis key version of a model name
and returns its camel case version. It is used for key-to-model
conversion.
|
f10180:m2
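The join and split literals are masked; given the visible s[0].upper() + s[1:] logic, the inverse conversion presumably looks like this (the empty-string join and '_' split are assumptions):

    def camel_case(string):
        return ''.join(s[0].upper() + s[1:] for s in string.split('_'))

    print(camel_case('user_account'))   # UserAccount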
|
def force_text(s, encoding='<STR_LIT:utf-8>', strings_only=False, errors='<STR_LIT:strict>'):
|
<EOL>if issubclass(type(s), str):<EOL><INDENT>return s<EOL><DEDENT>if strings_only and is_protected_type(s):<EOL><INDENT>return s<EOL><DEDENT>try:<EOL><INDENT>if isinstance(s, bytes):<EOL><INDENT>s = str(s, encoding, errors)<EOL><DEDENT>else:<EOL><INDENT>s = str(s)<EOL><DEDENT><DEDENT>except UnicodeDecodeError as e:<EOL><INDENT>raise DjangoUnicodeDecodeError(s, *e.args)<EOL><DEDENT>return s<EOL>
|
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
|
f10181:m0
|
def force_bytes(s, encoding='<STR_LIT:utf-8>', strings_only=False, errors='<STR_LIT:strict>'):
|
<EOL>if isinstance(s, bytes):<EOL><INDENT>if encoding == '<STR_LIT:utf-8>':<EOL><INDENT>return s<EOL><DEDENT>else:<EOL><INDENT>return s.decode('<STR_LIT:utf-8>', errors).encode(encoding, errors)<EOL><DEDENT><DEDENT>if strings_only and is_protected_type(s):<EOL><INDENT>return s<EOL><DEDENT>if isinstance(s, memoryview):<EOL><INDENT>return bytes(s)<EOL><DEDENT>if not isinstance(s, str):<EOL><INDENT>return str(s).encode(encoding, errors)<EOL><DEDENT>else:<EOL><INDENT>return s.encode(encoding, errors)<EOL><DEDENT>
|
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
|
f10181:m1
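A simplified sketch of force_bytes with the masked defaults filled in as 'utf-8'/'strict' (the real function also special-cases lazy and protected types):

    def force_bytes(s, encoding='utf-8', errors='strict'):
        # Simplified: no lazy-object or protected-type handling.
        if isinstance(s, bytes):
            if encoding == 'utf-8':
                return s
            return s.decode('utf-8', errors).encode(encoding, errors)
        if isinstance(s, memoryview):
            return bytes(s)
        return str(s).encode(encoding, errors)

    print(force_bytes('héllo'))   # b'h\xc3\xa9llo'
    print(force_bytes(42))        # b'42'
    print(force_bytes(b'raw'))    # b'raw'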
|
def constant_time_compare(val1, val2):
|
return hmac.compare_digest(force_bytes(val1), force_bytes(val2))<EOL>
|
Return True if the two strings are equal, False otherwise.
|
f10181:m2
|
def get_random_string(length=<NUM_LIT:12>,<EOL>allowed_chars='<STR_LIT>'<EOL>'<STR_LIT>'):
|
if not using_sysrandom:<EOL><INDENT>random.seed(<EOL>hashlib.sha256(<EOL>('<STR_LIT>' % (random.getstate(), time.time(), settings.SECRET_KEY)).encode()<EOL>).digest()<EOL>)<EOL>return '<STR_LIT>'.join(random.choice(allowed_chars) for i in range(length))<EOL><DEDENT>
|
Return a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
|
f10181:m3
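The docstring's 71-bit figure follows from log2(62^12); a quick check of that arithmetic plus a secrets-based equivalent (the character set shown is an assumption matching the "a-z, A-Z, 0-9" description):

    import math
    import secrets
    import string

    ALPHABET = string.ascii_letters + string.digits     # 62 characters
    print(math.log2(len(ALPHABET)) * 12)                 # ~71.45 bits of entropy

    def random_string(length=12, allowed_chars=ALPHABET):
        # secrets.choice stands in for the masked implementation above.
        return ''.join(secrets.choice(allowed_chars) for _ in range(length))

    print(random_string())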
|
def check_password(password, encoded, setter=None, preferred='<STR_LIT:default>'):
|
if password is None:<EOL><INDENT>return False<EOL><DEDENT>preferred = bCryptPasswordHasher<EOL>hasher = bCryptPasswordHasher<EOL>hasher_changed = hasher.algorithm != preferred.algorithm<EOL>must_update = hasher_changed or preferred.must_update(encoded)<EOL>is_correct = hasher.verify(password, encoded)<EOL>if not is_correct and not hasher_changed and must_update:<EOL><INDENT>hasher.harden_runtime(password, encoded)<EOL><DEDENT>if setter and is_correct and must_update:<EOL><INDENT>setter(password)<EOL><DEDENT>return is_correct<EOL>
|
Return a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
|
f10181:m5
|
def make_password(password, salt=None, hasher='<STR_LIT:default>'):
|
if password is None:<EOL><INDENT>return UNUSABLE_PASSWORD_PREFIX + get_random_string(UNUSABLE_PASSWORD_SUFFIX_LENGTH)<EOL><DEDENT>hasher = bCryptPasswordHasher<EOL>if not salt:<EOL><INDENT>salt = hasher.salt()<EOL><DEDENT>return hasher.encode(password, salt)<EOL>
|
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
|
f10181:m6
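For the password=None branch the prefix and suffix length are masked; Django conventionally uses '!' and 40, and those values are assumed here purely for illustration:

    import secrets
    import string

    UNUSABLE_PASSWORD_PREFIX = '!'          # assumed value
    UNUSABLE_PASSWORD_SUFFIX_LENGTH = 40    # assumed value

    def make_unusable_password():
        alphabet = string.ascii_letters + string.digits
        suffix = ''.join(secrets.choice(alphabet)
                         for _ in range(UNUSABLE_PASSWORD_SUFFIX_LENGTH))
        return UNUSABLE_PASSWORD_PREFIX + suffix

    print(make_unusable_password())   # e.g. '!Xk9...' -- never verifies against any input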
|
def mask_hash(hash, show=<NUM_LIT:6>, char="<STR_LIT:*>"):
|
masked = hash[:show]<EOL>masked += char * len(hash[show:])<EOL>return masked<EOL>
|
Return the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
|
f10181:m7
|
def salt(self):
|
return get_random_string()<EOL>
|
Generate a cryptographically secure nonce salt in ASCII.
|
f10181:c0:m1
|
def verify(self, password, encoded):
|
raise NotImplementedError('<STR_LIT>')<EOL>
|
Check if the given password is correct.
|
f10181:c0:m2
|
def encode(self, password, salt):
|
raise NotImplementedError('<STR_LIT>')<EOL>
|
Create an encoded database value.
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
|
f10181:c0:m3
|
def safe_summary(self, encoded):
|
raise NotImplementedError('<STR_LIT>')<EOL>
|
Return a summary of safe values.
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
|
f10181:c0:m4
|
def harden_runtime(self, password, encoded):
|
warnings.warn('<STR_LIT>')<EOL>
|
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
|
f10181:c0:m6
|
def __init__(self,<EOL>configfile,<EOL>refresh_cache=False,<EOL>boto_profile=None<EOL>):
|
<EOL>self.boto_profile = boto_profile<EOL>self.inventory = self._empty_inventory()<EOL>self.index = {}<EOL>self.read_settings(configfile)<EOL>if self.boto_profile:<EOL><INDENT>if not hasattr(boto.ec2.EC2Connection, '<STR_LIT>'):<EOL><INDENT>self.fail_with_error("<STR_LIT>"<EOL>"<STR_LIT>")<EOL><DEDENT><DEDENT>if refresh_cache or not self.is_cache_valid():<EOL><INDENT>self.do_api_calls_update_cache()<EOL><DEDENT>else:<EOL><INDENT>self.load_inventory_from_cache()<EOL>self.load_index_from_cache()<EOL><DEDENT>
|
Initialize Ec2Inventory
|
f10185:c0:m1
|
def get_inventory(self):
|
return self.inventory<EOL>
|
Get full inventory
|
f10185:c0:m2
|
def is_cache_valid(self):
|
if os.path.isfile(self.cache_path_cache):<EOL><INDENT>mod_time = os.path.getmtime(self.cache_path_cache)<EOL>current_time = time()<EOL>if (mod_time + self.cache_max_age) > current_time:<EOL><INDENT>if os.path.isfile(self.cache_path_index):<EOL><INDENT>return True<EOL><DEDENT><DEDENT><DEDENT>return False<EOL>
|
Determines if the cache files have expired, or if it is still
valid
|
f10185:c0:m3
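The same mtime check as a standalone helper, with the cache paths passed in explicitly (the file names below are placeholders):

    import os
    import time

    def is_cache_valid(cache_path_cache, cache_path_index, cache_max_age):
        # Valid while the cache file's mtime plus the max age is still in the
        # future and the companion index file also exists.
        if os.path.isfile(cache_path_cache):
            if os.path.getmtime(cache_path_cache) + cache_max_age > time.time():
                return os.path.isfile(cache_path_index)
        return False

    print(is_cache_valid('/tmp/ansible-ec2.cache', '/tmp/ansible-ec2.index', 300))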
|
def read_settings(self, configfile):
|
if six.PY3:<EOL><INDENT>config = configparser.ConfigParser()<EOL><DEDENT>else:<EOL><INDENT>config = configparser.SafeConfigParser()<EOL><DEDENT>config.read(configfile)<EOL>self.eucalyptus_host = None<EOL>self.eucalyptus = False<EOL>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.eucalyptus = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>if self.eucalyptus and config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.eucalyptus_host = config.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>self.regions = []<EOL>configRegions = config.get('<STR_LIT>', '<STR_LIT>')<EOL>configRegions_exclude = config.get('<STR_LIT>', '<STR_LIT>')<EOL>if (configRegions == '<STR_LIT:all>'):<EOL><INDENT>if self.eucalyptus_host:<EOL><INDENT>self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)<EOL><DEDENT>else:<EOL><INDENT>for regionInfo in ec2.regions():<EOL><INDENT>if regionInfo.name not in configRegions_exclude:<EOL><INDENT>self.regions.append(regionInfo.name)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>self.regions = configRegions.split("<STR_LIT:U+002C>")<EOL><DEDENT>self.destination_variable = config.get('<STR_LIT>', '<STR_LIT>')<EOL>self.vpc_destination_variable = config.get('<STR_LIT>',<EOL>'<STR_LIT>')<EOL>self.route53_enabled = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL>self.route53_excluded_zones = []<EOL>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.route53_excluded_zones.extend(<EOL>config.get('<STR_LIT>', '<STR_LIT>', '<STR_LIT>').split('<STR_LIT:U+002C>'))<EOL><DEDENT>self.rds_enabled = True<EOL>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.rds_enabled = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>self.elasticache_enabled = True<EOL>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.elasticache_enabled = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.all_instances = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.all_instances = False<EOL><DEDENT>ec2_valid_instance_states = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>'<EOL>]<EOL>self.ec2_instance_states = []<EOL>if self.all_instances:<EOL><INDENT>self.ec2_instance_states = ec2_valid_instance_states<EOL><DEDENT>elif config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>for instance_state in config.get('<STR_LIT>',<EOL>'<STR_LIT>').split('<STR_LIT:U+002C>'):<EOL><INDENT>instance_state = instance_state.strip()<EOL>if instance_state not in ec2_valid_instance_states:<EOL><INDENT>continue<EOL><DEDENT>self.ec2_instance_states.append(instance_state)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>self.ec2_instance_states = ['<STR_LIT>']<EOL><DEDENT>if config.has_option('<STR_LIT>', '<STR_LIT>') and self.rds_enabled:<EOL><INDENT>self.all_rds_instances = config.getboolean('<STR_LIT>',<EOL>'<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.all_rds_instances = False<EOL><DEDENT>if (config.has_option('<STR_LIT>', '<STR_LIT>')<EOL>and self.elasticache_enabled):<EOL><INDENT>self.all_elasticache_replication_groups = config.getboolean(<EOL>'<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.all_elasticache_replication_groups = False<EOL><DEDENT>if (config.has_option('<STR_LIT>', '<STR_LIT>')<EOL>and self.elasticache_enabled):<EOL><INDENT>self.all_elasticache_clusters = config.getboolean(<EOL>'<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.all_elasticache_clusters = 
False<EOL><DEDENT>if config.has_option('<STR_LIT>', '<STR_LIT>') and self.elasticache_enabled:<EOL><INDENT>self.all_elasticache_nodes = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.all_elasticache_nodes = False<EOL><DEDENT>if config.has_option('<STR_LIT>', '<STR_LIT>') and not self.boto_profile:<EOL><INDENT>self.boto_profile = config.get('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>cache_dir = os.path.expanduser(config.get('<STR_LIT>', '<STR_LIT>'))<EOL>if self.boto_profile:<EOL><INDENT>cache_dir = os.path.join(cache_dir, '<STR_LIT>' + self.boto_profile)<EOL><DEDENT>if not os.path.exists(cache_dir):<EOL><INDENT>os.makedirs(cache_dir)<EOL><DEDENT>self.cache_path_cache = cache_dir + "<STR_LIT>"<EOL>self.cache_path_index = cache_dir + "<STR_LIT>"<EOL>self.cache_max_age = config.getint('<STR_LIT>', '<STR_LIT>')<EOL>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.expand_csv_tags = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.expand_csv_tags = False<EOL><DEDENT>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.nested_groups = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.nested_groups = False<EOL><DEDENT>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>self.replace_dash_in_groups = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>self.replace_dash_in_groups = True<EOL><DEDENT>group_by_options = [<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>'<STR_LIT>',<EOL>]<EOL>for option in group_by_options:<EOL><INDENT>if config.has_option('<STR_LIT>', option):<EOL><INDENT>setattr(self, option, config.getboolean('<STR_LIT>', option))<EOL><DEDENT>else:<EOL><INDENT>setattr(self, option, True)<EOL><DEDENT><DEDENT>try:<EOL><INDENT>pattern_include = config.get('<STR_LIT>', '<STR_LIT>')<EOL>if pattern_include and len(pattern_include) > <NUM_LIT:0>:<EOL><INDENT>self.pattern_include = re.compile(pattern_include)<EOL><DEDENT>else:<EOL><INDENT>self.pattern_include = None<EOL><DEDENT><DEDENT>except configparser.NoOptionError:<EOL><INDENT>self.pattern_include = None<EOL><DEDENT>try:<EOL><INDENT>pattern_exclude = config.get('<STR_LIT>', '<STR_LIT>');<EOL>if pattern_exclude and len(pattern_exclude) > <NUM_LIT:0>:<EOL><INDENT>self.pattern_exclude = re.compile(pattern_exclude)<EOL><DEDENT>else:<EOL><INDENT>self.pattern_exclude = None<EOL><DEDENT><DEDENT>except configparser.NoOptionError:<EOL><INDENT>self.pattern_exclude = None<EOL><DEDENT>self.ec2_instance_filters = defaultdict(list)<EOL>if config.has_option('<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>for instance_filter in config.get('<STR_LIT>', '<STR_LIT>', '<STR_LIT>').split('<STR_LIT:U+002C>'):<EOL><INDENT>instance_filter = instance_filter.strip()<EOL>if not instance_filter or '<STR_LIT:=>' not in instance_filter:<EOL><INDENT>continue<EOL><DEDENT>filter_key, filter_value = [x.strip() for x in instance_filter.split('<STR_LIT:=>', <NUM_LIT:1>)]<EOL>if not filter_key:<EOL><INDENT>continue<EOL><DEDENT>self.ec2_instance_filters[filter_key].append(filter_value)<EOL><DEDENT><DEDENT>
|
Reads the settings from the ec2.ini file
|
f10185:c0:m4
|
def do_api_calls_update_cache(self):
|
if self.route53_enabled:<EOL><INDENT>self.get_route53_records()<EOL><DEDENT>for region in self.regions:<EOL><INDENT>self.get_instances_by_region(region)<EOL>if self.rds_enabled:<EOL><INDENT>self.get_rds_instances_by_region(region)<EOL><DEDENT>if self.elasticache_enabled:<EOL><INDENT>self.get_elasticache_clusters_by_region(region)<EOL>self.get_elasticache_replication_groups_by_region(region)<EOL><DEDENT><DEDENT>self.write_to_cache(self.inventory, self.cache_path_cache)<EOL>self.write_to_cache(self.index, self.cache_path_index)<EOL>
|
Do API calls to each region, and save data in cache files
|
f10185:c0:m5
|
def connect(self, region):
|
if self.eucalyptus:<EOL><INDENT>conn = boto.connect_euca(host=self.eucalyptus_host)<EOL>conn.APIVersion = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>conn = self.connect_to_aws(ec2, region)<EOL><DEDENT>return conn<EOL>
|
create a connection to the API server
|
f10185:c0:m6
|
def boto_fix_security_token_in_profile(self, connect_args):
|
profile = '<STR_LIT>' + self.boto_profile<EOL>if boto.config.has_option(profile, '<STR_LIT>'):<EOL><INDENT>connect_args['<STR_LIT>'] = boto.config.get(profile, '<STR_LIT>')<EOL><DEDENT>return connect_args<EOL>
|
monkey patch for boto issue boto/boto#2100
|
f10185:c0:m7
|
def get_instances_by_region(self, region):
|
try:<EOL><INDENT>conn = self.connect(region)<EOL>reservations = []<EOL>if self.ec2_instance_filters:<EOL><INDENT>for filter_key, filter_values in self.ec2_instance_filters.items():<EOL><INDENT>reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>reservations = conn.get_all_instances()<EOL><DEDENT>for reservation in reservations:<EOL><INDENT>for instance in reservation.instances:<EOL><INDENT>self.add_instance(instance, region)<EOL><DEDENT><DEDENT><DEDENT>except boto.exception.BotoServerError as e:<EOL><INDENT>if e.error_code == '<STR_LIT>':<EOL><INDENT>error = self.get_auth_error_message()<EOL><DEDENT>else:<EOL><INDENT>backend = '<STR_LIT>' if self.eucalyptus else '<STR_LIT>' <EOL>error = "<STR_LIT>" % (backend, e.message)<EOL><DEDENT>self.fail_with_error(error, '<STR_LIT>')<EOL><DEDENT>
|
Makes an AWS EC2 API call to get the list of instances in a particular
region
|
f10185:c0:m9
|
def get_rds_instances_by_region(self, region):
|
try:<EOL><INDENT>conn = self.connect_to_aws(rds, region)<EOL>if conn:<EOL><INDENT>instances = conn.get_all_dbinstances()<EOL>for instance in instances:<EOL><INDENT>self.add_rds_instance(instance, region)<EOL><DEDENT><DEDENT><DEDENT>except boto.exception.BotoServerError as e:<EOL><INDENT>error = e.reason<EOL>if e.error_code == '<STR_LIT>':<EOL><INDENT>error = self.get_auth_error_message()<EOL><DEDENT>if not e.reason == "<STR_LIT>":<EOL><INDENT>error = "<STR_LIT>" % e.message<EOL><DEDENT>self.fail_with_error(error, '<STR_LIT>')<EOL><DEDENT>
|
Makes an AWS API call to get the list of RDS instances in a particular
region
|
f10185:c0:m10
|
def get_elasticache_clusters_by_region(self, region):
|
<EOL>try:<EOL><INDENT>conn = elasticache.connect_to_region(region)<EOL>if conn:<EOL><INDENT>response = conn.describe_cache_clusters(None, None, None, True)<EOL><DEDENT><DEDENT>except boto.exception.BotoServerError as e:<EOL><INDENT>error = e.reason<EOL>if e.error_code == '<STR_LIT>':<EOL><INDENT>error = self.get_auth_error_message()<EOL><DEDENT>if not e.reason == "<STR_LIT>":<EOL><INDENT>error = "<STR_LIT>" % e.message<EOL><DEDENT>self.fail_with_error(error, '<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>clusters = response['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>except KeyError as e:<EOL><INDENT>error = "<STR_LIT>"<EOL>self.fail_with_error(error, '<STR_LIT>')<EOL><DEDENT>for cluster in clusters:<EOL><INDENT>self.add_elasticache_cluster(cluster, region)<EOL><DEDENT>
|
Makes an AWS API call to get the list of ElastiCache clusters (with
nodes' info) in a particular region.
|
f10185:c0:m11
|
def get_elasticache_replication_groups_by_region(self, region):
|
<EOL>try:<EOL><INDENT>conn = elasticache.connect_to_region(region)<EOL>if conn:<EOL><INDENT>response = conn.describe_replication_groups()<EOL><DEDENT><DEDENT>except boto.exception.BotoServerError as e:<EOL><INDENT>error = e.reason<EOL>if e.error_code == '<STR_LIT>':<EOL><INDENT>error = self.get_auth_error_message()<EOL><DEDENT>if not e.reason == "<STR_LIT>":<EOL><INDENT>error = "<STR_LIT>" % e.message<EOL><DEDENT>self.fail_with_error(error, '<STR_LIT>')<EOL><DEDENT>try:<EOL><INDENT>replication_groups = response['<STR_LIT>']['<STR_LIT>']['<STR_LIT>']<EOL><DEDENT>except KeyError as e:<EOL><INDENT>error = "<STR_LIT>"<EOL>self.fail_with_error(error, '<STR_LIT>')<EOL><DEDENT>for replication_group in replication_groups:<EOL><INDENT>self.add_elasticache_replication_group(replication_group, region)<EOL><DEDENT>
|
Makes an AWS API call to get the list of ElastiCache replication groups
in a particular region.
|
f10185:c0:m12
|
def get_auth_error_message(self):
|
errors = ["<STR_LIT>"]<EOL>if None in [os.environ.get('<STR_LIT>'), os.environ.get('<STR_LIT>')]:<EOL><INDENT>errors.append('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>errors.append('<STR_LIT>')<EOL><DEDENT>boto_paths = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))<EOL>if len(boto_config_found) > <NUM_LIT:0>:<EOL><INDENT>errors.append("<STR_LIT>" % '<STR_LIT:U+002CU+0020>'.join(boto_config_found))<EOL><DEDENT>else:<EOL><INDENT>errors.append("<STR_LIT>" % '<STR_LIT:U+002CU+0020>'.join(boto_paths))<EOL><DEDENT>return '<STR_LIT:\n>'.join(errors)<EOL>
|
create an informative error message if there is an issue authenticating
|
f10185:c0:m13
|
def fail_with_error(self, err_msg, err_operation=None):
|
if err_operation:<EOL><INDENT>err_msg = '<STR_LIT>'.format(<EOL>err_msg=err_msg, err_operation=err_operation)<EOL><DEDENT>sys.stderr.write(err_msg)<EOL>sys.exit(<NUM_LIT:1>)<EOL>
|
log an error to stderr for ansible-playbook to consume, then exit
|
f10185:c0:m14
|
def add_instance(self, instance, region):
|
<EOL>if instance.state not in self.ec2_instance_states:<EOL><INDENT>return<EOL><DEDENT>if instance.subnet_id:<EOL><INDENT>dest = getattr(instance, self.vpc_destination_variable, None)<EOL>if dest is None:<EOL><INDENT>dest = getattr(instance, '<STR_LIT>').get(self.vpc_destination_variable, None)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dest = getattr(instance, self.destination_variable, None)<EOL>if dest is None:<EOL><INDENT>dest = getattr(instance, '<STR_LIT>').get(self.destination_variable, None)<EOL><DEDENT><DEDENT>if not dest:<EOL><INDENT>return<EOL><DEDENT>if self.pattern_include and not self.pattern_include.match(dest):<EOL><INDENT>return<EOL><DEDENT>if self.pattern_exclude and self.pattern_exclude.match(dest):<EOL><INDENT>return<EOL><DEDENT>self.index[dest] = [region, instance.id]<EOL>if self.group_by_instance_id:<EOL><INDENT>self.inventory[instance.id] = [dest]<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', instance.id)<EOL><DEDENT><DEDENT>if self.group_by_region:<EOL><INDENT>self.push(self.inventory, region, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', region)<EOL><DEDENT><DEDENT>if self.group_by_availability_zone:<EOL><INDENT>self.push(self.inventory, instance.placement, dest)<EOL>if self.nested_groups:<EOL><INDENT>if self.group_by_region:<EOL><INDENT>self.push_group(self.inventory, region, instance.placement)<EOL><DEDENT>self.push_group(self.inventory, '<STR_LIT>', instance.placement)<EOL><DEDENT><DEDENT>if self.group_by_ami_id:<EOL><INDENT>ami_id = self.to_safe(instance.image_id)<EOL>self.push(self.inventory, ami_id, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', ami_id)<EOL><DEDENT><DEDENT>if self.group_by_instance_type:<EOL><INDENT>type_name = self.to_safe('<STR_LIT>' + instance.instance_type)<EOL>self.push(self.inventory, type_name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', type_name)<EOL><DEDENT><DEDENT>if self.group_by_key_pair and instance.key_name:<EOL><INDENT>key_name = self.to_safe('<STR_LIT>' + instance.key_name)<EOL>self.push(self.inventory, key_name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', key_name)<EOL><DEDENT><DEDENT>if self.group_by_vpc_id and instance.vpc_id:<EOL><INDENT>vpc_id_name = self.to_safe('<STR_LIT>' + instance.vpc_id)<EOL>self.push(self.inventory, vpc_id_name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', vpc_id_name)<EOL><DEDENT><DEDENT>if self.group_by_security_group:<EOL><INDENT>try:<EOL><INDENT>for group in instance.groups:<EOL><INDENT>key = self.to_safe("<STR_LIT>" + group.name)<EOL>self.push(self.inventory, key, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', key)<EOL><DEDENT><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>self.fail_with_error('<STR_LIT:\n>'.join(['<STR_LIT>', <EOL>'<STR_LIT>']))<EOL><DEDENT><DEDENT>if self.group_by_tag_keys:<EOL><INDENT>for k, v in instance.tags.items():<EOL><INDENT>if self.expand_csv_tags and v and '<STR_LIT:U+002C>' in v:<EOL><INDENT>values = map(lambda x: x.strip(), v.split('<STR_LIT:U+002C>'))<EOL><DEDENT>else:<EOL><INDENT>values = [v]<EOL><DEDENT>for v in values:<EOL><INDENT>if v:<EOL><INDENT>key = self.to_safe("<STR_LIT>" + k + "<STR_LIT:=>" + v)<EOL><DEDENT>else:<EOL><INDENT>key = self.to_safe("<STR_LIT>" + k)<EOL><DEDENT>self.push(self.inventory, key, dest)<EOL>if 
self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', self.to_safe("<STR_LIT>" + k))<EOL>if v:<EOL><INDENT>self.push_group(self.inventory, self.to_safe("<STR_LIT>" + k), key)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>if self.route53_enabled and self.group_by_route53_names:<EOL><INDENT>route53_names = self.get_instance_route53_names(instance)<EOL>for name in route53_names:<EOL><INDENT>self.push(self.inventory, name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', name)<EOL><DEDENT><DEDENT><DEDENT>if self.group_by_tag_none and len(instance.tags) == <NUM_LIT:0>:<EOL><INDENT>self.push(self.inventory, '<STR_LIT>', dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT>self.push(self.inventory, '<STR_LIT>', dest)<EOL>self.inventory["<STR_LIT>"]["<STR_LIT>"][dest] = self.get_host_info_dict_from_instance(instance)<EOL>
|
Adds an instance to the inventory and index, as long as it is
addressable
|
f10185:c0:m16
|
def add_rds_instance(self, instance, region):
|
<EOL>if not self.all_rds_instances and instance.status != '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>dest = instance.endpoint[<NUM_LIT:0>]<EOL>if not dest:<EOL><INDENT>return<EOL><DEDENT>self.index[dest] = [region, instance.id]<EOL>if self.group_by_instance_id:<EOL><INDENT>self.inventory[instance.id] = [dest]<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', instance.id)<EOL><DEDENT><DEDENT>if self.group_by_region:<EOL><INDENT>self.push(self.inventory, region, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', region)<EOL><DEDENT><DEDENT>if self.group_by_availability_zone:<EOL><INDENT>self.push(self.inventory, instance.availability_zone, dest)<EOL>if self.nested_groups:<EOL><INDENT>if self.group_by_region:<EOL><INDENT>self.push_group(self.inventory, region, instance.availability_zone)<EOL><DEDENT>self.push_group(self.inventory, '<STR_LIT>', instance.availability_zone)<EOL><DEDENT><DEDENT>if self.group_by_instance_type:<EOL><INDENT>type_name = self.to_safe('<STR_LIT>' + instance.instance_class)<EOL>self.push(self.inventory, type_name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', type_name)<EOL><DEDENT><DEDENT>if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:<EOL><INDENT>vpc_id_name = self.to_safe('<STR_LIT>' + instance.subnet_group.vpc_id)<EOL>self.push(self.inventory, vpc_id_name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', vpc_id_name)<EOL><DEDENT><DEDENT>if self.group_by_security_group:<EOL><INDENT>try:<EOL><INDENT>if instance.security_group:<EOL><INDENT>key = self.to_safe("<STR_LIT>" + instance.security_group.name)<EOL>self.push(self.inventory, key, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', key)<EOL><DEDENT><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>self.fail_with_error('<STR_LIT:\n>'.join(['<STR_LIT>', <EOL>'<STR_LIT>']))<EOL><DEDENT><DEDENT>if self.group_by_rds_engine:<EOL><INDENT>self.push(self.inventory, self.to_safe("<STR_LIT>" + instance.engine), dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', self.to_safe("<STR_LIT>" + instance.engine))<EOL><DEDENT><DEDENT>if self.group_by_rds_parameter_group:<EOL><INDENT>self.push(self.inventory, self.to_safe("<STR_LIT>" + instance.parameter_group.name), dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', self.to_safe("<STR_LIT>" + instance.parameter_group.name))<EOL><DEDENT><DEDENT>self.push(self.inventory, '<STR_LIT>', dest)<EOL>self.inventory["<STR_LIT>"]["<STR_LIT>"][dest] = self.get_host_info_dict_from_instance(instance)<EOL>
|
Adds an RDS instance to the inventory and index, as long as it is
addressable
|
f10185:c0:m17
|
def add_elasticache_cluster(self, cluster, region):
|
<EOL>if not self.all_elasticache_clusters and cluster['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>if '<STR_LIT>' in cluster and cluster['<STR_LIT>']:<EOL><INDENT>dest = cluster['<STR_LIT>']['<STR_LIT>']<EOL>is_redis = False<EOL><DEDENT>else:<EOL><INDENT>dest = cluster['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT>']['<STR_LIT>']<EOL>is_redis = True<EOL><DEDENT>if not dest:<EOL><INDENT>return<EOL><DEDENT>self.index[dest] = [region, cluster['<STR_LIT>']]<EOL>if self.group_by_instance_id:<EOL><INDENT>self.inventory[cluster['<STR_LIT>']] = [dest]<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', cluster['<STR_LIT>'])<EOL><DEDENT><DEDENT>if self.group_by_region and not is_redis:<EOL><INDENT>self.push(self.inventory, region, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', region)<EOL><DEDENT><DEDENT>if self.group_by_availability_zone and not is_redis:<EOL><INDENT>self.push(self.inventory, cluster['<STR_LIT>'], dest)<EOL>if self.nested_groups:<EOL><INDENT>if self.group_by_region:<EOL><INDENT>self.push_group(self.inventory, region, cluster['<STR_LIT>'])<EOL><DEDENT>self.push_group(self.inventory, '<STR_LIT>', cluster['<STR_LIT>'])<EOL><DEDENT><DEDENT>if self.group_by_instance_type and not is_redis:<EOL><INDENT>type_name = self.to_safe('<STR_LIT>' + cluster['<STR_LIT>'])<EOL>self.push(self.inventory, type_name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', type_name)<EOL><DEDENT><DEDENT>if self.group_by_security_group and not is_redis:<EOL><INDENT>if '<STR_LIT>' in cluster and cluster['<STR_LIT>'] is not None:<EOL><INDENT>for security_group in cluster['<STR_LIT>']:<EOL><INDENT>key = self.to_safe("<STR_LIT>" + security_group['<STR_LIT>'])<EOL>self.push(self.inventory, key, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', key)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if self.group_by_elasticache_engine and not is_redis:<EOL><INDENT>self.push(self.inventory, self.to_safe("<STR_LIT>" + cluster['<STR_LIT>']), dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', self.to_safe(cluster['<STR_LIT>']))<EOL><DEDENT><DEDENT>if self.group_by_elasticache_parameter_group:<EOL><INDENT>self.push(self.inventory, self.to_safe("<STR_LIT>" + cluster['<STR_LIT>']['<STR_LIT>']), dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', self.to_safe(cluster['<STR_LIT>']['<STR_LIT>']))<EOL><DEDENT><DEDENT>if self.group_by_elasticache_replication_group and '<STR_LIT>' in cluster and cluster['<STR_LIT>']:<EOL><INDENT>self.push(self.inventory, self.to_safe("<STR_LIT>" + cluster['<STR_LIT>']), dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', self.to_safe(cluster['<STR_LIT>']))<EOL><DEDENT><DEDENT>self.push(self.inventory, '<STR_LIT>', cluster['<STR_LIT>'])<EOL>host_info = self.get_host_info_dict_from_describe_dict(cluster)<EOL>self.inventory["<STR_LIT>"]["<STR_LIT>"][dest] = host_info<EOL>for node in cluster['<STR_LIT>']:<EOL><INDENT>self.add_elasticache_node(node, cluster, region)<EOL><DEDENT>
|
Adds an ElastiCache cluster to the inventory and index, as long as
its nodes are addressable
|
f10185:c0:m18
|
def add_elasticache_node(self, node, cluster, region):
|
<EOL>if not self.all_elasticache_nodes and node['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>dest = node['<STR_LIT>']['<STR_LIT>']<EOL>if not dest:<EOL><INDENT>return<EOL><DEDENT>node_id = self.to_safe(cluster['<STR_LIT>'] + '<STR_LIT:_>' + node['<STR_LIT>'])<EOL>self.index[dest] = [region, node_id]<EOL>if self.group_by_instance_id:<EOL><INDENT>self.inventory[node_id] = [dest]<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', node_id)<EOL><DEDENT><DEDENT>if self.group_by_region:<EOL><INDENT>self.push(self.inventory, region, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', region)<EOL><DEDENT><DEDENT>if self.group_by_availability_zone:<EOL><INDENT>self.push(self.inventory, cluster['<STR_LIT>'], dest)<EOL>if self.nested_groups:<EOL><INDENT>if self.group_by_region:<EOL><INDENT>self.push_group(self.inventory, region, cluster['<STR_LIT>'])<EOL><DEDENT>self.push_group(self.inventory, '<STR_LIT>', cluster['<STR_LIT>'])<EOL><DEDENT><DEDENT>if self.group_by_instance_type:<EOL><INDENT>type_name = self.to_safe('<STR_LIT>' + cluster['<STR_LIT>'])<EOL>self.push(self.inventory, type_name, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', type_name)<EOL><DEDENT><DEDENT>if self.group_by_security_group:<EOL><INDENT>if '<STR_LIT>' in cluster and cluster['<STR_LIT>'] is not None:<EOL><INDENT>for security_group in cluster['<STR_LIT>']:<EOL><INDENT>key = self.to_safe("<STR_LIT>" + security_group['<STR_LIT>'])<EOL>self.push(self.inventory, key, dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', key)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if self.group_by_elasticache_engine:<EOL><INDENT>self.push(self.inventory, self.to_safe("<STR_LIT>" + cluster['<STR_LIT>']), dest)<EOL>if self.nested_groups:<EOL><INDENT>self.push_group(self.inventory, '<STR_LIT>', self.to_safe("<STR_LIT>" + cluster['<STR_LIT>']))<EOL><DEDENT><DEDENT>if self.group_by_elasticache_cluster:<EOL><INDENT>self.push(self.inventory, self.to_safe("<STR_LIT>" + cluster['<STR_LIT>']), dest)<EOL><DEDENT>self.push(self.inventory, '<STR_LIT>', dest)<EOL>host_info = self.get_host_info_dict_from_describe_dict(node)<EOL>if dest in self.inventory["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>self.inventory["<STR_LIT>"]["<STR_LIT>"][dest].update(host_info)<EOL><DEDENT>else:<EOL><INDENT>self.inventory["<STR_LIT>"]["<STR_LIT>"][dest] = host_info<EOL><DEDENT>
|
Adds an ElastiCache node to the inventory and index, as long as
it is addressable
|
f10185:c0:m19
|