Dataset schema:
  _id               string (length 2-7)
  title             string (length 1-88)
  partition         string (3 classes)
  text              string (length 75-19.8k)
  language          string (1 class)
  meta_information  dict
q14300
FittingSequence.pso
train
def pso(self, n_particles, n_iterations, sigma_scale=1, print_key='PSO', threadCount=1):
    """
    Particle Swarm Optimization

    :param n_particles: number of particles in the Particle Swarm Optimization
    :param n_iterations: number of iterations in the optimization process
    :param sigma_scale: scaling of the initial parameter spread relative to the width in the initial settings
    :param print_key: string, printed text when executing this routine
    :param threadCount: number of CPU threads. If MPI option is set, threadCount=1
    :return: result of the best fit, the chain of the best fit parameter after each iteration, list of parameters in same order
    """
    param_class = self._param_class
    init_pos = param_class.kwargs2args(self._lens_temp, self._source_temp, self._lens_light_temp,
                                       self._ps_temp, self._cosmo_temp)
    lens_sigma, source_sigma, lens_light_sigma, ps_sigma, cosmo_sigma = self._updateManager.sigma_kwargs
    sigma_start = param_class.kwargs2args(lens_sigma, source_sigma, lens_light_sigma, ps_sigma, cosmo_sigma)
    lowerLimit = np.array(init_pos) - np.array(sigma_start) * sigma_scale
    upperLimit = np.array(init_pos) + np.array(sigma_start) * sigma_scale
    num_param, param_list = param_class.num_param()
    # run PSO
    sampler = Sampler(likelihoodModule=self.likelihoodModule)
    result, chain = sampler.pso(n_particles, n_iterations, lowerLimit, upperLimit, init_pos=init_pos,
                                threadCount=threadCount, mpi=self._mpi, print_key=print_key)
    lens_result, source_result, lens_light_result, ps_result, cosmo_result = param_class.args2kwargs(result, bijective=True)
    return lens_result, source_result, lens_light_result, ps_result, cosmo_result, chain, param_list
python
{ "resource": "" }
q14301
FittingSequence.psf_iteration
train
def psf_iteration(self, num_iter=10, no_break=True, stacking_method='median', block_center_neighbour=0,
                  keep_psf_error_map=True, psf_symmetry=1, psf_iter_factor=1, verbose=True, compute_bands=None):
    """
    iterative PSF reconstruction

    :param num_iter: number of iterations in the process
    :param no_break: bool, if False, breaks the process as soon as one step leads to a worse reconstruction than the previous step
    :param stacking_method: string, 'median' and 'mean' supported
    :param block_center_neighbour: radius of neighbouring point source to be blocked in the reconstruction
    :param keep_psf_error_map: bool, whether or not to keep the previous psf_error_map
    :param psf_symmetry: int, number of invariant rotations in the reconstructed PSF
    :param psf_iter_factor: weight of the new estimated PSF relative to the old one:
        PSF_updated = (1 - psf_iter_factor) * PSF_old + psf_iter_factor * PSF_new
    :param verbose: bool, print statements
    :param compute_bands: bool list; if multiple bands, this process can be limited to a subset of bands
    :return: 0, updated PSF is stored in self.multi_band_list
    """
    # lens_temp = copy.deepcopy(lens_input)
    kwargs_model = self._updateManager.kwargs_model
    param_class = self._param_class
    lens_updated = param_class.update_lens_scaling(self._cosmo_temp, self._lens_temp)
    source_updated = param_class.image2source_plane(self._source_temp, lens_updated)
    if compute_bands is None:
        compute_bands = [True] * len(self.multi_band_list)
    for i in range(len(self.multi_band_list)):
        if compute_bands[i] is True:
            kwargs_data = self.multi_band_list[i][0]
            kwargs_psf = self.multi_band_list[i][1]
            kwargs_numerics = self.multi_band_list[i][2]
            image_model = class_creator.create_image_model(kwargs_data=kwargs_data, kwargs_psf=kwargs_psf,
                                                           kwargs_numerics=kwargs_numerics, kwargs_model=kwargs_model)
            psf_iter = PsfFitting(image_model_class=image_model)
            kwargs_psf = psf_iter.update_iterative(kwargs_psf, lens_updated, source_updated, self._lens_light_temp,
                                                   self._ps_temp, num_iter=num_iter, no_break=no_break,
                                                   stacking_method=stacking_method,
                                                   block_center_neighbour=block_center_neighbour,
                                                   keep_psf_error_map=keep_psf_error_map, psf_symmetry=psf_symmetry,
                                                   psf_iter_factor=psf_iter_factor, verbose=verbose)
            self.multi_band_list[i][1] = kwargs_psf
    return 0
python
{ "resource": "" }
q14302
FittingSequence.align_images
train
def align_images(self, n_particles=10, n_iterations=10, lowerLimit=-0.2, upperLimit=0.2, threadCount=1,
                 compute_bands=None):
    """
    aligns the coordinate systems of different exposures within a fixed model parameterisation
    by executing a PSO with relative coordinate shifts as free parameters

    :param n_particles: number of particles in the Particle Swarm Optimization
    :param n_iterations: number of iterations in the optimization process
    :param lowerLimit: lower limit of relative shift
    :param upperLimit: upper limit of relative shift
    :param threadCount: number of CPU threads
    :param compute_bands: bool list; if multiple bands, this process can be limited to a subset of bands
    :return: 0, updated kwargs_data are stored in self.multi_band_list
    """
    kwargs_model = self._updateManager.kwargs_model
    param_class = self._updateManager.param_class(self._lens_temp)
    lens_updated = param_class.update_lens_scaling(self._cosmo_temp, self._lens_temp)
    source_updated = param_class.image2source_plane(self._source_temp, lens_updated)
    if compute_bands is None:
        compute_bands = [True] * len(self.multi_band_list)
    for i in range(len(self.multi_band_list)):
        if compute_bands[i] is True:
            kwargs_data = self.multi_band_list[i][0]
            kwargs_psf = self.multi_band_list[i][1]
            kwargs_numerics = self.multi_band_list[i][2]
            alignmentFitting = AlignmentFitting(kwargs_data, kwargs_psf, kwargs_numerics, kwargs_model,
                                                lens_updated, source_updated, self._lens_light_temp, self._ps_temp)
            kwargs_data, chain = alignmentFitting.pso(n_particles=n_particles, n_iterations=n_iterations,
                                                      lowerLimit=lowerLimit, upperLimit=upperLimit,
                                                      threadCount=threadCount, mpi=self._mpi,
                                                      print_key='Alignment fitting for band %s ...' % i)
            print('Align completed for band %s.' % i)
            print('ra_shift: %s, dec_shift: %s' % (kwargs_data['ra_shift'], kwargs_data['dec_shift']))
            self.multi_band_list[i][0] = kwargs_data
    return 0
python
{ "resource": "" }
q14303
FittingSequence.update_settings
train
def update_settings(self, kwargs_model={}, kwargs_constraints={}, kwargs_likelihood={}, lens_add_fixed=[],
                    source_add_fixed=[], lens_light_add_fixed=[], ps_add_fixed=[], cosmo_add_fixed=[],
                    lens_remove_fixed=[], source_remove_fixed=[], lens_light_remove_fixed=[], ps_remove_fixed=[],
                    cosmo_remove_fixed=[], change_source_lower_limit=None, change_source_upper_limit=None):
    """
    updates lenstronomy settings "on the fly"

    :param kwargs_model: kwargs, specified keyword arguments overwrite the existing ones
    :param kwargs_constraints: kwargs, specified keyword arguments overwrite the existing ones
    :param kwargs_likelihood: kwargs, specified keyword arguments overwrite the existing ones
    :param lens_add_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param source_add_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param lens_light_add_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param ps_add_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param cosmo_add_fixed: ['param1', 'param2', ...]
    :param lens_remove_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param source_remove_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param lens_light_remove_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param ps_remove_fixed: [[i_model, ['param1', 'param2', ...]], [...]]
    :param cosmo_remove_fixed: ['param1', 'param2', ...]
    :param change_source_lower_limit: new lower limits for the source parameters (None keeps the current ones)
    :param change_source_upper_limit: new upper limits for the source parameters (None keeps the current ones)
    :return: 0, the settings are overwritten for the next fitting step to come
    """
    self._updateManager.update_options(kwargs_model, kwargs_constraints, kwargs_likelihood)
    self._updateManager.update_fixed(self._lens_temp, self._source_temp, self._lens_light_temp, self._ps_temp,
                                     self._cosmo_temp, lens_add_fixed, source_add_fixed, lens_light_add_fixed,
                                     ps_add_fixed, cosmo_add_fixed, lens_remove_fixed, source_remove_fixed,
                                     lens_light_remove_fixed, ps_remove_fixed, cosmo_remove_fixed)
    self._updateManager.update_limits(change_source_lower_limit, change_source_upper_limit)
    return 0
python
{ "resource": "" }
q14304
MamonLokasAnisotropy._B
train
def _B(self, x, a, b):
    """
    incomplete Beta function as described in Mamon & Lokas, eq. A13

    :param x: upper integration limit
    :param a: first shape parameter
    :param b: second shape parameter
    :return: incomplete Beta function B(x; a, b)
    """
    # special.betainc returns the regularized incomplete Beta function;
    # multiplying by the complete Beta function un-regularizes it
    return special.betainc(a, b, x) * special.beta(a, b)
python
{ "resource": "" }
q14305
UpdateManager.update_fixed
train
def update_fixed(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_cosmo,
                 lens_add_fixed=[], source_add_fixed=[], lens_light_add_fixed=[], ps_add_fixed=[],
                 cosmo_add_fixed=[], lens_remove_fixed=[], source_remove_fixed=[], lens_light_remove_fixed=[],
                 ps_remove_fixed=[], cosmo_remove_fixed=[]):
    """
    adds the values of the keyword arguments that are stated in the _add_fixed to the existing fixed arguments.

    :param kwargs_lens:
    :param kwargs_source:
    :param kwargs_lens_light:
    :param kwargs_ps:
    :param kwargs_cosmo:
    :param lens_add_fixed:
    :param source_add_fixed:
    :param lens_light_add_fixed:
    :param ps_add_fixed:
    :param cosmo_add_fixed:
    :return: updated kwargs fixed
    """
    lens_fixed = self._add_fixed(kwargs_lens, self._lens_fixed, lens_add_fixed)
    lens_fixed = self._remove_fixed(lens_fixed, lens_remove_fixed)
    source_fixed = self._add_fixed(kwargs_source, self._source_fixed, source_add_fixed)
    source_fixed = self._remove_fixed(source_fixed, source_remove_fixed)
    lens_light_fixed = self._add_fixed(kwargs_lens_light, self._lens_light_fixed, lens_light_add_fixed)
    lens_light_fixed = self._remove_fixed(lens_light_fixed, lens_light_remove_fixed)
    ps_fixed = self._add_fixed(kwargs_ps, self._ps_fixed, ps_add_fixed)
    ps_fixed = self._remove_fixed(ps_fixed, ps_remove_fixed)
    cosmo_fixed = copy.deepcopy(self._cosmo_fixed)
    for param_name in cosmo_add_fixed:
        if param_name not in cosmo_fixed:
            cosmo_fixed[param_name] = kwargs_cosmo[param_name]
    for param_name in cosmo_remove_fixed:
        if param_name in cosmo_fixed:
            del cosmo_fixed[param_name]
    self._lens_fixed, self._source_fixed, self._lens_light_fixed, self._ps_fixed, self._cosmo_fixed = \
        lens_fixed, source_fixed, lens_light_fixed, ps_fixed, cosmo_fixed
python
{ "resource": "" }
q14306
AnalyticKinematics.vel_disp_one
train
def vel_disp_one(self, gamma, rho0_r0_gamma, r_eff, r_ani, R_slit, dR_slit, FWHM):
    """
    computes one realisation of the velocity dispersion realised in the slit

    :param gamma: power-law slope of the mass profile (isothermal = 2)
    :param rho0_r0_gamma: combination of Einstein radius and power-law slope as equation (14) in Suyu+ 2010
    :param r_eff: half-light radius of the Hernquist profile (or an approximation of any other profile described as a Hernquist profile)
    :param r_ani: anisotropy radius
    :param R_slit: length of the slit/box
    :param dR_slit: width of the slit/box
    :param FWHM: full width at half maximum of the seeing conditions, described as a Gaussian
    :return: projected velocity dispersion of a single drawn position in the potential [km/s]
    """
    a = 0.551 * r_eff
    while True:
        r = self.P_r(a)  # draw r
        R, x, y = self.R_r(r)  # draw projected R
        x_, y_ = self.displace_PSF(x, y, FWHM)  # displace via PSF
        in_slit = self.check_in_slit(x_, y_, R_slit, dR_slit)  # renamed from 'bool' to avoid shadowing the builtin
        if in_slit is True:
            break
    sigma_s2 = self.sigma_s2(r, R, r_ani, a, gamma, rho0_r0_gamma)
    return sigma_s2
python
{ "resource": "" }
q14307
LensAnalysis.ellipticity_lens_light
train
def ellipticity_lens_light(self, kwargs_lens_light, center_x=0, center_y=0, model_bool_list=None, deltaPix=None,
                           numPix=None):
    """
    Make sure that the window covers all the light; otherwise the moments may give too low values.

    :param kwargs_lens_light:
    :param center_x:
    :param center_y:
    :param model_bool_list:
    :param deltaPix:
    :param numPix:
    :return:
    """
    if model_bool_list is None:
        model_bool_list = [True] * len(kwargs_lens_light)
    if numPix is None:
        numPix = 100
    if deltaPix is None:
        deltaPix = 0.05
    x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
    x_grid += center_x
    y_grid += center_y
    I_xy = self._lens_light_internal(x_grid, y_grid, kwargs_lens_light, model_bool_list=model_bool_list)
    e1, e2 = analysis_util.ellipticities(I_xy, x_grid, y_grid)
    return e1, e2
python
{ "resource": "" }
q14308
LensAnalysis._lens_light_internal
train
def _lens_light_internal(self, x_grid, y_grid, kwargs_lens_light, model_bool_list=None):
    """
    evaluates only part of the light profiles

    :param x_grid:
    :param y_grid:
    :param kwargs_lens_light:
    :return:
    """
    if model_bool_list is None:
        model_bool_list = [True] * len(kwargs_lens_light)
    lens_light = np.zeros_like(x_grid)
    for i, model_bool in enumerate(model_bool_list):
        if model_bool is True:
            lens_light_i = self.LensLightModel.surface_brightness(x_grid, y_grid, kwargs_lens_light, k=i)
            lens_light += lens_light_i
    return lens_light
python
{ "resource": "" }
q14309
LensAnalysis.multi_gaussian_lens
train
def multi_gaussian_lens(self, kwargs_lens, model_bool_list=None, e1=0, e2=0, n_comp=20):
    """
    multi-gaussian lens model in convergence space

    :param kwargs_lens:
    :param n_comp:
    :return:
    """
    if 'center_x' in kwargs_lens[0]:
        center_x = kwargs_lens[0]['center_x']
        center_y = kwargs_lens[0]['center_y']
    else:
        raise ValueError('no keyword center_x defined!')
    theta_E = self._lensModelExtensions.effective_einstein_radius(kwargs_lens)
    r_array = np.logspace(-4, 2, 200) * theta_E
    x_coords, y_coords = param_util.transform_e1e2(r_array, np.zeros_like(r_array), e1=-e1, e2=-e2)
    x_coords += center_x
    y_coords += center_y
    # r_array = np.logspace(-2, 1, 50) * theta_E
    if model_bool_list is None:
        model_bool_list = [True] * len(kwargs_lens)
    kappa_s = np.zeros_like(r_array)
    for i in range(len(kwargs_lens)):
        if model_bool_list[i] is True:
            kappa_s += self.LensModel.kappa(x_coords, y_coords, kwargs_lens, k=i)
    amplitudes, sigmas, norm = mge.mge_1d(r_array, kappa_s, N=n_comp)
    return amplitudes, sigmas, center_x, center_y
python
{ "resource": "" }
q14310
LensAnalysis.flux_components
train
def flux_components(self, kwargs_light, n_grid=400, delta_grid=0.01, deltaPix=0.05, type="lens"):
    """
    computes the total flux in each component of the model

    :param kwargs_light:
    :param n_grid:
    :param delta_grid:
    :return:
    """
    flux_list = []
    R_h_list = []
    x_grid, y_grid = util.make_grid(numPix=n_grid, deltapix=delta_grid)
    kwargs_copy = copy.deepcopy(kwargs_light)
    for k, kwargs in enumerate(kwargs_light):
        if 'center_x' in kwargs_copy[k]:
            kwargs_copy[k]['center_x'] = 0
            kwargs_copy[k]['center_y'] = 0
        if type == 'lens':
            light = self.LensLightModel.surface_brightness(x_grid, y_grid, kwargs_copy, k=k)
        elif type == 'source':
            light = self.SourceModel.surface_brightness(x_grid, y_grid, kwargs_copy, k=k)
        else:
            raise ValueError("type %s not supported!" % type)
        flux = np.sum(light) * delta_grid ** 2 / deltaPix ** 2
        R_h = analysis_util.half_light_radius(light, x_grid, y_grid)
        flux_list.append(flux)
        R_h_list.append(R_h)
    return flux_list, R_h_list
python
{ "resource": "" }
q14311
LensAnalysis.error_map_source
train
def error_map_source(self, kwargs_source, x_grid, y_grid, cov_param):
    """
    variance of the linear source reconstruction in the source plane coordinates,
    computed by the diagonal elements of the covariance matrix of the source
    reconstruction as a sum of the errors of the basis set.

    :param kwargs_source: keyword arguments of source model
    :param x_grid: x-axis of positions to compute error map
    :param y_grid: y-axis of positions to compute error map
    :param cov_param: covariance matrix of linear inversion parameters
    :return: diagonal covariance errors at the positions (x_grid, y_grid)
    """
    error_map = np.zeros_like(x_grid)
    basis_functions, n_source = self.SourceModel.functions_split(x_grid, y_grid, kwargs_source)
    basis_functions = np.array(basis_functions)
    if cov_param is not None:
        for i in range(len(error_map)):
            error_map[i] = basis_functions[:, i].T.dot(cov_param[:n_source, :n_source]).dot(basis_functions[:, i])
    return error_map
python
{ "resource": "" }
q14312
LensAnalysis.mass_fraction_within_radius
train
def mass_fraction_within_radius(self, kwargs_lens, center_x, center_y, theta_E, numPix=100):
    """
    computes the mean convergence of all the different lens model components within a spherical aperture

    :param kwargs_lens: lens model keyword argument list
    :param center_x: center of the aperture
    :param center_y: center of the aperture
    :param theta_E: radius of aperture
    :return: list of average convergences for all the model components
    """
    x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=2. * theta_E / numPix)
    x_grid += center_x
    y_grid += center_y
    mask = mask_util.mask_sphere(x_grid, y_grid, center_x, center_y, theta_E)
    kappa_list = []
    for i in range(len(kwargs_lens)):
        kappa = self.LensModel.kappa(x_grid, y_grid, kwargs_lens, k=i)
        kappa_mean = np.sum(kappa * mask) / np.sum(mask)
        kappa_list.append(kappa_mean)
    return kappa_list
python
{ "resource": "" }
q14313
PointSource.update_search_window
train
def update_search_window(self, search_window, x_center, y_center):
    """
    update the search area for the lens equation solver

    :param search_window: window size of the image position search with the lens equation solver
    :param x_center: center of search window
    :param y_center: center of search window
    :return: updated self instances
    """
    self._search_window, self._x_center, self._y_center = search_window, x_center, y_center
python
{ "resource": "" }
q14314
PointSource.point_source_list
train
def point_source_list(self, kwargs_ps, kwargs_lens, k=None):
    """
    returns the coordinates and amplitudes of all point sources in a single array

    :param kwargs_ps:
    :param kwargs_lens:
    :return:
    """
    ra_list, dec_list = self.image_position(kwargs_ps, kwargs_lens, k=k)
    # forward k so the amplitude list lines up with the selected positions
    amp_list = self.image_amplitude(kwargs_ps, kwargs_lens, k=k)
    ra_array, dec_array, amp_array = [], [], []
    for i, ra in enumerate(ra_list):
        for j in range(len(ra)):
            ra_array.append(ra_list[i][j])
            dec_array.append(dec_list[i][j])
            amp_array.append(amp_list[i][j])
    return ra_array, dec_array, amp_array
python
{ "resource": "" }
q14315
PointSource.image_amplitude
train
def image_amplitude(self, kwargs_ps, kwargs_lens, k=None):
    """
    returns the image amplitudes

    :param kwargs_ps:
    :param kwargs_lens:
    :return:
    """
    amp_list = []
    for i, model in enumerate(self._point_source_list):
        if k is None or k == i:
            amp_list.append(model.image_amplitude(kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens,
                                                  min_distance=self._min_distance,
                                                  search_window=self._search_window,
                                                  precision_limit=self._precision_limit,
                                                  num_iter_max=self._num_iter_max,
                                                  x_center=self._x_center, y_center=self._y_center))
    return amp_list
python
{ "resource": "" }
q14316
PointSource.source_amplitude
train
def source_amplitude(self, kwargs_ps, kwargs_lens):
    """
    returns the source amplitudes

    :param kwargs_ps:
    :param kwargs_lens:
    :return:
    """
    amp_list = []
    for i, model in enumerate(self._point_source_list):
        amp_list.append(model.source_amplitude(kwargs_ps=kwargs_ps[i], kwargs_lens=kwargs_lens))
    return amp_list
python
{ "resource": "" }
q14317
PointSource.re_normalize_flux
train
def re_normalize_flux(self, kwargs_ps, norm_factor):
    """
    renormalizes the point source amplitude keywords by a factor

    :param kwargs_ps: point source keyword argument list
    :param norm_factor: renormalization factor
    :return: renormalized kwargs_ps
    """
    for i, model in enumerate(self.point_source_type_list):
        if model == 'UNLENSED':
            kwargs_ps[i]['point_amp'] *= norm_factor
        elif model in ['LENSED_POSITION', 'SOURCE_POSITION']:
            if self._fixed_magnification_list[i] is True:
                kwargs_ps[i]['source_amp'] *= norm_factor
            else:
                kwargs_ps[i]['point_amp'] *= norm_factor
    return kwargs_ps
python
{ "resource": "" }
q14318
LensModelExtensions._tiling_crit
train
def _tiling_crit(self, edge1, edge2, edge_90, max_order, kwargs_lens):
    """
    tiles a rectangular triangle and compares the signs of the magnification

    :param edge1: [ra_coord, dec_coord, magnification]
    :param edge2: [ra_coord, dec_coord, magnification]
    :param edge_90: [ra_coord, dec_coord, magnification]
    :param max_order: maximal order to fold triangle
    :return:
    """
    ra_1, dec_1, mag_1 = edge1
    ra_2, dec_2, mag_2 = edge2
    ra_3, dec_3, mag_3 = edge_90
    sign_list = np.sign([mag_1, mag_2, mag_3])
    if sign_list[0] == sign_list[1] and sign_list[0] == sign_list[2]:  # if all signs are the same
        return [], []
    else:
        # split triangle along the long axis
        # execute tiling twice
        # add ra_crit and dec_crit together
        # if max depth has been reached, return the mean value in the triangle
        max_order -= 1
        if max_order <= 0:
            return [(ra_1 + ra_2 + ra_3) / 3], [(dec_1 + dec_2 + dec_3) / 3]
        else:
            # split triangle
            ra_90_ = (ra_1 + ra_2) / 2  # find point in the middle of the long axis to split triangle
            dec_90_ = (dec_1 + dec_2) / 2
            mag_90_ = self._lensModel.magnification(ra_90_, dec_90_, kwargs_lens)
            edge_90_ = [ra_90_, dec_90_, mag_90_]
            ra_crit, dec_crit = self._tiling_crit(edge1=edge_90, edge2=edge1, edge_90=edge_90_,
                                                  max_order=max_order, kwargs_lens=kwargs_lens)
            ra_crit_2, dec_crit_2 = self._tiling_crit(edge1=edge_90, edge2=edge2, edge_90=edge_90_,
                                                      max_order=max_order, kwargs_lens=kwargs_lens)
            ra_crit += ra_crit_2
            dec_crit += dec_crit_2
            return ra_crit, dec_crit
python
{ "resource": "" }
q14319
LensModelExtensions.effective_einstein_radius
train
def effective_einstein_radius(self, kwargs_lens_list, k=None, spacing=1000):
    """
    computes the radius with mean convergence = 1

    :param kwargs_lens_list: lens model keyword argument list
    :param spacing: number of annular bins to compute the convergence (resolution of the Einstein radius estimate)
    :return:
    """
    if 'center_x' in kwargs_lens_list[0]:
        center_x = kwargs_lens_list[0]['center_x']
        center_y = kwargs_lens_list[0]['center_y']
    else:
        center_x, center_y = 0, 0
    numPix = 200
    deltaPix = 0.05
    x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
    x_grid += center_x
    y_grid += center_y
    kappa = self._lensModel.kappa(x_grid, y_grid, kwargs_lens_list, k=k)
    if self._lensModel.lens_model_list[0] in ['INTERPOL', 'INTERPOL_SCALED']:
        # for interpolated models, re-center on the convergence peak
        center_x = x_grid[kappa == np.max(kappa)]
        center_y = y_grid[kappa == np.max(kappa)]
    kappa = util.array2image(kappa)
    r_array = np.linspace(0.0001, numPix * deltaPix / 2., spacing)
    for r in r_array:
        mask = np.array(1 - mask_util.mask_center_2d(center_x, center_y, r, x_grid, y_grid))
        sum_mask = np.sum(mask)
        if sum_mask > 0:
            kappa_mean = np.sum(kappa * mask) / np.sum(mask)
            if kappa_mean < 1:
                return r
    print(kwargs_lens_list, "Warning: no Einstein radius computed!")
    return r_array[-1]
python
{ "resource": "" }
q14320
LensModelExtensions.lens_center
train
def lens_center(self, kwargs_lens, k=None, bool_list=None, numPix=200, deltaPix=0.01, center_x_init=0,
                center_y_init=0):
    """
    computes the convergence-weighted center of a lens model

    :param kwargs_lens: lens model keyword argument list
    :param bool_list: bool list (optional) to include certain models or not
    :return: center_x, center_y
    """
    x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
    x_grid += center_x_init
    y_grid += center_y_init
    if bool_list is None:
        kappa = self._lensModel.kappa(x_grid, y_grid, kwargs_lens, k=k)
    else:
        kappa = np.zeros_like(x_grid)
        for i in range(len(kwargs_lens)):  # loop variable renamed from 'k' to avoid shadowing the parameter
            if bool_list[i] is True:
                kappa += self._lensModel.kappa(x_grid, y_grid, kwargs_lens, k=i)
    center_x = x_grid[kappa == np.max(kappa)]
    center_y = y_grid[kappa == np.max(kappa)]
    return center_x, center_y
python
{ "resource": "" }
q14321
LensModelExtensions.profile_slope
train
def profile_slope(self, kwargs_lens_list, lens_model_internal_bool=None, num_points=10):
    """
    computes the logarithmic power-law slope of a profile

    :param kwargs_lens_list: lens model keyword argument list
    :param lens_model_internal_bool: bool list, indicates which parts of the model to consider
    :param num_points: number of estimates around the Einstein radius
    :return: power-law slope gamma
    """
    theta_E = self.effective_einstein_radius(kwargs_lens_list)
    x0 = kwargs_lens_list[0]['center_x']
    y0 = kwargs_lens_list[0]['center_y']
    x, y = util.points_on_circle(theta_E, num_points)
    dr = 0.01
    x_dr, y_dr = util.points_on_circle(theta_E + dr, num_points)
    if lens_model_internal_bool is None:
        lens_model_internal_bool = [True] * len(kwargs_lens_list)
    alpha_E_x_i, alpha_E_y_i = self._lensModel.alpha(x0 + x, y0 + y, kwargs_lens_list, k=lens_model_internal_bool)
    alpha_E_r = np.sqrt(alpha_E_x_i ** 2 + alpha_E_y_i ** 2)
    alpha_E_dr_x_i, alpha_E_dr_y_i = self._lensModel.alpha(x0 + x_dr, y0 + y_dr, kwargs_lens_list,
                                                           k=lens_model_internal_bool)
    alpha_E_dr = np.sqrt(alpha_E_dr_x_i ** 2 + alpha_E_dr_y_i ** 2)
    slope = np.mean(np.log(alpha_E_dr / alpha_E_r) / np.log((theta_E + dr) / theta_E))
    gamma = -slope + 2
    return gamma
python
{ "resource": "" }
q14322
Background.D_dt
train
def D_dt(self, z_lens, z_source):
    """
    time-delay distance

    :param z_lens: redshift of lens
    :param z_source: redshift of source
    :return: time-delay distance in units of Mpc
    """
    return self.D_xy(0, z_lens) * self.D_xy(0, z_source) / self.D_xy(z_lens, z_source) * (1 + z_lens)
python
{ "resource": "" }
q14323
ParticleSwarmOptimizer._sample
train
def _sample(self, maxIter=1000, c1=1.193, c2=1.193, lookback=0.25, standard_dev=None):
    """
    Launches the PSO. Yields the complete swarm per iteration.

    :param maxIter: maximum iterations
    :param c1: cognitive weight
    :param c2: social weight
    :param lookback: percentage of particles to use when determining convergence
    :param standard_dev: standard deviation of the last lookback particles for convergence
    """
    self._get_fitness(self.swarm)
    i = 0
    self.i = i
    while True:
        for particle in self.swarm:
            if self._gbest.fitness < particle.fitness:
                self._gbest = particle.copy()
            if particle.fitness > particle.pbest.fitness:
                particle.updatePBest()
        if i >= maxIter:
            if self._verbose:
                print("max iteration reached! stopping")
            return
        if self._func.is_converged:
            return
        if self._converged_likelihood(maxIter * lookback, self._particleCount, standard_dev):
            return
        for particle in self.swarm:
            w = 0.5 + numpy.random.uniform(0, 1, size=self._paramCount) / 2
            # w = 0.72
            part_vel = w * particle.velocity
            cog_vel = c1 * numpy.random.uniform(0, 1, size=self._paramCount) * (particle.pbest.position - particle.position)
            soc_vel = c2 * numpy.random.uniform(0, 1, size=self._paramCount) * (self._gbest.position - particle.position)
            particle.velocity = part_vel + cog_vel + soc_vel
            particle.position = particle.position + particle.velocity
        self._get_fitness(self.swarm)
        swarm = []
        for particle in self.swarm:
            swarm.append(particle.copy())
        yield swarm
        i += 1
        self.i = i
python
{ "resource": "" }
q14324
Particle.create
train
def create(cls, paramCount):
    """
    Creates a new particle with empty position and velocity and -inf as fitness
    """
    return Particle(numpy.array([[]] * paramCount), numpy.array([[]] * paramCount), -numpy.Inf)
python
{ "resource": "" }
q14325
Particle.copy
train
def copy(self):
    """
    Creates a copy of itself
    """
    return Particle(copy(self.position), copy(self.velocity), self.fitness)
python
{ "resource": "" }
q14326
moments
train
def moments(I_xy_input, x, y):
    """
    compute quadrupole moments from a light distribution

    :param I_xy_input: light distribution
    :param x: x-coordinates of I_xy
    :param y: y-coordinates of I_xy
    :return: Q_xx, Q_xy, Q_yy, and the background level relative to the mean light
    """
    I_xy = copy.deepcopy(I_xy_input)
    background = np.minimum(0, np.min(I_xy))
    I_xy -= background
    x_ = np.sum(I_xy * x)
    y_ = np.sum(I_xy * y)
    r = (np.max(x) - np.min(x)) / 3.
    mask = mask_util.mask_sphere(x, y, center_x=x_, center_y=y_, r=r)
    Q_xx = np.sum(I_xy * mask * (x - x_) ** 2)
    Q_xy = np.sum(I_xy * mask * (x - x_) * (y - y_))
    Q_yy = np.sum(I_xy * mask * (y - y_) ** 2)
    return Q_xx, Q_xy, Q_yy, background / np.mean(I_xy)
python
{ "resource": "" }
q14327
ellipticities
train
def ellipticities(I_xy, x, y):
    """
    compute ellipticities of a light distribution

    :param I_xy: light distribution
    :param x: x-coordinates of I_xy
    :param y: y-coordinates of I_xy
    :return: ellipticity components e1, e2
    """
    Q_xx, Q_xy, Q_yy, bkg = moments(I_xy, x, y)
    norm = Q_xx + Q_yy + 2 * np.sqrt(Q_xx * Q_yy - Q_xy ** 2)
    e1 = (Q_xx - Q_yy) / norm
    e2 = 2 * Q_xy / norm
    return e1 / (1 + bkg), e2 / (1 + bkg)
python
{ "resource": "" }
q14328
MultiPlane.alpha
train
def alpha(self, theta_x, theta_y, kwargs_lens, k=None):
    """
    reduced deflection angle

    :param theta_x: angle in x-direction
    :param theta_y: angle in y-direction
    :param kwargs_lens: lens model kwargs
    :return:
    """
    beta_x, beta_y = self.ray_shooting(theta_x, theta_y, kwargs_lens)
    alpha_x = theta_x - beta_x
    alpha_y = theta_y - beta_y
    return alpha_x, alpha_y
python
{ "resource": "" }
q14329
MultiPlane.hessian
train
def hessian(self, theta_x, theta_y, kwargs_lens, k=None, diff=1e-8):
    """
    computes the hessian components f_xx, f_yy, f_xy from f_x and f_y with numerical differentiation

    :param theta_x: x-position (preferentially arcsec)
    :type theta_x: numpy array
    :param theta_y: y-position (preferentially arcsec)
    :type theta_y: numpy array
    :param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes
    :param diff: numerical differential step (float)
    :return: f_xx, f_xy, f_yx, f_yy
    """
    alpha_ra, alpha_dec = self.alpha(theta_x, theta_y, kwargs_lens)
    alpha_ra_dx, alpha_dec_dx = self.alpha(theta_x + diff, theta_y, kwargs_lens)
    alpha_ra_dy, alpha_dec_dy = self.alpha(theta_x, theta_y + diff, kwargs_lens)
    dalpha_rara = (alpha_ra_dx - alpha_ra) / diff
    dalpha_radec = (alpha_ra_dy - alpha_ra) / diff
    dalpha_decra = (alpha_dec_dx - alpha_dec) / diff
    dalpha_decdec = (alpha_dec_dy - alpha_dec) / diff
    f_xx = dalpha_rara
    f_yy = dalpha_decdec
    f_xy = dalpha_radec
    f_yx = dalpha_decra
    return f_xx, f_xy, f_yx, f_yy
python
{ "resource": "" }
q14330
MultiPlane._co_moving2angle_source
train
def _co_moving2angle_source(self, x, y):
    """
    special case of the co_moving2angle definition at the source redshift

    :param x:
    :param y:
    :return:
    """
    T_z = self._T_z_source
    theta_x = x / T_z
    theta_y = y / T_z
    return theta_x, theta_y
python
{ "resource": "" }
q14331
MultiPlane._ray_step
train
def _ray_step(self, x, y, alpha_x, alpha_y, delta_T):
    """
    ray propagation with small angle approximation

    :param x: co-moving x-position
    :param y: co-moving y-position
    :param alpha_x: deflection angle in x-direction at (x, y)
    :param alpha_y: deflection angle in y-direction at (x, y)
    :param delta_T: transversal angular diameter distance to the next step
    :return:
    """
    x_ = x + alpha_x * delta_T
    y_ = y + alpha_y * delta_T
    return x_, y_
python
{ "resource": "" }
q14332
MultiPlane._add_deflection
train
def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, idex):
    """
    adds the physical deflection angle of a single lens plane to the deflection field

    :param x: co-moving distance at the deflector plane
    :param y: co-moving distance at the deflector plane
    :param alpha_x: physical angle (radian) before the deflector plane
    :param alpha_y: physical angle (radian) before the deflector plane
    :param kwargs_lens: lens model parameter kwargs
    :param idex: index of the lens model to be added
    :return: updated physical deflection after deflector plane (in a backwards ray-tracing perspective)
    """
    theta_x, theta_y = self._co_moving2angle(x, y, idex)
    alpha_x_red, alpha_y_red = self._lens_model.alpha(theta_x, theta_y, kwargs_lens,
                                                      k=self._sorted_redshift_index[idex])
    alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, idex)
    alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, idex)
    alpha_x_new = alpha_x - alpha_x_phys
    alpha_y_new = alpha_y - alpha_y_phys
    return alpha_x_new, alpha_y_new
python
{ "resource": "" }
q14333
SinglePlane.mass_3d
train
def mass_3d(self, r, kwargs, bool_list=None):
    """
    computes the mass within a 3d sphere of radius r

    :param r: radius (in angular units)
    :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
    :param bool_list: list of bools selecting which model components contribute to the output
    :return: mass (in angular units, modulo epsilon_crit)
    """
    bool_list = self._bool_list(bool_list)
    mass_3d = 0
    for i, func in enumerate(self.func_list):
        if bool_list[i] is True:
            kwargs_i = {k: v for k, v in kwargs[i].items() if k not in ['center_x', 'center_y']}
            mass_3d_i = func.mass_3d_lens(r, **kwargs_i)
            mass_3d += mass_3d_i
            # except:
            #     raise ValueError('Lens profile %s does not support a 3d mass function!' % self.model_list[i])
    return mass_3d
python
{ "resource": "" }
q14334
Filters.close_ns
train
def close_ns(symbol):
    '''generates a closing namespace statement from a symbol'''
    closing = ' '.join(['}' for _ in symbol.module.name_parts])
    name = '::'.join(symbol.module.name_parts)
    return '{0} // namespace {1}'.format(closing, name)
python
{ "resource": "" }
q14335
app
train
def app(config, src, dst, features, reload, force):
    """Takes several files or directories as src and generates the code in the given dst directory."""
    config = Path(config)
    if reload:
        argv = sys.argv.copy()
        argv.remove('--reload')
        monitor(config.dirname(), src, dst, argv)
    else:
        run(config, src, dst, force)
python
{ "resource": "" }
q14336
reload
train
def reload(script, input, output):
    """
    reloads the generator script when the script files or the input files change
    """
    script = Path(script).expand().abspath()
    output = Path(output).expand().abspath()
    input = input if isinstance(input, (list, tuple)) else [input]
    output.makedirs_p()
    _script_reload(script, input, output)
python
{ "resource": "" }
q14337
_script_reload
train
def _script_reload(script, input, output):
    """run the named generator and monitor the input and generator folders"""
    input = [Path(entry).expand().abspath() for entry in input]
    output = Path(output).expand().abspath()
    cmd = 'python3 {0} {1} {2}'.format(script, ' '.join(input), output)
    event_handler = RunScriptChangeHandler(cmd)
    event_handler.run()  # always run once
    observer = Observer()
    path = script.dirname().expand().abspath()
    click.secho('watch: {0}'.format(path), fg='blue')
    observer.schedule(event_handler, path, recursive=True)
    for entry in input:
        entry = entry.dirname().expand().abspath()
        click.secho('watch: {0}'.format(entry), fg='blue')
        observer.schedule(event_handler, entry, recursive=True)
    path = Path(__file__).parent / 'qface'
    click.secho('watch: {0}'.format(path), fg='blue')
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
python
{ "resource": "" }
q14338
install
train
def install(editable):
    """install the script onto the system using pip3"""
    script_dir = str(Path(__file__).parent.abspath())
    click.secho(script_dir, fg='blue')
    if editable:
        sh('pip3 install --editable {0} --upgrade'.format(script_dir))
    else:
        sh('pip3 install {0} --upgrade'.format(script_dir))
python
{ "resource": "" }
q14339
merge
train
def merge(a, b):
    """merges b into a recursively if a and b are dicts"""
    for key in b:
        if isinstance(a.get(key), dict) and isinstance(b.get(key), dict):
            merge(a[key], b[key])
        else:
            a[key] = b[key]
    return a
python
{ "resource": "" }
q14340
Generator.get_template
train
def get_template(self, name):
    """Retrieves a single template file from the template loader"""
    source = name
    if name and name[0] == '/':  # use '==', not 'is', for string comparison
        source = name[1:]
    elif self.source is not None:
        source = '/'.join((self.source, name))
    return self.env.get_template(source)
python
{ "resource": "" }
q14341
Generator.render
train
def render(self, name, context):
    """Returns the rendered text from a single template file from the template loader using the given context data"""
    if Generator.strict:
        self.env.undefined = TestableUndefined
    else:
        self.env.undefined = Undefined
    template = self.get_template(name)
    return template.render(context)
python
{ "resource": "" }
q14342
Generator.apply
train
def apply(self, template, context={}):
    """Returns the rendered text of a template instance"""
    context.update(self.context)
    return self.env.from_string(template).render(context)
python
{ "resource": "" }
q14343
Generator.write
train
def write(self, file_path, template, context={}, preserve=False, force=False):
    """Using a template file name, renders a template into a file given a context"""
    if not file_path or not template:
        click.secho('source or target missing for document')
        return
    if not context:
        context = self.context
    error = False
    try:
        self._write(file_path, template, context, preserve, force)
    except TemplateSyntaxError as exc:
        message = '{0}:{1}: error: {2}'.format(exc.filename, exc.lineno, exc.message)
        click.secho(message, fg='red', err=True)
        error = True
    except TemplateNotFound as exc:
        message = '{0}: error: Template not found'.format(exc.name)
        click.secho(message, fg='red', err=True)
        error = True
    except TemplateError as exc:
        # Just return with an error; the generic template_error_handler takes care of printing it
        error = True
    if error and Generator.strict:
        sys.exit(1)
python
{ "resource": "" }
q14344
RuleGenerator.process_rules
train
def process_rules(self, path: Path, system: System):
    """writes the templates read from the rules document"""
    self.context.update({
        'system': system,
    })
    document = FileSystem.load_yaml(path, required=True)
    for module, rules in document.items():
        click.secho('process: {0}'.format(module), fg='green')
        self._process_rules(rules, system)
python
{ "resource": "" }
q14345
RuleGenerator._process_rules
train
def _process_rules(self, rules: dict, system: System):
    """process a set of rules for a target"""
    self._source = None  # reset the template source
    if not self._shall_proceed(rules):
        return
    self.context.update(rules.get('context', {}))
    self.path = rules.get('path', '')
    self.source = rules.get('source', None)
    self._process_rule(rules.get('system', None), {'system': system})
    for module in system.modules:
        self._process_rule(rules.get('module', None), {'module': module})
        for interface in module.interfaces:
            self._process_rule(rules.get('interface', None), {'interface': interface})
        for struct in module.structs:
            self._process_rule(rules.get('struct', None), {'struct': struct})
        for enum in module.enums:
            self._process_rule(rules.get('enum', None), {'enum': enum})
python
{ "resource": "" }
q14346
RuleGenerator._process_rule
train
def _process_rule(self, rule: dict, context: dict):
    """process a single rule"""
    if not rule or not self._shall_proceed(rule):
        return
    self.context.update(context)
    self.context.update(rule.get('context', {}))
    self.path = rule.get('path', None)
    self.source = rule.get('source', None)
    for entry in rule.get('documents', []):
        target, source = self._resolve_rule_document(entry)
        self.write(target, source)
    for entry in rule.get('preserve', []):
        target, source = self._resolve_rule_document(entry)
        self.write(target, source, preserve=True)
python
{ "resource": "" }
q14347
FileSystem._parse_document
train
def _parse_document(document: Path, system: System = None, profile=EProfile.FULL):
    """Parses a document and returns the resulting domain system

    :param document: document path to parse
    :param system: system to be used (optional)
    """
    logger.debug('parse document: {0}'.format(document))
    stream = FileStream(str(document), encoding='utf-8')
    system = FileSystem._parse_stream(stream, system, document, profile)
    FileSystem.merge_annotations(system, document.stripext() + '.yaml')
    return system
python
{ "resource": "" }
q14348
FileSystem.merge_annotations
train
def merge_annotations(system, document):
    """Reads a YAML document and for each root symbol identifier updates the tag information of that symbol"""
    if not Path(document).exists():
        return
    meta = FileSystem.load_yaml(document)
    click.secho('merge: {0}'.format(document.name), fg='blue')
    for identifier, data in meta.items():
        symbol = system.lookup(identifier)
        if symbol:
            merge(symbol.tags, data)
python
{ "resource": "" }
q14349
FileSystem.parse
train
def parse(input, identifier: str = None, use_cache=False, clear_cache=True, pattern="*.qface",
          profile=EProfile.FULL):
    """Input can be either a file or directory, or a list of files or directories.
    A directory will be parsed recursively. The function returns the resulting system.
    Stores the result of the run in the domain cache named after the identifier.

    :param input: files or directories to parse
    :param identifier: identifies the parse run. Used to name the cache
    :param clear_cache: clears the domain cache (defaults to true)
    """
    inputs = input if isinstance(input, (list, tuple)) else [input]
    logger.debug('parse input={0}'.format(inputs))
    identifier = 'system' if not identifier else identifier
    system = System()
    cache = None
    if use_cache:
        cache = shelve.open('qface.cache')
        if identifier in cache and clear_cache:
            del cache[identifier]
        if identifier in cache:
            # use the cached domain model
            system = cache[identifier]
    # if the domain model is not cached, generate it
    for input in inputs:
        path = Path.getcwd() / str(input)
        if path.isfile():
            FileSystem.parse_document(path, system)
        else:
            for document in path.walkfiles(pattern):
                FileSystem.parse_document(document, system)
    if use_cache:
        cache[identifier] = system
    return system
python
{ "resource": "" }
q14350
monitor
train
def monitor(args, watch):
    """
    reloads the script given by args when watched files change
    """
    watch = watch if isinstance(watch, (list, tuple)) else [watch]
    watch = [Path(entry).expand().abspath() for entry in watch]
    event_handler = RunScriptChangeHandler(args)
    observer = Observer()
    for entry in watch:
        if entry.isfile():
            entry = entry.parent
        click.secho('watch recursive: {0}'.format(entry), fg='blue')
        observer.schedule(event_handler, entry, recursive=True)
    event_handler.run()  # always run once
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
python
{ "resource": "" }
q14351
System.lookup
train
def lookup(self, name: str):
    '''lookup a symbol by fully qualified name.'''
    # <module>
    if name in self._moduleMap:
        return self._moduleMap[name]
    # <module>.<Symbol>
    (module_name, type_name, fragment_name) = self.split_typename(name)
    if not module_name and type_name:
        click.secho('not able to lookup symbol: {0}'.format(name), fg='red')
        return None
    module = self._moduleMap[module_name]
    return module.lookup(type_name, fragment_name)
python
{ "resource": "" }
q14352
Symbol.add_tag
train
def add_tag(self, tag):
    """add a tag to the tag list"""
    if tag not in self._tags:
        self._tags[tag] = dict()
python
{ "resource": "" }
q14353
Symbol.attribute
train
def attribute(self, tag, name):
    """return attribute by tag and attribute name"""
    if tag in self._tags and name in self._tags[tag]:
        return self._tags[tag][name]
python
{ "resource": "" }
q14354
TypeSymbol.is_valid
train
def is_valid(self):
    '''checks if type is a valid type'''
    return (self.is_primitive and self.name) \
        or (self.is_complex and self.name) \
        or (self.is_list and self.nested) \
        or (self.is_map and self.nested) \
        or (self.is_model and self.nested)
python
{ "resource": "" }
q14355
TypeSymbol._resolve
train
def _resolve(self):
    """resolve the type symbol from name by doing a lookup"""
    self.__is_resolved = True
    if self.is_complex:
        type = self.nested if self.nested else self
        type.__reference = self.module.lookup(type.name)
python
{ "resource": "" }
q14356
Module.lookup
train
def lookup(self, name: str, fragment: str = None):
    '''lookup a symbol by name. If the symbol is not local it will be looked up system-wide'''
    if name in self._contentMap:
        symbol = self._contentMap[name]
        if fragment:
            return symbol._contentMap[fragment]
        return symbol
    return self.system.lookup(name)
python
{ "resource": "" }
q14357
jsonify
train
def jsonify(symbol):
    """returns json format for symbol"""
    try:
        # all symbols have a toJson method, try it
        return json.dumps(symbol.toJson(), indent=' ')
    except AttributeError:
        pass
    return json.dumps(symbol, indent=' ')
python
{ "resource": "" }
q14358
hash
train
def hash(symbol, hash_type='sha1'):
    """create a hash code from symbol"""
    code = hashlib.new(hash_type)
    code.update(str(symbol).encode('utf-8'))
    return code.hexdigest()
python
{ "resource": "" }
q14359
sh
train
def sh(args, **kwargs):
    """runs the given cmd as shell command"""
    if isinstance(args, str):
        args = args.split()
    if not args:
        return
    click.echo('$ {0}'.format(' '.join(args)))
    try:
        return subprocess.check_call(args, **kwargs)
    except subprocess.CalledProcessError as exc:
        click.secho('run error {}'.format(exc))
    except OSError as exc:
        click.secho('not found error {}'.format(exc))
python
{ "resource": "" }
q14360
parse_doc
train
def parse_doc(s):
    """
    parse a comment in the format of JavaDoc and return an object,
    where each JavaDoc tag is a property of the object.
    """
    if not s:
        return
    doc = DocObject()
    tag = None
    s = s[3:-2]  # remove '/**' and '*/'
    for line in s.splitlines():
        line = line.lstrip(' *')  # strip ' ' and '*' from the start
        if not line:
            tag = None  # on empty line, reset the tag information
        elif line[0] == '@':
            line = line[1:]
            res = line.split(maxsplit=1)
            if len(res) == 0:
                continue
            tag = res[0]
            if len(res) == 1:
                doc.add_tag(tag, True)
            elif len(res) == 2:
                value = res[1]
                doc.add_tag(tag, value)
        elif tag:
            # append to previously matched tag
            doc.add_tag(tag, line)
        else:
            # append any loose lines to the description
            doc.add_tag('description', line)
    return doc
python
{ "resource": "" }
q14361
IsUserInUrl.has_permission
train
def has_permission(self, request, view):
    """
    Returns true if the current request is by the user themselves.

    Note: a 404 is returned for non-staff instead of a 403. This is to prevent
    users from being able to detect the existence of accounts.
    """
    url_username = request.parser_context.get('kwargs', {}).get('username', '')
    if request.user.username.lower() != url_username.lower():
        if request.user.is_staff:
            return False  # staff gets 403
        raise Http404()
    return True
python
{ "resource": "" }
q14362
BigAutoField.db_type
train
def db_type(self, connection):
    """
    The type of the field to insert into the database.
    """
    conn_module = type(connection).__module__
    if "mysql" in conn_module:
        return "bigint AUTO_INCREMENT"
    elif "postgres" in conn_module:
        return "bigserial"
    return super(BigAutoField, self).db_type(connection)
python
{ "resource": "" }
q14363
BlockCompletionManager.submit_completion
train
def submit_completion(self, user, course_key, block_key, completion):
    """
    Update the completion value for the specified record.

    Parameters:
        * user (django.contrib.auth.models.User): The user for whom the completion is being submitted.
        * course_key (opaque_keys.edx.keys.CourseKey): The course in which the submitted block is found.
        * block_key (opaque_keys.edx.keys.UsageKey): The block that has had its completion changed.
        * completion (float in range [0.0, 1.0]): The fractional completion value of the block
          (0.0 = incomplete, 1.0 = complete).

    Return Value:
        (BlockCompletion, bool): A tuple comprising the created or updated BlockCompletion object
        and a boolean value indicating whether the object was newly created by this call.

    Raises:
        ValueError: If the wrong type is passed for one of the parameters.
        django.core.exceptions.ValidationError: If a float is passed that is not between 0.0 and 1.0.
        django.db.DatabaseError: If there was a problem getting, creating, or updating the
            BlockCompletion record in the database. This will also be a more specific error, as described here:
            https://docs.djangoproject.com/en/1.11/ref/exceptions/#database-exceptions.
            IntegrityError and OperationalError are relatively common subclasses.
    """
    # Raise ValueError to match normal django semantics for wrong type of field.
    if not isinstance(course_key, CourseKey):
        raise ValueError(
            "course_key must be an instance of `opaque_keys.edx.keys.CourseKey`. Got {}".format(type(course_key))
        )
    try:
        block_type = block_key.block_type
    except AttributeError:
        raise ValueError(
            "block_key must be an instance of `opaque_keys.edx.keys.UsageKey`. Got {}".format(type(block_key))
        )
    if waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
        try:
            with transaction.atomic():
                obj, is_new = self.get_or_create(  # pylint: disable=unpacking-non-sequence
                    user=user,
                    course_key=course_key,
                    block_key=block_key,
                    defaults={
                        'completion': completion,
                        'block_type': block_type,
                    },
                )
        except IntegrityError:
            # The completion was created concurrently by another process
            log.info(
                "An IntegrityError was raised when trying to create a BlockCompletion for %s:%s:%s. "
                "Falling back to get().",
                user, course_key, block_key,
            )
            obj = self.get(
                user=user,
                course_key=course_key,
                block_key=block_key,
            )
            is_new = False
        if not is_new and obj.completion != completion:
            obj.completion = completion
            obj.full_clean()
            obj.save(update_fields={'completion', 'modified'})
    else:
        # If the feature is not enabled, this method should not be called.
        # Error out with a RuntimeError.
        raise RuntimeError(
            "BlockCompletion.objects.submit_completion should not be called when the feature is disabled."
        )
    return obj, is_new
python
{ "resource": "" }
q14364
BlockCompletionManager.submit_batch_completion
train
def submit_batch_completion(self, user, course_key, blocks):
    """
    Performs a batch insertion of completion objects.

    Parameters:
        * user (django.contrib.auth.models.User): The user for whom the completions are being submitted.
        * course_key (opaque_keys.edx.keys.CourseKey): The course in which the submitted blocks are found.
        * blocks: A list of tuples of UsageKey to float completion values (float in range [0.0, 1.0]):
          The fractional completion value of the block (0.0 = incomplete, 1.0 = complete).

    Return Value:
        Dict of (BlockCompletion, bool): A dictionary with a BlockCompletion object key and a value of bool.
        The boolean value indicates whether the object was newly created by this call.

    Raises:
        ValueError: If the wrong type is passed for one of the parameters.
        django.core.exceptions.ValidationError: If a float is passed that is not between 0.0 and 1.0.
        django.db.DatabaseError: If there was a problem getting, creating, or updating the
            BlockCompletion record in the database.
    """
    block_completions = {}
    for block, completion in blocks:
        (block_completion, is_new) = self.submit_completion(user, course_key, block, completion)
        block_completions[block_completion] = is_new
    return block_completions
python
{ "resource": "" }
q14365
BlockCompletion.full_block_key
train
def full_block_key(self):
    """
    Returns the "correct" usage key value with the run filled in.
    """
    if self.block_key.run is None:
        # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
        return self.block_key.replace(course_key=self.course_key)
    return self.block_key
python
{ "resource": "" }
q14366
BlockCompletion.get_course_completions
train
def get_course_completions(cls, user, course_key):
    """
    Returns a dictionary mapping BlockKeys to completion values for all
    BlockCompletion records for the given user and course_key.

    Return value:
        dict[BlockKey] = float
    """
    user_course_completions = cls.user_course_completion_queryset(user, course_key)
    return cls.completion_by_block_key(user_course_completions)
python
{ "resource": "" }
q14367
BlockCompletion.user_course_completion_queryset
train
def user_course_completion_queryset(cls, user, course_key):
    """
    Returns a QuerySet of completions for a given user and course_key.
    """
    return cls.objects.filter(user=user, course_key=course_key)
python
{ "resource": "" }
q14368
scorable_block_completion
train
def scorable_block_completion(sender, **kwargs):  # pylint: disable=unused-argument
    """
    When a problem is scored, submit a new BlockCompletion for that block.
    """
    if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
        return
    course_key = CourseKey.from_string(kwargs['course_id'])
    block_key = UsageKey.from_string(kwargs['usage_id'])
    block_cls = XBlock.load_class(block_key.block_type)
    if XBlockCompletionMode.get_mode(block_cls) != XBlockCompletionMode.COMPLETABLE:
        return
    if getattr(block_cls, 'has_custom_completion', False):
        return
    user = User.objects.get(id=kwargs['user_id'])
    if kwargs.get('score_deleted'):
        completion = 0.0
    else:
        completion = 1.0
    if not kwargs.get('grader_response'):
        BlockCompletion.objects.submit_completion(
            user=user,
            course_key=course_key,
            block_key=block_key,
            completion=completion,
        )
python
{ "resource": "" }
q14369
CompletionBatchView._validate_and_parse
train
def _validate_and_parse(self, batch_object):
    """
    Performs validation on the batch object to make sure it is in the proper format.

    Parameters:
        * batch_object: The data provided to a POST. The expected format is the following:
            {
                "username": "username",
                "course_key": "course-key",
                "blocks": {
                    "block_key1": 0.0,
                    "block_key2": 1.0,
                    "block_key3": 1.0
                }
            }

    Return Value:
        * tuple: (User, CourseKey, List of tuples (UsageKey, completion_float))

    Raises:
        django.core.exceptions.ValidationError: If any aspect of validation fails, a ValidationError is raised.
        ObjectDoesNotExist: If a database object cannot be found, an ObjectDoesNotExist is raised.
    """
    if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
        raise ValidationError(
            _("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
        )
    for key in self.REQUIRED_KEYS:
        if key not in batch_object:
            raise ValidationError(_("Key '{key}' not found.").format(key=key))
    username = batch_object['username']
    user = User.objects.get(username=username)
    course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
    if not CourseEnrollment.is_enrolled(user, course_key_obj):
        raise ValidationError(_('User is not enrolled in course.'))
    blocks = batch_object['blocks']
    block_objs = []
    for block_key in blocks:
        block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
        completion = float(blocks[block_key])
        block_objs.append((block_key_obj, completion))
    return user, course_key_obj, block_objs
python
{ "resource": "" }
q14370
CompletionBatchView._validate_and_parse_course_key
train
def _validate_and_parse_course_key(self, course_key):
    """
    Returns a validated, parsed CourseKey deserialized from the given course_key.
    """
    try:
        return CourseKey.from_string(course_key)
    except InvalidKeyError:
        raise ValidationError(_("Invalid course key: {}").format(course_key))
python
{ "resource": "" }
q14371
CompletionBatchView._validate_and_parse_block_key
train
def _validate_and_parse_block_key(self, block_key, course_key_obj):
    """
    Returns a validated, parsed UsageKey deserialized from the given block_key.
    """
    try:
        block_key_obj = UsageKey.from_string(block_key)
    except InvalidKeyError:
        raise ValidationError(_("Invalid block key: {}").format(block_key))
    if block_key_obj.run is None:
        expected_matching_course_key = course_key_obj.replace(run=None)
    else:
        expected_matching_course_key = course_key_obj
    if block_key_obj.course_key != expected_matching_course_key:
        raise ValidationError(
            _("Block with key: '{key}' is not in course {course}").format(key=block_key, course=course_key_obj)
        )
    return block_key_obj
python
{ "resource": "" }
q14372
CompletionBatchView.post
train
def post(self, request, *args, **kwargs):  # pylint: disable=unused-argument
    """
    Inserts a batch of completions.

    REST Endpoint Format:
        {
            "username": "username",
            "course_key": "course-key",
            "blocks": {
                "block_key1": 0.0,
                "block_key2": 1.0,
                "block_key3": 1.0
            }
        }

    **Returns**

    A Response object, with an appropriate status code.

    If successful, status code is 200:
        { "detail": _("ok") }

    Otherwise, a 400 or 404 may be returned, and the "detail" content will explain the error.
    """
    batch_object = request.data or {}
    try:
        user, course_key, blocks = self._validate_and_parse(batch_object)
        BlockCompletion.objects.submit_batch_completion(user, course_key, blocks)
    except ValidationError as exc:
        return Response({
            "detail": _(' ').join(text_type(msg) for msg in exc.messages),
        }, status=status.HTTP_400_BAD_REQUEST)
    except ValueError as exc:
        return Response({
            "detail": text_type(exc),
        }, status=status.HTTP_400_BAD_REQUEST)
    except ObjectDoesNotExist as exc:
        return Response({
            "detail": text_type(exc),
        }, status=status.HTTP_404_NOT_FOUND)
    except DatabaseError as exc:
        return Response({
            "detail": text_type(exc),
        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
python
{ "resource": "" }
q14373
CompletionService.get_completions
train
def get_completions(self, candidates):
    """
    Given an iterable collection of block_keys in the course, returns a mapping of the
    block_keys to the present completion values of their associated blocks.

    If a completion is not found for a given block in the current course, 0.0 is returned.
    The service does not attempt to verify that the block exists within the course.

    Parameters:
        candidates: collection of BlockKeys within the current course.

    Note: Usage keys may not have the course run filled in for old mongo courses.
    This method checks for completion records against a set of BlockKey candidates
    with the course run filled in from self._course_key.

    Return value:
        dict[BlockKey] -> float: Mapping blocks to their completion value.
    """
    queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
        block_key__in=candidates
    )
    completions = BlockCompletion.completion_by_block_key(queryset)
    candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
    for candidate in candidates_with_runs:
        if candidate not in completions:
            completions[candidate] = 0.0
    return completions
python
{ "resource": "" }
q14374
CompletionService.can_mark_block_complete_on_view
train
def can_mark_block_complete_on_view(self, block):
    """
    Returns True if the xblock can be marked complete on view.

    This is true of any non-customized, non-scorable, completable block.
    """
    return (
        XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
        and not getattr(block, 'has_custom_completion', False)
        and not getattr(block, 'has_score', False)
    )
python
{ "resource": "" }
q14375
CompletionService.blocks_to_mark_complete_on_view
train
def blocks_to_mark_complete_on_view(self, blocks): """ Returns a set of blocks which should be marked complete on view and haven't been yet. """ blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)} completions = self.get_completions({block.location for block in blocks}) return {block for block in blocks if completions.get(block.location, 0) < 1.0}
python
{ "resource": "" }
q14376
CompletionService.submit_group_completion
train
def submit_group_completion(self, block_key, completion, users=None, user_ids=None):
    """
    Submit a completion for a group of users.

    Arguments:
        block_key (opaque_keys.edx.keys.UsageKey): The block to submit completions for.
        completion (float): A value in the range [0.0, 1.0]
        users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block.
        user_ids ([int]): An optional iterable of ids of Users that completed the block.

    Returns a list of (BlockCompletion, bool) where the boolean indicates
    whether the given BlockCompletion was newly created.
    """
    if users is None:
        users = []
    if user_ids is None:
        user_ids = []
    more_users = User.objects.filter(id__in=user_ids)
    if len(more_users) < len(user_ids):
        found_ids = {u.id for u in more_users}
        not_found_ids = [pk for pk in user_ids if pk not in found_ids]
        raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
    users.extend(more_users)
    submitted = []
    for user in users:
        submitted.append(BlockCompletion.objects.submit_completion(
            user=user,
            course_key=self._course_key,
            block_key=block_key,
            completion=completion
        ))
    return submitted
python
{ "resource": "" }
q14377
CompletionService.submit_completion
train
def submit_completion(self, block_key, completion): """ Submit a completion for the service user and course. Returns a (BlockCompletion, bool) where the boolean indicates whether the given BlockCompletion was newly created. """ return BlockCompletion.objects.submit_completion( user=self._user, course_key=self._course_key, block_key=block_key, completion=completion )
python
{ "resource": "" }
q14378
throw_if_bad_response
train
def throw_if_bad_response(response): """Throw an exception if the Cerberus response is not successful.""" try: response.raise_for_status() except RequestException: try: msg = 'Response code: {}; response body:\n{}'.format(response.status_code, json.dumps(response.json(), indent=2)) raise CerberusClientException(msg) except ValueError: msg = 'Response code: {}; response body:\n{}'.format(response.status_code, response.text) raise CerberusClientException(msg)
python
{ "resource": "" }
q14379
_initialize_client_from_environment
train
def _initialize_client_from_environment(): ''' Initialize a KeenClient instance using environment variables. ''' global _client, project_id, write_key, read_key, master_key, base_url if _client is None: # check environment for project ID and keys project_id = project_id or os.environ.get("KEEN_PROJECT_ID") write_key = write_key or os.environ.get("KEEN_WRITE_KEY") read_key = read_key or os.environ.get("KEEN_READ_KEY") master_key = master_key or os.environ.get("KEEN_MASTER_KEY") base_url = base_url or os.environ.get("KEEN_BASE_URL") if not project_id: raise InvalidEnvironmentError("Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!") _client = KeenClient(project_id, write_key=write_key, read_key=read_key, master_key=master_key, base_url=base_url)
python
{ "resource": "" }
q14380
count
train
def count(event_collection, timeframe=None, timezone=None, interval=None, filters=None,
          group_by=None, order_by=None, max_age=None, limit=None):
    """ Performs a count query

    Counts the number of events that meet the given criteria.

    :param event_collection: string, the name of the collection to query
    :param timeframe: string or dict, the timeframe in which the events happened
    example: "previous_7_days"
    :param timezone: int, the timezone you'd like to use for the timeframe and interval
    in seconds
    :param interval: string, the time interval used for measuring data over time
    example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
    example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
    like to group your results by.  example: "customer.id" or ["browser","operating_system"]
    :param order_by: dictionary or list of dictionary objects containing the property_name(s)
    to order by and the desired direction(s) of sorting.
    Example: {"property_name":"result", "direction":keen.direction.DESCENDING}
    May not be used without a group_by specified.
    :param limit: positive integer limiting the displayed results of a query using order_by
    :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
    willing to trade for increased query performance, in seconds
    """
    _initialize_client_from_environment()
    return _client.count(event_collection=event_collection, timeframe=timeframe,
                         timezone=timezone, interval=interval, filters=filters,
                         group_by=group_by, order_by=order_by, max_age=max_age, limit=limit)
python
{ "resource": "" }
q14381
sum
train
def sum(event_collection, target_property, timeframe=None, timezone=None, interval=None,
        filters=None, group_by=None, order_by=None, max_age=None, limit=None):
    """ Performs a sum query

    Adds the values of a target property for events that meet the given criteria.

    :param event_collection: string, the name of the collection to query
    :param target_property: string, the name of the event property you would like to use
    :param timeframe: string or dict, the timeframe in which the events happened
    example: "previous_7_days"
    :param timezone: int, the timezone you'd like to use for the timeframe and interval
    in seconds
    :param interval: string, the time interval used for measuring data over time
    example: "daily"
    :param filters: array of dict, contains the filters you'd like to apply to the data
    example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
    like to group your results by.  example: "customer.id" or ["browser","operating_system"]
    :param order_by: dictionary or list of dictionary objects containing the property_name(s)
    to order by and the desired direction(s) of sorting.
    Example: {"property_name":"result", "direction":keen.direction.DESCENDING}
    May not be used without a group_by specified.
    :param limit: positive integer limiting the displayed results of a query using order_by
    :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
    willing to trade for increased query performance, in seconds
    """
    _initialize_client_from_environment()
    return _client.sum(event_collection=event_collection, timeframe=timeframe,
                       timezone=timezone, interval=interval, filters=filters,
                       group_by=group_by, order_by=order_by, target_property=target_property,
                       max_age=max_age, limit=limit)
python
{ "resource": "" }
q14382
multi_analysis
train
def multi_analysis(event_collection, analyses, timeframe=None, interval=None, timezone=None,
                   filters=None, group_by=None, order_by=None, max_age=None, limit=None):
    """ Performs a multi-analysis query

    Returns a dictionary of analysis results.

    :param event_collection: string, the name of the collection to query
    :param analyses: dict, the types of analyses you'd like to run.
    example: {"total money made":{"analysis_type":"sum","target_property":"purchase.price"},
    "average price":{"analysis_type":"average","target_property":"purchase.price"}}
    :param timeframe: string or dict, the timeframe in which the events happened
    example: "previous_7_days"
    :param interval: string, the time interval used for measuring data over time
    example: "daily"
    :param timezone: int, the timezone you'd like to use for the timeframe and interval
    in seconds
    :param filters: array of dict, contains the filters you'd like to apply to the data
    example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
    :param group_by: string or array of strings, the name(s) of the properties you would
    like to group your results by.  example: "customer.id" or ["browser","operating_system"]
    :param order_by: dictionary or list of dictionary objects containing the property_name(s)
    to order by and the desired direction(s) of sorting.
    Example: {"property_name":"result", "direction":keen.direction.DESCENDING}
    May not be used without a group_by specified.
    :param limit: positive integer limiting the displayed results of a query using order_by
    :param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're
    willing to trade for increased query performance, in seconds
    """
    _initialize_client_from_environment()
    return _client.multi_analysis(event_collection=event_collection, timeframe=timeframe,
                                  interval=interval, timezone=timezone, filters=filters,
                                  group_by=group_by, order_by=order_by, analyses=analyses,
                                  max_age=max_age, limit=limit)
python
{ "resource": "" }
q14383
check_error
train
def check_error(status): """Set a generic function as the restype attribute of all OpenJPEG functions that return a BOOL_TYPE value. This way we do not have to check for error status in each wrapping function and an exception will always be appropriately raised. """ global ERROR_MSG_LST if status != 1: if len(ERROR_MSG_LST) > 0: # clear out the existing error message so that we don't pick up # a bad one next time around. msg = '\n'.join(ERROR_MSG_LST) ERROR_MSG_LST = [] raise OpenJPEGLibraryError(msg) else: raise OpenJPEGLibraryError("OpenJPEG function failure.")
python
{ "resource": "" }
q14384
decode
train
def decode(codec, stream, image): """Reads an entire image. Wraps the openjp2 library function opj_decode. Parameters ---------- codec : CODEC_TYPE The JPEG2000 codec stream : STREAM_TYPE_P The stream to decode. image : ImageType Output image structure. Raises ------ RuntimeError If the OpenJPEG library routine opj_decode fails. """ OPENJP2.opj_decode.argtypes = [CODEC_TYPE, STREAM_TYPE_P, ctypes.POINTER(ImageType)] OPENJP2.opj_decode.restype = check_error OPENJP2.opj_decode(codec, stream, image)
python
{ "resource": "" }
q14385
decode_tile_data
train
def decode_tile_data(codec, tidx, data, data_size, stream):
    """Reads tile data.

    Wraps the openjp2 library function opj_decode_tile_data.

    Parameters
    ----------
    codec : CODEC_TYPE
        The JPEG2000 codec
    tidx : int
        The index of the tile being decoded
    data : array
        Holds a memory block into which data will be decoded.
    data_size : int
        The size of data in bytes
    stream : STREAM_TYPE_P
        The stream to decode.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_decode_tile_data fails.
    """
    OPENJP2.opj_decode_tile_data.argtypes = [CODEC_TYPE,
                                             ctypes.c_uint32,
                                             ctypes.POINTER(ctypes.c_uint8),
                                             ctypes.c_uint32,
                                             STREAM_TYPE_P]
    OPENJP2.opj_decode_tile_data.restype = check_error
    datap = data.ctypes.data_as(ctypes.POINTER(ctypes.c_uint8))
    OPENJP2.opj_decode_tile_data(codec,
                                 ctypes.c_uint32(tidx),
                                 datap,
                                 ctypes.c_uint32(data_size),
                                 stream)
python
{ "resource": "" }
q14386
destroy_codec
train
def destroy_codec(codec): """Destroy a decompressor handle. Wraps the openjp2 library function opj_destroy_codec. Parameters ---------- codec : CODEC_TYPE Decompressor handle to destroy. """ OPENJP2.opj_destroy_codec.argtypes = [CODEC_TYPE] OPENJP2.opj_destroy_codec.restype = ctypes.c_void_p OPENJP2.opj_destroy_codec(codec)
python
{ "resource": "" }
q14387
encode
train
def encode(codec, stream): """Wraps openjp2 library function opj_encode. Encode an image into a JPEG 2000 codestream. Parameters ---------- codec : CODEC_TYPE The jpeg2000 codec. stream : STREAM_TYPE_P The stream to which data is written. Raises ------ RuntimeError If the OpenJPEG library routine opj_encode fails. """ OPENJP2.opj_encode.argtypes = [CODEC_TYPE, STREAM_TYPE_P] OPENJP2.opj_encode.restype = check_error OPENJP2.opj_encode(codec, stream)
python
{ "resource": "" }
q14388
get_decoded_tile
train
def get_decoded_tile(codec, stream, imagep, tile_index):
    """Get the decoded tile from the codec.

    Wraps the openjp2 library function opj_get_decoded_tile.

    Parameters
    ----------
    codec : CODEC_TYPE
        The JPEG2000 codec.
    stream : STREAM_TYPE_P
        The input stream.
    imagep : ImageType pointer
        Output image structure.
    tile_index : int
        Index of the tile which will be decoded.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_get_decoded_tile fails.
    """
    OPENJP2.opj_get_decoded_tile.argtypes = [CODEC_TYPE,
                                             STREAM_TYPE_P,
                                             ctypes.POINTER(ImageType),
                                             ctypes.c_uint32]
    OPENJP2.opj_get_decoded_tile.restype = check_error
    OPENJP2.opj_get_decoded_tile(codec, stream, imagep, tile_index)
python
{ "resource": "" }
q14389
end_compress
train
def end_compress(codec, stream): """End of compressing the current image. Wraps the openjp2 library function opj_end_compress. Parameters ---------- codec : CODEC_TYPE Compressor handle. stream : STREAM_TYPE_P Output stream buffer. Raises ------ RuntimeError If the OpenJPEG library routine opj_end_compress fails. """ OPENJP2.opj_end_compress.argtypes = [CODEC_TYPE, STREAM_TYPE_P] OPENJP2.opj_end_compress.restype = check_error OPENJP2.opj_end_compress(codec, stream)
python
{ "resource": "" }
q14390
end_decompress
train
def end_decompress(codec, stream): """End of decompressing the current image. Wraps the openjp2 library function opj_end_decompress. Parameters ---------- codec : CODEC_TYPE Compressor handle. stream : STREAM_TYPE_P Output stream buffer. Raises ------ RuntimeError If the OpenJPEG library routine opj_end_decompress fails. """ OPENJP2.opj_end_decompress.argtypes = [CODEC_TYPE, STREAM_TYPE_P] OPENJP2.opj_end_decompress.restype = check_error OPENJP2.opj_end_decompress(codec, stream)
python
{ "resource": "" }
q14391
image_destroy
train
def image_destroy(image): """Deallocate any resources associated with an image. Wraps the openjp2 library function opj_image_destroy. Parameters ---------- image : ImageType pointer Image resource to be disposed. """ OPENJP2.opj_image_destroy.argtypes = [ctypes.POINTER(ImageType)] OPENJP2.opj_image_destroy.restype = ctypes.c_void_p OPENJP2.opj_image_destroy(image)
python
{ "resource": "" }
q14392
read_header
train
def read_header(stream, codec):
    """Decodes an image header.

    Wraps the openjp2 library function opj_read_header.

    Parameters
    ----------
    stream : STREAM_TYPE_P
        The JPEG2000 stream.
    codec : CODEC_TYPE
        The JPEG2000 codec to read.

    Returns
    -------
    imagep : reference to ImageType instance
        The image structure initialized with image characteristics.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_read_header fails.
    """
    ARGTYPES = [STREAM_TYPE_P, CODEC_TYPE,
                ctypes.POINTER(ctypes.POINTER(ImageType))]
    OPENJP2.opj_read_header.argtypes = ARGTYPES
    OPENJP2.opj_read_header.restype = check_error

    imagep = ctypes.POINTER(ImageType)()
    OPENJP2.opj_read_header(stream, codec, ctypes.byref(imagep))
    return imagep
python
{ "resource": "" }
q14393
read_tile_header
train
def read_tile_header(codec, stream):
    """Reads a tile header.

    Wraps the openjp2 library function opj_read_tile_header.

    Parameters
    ----------
    codec : CODEC_TYPE
        The JPEG2000 codec to read.
    stream : STREAM_TYPE_P
        The JPEG2000 stream.

    Returns
    -------
    tile_index : int
        index of the tile being decoded
    data_size : int
        number of bytes for the decoded area
    col0, row0 : int
        upper left-most coordinate of tile
    col1, row1 : int
        lower right-most coordinate of tile
    ncomps : int
        number of components in the tile
    go_on : bool
        indicates that decoding should continue

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_read_tile_header fails.
    """
    ARGTYPES = [CODEC_TYPE,
                STREAM_TYPE_P,
                ctypes.POINTER(ctypes.c_uint32),
                ctypes.POINTER(ctypes.c_uint32),
                ctypes.POINTER(ctypes.c_int32),
                ctypes.POINTER(ctypes.c_int32),
                ctypes.POINTER(ctypes.c_int32),
                ctypes.POINTER(ctypes.c_int32),
                ctypes.POINTER(ctypes.c_uint32),
                ctypes.POINTER(BOOL_TYPE)]
    OPENJP2.opj_read_tile_header.argtypes = ARGTYPES
    OPENJP2.opj_read_tile_header.restype = check_error

    tile_index = ctypes.c_uint32()
    data_size = ctypes.c_uint32()
    col0 = ctypes.c_int32()
    row0 = ctypes.c_int32()
    col1 = ctypes.c_int32()
    row1 = ctypes.c_int32()
    ncomps = ctypes.c_uint32()
    go_on = BOOL_TYPE()
    OPENJP2.opj_read_tile_header(codec,
                                 stream,
                                 ctypes.byref(tile_index),
                                 ctypes.byref(data_size),
                                 ctypes.byref(col0),
                                 ctypes.byref(row0),
                                 ctypes.byref(col1),
                                 ctypes.byref(row1),
                                 ctypes.byref(ncomps),
                                 ctypes.byref(go_on))
    go_on = bool(go_on.value)
    return (tile_index.value,
            data_size.value,
            col0.value,
            row0.value,
            col1.value,
            row1.value,
            ncomps.value,
            go_on)
python
{ "resource": "" }
q14394
set_decode_area
train
def set_decode_area(codec, image, start_x=0, start_y=0, end_x=0, end_y=0):
    """Wraps openjp2 library function opj_set_decode_area.

    Sets the given area to be decoded.  This function should be called right
    after read_header and before any tile header reading.

    Parameters
    ----------
    codec : CODEC_TYPE
        Codec initialized by create_decompress function.
    image : ImageType pointer
        The decoded image previously set by read_header.
    start_x, start_y : optional, int
        The left and upper position of the rectangle to decode.
    end_x, end_y : optional, int
        The right and lower position of the rectangle to decode.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_set_decode_area fails.
    """
    OPENJP2.opj_set_decode_area.argtypes = [CODEC_TYPE,
                                            ctypes.POINTER(ImageType),
                                            ctypes.c_int32,
                                            ctypes.c_int32,
                                            ctypes.c_int32,
                                            ctypes.c_int32]
    OPENJP2.opj_set_decode_area.restype = check_error
    OPENJP2.opj_set_decode_area(codec, image,
                                ctypes.c_int32(start_x),
                                ctypes.c_int32(start_y),
                                ctypes.c_int32(end_x),
                                ctypes.c_int32(end_y))
python
{ "resource": "" }
q14395
set_default_decoder_parameters
train
def set_default_decoder_parameters(): """Wraps openjp2 library function opj_set_default_decoder_parameters. Sets decoding parameters to default values. Returns ------- dparam : DecompressionParametersType Decompression parameters. """ ARGTYPES = [ctypes.POINTER(DecompressionParametersType)] OPENJP2.opj_set_default_decoder_parameters.argtypes = ARGTYPES OPENJP2.opj_set_default_decoder_parameters.restype = ctypes.c_void_p dparams = DecompressionParametersType() OPENJP2.opj_set_default_decoder_parameters(ctypes.byref(dparams)) return dparams
python
{ "resource": "" }
q14396
set_default_encoder_parameters
train
def set_default_encoder_parameters():
    """Wraps openjp2 library function opj_set_default_encoder_parameters.

    Sets encoding parameters to default values.  That means:

        lossless
        1 tile
        size of precinct : 2^15 x 2^15 (means 1 precinct)
        size of code-block : 64 x 64
        number of resolutions: 6
        no SOP marker in the codestream
        no EPH marker in the codestream
        no sub-sampling in x or y direction
        no mode switch activated
        progression order: LRCP
        no index file
        no ROI upshifted
        no offset of the origin of the image
        no offset of the origin of the tiles
        reversible DWT 5-3

    The signature for this function differs from its C library counterpart,
    as the C function's pass-by-reference parameter becomes the Python
    return value.

    Returns
    -------
    cparameters : CompressionParametersType
        Compression parameters.
    """
    ARGTYPES = [ctypes.POINTER(CompressionParametersType)]
    OPENJP2.opj_set_default_encoder_parameters.argtypes = ARGTYPES
    OPENJP2.opj_set_default_encoder_parameters.restype = ctypes.c_void_p

    cparams = CompressionParametersType()
    OPENJP2.opj_set_default_encoder_parameters(ctypes.byref(cparams))
    return cparams
python
{ "resource": "" }
q14397
set_error_handler
train
def set_error_handler(codec, handler, data=None):
    """Wraps openjp2 library function opj_set_error_handler.

    Set the error handler used by openjpeg.

    Parameters
    ----------
    codec : CODEC_TYPE
        Codec initialized by create_compress function.
    handler : python function
        The callback function to be used.
    data : anything, optional
        User/client data passed through to the handler.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_set_error_handler fails.
    """
    OPENJP2.opj_set_error_handler.argtypes = [CODEC_TYPE,
                                              ctypes.c_void_p,
                                              ctypes.c_void_p]
    OPENJP2.opj_set_error_handler.restype = check_error
    OPENJP2.opj_set_error_handler(codec, handler, data)
python
{ "resource": "" }
q14398
set_info_handler
train
def set_info_handler(codec, handler, data=None):
    """Wraps openjp2 library function opj_set_info_handler.

    Set the info handler used by openjpeg.

    Parameters
    ----------
    codec : CODEC_TYPE
        Codec initialized by create_compress function.
    handler : python function
        The callback function to be used.
    data : anything, optional
        User/client data passed through to the handler.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_set_info_handler fails.
    """
    OPENJP2.opj_set_info_handler.argtypes = [CODEC_TYPE,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p]
    OPENJP2.opj_set_info_handler.restype = check_error
    OPENJP2.opj_set_info_handler(codec, handler, data)
python
{ "resource": "" }
q14399
set_warning_handler
train
def set_warning_handler(codec, handler, data=None):
    """Wraps openjp2 library function opj_set_warning_handler.

    Set the warning handler used by openjpeg.

    Parameters
    ----------
    codec : CODEC_TYPE
        Codec initialized by create_compress function.
    handler : python function
        The callback function to be used.
    data : anything, optional
        User/client data passed through to the handler.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_set_warning_handler fails.
    """
    OPENJP2.opj_set_warning_handler.argtypes = [CODEC_TYPE,
                                                ctypes.c_void_p,
                                                ctypes.c_void_p]
    OPENJP2.opj_set_warning_handler.restype = check_error
    OPENJP2.opj_set_warning_handler(codec, handler, data)
python
{ "resource": "" }