code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
self._get_fitness(self.swarm)
i = 0
self.i = i
while True:
    for particle in self.swarm:
        if self._gbest.fitness < particle.fitness:
            self._gbest = particle.copy()
        if particle.fitness > particle.pbest.fitness:
            particle.updatePBest()

    if i >= maxIter:
        if self._verbose:
            print("max iteration reached! stopping")
        return

    if self._func.is_converged:
        return

    if self._converged_likelihood(maxIter * lookback, self._particleCount, standard_dev):
        return

    for particle in self.swarm:
        w = 0.5 + numpy.random.uniform(0, 1, size=self._paramCount) / 2
        # w = 0.72
        part_vel = w * particle.velocity
        cog_vel = c1 * numpy.random.uniform(0, 1, size=self._paramCount) * (particle.pbest.position - particle.position)
        soc_vel = c2 * numpy.random.uniform(0, 1, size=self._paramCount) * (self._gbest.position - particle.position)
        particle.velocity = part_vel + cog_vel + soc_vel
        particle.position = particle.position + particle.velocity

    self._get_fitness(self.swarm)

    swarm = []
    for particle in self.swarm:
        swarm.append(particle.copy())
    yield swarm

    i += 1
    self.i = i
def _sample(self, maxIter=1000, c1=1.193, c2=1.193, lookback = 0.25, standard_dev = None)
Launches the PSO. Yields the complete swarm per iteration.

:param maxIter: maximum iterations
:param c1: cognitive weight
:param c2: social weight
:param lookback: percentage of particles to use when determining convergence
:param standard_dev: standard deviation of the last lookback particles for convergence
3.612111
3.358559
1.075494
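A self-contained toy run of the same velocity update on a 2-D quadratic can make the three-term rule above concrete (everything here — the fitness function, particle count, seed — is hypothetical and not part of the class):

import numpy as np

rng = np.random.default_rng(0)
n_particles, n_params = 20, 2
c1 = c2 = 1.193

# fitness = negative squared distance to the optimum at (3, -2)
def fitness(pos):
    return -np.sum((pos - np.array([3.0, -2.0])) ** 2, axis=1)

pos = rng.uniform(-10, 10, size=(n_particles, n_params))
vel = np.zeros_like(pos)
pbest_pos, pbest_fit = pos.copy(), fitness(pos)
gbest_pos = pbest_pos[np.argmax(pbest_fit)]

for _ in range(200):
    # same three-term update as in _sample: inertia + cognitive + social
    w = 0.5 + rng.uniform(0, 1, size=pos.shape) / 2
    vel = w * vel \
        + c1 * rng.uniform(0, 1, size=pos.shape) * (pbest_pos - pos) \
        + c2 * rng.uniform(0, 1, size=pos.shape) * (gbest_pos - pos)
    pos = pos + vel
    fit = fitness(pos)
    improved = fit > pbest_fit
    pbest_pos[improved], pbest_fit[improved] = pos[improved], fit[improved]
    gbest_pos = pbest_pos[np.argmax(pbest_fit)]

print(gbest_pos)  # close to [3, -2]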
gBests = []
for swarm in self._sample(maxIter, c1, c2, lookback, standard_dev):
    # swarms.append(swarm)
    gBests.append(self._gbest.copy())
return gBests
def _optimize(self, maxIter=1000, c1=1.193, c2=1.193, lookback=0.25, standard_dev=None)
:param maxIter: maximum number of swarm iterations
:param c1: cognitive weight
:param c2: social weight
:param lookback: how many particles to assess when considering convergence
:param standard_dev: the standard deviation of the last lookback number of particles used to determine convergence
:return: list of the global best particle per iteration
4.742669
5.041147
0.940792
return Particle(numpy.array([[]]*paramCount), numpy.array([[]]*paramCount), -numpy.Inf)
def create(cls, paramCount)
Creates a new particle with empty position and velocity and -inf as fitness
14.585196
6.159806
2.367801
return Particle(copy(self.position), copy(self.velocity), self.fitness)
def copy(self)
Creates a copy of itself
9.149858
7.620312
1.20072
for imageModel in self._imageModel_list:
    imageModel.reset_point_source_cache(bool=bool)
def reset_point_source_cache(self, bool=True)
Deletes all the cache in the point source class and saves it from then on.

:param bool: boolean; if True, caching is kept enabled from then on
:return: None
4.956447
6.453341
0.768044
I_xy = copy.deepcopy(I_xy_input)
background = np.minimum(0, np.min(I_xy))
I_xy -= background
x_ = np.sum(I_xy * x)
y_ = np.sum(I_xy * y)
r = (np.max(x) - np.min(x)) / 3.
mask = mask_util.mask_sphere(x, y, center_x=x_, center_y=y_, r=r)
Q_xx = np.sum(I_xy * mask * (x - x_) ** 2)
Q_xy = np.sum(I_xy * mask * (x - x_) * (y - y_))
Q_yy = np.sum(I_xy * mask * (y - y_) ** 2)
return Q_xx, Q_xy, Q_yy, background / np.mean(I_xy)
def moments(I_xy_input, x, y)
compute quadrupole moments from a light distribution

:param I_xy_input: light distribution
:param x: x-coordinates of I_xy_input
:param y: y-coordinates of I_xy_input
:return: Q_xx, Q_xy, Q_yy and the background fraction of the mean flux
2.354317
2.364901
0.995524
Q_xx, Q_xy, Q_yy, bkg = moments(I_xy, x, y)
norm = Q_xx + Q_yy + 2 * np.sqrt(Q_xx * Q_yy - Q_xy ** 2)
e1 = (Q_xx - Q_yy) / norm
e2 = 2 * Q_xy / norm
return e1 / (1 + bkg), e2 / (1 + bkg)
def ellipticities(I_xy, x, y)
compute ellipticities of a light distribution

:param I_xy: light distribution
:param x: x-coordinates of I_xy
:param y: y-coordinates of I_xy
:return: e1, e2 ellipticity components
2.727539
3.12084
0.873976
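As a sanity check of the moment-to-ellipticity mapping, a plain-numpy sketch on a synthetic elliptical Gaussian (no masking or background handling, so it mirrors only the core formula, not the full moments()/ellipticities() pair):

import numpy as np

# Build an elliptical Gaussian with axis ratio q on a grid (hypothetical test image).
xv, yv = np.meshgrid(np.linspace(-5, 5, 101), np.linspace(-5, 5, 101))
q = 0.7
I = np.exp(-0.5 * (xv ** 2 + (yv / q) ** 2))

# Flux-weighted centroid and second moments (no mask, zero background).
flux = np.sum(I)
x_c = np.sum(I * xv) / flux
y_c = np.sum(I * yv) / flux
Q_xx = np.sum(I * (xv - x_c) ** 2)
Q_xy = np.sum(I * (xv - x_c) * (yv - y_c))
Q_yy = np.sum(I * (yv - y_c) ** 2)

# Same ellipticity definition as in ellipticities():
norm = Q_xx + Q_yy + 2 * np.sqrt(Q_xx * Q_yy - Q_xy ** 2)
e1 = (Q_xx - Q_yy) / norm
e2 = 2 * Q_xy / norm
print(e1, e2)  # e1 close to (1 - q) / (1 + q) ~ 0.176, e2 ~ 0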
x = np.zeros_like(theta_x)
y = np.zeros_like(theta_y)
alpha_x = theta_x
alpha_y = theta_y
i = -1
for i, idex in enumerate(self._sorted_redshift_index):
    delta_T = self._T_ij_list[i]
    x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
    alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
delta_T = self._T_ij_list[i + 1]
x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
beta_x, beta_y = self._co_moving2angle_source(x, y)
return beta_x, beta_y
def ray_shooting(self, theta_x, theta_y, kwargs_lens, k=None)
ray-tracing (backwards light cone)

:param theta_x: angle in x-direction on the image
:param theta_y: angle in y-direction on the image
:param kwargs_lens: lens model keyword argument list
:return: angles in the source plane
2.8411
2.945302
0.964621
z_lens_last = z_start
first_deflector = True
for i, idex in enumerate(self._sorted_redshift_index):
    z_lens = self._redshift_list[idex]
    if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:
        # if z_lens > z_start and z_lens <= z_stop:
        if first_deflector is True:
            if keep_range is True:
                if not hasattr(self, '_cosmo_bkg_T_start'):
                    self._cosmo_bkg_T_start = self._cosmo_bkg.T_xy(z_start, z_lens)
                delta_T = self._cosmo_bkg_T_start
            else:
                delta_T = self._cosmo_bkg.T_xy(z_start, z_lens)
            first_deflector = False
        else:
            delta_T = self._T_ij_list[i]
        x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
        alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
        z_lens_last = z_lens
if keep_range is True:
    if not hasattr(self, '_cosmo_bkg_T_stop'):
        self._cosmo_bkg_T_stop = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
    delta_T = self._cosmo_bkg_T_stop
else:
    delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
return x, y, alpha_x, alpha_y
def ray_shooting_partial(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, keep_range=False, include_z_start=False)
ray-tracing through part of the light cone, starting with (x, y) and angles (alpha_x, alpha_y) at redshift z_start and then backwards to redshift z_stop

:param x: co-moving position [Mpc]
:param y: co-moving position [Mpc]
:param alpha_x: ray angle at z_start [arcsec]
:param alpha_y: ray angle at z_start [arcsec]
:param z_start: redshift of start of computation
:param z_stop: redshift where output is computed
:param kwargs_lens: lens model keyword argument list
:param keep_range: bool, if True, only computes the angular diameter ratio between the first and last step once
:param include_z_start: bool, if True, includes a deflector located exactly at z_start
:return: co-moving position and angles at redshift z_stop
2.056026
2.041523
1.007104
z_lens_last = z_start
first_deflector = True
pos_x, pos_y, redshifts, Tz_list = [], [], [], []
pos_x.append(x)
pos_y.append(y)
redshifts.append(z_start)
Tz_list.append(self._cosmo_bkg.T_xy(0, z_start))
current_z = z_lens_last
for i, idex in enumerate(self._sorted_redshift_index):
    z_lens = self._redshift_list[idex]
    if self._start_condition(include_z_start, z_lens, z_start) and z_lens <= z_stop:
        if z_lens != current_z:
            new_plane = True
            current_z = z_lens
        else:
            new_plane = False
        if first_deflector is True:
            delta_T = self._cosmo_bkg.T_xy(z_start, z_lens)
            first_deflector = False
        else:
            delta_T = self._T_ij_list[i]
        x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
        alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
        z_lens_last = z_lens
        if new_plane:
            pos_x.append(x)
            pos_y.append(y)
            redshifts.append(z_lens)
            Tz_list.append(self._T_z_list[i])
delta_T = self._cosmo_bkg.T_xy(z_lens_last, z_stop)
x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
pos_x.append(x)
pos_y.append(y)
redshifts.append(self._z_source)
Tz_list.append(self._T_z_source)
return pos_x, pos_y, redshifts, Tz_list
def ray_shooting_partial_steps(self, x, y, alpha_x, alpha_y, z_start, z_stop, kwargs_lens, include_z_start=False)
ray-tracing through part of the light cone, starting with (x, y) and angles (alpha_x, alpha_y) at redshift z_start and then backwards to redshift z_stop. This function differs from 'ray_shooting_partial' in that it returns the angular position of the ray at each lens plane.

:param x: co-moving position [Mpc]
:param y: co-moving position [Mpc]
:param alpha_x: ray angle at z_start [arcsec]
:param alpha_y: ray angle at z_start [arcsec]
:param z_start: redshift of start of computation
:param z_stop: redshift where output is computed
:param kwargs_lens: lens model keyword argument list
:param include_z_start: bool, if True, includes a deflector located exactly at z_start
:return: co-moving positions, redshifts and transverse distances of the ray at each lens plane
2.39077
2.38418
1.002764
dt_grav = np.zeros_like(theta_x)
dt_geo = np.zeros_like(theta_x)
x = np.zeros_like(theta_x)
y = np.zeros_like(theta_y)
alpha_x = theta_x
alpha_y = theta_y
i = 0
for i, idex in enumerate(self._sorted_redshift_index):
    z_lens = self._redshift_list[idex]
    delta_T = self._T_ij_list[i]
    dt_geo_new = self._geometrical_delay(alpha_x, alpha_y, delta_T)
    x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
    dt_grav_new = self._gravitational_delay(x, y, kwargs_lens, i, z_lens)
    alpha_x, alpha_y = self._add_deflection(x, y, alpha_x, alpha_y, kwargs_lens, i)
    dt_geo = dt_geo + dt_geo_new
    dt_grav = dt_grav + dt_grav_new
delta_T = self._T_ij_list[i + 1]
dt_geo += self._geometrical_delay(alpha_x, alpha_y, delta_T)
x, y = self._ray_step(x, y, alpha_x, alpha_y, delta_T)
beta_x, beta_y = self._co_moving2angle_source(x, y)
dt_geo -= self._geometrical_delay(beta_x, beta_y, self._T_z_source)
return dt_grav + dt_geo
def arrival_time(self, theta_x, theta_y, kwargs_lens, k=None)
light travel time relative to a straight path through the coordinate (0,0); a negative sign means earlier arrival time

:param theta_x: angle in x-direction on the image
:param theta_y: angle in y-direction on the image
:param kwargs_lens: lens model keyword argument list
:return: travel time in unit of days
2.473031
2.522358
0.980444
beta_x, beta_y = self.ray_shooting(theta_x, theta_y, kwargs_lens)
alpha_x = theta_x - beta_x
alpha_y = theta_y - beta_y
return alpha_x, alpha_y
def alpha(self, theta_x, theta_y, kwargs_lens, k=None)
reduced deflection angle

:param theta_x: angle in x-direction
:param theta_y: angle in y-direction
:param kwargs_lens: lens model kwargs
:return: deflection angles in x- and y-direction
1.950276
2.360493
0.826216
alpha_ra, alpha_dec = self.alpha(theta_x, theta_y, kwargs_lens)
alpha_ra_dx, alpha_dec_dx = self.alpha(theta_x + diff, theta_y, kwargs_lens)
alpha_ra_dy, alpha_dec_dy = self.alpha(theta_x, theta_y + diff, kwargs_lens)
dalpha_rara = (alpha_ra_dx - alpha_ra) / diff
dalpha_radec = (alpha_ra_dy - alpha_ra) / diff
dalpha_decra = (alpha_dec_dx - alpha_dec) / diff
dalpha_decdec = (alpha_dec_dy - alpha_dec) / diff
f_xx = dalpha_rara
f_xy = dalpha_radec
f_yx = dalpha_decra
f_yy = dalpha_decdec
return f_xx, f_xy, f_yx, f_yy
def hessian(self, theta_x, theta_y, kwargs_lens, k=None, diff=0.00000001)
computes the hessian components f_xx, f_xy, f_yx, f_yy from f_x and f_y with numerical differentiation

:param theta_x: x-position (preferentially arcsec)
:type theta_x: numpy array
:param theta_y: y-position (preferentially arcsec)
:type theta_y: numpy array
:param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes
:param diff: numerical differential step (float)
:return: f_xx, f_xy, f_yx, f_yy
1.60259
1.50679
1.063579
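The finite-difference scheme above is easy to verify against a profile with a known Hessian. A minimal sketch using the SIS deflection formula that also appears later in this section (theta_E and the evaluation point are hypothetical):

import numpy as np

theta_E = 1.2

def alpha(x, y):
    # SIS deflection, same formula as the SIS derivatives() further below
    r = np.sqrt(x ** 2 + y ** 2)
    return theta_E * x / r, theta_E * y / r

# forward differences, mirroring hessian() above
x0, y0, diff = 0.7, -0.4, 1e-8
ax, ay = alpha(x0, y0)
ax_dx, ay_dx = alpha(x0 + diff, y0)
ax_dy, ay_dy = alpha(x0, y0 + diff)
f_xx = (ax_dx - ax) / diff
f_xy = (ax_dy - ax) / diff
f_yx = (ay_dx - ay) / diff
f_yy = (ay_dy - ay) / diff

# For an SIS, f_xx + f_yy should equal theta_E / r (twice the convergence).
r0 = np.hypot(x0, y0)
print(f_xx + f_yy, theta_E / r0)  # both ~1.49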
factor = self._reduced2physical_factor[idex_lens]
# factor = self._cosmo_bkg.D_xy(0, z_source) / self._cosmo_bkg.D_xy(z_lens, z_source)
return alpha_reduced * factor
def _reduced2physical_deflection(self, alpha_reduced, idex_lens)
alpha_reduced = D_ds / D_s * alpha_physical

:param alpha_reduced: reduced deflection angle
:param idex_lens: index of the deflector plane
:return: physical deflection angle
4.837858
4.438681
1.089932
dt_days = (alpha_x ** 2 + alpha_y ** 2) / 2. * delta_T * const.Mpc / const.c / const.day_s * const.arcsec ** 2
return dt_days
def _geometrical_delay(self, alpha_x, alpha_y, delta_T)
geometrical delay (evaluated at z=0) of a light ray with an angle relative to the shortest path :param alpha_x: angle relative to a straight path :param alpha_y: angle relative to a straight path :param delta_T: transversal diameter distance between the start and end of the ray :return: geometrical delay in units of days
6.028872
5.807637
1.038094
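Plugging rough constants into the _geometrical_delay formula gives a feel for the scale; a standalone sketch with approximate values standing in for the const module:

import numpy as np

# Approximate physical constants standing in for the const module above.
Mpc = 3.0857e22                     # meters per Mpc
c = 299792458.0                     # speed of light [m/s]
day_s = 86400.0                     # seconds per day
arcsec = np.pi / (180.0 * 3600.0)   # radians per arcsec

def geometrical_delay(alpha_x, alpha_y, delta_T):
    # same formula as _geometrical_delay(): delay in days for a ray bent by
    # (alpha_x, alpha_y) arcsec over a transverse distance delta_T [Mpc]
    return (alpha_x ** 2 + alpha_y ** 2) / 2. * delta_T * Mpc / c / day_s * arcsec ** 2

# A 1 arcsec bend over 1000 Mpc gives a delay of roughly 14 days.
print(geometrical_delay(1.0, 0.0, 1000.0))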
D_dt = self._cosmo_bkg.D_dt(z_lens, z_source)
delay_days = const.delay_arcsec2days(potential, D_dt)
return delay_days
def _lensing_potential2time_delay(self, potential, z_lens, z_source)
transforms the lensing potential (in units arcsec^2) to a gravitational time-delay as measured at z=0 :param potential: lensing potential :param z_lens: redshift of the deflector :param z_source: redshift of source for the definition of the lensing quantities :return: gravitational time-delay in units of days
7.240554
9.2052
0.786572
T_z = self._T_z_list[idex]
# T_z = self._cosmo_bkg.T_xy(0, z_lens)
theta_x = x / T_z
theta_y = y / T_z
return theta_x, theta_y
def _co_moving2angle(self, x, y, idex)
transforms co-moving distances Mpc into angles on the sky (radian)

:param x: co-moving distance
:param y: co-moving distance
:param idex: index of the lens plane
:return: angles on the sky
5.264258
4.700934
1.119832
T_z = self._T_z_source
theta_x = x / T_z
theta_y = y / T_z
return theta_x, theta_y
def _co_moving2angle_source(self, x, y)
special case of the co_moving2angle definition at the source redshift

:param x: co-moving distance
:param y: co-moving distance
:return: angles on the sky at the source plane
3.998833
4.297105
0.930588
x_ = x + alpha_x * delta_T
y_ = y + alpha_y * delta_T
return x_, y_
def _ray_step(self, x, y, alpha_x, alpha_y, delta_T)
ray propagation with small angle approximation

:param x: co-moving x-position
:param y: co-moving y-position
:param alpha_x: deflection angle in x-direction at (x, y)
:param alpha_y: deflection angle in y-direction at (x, y)
:param delta_T: transversal angular diameter distance to the next step
:return: co-moving position at the next step
2.24457
3.161638
0.709939
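One step of this propagation is plain arithmetic; a tiny worked example with hypothetical numbers:

# One propagation step of the multi-plane ray tracer
# (positions in Mpc, angle in radians here for simplicity).
x, y = 0.0, 0.0              # co-moving position at the current plane
alpha_x, alpha_y = 2e-6, 0.0 # current ray angle
delta_T = 1500.0             # transverse distance to the next plane [Mpc]
x_next = x + alpha_x * delta_T
y_next = y + alpha_y * delta_T
print(x_next, y_next)  # (0.003, 0.0) Mpc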
theta_x, theta_y = self._co_moving2angle(x, y, idex)
alpha_x_red, alpha_y_red = self._lens_model.alpha(theta_x, theta_y, kwargs_lens, k=self._sorted_redshift_index[idex])
alpha_x_phys = self._reduced2physical_deflection(alpha_x_red, idex)
alpha_y_phys = self._reduced2physical_deflection(alpha_y_red, idex)
alpha_x_new = alpha_x - alpha_x_phys
alpha_y_new = alpha_y - alpha_y_phys
return alpha_x_new, alpha_y_new
def _add_deflection(self, x, y, alpha_x, alpha_y, kwargs_lens, idex)
adds the physical deflection angle of a single lens plane to the deflection field

:param x: co-moving distance at the deflector plane
:param y: co-moving distance at the deflector plane
:param alpha_x: physical angle (radian) before the deflector plane
:param alpha_y: physical angle (radian) before the deflector plane
:param kwargs_lens: lens model parameter kwargs
:param idex: index of the lens model to be added
:return: updated physical deflection after the deflector plane (in a backwards ray-tracing perspective)
2.828882
2.493821
1.134357
x_shift = x - center_x
y_shift = y - center_y
R = np.sqrt(x_shift * x_shift + y_shift * y_shift)
if isinstance(R, int) or isinstance(R, float):
    a = theta_E / max(0.000001, R)
else:
    a = np.empty_like(R)
    r = R[R > 0]  # in the SIS regime
    a[R == 0] = 0
    a[R > 0] = theta_E / r
f_x = a * x_shift
f_y = a * y_shift
return f_x, f_y
def derivatives(self, x, y, theta_E, center_x=0, center_y=0)
returns df/dx and df/dy of the function
2.669643
2.682524
0.995198
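A quick property check of the formula above: for an SIS the deflection magnitude equals theta_E at every radius. A minimal sketch re-implementing just the vectorized branch:

import numpy as np

# For an SIS the deflection magnitude is theta_E everywhere (hypothetical points).
theta_E = 1.0
x = np.array([0.3, -1.2, 2.0])
y = np.array([0.4, 0.5, 0.0])
r = np.sqrt(x ** 2 + y ** 2)
f_x = theta_E / r * x
f_y = theta_E / r * y
print(np.hypot(f_x, f_y))  # [1. 1. 1.]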
x_shift = x - center_x
y_shift = y - center_y
R = (x_shift * x_shift + y_shift * y_shift) ** (3. / 2)
if isinstance(R, int) or isinstance(R, float):
    prefac = theta_E / max(0.000001, R)
else:
    prefac = np.empty_like(R)
    r = R[R > 0]  # in the SIS regime
    prefac[R == 0] = 0.
    prefac[R > 0] = theta_E / r
f_xx = y_shift * y_shift * prefac
f_yy = x_shift * x_shift * prefac
f_xy = -x_shift * y_shift * prefac
return f_xx, f_yy, f_xy
def hessian(self, x, y, theta_E, center_x=0, center_y=0)
returns the Hessian matrix of the function: d^2f/dx^2, d^2f/dy^2, d^2f/dxdy
2.735056
2.738826
0.998624
x_shift = x - center_x
y_shift = y - center_y
A = np.pi * a_x * a_y
dist = (x_shift / a_x) ** 2 + (y_shift / a_y) ** 2
torus = np.zeros_like(x)
torus[dist <= 1] = 1
return amp / A * torus
def function(x, y, amp, a_x, a_y, center_x, center_y)
returns torus (ellipse with constant surface brightness) profile
2.729439
2.41393
1.130704
return mp.hyp2f1(a, b, c, z)
def hyp_2F1(a, b, c, z)
Gauss hypergeometric function 2F1, evaluated with mpmath. See http://docs.sympy.org/0.7.1/modules/mpmath/functions/hypergeometric.html
4.076663
2.71386
1.502164
dx, dy = self.alpha(x, y, kwargs, k=k)
return x - dx, y - dy
def ray_shooting(self, x, y, kwargs, k=None)
maps image to source position (inverse deflection) :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: source plane positions corresponding to (x, y) in the image plane
4.764486
6.545517
0.727901
potential = self.potential(x_image, y_image, kwargs_lens, k=k)
geometry = ((x_image - x_source) ** 2 + (y_image - y_source) ** 2) / 2.
return geometry - potential
def fermat_potential(self, x_image, y_image, x_source, y_source, kwargs_lens, k=None)
fermat potential (negative sign means earlier arrival time)

:param x_image: image position
:param y_image: image position
:param x_source: source position
:param y_source: source position
:param kwargs_lens: list of keyword arguments of lens model parameters matching the lens model classes
:return: fermat potential in arcsec**2 (Eqn 1 in Suyu et al. 2013) as a list
2.96584
3.713108
0.798749
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
bool_list = self._bool_list(k)
x_, y_, kwargs_copy = self._update_foreground(x, y, kwargs)
potential = np.zeros_like(x)
for i, func in enumerate(self.func_list):
    if bool_list[i] is True:
        if self._model_list[i] == 'SHEAR':
            potential += func.function(x, y, **kwargs[i])
        else:
            potential += func.function(x_, y_, **kwargs_copy[i])
return potential
def potential(self, x, y, kwargs, k=None)
lensing potential :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: lensing potential in units of arcsec^2
3.138692
3.060693
1.025484
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
bool_list = self._bool_list(k)
x_, y_, kwargs_copy = self._update_foreground(x, y, kwargs)
f_x, f_y = np.zeros_like(x_), np.zeros_like(x_)
for i, func in enumerate(self.func_list):
    if bool_list[i] is True:
        if self._model_list[i] == 'SHEAR':
            f_x_i, f_y_i = func.derivatives(x, y, **kwargs[i])
        else:
            f_x_i, f_y_i = func.derivatives(x_, y_, **kwargs_copy[i])
        f_x += f_x_i
        f_y += f_y_i
return f_x, f_y
def alpha(self, x, y, kwargs, k=None)
deflection angles :param x: x-position (preferentially arcsec) :type x: numpy array :param y: y-position (preferentially arcsec) :type y: numpy array :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param k: only evaluate the k-th lens model :return: deflection angles in units of arcsec
2.402149
2.370913
1.013174
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
if self._foreground_shear:
    # needs to be computed numerically due to non-linear effects
    f_xx, f_xy, f_yx, f_yy = self.hessian_differential(x, y, kwargs, k=k)
else:
    bool_list = self._bool_list(k)
    x_ = x
    y_ = y
    f_xx, f_yy, f_xy = np.zeros_like(x_), np.zeros_like(x_), np.zeros_like(x_)
    for i, func in enumerate(self.func_list):
        if bool_list[i] is True:
            f_xx_i, f_yy_i, f_xy_i = func.hessian(x_, y_, **kwargs[i])
            f_xx += f_xx_i
            f_yy += f_yy_i
            f_xy += f_xy_i
    f_yx = f_xy
return f_xx, f_xy, f_yx, f_yy
def hessian(self, x, y, kwargs, k=None)
hessian matrix

:param x: x-position (preferentially arcsec)
:type x: numpy array
:param y: y-position (preferentially arcsec)
:type y: numpy array
:param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
:param k: only evaluate the k-th lens model
:return: f_xx, f_xy, f_yx, f_yy components
2.474805
2.448004
1.010948
bool_list = self._bool_list(bool_list)
mass_3d = 0
for i, func in enumerate(self.func_list):
    if bool_list[i] is True:
        kwargs_i = {k: v for k, v in kwargs[i].items() if k not in ['center_x', 'center_y']}
        mass_3d_i = func.mass_3d_lens(r, **kwargs_i)
        mass_3d += mass_3d_i
        # except:
        #     raise ValueError('Lens profile %s does not support a 3d mass function!' % self.model_list[i])
return mass_3d
def mass_3d(self, r, kwargs, bool_list=None)
computes the mass within a 3d sphere of radius r :param r: radius (in angular units) :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param bool_list: list of bools that are part of the output :return: mass (in angular units, modulo epsilon_crit)
2.944845
2.898285
1.016065
bool_list = self._bool_list(bool_list)
mass_2d = 0
for i, func in enumerate(self.func_list):
    if bool_list[i] is True:
        kwargs_i = {k: v for k, v in kwargs[i].items() if k not in ['center_x', 'center_y']}
        mass_2d_i = func.mass_2d_lens(r, **kwargs_i)
        mass_2d += mass_2d_i
        # except:
        #     raise ValueError('Lens profile %s does not support a 2d mass function!' % self.model_list[i])
return mass_2d
def mass_2d(self, r, kwargs, bool_list=None)
computes the mass enclosed a projected (2d) radius r :param r: radius (in angular units) :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes :param bool_list: list of bools that are part of the output :return: projected mass (in angular units, modulo epsilon_crit)
2.904019
2.875254
1.010004
n = len(self.func_list)
if k is None:
    bool_list = [True] * n
elif isinstance(k, (int, np.integer)):
    bool_list = [False] * n
    bool_list[k] = True
else:
    bool_list = [False] * n
    for i, k_i in enumerate(k):
        if k_i is not False:
            if k_i is True:
                bool_list[i] = True
            elif k_i < n:
                bool_list[k_i] = True
            else:
                raise ValueError("k as set by %s is not convertible into a bool list!" % k)
if self._foreground_shear is True:
    bool_list[self._foreground_shear_idex] = False
return bool_list
def _bool_list(self, k=None)
returns a bool list of the length of the lens models

if k = None: returns bool list with True's
if k is int, returns bool list with False's but k'th is True

:param k: None, int, or list of ints/bools
:return: bool list
3.133973
3.06535
1.022386
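A standalone sketch of the same selection logic (foreground-shear handling omitted) shows what the three accepted forms of k produce:

import numpy as np

def bool_list(n, k=None):
    # minimal re-implementation of the _bool_list selection logic above
    if k is None:
        return [True] * n
    if isinstance(k, (int, np.integer)):
        out = [False] * n
        out[k] = True
        return out
    out = [False] * n
    for i, k_i in enumerate(k):
        if k_i is True:
            out[i] = True
        elif k_i is not False and k_i < n:
            out[k_i] = True
    return out

print(bool_list(4))            # [True, True, True, True]
print(bool_list(4, k=2))       # [False, False, True, False]
print(bool_list(4, k=[0, 3]))  # [True, False, False, True]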
'''generates an open namespace statement from a symbol: namespace x { y { z {'''
blocks = ['namespace {0} {{'.format(x) for x in symbol.module.name_parts]
return ' '.join(blocks)
def open_ns(symbol)
generates an open namespace statement from a symbol: namespace x { y { z {
16.46858
6.637915
2.480987
'''generates a closing namespace statement from a symbol'''
closing = ' '.join(['}' for x in symbol.module.name_parts])
name = '::'.join(symbol.module.name_parts)
return '{0} // namespace {1}'.format(closing, name)
def close_ns(symbol)
generates a closing namespace statement from a symbol
9.571106
7.038645
1.359794
'''generates a namespace x::y::z statement from a symbol'''
if symbol.type and symbol.type.is_primitive:
    return ''
return '{0}::'.format('::'.join(symbol.module.name_parts))
def ns(symbol)
generates a namespace x::y::z statement from a symbol
10.259564
5.764815
1.779687
config = Path(config)
if reload:
    argv = sys.argv.copy()
    argv.remove('--reload')
    monitor(config.dirname(), src, dst, argv)
else:
    run(config, src, dst, force)
def app(config, src, dst, features, reload, force)
Takes several files or directories as src and generates the code in the given dst directory.
5.28922
5.512682
0.959464
script = Path(script).expand().abspath()
output = Path(output).expand().abspath()
input = input if isinstance(input, (list, tuple)) else [input]
output.makedirs_p()
_script_reload(script, input, output)
def reload(script, input, output)
reloads the generator script when the script files or the input files change
3.845479
3.940369
0.975919
input = [Path(entry).expand().abspath() for entry in input]
output = Path(output).expand().abspath()
cmd = 'python3 {0} {1} {2}'.format(script, ' '.join(input), output)
event_handler = RunScriptChangeHandler(cmd)
event_handler.run()  # run always once
observer = Observer()
path = script.dirname().expand().abspath()
click.secho('watch: {0}'.format(path), fg='blue')
observer.schedule(event_handler, path, recursive=True)
for entry in input:
    entry = entry.dirname().expand().abspath()
    click.secho('watch: {0}'.format(entry), fg='blue')
    observer.schedule(event_handler, entry, recursive=True)
path = Path(__file__).parent / 'qface'
click.secho('watch: {0}'.format(path), fg='blue')
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
def _script_reload(script, input, output)
run the named generator and monitor the input and generator folder
2.689855
2.653737
1.01361
script_dir = str(Path(__file__).parent.abspath())
click.secho(script_dir, fg='blue')
if editable:
    sh('pip3 install --editable {0} --upgrade'.format(script_dir))
else:
    sh('pip3 install {0} --upgrade'.format(script_dir))
def install(editable)
install the script onto the system using pip3
3.174756
2.942548
1.078914
"merges b into a recursively if a and b are dicts" for key in b: if isinstance(a.get(key), dict) and isinstance(b.get(key), dict): merge(a[key], b[key]) else: a[key] = b[key] return a
def merge(a, b)
merges b into a recursively if a and b are dicts
2.300357
2.002082
1.148982
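Assuming merge() from above is in scope, a short usage example showing that nested dicts are merged key-by-key while other values in b overwrite a:

a = {'x': 1, 'nested': {'a': 1, 'b': 2}}
b = {'y': 2, 'nested': {'b': 3}}
merge(a, b)
print(a)  # {'x': 1, 'nested': {'a': 1, 'b': 3}, 'y': 2}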
source = name
if name and name[0] == '/':
    source = name[1:]
elif self.source is not None:
    source = '/'.join((self.source, name))
return self.env.get_template(source)
def get_template(self, name)
Retrieves a single template file from the template loader
3.641528
3.327896
1.094243
if Generator.strict:
    self.env.undefined = TestableUndefined
else:
    self.env.undefined = Undefined
template = self.get_template(name)
return template.render(context)
def render(self, name, context)
Returns the rendered text from a single template file from the template loader using the given context data
5.848664
5.883265
0.994119
context.update(self.context)
return self.env.from_string(template).render(context)
def apply(self, template, context={})
Return the rendered text of a template instance
5.104401
6.034329
0.845894
if not file_path or not template:
    click.secho('source or target missing for document')
    return
if not context:
    context = self.context
error = False
try:
    self._write(file_path, template, context, preserve, force)
except TemplateSyntaxError as exc:
    message = '{0}:{1}: error: {2}'.format(exc.filename, exc.lineno, exc.message)
    click.secho(message, fg='red', err=True)
    error = True
except TemplateNotFound as exc:
    message = '{0}: error: Template not found'.format(exc.name)
    click.secho(message, fg='red', err=True)
    error = True
except TemplateError as exc:
    # Just return with an error, the generic template_error_handler takes care of printing it
    error = True
if error and Generator.strict:
    sys.exit(1)
def write(self, file_path, template, context={}, preserve=False, force=False)
Using a template file name it renders a template into a file given a context
3.321839
3.370625
0.985526
self.context.update({
    'system': system,
})
document = FileSystem.load_yaml(path, required=True)
for module, rules in document.items():
    click.secho('process: {0}'.format(module), fg='green')
    self._process_rules(rules, system)
def process_rules(self, path: Path, system: System)
writes the templates read from the rules document
6.054622
4.930481
1.227998
self._source = None  # reset the template source
if not self._shall_proceed(rules):
    return
self.context.update(rules.get('context', {}))
self.path = rules.get('path', '')
self.source = rules.get('source', None)
self._process_rule(rules.get('system', None), {'system': system})
for module in system.modules:
    self._process_rule(rules.get('module', None), {'module': module})
    for interface in module.interfaces:
        self._process_rule(rules.get('interface', None), {'interface': interface})
    for struct in module.structs:
        self._process_rule(rules.get('struct', None), {'struct': struct})
    for enum in module.enums:
        self._process_rule(rules.get('enum', None), {'enum': enum})
def _process_rules(self, rules: dict, system: System)
process a set of rules for a target
2.558715
2.471876
1.035131
if not rule or not self._shall_proceed(rule):
    return
self.context.update(context)
self.context.update(rule.get('context', {}))
self.path = rule.get('path', None)
self.source = rule.get('source', None)
for entry in rule.get('documents', []):
    target, source = self._resolve_rule_document(entry)
    self.write(target, source)
for entry in rule.get('preserve', []):
    target, source = self._resolve_rule_document(entry)
    self.write(target, source, preserve=True)
def _process_rule(self, rule: dict, context: dict)
process a single rule
3.31749
3.112894
1.065725
logger.debug('parse document: {0}'.format(document))
stream = FileStream(str(document), encoding='utf-8')
system = FileSystem._parse_stream(stream, system, document, profile)
FileSystem.merge_annotations(system, document.stripext() + '.yaml')
return system
def _parse_document(document: Path, system: System = None, profile=EProfile.FULL)
Parses a document and returns the resulting domain system

:param document: document path to parse
:param system: system to be used (optional)
:param profile: parse profile (optional)
7.148089
7.468143
0.957144
if not Path(document).exists():
    return
meta = FileSystem.load_yaml(document)
click.secho('merge: {0}'.format(document.name), fg='blue')
for identifier, data in meta.items():
    symbol = system.lookup(identifier)
    if symbol:
        merge(symbol.tags, data)
def merge_annotations(system, document)
Read a YAML document and for each root symbol identifier updates the tag information of that symbol
7.063753
5.305212
1.331474
inputs = input if isinstance(input, (list, tuple)) else [input]
logger.debug('parse input={0}'.format(inputs))
identifier = 'system' if not identifier else identifier
system = System()
cache = None
if use_cache:
    cache = shelve.open('qface.cache')
    if identifier in cache and clear_cache:
        del cache[identifier]
    if identifier in cache:
        # use the cached domain model
        system = cache[identifier]
# if the domain model is not cached, generate it
for input in inputs:
    path = Path.getcwd() / str(input)
    if path.isfile():
        FileSystem.parse_document(path, system)
    else:
        for document in path.walkfiles(pattern):
            FileSystem.parse_document(document, system)
if use_cache:
    cache[identifier] = system
return system
def parse(input, identifier: str = None, use_cache=False, clear_cache=True, pattern="*.qface", profile=EProfile.FULL)
Input can be either a file or directory or a list of files or directories. A directory will be parsed recursively. The function returns the resulting system. Stores the result of the run in the domain cache named after the identifier.

:param input: files and/or directories to parse
:param identifier: identifies the parse run. Used to name the cache
:param use_cache: if True, uses the domain cache
:param clear_cache: clears the domain cache (defaults to true)
:param pattern: glob pattern for files when walking a directory
3.90022
3.810062
1.023663
watch = watch if isinstance(watch, (list, tuple)) else [watch]
watch = [Path(entry).expand().abspath() for entry in watch]
event_handler = RunScriptChangeHandler(args)
observer = Observer()
for entry in watch:
    if entry.isfile():
        entry = entry.parent
    click.secho('watch recursive: {0}'.format(entry), fg='blue')
    observer.schedule(event_handler, entry, recursive=True)
event_handler.run()  # run always once
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
def monitor(args, watch)
reloads the script given by args when the watched src files change
3.636063
3.514169
1.034686
'''lookup a symbol by fully qualified name.'''
# <module>
if name in self._moduleMap:
    return self._moduleMap[name]
# <module>.<Symbol>
(module_name, type_name, fragment_name) = self.split_typename(name)
if not module_name and type_name:
    click.secho('not able to lookup symbol: {0}'.format(name), fg='red')
    return None
module = self._moduleMap[module_name]
return module.lookup(type_name, fragment_name)
def lookup(self, name: str)
lookup a symbol by fully qualified name.
4.727326
4.210304
1.122799
'''return the fully qualified name (`<module>.<name>`)'''
if self.module == self:
    return self.module.name
if "." not in self.name:
    return '{0}.{1}'.format(self.module.name, self.name)
# we have a fully qualified reference, just return it
return self.name
def qualified_name(self)
return the fully qualified name (`<module>.<name>`)
4.644251
3.815457
1.21722
if tag not in self._tags:
    self._tags[tag] = dict()
def add_tag(self, tag)
add a tag to the tag list
4.645204
4.450225
1.043813
self.add_tag(tag)
d = self._tags[tag]
d[name] = value
def add_attribute(self, tag, name, value)
add an attribute (name, value pair) to the named tag
4.287236
3.95124
1.085035
if tag in self._tags and name in self._tags[tag]:
    return self._tags[tag][name]
def attribute(self, tag, name)
return attribute by tag and attribute name
2.755986
2.498414
1.103094
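A minimal stand-in class shows how the three tag helpers above interact (hypothetical; only the _tags machinery from the snippets is reproduced):

class TaggedSymbol:
    # minimal stand-in exposing only the tag helpers shown above
    def __init__(self):
        self._tags = {}

    def add_tag(self, tag):
        if tag not in self._tags:
            self._tags[tag] = dict()

    def add_attribute(self, tag, name, value):
        self.add_tag(tag)
        self._tags[tag][name] = value

    def attribute(self, tag, name):
        if tag in self._tags and name in self._tags[tag]:
            return self._tags[tag][name]

sym = TaggedSymbol()
sym.add_attribute('config', 'singleton', True)
print(sym.attribute('config', 'singleton'))  # True
print(sym.attribute('config', 'missing'))    # None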
'''checks if type is a valid type'''
return (self.is_primitive and self.name) \
    or (self.is_complex and self.name) \
    or (self.is_list and self.nested) \
    or (self.is_map and self.nested) \
    or (self.is_model and self.nested)
def is_valid(self)
checks if type is a valid type
3.682385
3.317441
1.110008
self.__is_resolved = True
if self.is_complex:
    type = self.nested if self.nested else self
    type.__reference = self.module.lookup(type.name)
def _resolve(self)
resolve the type symbol from name by doing a lookup
12.199239
9.780742
1.247271
'''lookup a symbol by name. If symbol is not local it will be looked up system wide'''
if name in self._contentMap:
    symbol = self._contentMap[name]
    if fragment:
        return symbol._contentMap[fragment]
    return symbol
return self.system.lookup(name)
def lookup(self, name: str, fragment: str = None)
lookup a symbol by name. If symbol is not local it will be looked up system wide
7.211608
3.583485
2.012457
'''return the fully qualified name (`<module>.<interface>#<operation>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.interface.name, self.name)
def qualified_name(self)
return the fully qualified name (`<module>.<interface>#<operation>`)
6.76404
3.210894
2.106591
'''return the fully qualified name (`<module>.<struct>#<field>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.struct.name, self.name)
def qualified_name(self)
return the fully qualified name (`<module>.<struct>#<field>`)
7.232825
3.265759
2.214745
'''return the fully qualified name (`<module>.<enum>#<member>`)'''
return '{0}.{1}#{2}'.format(self.module.name, self.enum.name, self.name)
def qualified_name(self)
return the fully qualified name (`<module>.<enum>#<member>`)
7.627668
3.147005
2.423787
try:
    # all symbols have a toJson method, try it
    return json.dumps(symbol.toJson(), indent='  ')
except AttributeError:
    pass
return json.dumps(symbol, indent='  ')
def jsonify(symbol)
returns json format for symbol
5.80531
6.504834
0.892461
code = hashlib.new(hash_type)
code.update(str(symbol).encode('utf-8'))
return code.hexdigest()
def hash(symbol, hash_type='sha1')
create a hash code from symbol
2.331671
2.031215
1.147919
if isinstance(args, str):
    args = args.split()
if not args:
    return
click.echo('$ {0}'.format(' '.join(args)))
try:
    return subprocess.check_call(args, **kwargs)
except subprocess.CalledProcessError as exc:
    click.secho('run error {}'.format(exc))
except OSError as exc:
    click.secho('not found error {}'.format(exc))
def sh(args, **kwargs)
runs the given cmd as shell command
2.798756
2.717623
1.029854
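Assuming sh() from above is in scope, typical calls look like this (a command string is split, a list is passed through, falsy input is a no-op):

sh('git status')              # echoes "$ git status", then runs it
sh(['python3', '--version'])  # list form is passed through unchanged
sh('')                        # falsy args: returns None without running anything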
if not s:
    return
doc = DocObject()
tag = None
s = s[3:-2]  # remove '/**' and '*/'
for line in s.splitlines():
    line = line.lstrip(' *')  # strip a ' ' and '*' from start
    if not line:
        tag = None  # on empty line reset the tag information
    elif line[0] == '@':
        line = line[1:]
        res = line.split(maxsplit=1)
        if len(res) == 0:
            continue
        tag = res[0]
        if len(res) == 1:
            doc.add_tag(tag, True)
        elif len(res) == 2:
            value = res[1]
            doc.add_tag(tag, value)
    elif tag:
        # append to previous matched tag
        doc.add_tag(tag, line)
    else:
        # append any loose lines to description
        doc.add_tag('description', line)
return doc
def parse_doc(s)
parse a comment in the format of JavaDoc and returns an object, where each JavaDoc tag is a property of the object.
3.564255
3.391141
1.051049
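To make the parsing behavior concrete, a hedged sketch: a minimal DocObject stand-in (the real class presumably differs; here repeated tags simply accumulate into lists) plus a sample comment run through parse_doc() from above:

class DocObject:
    # minimal stand-in: collect every (tag, value) pair in a dict of lists
    def __init__(self):
        self.tags = {}

    def add_tag(self, tag, value):
        self.tags.setdefault(tag, []).append(value)

comment = """/**
 * Returns the current temperature.
 *
 * @param unit celsius or fahrenheit
 * @deprecated
 */"""

doc = parse_doc(comment)
print(doc.tags)
# {'description': ['Returns the current temperature.'],
#  'param': ['unit celsius or fahrenheit'],
#  'deprecated': [True]}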
url_username = request.parser_context.get('kwargs', {}).get('username', '')
if request.user.username.lower() != url_username.lower():
    if request.user.is_staff:
        return False  # staff gets 403
    raise Http404()
return True
def has_permission(self, request, view)
Returns true if the current request is by the user themselves. Note: a 404 is returned for non-staff instead of a 403. This is to prevent users from being able to detect the existence of accounts.
4.364127
3.96242
1.101379
conn_module = type(connection).__module__
if "mysql" in conn_module:
    return "bigint AUTO_INCREMENT"
elif "postgres" in conn_module:
    return "bigserial"
return super(BigAutoField, self).db_type(connection)
def db_type(self, connection)
The type of the field to insert into the database.
3.438274
3.213626
1.069905
# Raise ValueError to match normal django semantics for wrong type of field.
if not isinstance(course_key, CourseKey):
    raise ValueError(
        "course_key must be an instance of `opaque_keys.edx.keys.CourseKey`. Got {}".format(type(course_key))
    )
try:
    block_type = block_key.block_type
except AttributeError:
    raise ValueError(
        "block_key must be an instance of `opaque_keys.edx.keys.UsageKey`. Got {}".format(type(block_key))
    )
if waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
    try:
        with transaction.atomic():
            obj, is_new = self.get_or_create(  # pylint: disable=unpacking-non-sequence
                user=user,
                course_key=course_key,
                block_key=block_key,
                defaults={
                    'completion': completion,
                    'block_type': block_type,
                },
            )
    except IntegrityError:
        # The completion was created concurrently by another process
        log.info(
            "An IntegrityError was raised when trying to create a BlockCompletion for %s:%s:%s. "
            "Falling back to get().",
            user, course_key, block_key,
        )
        obj = self.get(
            user=user,
            course_key=course_key,
            block_key=block_key,
        )
        is_new = False
    if not is_new and obj.completion != completion:
        obj.completion = completion
        obj.full_clean()
        obj.save(update_fields={'completion', 'modified'})
else:
    # If the feature is not enabled, this method should not be called.
    # Error out with a RuntimeError.
    raise RuntimeError(
        "BlockCompletion.objects.submit_completion should not be "
        "called when the feature is disabled."
    )
return obj, is_new
def submit_completion(self, user, course_key, block_key, completion)
Update the completion value for the specified record. Parameters: * user (django.contrib.auth.models.User): The user for whom the completion is being submitted. * course_key (opaque_keys.edx.keys.CourseKey): The course in which the submitted block is found. * block_key (opaque_keys.edx.keys.UsageKey): The block that has had its completion changed. * completion (float in range [0.0, 1.0]): The fractional completion value of the block (0.0 = incomplete, 1.0 = complete). Return Value: (BlockCompletion, bool): A tuple comprising the created or updated BlockCompletion object and a boolean value indicating whether the object was newly created by this call. Raises: ValueError: If the wrong type is passed for one of the parameters. django.core.exceptions.ValidationError: If a float is passed that is not between 0.0 and 1.0. django.db.DatabaseError: If there was a problem getting, creating, or updating the BlockCompletion record in the database. This will also be a more specific error, as described here: https://docs.djangoproject.com/en/1.11/ref/exceptions/#database-exceptions. IntegrityError and OperationalError are relatively common subclasses.
2.839891
2.721065
1.043669
block_completions = {}
for block, completion in blocks:
    (block_completion, is_new) = self.submit_completion(user, course_key, block, completion)
    block_completions[block_completion] = is_new
return block_completions
def submit_batch_completion(self, user, course_key, blocks)
Performs a batch insertion of completion objects. Parameters: * user (django.contrib.auth.models.User): The user for whom the completions are being submitted. * course_key (opaque_keys.edx.keys.CourseKey): The course in which the submitted blocks are found. * blocks: A list of tuples of UsageKey to float completion values. (float in range [0.0, 1.0]): The fractional completion value of the block (0.0 = incomplete, 1.0 = complete). Return Value: Dict of (BlockCompletion, bool): A dictionary with a BlockCompletion object key and a value of bool. The boolean value indicates whether the object was newly created by this call. Raises: ValueError: If the wrong type is passed for one of the parameters. django.core.exceptions.ValidationError: If a float is passed that is not between 0.0 and 1.0. django.db.DatabaseError: If there was a problem getting, creating, or updating the BlockCompletion record in the database.
3.323028
2.84256
1.169027
if self.block_key.run is None:
    # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
    return self.block_key.replace(course_key=self.course_key)
return self.block_key
def full_block_key(self)
Returns the "correct" usage key value with the run filled in.
4.642973
3.659534
1.268733
user_course_completions = cls.user_course_completion_queryset(user, course_key)
return cls.completion_by_block_key(user_course_completions)
def get_course_completions(cls, user, course_key)
Returns a dictionary mapping BlockKeys to completion values for all BlockCompletion records for the given user and course_key. Return value: dict[BlockKey] = float
4.695773
4.606086
1.019471
return cls.objects.filter(user=user, course_key=course_key)
def user_course_completion_queryset(cls, user, course_key)
Returns a Queryset of completions for a given user and course_key.
3.590921
2.535703
1.416144
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
    return
course_key = CourseKey.from_string(kwargs['course_id'])
block_key = UsageKey.from_string(kwargs['usage_id'])
block_cls = XBlock.load_class(block_key.block_type)
if XBlockCompletionMode.get_mode(block_cls) != XBlockCompletionMode.COMPLETABLE:
    return
if getattr(block_cls, 'has_custom_completion', False):
    return
user = User.objects.get(id=kwargs['user_id'])
if kwargs.get('score_deleted'):
    completion = 0.0
else:
    completion = 1.0
if not kwargs.get('grader_response'):
    BlockCompletion.objects.submit_completion(
        user=user,
        course_key=course_key,
        block_key=block_key,
        completion=completion,
    )
def scorable_block_completion(sender, **kwargs)
When a problem is scored, submit a new BlockCompletion for that block.
3.134613
3.00095
1.04454
if not waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
    raise ValidationError(
        _("BlockCompletion.objects.submit_batch_completion should not be called when the feature is disabled.")
    )
for key in self.REQUIRED_KEYS:
    if key not in batch_object:
        raise ValidationError(_("Key '{key}' not found.").format(key=key))
username = batch_object['username']
user = User.objects.get(username=username)
course_key_obj = self._validate_and_parse_course_key(batch_object['course_key'])
if not CourseEnrollment.is_enrolled(user, course_key_obj):
    raise ValidationError(_('User is not enrolled in course.'))
blocks = batch_object['blocks']
block_objs = []
for block_key in blocks:
    block_key_obj = self._validate_and_parse_block_key(block_key, course_key_obj)
    completion = float(blocks[block_key])
    block_objs.append((block_key_obj, completion))
return user, course_key_obj, block_objs
def _validate_and_parse(self, batch_object)
Performs validation on the batch object to make sure it is in the proper format. Parameters: * batch_object: The data provided to a POST. The expected format is the following: { "username": "username", "course_key": "course-key", "blocks": { "block_key1": 0.0, "block_key2": 1.0, "block_key3": 1.0, } } Return Value: * tuple: (User, CourseKey, List of tuples (UsageKey, completion_float) Raises: django.core.exceptions.ValidationError: If any aspect of validation fails a ValidationError is raised. ObjectDoesNotExist: If a database object cannot be found an ObjectDoesNotExist is raised.
3.051676
2.587868
1.179224
try:
    return CourseKey.from_string(course_key)
except InvalidKeyError:
    raise ValidationError(_("Invalid course key: {}").format(course_key))
def _validate_and_parse_course_key(self, course_key)
Returns a validated parsed CourseKey deserialized from the given course_key.
2.563798
2.228747
1.150331
try:
    block_key_obj = UsageKey.from_string(block_key)
except InvalidKeyError:
    raise ValidationError(_("Invalid block key: {}").format(block_key))
if block_key_obj.run is None:
    expected_matching_course_key = course_key_obj.replace(run=None)
else:
    expected_matching_course_key = course_key_obj
if block_key_obj.course_key != expected_matching_course_key:
    raise ValidationError(
        _("Block with key: '{key}' is not in course {course}").format(key=block_key, course=course_key_obj)
    )
return block_key_obj
def _validate_and_parse_block_key(self, block_key, course_key_obj)
Returns a validated, parsed UsageKey deserialized from the given block_key.
2.22456
2.039688
1.090637
batch_object = request.data or {}
try:
    user, course_key, blocks = self._validate_and_parse(batch_object)
    BlockCompletion.objects.submit_batch_completion(user, course_key, blocks)
except ValidationError as exc:
    return Response({
        "detail": _(' ').join(text_type(msg) for msg in exc.messages),
    }, status=status.HTTP_400_BAD_REQUEST)
except ValueError as exc:
    return Response({
        "detail": text_type(exc),
    }, status=status.HTTP_400_BAD_REQUEST)
except ObjectDoesNotExist as exc:
    return Response({
        "detail": text_type(exc),
    }, status=status.HTTP_404_NOT_FOUND)
except DatabaseError as exc:
    return Response({
        "detail": text_type(exc),
    }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
def post(self, request, *args, **kwargs)
Inserts a batch of completions. REST Endpoint Format: { "username": "username", "course_key": "course-key", "blocks": { "block_key1": 0.0, "block_key2": 1.0, "block_key3": 1.0, } } **Returns** A Response object, with an appropriate status code. If successful, status code is 200. { "detail" : _("ok") } Otherwise, a 400 or 404 may be returned, and the "detail" content will explain the error.
2.481854
2.219666
1.118121
def get_completion(course_completions, all_blocks, block_id):
    block = all_blocks.get(block_id)
    child_ids = block.get('children', [])
    if not child_ids:
        return course_completions.get(block.serializer.instance, 0)
    completion = 0
    total_children = 0
    for child_id in child_ids:
        completion += get_completion(course_completions, all_blocks, child_id)
        total_children += 1
    return int(completion == total_children)

user_id = User.objects.get(username=username).id
block_types_filter = ['course', 'chapter', 'sequential', 'vertical', 'html', 'problem', 'video', 'discussion', 'drag-and-drop-v2']
blocks = get_blocks(
    request,
    UsageKey.from_string(subsection_id),
    nav_depth=2,
    requested_fields=['children'],
    block_types_filter=block_types_filter,
)
course_completions = BlockCompletion.get_course_completions(user_id, CourseKey.from_string(course_key))
aggregated_completion = get_completion(course_completions, blocks['blocks'], blocks['root'])
return Response({"completion": aggregated_completion}, status=status.HTTP_200_OK)
def get(self, request, username, course_key, subsection_id)
Returns completion for a (user, subsection, course).
3.164474
3.107499
1.018334
queryset = BlockCompletion.user_course_completion_queryset(self._user, self._course_key).filter(
    block_key__in=candidates
)
completions = BlockCompletion.completion_by_block_key(queryset)
candidates_with_runs = [candidate.replace(course_key=self._course_key) for candidate in candidates]
for candidate in candidates_with_runs:
    if candidate not in completions:
        completions[candidate] = 0.0
return completions
def get_completions(self, candidates)
Given an iterable collection of block_keys in the course, returns a mapping of the block_keys to the present completion values of their associated blocks. If a completion is not found for a given block in the current course, 0.0 is returned. The service does not attempt to verify that the block exists within the course. Parameters: candidates: collection of BlockKeys within the current course. Note: Usage keys may not have the course run filled in for old mongo courses. This method checks for completion records against a set of BlockKey candidates with the course run filled in from self._course_key. Return value: dict[BlockKey] -> float: Mapping blocks to their completion value.
4.014941
3.240557
1.238966
if item.location.block_type != 'vertical':
    raise ValueError('The passed in xblock is not a vertical type!')
if not self.completion_tracking_enabled():
    return None
# this is temporary local logic and will be removed when the whole course tree is included in completion
child_locations = [
    child.location for child in item.get_children() if child.location.block_type != 'discussion'
]
completions = self.get_completions(child_locations)
for child_location in child_locations:
    if completions[child_location] < 1.0:
        return False
return True
def vertical_is_complete(self, item)
Calculates and returns whether a particular vertical is complete. The logic in this method is temporary, and will go away once the completion API is able to store a first-order notion of completeness for parent blocks (right now it just stores completion for leaves- problems, HTML, video, etc.).
5.89787
4.92837
1.196718
return (
    XBlockCompletionMode.get_mode(block) == XBlockCompletionMode.COMPLETABLE
    and not getattr(block, 'has_custom_completion', False)
    and not getattr(block, 'has_score', False)
)
def can_mark_block_complete_on_view(self, block)
Returns True if the xblock can be marked complete on view. This is true of any non-customized, non-scorable, completable block.
5.102518
3.582889
1.424135
blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)}
completions = self.get_completions({block.location for block in blocks})
return {block for block in blocks if completions.get(block.location, 0) < 1.0}
def blocks_to_mark_complete_on_view(self, blocks)
Returns a set of blocks which should be marked complete on view and haven't been yet.
3.648817
3.120148
1.169437
if users is None:
    users = []
if user_ids is None:
    user_ids = []
more_users = User.objects.filter(id__in=user_ids)
if len(more_users) < len(user_ids):
    found_ids = {u.id for u in more_users}
    not_found_ids = [pk for pk in user_ids if pk not in found_ids]
    raise User.DoesNotExist("User not found with id(s): {}".format(not_found_ids))
users.extend(more_users)
submitted = []
for user in users:
    submitted.append(BlockCompletion.objects.submit_completion(
        user=user,
        course_key=self._course_key,
        block_key=block_key,
        completion=completion,
    ))
return submitted
def submit_group_completion(self, block_key, completion, users=None, user_ids=None)
Submit a completion for a group of users. Arguments: block_key (opaque_key.edx.keys.UsageKey): The block to submit completions for. completion (float): A value in the range [0.0, 1.0] users ([django.contrib.auth.models.User]): An optional iterable of Users that completed the block. user_ids ([int]): An optional iterable of ids of Users that completed the block. Returns a list of (BlockCompletion, bool) where the boolean indicates whether the given BlockCompletion was newly created.
2.315657
2.292967
1.009896
return BlockCompletion.objects.submit_completion(
    user=self._user,
    course_key=self._course_key,
    block_key=block_key,
    completion=completion,
)
def submit_completion(self, block_key, completion)
Submit a completion for the service user and course. Returns a (BlockCompletion, bool) where the boolean indicates whether the given BlockCompletion was newly created.
3.316936
2.83713
1.169117
last_completed_block = BlockCompletion.get_latest_block_completed(user, course_key)
if last_completed_block is not None:
    return last_completed_block.block_key
raise UnavailableCompletionData(course_key)
def get_key_to_last_completed_course_block(user, course_key)
Returns the last block a "user" completed in a course (stated as "course_key"). raises UnavailableCompletionData when the user has not completed blocks in the course. raises UnavailableCompletionData when the visual progress waffle flag is disabled.
4.419707
3.130496
1.411823
try:
    response.raise_for_status()
except RequestException:
    try:
        msg = 'Response code: {}; response body:\n{}'.format(
            response.status_code, json.dumps(response.json(), indent=2))
        raise CerberusClientException(msg)
    except ValueError:
        msg = 'Response code: {}; response body:\n{}'.format(response.status_code, response.text)
        raise CerberusClientException(msg)
def throw_if_bad_response(response)
Throw an exception if the Cerberus response is not successful.
2.19041
1.994163
1.09841
'''Initialize a KeenClient instance using environment variables.'''
global _client, project_id, write_key, read_key, master_key, base_url
if _client is None:
    # check environment for project ID and keys
    project_id = project_id or os.environ.get("KEEN_PROJECT_ID")
    write_key = write_key or os.environ.get("KEEN_WRITE_KEY")
    read_key = read_key or os.environ.get("KEEN_READ_KEY")
    master_key = master_key or os.environ.get("KEEN_MASTER_KEY")
    base_url = base_url or os.environ.get("KEEN_BASE_URL")
    if not project_id:
        raise InvalidEnvironmentError(
            "Please set the KEEN_PROJECT_ID environment variable or set keen.project_id!")
    _client = KeenClient(project_id,
                         write_key=write_key,
                         read_key=read_key,
                         master_key=master_key,
                         base_url=base_url)
def _initialize_client_from_environment()
Initialize a KeenClient instance using environment variables.
1.896259
1.82706
1.037875
_initialize_client_from_environment()
_client.add_event(event_collection, body, timestamp=timestamp)
def add_event(event_collection, body, timestamp=None)
Adds an event. Depending on the persistence strategy of the client, this will either result in the event being uploaded to Keen immediately or will result in saving the event to some local cache. :param event_collection: the name of the collection to insert the event to :param body: dict, the body of the event to insert the event to :param timestamp: datetime, optional, the timestamp of the event
6.717342
11.936584
0.562752
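A hedged usage sketch of the module-level API above (requires valid Keen credentials; the collection name and event body are hypothetical):

import os
import keen

# Credentials are picked up from the environment on first use.
os.environ["KEEN_PROJECT_ID"] = "<your-project-id>"
os.environ["KEEN_WRITE_KEY"] = "<your-write-key>"

# Record one event in the "purchases" collection.
keen.add_event("purchases", {"item": "golden widget", "price": 29.95})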
_initialize_client_from_environment()
return _client.generate_image_beacon(event_collection, body, timestamp=timestamp)
def generate_image_beacon(event_collection, body, timestamp=None)
Generates an image beacon URL. :param event_collection: the name of the collection to insert the event to :param body: dict, the body of the event to insert the event to :param timestamp: datetime, optional, the timestamp of the event
6.114973
11.915482
0.513196
_initialize_client_from_environment()
return _client.count(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
                     interval=interval, filters=filters, group_by=group_by, order_by=order_by,
                     max_age=max_age, limit=limit)
def count(event_collection, timeframe=None, timezone=None, interval=None, filters=None, group_by=None, order_by=None, max_age=None, limit=None)
Performs a count query

Counts the number of events that meet the given criteria.

:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe and interval in seconds
:param interval: string, the time interval used for measuring data over time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param order_by: dictionary or list of dictionary objects containing the property_name(s) to order by and the desired direction(s) of sorting. Example: {"property_name":"result", "direction":keen.direction.DESCENDING} May not be used without a group_by specified.
:param limit: positive integer limiting the displayed results of a query using order_by
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're willing to trade for increased query performance, in seconds
2.169784
2.786466
0.778687
_initialize_client_from_environment()
return _client.sum(event_collection=event_collection, timeframe=timeframe, timezone=timezone,
                   interval=interval, filters=filters, group_by=group_by, order_by=order_by,
                   target_property=target_property, max_age=max_age, limit=limit)
def sum(event_collection, target_property, timeframe=None, timezone=None, interval=None, filters=None, group_by=None, order_by=None, max_age=None, limit=None)
Performs a sum query

Adds the values of a target property for events that meet the given criteria.

:param event_collection: string, the name of the collection to query
:param target_property: string, the name of the event property you would like use
:param timeframe: string or dict, the timeframe in which the events happened example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe and interval in seconds
:param interval: string, the time interval used for measuring data over time example: "daily"
:param filters: array of dict, contains the filters you'd like to apply to the data example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would like to group your results by. example: "customer.id" or ["browser","operating_system"]
:param order_by: dictionary or list of dictionary objects containing the property_name(s) to order by and the desired direction(s) of sorting. Example: {"property_name":"result", "direction":keen.direction.DESCENDING} May not be used without a group_by specified.
:param limit: positive integer limiting the displayed results of a query using order_by
:param max_age: an integer, greater than 30 seconds, the maximum 'staleness' you're willing to trade for increased query performance, in seconds
2.267616
2.953087
0.76788
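A matching sketch for the sum wrapper; the collection and property names are illustrative:

import keen

# Sum purchase prices per browser over the previous seven days.
revenue = keen.sum(
    "purchases",
    target_property="purchase.price",
    timeframe="previous_7_days",
    group_by="browser")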
_initialize_client_from_environment()
return _client.extraction(event_collection=event_collection, timeframe=timeframe,
                          timezone=timezone, filters=filters, latest=latest,
                          email=email, property_names=property_names)
def extraction(event_collection, timeframe=None, timezone=None, filters=None, latest=None, email=None, property_names=None)
Performs a data extraction.

Returns either a JSON object of events or a response indicating an email will be sent to you with the data.

:param event_collection: string, the name of the collection to query
:param timeframe: string or dict, the timeframe in which the events happened
    example: "previous_7_days"
:param timezone: int, the timezone you'd like to use for the timeframe and interval, in seconds
:param filters: array of dicts, the filters you'd like to apply to the data
    example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param latest: int, the number of most recent records you'd like to return
:param email: string, optional, an email address to email results to
:param property_names: string or list of strings, used to limit the properties returned
2.890873
4.491976
0.643564
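A usage sketch for the extraction wrapper; names are illustrative:

import keen

# Pull the 100 most recent pageview events, keeping only two properties.
events = keen.extraction(
    "pageviews",
    timeframe="previous_7_days",
    latest=100,
    property_names=["user.id", "url"])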
_initialize_client_from_environment()
return _client.multi_analysis(event_collection=event_collection, timeframe=timeframe,
                              interval=interval, timezone=timezone, filters=filters,
                              group_by=group_by, order_by=order_by, analyses=analyses,
                              max_age=max_age, limit=limit)
def multi_analysis(event_collection, analyses, timeframe=None, interval=None, timezone=None, filters=None, group_by=None, order_by=None, max_age=None, limit=None)
Performs a multi-analysis query.

Returns a dictionary of analysis results.

:param event_collection: string, the name of the collection to query
:param analyses: dict, the types of analyses you'd like to run
    example: {"total money made":{"analysis_type":"sum","target_property":"purchase.price"},
              "average price":{"analysis_type":"average","target_property":"purchase.price"}}
:param timeframe: string or dict, the timeframe in which the events happened
    example: "previous_7_days"
:param interval: string, the time interval used for measuring data over time
    example: "daily"
:param timezone: int, the timezone you'd like to use for the timeframe and interval, in seconds
:param filters: array of dicts, the filters you'd like to apply to the data
    example: [{"property_name":"device", "operator":"eq", "property_value":"iPhone"}]
:param group_by: string or array of strings, the name(s) of the properties you would like to group your results by
    example: "customer.id" or ["browser","operating_system"]
:param order_by: dictionary or list of dictionaries containing the property_name(s) to order by and the desired direction(s) of sorting
    example: {"property_name":"result", "direction":keen.direction.DESCENDING}
    May not be used without group_by specified.
:param limit: positive integer limiting the displayed results of a query using order_by
:param max_age: an integer greater than 30 seconds, the maximum 'staleness' you're willing to trade for increased query performance, in seconds
2.214658
2.987409
0.741331
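A usage sketch for the multi-analysis wrapper, mirroring the corrected analyses example from the docstring:

import keen

# One request, two analyses over the same collection.
results = keen.multi_analysis(
    "purchases",
    analyses={
        "total money made": {"analysis_type": "sum",
                             "target_property": "purchase.price"},
        "average price": {"analysis_type": "average",
                          "target_property": "purchase.price"},
    },
    timeframe="previous_7_days")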
_initialize_client_from_environment()
return _client.create_access_key(name=name, is_active=is_active,
                                 permitted=permitted, options=options)
def create_access_key(name, is_active=True, permitted=[], options={})
Creates a new access key. A master key must be set first.

:param name: the name of the access key to create
:param is_active: boolean dictating whether this key is currently active (default True)
:param permitted: list of strings describing which operation types this key will permit
    Legal values include "writes", "queries", "saved_queries", "cached_queries",
    "datasets", and "schema".
:param options: dictionary containing more details about the key's permitted and
    restricted functionality
3.662294
5.481589
0.668108
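A usage sketch for the key-creation wrapper. One design note: the signature uses mutable default arguments (permitted=[], options={}), which is safe here only because the wrapper never mutates them; permitted=None with an in-function default would be the more defensive Python idiom.

import keen

# Create a key that may only run ad-hoc and saved queries.
key = keen.create_access_key(
    "reporting-dashboard",
    is_active=True,
    permitted=["queries", "saved_queries"])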
_initialize_client_from_environment()
return _client.update_access_key_full(access_key_id, name, is_active,
                                      permitted, options)
def update_access_key_full(access_key_id, name, is_active, permitted, options)
Replaces the 'name', 'is_active', 'permitted', and 'options' values of a given key.
A master key must be set first.

:param access_key_id: the 'key' value of the access key for which the values will be replaced
:param name: the new name desired for this access key
:param is_active: whether the key should become enabled (True) or revoked (False)
:param permitted: the new list of permissions desired for this access key
:param options: the new dictionary of options for this access key
3.618376
4.860915
0.744382
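A usage sketch for the full-update wrapper; the key id is a placeholder, not a real value:

import keen

# Replace every mutable field of the key in one call.
keen.update_access_key_full(
    "ACCESS_KEY_ID",
    name="reporting-dashboard-v2",
    is_active=False,
    permitted=["queries"],
    options={})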
try:
    OPENJP2.opj_version.restype = ctypes.c_char_p
except:
    # The opj_version symbol is unavailable; report a null version.
    return "0.0.0"
v = OPENJP2.opj_version()
# opj_version returns bytes under Python 3 and str under Python 2.
return v.decode('utf-8') if sys.hexversion >= 0x03000000 else v
def version()
Wrapper for opj_version library routine.
3.838221
3.207042
1.19681
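A sketch of how version() might gate features on the runtime library; the import name openjp2 for this wrapper module is an assumption:

import openjp2  # hypothetical import name for this wrapper module

major, minor, patch = [int(x) for x in openjp2.version().split('.')]
if (major, minor) < (2, 1):
    raise RuntimeError("OpenJPEG 2.1 or newer is required.")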
global ERROR_MSG_LST
if status != 1:
    if len(ERROR_MSG_LST) > 0:
        # Clear out the existing error message so that we don't pick up
        # a bad one next time around.
        msg = '\n'.join(ERROR_MSG_LST)
        ERROR_MSG_LST = []
        raise OpenJPEGLibraryError(msg)
    else:
        raise OpenJPEGLibraryError("OpenJPEG function failure.")
def check_error(status)
Set a generic function as the restype attribute of all OpenJPEG functions that return a BOOL_TYPE value. This way we do not have to check for error status in each wrapping function and an exception will always be appropriately raised.
5.185017
4.133392
1.254422
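A sketch of the wiring the docstring describes: opj_end_decompress is a boolean-returning routine in the openjp2 C API, and assigning check_error as its restype makes ctypes pass every raw return value through the checker:

# ctypes invokes the restype callable on the raw C return value, so any
# status other than 1 raises OpenJPEGLibraryError at the call site.
OPENJP2.opj_end_decompress.restype = check_error
OPENJP2.opj_end_decompress.argtypes = [CODEC_TYPE, STREAM_TYPE_P]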
OPENJP2.opj_create_compress.restype = CODEC_TYPE
OPENJP2.opj_create_compress.argtypes = [CODEC_FORMAT_TYPE]
codec = OPENJP2.opj_create_compress(codec_format)
return codec
def create_compress(codec_format)
Creates a J2K/JP2 compress structure.

Wraps the openjp2 library function opj_create_compress.

Parameters
----------
codec_format : int
    Specifies codec to select. Should be one of CODEC_J2K or CODEC_JP2.

Returns
-------
codec : Reference to CODEC_TYPE instance.
3.795026
3.130658
1.212213
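A one-line usage sketch; per the docstring, the CODEC_JP2 constant is defined elsewhere in this wrapper module:

# Request a compressor for the JP2 container format.
codec = create_compress(CODEC_JP2)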
OPENJP2.opj_decode.argtypes = [CODEC_TYPE, STREAM_TYPE_P,
                               ctypes.POINTER(ImageType)]
OPENJP2.opj_decode.restype = check_error
OPENJP2.opj_decode(codec, stream, image)
def decode(codec, stream, image)
Reads an entire image.

Wraps the openjp2 library function opj_decode.

Parameters
----------
codec : CODEC_TYPE
    The JPEG2000 codec.
stream : STREAM_TYPE_P
    The stream to decode.
image : ImageType
    Output image structure.

Raises
------
RuntimeError
    If the OpenJPEG library routine opj_decode fails.
5.42951
3.702725
1.466355
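A hedged sketch of the read path decode() belongs to; the sibling wrappers named here (stream_create_default_file_stream, create_decompress, read_header) are assumed to exist elsewhere in this module and to mirror the openjp2 C API:

# All three inputs come from sibling wrappers; decode() raises through
# check_error if opj_decode reports failure.
stream = stream_create_default_file_stream('input.jp2', True)  # assumed wrapper
codec = create_decompress(CODEC_JP2)                           # assumed wrapper
image = read_header(stream, codec)                             # assumed wrapper
decode(codec, stream, image)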