code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def maximize(func, x0, nmr_observations, **kwargs):
    """Maximize a function by minimizing its negation.

    Wraps the objective function such that both its return value and the
    filled objective list are negated, then forwards everything to one of the
    minimization routines via :func:`minimize`.

    Args:
        func (mot.lib.cl_function.CLFunction): A CL function with the signature:

            .. code-block:: c

                double <func_name>(local const mot_float_type* const x,
                                   void* data,
                                   local mot_float_type* objective_list);

            When the provided pointer is not null, the objective list must be
            filled with the function value for each observation. This list is
            used by non-linear least-squares routines and will be squared by
            the least-square optimizer (only used by ``Levenberg-Marquardt``).
        x0 (ndarray): Initial guess, of size (n, p) for 'n' problems and 'p'
            independent variables.
        nmr_observations (int): the number of observations returned by the
            optimization function.
        **kwargs: see :func:`minimize`.
    """
    negated_func = SimpleCLFunction.from_string('''
        double _negate_''' + func.get_cl_function_name() + '''(
                local mot_float_type* x,
                void* data,
                local mot_float_type* objective_list){

            double return_val = ''' + func.get_cl_function_name() + '''(x, data, objective_list);

            if(objective_list){
                const uint nmr_observations = ''' + str(nmr_observations) + ''';
                uint local_id = get_local_id(0);
                uint workgroup_size = get_local_size(0);

                uint observation_ind;
                for(uint i = 0; i < (nmr_observations + workgroup_size - 1) / workgroup_size; i++){
                    observation_ind = i * workgroup_size + local_id;

                    if(observation_ind < nmr_observations){
                        objective_list[observation_ind] *= -1;
                    }
                }
            }
            return -return_val;
        }
    ''', dependencies=[func])

    kwargs['nmr_observations'] = nmr_observations
    return minimize(negated_func, x0, **kwargs)
4.227765
3.189315
1.325603
def get_minimizer_options(method):
    """Return the default option set for the given minimization method.

    Args:
        method (str): the name of the method we want the options of

    Returns:
        dict: a dictionary with the default options

    Raises:
        ValueError: if the method name is not recognized
    """
    defaults = {
        'Powell': {'patience': 2,
                   'patience_line_search': None,
                   'reset_method': 'EXTRAPOLATED_POINT'},
        'Nelder-Mead': {'patience': 200,
                        'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5,
                        'scale': 0.1, 'adaptive_scales': True},
        'Levenberg-Marquardt': {'patience': 250,
                                'step_bound': 100.0,
                                'scale_diag': 1,
                                'usertol_mult': 30},
        'Subplex': {'patience': 10,
                    'patience_nmsimplex': 100,
                    'alpha': 1.0, 'beta': 0.5, 'gamma': 2.0, 'delta': 0.5,
                    'scale': 1.0, 'psi': 0.0001, 'omega': 0.01,
                    'adaptive_scales': True,
                    'min_subspace_length': 'auto',
                    'max_subspace_length': 'auto'},
    }
    if method not in defaults:
        raise ValueError('Could not find the specified method "{}".'.format(method))
    return defaults[method]
3.86172
3.92338
0.984284
def _clean_options(method, provided_options):
    """Clean the given input options.

    Ensures every option supported by ``method`` is present, taking the value
    from ``provided_options`` when supplied and the method default otherwise.
    Options not supported by the method are silently dropped.

    Args:
        method (str): the method name
        provided_options (dict): the given options, may be None

    Returns:
        dict: the resulting options dictionary
    """
    provided_options = provided_options or {}
    default_options = get_minimizer_options(method)
    # Keep only the supported option names, preferring the provided values.
    # (The original loop ignored its own `default` loop variable and did a
    # redundant second dict lookup.)
    return {name: provided_options.get(name, default)
            for name, default in default_options.items()}
2.673778
3.286086
0.813666
def _minimize_powell(func, x0, cl_runtime_info, lower_bounds, upper_bounds,
                     constraints_func=None, data=None, options=None):
    """Minimize the given function using the Powell routine.

    Options:
        patience (int): maximum number of iterations is
            patience * (number_of_parameters + 1)
        reset_method (str): one of 'EXTRAPOLATED_POINT' or 'RESET_TO_IDENTITY',
            lower case or upper case.
        patience_line_search (int): the patience of the line search algorithm;
            defaults to the same patience as the Powell algorithm itself.
    """
    options = options or {}
    nmr_problems, nmr_parameters = x0.shape[0], x0.shape[1]

    penalty_data, penalty_func = _get_penalty_function(nmr_parameters, constraints_func)

    # wrap the user function with the boundary/constraint penalty term
    eval_func = SimpleCLFunction.from_string('''
        double evaluate(local mot_float_type* x, void* data){
            double penalty = _mle_penalty(
                x,
                ((_powell_eval_func_data*)data)->data,
                ((_powell_eval_func_data*)data)->lower_bounds,
                ((_powell_eval_func_data*)data)->upper_bounds,
                ''' + str(options.get('penalty_weight', 1e30)) + ''',
                ((_powell_eval_func_data*)data)->penalty_data
            );

            double func_val = ''' + func.get_cl_function_name() + '''(x, ((_powell_eval_func_data*)data)->data, 0);

            if(isnan(func_val)){
                return INFINITY;
            }

            return func_val + penalty;
        }
    ''', dependencies=[func, penalty_func])

    optimizer_func = Powell(eval_func, nmr_parameters, **_clean_options('Powell', options))

    kernel_data = {
        'model_parameters': Array(x0, ctype='mot_float_type', mode='rw'),
        'data': Struct({'data': data,
                        'lower_bounds': lower_bounds,
                        'upper_bounds': upper_bounds,
                        'penalty_data': penalty_data}, '_powell_eval_func_data'),
    }
    kernel_data.update(optimizer_func.get_kernel_data())

    return_code = optimizer_func.evaluate(
        kernel_data, nmr_problems,
        use_local_reduction=all(env.is_gpu for env in cl_runtime_info.cl_environments),
        cl_runtime_info=cl_runtime_info)

    return OptimizeResults({'x': kernel_data['model_parameters'].get_data(),
                            'status': return_code})
4.613266
4.772212
0.966693
def _get_penalty_function(nmr_parameters, constraints_func=None):
    """Build the penalty term for boundary and inequality constraints.

    Meant to be used in the evaluation function of the optimization routines.

    Args:
        nmr_parameters (int): the number of parameters in the model
        constraints_func (mot.optimize.base.ConstraintFunction): optional
            function computing the inequality constraints. Should hold a CL
            function with the signature:

            .. code-block:: c

                void <func_name>(local const mot_float_type* const x,
                                 void* data,
                                 local mot_float_type* constraint_values);

            where ``constraint_values[i] = g_i(x)`` for every constraint
            function :math:`g_i`, formulated as :math:`g_i(x) <= 0`.

    Returns:
        tuple: Struct and SimpleCLFunction, the data required by the penalty
            function and the penalty function itself.
    """
    dependencies = []
    data_requirements = {'scratch': LocalMemory('double', 1)}
    constraints_code = ''

    if constraints_func and constraints_func.get_nmr_constraints() > 0:
        nmr_constraints = constraints_func.get_nmr_constraints()
        dependencies.append(constraints_func)
        data_requirements['constraints'] = LocalMemory('mot_float_type', nmr_constraints)

        constraints_code = '''
            local mot_float_type* constraints = ((_mle_penalty_data*)scratch_data)->constraints;

            ''' + constraints_func.get_cl_function_name() + '''(x, data, constraints);

            for(int i = 0; i < ''' + str(nmr_constraints) + '''; i++){
                *penalty_sum += pown(max((mot_float_type)0, constraints[i]), 2);
            }
        '''

    data = Struct(data_requirements, '_mle_penalty_data')
    func = SimpleCLFunction.from_string('''
        double _mle_penalty(
                local mot_float_type* x,
                void* data,
                local mot_float_type* lower_bounds,
                local mot_float_type* upper_bounds,
                float penalty_weight,
                void* scratch_data){

            local double* penalty_sum = ((_mle_penalty_data*)scratch_data)->scratch;

            if(get_local_id(0) == 0){
                *penalty_sum = 0;

                // boundary conditions
                for(int i = 0; i < ''' + str(nmr_parameters) + '''; i++){
                    if(isfinite(upper_bounds[i])){
                        *penalty_sum += pown(max((mot_float_type)0, x[i] - upper_bounds[i]), 2);
                    }
                    if(isfinite(lower_bounds[i])){
                        *penalty_sum += pown(max((mot_float_type)0, lower_bounds[i] - x[i]), 2);
                    }
                }
            }
            barrier(CLK_LOCAL_MEM_FENCE);

            // constraints
            ''' + constraints_code + '''

            return penalty_weight * *penalty_sum;
        }
    ''', dependencies=dependencies)
    return data, func
3.75023
3.116638
1.203293
def from_string(cls, cl_function, dependencies=(), nmr_constraints=None):
    """Build a SimpleConstraintFunction by parsing a CL function string.

    Args:
        cl_function (str): the function we wish to turn into an object
        dependencies (list or tuple of CLLibrary): the list of CL libraries
            this function depends on
        nmr_constraints (int): the number of constraints this function holds

    Returns:
        SimpleConstraintFunction: the parsed constraint function object
    """
    return_type, name, parameters, body = split_cl_function(cl_function)
    return SimpleConstraintFunction(return_type, name, parameters, body,
                                    dependencies=dependencies,
                                    nmr_constraints=nmr_constraints)
3.782197
3.851171
0.98209
def validate_schema(yaml_def, branch=False):
    """Validate the schema of a lane/branch definition dict.

    Parameters
    ----------
    yaml_def : dict
        dict whose schema shall be validated
    branch : bool
        Indicates whether `yaml_def` is a dict of a top-level lane, or of a
        branch inside a lane (needed for recursion)

    Returns
    -------
    bool
        True if validation was successful
    """
    top_key = 'lane' if not branch else 'branch'
    Schema({top_key: {Optional('name'): str,
                      Optional('run_parallel'): bool,
                      'tasks': list}}).validate(yaml_def)

    from schema import And, Use
    task_schema = Schema({
        'class': str,
        Optional('kwargs'): Or({str: object}),
        Optional('args'): Or([object], And(Use(lambda a: isinstance(a, dict)), False))
    })

    def validate_tasks(tasks):  # pylint: disable=missing-docstring
        for task in tasks:
            try:
                # a nested branch is itself validated recursively
                Schema({'branch': dict}).validate(task)
                validate_schema(task, True)
            except SchemaError:
                task_schema.validate(task)
        return True

    return validate_tasks(yaml_def[top_key]['tasks'])
3.9804
4.390933
0.906504
def validate_params(cls, mtd_name, *args, **kwargs):
    """Validate that the given args/kwargs match the method signature.

    Checks if:
    - the attribute is actually a method
    - at least all required args/kwargs are given
    - no redundant args/kwargs are given

    Parameters
    ----------
    cls : Class
    mtd_name : str
        Name of the method whose parameters shall be validated
    args : list
        Positional arguments
    kwargs : dict
        Dict of keyword arguments

    Raises
    ------
    TypeError
        If the attribute is not a method.
    TaskInitializationError
        If the supplied args/kwargs do not match the method signature.
    """
    mtd = getattr(cls, mtd_name)

    # Python 3 has no unbound methods, so plain functions are accepted there
    py3_mtd_condition = (not (inspect.isfunction(mtd) or inspect.ismethod(mtd))
                         and hasattr(cls, mtd_name))
    py2_mtd_condition = (not inspect.ismethod(mtd)
                         and not isinstance(cls.__dict__[mtd_name], staticmethod))
    if (PY3 and py3_mtd_condition) or (PY2 and py2_mtd_condition):
        raise TypeError('Attribute `%s` of class `%s` must be a method. Got type `%s` instead.'
                        % (mtd_name, cls.__name__, type(mtd)))

    req_params, opt_params = arg_spec(cls, mtd_name)
    n_params = len(req_params) + len(opt_params)
    n_args_kwargs = len(args) + len(kwargs)

    for k in kwargs:
        if k not in req_params and k not in opt_params:
            raise TaskInitializationError('kwarg `%s` is not a parameter of callable `%s`.'
                                          % (k, mtd.__name__))

    if n_args_kwargs < len(req_params):
        raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))

    if len(args) > n_params or n_args_kwargs > n_params or len(kwargs) > n_params:
        raise TaskInitializationError('Too many args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))

    # kwargs that are neither an unfilled required parameter nor an optional one
    redundant_p = [p for p in kwargs if p not in req_params[len(args):] + opt_params]
    if redundant_p:
        # fixed error message grammar ("that in the signature" -> "that are not in the signature")
        raise TaskInitializationError('Supplied one or more kwargs that are not in the signature '
                                      'of callable `%s`. Redundant kwargs: %s'
                                      % (mtd.__name__, str(redundant_p)))

    needed_kwargs = req_params[len(args):]
    if not all(p in kwargs for p in needed_kwargs):
        raise TaskInitializationError('Not enough args/kwargs supplied for callable `%s`. '
                                      'Required args: %s' % (mtd.__name__, str(req_params)))
2.344964
2.327914
1.007324
def arg_spec(cls, mtd_name):
    """Cross-version argument signature inspection.

    Parameters
    ----------
    cls : class
    mtd_name : str
        Name of the method to be inspected

    Returns
    -------
    required_params : list of str
        List of required, positional parameters (without ``self``/``cls``)
    optional_params : list of str
        List of optional parameters, i.e. parameters with a default value
    """
    mtd = getattr(cls, mtd_name)
    required_params = []
    optional_params = []

    if hasattr(inspect, 'signature'):  # Python 3
        params = inspect.signature(mtd).parameters  # pylint: disable=no-member
        for k in params.keys():
            if params[k].default == inspect.Parameter.empty:  # pylint: disable=no-member
                # Python 3 does not make a difference between unbound methods and
                # functions, so the only way to distinguish if the first argument
                # is of a regular method, or a class method, is to look for the
                # conventional argument name. Yikes.
                if not (params[k].name == 'self' or params[k].name == 'cls'):
                    required_params.append(k)
            else:
                optional_params.append(k)
    else:  # Python 2
        params = inspect.getargspec(mtd)  # pylint: disable=deprecated-method
        num = len(params[0]) if params[0] else 0
        n_opt = len(params[3]) if params[3] else 0
        n_req = (num - n_opt) if n_opt <= num else 0
        for i in range(0, n_req):
            required_params.append(params[0][i])
        for i in range(n_req, num):
            optional_params.append(params[0][i])

        # Bugfix: only the Python 2 `getargspec` branch still contains the
        # implicit first argument (self/cls); the Python 3 branch above already
        # skips it by name. Previously this deletion ran unconditionally, which
        # on Python 3 stripped the first *real* required parameter.
        if inspect.isroutine(getattr(cls, mtd_name)):
            bound_mtd = cls.__dict__[mtd_name]
            if not isinstance(bound_mtd, staticmethod):
                del required_params[0]

    return required_params, optional_params
2.633827
2.638724
0.998144
def uniform(nmr_distributions, nmr_samples, low=0, high=1, ctype='float', seed=None):
    """Draw random samples from the Uniform distribution.

    Args:
        nmr_distributions (int): the number of unique continuous_distributions to create
        nmr_samples (int): the number of samples to draw per distribution
        low (double): the lower bound of the random numbers
        high (double): the upper bound of the random numbers
        ctype (str): the C type of the output samples
        seed (float): the seed for the RNG

    Returns:
        ndarray: a two dimensional numpy array as (nmr_distributions, nmr_samples).
    """
    if is_scalar(low):
        low = np.ones((nmr_distributions, 1)) * low
    if is_scalar(high):
        high = np.ones((nmr_distributions, 1)) * high

    kernel_data = {'low': Array(low, as_scalar=True),
                   'high': Array(high, as_scalar=True)}

    kernel = SimpleCLFunction.from_string('''
        void compute(double low, double high, global uint* rng_state,
                     global ''' + ctype + '''* samples){

            rand123_data rand123_rng_data = rand123_initialize_data((uint[]){
                rng_state[0], rng_state[1], rng_state[2],
                rng_state[3], rng_state[4], rng_state[5], 0});
            void* rng_data = (void*)&rand123_rng_data;

            for(uint i = 0; i < ''' + str(nmr_samples) + '''; i++){
                double4 randomnr = rand4(rng_data);
                samples[i] = (''' + ctype + ''')(low + randomnr.x * (high - low));
            }
        }
    ''', dependencies=[Rand123()])

    return _generate_samples(kernel, nmr_distributions, nmr_samples, ctype,
                             kernel_data, seed=seed)
3.748317
3.784362
0.990475
def normal(nmr_distributions, nmr_samples, mean=0, std=1, ctype='float', seed=None):
    """Draw random samples from the Gaussian distribution.

    Args:
        nmr_distributions (int): the number of unique continuous_distributions to create
        nmr_samples (int): the number of samples to draw per distribution
        mean (float or ndarray): the mean of the distribution
        std (float or ndarray): the standard deviation of the distribution
        ctype (str): the C type of the output samples
        seed (float): the seed for the RNG

    Returns:
        ndarray: a two dimensional numpy array as (nmr_distributions, nmr_samples).
    """
    if is_scalar(mean):
        mean = np.ones((nmr_distributions, 1)) * mean
    if is_scalar(std):
        std = np.ones((nmr_distributions, 1)) * std

    kernel_data = {'mean': Array(mean, as_scalar=True),
                   'std': Array(std, as_scalar=True)}

    kernel = SimpleCLFunction.from_string('''
        void compute(double mean, double std, global uint* rng_state,
                     global ''' + ctype + '''* samples){

            rand123_data rand123_rng_data = rand123_initialize_data((uint[]){
                rng_state[0], rng_state[1], rng_state[2],
                rng_state[3], rng_state[4], rng_state[5], 0});
            void* rng_data = (void*)&rand123_rng_data;

            for(uint i = 0; i < ''' + str(nmr_samples) + '''; i++){
                double4 randomnr = randn4(rng_data);
                samples[i] = (''' + ctype + ''')(mean + randomnr.x * std);
            }
        }
    ''', dependencies=[Rand123()])

    return _generate_samples(kernel, nmr_distributions, nmr_samples, ctype,
                             kernel_data, seed=seed)
3.809194
3.812376
0.999165
def sample(self, nmr_samples, burnin=0, thinning=1):
    """Take additional samples using this sampler.

    May be called multiple times; the sample state is kept in between calls.

    Args:
        nmr_samples (int): the number of samples to return
        burnin (int): the number of samples to discard before returning samples
        thinning (int): how many samples we wait before storing a new one.
            Extra samples are drawn so that ``nmr_samples * thinning`` samples
            are generated in total while ``nmr_samples`` are stored. Values of
            one or lower store every sample after the burn-in.

    Returns:
        SamplingOutput: the sample output object
    """
    thinning = thinning if thinning and thinning >= 1 else 1
    burnin = burnin if burnin and burnin >= 0 else 0

    # cap the batch size to keep device memory usage bounded
    max_samples_per_batch = max(1000 // thinning, 100)

    with self._logging(nmr_samples, burnin, thinning):
        if burnin > 0:
            for batch_start, batch_end in split_in_batches(burnin, max_samples_per_batch):
                self._sample(batch_end - batch_start, return_output=False)
        if nmr_samples > 0:
            outputs = []
            for batch_start, batch_end in split_in_batches(nmr_samples, max_samples_per_batch):
                outputs.append(self._sample(batch_end - batch_start, thinning=thinning))
            return SimpleSampleOutput(
                *[np.concatenate([o[ind] for o in outputs], axis=-1) for ind in range(3)])
2.762755
2.712454
1.018545
def _sample(self, nmr_samples, thinning=1, return_output=True):
    """Advance the sampler by the given number of (thinned) samples.

    Args:
        nmr_samples (int): the number of iterations to advance the sampler
        thinning (int): the thinning to apply
        return_output (boolean): if we should return the sampled output

    Returns:
        None or tuple: if ``return_output`` is True, the three ndarrays
            (samples, log_likelihoods, log_priors), else None.
    """
    kernel_data = self._get_kernel_data(nmr_samples, thinning, return_output)
    sample_func = self._get_compute_func(nmr_samples, thinning, return_output)

    sample_func.evaluate(
        kernel_data, self._nmr_problems,
        use_local_reduction=all(env.is_gpu for env in self._cl_runtime_info.cl_environments),
        cl_runtime_info=self._cl_runtime_info)

    # the state advanced by the full (unthinned) number of iterations
    self._sampling_index += nmr_samples * thinning

    if return_output:
        return (kernel_data['samples'].get_data(),
                kernel_data['log_likelihoods'].get_data(),
                kernel_data['log_priors'].get_data())
4.069725
4.083171
0.996707
def _initialize_likelihood_prior(self, positions, log_likelihoods, log_priors):
    """Fill the likelihood and prior arrays for the given chain positions.

    General helper for computing the log likelihoods and log priors of given
    positions; subclasses can use this to instantiate secondary chains as well.
    """
    func = SimpleCLFunction.from_string('''
        void compute(global mot_float_type* chain_position,
                     global mot_float_type* log_likelihood,
                     global mot_float_type* log_prior,
                     local mot_float_type* x_tmp,
                     void* data){

            bool is_first_work_item = get_local_id(0) == 0;

            if(is_first_work_item){
                for(uint i = 0; i < ''' + str(self._nmr_params) + '''; i++){
                    x_tmp[i] = chain_position[i];
                }
                *log_prior = _computeLogPrior(x_tmp, data);
            }
            barrier(CLK_LOCAL_MEM_FENCE);

            *log_likelihood = _computeLogLikelihood(x_tmp, data);
        }
    ''', dependencies=[self._get_log_prior_cl_func(),
                       self._get_log_likelihood_cl_func()])

    kernel_data = {
        'chain_position': Array(positions, 'mot_float_type',
                                mode='rw', ensure_zero_copy=True),
        'log_likelihood': Array(log_likelihoods, 'mot_float_type',
                                mode='rw', ensure_zero_copy=True),
        'log_prior': Array(log_priors, 'mot_float_type',
                           mode='rw', ensure_zero_copy=True),
        'x_tmp': LocalMemory('mot_float_type', self._nmr_params),
        'data': self._data,
    }

    func.evaluate(kernel_data, self._nmr_problems,
                  use_local_reduction=all(env.is_gpu
                                          for env in self._cl_runtime_info.cl_environments),
                  cl_runtime_info=self._cl_runtime_info)
3.749031
3.770291
0.994361
def _get_kernel_data(self, nmr_samples, thinning, return_output):
    """Assemble the kernel input data for the MCMC sampler.

    Always sets: ``data``, ``method_data``, ``nmr_iterations``,
    ``iteration_offset``, ``rng_state``, ``current_chain_position``,
    ``current_log_likelihood`` and ``current_log_prior``. When
    ``return_output`` is True, additionally adds the output arrays
    ``samples``, ``log_likelihoods`` and ``log_priors``.

    Args:
        nmr_samples (int): the number of samples we will draw
        thinning (int): the thinning factor we want to use
        return_output (boolean): if the kernel should return output

    Returns:
        dict[str: mot.lib.utils.KernelData]: the kernel input data
    """
    kernel_data = {
        'data': self._data,
        'method_data': self._get_mcmc_method_kernel_data(),
        'nmr_iterations': Scalar(nmr_samples * thinning, ctype='ulong'),
        'iteration_offset': Scalar(self._sampling_index, ctype='ulong'),
        'rng_state': Array(self._rng_state, 'uint', mode='rw', ensure_zero_copy=True),
        'current_chain_position': Array(self._current_chain_position, 'mot_float_type',
                                        mode='rw', ensure_zero_copy=True),
        'current_log_likelihood': Array(self._current_log_likelihood, 'mot_float_type',
                                        mode='rw', ensure_zero_copy=True),
        'current_log_prior': Array(self._current_log_prior, 'mot_float_type',
                                   mode='rw', ensure_zero_copy=True),
    }
    if return_output:
        kernel_data['samples'] = Zeros(
            (self._nmr_problems, self._nmr_params, nmr_samples), ctype='mot_float_type')
        kernel_data['log_likelihoods'] = Zeros(
            (self._nmr_problems, nmr_samples), ctype='mot_float_type')
        kernel_data['log_priors'] = Zeros(
            (self._nmr_problems, nmr_samples), ctype='mot_float_type')
    return kernel_data
2.738355
2.111799
1.296693
def _get_compute_func(self, nmr_samples, thinning, return_output):
    """Assemble the MCMC sampling loop as a computable CL function.

    Args:
        nmr_samples (int): the number of samples we will draw
        thinning (int): the thinning factor we want to use
        return_output (boolean): if the kernel should store and return output

    Returns:
        mot.lib.cl_function.CLFunction: the compute function
    """
    output_params = '''global mot_float_type* samples,
                 global mot_float_type* log_likelihoods,
                 global mot_float_type* log_priors,''' if return_output else ''

    cl_func = '''
        void compute(global uint* rng_state,
                     global mot_float_type* current_chain_position,
                     global mot_float_type* current_log_likelihood,
                     global mot_float_type* current_log_prior,
                     ulong iteration_offset,
                     ulong nmr_iterations,
                     ''' + output_params + '''
                     void* method_data,
                     void* data){

            bool is_first_work_item = get_local_id(0) == 0;

            rand123_data rand123_rng_data = rand123_initialize_data((uint[]){
                rng_state[0], rng_state[1], rng_state[2],
                rng_state[3], rng_state[4], rng_state[5], 0, 0});
            void* rng_data = (void*)&rand123_rng_data;

            for(ulong i = 0; i < nmr_iterations; i++){
    '''
    if return_output:
        cl_func += '''
                if(is_first_work_item){
                    if(i % ''' + str(thinning) + ''' == 0){
                        log_likelihoods[i / ''' + str(thinning) + '''] = *current_log_likelihood;
                        log_priors[i / ''' + str(thinning) + '''] = *current_log_prior;

                        for(uint j = 0; j < ''' + str(self._nmr_params) + '''; j++){
                            samples[(ulong)(i / ''' + str(thinning) + ''') // remove the interval
                                    + j * ''' + str(nmr_samples) + '''  // parameter index
                            ] = current_chain_position[j];
                        }
                    }
                }
        '''
    cl_func += '''
                _advanceSampler(method_data, data, i + iteration_offset, rng_data,
                                current_chain_position, current_log_likelihood,
                                current_log_prior);
            }

            if(is_first_work_item){
                uint state[8];
                rand123_data_to_array(rand123_rng_data, state);
                for(uint i = 0; i < 6; i++){
                    rng_state[i] = state[i];
                }
            }
        }
    '''
    return SimpleCLFunction.from_string(
        cl_func,
        dependencies=[Rand123(),
                      self._get_log_prior_cl_func(),
                      self._get_log_likelihood_cl_func(),
                      SimpleCLCodeObject(self._get_state_update_cl_func(
                          nmr_samples, thinning, return_output))])
3.531275
3.498425
1.00939
def _get_log_prior_cl_func(self):
    """Build the CL wrapper that computes the log prior.

    Returns:
        mot.lib.cl_function.CLFunction: a function exposing ``_computeLogPrior``,
            delegating to the configured prior function.
    """
    prior_name = self._log_prior_func.get_cl_function_name()
    return SimpleCLFunction.from_string('''
        mot_float_type _computeLogPrior(local const mot_float_type* x, void* data){
            return ''' + prior_name + '''(x, data);
        }
    ''', dependencies=[self._log_prior_func])
10.620206
11.994408
0.88543
def _get_log_likelihood_cl_func(self):
    """Build the CL wrapper that computes the log likelihood.

    Returns:
        mot.lib.cl_function.CLFunction: a function exposing
            ``_computeLogLikelihood``, delegating to the configured
            likelihood function for the current position.
    """
    ll_name = self._ll_func.get_cl_function_name()
    return SimpleCLFunction.from_string('''
        double _computeLogLikelihood(local const mot_float_type* current_position, void* data){
            return ''' + ll_name + '''(current_position, data);
        }
    ''', dependencies=[self._ll_func])
12.509809
12.980324
0.963752
def _get_mcmc_method_kernel_data_elements(self):
    """Return the kernel data elements specific to this MCMC method.

    Used by :meth:`_get_mcmc_method_kernel_data`.
    """
    return {
        'proposal_stds': Array(self._proposal_stds, 'mot_float_type',
                               mode='rw', ensure_zero_copy=True),
        'x_tmp': LocalMemory('mot_float_type', nmr_items=1 + self._nmr_params),
    }
25.912388
23.135136
1.120045
def compute_log_likelihood(ll_func, parameters, data=None, cl_runtime_info=None):
    """Calculate and return the log likelihood of the given model.

    Supports a per-problem evaluation (typically after optimization) with
    ``parameters`` of shape (d, p), and a per-sample evaluation (typically
    after sampling) with shape (d, p, n), for d problems, p parameters and
    n samples.

    Args:
        ll_func (mot.lib.cl_function.CLFunction): The log-likelihood function,
            a CL function with the signature:

            .. code-block:: c

                double <func_name>(local const mot_float_type* const x, void* data);

        parameters (ndarray): an (d, p) or (d, p, n) matrix of parameters.
        data (mot.lib.kernel_data.KernelData): the user provided data for the
            ``void* data`` pointer.
        cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information

    Returns:
        ndarray: per problem the log likelihood, or per problem and per
            sample the log likelihood.
    """
    def build_compute_func():
        nmr_params = parameters.shape[1]

        if len(parameters.shape) > 2:
            # per-sample evaluation: loop the samples inside the kernel
            nmr_samples = parameters.shape[2]
            return SimpleCLFunction.from_string('''
                void compute(global mot_float_type* parameters,
                             global mot_float_type* log_likelihoods,
                             void* data){

                    local mot_float_type x[''' + str(nmr_params) + '''];

                    for(uint sample_ind = 0; sample_ind < ''' + str(nmr_samples) + '''; sample_ind++){
                        for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){
                            x[i] = parameters[i * ''' + str(nmr_samples) + ''' + sample_ind];
                        }

                        double ll = ''' + ll_func.get_cl_function_name() + '''(x, data);
                        if(get_local_id(0) == 0){
                            log_likelihoods[sample_ind] = ll;
                        }
                    }
                }
            ''', dependencies=[ll_func])

        return SimpleCLFunction.from_string('''
            void compute(local mot_float_type* parameters,
                         global mot_float_type* log_likelihoods,
                         void* data){

                double ll = ''' + ll_func.get_cl_function_name() + '''(parameters, data);
                if(get_local_id(0) == 0){
                    *(log_likelihoods) = ll;
                }
            }
        ''', dependencies=[ll_func])

    shape = parameters.shape
    kernel_data = {'data': data,
                   'parameters': Array(parameters, 'mot_float_type', mode='r')}
    if len(shape) > 2:
        kernel_data['log_likelihoods'] = Zeros((shape[0], shape[2]), 'mot_float_type')
    else:
        kernel_data['log_likelihoods'] = Zeros((shape[0],), 'mot_float_type')

    build_compute_func().evaluate(kernel_data, parameters.shape[0],
                                  use_local_reduction=True,
                                  cl_runtime_info=cl_runtime_info)

    return kernel_data['log_likelihoods'].get_data()
2.802325
2.565733
1.092212
def compute_objective_value(objective_func, parameters, data=None, cl_runtime_info=None):
    """Calculate and return the objective function value of the given model.

    Args:
        objective_func (mot.lib.cl_function.CLFunction): A CL function with the
            signature:

            .. code-block:: c

                double <func_name>(local const mot_float_type* const x,
                                   void* data,
                                   local mot_float_type* objective_list);

        parameters (ndarray): an (d, p) matrix with d problems and p parameters.
        data (mot.lib.kernel_data.KernelData): the user provided data for the
            ``void* data`` pointer.
        cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information

    Returns:
        ndarray: vector with per problem the objective function value
    """
    kernel_data = {'data': data,
                   'parameters': Array(parameters, 'mot_float_type', mode='r')}
    return objective_func.evaluate(kernel_data, parameters.shape[0],
                                   use_local_reduction=True,
                                   cl_runtime_info=cl_runtime_info)
10.163044
9.199621
1.104724
def get_log(username):
    """Return the stored page views for a user.

    Each item is a dict with `datetime`, `method`, `path` and `code` keys.
    """
    redis = get_redis_client()
    raw_log = redis.lrange('log:{}'.format(username), 0, -1)

    entries = []
    for raw_item in raw_log:
        entry = json.loads(raw_item.decode())
        # stored as a unix timestamp under 'time'; exposed as 'datetime'
        entry['datetime'] = convert_timestamp(entry.pop('time'))
        entries.append(entry)
    return entries
2.336538
2.248174
1.039305
def get_token(username, length=20, timeout=20):
    """Obtain an access token that can be passed to a websocket client.

    The token is stored in Redis under ``token:<token>`` mapping to the
    username and expires after ``timeout`` seconds.

    Args:
        username: the user the token authenticates
        length (int): number of characters in the generated token
        timeout (int): token lifetime in seconds

    Returns:
        str: the generated token
    """
    redis = get_redis_client()
    token = get_random_string(length)
    token_key = 'token:{}'.format(token)
    # set value and expiry atomically in a single SET with `ex`, instead of
    # separate SET + EXPIRE calls which could leave a never-expiring token
    # if the process dies in between
    redis.set(token_key, username, ex=timeout)
    return token
2.368229
2.411502
0.982055
def get_log(self, request, response):
    """Return a dict of data to log for a given request and response.

    Override this method if you need to log a different set of values.
    """
    entry = {
        'method': request.method,
        'path': request.get_full_path(),
        'code': response.status_code,
        'time': time.time(),
    }
    return entry
2.829105
2.590788
1.091986
def download(self, song):
    """Download a song from a Google Music library.

    Parameters:
        song (dict): A song dict.

    Returns:
        tuple: Song content as bytestring, suggested filename.
    """
    response = self._call(mm_calls.Export, self.uploader_id, song['id'])
    audio = response.body

    # the filename comes RFC 5987 encoded in the Content-Disposition header
    disposition = response.headers['Content-Disposition']
    suggested_filename = unquote(disposition.split("filename*=UTF-8''")[-1])

    return (audio, suggested_filename)
5.74194
5.082411
1.129767
def quota(self):
    """Get the uploaded track count and allowance.

    Returns:
        tuple: Number of uploaded tracks, number of tracks allowed.
    """
    response = self._call(mm_calls.ClientState, self.uploader_id)
    state = response.body.clientstate_response
    return (state.total_track_count, state.locker_track_limit)
14.850395
10.346949
1.435244
def songs(self, *, uploaded=True, purchased=True):
    """Get a listing of Music Library songs.

    Parameters:
        uploaded (bool): include uploaded songs.
        purchased (bool): include purchased songs.

    Returns:
        list: Song dicts.

    Raises:
        ValueError: if both flags are False.
    """
    if not uploaded and not purchased:
        raise ValueError("'uploaded' and 'purchased' cannot both be False.")

    def _collect(export_type):
        # flatten the paged iterator into one list
        collected = []
        for chunk in self.songs_iter(export_type=export_type):
            collected.extend(chunk)
        return collected

    if purchased and uploaded:
        song_list = _collect(export_type=1)
    elif purchased:
        song_list = _collect(export_type=2)
    else:
        # uploaded only: all tracks minus the purchased ones
        purchased_songs = _collect(export_type=2)
        song_list = [song
                     for chunk in self.songs_iter(export_type=1)
                     for song in chunk
                     if song not in purchased_songs]

    return song_list
2.075229
2.056439
1.009137
def songs_iter(self, *, continuation_token=None, export_type=1):
    """Get a paged iterator of Music Library songs.

    Parameters:
        continuation_token (str, Optional): The token of the page to return.
            Default: Not sent, to get the first page.
        export_type (int, Optional): The type of tracks to return. 1 for all
            tracks, 2 for promotional and purchased. Default: ``1``

    Yields:
        list: Song dicts for one page.
    """
    def as_dict(track_info):
        return {field.name: value for field, value in track_info.ListFields()}

    while True:
        response = self._call(mm_calls.ExportIDs, self.uploader_id,
                              continuation_token=continuation_token,
                              export_type=export_type)

        page = [as_dict(track_info)
                for track_info in response.body.download_track_info]
        if page:
            yield page

        continuation_token = response.body.continuation_token
        if not continuation_token:
            break
2.939041
2.997832
0.980389
def login(self, username, *, token=None):
	"""Log in to Google Music.

	Parameters:
		username (str): Your Google Music username. Used for keeping
			stored OAuth tokens for multiple accounts separate.
		token (dict, Optional): An OAuth token compatible with
			``requests-oauthlib``.

	Returns:
		bool: ``True`` if successfully authenticated, ``False`` if not.
	"""

	# Remember the account before running the OAuth flow so token
	# storage is keyed per-user.
	self._username = username
	self._oauth(username, token=token)

	return self.is_authenticated
Log in to Google Music. Parameters: username (str, Optional): Your Google Music username. Used for keeping stored OAuth tokens for multiple accounts separate. device_id (str, Optional): A mobile device ID or music manager uploader ID. Default: MAC address is used. token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``. Returns: bool: ``True`` if successfully authenticated, ``False`` if not.
8.028806
8.190268
0.980286
def switch_user(self, username='', *, token=None):
	"""Log in to Google Music with a different user.

	Parameters:
		username (str, Optional): Your Google Music username.
		token (dict, Optional): An OAuth token compatible with
			``requests-oauthlib``.

	Returns:
		bool: ``True`` if successfully authenticated, ``False`` if not
			(including when logout fails).
	"""

	return self.login(username, token=token) if self.logout() else False
Log in to Google Music with a different user. Parameters: username (str, Optional): Your Google Music username. Used for keeping stored OAuth tokens for multiple accounts separate. token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``. Returns: bool: ``True`` if successfully authenticated, ``False`` if not.
6.805519
6.608771
1.029771
def gen_tau(S, K, delta):
	"""Precompute the spike ("robust") component tau of the Robust Soliton
	Distribution, as a list of K weights indexed by degree - 1.

	tau(d) = S/(K*d) for d < K/S, a single spike of S/K * ln(S/delta)
	at d = K/S, and 0 above.
	"""

	pivot = floor(K / S)

	tau = [S / K / d for d in range(1, pivot)]
	tau.append(S / K * log(S / delta))
	tau.extend(0 for _ in range(pivot, K))

	return tau
The Robust part of the RSD, we precompute an array for speed
5.805619
6.425298
0.903556
def gen_mu(K, delta, c):
	"""The Robust Soliton Distribution on the degree of transmitted blocks:
	the normalized sum of the Ideal Soliton rho and the robust component tau.
	"""

	S = c * log(K / delta) * sqrt(K)
	tau = gen_tau(S, K, delta)
	rho = gen_rho(K)

	Z = sum(rho) + sum(tau)

	return [(r + t) / Z for r, t in zip(rho, tau)]
The Robust Soliton Distribution on the degree of transmitted blocks
5.702886
5.777405
0.987102
def gen_rsd_cdf(K, delta, c):
	"""The CDF of the RSD on block degree, precomputed for sampling speed."""

	cdf = []
	running_total = 0.0
	for p in gen_mu(K, delta, c):
		running_total += p
		cdf.append(running_total)

	return cdf
The CDF of the RSD on block degree, precomputed for sampling speed
5.709478
5.584617
1.022358
def _get_next(self):
	"""Advance the linear congruential PRNG one step and return the new state."""

	self.state = (PRNG_A * self.state) % PRNG_M

	return self.state
Executes the next iteration of the PRNG evolution process, and returns the result
19.37051
9.066401
2.136516
def _sample_d(self):
	"""Sample a block degree from the precomputed RSD CDF using the linear
	PRNG output as a uniform variate.

	Returns:
		int: A degree in 1..len(self.cdf).

	Fix: the original fell through the loop and returned ``ix + 1``, which
	raises NameError if ``self.cdf`` is empty; the fall-through value is
	always ``len(self.cdf)``, so return that explicitly.
	"""

	p = self._get_next() / PRNG_MAX_RAND

	# Inverse-CDF sampling: first index whose cumulative mass exceeds p.
	for ix, v in enumerate(self.cdf):
		if v > p:
			return ix + 1

	# p landed beyond the last entry (or cdf is empty): max degree.
	return len(self.cdf)
Samples degree given the precomputed distributions above and the linear PRNG output
10.95883
8.875634
1.23471
def get_src_blocks(self, seed=None):
	"""Return the indices of a set of `d` source blocks sampled from
	indices i = 1, ..., K-1 uniformly, where `d` is sampled from the RSD.

	Parameters:
		seed (int, Optional): If given, reseed the PRNG state first.

	Returns:
		tuple: (blockseed, d, set of block indices).

	Fix: the original used ``if seed:`` which silently ignored an explicit
	``seed=0``; test against ``None`` instead.
	"""

	if seed is not None:
		self.state = seed

	blockseed = self.state
	d = self._sample_d()

	# Rejection-sample d distinct block indices in [0, K).
	have = 0
	nums = set()
	while have < d:
		num = self._get_next() % self.K
		if num not in nums:
			nums.add(num)
			have += 1

	return blockseed, d, nums
Returns the indices of a set of `d` source blocks sampled from indices i = 1, ..., K-1 uniformly, where `d` is sampled from the RSD described above.
5.811159
5.089307
1.141837
def run(fn, blocksize, seed, c, delta):
	"""Run the encoder until the channel is broken, signalling that the
	receiver has successfully reconstructed the file.

	Streams fountain-coded blocks of file ``fn`` to stdout as raw bytes.
	"""

	with open(fn, 'rb') as stream:
		for coded_block in encode.encoder(stream, blocksize, seed, c, delta):
			sys.stdout.buffer.write(coded_block)
Run the encoder until the channel is broken, signalling that the receiver has successfully reconstructed the file
3.236217
3.187067
1.015422
def is_subscribed(self):
	"""The subscription status of the account linked to the
	:class:`MobileClient` instance.

	Also sets ``self.tier`` to ``'aa'`` (subscribed) or ``'fr'`` (free)
	as a side effect.

	Returns:
		bool or None: Subscription status, or ``None`` if the config
			entry is absent.
	"""

	subscribed = None
	for config_item in self.config():
		if config_item['key'] == 'isNautilusUser':
			subscribed = config_item['value'] == 'true'
			break

	self.tier = 'aa' if subscribed else 'fr'

	return subscribed
The subscription status of the account linked to the :class:`MobileClient` instance.
5.535928
5.297343
1.045039
def album(self, album_id, *, include_description=True, include_songs=True):
	"""Get information about an album.

	Parameters:
		album_id (str): An album ID. Album IDs start with a 'B'.
		include_description (bool, Optional): Include album description.
		include_songs (bool, Optional): Include songs from the album.
			Default: ``True``.

	Returns:
		dict: Album information.
	"""

	return self._call(
		mc_calls.FetchAlbum,
		album_id,
		include_description=include_description,
		include_tracks=include_songs
	).body
Get information about an album. Parameters: album_id (str): An album ID. Album IDs start with a 'B'. include_description (bool, Optional): Include description of the album in the returned dict. include_songs (bool, Optional): Include songs from the album in the returned dict. Default: ``True``. Returns: dict: Album information.
4.571253
5.118708
0.893048
def artist(
	self, artist_id, *, include_albums=True, num_related_artists=5,
	num_top_tracks=5
):
	"""Get information about an artist.

	Parameters:
		artist_id (str): An artist ID. Artist IDs start with an 'A'.
		include_albums (bool, Optional): Include the artist's albums.
			Default: ``True``.
		num_related_artists (int, Optional): Up to this many related
			artists. Default: ``5``.
		num_top_tracks (int, Optional): Up to this many top tracks.
			Default: ``5``.

	Returns:
		dict: Artist information.
	"""

	return self._call(
		mc_calls.FetchArtist,
		artist_id,
		include_albums=include_albums,
		num_related_artists=num_related_artists,
		num_top_tracks=num_top_tracks
	).body
Get information about an artist. Parameters: artist_id (str): An artist ID. Artist IDs start with an 'A'. include_albums (bool, Optional): Include albums by the artist in returned dict. Default: ``True``. num_related_artists (int, Optional): Include up to given number of related artists in returned dict. Default: ``5``. num_top_tracks (int, Optional): Include up to given number of top tracks in returned dict. Default: ``5``. Returns: dict: Artist information.
2.715102
2.910788
0.932772
def browse_podcasts(self, podcast_genre_id='JZCpodcasttopchartall'):
	"""Get the podcasts for a genre from the Podcasts browse tab.

	Parameters:
		podcast_genre_id (str, Optional): A podcast genre ID as found in
			:meth:`browse_podcasts_genres`.
			Default: ``'JZCpodcasttopchartall'``.

	Returns:
		list: Podcast dicts.
	"""

	return self._call(
		mc_calls.PodcastBrowse,
		podcast_genre_id=podcast_genre_id
	).body.get('series', [])
Get the podcasts for a genre from the Podcasts browse tab. Parameters: podcast_genre_id (str, Optional): A podcast genre ID as found in :meth:`browse_podcasts_genres`. Default: ``'JZCpodcasttopchartall'``. Returns: list: Podcast dicts.
4.500231
5.425839
0.829407
def browse_podcasts_genres(self):
	"""Get the genres from the Podcasts browse tab dropdown.

	Returns:
		list: Genre groups that contain sub groups.
	"""

	return self._call(mc_calls.PodcastBrowseHierarchy).body.get('groups', [])
Get the genres from the Podcasts browse tab dropdown. Returns: list: Genre groups that contain sub groups.
15.091697
12.169761
1.240098
def browse_stations(self, station_category_id):
	"""Get the stations for a category from Browse Stations.

	Parameters:
		station_category_id (str): A station category ID as found with
			:meth:`browse_stations_categories`.

	Returns:
		list: Station dicts.
	"""

	return self._call(
		mc_calls.BrowseStations, station_category_id
	).body.get('stations', [])
Get the stations for a category from Browse Stations. Parameters: station_category_id (str): A station category ID as found with :meth:`browse_stations_categories`. Returns: list: Station dicts.
5.310962
5.649726
0.940039
def browse_stations_categories(self):
	"""Get the categories from Browse Stations.

	Returns:
		list: Station categories that can contain subcategories.
	"""

	body = self._call(mc_calls.BrowseStationCategories).body

	return body.get('root', {}).get('subcategories', [])
Get the categories from Browse Stations. Returns: list: Station categories that can contain subcategories.
7.788645
7.102973
1.096533
def config(self):
	"""Get a listing of mobile client configuration settings.

	Returns:
		list: Config entry dicts.
	"""

	return self._call(mc_calls.Config).body.get('data', {}).get('entries', [])
Get a listing of mobile client configuration settings.
9.858028
7.257434
1.358335
def device_set(self, device):
	"""Set device used by :class:`MobileClient` instance.

	Parameters:
		device (dict): A device dict as returned by :meth:`devices`.
	"""

	device_id = device['id']

	# Normalize: strip a hex '0x' prefix, or collapse 'ios:' IDs by
	# removing every colon; anything else is used verbatim.
	if device_id.startswith('0x'):
		device_id = device_id[2:]
	elif device_id.startswith('ios:'):
		device_id = device_id.replace(':', '')

	self.device_id = device_id
Set device used by :class:`MobileClient` instance. Parameters: device (dict): A device dict as returned by :meth:`devices`.
2.957528
2.757938
1.072369
def devices(self):
	"""Get a listing of devices registered to the Google Music account.

	Returns:
		list: Registered device dicts.
	"""

	body = self._call(mc_calls.DeviceManagementInfo).body

	return body.get('data', {}).get('items', [])
Get a listing of devices registered to the Google Music account.
10.769996
8.938294
1.204927
def explore_genres(self, parent_genre_id=None):
	"""Get a listing of song genres.

	Parameters:
		parent_genre_id (str, Optional): A genre ID. If given, a listing
			of this genre's sub-genres is returned.

	Returns:
		list: Genre dicts.
	"""

	return self._call(
		mc_calls.ExploreGenres, parent_genre_id
	).body.get('genres', [])
Get a listing of song genres. Parameters: parent_genre_id (str, Optional): A genre ID. If given, a listing of this genre's sub-genres is returned. Returns: list: Genre dicts.
5.091036
5.531322
0.920401
def explore_tabs(self, *, num_items=100, genre_id=None):
	"""Get a listing of explore tabs.

	Parameters:
		num_items (int, Optional): Number of items per tab to return.
			Default: ``100``
		genre_id (str, Optional): Genre ID from :meth:`explore_genres`
			to explore. Default: ``None``.

	Returns:
		dict: Explore tabs content keyed by lowercased tab type.
	"""

	tabs = self._call(
		mc_calls.ExploreTabs,
		num_items=num_items,
		genre_id=genre_id
	).body.get('tabs', [])

	return {tab['tab_type'].lower(): tab for tab in tabs}
Get a listing of explore tabs. Parameters: num_items (int, Optional): Number of items per tab to return. Default: ``100`` genre_id (genre_id, Optional): Genre ID from :meth:`explore_genres` to explore. Default: ``None``. Returns: dict: Explore tabs content.
3.346982
3.414561
0.980209
def listen_now_dismissed_items(self):
	"""Get a listing of items dismissed from Listen Now tab.

	Returns:
		list: Dismissed item dicts.
	"""

	return self._call(mc_calls.ListenNowGetDismissedItems).body.get('items', [])
Get a listing of items dismissed from Listen Now tab.
6.826358
5.837325
1.169433
def listen_now_items(self):
	"""Get a listing of Listen Now items.

	Note:
		This does not include situations; use :meth:`situations` instead.

	Returns:
		dict: With ``albums`` and ``stations`` keys of listen now items.
	"""

	items = self._call(
		mc_calls.ListenNowGetListenNowItems
	).body.get('listennow_items', [])

	# Group by pluralized item type name, e.g. 'albums', 'stations'.
	grouped = defaultdict(list)
	for item in items:
		grouped[f"{ListenNowItemType(item['type']).name}s"].append(item)

	return dict(grouped)
Get a listing of Listen Now items. Note: This does not include situations; use the :meth:`situations` method instead. Returns: dict: With ``albums`` and ``stations`` keys of listen now items.
3.966595
3.537674
1.121244
def playlist_song(self, playlist_song_id):
	"""Get information about a playlist song.

	Note:
		This returns the playlist entry information only. For full song
		metadata, use :meth:`song` with the ``'trackId'`` field.

	Parameters:
		playlist_song_id (str): A playlist song ID.

	Returns:
		dict: Playlist song information, or ``None`` if not found.
	"""

	for playlist in self.playlists(include_songs=True):
		for entry in playlist['tracks']:
			if entry['id'] == playlist_song_id:
				return entry

	return None
Get information about a playlist song. Note: This returns the playlist entry information only. For full song metadata, use :meth:`song` with the ``'trackId'`` field. Parameters: playlist_song_id (str): A playlist song ID. Returns: dict: Playlist song information.
2.408598
3.045396
0.790898
def playlist_song_add(
	self, song, playlist, *, after=None, before=None, index=None,
	position=None
):
	"""Add a song to a playlist.

	Note:
		Provide no optional arguments to add to end; ``after``/``before``
		take playlist song dicts; ``index`` is zero-based; ``position``
		is one-based.

	Parameters:
		song (dict): A song dict.
		playlist (dict): A playlist dict.
		after (dict, Optional): A playlist song dict ``song`` will follow.
		before (dict, Optional): A playlist song dict ``song`` will precede.
		index (int, Optional): Zero-based insert index.
		position (int, Optional): One-based insert position.

	Returns:
		dict: Playlist dict including songs.
	"""

	prev, next_ = get_ple_prev_next(
		self.playlist_songs(playlist),
		after=after,
		before=before,
		index=index,
		position=position
	)

	# Prefer the store ID, then the store track reference, then the
	# library ID.
	if 'storeId' in song:
		song_id = song['storeId']
	elif 'trackId' in song:
		song_id = song['trackId']
	else:
		song_id = song['id']

	mutation = mc_calls.PlaylistEntriesBatch.create(
		song_id,
		playlist['id'],
		preceding_entry_id=prev.get('id'),
		following_entry_id=next_.get('id')
	)
	self._call(mc_calls.PlaylistEntriesBatch, mutation)

	return self.playlist(playlist['id'], include_songs=True)
Add a song to a playlist. Note: * Provide no optional arguments to add to end. * Provide playlist song dicts for ``after`` and/or ``before``. * Provide a zero-based ``index``. * Provide a one-based ``position``. Songs are inserted *at* given index or position. It's also possible to add to the end by using ``len(songs)`` for index or ``len(songs) + 1`` for position. Parameters: song (dict): A song dict. playlist (dict): A playlist dict. after (dict, Optional): A playlist song dict ``songs`` will follow. before (dict, Optional): A playlist song dict ``songs`` will precede. index (int, Optional): The zero-based index position to insert ``song``. position (int, Optional): The one-based position to insert ``song``. Returns: dict: Playlist dict including songs.
3.189549
3.56002
0.895936
def playlist_songs_add(
	self, songs, playlist, *, after=None, before=None, index=None,
	position=None
):
	"""Add songs to a playlist.

	Note:
		Provide no optional arguments to add to end; ``after``/``before``
		take playlist song dicts; ``index`` is zero-based; ``position``
		is one-based.

	Parameters:
		songs (list): A list of song dicts.
		playlist (dict): A playlist dict.
		after (dict, Optional): A playlist song dict ``songs`` will follow.
		before (dict, Optional): A playlist song dict ``songs`` will precede.
		index (int, Optional): Zero-based insert index.
		position (int, Optional): One-based insert position.

	Returns:
		dict: Playlist dict including songs.
	"""

	prev, next_ = get_ple_prev_next(
		self.playlist_songs(playlist),
		after=after,
		before=before,
		index=index,
		position=position
	)

	last = len(songs) - 1
	for idx, song in enumerate(songs):
		if 'storeId' in song:
			song_id = song['storeId']
		elif 'trackId' in song:
			song_id = song['trackId']
		else:
			song_id = song['id']

		mutation = mc_calls.PlaylistEntriesBatch.create(
			song_id,
			playlist['id'],
			preceding_entry_id=prev.get('id'),
			following_entry_id=next_.get('id')
		)
		response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
		result = response.body['mutate_response'][0]

		# TODO: Proper exception on failure.
		if result['response_code'] != 'OK':
			break

		if idx < last:
			# Poll until the new entry is visible so the next insert
			# chains after it.
			while True:
				prev = self.playlist_song(result['id'])
				if prev:
					break

	return self.playlist(playlist['id'], include_songs=True)
Add songs to a playlist. Note: * Provide no optional arguments to add to end. * Provide playlist song dicts for ``after`` and/or ``before``. * Provide a zero-based ``index``. * Provide a one-based ``position``. Songs are inserted *at* given index or position. It's also possible to add to the end by using ``len(songs)`` for index or ``len(songs) + 1`` for position. Parameters: songs (list): A list of song dicts. playlist (dict): A playlist dict. after (dict, Optional): A playlist song dict ``songs`` will follow. before (dict, Optional): A playlist song dict ``songs`` will precede. index (int, Optional): The zero-based index position to insert ``songs``. position (int, Optional): The one-based position to insert ``songs``. Returns: dict: Playlist dict including songs.
3.06021
3.328789
0.919316
def playlist_song_delete(self, playlist_song):
	"""Delete song from playlist.

	Parameters:
		playlist_song (dict): A playlist song dict.

	Returns:
		dict: Playlist dict including songs.
	"""

	# Delegate to the batch variant with a single-element list.
	self.playlist_songs_delete([playlist_song])

	return self.playlist(playlist_song['playlistId'], include_songs=True)
Delete song from playlist. Parameters: playlist_song (str): A playlist song dict. Returns: dict: Playlist dict including songs.
5.686433
6.134212
0.927003
def playlist_songs_delete(self, playlist_songs):
	"""Delete songs from playlist.

	Parameters:
		playlist_songs (list): A list of playlist song dicts, all from
			the same playlist.

	Returns:
		dict: Playlist dict including songs.
	"""

	if not more_itertools.all_equal(
		entry['playlistId'] for entry in playlist_songs
	):
		raise ValueError(
			"All 'playlist_songs' must be from the same playlist."
		)

	mutations = [
		mc_calls.PlaylistEntriesBatch.delete(entry['id'])
		for entry in playlist_songs
	]
	self._call(mc_calls.PlaylistEntriesBatch, mutations)

	return self.playlist(playlist_songs[0]['playlistId'], include_songs=True)
Delete songs from playlist. Parameters: playlist_songs (list): A list of playlist song dicts. Returns: dict: Playlist dict including songs.
4.436568
4.406376
1.006852
def playlist_song_move(
	self, playlist_song, *, after=None, before=None, index=None,
	position=None
):
	"""Move a song in a playlist.

	Note:
		Provide no optional arguments to move to end; ``after``/``before``
		take playlist song dicts; ``index`` is zero-based; ``position``
		is one-based.

	Parameters:
		playlist_song (dict): A playlist song dict.
		after (dict, Optional): A playlist song dict the song will follow.
		before (dict, Optional): A playlist song dict the song will precede.
		index (int, Optional): Zero-based insert index.
		position (int, Optional): One-based insert position.

	Returns:
		dict: Playlist dict including songs.
	"""

	tracks = self.playlist(
		playlist_song['playlistId'], include_songs=True
	)['tracks']

	prev, next_ = get_ple_prev_next(
		tracks,
		after=after,
		before=before,
		index=index,
		position=position
	)

	mutation = mc_calls.PlaylistEntriesBatch.update(
		playlist_song,
		preceding_entry_id=prev.get('id'),
		following_entry_id=next_.get('id')
	)
	self._call(mc_calls.PlaylistEntriesBatch, mutation)

	return self.playlist(playlist_song['playlistId'], include_songs=True)
Move a song in a playlist. Note: * Provide no optional arguments to move to end. * Provide playlist song dicts for ``after`` and/or ``before``. * Provide a zero-based ``index``. * Provide a one-based ``position``. Songs are inserted *at* given index or position. It's also possible to move to the end by using ``len(songs)`` for index or ``len(songs) + 1`` for position. Parameters: playlist_song (dict): A playlist song dict. after (dict, Optional): A playlist song dict ``songs`` will follow. before (dict, Optional): A playlist song dict ``songs`` will precede. index (int, Optional): The zero-based index position to insert ``song``. position (int, Optional): The one-based position to insert ``song``. Returns: dict: Playlist dict including songs.
3.311302
3.843991
0.861423
def playlist_songs_move(
	self, playlist_songs, *, after=None, before=None, index=None,
	position=None
):
	"""Move songs in a playlist.

	Note:
		Provide no optional arguments to move to end; ``after``/``before``
		take playlist song dicts; ``index`` is zero-based; ``position``
		is one-based.

	Parameters:
		playlist_songs (list): A list of playlist song dicts, all from
			the same playlist.
		after (dict, Optional): A playlist song dict ``songs`` will follow.
		before (dict, Optional): A playlist song dict ``songs`` will precede.
		index (int, Optional): Zero-based insert index.
		position (int, Optional): One-based insert position.

	Returns:
		dict: Playlist dict including songs.
	"""

	if not more_itertools.all_equal(
		entry['playlistId'] for entry in playlist_songs
	):
		raise ValueError(
			"All 'playlist_songs' must be from the same playlist."
		)

	playlist = self.playlist(
		playlist_songs[0]['playlistId'], include_songs=True
	)

	prev, next_ = get_ple_prev_next(
		playlist['tracks'],
		after=after,
		before=before,
		index=index,
		position=position
	)

	last = len(playlist_songs) - 1
	for idx, playlist_song in enumerate(playlist_songs):
		mutation = mc_calls.PlaylistEntriesBatch.update(
			playlist_song,
			preceding_entry_id=prev.get('id'),
			following_entry_id=next_.get('id')
		)
		response = self._call(mc_calls.PlaylistEntriesBatch, mutation)
		result = response.body['mutate_response'][0]

		# TODO: Proper exception on failure.
		if result['response_code'] != 'OK':
			break

		if idx < last:
			# Poll until the moved entry is visible so the next move
			# chains after it.
			while True:
				prev = self.playlist_song(result['id'])
				if prev:
					break

	return self.playlist(playlist_songs[0]['playlistId'], include_songs=True)
Move songs in a playlist. Note: * Provide no optional arguments to move to end. * Provide playlist song dicts for ``after`` and/or ``before``. * Provide a zero-based ``index``. * Provide a one-based ``position``. Songs are inserted *at* given index or position. It's also possible to move to the end by using ``len(songs)`` for index or ``len(songs) + 1`` for position. Parameters: playlist_songs (list): A list of playlist song dicts. after (dict, Optional): A playlist song dict ``songs`` will follow. before (dict, Optional): A playlist song dict ``songs`` will precede. index (int, Optional): The zero-based index position to insert ``songs``. position (int, Optional): The one-based position to insert ``songs``. Returns: dict: Playlist dict including songs.
3.026658
3.11486
0.971683
def playlist_songs(self, playlist):
	"""Get a listing of songs from a playlist.

	Parameters:
		playlist (dict): A playlist dict.

	Returns:
		list: Playlist song dicts sorted by absolute position.
	"""

	entries = []
	playlist_type = playlist.get('type')

	if playlist_type in ('USER_GENERATED', None):
		page_token = None
		while True:
			body = self._call(
				mc_calls.PlaylistEntryFeed,
				max_results=49995,
				start_token=page_token
			).body
			entries.extend(body.get('data', {}).get('items', []))

			page_token = body.get('nextPageToken')
			if page_token is None:
				break
	elif playlist_type == 'SHARED':
		page_token = None
		while True:
			shared_entry = self._call(
				mc_calls.PlaylistEntriesShared,
				playlist['shareToken'],
				max_results=49995,
				start_token=page_token
			).body['entries'][0]
			entries.extend(shared_entry.get('playlistEntry', []))

			page_token = shared_entry.get('nextPageToken')
			if page_token is None:
				break

	entries.sort(key=itemgetter('absolutePosition'))

	return entries
Get a listing of songs from a playlist. Paramters: playlist (dict): A playlist dict. Returns: list: Playlist song dicts.
2.447482
2.425262
1.009162
def playlist(self, playlist_id, *, include_songs=False):
	"""Get information about a playlist.

	Parameters:
		playlist_id (str): A playlist ID.
		include_songs (bool, Optional): Include songs from the playlist
			in the returned dict. Default: ``False``

	Returns:
		dict: Playlist information, or ``None`` if not found.
	"""

	for candidate in self.playlists(include_songs=include_songs):
		if candidate['id'] == playlist_id:
			return candidate

	return None
Get information about a playlist. Parameters: playlist_id (str): A playlist ID. include_songs (bool, Optional): Include songs from the playlist in the returned dict. Default: ``False`` Returns: dict: Playlist information.
2.476412
3.475058
0.712625
def playlist_create(
	self, name, description='', *, make_public=False, songs=None
):
	"""Create a playlist.

	Parameters:
		name (str): Name to give the playlist.
		description (str): Description to give the playlist.
		make_public (bool, Optional): If ``True`` and account has a
			subscription, make playlist public. Default: ``False``
		songs (list, Optional): A list of song dicts to add to the
			playlist.

	Returns:
		dict: Playlist information.
	"""

	playlist = self._call(
		mc_calls.PlaylistsCreate,
		name,
		description,
		'PUBLIC' if make_public else 'PRIVATE'
	).body

	if songs:
		playlist = self.playlist_songs_add(songs, playlist)

	return playlist
Create a playlist. Parameters: name (str): Name to give the playlist. description (str): Description to give the playlist. make_public (bool, Optional): If ``True`` and account has a subscription, make playlist public. Default: ``False`` songs (list, Optional): A list of song dicts to add to the playlist. Returns: dict: Playlist information.
4.188157
6.137904
0.682343
def playlist_edit(self, playlist, *, name=None, description=None, public=None):
	"""Edit a playlist's name, description, and/or share state.

	Parameters:
		playlist (dict): A playlist dict.
		name (str, Optional): New name for the playlist.
		description (str, Optional): New description for the playlist.
		public (bool, Optional): ``True`` to make the playlist public,
			``False`` to make it private, ``None`` to leave unchanged.

	Returns:
		dict: Playlist information.

	Raises:
		ValueError: If none of name, description, public is provided.

	Fix: ``public=False`` previously fell through to the playlist's
	current share state, so a playlist could never be made private;
	it now explicitly sets ``'PRIVATE'``.
	"""

	if all(value is None for value in (name, description, public)):
		raise ValueError(
			'At least one of name, description, or public must be provided'
		)

	playlist_id = playlist['id']
	playlist = self.playlist(playlist_id)

	name = name if name is not None else playlist['name']
	description = (
		description if description is not None else playlist['description']
	)

	if public is None:
		# NOTE(review): 'accessControlled' looks like it may be a bool,
		# not a share-state string — confirm PlaylistsUpdate accepts it.
		share_state = playlist['accessControlled']
	else:
		share_state = 'PUBLIC' if public else 'PRIVATE'

	playlist = self._call(
		mc_calls.PlaylistsUpdate, playlist_id, name, description, share_state
	).body

	return playlist
Edit playlist(s). Parameters: playlist (dict): A playlist dict. name (str): Name to give the playlist. description (str, Optional): Description to give the playlist. make_public (bool, Optional): If ``True`` and account has a subscription, make playlist public. Default: ``False`` Returns: dict: Playlist information.
2.973779
3.051652
0.974482
def playlist_subscribe(self, playlist):
	"""Subscribe to a public playlist.

	Parameters:
		playlist (dict): A public playlist dict.

	Returns:
		dict: Playlist information.
	"""

	mutation = mc_calls.PlaylistBatch.create(
		playlist['name'],
		playlist['description'],
		'SHARED',
		owner_name=playlist.get('ownerName', ''),
		share_token=playlist['shareToken']
	)

	new_playlist_id = self._call(
		mc_calls.PlaylistBatch, mutation
	).body['mutate_response'][0]['id']

	return self.playlist(new_playlist_id)
Subscribe to a public playlist. Parameters: playlist (dict): A public playlist dict. Returns: dict: Playlist information.
5.409009
5.549558
0.974674
def playlists(self, *, include_songs=False):
	"""Get a listing of library playlists.

	Parameters:
		include_songs (bool, Optional): Include songs in the returned
			playlist dicts. Default: ``False``.

	Returns:
		list: A list of playlist dicts.
	"""

	collected = []
	for page in self.playlists_iter(page_size=49995):
		for pl in page:
			if include_songs:
				pl['tracks'] = self.playlist_songs(pl)
			collected.append(pl)

	return collected
Get a listing of library playlists. Parameters: include_songs (bool, Optional): Include songs in the returned playlist dicts. Default: ``False``. Returns: list: A list of playlist dicts.
3.386512
3.886386
0.871378
def playlists_iter(self, *, start_token=None, page_size=250):
	"""Get a paged iterator of library playlists.

	Parameters:
		start_token (str): The token of the page to return.
			Default: Not sent to get first page.
		page_size (int, Optional): The maximum number of results per
			returned page. Max allowed is ``49995``. Default: ``250``

	Yields:
		list: Playlist dicts.
	"""

	while True:
		body = self._call(
			mc_calls.PlaylistFeed,
			max_results=page_size,
			start_token=start_token
		).body

		page = body.get('data', {}).get('items', [])
		if page:
			yield page

		start_token = body.get('nextPageToken')
		if start_token is None:
			break
Get a paged iterator of library playlists. Parameters: start_token (str): The token of the page to return. Default: Not sent to get first page. page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Playlist dicts.
3.069045
3.413746
0.899026
def podcast(self, podcast_series_id, *, max_episodes=50):
	"""Get information about a podcast series.

	Parameters:
		podcast_series_id (str): A podcast series ID.
		max_episodes (int, Optional): Include up to given number of
			episodes in returned dict. Default: ``50``

	Returns:
		dict: Podcast series information.
	"""

	return self._call(
		mc_calls.PodcastFetchSeries,
		podcast_series_id,
		max_episodes=max_episodes
	).body
Get information about a podcast series. Parameters: podcast_series_id (str): A podcast series ID. max_episodes (int, Optional): Include up to given number of episodes in returned dict. Default: ``50`` Returns: dict: Podcast series information.
5.977476
8.393706
0.712138
def podcasts(self, *, device_id=None):
	"""Get a listing of subscribed podcast series.

	Parameters:
		device_id (str, Optional): A mobile device ID.
			Default: ``device_id`` of the :class:`MobileClient` instance.

	Returns:
		list: Podcast series dicts.
	"""

	if device_id is None:
		device_id = self.device_id

	return [
		series
		for page in self.podcasts_iter(device_id=device_id, page_size=49995)
		for series in page
	]
Get a listing of subsribed podcast series. Paramaters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. Returns: list: Podcast series dict.
3.057047
3.087705
0.990071
def podcasts_iter(self, *, device_id=None, page_size=250):
	"""Get a paged iterator of subscribed podcast series.

	Parameters:
		device_id (str, Optional): A mobile device ID.
			Default: ``device_id`` of the :class:`MobileClient` instance.
		page_size (int, Optional): The maximum number of results per
			returned page. Max allowed is ``49995``. Default: ``250``

	Yields:
		list: Podcast series dicts (subscribed only).
	"""

	if device_id is None:
		device_id = self.device_id

	start_token = None
	previous_page = None
	while True:
		body = self._call(
			mc_calls.PodcastSeries,
			device_id,
			max_results=page_size,
			start_token=start_token
		).body

		page = body.get('data', {}).get('items', [])

		# The API can return the same page repeatedly; stop when it does.
		if page == previous_page:
			break

		yield [
			series
			for series in page
			if series.get('userPreferences', {}).get('subscribed')
		]
		previous_page = page

		start_token = body.get('nextPageToken')
		if start_token is None:
			break
Get a paged iterator of subscribed podcast series. Parameters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Podcast series dicts.
2.954924
2.845299
1.038528
def podcast_episode(self, podcast_episode_id):
	"""Get information about a podcast episode.

	Parameters:
		podcast_episode_id (str): A podcast episode ID.

	Returns:
		list: Non-deleted podcast episode dicts.
	"""

	episodes = self._call(
		mc_calls.PodcastFetchEpisode, podcast_episode_id
	).body

	return [episode for episode in episodes if not episode['deleted']]
Get information about a podcast_episode. Parameters: podcast_episode_id (str): A podcast episode ID. Returns: dict: Podcast episode information.
4.166706
4.51454
0.922952
def podcast_episodes(self, *, device_id=None):
	"""Get a listing of podcast episodes for all subscribed podcasts.

	Parameters:
		device_id (str, Optional): A mobile device ID.
			Default: ``device_id`` of the :class:`MobileClient` instance.

	Returns:
		list: Podcast episode dicts.
	"""

	if device_id is None:
		device_id = self.device_id

	return [
		episode
		for page in self.podcast_episodes_iter(
			device_id=device_id, page_size=49995
		)
		for episode in page
	]
Get a listing of podcast episodes for all subscribed podcasts. Paramaters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. Returns: list: Podcast episode dicts.
2.937038
2.913846
1.007959
def podcast_episodes_iter(self, *, device_id=None, page_size=250):
	"""Get a paged iterator of podcast episodes for all subscribed podcasts.

	Parameters:
		device_id (str, Optional): A mobile device ID.
			Default: ``device_id`` of the :class:`MobileClient` instance.
		page_size (int, Optional): The maximum number of results per
			returned page. Max allowed is ``49995``. Default: ``250``

	Yields:
		list: Podcast episode dicts.
	"""

	if device_id is None:
		device_id = self.device_id

	start_token = None
	previous_page = None
	while True:
		body = self._call(
			mc_calls.PodcastEpisode,
			device_id,
			max_results=page_size,
			start_token=start_token
		).body

		page = body.get('data', {}).get('items', [])

		# The API can return the same page repeatedly; stop when it does.
		if page == previous_page:
			break

		yield page
		previous_page = page

		start_token = body.get('nextPageToken')
		if start_token is None:
			break
Get a paged iterator of podcast episode for all subscribed podcasts. Parameters: device_id (str, Optional): A mobile device ID. Default: Use ``device_id`` of the :class:`MobileClient` instance. page_size (int, Optional): The maximum number of results per returned page. Max allowed is ``49995``. Default: ``250`` Yields: list: Podcast episode dicts.
2.943867
2.986096
0.985858
def search(self, query, *, max_results=100, **kwargs):
	"""Search Google Music and library for content.

	Parameters:
		query (str): Search text.
		max_results (int, Optional): Maximum number of results per type
			per location. Google only accepts values up to 100.
			Default: ``100``
		kwargs (bool, Optional): Result-type flags (``albums``,
			``artists``, ``songs``, ...); setting none includes all.

	Returns:
		dict: Results keyed by type, merged from library and Google.
	"""

	merged = defaultdict(list)

	# Library results first, then store results, matching the original
	# ordering within each type.
	for source in (self.search_library, self.search_google):
		for type_, hits in source(
			query, max_results=max_results, **kwargs
		).items():
			merged[type_].extend(hits)

	return dict(merged)
Search Google Music and library for content. Parameters: query (str): Search text. max_results (int, Optional): Maximum number of results per type per location to retrieve. I.e up to 100 Google and 100 library for a total of 200 for the default value. Google only accepts values up to 100. Default: ``100`` kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``, ``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``, ``videos`` set to ``True`` will include that result type in the returned dict. Setting none of them will include all result types in the returned dict. Returns: dict: A dict of results separated into keys: ``'albums'``, ``'artists'``, ``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``, ``'songs'``, ``'stations'``, ``'videos'``. Note: Free account search is restricted so may not contain hits for all result types.
2.089134
2.051754
1.018219
def search_google(self, query, *, max_results=100, **kwargs):
	"""Search Google Music for content.

	Parameters:
		query (str): Search text.
		max_results (int, Optional): Maximum number of results per type.
			Google only accepts values up to 100. Default: ``100``
		kwargs (bool, Optional): Result-type flags; setting none
			includes all result types.

	Returns:
		dict: Results keyed by pluralized result type.
	"""

	clusters = self._call(
		mc_calls.Query, query, max_results=max_results, **kwargs
	).body.get('clusterDetail', [])

	results = defaultdict(list)
	for cluster in clusters:
		result_type = QueryResultType(cluster['cluster']['type']).name

		for entry in cluster.get('entries', []):
			# The payload lives under the one key that isn't metadata.
			item_key = next(
				key
				for key in entry
				if key not in ('cluster', 'score', 'type')
			)
			results[f"{result_type}s"].append(entry[item_key])

	return dict(results)
Search Google Music for content. Parameters: query (str): Search text. max_results (int, Optional): Maximum number of results per type to retrieve. Google only accepts values up to 100. Default: ``100`` kwargs (bool, Optional): Any of ``albums``, ``artists``, ``genres``, ``playlists``, ``podcasts``, ``situations``, ``songs``, ``stations``, ``videos`` set to ``True`` will include that result type in the returned dict. Setting none of them will include all result types in the returned dict. Returns: dict: A dict of results separated into keys: ``'albums'``, ``'artists'``, ``'genres'``, ``'playlists'``, ```'podcasts'``, ``'situations'``, ``'songs'``, ``'stations'``, ``'videos'``. Note: Free account search is restricted so may not contain hits for all result types.
3.736409
3.607075
1.035856
def search_library(self, query, *, max_results=100, **kwargs):
	"""Search the library for content.

	Parameters:
		query (str): Search text.
		max_results (int, Optional): Maximum number of results per type.
			Default: ``100``
		kwargs (bool, Optional): Any of ``playlists``, ``podcasts``,
			``songs``, ``stations`` set to ``True`` restricts the result
			types; setting none includes all.

	Returns:
		dict: Results keyed by type.
	"""

	def matches(item, fields):
		# Case-insensitive substring match across the given fields.
		return any(
			query.casefold() in item.get(field, '').casefold()
			for field in fields
		)

	sources = (
		('playlists', ['description', 'name'], self.playlists),
		('podcasts', ['author', 'description', 'title'], self.podcasts),
		(
			'songs',
			['album', 'albumArtist', 'artist', 'composer', 'genre', 'title'],
			self.songs
		),
		('stations', ['byline', 'description', 'name'], self.stations),
	)

	results = {}
	for type_, fields, list_func in sources:
		if not kwargs or type_ in kwargs:
			results[type_] = [
				item for item in list_func() if matches(item, fields)
			][:max_results]

	return results
Search Google Music for content. Parameters: query (str): Search text. max_results (int, Optional): Maximum number of results per type to retrieve. Default: ``100`` kwargs (bool, Optional): Any of ``playlists``, ``podcasts``, ``songs``, ``stations``, ``videos`` set to ``True`` will include that result type in the returned dict. Setting none of them will include all result types in the returned dict. Returns: dict: A dict of results separated into keys: ``'playlists'``, ``'podcasts'``, ``'songs'``, ``'stations'``.
2.344822
2.165538
1.08279
def search_suggestion(self, query):
	"""Get search query suggestions for query.

	Parameters:
		query (str): Search text.

	Returns:
		list: Suggested query strings.
	"""

	suggestions = self._call(
		mc_calls.QuerySuggestion, query
	).body.get('suggested_queries', [])

	return [entry['suggestion_string'] for entry in suggestions]
Get search query suggestions for query. Parameters: query (str): Search text. Returns: list: Suggested query strings.
4.742895
4.417585
1.07364
def shuffle_album(
	self, album, *, num_songs=100, only_library=False, recently_played=None
):
	"""Get a listing of album shuffle/mix songs.

	Parameters:
		album (dict): An album dict.
		num_songs (int, Optional): Maximum number of songs to return.
			Default: ``100``
		only_library (bool, Optional): Only return content from library.
			Default: ``False``
		recently_played (list, Optional): Dicts of ``{'id': ..., 'type': ...}``
			where type is 0 for a library song and 1 for a store song.

	Returns:
		list: List of album shuffle/mix songs.
	"""

	station_info = {
		'seed': {
			'albumId': album['albumId'],
			'seedType': StationSeedType.album.value
		},
		'num_entries': num_songs,
		'library_content_only': only_library,
	}
	if recently_played is not None:
		station_info['recently_played'] = recently_played

	stations = self._call(
		mc_calls.RadioStationFeed, station_infos=[station_info]
	).body.get('data', {}).get('stations', [])

	station = stations[0] if stations else {}

	return station.get('tracks', [])
Get a listing of album shuffle/mix songs. Parameters: album (dict): An album dict. num_songs (int, Optional): The maximum number of songs to return from the station. Default: ``100`` only_library (bool, Optional): Only return content from library. Default: False recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type'} where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song. Returns: list: List of album shuffle/mix songs.
3.827746
4.119484
0.929181
def shuffle_artist(
	self, artist, *, num_songs=100, only_library=False,
	recently_played=None, only_artist=False
):
	"""Get a listing of artist shuffle/mix songs.

	Parameters:
		artist (dict): An artist dict.
		num_songs (int, Optional): Maximum number of songs to return.
			Default: ``100``
		only_library (bool, Optional): Only return content from library.
			Default: ``False``
		recently_played (list, Optional): Dicts of ``{'id': ..., 'type': ...}``
			where type is 0 for a library song and 1 for a store song.
		only_artist (bool, Optional): If ``True``, only songs from the
			artist; else also related artists. Default: ``False``

	Returns:
		list: List of artist shuffle/mix songs.
	"""

	seed_type = (
		StationSeedType.artist_only
		if only_artist
		else StationSeedType.artist_related
	)

	station_info = {
		'seed': {
			'artistId': artist['artistId'],
			'seedType': seed_type.value
		},
		'num_entries': num_songs,
		'library_content_only': only_library,
	}
	if recently_played is not None:
		station_info['recently_played'] = recently_played

	stations = self._call(
		mc_calls.RadioStationFeed, station_infos=[station_info]
	).body.get('data', {}).get('stations', [])

	station = stations[0] if stations else {}

	return station.get('tracks', [])
Get a listing of artist shuffle/mix songs. Parameters: artist (dict): An artist dict. num_songs (int, Optional): The maximum number of songs to return from the station. Default: ``100`` only_library (bool, Optional): Only return content from library. Default: False recently_played (list, Optional): A list of dicts in the form of {'id': '', 'type'} where ``id`` is a song ID and ``type`` is 0 for a library song and 1 for a store song. only_artist (bool, Optional): If ``True``, only return songs from the artist, else return songs from artist and related artists. Default: ``False`` Returns: list: List of artist shuffle/mix songs.
3.004378
3.098546
0.969609
def shuffle_song(self, song, *, num_songs=100, only_library=False, recently_played=None):
    """Get a listing of song shuffle/mix songs.

    Parameters:
        song (dict): A song dict.
        num_songs (int, Optional): The maximum number of songs to return
            from the station. Default: ``100``
        only_library (bool, Optional): Only return content from library.
            Default: ``False``
        recently_played (list, Optional): A list of dicts in the form of
            {'id': '', 'type'} where ``id`` is a song ID and ``type`` is
            0 for a library song and 1 for a store song.

    Returns:
        list: List of song shuffle/mix songs.
    """
    station_info = {
        'num_entries': num_songs,
        'library_content_only': only_library,
    }

    # Store songs seed by store track ID; library songs by locker ID.
    if 'storeId' in song:
        seed = {
            'trackId': song['storeId'],
            'seedType': StationSeedType.store_track.value,
        }
    else:
        seed = {
            'trackLockerId': song['id'],
            'seedType': StationSeedType.library_track.value,
        }
    station_info['seed'] = seed

    if recently_played is not None:
        station_info['recently_played'] = recently_played

    response = self._call(mc_calls.RadioStationFeed, station_infos=[station_info])
    stations = response.body.get('data', {}).get('stations', [])

    station = stations[0] if stations else {}

    return station.get('tracks', [])
3.501131
3.571386
0.980328
def situations(self, *, tz_offset=None):
    """Get a listing of situations.

    Parameters:
        tz_offset (int, Optional): A time zone offset from UTC in seconds.

    Returns:
        list: Situation dicts.
    """
    response = self._call(mc_calls.ListenNowSituations, tz_offset)

    return response.body.get('situations', [])
9.043975
11.135544
0.812172
def song(self, song_id):
    """Get information about a song.

    Parameters:
        song_id (str): A song ID. Store song IDs start with ``'T'``.

    Returns:
        dict: Song information, or ``None`` if a library song ID
        is not found.
    """
    # Store songs can be fetched directly; library songs require
    # scanning the full library listing.
    if song_id.startswith('T'):
        return self._call(mc_calls.FetchTrack, song_id).body

    for library_song in self.songs():
        if library_song['id'] == song_id:
            return library_song

    return None
3.870963
3.725571
1.039025
def songs_add(self, songs):
    """Add store songs to your library.

    Parameters:
        songs (list): A list of store song dicts.

    Returns:
        list: Library IDs of the successfully added songs.
    """
    mutations = [mc_calls.TrackBatch.add(song) for song in songs]
    response = self._call(mc_calls.TrackBatch, mutations)

    return [
        result['id']
        for result in response.body['mutate_response']
        if result['response_code'] == 'OK'
    ]
5.874266
5.456503
1.076562
def songs_delete(self, songs):
    """Delete songs from library.

    Parameters:
        songs (list): A list of song dicts.

    Returns:
        list: Successfully deleted song IDs.
    """
    mutations = [mc_calls.TrackBatch.delete(song['id']) for song in songs]
    response = self._call(mc_calls.TrackBatch, mutations)

    # TODO: Also surface the IDs whose response_code was not 'OK'.
    return [
        result['id']
        for result in response.body['mutate_response']
        if result['response_code'] == 'OK'
    ]
3.420251
3.301976
1.03582
def song_play(self, song):
    """Add a play to a song's play count.

    Parameters:
        song (dict): A song dict.

    Returns:
        bool: ``True`` if successful, ``False`` if not.
    """
    # Store/station songs carry their ID under 'storeId' or 'trackId';
    # library songs use 'id'. Check in that order.
    if 'storeId' in song:
        song_id = song['storeId']
    elif 'trackId' in song:
        song_id = song['trackId']
    else:
        song_id = song['id']

    song_duration = song['durationMillis']

    event = mc_calls.ActivityRecordRealtime.play(song_id, song_duration)
    response = self._call(mc_calls.ActivityRecordRealtime, event)

    # Fix: return the comparison directly instead of the redundant
    # `True if ... else False` ternary.
    return response.body['eventResults'][0]['code'] == 'OK'
4.639574
4.420092
1.049655
def song_rate(self, song, rating):
    """Rate song.

    Parameters:
        song (dict): A song dict.
        rating (int): 0 (not rated), 1 (thumbs down), or 5 (thumbs up).

    Returns:
        bool: ``True`` if successful, ``False`` if not.
    """
    # Store/station songs carry their ID under 'storeId' or 'trackId';
    # library songs use 'id'. Check in that order.
    if 'storeId' in song:
        song_id = song['storeId']
    elif 'trackId' in song:
        song_id = song['trackId']
    else:
        song_id = song['id']

    event = mc_calls.ActivityRecordRealtime.rate(song_id, rating)
    response = self._call(mc_calls.ActivityRecordRealtime, event)

    # Fix: return the comparison directly instead of the redundant
    # `True if ... else False` ternary.
    return response.body['eventResults'][0]['code'] == 'OK'
4.659651
4.345735
1.072235
def songs(self):
    """Get a listing of library songs.

    Returns:
        list: Song dicts.
    """
    # Request the maximum page size to minimize round trips.
    return [
        song
        for chunk in self.songs_iter(page_size=49995)
        for song in chunk
    ]
5.478621
5.622259
0.974452
def songs_iter(self, *, page_size=250):
    """Get a paged iterator of library songs.

    Parameters:
        page_size (int, Optional): The maximum number of results per
            returned page. Max allowed is ``49995``. Default: ``250``

    Yields:
        list: Song dicts.
    """
    token = None

    while True:
        response = self._call(
            mc_calls.TrackFeed,
            max_results=page_size,
            start_token=token,
        )

        page = response.body.get('data', {}).get('items', [])
        # Skip empty pages rather than yielding them.
        if page:
            yield page

        token = response.body.get('nextPageToken')
        if token is None:
            break
3.531566
3.721005
0.949089
def station(self, station_id, *, num_songs=25, recently_played=None):
    """Get information about a station.

    Parameters:
        station_id (str): A station ID. Use 'IFL' for I'm Feeling Lucky.
        num_songs (int, Optional): The maximum number of songs to return
            from the station. Default: ``25``
        recently_played (list, Optional): A list of dicts in the form of
            {'id': '', 'type'} where ``id`` is a song ID and ``type`` is
            0 for a library song and 1 for a store song.

    Returns:
        dict: Station information.
    """
    station_info = {
        'station_id': station_id,
        'num_entries': num_songs,
        'library_content_only': False,
    }

    if recently_played is not None:
        station_info['recently_played'] = recently_played

    response = self._call(mc_calls.RadioStationFeed, station_infos=[station_info])
    stations = response.body.get('data', {}).get('stations', [])

    # The feed may come back empty; fall back to an empty dict.
    return stations[0] if stations else {}
3.366876
3.687335
0.913092
def station_feed(self, *, num_songs=25, num_stations=4):
    """Generate stations.

    Note:
        A Google Music subscription is required.

    Parameters:
        num_songs (int, Optional): The total number of songs to return.
            Default: ``25``
        num_stations (int, Optional): The number of stations to return
            when no station_infos is provided. Default: ``4``

    Returns:
        list: Station information dicts.
    """
    response = self._call(
        mc_calls.RadioStationFeed,
        num_entries=num_songs,
        num_stations=num_stations,
    )

    return response.body.get('data', {}).get('stations', [])
4.953547
5.148795
0.962079
def station_songs(self, station, *, num_songs=25, recently_played=None):
    """Get a listing of songs from a station.

    Parameters:
        station (dict): A station dict.
        num_songs (int, Optional): The maximum number of songs to return
            from the station. Default: ``25``
        recently_played (list, Optional): A list of dicts in the form of
            {'id': '', 'type'} where ``id`` is a song ID and ``type`` is
            0 for a library song and 1 for a store song.

    Returns:
        list: Station song dicts.
    """
    full_station = self.station(
        station['id'],
        num_songs=num_songs,
        recently_played=recently_played,
    )

    return full_station.get('tracks', [])
3.023885
3.290816
0.918886
def stations(self, *, generated=True, library=True):
    """Get a listing of library stations.

    The listing can contain stations added to the library and generated
    from the library.

    Parameters:
        generated (bool, Optional): Include generated stations.
            Default: ``True``
        library (bool, Optional): Include library stations.
            Default: ``True``

    Returns:
        list: Station information dicts.
    """
    station_list = []

    for chunk in self.stations_iter(page_size=49995):
        for station in chunk:
            # 'inLibrary' distinguishes library stations from generated ones.
            in_library = bool(station.get('inLibrary'))
            if (library and in_library) or (generated and not in_library):
                station_list.append(station)

    return station_list
3.539317
3.95224
0.895522
def stations_iter(self, *, page_size=250):
    """Get a paged iterator of library stations.

    Parameters:
        page_size (int, Optional): The maximum number of results per
            returned page. Max allowed is ``49995``. Default: ``250``

    Yields:
        list: Station dicts.
    """
    token = None

    while True:
        response = self._call(
            mc_calls.RadioStation,
            max_results=page_size,
            start_token=token,
        )

        # NOTE(review): unlike songs_iter, this yields even when the page
        # is empty — kept as-is to preserve behavior.
        yield response.body.get('data', {}).get('items', [])

        token = response.body.get('nextPageToken')
        if token is None:
            break
3.965162
4.199515
0.944195
def stream(self, item, *, device_id=None, quality='hi', session_token=None):
    """Get MP3 stream of a podcast episode, library song, station_song, or store song.

    Note:
        Streaming requires a ``device_id`` from a valid, linked mobile device.

    Parameters:
        item (str): A podcast episode, library song, station_song, or store song.
            A Google Music subscription is required to stream store songs.
        device_id (str, Optional): A mobile device ID.
            Default: Use ``device_id`` of the :class:`MobileClient` instance.
        quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps),
            ``'med'`` (160Kbps), or ``'low'`` (128Kbps). Default: ``'hi'``.
        session_token (str): Session token from a station dict required for
            unsubscribed users to stream a station song.
            station['sessionToken'] as returned by :meth:`station` only
            exists for free accounts.

    Returns:
        bytes: An MP3 file.
    """
    url = self.stream_url(
        item,
        device_id=device_id if device_id is not None else self.device_id,
        quality=quality,
        session_token=session_token,
    )

    return self.session.get(url).content
2.123631
1.931344
1.099561
def stream_url(self, item, *, device_id=None, quality='hi', session_token=None):
    """Get a URL to stream a podcast episode, library song, station_song, or store song.

    Note:
        Streaming requires a ``device_id`` from a valid, linked mobile device.

    Parameters:
        item (str): A podcast episode, library song, station_song, or store song.
            A Google Music subscription is required to stream store songs.
        device_id (str, Optional): A mobile device ID.
            Default: Use ``device_id`` of the :class:`MobileClient` instance.
        quality (str, Optional): Stream quality is one of ``'hi'`` (320Kbps),
            ``'med'`` (160Kbps), or ``'low'`` (128Kbps). Default: ``'hi'``.
        session_token (str): Session token from a station dict required for
            unsubscribed users to stream a station song.
            station['sessionToken'] as returned by :meth:`station` only
            exists for free accounts.

    Returns:
        str: A URL to an MP3 file.
    """
    if device_id is None:
        device_id = self.device_id

    # Branch order matters: keys are checked from most to least specific
    # (e.g. a free-account station song also carries 'storeId').
    if 'episodeId' in item:  # Podcast episode.
        response = self._call(
            mc_calls.PodcastEpisodeStreamURL, item['episodeId'], quality=quality, device_id=device_id
        )
    elif 'wentryid' in item:  # Free account station song.
        response = self._call(
            mc_calls.RadioStationTrackStreamURL, item['storeId'], item['wentryid'], session_token,
            quality=quality, device_id=device_id
        )
    elif 'trackId' in item:  # Playlist song.
        response = self._call(
            mc_calls.TrackStreamURL, item['trackId'], quality=quality, device_id=device_id
        )
    elif 'storeId' in item and self.is_subscribed:  # Store song.
        response = self._call(
            mc_calls.TrackStreamURL, item['storeId'], quality=quality, device_id=device_id
        )
    elif 'id' in item:  # Library song.
        response = self._call(
            mc_calls.TrackStreamURL, item['id'], quality=quality, device_id=device_id
        )
    else:
        # TODO: Create an exception for not being subscribed or use a better builtin exception for this case.
        if 'storeId' in item and not self.is_subscribed:
            msg = "Can't stream a store song without a subscription."
        else:
            msg = "Item does not contain an ID field."

        raise ValueError(msg)

    # The URL may come back as a redirect 'Location' header or in the
    # response body, depending on the endpoint.
    try:
        stream_url = response.headers['Location']
    except KeyError:
        stream_url = response.body['url']

    return stream_url
2.553039
2.309386
1.105505
def thumbs_up_songs(self, *, library=True, store=True):
    """Get a listing of 'Thumbs Up' songs.

    Parameters:
        library (bool, Optional): Include 'Thumbs Up' songs from the
            library. Default: ``True``
        store (bool, Optional): Include 'Thumbs Up' songs from the
            store. Default: ``True``

    Returns:
        list: Dicts of 'Thumbs Up' songs.
    """
    thumbs_up = []

    if library is True:
        # A rating of '5' marks a thumbs-up in the library metadata.
        for song in self.songs():
            if song.get('rating', '0') == '5':
                thumbs_up.append(song)

    if store is True:
        response = self._call(mc_calls.EphemeralTop)
        thumbs_up.extend(response.body.get('data', {}).get('items', []))

    return thumbs_up
4.103029
4.354963
0.94215
def top_charts(self):
    """Get a listing of the default top charts.

    Returns:
        The top charts response body.
    """
    return self._call(mc_calls.BrowseTopChart).body
14.963835
13.655745
1.09579
def top_charts_for_genre(self, genre_id):
    """Get a listing of top charts for a top chart genre.

    Parameters:
        genre_id (str): A top chart genre ID as found with
            :meth:`top_charts_genres`.

    Returns:
        The top charts response body for the genre.
    """
    return self._call(mc_calls.BrowseTopChartForGenre, genre_id).body
6.605506
9.547132
0.691884
def top_charts_genres(self):
    """Get a listing of genres from the browse top charts tab.

    Returns:
        list: Genre dicts.
    """
    response = self._call(mc_calls.BrowseTopChartGenres)

    return response.body.get('genres', [])
8.218795
6.4854
1.267276
def run(stream=sys.stdin.buffer):
    """Decode LT-encoded blocks from *stream* and write the result to stdout.

    Reads from *stream*, applying the LT decoding algorithm to incoming
    encoded blocks until sufficiently many blocks have been received to
    reconstruct the entire file.
    """
    decoded = decode.decode(stream)
    sys.stdout.write(decoded.decode('utf8'))
9.113885
8.190227
1.112776
f_bytes = f.read() blocks = [int.from_bytes(f_bytes[i:i+blocksize].ljust(blocksize, b'0'), sys.byteorder) for i in range(0, len(f_bytes), blocksize)] return len(f_bytes), blocks
def _split_file(f, blocksize)
Block file byte contents into blocksize chunks, padding last one if necessary
2.955191
2.690976
1.098186
def encoder(f, blocksize, seed=None, c=sampler.DEFAULT_C, delta=sampler.DEFAULT_DELTA):
    """Generate an infinite sequence of LT-encoded blocks to transmit
    to the receiver.

    Parameters:
        f: A binary file-like object to encode.
        blocksize (int): Size of each source block in bytes.
        seed (int, Optional): PRNG seed; a random one is generated
            when not provided.
        c (float, Optional): Robust soliton distribution parameter.
        delta (float, Optional): Robust soliton distribution parameter.

    Yields:
        bytes: A packed block — filesize, blocksize, block seed
        (network byte order) followed by the XORed payload.
    """
    # Generate seed if not provided.
    # Fix: the original `1 << 31 - 1` parses as `1 << 30` because `-`
    # binds tighter than `<<`; the intended upper bound is 2**31 - 1.
    if seed is None:
        seed = randint(0, (1 << 31) - 1)

    # Get file blocks.
    filesize, blocks = _split_file(f, blocksize)

    # Init stream vars.
    K = len(blocks)
    prng = sampler.PRNG(params=(K, delta, c))
    prng.set_seed(seed)

    # Header format is hoisted out of the loop: three network-order
    # uint32 fields followed by the payload bytes.
    block_format = '!III%ss' % blocksize

    # Block generation loop.
    while True:
        blockseed, d, ix_samples = prng.get_src_blocks()

        # XOR the sampled source blocks together.
        block_data = 0
        for ix in ix_samples:
            block_data ^= blocks[ix]

        yield pack(
            block_format,
            filesize,
            blocksize,
            blockseed,
            int.to_bytes(block_data, blocksize, sys.byteorder),
        )
6.045417
6.153264
0.982473