| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#!/usr/bin/env python3
import torch
from torch.distributions.kl import kl_divergence
from ..distributions import Delta, MultivariateNormal
from ..lazy import MatmulLazyTensor, SumLazyTensor
from ..utils.errors import CachingError
from ..utils.memoize import pop_from_cache_ignore_args
from .delta_variational_distribution import DeltaVariationalDistribution
from .variational_strategy import VariationalStrategy
class BatchDecoupledVariationalStrategy(VariationalStrategy):
r"""
A VariationalStrategy that uses a different set of inducing points for the
variational mean and variational covar. It follows the "decoupled" model
proposed by `Jankowiak et al. (2020)`_ (which is roughly based on the strategies
    proposed by `Cheng et al. (2017)`_).
Let :math:`\mathbf Z_\mu` and :math:`\mathbf Z_\sigma` be the mean/variance
inducing points. The variational distribution for an input :math:`\mathbf
x` is given by:
.. math::
\begin{align*}
\mathbb E[ f(\mathbf x) ] &= \mathbf k_{\mathbf Z_\mu \mathbf x}^\top
\mathbf K_{\mathbf Z_\mu \mathbf Z_\mu}^{-1} \mathbf m
\\
\text{Var}[ f(\mathbf x) ] &= k_{\mathbf x \mathbf x} - \mathbf k_{\mathbf Z_\sigma \mathbf x}^\top
\mathbf K_{\mathbf Z_\sigma \mathbf Z_\sigma}^{-1}
            \left( \mathbf K_{\mathbf Z_\sigma \mathbf Z_\sigma} - \mathbf S \right)
\mathbf K_{\mathbf Z_\sigma \mathbf Z_\sigma}^{-1}
\mathbf k_{\mathbf Z_\sigma \mathbf x}
\end{align*}
where :math:`\mathbf m` and :math:`\mathbf S` are the variational parameters.
Unlike the original proposed implementation, :math:`\mathbf Z_\mu` and :math:`\mathbf Z_\sigma`
have **the same number of inducing points**, which allows us to perform batched operations.
Additionally, you can use a different set of kernel hyperparameters for the mean and the variance function.
We recommend using this feature only with the :obj:`~gpytorch.mlls.PredictiveLogLikelihood` objective function
as proposed in "Parametric Gaussian Process Regressors" (`Jankowiak et al. (2020)`_).
Use the :attr:`mean_var_batch_dim` to indicate which batch dimension corresponds to the different mean/var
kernels.
.. note::
We recommend using the "right-most" batch dimension (i.e. :attr:`mean_var_batch_dim=-1`) for the dimension
that corresponds to the different mean/variance kernel parameters.
Assuming you want `b1` many independent GPs, the :obj:`~gpytorch.variational._VariationalDistribution`
objects should have a batch shape of `b1`, and the mean/covar modules
of the GP should have a batch shape of `b1 x 2`.
(The 2 corresponds to the mean/variance hyperparameters.)
.. seealso::
:obj:`~gpytorch.variational.OrthogonallyDecoupledVariationalStrategy` (a variant proposed by
`Salimbeni et al. (2018)`_ that uses orthogonal projections.)
:param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
Typically passed in when the VariationalStrategy is created in the
__init__ method of the user defined model.
:param torch.Tensor inducing_points: Tensor containing a set of inducing
points to use for variational inference.
:param ~gpytorch.variational.VariationalDistribution variational_distribution: A
VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
:param learn_inducing_locations: (Default True): Whether or not
the inducing point locations :math:`\mathbf Z` should be learned (i.e. are they
parameters of the model).
:type learn_inducing_locations: `bool`, optional
:type mean_var_batch_dim: `int`, optional
:param mean_var_batch_dim: (Default `None`):
Set this parameter (ideally to `-1`) to indicate which dimension corresponds to different
kernel hyperparameters for the mean/variance functions.
.. _Cheng et al. (2017):
https://arxiv.org/abs/1711.10127
.. _Salimbeni et al. (2018):
https://arxiv.org/abs/1809.08820
.. _Jankowiak et al. (2020):
https://arxiv.org/abs/1910.07123
Example (**different** hypers for mean/variance):
>>> class MeanFieldDecoupledModel(gpytorch.models.ApproximateGP):
>>> '''
>>> A batch of 3 independent MeanFieldDecoupled PPGPR models.
>>> '''
>>> def __init__(self, inducing_points):
>>> # The variational parameters have a batch_shape of [3]
>>> variational_distribution = gpytorch.variational.MeanFieldVariationalDistribution(
>>> inducing_points.size(-1), batch_shape=torch.Size([3]),
>>> )
>>> variational_strategy = gpytorch.variational.BatchDecoupledVariationalStrategy(
>>> self, inducing_points, variational_distribution, learn_inducing_locations=True,
>>> mean_var_batch_dim=-1
>>> )
>>>
>>> # The mean/covar modules have a batch_shape of [3, 2]
>>> # where the last batch dim corresponds to the mean & variance hyperparameters
>>> super().__init__(variational_strategy)
>>> self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([3, 2]))
>>> self.covar_module = gpytorch.kernels.ScaleKernel(
>>> gpytorch.kernels.RBFKernel(batch_shape=torch.Size([3, 2])),
>>> batch_shape=torch.Size([3, 2]),
>>> )
Example (**shared** hypers for mean/variance):
>>> class MeanFieldDecoupledModel(gpytorch.models.ApproximateGP):
>>> '''
>>> A batch of 3 independent MeanFieldDecoupled PPGPR models.
>>> '''
>>> def __init__(self, inducing_points):
>>> # The variational parameters have a batch_shape of [3]
>>> variational_distribution = gpytorch.variational.MeanFieldVariationalDistribution(
>>> inducing_points.size(-1), batch_shape=torch.Size([3]),
>>> )
>>> variational_strategy = gpytorch.variational.BatchDecoupledVariationalStrategy(
>>> self, inducing_points, variational_distribution, learn_inducing_locations=True,
>>> )
>>>
>>> # The mean/covar modules have a batch_shape of [3]
>>> super().__init__(variational_strategy)
>>> self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([3]))
>>> self.covar_module = gpytorch.kernels.ScaleKernel(
>>> gpytorch.kernels.RBFKernel(batch_shape=torch.Size([3])),
>>> batch_shape=torch.Size([3]),
>>> )
"""
def __init__(
self, model, inducing_points, variational_distribution, learn_inducing_locations=True, mean_var_batch_dim=None
):
if isinstance(variational_distribution, DeltaVariationalDistribution):
raise NotImplementedError(
"BatchDecoupledVariationalStrategy does not work with DeltaVariationalDistribution"
)
if mean_var_batch_dim is not None and mean_var_batch_dim >= 0:
raise ValueError(f"mean_var_batch_dim should be negative indexed, got {mean_var_batch_dim}")
self.mean_var_batch_dim = mean_var_batch_dim
# Maybe unsqueeze inducing points
if inducing_points.dim() == 1:
inducing_points = inducing_points.unsqueeze(-1)
# We're going to create two set of inducing points
# One set for computing the mean, one set for computing the variance
if self.mean_var_batch_dim is not None:
inducing_points = torch.stack([inducing_points, inducing_points], dim=(self.mean_var_batch_dim - 2))
else:
inducing_points = torch.stack([inducing_points, inducing_points], dim=-3)
super().__init__(model, inducing_points, variational_distribution, learn_inducing_locations)
def _expand_inputs(self, x, inducing_points):
# If we haven't explicitly marked a dimension as batch, add the corresponding batch dimension to the input
if self.mean_var_batch_dim is None:
x = x.unsqueeze(-3)
else:
x = x.unsqueeze(self.mean_var_batch_dim - 2)
return super()._expand_inputs(x, inducing_points)
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
# We'll compute the covariance, and cross-covariance terms for both the
# pred-mean and pred-covar, using their different inducing points (and maybe kernel hypers)
mean_var_batch_dim = self.mean_var_batch_dim or -1
# Compute full prior distribution
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_covar = full_output.lazy_covariance_matrix
# Covariance terms
num_induc = inducing_points.size(-2)
test_mean = full_output.mean[..., num_induc:]
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# Compute interpolation terms
# K_ZZ^{-1/2} K_ZX
# K_ZZ^{-1/2} \mu_Z
L = self._cholesky_factor(induc_induc_covar)
if L.shape != induc_induc_covar.shape:
# Aggressive caching can cause nasty shape incompatibilies when evaluating with different batch shapes
# TODO: Use a hook to make this cleaner
try:
pop_from_cache_ignore_args(self, "cholesky_factor")
except CachingError:
pass
L = self._cholesky_factor(induc_induc_covar)
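        # solve against the Cholesky factor in double precision, then cast back to the input dtype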
interp_term = L.inv_matmul(induc_data_covar.double()).to(full_inputs.dtype)
mean_interp_term = interp_term.select(mean_var_batch_dim - 2, 0)
var_interp_term = interp_term.select(mean_var_batch_dim - 2, 1)
# Compute the mean of q(f)
# k_XZ K_ZZ^{-1/2} m + \mu_X
# Here we're using the terms that correspond to the mean's inducing points
predictive_mean = torch.add(
torch.matmul(mean_interp_term.transpose(-1, -2), inducing_values.unsqueeze(-1)).squeeze(-1),
test_mean.select(mean_var_batch_dim - 1, 0),
)
# Compute the covariance of q(f)
# K_XX + k_XZ K_ZZ^{-1/2} (S - I) K_ZZ^{-1/2} k_ZX
middle_term = self.prior_distribution.lazy_covariance_matrix.mul(-1)
if variational_inducing_covar is not None:
middle_term = SumLazyTensor(variational_inducing_covar, middle_term)
predictive_covar = SumLazyTensor(
data_data_covar.add_jitter(1e-4).evaluate().select(mean_var_batch_dim - 2, 1),
MatmulLazyTensor(var_interp_term.transpose(-1, -2), middle_term @ var_interp_term),
)
return MultivariateNormal(predictive_mean, predictive_covar)
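    # The KL term is evaluated in two pieces below: the variational mean enters as a
    # Delta distribution against the prior, and the covariance as a zero-mean Gaussian.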
def kl_divergence(self):
variational_dist = self.variational_distribution
prior_dist = self.prior_distribution
mean_dist = Delta(variational_dist.mean)
covar_dist = MultivariateNormal(
torch.zeros_like(variational_dist.mean), variational_dist.lazy_covariance_matrix
)
return kl_divergence(mean_dist, prior_dist) + kl_divergence(covar_dist, prior_dist)
| jrg365/gpytorch | gpytorch/variational/batch_decoupled_variational_strategy.py | Python | mit | 11,612 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
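# Initial schema for an asset-declaration (DDJJ) admin app: declarations (Ddjj),
# people (Persona), posts held (Cargo), and declared assets (Bien).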
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bien',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('descripcion', models.CharField(max_length=255, blank=True)),
('direccion', models.CharField(max_length=255, blank=True)),
('barrio', models.CharField(max_length=255, blank=True)),
('localidad', models.CharField(max_length=255, blank=True)),
('provincia', models.CharField(max_length=255, blank=True)),
('pais', models.CharField(max_length=255, blank=True)),
('modelo', models.IntegerField(null=True, blank=True)),
('entidad', models.CharField(max_length=255, blank=True)),
('ramo', models.CharField(max_length=255, blank=True)),
('cant_acciones', models.CharField(max_length=255, blank=True)),
('fecha_desde', models.DateField(null=True, blank=True)),
('destino', models.CharField(max_length=255, blank=True)),
('origen', models.CharField(max_length=255, blank=True)),
('superficie', models.DecimalField(help_text='Superficie de la propiedad', null=True, max_digits=10, decimal_places=2, blank=True)),
('unidad_medida_id', models.IntegerField(blank=True, help_text='Unidad de medida usada para la superficie', null=True, choices=[(0, 'm2'), (1, 'ha')])),
('m_mejoras_id', models.IntegerField(blank=True, null=True, choices=[(0, '$'), (1, 'us$'), (2, 'E'), (3, '$ Uruguayos'), (4, '\xa3'), (5, 'A'), (6, 'A$'), (7, '$L')])),
('mejoras', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),
('m_valor_fiscal_id', models.IntegerField(blank=True, null=True, choices=[(0, '$'), (1, 'us$'), (2, 'E'), (3, '$ Uruguayos'), (4, '\xa3'), (5, 'A'), (6, 'A$'), (7, '$L')])),
('valor_fiscal', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),
('m_valor_adq_id', models.IntegerField(blank=True, null=True, choices=[(0, '$'), (1, 'us$'), (2, 'E'), (3, '$ Uruguayos'), (4, '\xa3'), (5, 'A'), (6, 'A$'), (7, '$L')])),
('valor_adq', models.DecimalField(null=True, max_digits=10, decimal_places=2, blank=True)),
('fecha_hasta', models.DateField(null=True, blank=True)),
('titular_dominio', models.CharField(max_length=255, blank=True)),
('porcentaje', models.DecimalField(help_text="<strong>NO</strong> incluir el signo '%'.<br> Si ingresa un n\xfamero decimal use '.' (punto) como delimitador", null=True, max_digits=10, decimal_places=2, blank=True)),
('vinculo', models.CharField(default='Titular', help_text='Indica la relacion con el titular de la DDJJ', max_length=255, blank=True, choices=[('Conviviente', 'Conviviente'), ('C\xf3nyuge', 'C\xf3nyuge'), ('Hijo/a', 'Hijo/a'), ('Titular', 'Titular')])),
('periodo', models.CharField(max_length=255, blank=True)),
('obs', models.CharField(max_length=255, blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('tipo_bien_s', models.CharField(max_length=255, blank=True)),
('nombre_bien_s', models.CharField(max_length=255, blank=True)),
],
options={
'ordering': ['tipo_bien', 'nombre_bien'],
'db_table': 'biens',
'verbose_name_plural': 'bienes',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Cargo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('jurisdiccion', models.CharField(max_length=255, blank=True)),
('cargo', models.CharField(help_text='Nombre del cargo', max_length=255)),
('poder_id', models.IntegerField(blank=True, null=True, choices=[(0, 'Ejecutivo'), (1, 'Legislativo'), (2, 'Judicial')])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['cargo'],
'db_table': 'cargos',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ContenidoDdjjs',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('ddjj_id', models.IntegerField(null=True, blank=True)),
('ddjj_ano', models.CharField(max_length=255, blank=True)),
('ddjj_tipo', models.CharField(max_length=255, blank=True)),
('poder_id', models.IntegerField(null=True, blank=True)),
('persona_str', models.CharField(max_length=255, blank=True)),
('persona_id', models.IntegerField(null=True, blank=True)),
('cargo_str', models.CharField(max_length=255, blank=True)),
('cargo_id', models.IntegerField(null=True, blank=True)),
('contenido', models.TextField(blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'contenido_ddjjs',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Ddjj',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ano', models.IntegerField()),
('tipo_ddjj_id', models.IntegerField(choices=[(0, 'alta'), (1, 'baja'), (2, 'inicial'), (3, 'anual')])),
('funcionario', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)),
('url', models.CharField(help_text='Url DocumentCloud', max_length=255, blank=True)),
('key', models.IntegerField(help_text='Este campo lo completa el sistema.', null=True, blank=True)),
('clave', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)),
('flag_presenta', models.IntegerField(default=1, choices=[(0, 'Si'), (1, 'No')], blank=True, help_text="<strong style='color:blue'>'Solo el PDF'</strong> si solo se muestra el pdf, ej: cartas donde declaran que la ddjj es igual a la del a\xf1o anterior", null=True, verbose_name='Carta de DDJJ')),
('obs', models.TextField(blank=True)),
('flag_search', models.CharField(help_text='Este campo lo completa el sistema.', max_length=255, blank=True)),
('visitas', models.DecimalField(null=True, max_digits=10, decimal_places=0, blank=True)),
('status', models.IntegerField(default=0, help_text='Indica si puede ser publicada', choices=[(0, 'Deshabilitado'), (1, 'Habilitado')])),
('poder_id', models.IntegerField(choices=[(0, 'Ejecutivo'), (1, 'Legislativo'), (2, 'Judicial')])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['persona'],
'db_table': 'ddjjs',
'verbose_name': 'Declaraci\xf3n Jurada',
'verbose_name_plural': 'Declaraciones Juradas',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Jurisdiccion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=255)),
('poder_id', models.IntegerField(blank=True, null=True, choices=[(0, 'Ejecutivo'), (1, 'Legislativo'), (2, 'Judicial')])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['nombre'],
'db_table': 'jurisdiccions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NombreBien',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=255, blank=True)),
('tipo_bien_id', models.IntegerField(null=True, blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['nombre'],
'db_table': 'nombre_biens',
'verbose_name_plural': 'Nombre Bienes',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Persona',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('apellido', models.CharField(max_length=255)),
('nombre', models.CharField(max_length=255)),
('legajo', models.CharField(max_length=255, blank=True)),
('tipo_documento_id', models.IntegerField(blank=True, null=True, choices=[(0, 'dni'), (1, 'le'), (2, 'lc'), (3, 'pasaporte')])),
('documento', models.IntegerField(null=True, blank=True)),
('cuit_cuil', models.CharField(max_length=255, blank=True)),
('nacimento', models.DateField(null=True, blank=True)),
('sexo_id', models.IntegerField(blank=True, null=True, choices=[(0, 'M'), (1, 'F')])),
('estado_civil_id', models.IntegerField(blank=True, null=True, choices=[(0, 'Casado/a'), (1, 'C\xf3nyugue'), (2, 'Divorciado/a'), (3, 'Separado'), (4, 'Soltero/a'), (5, 'U. Hecho'), (6, 'Viudo/a')])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('tag_id', models.CharField(help_text='ID del tag en el diario La Naci\xf3n', max_length=255, blank=True)),
('tag_img_id', models.CharField(help_text='ID de la img del tag', max_length=255, blank=True)),
('tag_descripcion', models.CharField(help_text='Descripcion del tag Nacion', max_length=255, blank=True)),
('ficha_d_l', models.CharField(help_text='Url ficha de Directorio Legislativo', max_length=255, blank=True)),
],
options={
'ordering': ['apellido', 'nombre'],
'db_table': 'personas',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PersonaCargo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('flag_ingreso', models.IntegerField(null=True, blank=True)),
('ingreso', models.DateField(null=True, blank=True)),
('egreso', models.DateField(null=True, blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('cargo', models.ForeignKey(to='admin_ddjj_app.Cargo')),
('jurisdiccion', models.ForeignKey(blank=True, to='admin_ddjj_app.Jurisdiccion', null=True)),
('persona', models.ForeignKey(to='admin_ddjj_app.Persona')),
],
options={
'ordering': ['cargo'],
'db_table': 'persona_cargos',
'verbose_name_plural': 'Persona Cargos',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TiempoControls',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('dias', models.CharField(max_length=255, blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'tiempo_controls',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TipoBien',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(max_length=255, blank=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['nombre'],
'db_table': 'tipo_biens',
'verbose_name_plural': 'Tipo Bienes',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='ddjj',
name='persona',
field=models.ForeignKey(related_name='ddjjs', to='admin_ddjj_app.Persona'),
preserve_default=True,
),
migrations.AddField(
model_name='ddjj',
name='persona_cargo',
field=models.ForeignKey(related_name='ddjjs', to='admin_ddjj_app.PersonaCargo', help_text='Indique el cargo que ocupa para esta DDJJ'),
preserve_default=True,
),
migrations.AddField(
model_name='cargo',
name='personas',
field=models.ManyToManyField(to='admin_ddjj_app.Persona', through='admin_ddjj_app.PersonaCargo'),
preserve_default=True,
),
migrations.AddField(
model_name='bien',
name='ddjj',
field=models.ForeignKey(related_name='bienes', to='admin_ddjj_app.Ddjj', help_text='Indica la DDJJ a la cual pertenece este bien'),
preserve_default=True,
),
migrations.AddField(
model_name='bien',
name='nombre_bien',
field=models.ForeignKey(related_name='bienes', to='admin_ddjj_app.NombreBien'),
preserve_default=True,
),
migrations.AddField(
model_name='bien',
name='persona',
field=models.ForeignKey(help_text='Es el titular del bien, este puede ser distinto al titular de la DDJJ', to='admin_ddjj_app.Persona'),
preserve_default=True,
),
migrations.AddField(
model_name='bien',
name='tipo_bien',
field=models.ForeignKey(related_name='bienes', to='admin_ddjj_app.TipoBien'),
preserve_default=True,
),
]
| lanacioncom/ddjj_admin_lanacion | admin_ddjj_app/migrations/0001_initial.py | Python | mit | 15,375 |
#encoding: utf-8
from flask_restful import Resource, reqparse
class Test(Resource):
def __init__(self):
self.parser = reqparse.RequestParser()
self.parser.add_argument('id', type=int)
        super(Test, self).__init__()
    def get(self):
        args = self.parser.parse_args()
        return {'id': args['id']}
def post(self):
pass
def put(self):
pass
def delete(self):
pass
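# Hypothetical wiring (names assumed, not shown in this file):
# api = Api(app)
# api.add_resource(Test, '/tests')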
| chenke91/ckPermission | app/api_v1/resources/tests.py | Python | mit | 403 |
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
from json import dumps
from functools import wraps
from flask import Response
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
return Response(dumps(function(*args, **kwargs)),
mimetype='application/json')
return inner
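# Hypothetical usage with a Flask app:
# @app.route('/meta')
# @jsonify
# def meta():
#     return {'version': 1}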
| sargo/exif-compare | src/exif_compare/utils.py | Python | mit | 446 |
#-------------------------------------------------------------------------
# The Azure Batch Apps Python Client
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
"""Unit tests for Pool and PoolSpecifier"""
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
from unittest import mock
except ImportError:
import mock
from batchapps.pool import (
Pool,
PoolSpecifier)
from batchapps.api import (
BatchAppsApi,
Response)
from batchapps.exceptions import RestCallException
# pylint: disable=W0212
class TestPool(unittest.TestCase):
"""Unit tests for Pool"""
def test_pool_create(self):
"""Test Pool object"""
api = mock.create_autospec(BatchAppsApi)
pool = Pool(api)
self.assertIsNone(pool.id)
self.assertIsNone(pool.created)
self.assertEqual(pool.target_size, 0)
pool_spec = {
'id': 'abc',
'creationTime': '',
'targetDedicated': '5',
'state': 'active',
'communication': True
}
pool = Pool(api, **pool_spec)
self.assertEqual(pool.id, 'abc')
self.assertEqual(pool.created, '')
self.assertEqual(pool.target_size, 5)
self.assertEqual(pool.communication, True)
def test_pool_delete(self):
"""Test delete"""
api = mock.create_autospec(BatchAppsApi)
api.delete_pool.return_value = mock.create_autospec(Response)
api.delete_pool.return_value.success = True
pool = Pool(api)
pool.delete()
api.delete_pool.assert_called_with(None)
api.delete_pool.return_value.success = False
api.delete_pool.return_value.result = RestCallException(None, "Test", None)
with self.assertRaises(RestCallException):
pool.delete()
@mock.patch.object(Pool, 'update')
def test_pool_resize(self, mock_update):
"""Test resize"""
api = mock.create_autospec(BatchAppsApi)
api.resize_pool.return_value = mock.create_autospec(Response)
api.resize_pool.return_value.success = True
pool = Pool(api)
pool.resize(5)
api.resize_pool.assert_called_with(None, 5)
mock_update.assert_called_with()
with self.assertRaises(ValueError):
pool.resize("test")
api.resize_pool.return_value.success = False
api.resize_pool.return_value.result = RestCallException(None, "Test", None)
mock_update.called = False
with self.assertRaises(RestCallException):
pool.resize(1)
self.assertFalse(mock_update.called)
def test_pool_update(self):
"""Test delete"""
api = mock.create_autospec(BatchAppsApi)
pool = Pool(api)
api.get_pool.return_value = mock.create_autospec(Response)
api.get_pool.return_value.success = True
api.get_pool.return_value.result = {
'targetDedicated':'5',
'currentDedicated':'4',
'state':'active',
'allocationState':'test',
}
self.assertEqual(pool.target_size, 0)
self.assertEqual(pool.current_size, 0)
self.assertEqual(pool.state, None)
self.assertEqual(pool.allocation_state, None)
self.assertEqual(pool.resize_error, '')
pool.update()
api.get_pool.assert_called_with(pool_id=None)
self.assertEqual(pool.target_size, 5)
self.assertEqual(pool.current_size, 4)
self.assertEqual(pool.state, 'active')
self.assertEqual(pool.allocation_state, 'test')
self.assertEqual(pool.resize_error, '')
api.get_pool.return_value.success = False
api.get_pool.return_value.result = RestCallException(None, "Test", None)
with self.assertRaises(RestCallException):
pool.update()
class TestPoolSpecifier(unittest.TestCase):
"""Unit tests for PoolSpecifier"""
def test_poolspecifier_create(self):
"""Test PoolSpecifier object"""
api = mock.create_autospec(BatchAppsApi)
pool = PoolSpecifier(api)
self.assertEqual(pool.target_size, 0)
self.assertEqual(pool.max_tasks, 1)
self.assertEqual(pool.communication, False)
self.assertEqual(pool.certificates, [])
pool = PoolSpecifier(api, target_size=5, max_tasks=2, communication=True)
self.assertEqual(pool.target_size, 5)
self.assertEqual(pool.max_tasks, 2)
self.assertEqual(pool.communication, True)
self.assertEqual(pool.certificates, [])
def test_poolspecifier_start(self):
"""Test start"""
api = mock.create_autospec(BatchAppsApi)
api.add_pool.return_value.success = True
api.add_pool.return_value.result = {
'poolId':'abc', 'link':{'href':'test.com'}}
pool = PoolSpecifier(api)
new_pool = pool.start()
self.assertEqual(new_pool, {'id':'abc', 'link':'test.com'})
api.add_pool.assert_called_with(0, 1, False, [])
api.add_pool.return_value.success = False
api.add_pool.return_value.result = RestCallException(None, "Test", None)
with self.assertRaises(RestCallException):
pool.start()
def test_poolspecifier_add_cert(self):
api = mock.create_autospec(BatchAppsApi)
pool = PoolSpecifier(api)
pool.add_cert("test_thumb")
self.assertEqual(pool.certificates, [{
'thumbprint':'test_thumb',
'thumbprintAlgorithm':'SHA1',
'storeLocation':'CurrentUser',
'storeName':'My'}])
pool.add_cert("test_thumb", store_location="test", store_name=None)
self.assertEqual(pool.certificates, [{
'thumbprint':'test_thumb',
'thumbprintAlgorithm':'SHA1',
'storeLocation':'CurrentUser',
'storeName':'My'},{
'thumbprint':'test_thumb',
'thumbprintAlgorithm':'SHA1',
'storeLocation':'test',
'storeName':'None'}])
pool.id = None
pool.certificates = [0,1,2,3,4,5,6,7,8,9]
pool.add_cert("new_cert")
self.assertEqual(pool.certificates, [0,1,2,3,4,5,6,7,8,9])
if __name__ == '__main__':
unittest.main()
| Azure/azure-batch-apps-python | batchapps/test/unittest_pool.py | Python | mit | 7,440 |
def AND(x1, x2):
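    # hand-wired perceptron: fire (1) only when w1*x1 + w2*x2 exceeds the threshold theta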
w1, w2, theta = 0.5, 0.5, 0.7
tmp = x1 * w1 + x2 * w2
if tmp <= theta:
print(0)
elif tmp > theta:
print(1)
AND(0, 0)
AND(1, 0)
AND(0, 1)
AND(1, 1)
| yukihirai0505/tutorial-program | programming/python/machine-learning/ch02/and_gate01.py | Python | mit | 198 |
#!/usr/bin/env python3
def get_plist_text(cf_bundler_identifier, cf_bundle_name=None,
docset_platform_family=None):
"""TODO"""
cf_bundle_name = cf_bundle_name or cf_bundler_identifier.upper()
docset_platform_family = docset_platform_family or cf_bundle_name.upper()
return """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleIdentifier</key>
<string>{cf_bundler_identifier}</string>
<key>CFBundleName</key>
<string>{cf_bundle_name}</string>
<key>DocSetPlatformFamily</key>
<string>{docset_platform_family}</string>
<key>isDashDocset</key>
<true/>
</dict>
</plist>""".format(cf_bundler_identifier=cf_bundler_identifier,
cf_bundle_name=cf_bundle_name,
docset_platform_family=docset_platform_family)
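# Hypothetical call (argument values assumed for illustration):
# print(get_plist_text('nginx', cf_bundle_name='Nginx'))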
| cblair/docset_from_html | get_plist_text.py | Python | mit | 959 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_envcheckr
----------------------------------
Tests for `envcheckr` module.
"""
import pytest
from envcheckr import envcheckr
def test_parse_lines():
lines_a = envcheckr.parse_lines('tests/env')
assert len(lines_a) == 3
lines_b = envcheckr.parse_lines('tests/env.example')
assert len(lines_b) == 7
def test_parse_key():
lines = envcheckr.parse_lines('tests/env')
    assert envcheckr.parse_key(lines[0]) == 'FRUIT'
    assert envcheckr.parse_key(lines[1]) == 'DRINK'
    assert envcheckr.parse_key(lines[2]) == 'ANIMAL'
def test_get_missing_keys():
file_a = 'tests/env'
file_b = 'tests/env.example'
missing_keys = envcheckr.get_missing_keys(file_a, file_b)
    assert len(missing_keys) == 4
    assert missing_keys[0] == 'FOOD=Pizza\n'
    assert missing_keys[1] == 'CODE=Python\n'
    assert missing_keys[2] == 'SPORT=Football\n'
    assert missing_keys[3] == 'CITY=Brisbane\n'
| adamjace/envcheckr | tests/test_envcheckr.py | Python | mit | 983 |
from django.template import Library
from django.conf import settings
if "django.contrib.sites" in settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
current_domain = lambda: Site.objects.get_current().domain
elif getattr(settings, "SITE_DOMAIN", None):
current_domain = lambda: settings.SITE_DOMAIN
else:
current_domain = lambda: "example.com"
register = Library()
def fully_qualified(url):
# if it's not a string the rest of this fn will bomb
if not isinstance(url, basestring): return ""
if url.startswith('http'):
return url
elif url.startswith("/"):
return 'http://%s%s' % (current_domain(), url)
else:
return 'http://%s' % url
@register.inclusion_tag('social_tags/twitter.html')
def twitter_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/facebook.html')
def facebook_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/linkedin.html')
def linkedin_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/email.html')
def email_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/google.html')
def google_plus(url=None):
url = fully_qualified(url)
return locals()
| Rootbuzz/Django-Socialtags | socialtags/templatetags/social_tags.py | Python | mit | 1,400 |
from django.db import models
class Salary(models.Model):
    id = models.AutoField(primary_key=True)
    bh = models.CharField(max_length=10)
    xm = models.CharField(max_length=12)
    status = models.CharField(max_length=8)
class Meta:
db_table = 'swan_salary'
    def __str__(self):
        return str(self.id)
| huaiping/pandora | salary/models.py | Python | mit | 332 |
#Pizza please
import pyaudiogame
from pyaudiogame import storage
spk = pyaudiogame.speak
MyApp = pyaudiogame.App("Pizza Please")
storage.screen = ["start"]
storage.toppings = ["cheese", "olives", "mushrooms", "Pepperoni", "french fries"]
storage.your_toppings = ["cheese"]
storage.did_run = False
def is_number(number, topping_list):
"""Will check that what the user enters is really a number and not a letter, also that it is within our list"""
if number in "0123456789":
number = int(number)
if number <= len(topping_list)-1:
return number
def say_message(message):
"""Will check if the message has been read and if so, passes. Else, it will read the message"""
if not storage.did_run:
spk(message)
storage.did_run = True
def add_topping(key):
"""Will add a topping to your pizza"""
number = is_number(key, storage.toppings)
    if number is not None:
storage.your_toppings.append(storage.toppings[number])
spk("You added %s to your pizza. Your pizza currently has %s on top" % (storage.toppings[number], storage.your_toppings))
def remove_topping(key):
"""Removes toppings from the pizza"""
number = is_number(key, storage.your_toppings)
    if number is not None:
t = storage.your_toppings.pop(number)
if t == "cheese":
spk("You can't remove cheese, what are you, Italian?")
storage.your_toppings.insert(0, "cheese")
else:
spk("You removed %s from your pizza. Now your pizza has %s on top" % (t, storage.your_toppings))
def logic(actions):
"""Press a and d to switch from adding and removing toppings, press 0-9 to deal with the toppings and press space to eat the pizza"""
key = actions['key']
if key == "d":
spk("Press a number to remove a topping from your pizza, press a to add toppings again")
storage.screen[0] = "remove"
storage.did_run = False
elif key == "a":
spk("Press a number to add a topping to your pizza. Press d to remove a topping you don't like")
storage.screen[0] = "add"
storage.did_run = False
elif key == "space":
spk("You sit down to enjoy a yummy pizza. You eat... eat... eat... eat... and are finally done. That was good! Now it's time for another!")
storage.your_toppings = ['cheese']
storage.did_run = False
elif storage.screen[0] == "start":
spk("Welcom to pizza madness! Here you can build your own pizza to eat! Press a to add toppings, press d to remove them and when you are done, press space to eat your yummy pizza!!!")
storage.screen.remove("start")
storage.screen.append("add")
elif storage.screen[0] == "add":
say_message("Please choose a number of toppings to add! Press d to start removing toppings. Toppings are %s" % storage.toppings)
if key:
add_topping(key)
elif storage.screen[0] == "remove" and key:
remove_topping(key)
MyApp.logic = logic
MyApp.run()
| frastlin/PyAudioGame | examples/basic_tutorial/ex6.py | Python | mit | 2,789 |
from exterminate.Utilities import builtins
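# keep a reference to the real built-in range before it is replaced below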
_range = range
def alt_range(start, stop, step=1):
return _range(start-2, stop+2, max(1, int(step/2)))
builtins.range = alt_range
| adtac/exterminate | exterminate/AltRange.py | Python | mit | 183 |
from collections import OrderedDict
class DataSet(object):
__slots__ = (
'events', # List of all events in this data set
'group', # Iterable containing groups of events
)
def __init__(self, query, group_function):
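        # group_function=None leaves events ungrouped; otherwise each event is
        # bucketed under group_function(event), in insertion order via OrderedDict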
self.events = query.all()
if group_function is None:
self.group = self.events
elif callable(group_function):
self.group = OrderedDict()
for event in self.events:
# Add this event to the group-by entries
key = group_function(event)
if key not in self.group:
self.group[key] = []
self.group[key].append(event)
else:
raise ValueError("group_function is not callable")
def __pretty__(self, p, cycle):
p.text('<{0}: '.format(type(self).__name__))
if cycle:
p.text('...')
else:
            p.pretty({
                'events': self.events,
                # group is a plain list when no group_function was given
                'group': list(self.group.keys()) if isinstance(self.group, OrderedDict) else self.group,
            })
p.text('>')
| ex-nerd/health-stats | health_stats/dataset.py | Python | mit | 1,067 |
import logging
from datetime import datetime
from core import app
from sqlalchemy import inspect
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy(app)
class Show(db.Model):
show_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
link = db.Column(db.String(255))
country = db.Column(db.String(10))
started = db.Column(db.String(15))
total_seasons = db.Column(db.Integer)
status = db.Column(db.String(32))
classification = db.Column(db.String(20))
episodes_saved_at = db.Column(db.DateTime)
# genres = db.Column(db.String(20))
episodes = db.relationship("Episode", order_by="Episode.episode_id", backref="show")
def __init__(self, show_id, name, link, country, started, total_seasons, status, classification):
self.show_id = show_id
self.name = name
self.link = link
self.country = country
self.started = started
self.total_seasons = total_seasons
self.status = status
self.classification = classification
class Episode(db.Model):
episode_id = db.Column(db.Integer, primary_key=True)
show_id = db.Column(db.Integer, db.ForeignKey('show.show_id'))
episode_number = db.Column(db.Integer)
season_number = db.Column(db.Integer)
season_episode_number = db.Column(db.Integer)
air_date = db.Column(db.Date)
title = db.Column(db.String(255))
link = db.Column(db.String(255))
watched = db.Column(db.String(1))
# show = db.relationship("Show", backref=db.backref("episodes", order_by=episode_id))
__table_args__ = (
db.UniqueConstraint('show_id', 'season_number', 'season_episode_number', name='_show_session_epi_uc'),)
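    # one row per (show, season, episode-within-season) position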
    def __init__(self, episode_id, show_id, episode_number, season_number, season_episode_number, air_date, title, link,
                 watched):
self.episode_id = episode_id
self.show_id = show_id
self.episode_number = episode_number
self.season_number = season_number
self.season_episode_number = season_episode_number
self.air_date = air_date
self.title = title
self.link = link
self.watched = watched
    def is_older(self, dt=None):
        # default computed per call; a datetime default argument would be frozen at import time
        if dt is None:
            dt = datetime.now().date()
        return self.air_date < dt
def insert_show(show):
db.session.add(show)
db.session.commit()
def insert_entity(entity):
db.session.add(entity)
db.session.commit()
def show_exists(show_id):
show = Show.query.filter_by(show_id=show_id).first()
return show is not None
def delete_show(show_id):
show = Show.query.filter_by(show_id=show_id).first()
db.session.delete(show)
for e in Episode.query.filter_by(show_id=show_id).all():
db.session.delete(e)
db.session.commit()
def create_tables():
logging.info('Creating tables')
db.create_all()
def drop_tables():
logging.info('Deleting tables')
db.drop_all()
def check_table_exists():
table_names = inspect(db.engine).get_table_names()
return 'episode' in table_names and 'show' in table_names
if __name__ == '__main__':
import sys
app.config['SQLALCHEMY_ECHO'] = True
if sys.argv[1] == 'init':
create_tables()
| fahadshaon/tv_tracker | db.py | Python | mit | 3,213 |
import unittest
from .connected_graph import Node
class TestConnectedGraph(unittest.TestCase):
def test_acyclic_graph(self):
"""Example graph from https://upload.wikimedia.org/wikipedia/commons/0/03/Directed_acyclic_graph_2.svg"""
n9 = Node(9)
n10 = Node(10)
n8 = Node(8, [n9])
n3 = Node(3, [n8, n10])
n2 = Node(2)
n11 = Node(11, [n2, n9, n10])
n5 = Node(5, [n11])
self.assertTrue(n3.connected_to(n9))
self.assertTrue(n11.connected_to(n9))
self.assertTrue(n5.connected_to(n9))
self.assertFalse(n9.connected_to(n5))
self.assertFalse(n9.connected_to(n11))
self.assertFalse(n3.connected_to(n11))
def test_connected_to_self(self):
n1 = Node(1)
self.assertTrue(n1.connected_to(n1))
| intenthq/code-challenges | python/connected_graph/test_connected_graph.py | Python | mit | 910 |
import _plotly_utils.basevalidators
class LocationssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="locationssrc", parent_name="choropleth", **kwargs):
super(LocationssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/choropleth/_locationssrc.py | Python | mit | 456 |
# -*- coding: utf-8 -*-
"""
Test for: command line arguments
"""
from nose.tools import eq_, assert_raises
from m2bk import app, config, const
import os
def _get_arg_cfg_file_name(arg, filename):
try:
app.init_parsecmdline([arg, filename])
except FileNotFoundError:
pass
return config.get_config_file_name()
def test_args_config():
# file names
f1 = 'f1.txt'
f2 = 'f2.txt'
f3 = 'f3.txt'
# ---
# Test whether -c works as --config
eq_(_get_arg_cfg_file_name('-c', f1),
_get_arg_cfg_file_name('--config', f1),
msg="-c and --config are not capturing the expected file name")
# ---
# Test -c and --config with more than one value
assert_raises(SystemExit, app.init_parsecmdline, ['-c', f1, f2])
# absolute path is expected for f1
eq_(config.get_config_file_name(), os.path.abspath(f1),
msg="Unexpected file, got '{f}' instead of '{f1}'".format(f=config.get_config_file_name(), f1=os.path.abspath(f1)))
# ---
# test when several config directives are specified
try:
app.init_parsecmdline(['-c', f1, '--config', f2, '-c', f3])
except FileNotFoundError:
pass
# file name should be f3
eq_(config.get_config_file_name(), os.path.abspath(f3),
msg="The last --config/-c argument should be the one whose file name"
"should be captured")
def test_args_noargs():
# Test whether m2bk tries to use default config file
# when no arguments are present
try:
app.init_parsecmdline()
except FileNotFoundError:
pass
eq_(config.get_config_file_name(), config.CONF_DEFAULT_FILE,
msg="CONF_DEFAULT_FILE expected, got '{f}'".format(f=config.get_config_file_name()))
| axltxl/m2bk | tests/test_app.py | Python | mit | 1,751 |
#!/usr/bin/python
# openvpn.py: library to handle starting and stopping openvpn instances
import subprocess
import threading
import time
class OpenVPN():
def __init__(self, config_file=None, auth_file=None, timeout=10):
self.started = False
self.stopped = False
self.error = False
self.notifications = ""
self.auth_file = auth_file
self.config_file = config_file
self.thread = threading.Thread(target=self._invoke_openvpn)
self.thread.setDaemon(1)
self.timeout = timeout
def _invoke_openvpn(self):
if self.auth_file is None:
cmd = ['sudo', 'openvpn', '--script-security', '2',
'--config', self.config_file]
else:
cmd = ['sudo', 'openvpn', '--script-security', '2',
'--config', self.config_file,
'--auth-user-pass', self.auth_file]
self.process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.kill_switch = self.process.terminate
self.starting = True
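        # pump openvpn's stdout line by line until EOF, updating status via the callback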
while True:
line = self.process.stdout.readline().strip()
if not line:
break
self.output_callback(line, self.process.terminate)
def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
self.notifications += line + "\n"
if "Initialization Sequence Completed" in line:
self.started = True
if "ERROR:" in line:
self.error = True
if "process exiting" in line:
self.stopped = True
def start(self, timeout=None):
"""Start openvpn and block until the connection is opened or there is
an error
"""
if not timeout:
timeout = self.timeout
self.thread.start()
start_time = time.time()
while start_time + timeout > time.time():
self.thread.join(1)
if self.error or self.started:
break
if self.started:
print "openvpn started"
else:
print "openvpn not started"
print self.notifications
def stop(self, timeout=None):
"""Stop openvpn"""
if not timeout:
timeout = self.timeout
self.kill_switch()
self.thread.join(timeout)
if self.stopped:
print "stopped"
else:
print "not stopped"
print self.notifications
| ben-jones/centinel | centinel/vpn/openvpn.py | Python | mit | 2,675 |
from cocosCairo.cocosCairo import * # Convenience module to import all other modules
from splash import *
BACKGROUND_COLOR = Color(0.1, 0.3, 0.7)
MAZE_PATHS = ["maze01.maze", "maze02.maze", "maze03.maze"] # an ordered list of the maze files
PATH_INDEX = 0 # the index of the next maze file to load
class MazeScene(Scene):
def __init__(self, modelPath):
Scene.__init__(self)
self._modelPath = modelPath
def setup(self):
self.setBackgroundColor(BACKGROUND_COLOR)
def onEnterFromFinishedTransition(self):
Scene.onEnterFromFinishedTransition(self)
self._mazePathController = MazePathController(self._modelPath)
self.addController(self._mazePathController)
x = self.getSize().width/2
y = self.getSize().height/2
self._mazePathController.getNode().setPosition(Point(x,y))
self._mazePathController.getNode().setOpacity(0.0)
action = EaseSineInOut(FadeIn(1.0))
cbAction = CallbackInstantAction(self._onFadeInCompletion)
sequence = Sequence(action, cbAction)
self._mazePathController.getNode().runAction(sequence)
def _onFadeInCompletion(self):
self._mazePathController.getNode().showPieces()
class MazePathModel(AbstractModel):
def __init__(self, filepath):
AbstractModel.__init__(self)
self._modelArray = []
self._playerLocation = [0,0]
self._goalLocation = [0,0]
self._moveCount = 0
f = open(filepath)
# populate the model array
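        # grid cells: 0 = wall, 1 = open floor, 2 = player start, 3 = goal
        # (2 and 3 are recorded, then normalized to 1, in the pass below)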
for line in f:
line = line.strip()
            if len(line) < 1 or line[0] == "#" or line[:2] == "//":  # if the line is a comment or empty
                continue  # then move on to the next line
row = line.split(',')
row = [int(x[:1]) for x in row if (len(x) > 0 and x != '\n')] # trim and convert to int
self._modelArray.append(row)
# look for special characters
for i in range(0, len(self._modelArray[0])):
for j in range(0, len(self._modelArray)):
                if self._modelArray[j][i] == 2:
self._playerLocation = [i, j]
self._modelArray[j][i] = 1
                elif self._modelArray[j][i] == 3:
self._goalLocation = [i, j]
self._modelArray[j][i] = 1
f.close()
self.didChange()
def getModelArray(self):
return self._modelArray
def getPlayerLocation(self):
return self._playerLocation
def getGoalLocation(self):
return self._goalLocation
def getMoveCount(self):
return self._moveCount
def movePlayerLocation(self, direction):
self._moveCount += 1
row = self._playerLocation[1]
col = self._playerLocation[0]
if direction == "left":
if col-1 < 0 or self._modelArray[row][col-1] != 1:
return
else:
self._playerLocation = [col-1, row]
self.didChange()
elif direction == "right":
if col+1 >= len(self._modelArray[0]) or self._modelArray[row][col+1] != 1:
return
else:
self._playerLocation = [col+1, row]
self.didChange()
elif direction == "up":
if row-1 < 0 or self._modelArray[row-1][col] != 1:
return
else:
self._playerLocation = [col, row-1]
self.didChange()
elif direction == "down":
if row+1 >= len(self._modelArray) or self._modelArray[row+1][col] != 1:
return
else:
self._playerLocation = [col, row+1]
self.didChange()
class MazePathNode(Node):
def __init__(self, rect=None):
Node.__init__(self, rect)
self._hasRenderedTiles = False
self._hasFinishedActions = False
self._player = None
self._goal = None
self._tileSize = 50
self.setAnchorPoint(Point(0.5, 0.5))
def setOpacity(self, opacity):
Node.setOpacity(self, opacity)
for child in self.getChildren():
child.setOpacity(opacity)
def onModelChange(self, model):
if not model:
return
# render the tiles
if not self._hasRenderedTiles:
self._hasRenderedTiles = True
modelArray = model.getModelArray()
width = self._tileSize * len(modelArray[0])
height = self._tileSize * len(modelArray)
self.setSize(Size(width, height))
for i in range(0, len(modelArray[0])):
for j in range(0, len(modelArray)):
x = i*self._tileSize
y = j*self._tileSize
w = self._tileSize
h = self._tileSize
rect = MakeRect(x, y, w, h)
                    if modelArray[j][i] == 0:  # 'matrix' lookup is [row,col], i.e. (y,x) rather than (x,y), so the i,j indices are switched
continue
else:
color = WhiteColor()
rectangle = RectangleNode(rect, color)
self.addChild(rectangle, 1)
# set up the player's sprite
x = model.getPlayerLocation()[0] * self._tileSize
y = model.getPlayerLocation()[1] * self._tileSize
if not self._player:
self._player = Sprite("images/character.png", Point(x,y))
self.addChild(self._player,3)
self._player.setScale(0.01)
self._player.setAnchorPoint(Point(0.5,0.5))
size = self._player.getSize().width
self._player.setPosition(pointAdd(self._player.getPosition(), Point(size/2, size/2)))
else:
self._hasFinishedActions = False
action = EaseSineInOut(MoveTo(0.05, Point(x,y)))
cbAction = CallbackInstantAction(self.onPlayerMotionCompletion)
sequence = Sequence(action, cbAction)
self._player.runAction(sequence)
# set up the goal sprite
x = model.getGoalLocation()[0] * self._tileSize
y = model.getGoalLocation()[1] * self._tileSize
if not self._goal:
self._goal = Sprite("images/goal.png", Point(x,y))
self.addChild(self._goal,2)
self._goal.setScale(0.01)
self._goal.setAnchorPoint(Point(0.5,0.5))
size = self._goal.getSize().width
self._goal.setPosition(pointAdd(self._goal.getPosition(), Point(size/2, size/2)))
else:
self._goal.setPosition(Point(x,y))
def showPieces(self):
if self._goal:
action = EaseBounceOut(ScaleTo(0.75, 1.0))
sequence = Sequence(action, CallbackInstantAction(self.onGoalScaleCompletion))
self._goal.runAction(sequence)
def onGoalScaleCompletion(self):
self._goal.setAnchorPoint(PointZero())
size = self._goal.getSize().width
self._goal.setPosition(pointSub(self._goal.getPosition(), Point(size/2, size/2)))
if self._player:
action = EaseBounceOut(ScaleTo(0.75, 1.0))
sequence = Sequence(action, CallbackInstantAction(self.onPlayerScaleCompletion))
self._player.runAction(sequence)
def onPlayerScaleCompletion(self):
self._player.setAnchorPoint(PointZero())
size = self._player.getSize().width
self._player.setPosition(pointSub(self._player.getPosition(), Point(size/2, size/2)))
self._hasFinishedActions = True
def onPlayerMotionCompletion(self):
self._hasFinishedActions = True
def reset(self):
self._hasRenderedTiles = False
self._hasFinishedActions = False
self.removeAllChildren()
self._player = None
self._goal = None
def hasFinishedActions(self):
return self._hasFinishedActions
class MazePathController(AbstractController):
def __init__(self, modelPath):
AbstractController.__init__(self, MazePathNode(RectZero()), MazePathModel(modelPath))
def onKeyPress(self, event):
if not self.getNode().hasFinishedActions():
return
key = event.key
if key == "Left":
self.getModel().movePlayerLocation("left")
elif key == "Right":
self.getModel().movePlayerLocation("right")
elif key == "Up":
self.getModel().movePlayerLocation("up")
elif key == "Down":
self.getModel().movePlayerLocation("down")
if self.getModel().getPlayerLocation() == self.getModel().getGoalLocation():
winScene = WinScene(self.getModel().getMoveCount())
transition = MoveInTopTransition(.5, winScene)
self.getDirector().replaceScene(transition)
return True
class WinScene(Scene, GestureListener):
def __init__(self, moveCount):
Scene.__init__(self)
self._currentCount = 0
self._moveCount = moveCount
def setup(self):
self.setBackgroundColor(WhiteColor())
self._label = PangoLabel()
self.setMarkupText(0)
self._label.setAnchorPoint(Point(0.5, 0.5))
self._label.setAlignment("center")
self._label.setFontSize(48)
self.addChild(self._label)
def onEnter(self):
Scene.onEnter(self)
self.getDirector().getGestureDispatch().addListener(self)
x = self.getSize().width/2
y = self.getSize().height/2
self._label.setPosition(Point(x,y))
def onEnterFromFinishedTransition(self):
Scene.onEnterFromFinishedTransition(self)
self.scheduleCallback(self._updateCount, 0.005)
def onExit(self):
Scene.onExit(self)
self.getDirector().getGestureDispatch().removeListener(self)
def _updateCount(self, dt):
self._currentCount += 1
self.setMarkupText(self._currentCount)
if self._currentCount >= self._moveCount:
self.unscheduleCallback(self._updateCount)
def setMarkupText(self, count):
if count < 10:
countString = "0"+str(count)
else:
countString = str(count)
markupText = '<span foreground="#000000" size="xx-large">You win!</span>' + \
'<span size="xx-small">\n\n</span>' + \
'<span foreground="#003399">You took\n' + \
'<span size="xx-large">' + countString + \
' moves\n</span>to complete the maze!</span>'
self._label.setMarkupText(markupText)
def onKeyPress(self, event):
self._onEvent()
return True
def onMousePress(self, event):
self._onEvent()
return True
def _onEvent(self):
global PATH_INDEX
global MAZE_PATHS
if PATH_INDEX < len(MAZE_PATHS):
path = MAZE_PATHS[PATH_INDEX]
PATH_INDEX += 1
transition = RotoZoomTransition(1.0, MazeScene(path))
self.getDirector().replaceScene(transition)
else:
PATH_INDEX = 0 # for right now, just loop through the mazes
self._onEvent()
if __name__ == "__main__":
director = Director()
director.setWindow()
path = MAZE_PATHS[PATH_INDEX]
PATH_INDEX += 1
transition = MoveInTopTransition(1.0, MazeScene(path))
director.runWithScene(SplashScene(transition))
#director.runWithScene(MazeScene("maze02.txt"))
| jeremyflores/cocosCairo | oldTests/maze.py | Python | mit | 9,600 |
#!/usr/bin/python
import re, csv, sys
from urlparse import urlparse
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.text import TextCollection
#process command line arguments
if len(sys.argv) < 2:
print "ERROR: arg1: must specify the input file"
print " arg2: specify -t to generate test ARFF"
sys.exit(1)
test = False
if len(sys.argv) > 2:
test = (sys.argv[2] == '-t')
# initialize some variables
stoplist = stopwords.words('english')
stoplist.extend(['.', ',', ':', '?', '!', ';', '"', "'", '-', '--', '(', ')', '/', '\\',
                 '[', ']', '{', '}', '|', '+', '*', '^'])
emots_pos = [':)', ':D', ':-)', ':-D', '=)', '=D', ':]', ':-]', '=]', 'X)', 'XD', 'X]',
'X-)', 'X-D', 'X-]', 'C:', ';)', ';D', ';]', ';-)', ';-D', ';-]', '<3',
':P', ':-P', '=P', 'XP', 'X-P', ':o)', ':3', ':>', '8)', ':^)', '8-D', '8D',
'=3', 'B^D', '\\o/', '<:', '(:', '(-:', '(=', '[:', '[-:', '[=', '(X', '[X',
'(-X', '[-X', ':\')', ':\'-)', ':\']', ':\'-]', '=\')', '=\']', ';^)',
'>:P', ':-b', ':b']
emots_pos = [emot.lower() for emot in emots_pos]
emots_neg = [':(', ':[', ':-(', ':-[', 'D:', '=(', '=[', 'D=', 'DX', ':C', '</3',
'>:[', ':-c', ':-<', ':<', '>:', ':{', ':\'-(', ':\'(', ':\'[', '=\'(',
'=\'[', 'D;', 'D\':', 'D:<', 'D8', 'D-\':', '):', ']:', ')-:', ']-:',
')=', ']=', ']:<', '>-:']
emots_neg = [emot.lower() for emot in emots_neg]
gaz_pos = []
gaz_neg = []
tweets = []
sentiments = []
emots_count = []
punct_count = []
gaz_count = []
words = [] #will contain all non-stop words that occur >1 times
words1 = [] #will contain all non-stop words that occur 1 time
# generate the gazetteers
gaz_file = open('positive-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_pos.append(line)
gaz_file.close()
gaz_file = open('negative-words.txt', 'r')
for line in gaz_file:
line = line.strip()
if line != '' and line[0] != ';':
gaz_neg.append(line)
gaz_file.close()
# print some information
print 'Number of positive emoticons: ' + str(len(emots_pos))
print 'Number of negative emoticons: ' + str(len(emots_neg))
print '\nNumber of positive gazetteer words: ' + str(len(gaz_pos))
print 'Number of negative gazetteer words: ' + str(len(gaz_neg))
# extract all tweets and words (IN TRAINING)
words_file = []
if not test:
words_file = open('words-list.txt', 'w') # COMMENT OUT FOR TESTING
tweet_file = open(sys.argv[1], 'rb')
reader = csv.reader(tweet_file, delimiter=',', quotechar='"', escapechar='\\', quoting=csv.QUOTE_ALL)
for line in reader:
# save tweet data
tweet = line[4].lower()
sent = line[1]
# REMOVE THIS SECTION FOR TESTING
if not test:
if sent == 'positive':
sent = 'POS'
elif sent == 'negative':
sent = 'NEG'
else:
sent = 'OTHER'
sentiments.append(sent)
# standardize URLs
w = tweet.split()
for i in range(len(w)):
r = urlparse(w[i])
if r[0] != '' and r[1] != '':
w[i] = 'URL'
tweet = ' '.join(w)
tweets.append(tweet)
# count emoticons
count_pos = 0
for emot in emots_pos:
count_pos += tweet.count(emot)
count_neg = 0
for emot in emots_neg:
count_neg += tweet.count(emot)
emots_count.append( (count_pos, count_neg) )
# count punctuation
punct_count.append( (tweet.count('?'), tweet.count('!')) )
# count gazetteer words
count_pos = 0
for gw in gaz_pos:
count_pos += tweet.count(gw)
count_neg = 0
for gw in gaz_neg:
count_neg += tweet.count(gw)
gaz_count.append( (count_pos, count_neg) )
# USE THIS SECTION FOR TRAINING
# extract only words used >1 times, and ignore stopwords
if not test :
tweet_sents = sent_tokenize(tweet)
for sent in tweet_sents:
sw = word_tokenize(sent)
for word in sw:
if word not in stoplist:
if word not in words:
if word in words1:
words.append(word)
words_file.write(word + '\n')
else:
words1.append(word)
tweet_file.close()
if not test:
words_file.close() # COMMENT OUT FOR TESTING
# USE THIS SECTION FOR TESTING
# extract all words (IN TESTING)
if test:
wfile = open('words-list.txt', 'r')
for line in wfile:
words.append(line.strip())
wfile.close()
# print some more information
print '\nNumber of tweets: ' + str(len(tweets))
print 'Number of words occurring >1 time: ' + str(len(words))
print 'Number of words occurring 1 time: ' + str(len(words1))
# create .arff file for Weka
texts = TextCollection(tweets)
arff = open('tweets_sentiment.arff', "w")
wc = 0
# header
arff.write("@relation sentiment_analysis\n\n")
arff.write("@attribute numPosEmots numeric\n")
arff.write("@attribute numNegEmots numeric\n")
arff.write("@attribute numQuest numeric\n")
arff.write("@attribute numExclam numeric\n")
arff.write("@attribute numPosGaz numeric\n")
arff.write("@attribute numNegGaz numeric\n")
for word in words:
arff.write("@attribute word_")
sub_w = re.subn('[^a-zA-Z]', 'X', word)
arff.write(sub_w[0])
if sub_w[1] > 0:
arff.write('_' + str(wc))
wc += 1
arff.write(" numeric\n")
arff.write("@attribute class {POS, NEG, OTHER}\n\n")
arff.write("@data\n")
# data
for i in xrange(len(tweets)):
arff.write(str(emots_count[i][0]) + ',' + str(emots_count[i][1]) + ',')
arff.write(str(punct_count[i][0]) + ',' + str(punct_count[i][1]) + ',')
arff.write(str(gaz_count[i][0]) + ',' + str(gaz_count[i][1]) + ',')
for j in xrange(len(words)): #loop through unigrams
arff.write(str(texts.tf_idf(words[j], tweets[i])) + ',')
arff.write(sentiments[i] + '\n')
arff.close()
print '\nFinished pre-processing! The ARFF file for Weka has been created.'
|
satybald/twitter-modeling-lda
|
source code/preprocess.py
|
Python
|
mit
| 5,802
|
# """SearchIndex classes for Django-haystack."""
from typing import List
from django.utils.html import format_html, mark_safe
from haystack import indexes
from projects.models import Project, Nomination, Claim
class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
"""Django-haystack index of Project model."""
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=True, stored=False)
slug = indexes.CharField(model_attr='slug', indexed=True, stored=True)
title = indexes.CharField(model_attr='title', indexed=True, stored=True)
description = indexes.CharField(model_attr='description', indexed=True, stored=True)
administrators = indexes.MultiValueField(indexed=True, null=True, stored=True)
nomination_policy = indexes.CharField(model_attr='nomination_policy', indexed=True, stored=True)
# nominator_orgs
nominators = indexes.MultiValueField(indexed=True, null=True, stored=True)
# nominator_blacklist
status = indexes.CharField(model_attr='status', indexed=True, stored=True)
impact_factor = indexes.IntegerField(model_attr='impact_factor', indexed=True, stored=True)
tags = indexes.MultiValueField(indexed=True, null=True, stored=True)
subject_headings = indexes.MultiValueField(indexed=True, null=True, stored=True)
# notes
unclaimed_nominations = indexes.IntegerField(model_attr='n_unclaimed', indexed=True, stored=True)
claimed_nominations = indexes.IntegerField(model_attr='n_claimed', indexed=True, stored=True)
held_nominations = indexes.IntegerField(model_attr='n_held', indexed=True, stored=True)
def get_model(self):
return Project
def index_queryset(self, using=None):
return self.get_model().objects.exclude(status='Deleted')
def prepare_administrators(self, obj: Project) -> List[str]:
return [user.get_absolute_url() for user in obj.administrators.all()]
def prepare_nominators(self, obj: Project) -> List[str]:
        return [user.get_absolute_url() for user in obj.nominators.all()]
def prepare_tags(self, obj: Project) -> List[str]:
return [tag.name for tag in obj.tags.all()]
def prepare_subject_headings(self, obj: Project) -> List[str]:
return [subj.name for subj in obj.subject_headings.all()]
class NominationIndex(indexes.SearchIndex, indexes.Indexable):
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=False)
project_pk = indexes.IntegerField(model_attr='project__pk', indexed=True, stored=True)
project_slug = indexes.CharField(model_attr='project__slug', indexed=True, stored=True)
url = indexes.CharField(model_attr='resource__url')
status = indexes.CharField(model_attr='status', indexed=True, stored=True)
# needs_claim = indexes.BooleanField(model_attr='needs_claim', indexed=True, stored=True)
# nominated_by = indexes.MultiValueField(model_attr='nominated_by', indexed=True, stored=True)
# rationale = indexes.(model_attr='rationale', indexed=True, stored=True)
# suggested_crawl_frequency = indexes.(model_attr='suggested_crawl_frequency', indexed=True, stored=True)
# suggested_crawl_end_date = indexes.(model_attr='suggested_crawl_end_date', indexed=True, stored=True)
# notes = indexes.(model_attr='notes', indexed=True, stored=True)
# impact_factor = indexes.IntegerField(model_attr='impact_factor', indexed=True, stored=True)
def get_model(self):
return Nomination
def index_queryset(self, using=None):
return self.get_model().objects.all()
class ClaimIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = Claim
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=False)
nomination_pk = indexes.IntegerField(model_attr='nomination_id',
indexed=True, stored=True)
def index_queryset(self, using=None):
return self.get_model().objects.all()
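# With django-haystack these indexes are typically (re)built via the standard
# management commands (a usage note, not part of this module):
#   ./manage.py rebuild_index   # drop and rebuild the whole search index
#   ./manage.py update_index    # incremental refresh of changed records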
|
CobwebOrg/cobweb-django
|
projects/search_indexes.py
|
Python
|
mit
| 4,143
|
# PRNG (Pseudo-Random Number Generator) Test
# PRNG info:
# http://en.wikipedia.org/wiki/Pseudorandom_number_generator
# FB - 201012046
# Compares output distribution of any given PRNG
# with a hypothetical True-Random Number Generator (TRNG)
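# For an ideal TRNG an n-bit number contains exactly k zero bits with
# probability C(n,k)/2**n, so over all 2**n trials the expected count for
# each k is C(n,k) -- e.g. n=20, k=10 gives C(20,10) = 184756. (A worked
# expectation added for clarity; not part of the original recipe.)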
import math
import time
global x
x = time.clock() # seed for the PRNG
# PRNG to test
def prng():
global x
x = math.fmod((x + math.pi) ** 2.0, 1.0)
return x
# combination by recursive method
def c(n, k):
if k == 0: return 1
if n == 0: return 0
return c(n - 1, k - 1) + c(n - 1, k)
### combination by multiplicative method
##def c_(n, k):
## mul = 1.0
## for i in range(k):
## mul = mul * (n - k + i + 1) / (i + 1)
## return mul
# MAIN
n = 20 # number of bits in each trial
print 'Test in progress...'
print
cnk = [] # array to hold bit counts
for k in range(n + 1):
cnk.append(0)
# generate 2**n n-bit pseudo-random numbers
for j in range(2 ** n):
# generate n-bit pseudo-random number and count the 0's in it
# num = ''
ctr = 0
for i in range(n):
b = int(round(prng())) # generate 1 pseudo-random bit
# num += str(b)
if b == 0: ctr += 1
# print num
# increase bit count in the array
cnk[ctr] += 1
print 'Number of bits in each pseudo-random number (n) =', n
print
print 'Comparison of "0" count distributions:'
print
print ' k', ' c(n,k)', ' actual', '%dif'
difSum = 0
for k in range(n + 1):
cnk_ = c(n, k)
dif = abs(cnk_ - cnk[k])
print '%2d %10d %10d %4d' % (k, cnk_, cnk[k], 100 * dif / cnk_)
difSum += dif
print
print 'Difference percentage between the distributions:'
print 100 * difSum / (2 ** n)
|
ActiveState/code
|
recipes/Python/577484_PRNG_Test/recipe-577484.py
|
Python
|
mit
| 1,669
|
from datetime import date
from . import GenericCalendarTest
from ..america import (
Argentina, Barbados, Chile, Colombia, Mexico, Panama, Paraguay
)
class ArgentinaTest(GenericCalendarTest):
cal_class = Argentina
def test_holidays_2018(self):
holidays = self.cal.holidays_set(2018)
# 1. Año Nuevo
self.assertIn(date(2018, 1, 1), holidays)
# 2. Carnaval
self.assertIn(date(2018, 2, 12), holidays)
# 3. Carnaval
self.assertIn(date(2018, 2, 13), holidays)
# 4. Día de la Memoria
self.assertIn(date(2018, 3, 24), holidays)
# 5. Día del Veterano y de los Caídos en la Guerra de Malvinas
self.assertIn(date(2018, 4, 2), holidays)
# 6. Viernes Santo
self.assertIn(date(2018, 3, 30), holidays)
# 7. Día del Trabajador
self.assertIn(date(2018, 5, 1), holidays)
# 8. Día de la Revolución de Mayo
self.assertIn(date(2018, 5, 25), holidays)
# 9. Día Paso a la Inmortalidad del General Manuel Belgrano
self.assertIn(date(2018, 6, 20), holidays)
# 10. Día de la Independencia
self.assertIn(date(2018, 7, 9), holidays)
# 11. Inmaculada Concepción de María
self.assertIn(date(2018, 12, 8), holidays)
# 12. Navidad
self.assertIn(date(2018, 12, 25), holidays)
# variable days
# 13. Día Paso a la Inmortalidad del General Martín Miguel de Güemes
self.assertIn(date(2018, 6, 17), holidays)
# 14. Paso a la Inmortalidad del General José de San Martín
self.assertIn(date(2018, 8, 20), holidays)
# 15. Día del Respeto a la Diversidad Cultural
self.assertIn(date(2018, 10, 15), holidays)
# 16. Día de la Soberanía Nacional
self.assertIn(date(2018, 11, 19), holidays)
def test_holidays_2019(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 1, 1), holidays)
self.assertIn(date(2019, 3, 4), holidays)
self.assertIn(date(2019, 3, 5), holidays)
self.assertIn(date(2019, 3, 24), holidays)
self.assertIn(date(2019, 4, 2), holidays)
self.assertIn(date(2019, 4, 19), holidays)
self.assertIn(date(2019, 5, 1), holidays)
self.assertIn(date(2019, 5, 25), holidays)
self.assertIn(date(2019, 6, 20), holidays)
self.assertIn(date(2019, 7, 9), holidays)
self.assertIn(date(2019, 12, 8), holidays)
self.assertIn(date(2019, 12, 25), holidays)
# variable days
self.assertIn(date(2019, 6, 17), holidays)
self.assertIn(date(2019, 8, 19), holidays)
self.assertIn(date(2019, 10, 14), holidays)
self.assertIn(date(2019, 11, 18), holidays)
def test_holidays_2020(self):
holidays = self.cal.holidays_set(2020)
self.assertIn(date(2020, 1, 1), holidays)
self.assertIn(date(2020, 2, 24), holidays)
self.assertIn(date(2020, 2, 25), holidays)
self.assertIn(date(2020, 3, 24), holidays)
# Special case: Argentina has shifted this holiday due to
# Coronavirus lockdown in 2020.
self.assertNotIn(date(2020, 4, 2), holidays)
self.assertIn(date(2020, 3, 31), holidays)
# Back to normal, I hope...
self.assertIn(date(2020, 4, 10), holidays)
self.assertIn(date(2020, 5, 1), holidays)
self.assertIn(date(2020, 5, 25), holidays)
self.assertIn(date(2020, 6, 20), holidays)
self.assertIn(date(2020, 7, 9), holidays)
self.assertIn(date(2020, 12, 8), holidays)
self.assertIn(date(2020, 12, 25), holidays)
# variable days
self.assertIn(date(2020, 6, 15), holidays)
self.assertIn(date(2020, 8, 17), holidays)
self.assertIn(date(2020, 10, 12), holidays)
self.assertIn(date(2020, 11, 23), holidays)
def test_holidays_2021(self):
# Testing it because June 17th happens on THU (general_guemes_day).
holidays = self.cal.holidays_set(2021)
# Not happening on June 17
self.assertNotIn(date(2021, 6, 17), holidays)
# Happens on the 1st MON after this date.
self.assertIn(date(2021, 6, 20), holidays)
# Also, Día del Respeto a la Diversidad Cultural is shifted
self.assertNotIn(date(2021, 10, 12), holidays)
# The day before
self.assertIn(date(2021, 10, 11), holidays)
def test_dia_malvinas_label(self):
_, label = self.cal.get_malvinas_day(2020)
self.assertEqual(
label,
"Día del Veterano y de los Caídos en la Guerra de Malvinas"
)
def test_dia_memoria_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
label_memoria = holidays[date(2020, 3, 24)]
self.assertEqual(
label_memoria,
"Día Nacional de la Memoria por la Verdad y la Justicia"
)
def test_carnival_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
label_carnival = holidays[date(2020, 2, 25)]
self.assertEqual(label_carnival, "Carnival")
def test_labour_day_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
label = holidays[date(2020, 5, 1)]
self.assertEqual(label, "Día del Trabajador")
def test_immaculate_conception_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
label = holidays[date(2020, 12, 8)]
self.assertEqual(label, "Día de la Inmaculada Concepción de María")
class ChileTest(GenericCalendarTest):
cal_class = Chile
def test_holidays_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays)
self.assertIn(date(2013, 3, 29), holidays)
self.assertIn(date(2013, 3, 30), holidays)
self.assertIn(date(2013, 5, 1), holidays)
self.assertIn(date(2013, 5, 21), holidays)
self.assertIn(date(2013, 6, 29), holidays)
self.assertIn(date(2013, 7, 16), holidays)
self.assertIn(date(2013, 8, 15), holidays)
self.assertIn(date(2013, 9, 18), holidays)
self.assertIn(date(2013, 9, 19), holidays)
self.assertIn(date(2013, 9, 20), holidays)
self.assertIn(date(2013, 10, 12), holidays)
self.assertIn(date(2013, 10, 31), holidays)
self.assertIn(date(2013, 11, 1), holidays)
self.assertIn(date(2013, 12, 8), holidays)
self.assertIn(date(2013, 12, 25), holidays)
self.assertIn(date(2013, 12, 31), holidays)
def test_reformation_day(self):
holidays = self.cal.holidays_set(2012)
self.assertNotIn(date(2012, 10, 31), holidays)
self.assertIn(date(2012, 11, 2), holidays)
#
holidays = self.cal.holidays_set(2017)
self.assertNotIn(date(2017, 10, 31), holidays)
self.assertIn(date(2017, 10, 27), holidays)
class ColombiaTest(GenericCalendarTest):
cal_class = Colombia
def test_holidays_2015(self):
holidays = self.cal.holidays_set(2015)
self.assertIn(date(2015, 1, 1), holidays) # New year
self.assertIn(date(2015, 1, 12), holidays) # Epiphany (shifted)
self.assertIn(date(2015, 3, 23), holidays) # Saint Joseph
self.assertIn(date(2015, 3, 29), holidays) # Palm Sunday
self.assertIn(date(2015, 4, 2), holidays) # Holy Thursday
self.assertIn(date(2015, 4, 3), holidays) # Good Friday
self.assertIn(date(2015, 4, 5), holidays) # Easter (SUN)
self.assertIn(date(2015, 5, 1), holidays) # Labour Day
self.assertIn(date(2015, 5, 18), holidays) # Ascension (shifted)
self.assertIn(date(2015, 6, 8), holidays) # Corpus Christi
self.assertIn(date(2015, 6, 15), holidays) # Sacred Heart
self.assertIn(date(2015, 6, 29), holidays) # St Peter & St Paul
        self.assertIn(date(2015, 7, 20), holidays)  # Independence Day
self.assertIn(date(2015, 8, 7), holidays) # Boyacá battle
self.assertIn(date(2015, 8, 17), holidays) # Assumption (shifted)
self.assertIn(date(2015, 10, 12), holidays) # Day of the Races
self.assertIn(date(2015, 11, 2), holidays) # All Saints (shifted)
self.assertIn(date(2015, 11, 16), holidays) # Cartagena independence
self.assertIn(date(2015, 12, 8), holidays) # Immaculate Conception
self.assertIn(date(2015, 12, 25), holidays) # XMas
self.assertEqual(len(holidays), 20)
def test_holidays_2020(self):
holidays = self.cal.holidays_set(2020)
self.assertIn(date(2020, 1, 1), holidays) # New year
self.assertIn(date(2020, 1, 6), holidays) # Epiphany
self.assertIn(date(2020, 3, 23), holidays) # Saint Joseph
self.assertIn(date(2020, 4, 5), holidays) # Palm Sunday
self.assertIn(date(2020, 4, 9), holidays) # Holy Thursday
self.assertIn(date(2020, 4, 10), holidays) # Good Friday
self.assertIn(date(2020, 4, 12), holidays) # Easter (SUN)
self.assertIn(date(2020, 5, 1), holidays) # Labour Day
self.assertIn(date(2020, 5, 25), holidays) # Ascension (shifted)
self.assertIn(date(2020, 6, 15), holidays) # Corpus Christi
self.assertIn(date(2020, 6, 22), holidays) # Sacred Heart
self.assertIn(date(2020, 6, 29), holidays) # St Peter & St Paul
        self.assertIn(date(2020, 7, 20), holidays)  # Independence Day
self.assertIn(date(2020, 8, 7), holidays) # Boyacá battle
self.assertIn(date(2020, 8, 17), holidays) # Assumption (shifted)
self.assertIn(date(2020, 10, 12), holidays) # Day of the Races
self.assertIn(date(2020, 11, 2), holidays) # All Saints (shifted)
self.assertIn(date(2020, 11, 16), holidays) # Cartagena independence
self.assertIn(date(2020, 12, 8), holidays) # Immaculate Conception
self.assertIn(date(2020, 12, 25), holidays) # XMas
self.assertEqual(len(holidays), 20)
def test_epiphany_monday(self):
# In 2020, Epiphany falls on MON
epiphany_2020 = self.cal.get_epiphany(2020)
self.assertEqual(epiphany_2020, date(2020, 1, 6))
# In 2021, it does not, so it's shifted to the next MON
epiphany_2021 = self.cal.get_epiphany(2021)
self.assertEqual(epiphany_2021, date(2021, 1, 11))
def test_saint_peter_and_saint_paul_monday(self):
# In 2020, Saint Peter and Saint Paul falls on MON
st_peter_paul_2020 = self.cal.get_saint_peter_and_saint_paul(2020)
self.assertEqual(st_peter_paul_2020, date(2020, 6, 29))
# In 2021, it does not, so it's shifted to the next MON
st_peter_paul_2021 = self.cal.get_saint_peter_and_saint_paul(2021)
self.assertEqual(st_peter_paul_2021, date(2021, 7, 5))
def test_assumption_monday(self):
# In 2021, Assumption falls on SUN, so it's shifted to MON
assumption_2021 = self.cal.get_assumption(2021)
self.assertEqual(assumption_2021, date(2021, 8, 16))
# In 2022, Assumption falls on MON
assumption_2022 = self.cal.get_assumption(2022)
self.assertEqual(assumption_2022, date(2022, 8, 15))
def test_day_of_the_races_monday(self):
# In 2020, Day of the races and hispanity falls on MON
day_races_2020 = self.cal.get_day_of_the_races(2020)
self.assertEqual(day_races_2020, date(2020, 10, 12))
# In 2021, It does not, so it's shifted to the next MON
day_races_2021 = self.cal.get_day_of_the_races(2021)
self.assertEqual(day_races_2021, date(2021, 10, 18))
def test_all_saints_monday(self):
# In 2021, The All Saints falls on MON
all_saints_2021 = self.cal.get_all_saints(2021)
self.assertEqual(all_saints_2021, date(2021, 11, 1))
# In 2022, It does not, so it's shifted to the next MON
all_saints_2022 = self.cal.get_all_saints(2022)
self.assertEqual(all_saints_2022, date(2022, 11, 7))
def test_cartagena_independence_monday(self):
        # In 2019, Cartagena Independence Day falls on MON
cartagena_2019 = self.cal.get_cartagena_independence(2019)
self.assertEqual(cartagena_2019, date(2019, 11, 11))
# In 2020, It does not, so it's shifted to the next MON
cartagena_2020 = self.cal.get_cartagena_independence(2020)
self.assertEqual(cartagena_2020, date(2020, 11, 16))
class MexicoTest(GenericCalendarTest):
cal_class = Mexico
def test_holidays_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays)
self.assertIn(date(2013, 2, 4), holidays) # Constitution day
self.assertIn(date(2013, 3, 18), holidays) # Benito Juárez's birthday
self.assertIn(date(2013, 5, 1), holidays) # Labour day
self.assertIn(date(2013, 9, 16), holidays) # Independence day
self.assertIn(date(2013, 11, 18), holidays) # Revolution day
self.assertIn(date(2013, 12, 25), holidays) # XMas
def test_shift_to_monday(self):
observed = set(map(self.cal.get_observed_date, self.cal.holidays_set(2017)))
# New year on Sunday -> shift
assert date(2017, 1, 2) in observed
observed = set(map(self.cal.get_observed_date, self.cal.holidays_set(2016)))
# XMas on sunday -> shift to monday
assert date(2016, 12, 26) in observed
# Same for Labour day
assert date(2016, 5, 2) in observed
def test_shift_to_friday(self):
holidays = self.cal.holidays_set(2021) | self.cal.holidays_set(2022)
observed = set(map(self.cal.get_observed_date, holidays))
# January 1st 2022 is a saturday, so we shift to friday
assert date(2021, 12, 31) in observed
# Same for Labour day
assert date(2021, 4, 30) in observed
holidays = self.cal.holidays_set(2021)
observed = set(map(self.cal.get_observed_date, holidays))
        # December 25th, 2021 is a saturday, so we shift to friday
assert date(2021, 12, 24) in observed
class PanamaTest(GenericCalendarTest):
cal_class = Panama
def test_holidays_2013(self):
holidays = self.cal.holidays_set(2013)
self.assertIn(date(2013, 1, 1), holidays)
self.assertIn(date(2013, 1, 9), holidays) # Martyrs day
self.assertIn(date(2013, 2, 12), holidays) # carnival tuesday
self.assertIn(date(2013, 3, 29), holidays) # good friday
self.assertIn(date(2013, 3, 30), holidays) # easter saturday
self.assertIn(date(2013, 3, 31), holidays) # easter sunday
self.assertIn(date(2013, 5, 1), holidays) # labour day
self.assertIn(date(2013, 11, 3), holidays) # independence day
self.assertIn(date(2013, 11, 5), holidays) # colon day
# Shout in Villa de los Santos
self.assertIn(date(2013, 11, 10), holidays)
self.assertIn(date(2013, 11, 28), holidays) # Independence from spain
self.assertIn(date(2013, 12, 8), holidays) # mother day
self.assertIn(date(2013, 12, 25), holidays) # XMas
class ParaguayTest(GenericCalendarTest):
cal_class = Paraguay
def test_holidays_2019(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 1, 1), holidays)
self.assertIn(date(2019, 3, 1), holidays) # Heroes day
self.assertIn(date(2019, 4, 18), holidays) # Maundy thursday
self.assertIn(date(2019, 4, 19), holidays) # Good friday
self.assertIn(date(2019, 5, 1), holidays) # Labour day
        self.assertIn(date(2019, 5, 14), holidays)  # Independence day
self.assertIn(date(2019, 6, 12), holidays) # Chaco Armistice Day
self.assertIn(date(2019, 8, 15), holidays) # Founding of Asunción
self.assertIn(date(2019, 9, 29), holidays) # Boqueron Battle Victory
self.assertIn(date(2019, 12, 8), holidays) # Virgin of Caacupe
self.assertIn(date(2019, 12, 25), holidays) # XMas
def test_holidays_2017(self):
holidays = self.cal.holidays_set(2017)
# In 2017, Heroes day has been moved to February 27th
self.assertNotIn(date(2017, 3, 1), holidays)
self.assertIn(date(2017, 2, 27), holidays)
        # Foundation of Asunción day: moved to August 14 for 2017
self.assertNotIn(date(2017, 8, 15), holidays)
self.assertIn(date(2017, 8, 14), holidays)
# Boqueron Battle Victory Day: moved to October 2nd for 2017
self.assertNotIn(date(2017, 9, 29), holidays)
self.assertIn(date(2017, 10, 2), holidays)
def test_immaculate_conception_label(self):
holidays = self.cal.holidays(2020)
holidays = dict(holidays)
label = holidays[date(2020, 12, 8)]
self.assertEqual(label, "Virgin of Caacupé Day")
class BarbadosTest(GenericCalendarTest):
cal_class = Barbados
def test_holidays_2009(self):
holidays = self.cal.holidays_set(2009)
self.assertIn(date(2009, 1, 1), holidays)
self.assertIn(date(2009, 1, 21), holidays) # Errol Barrow Day
self.assertIn(date(2009, 4, 10), holidays) # Good Friday
self.assertIn(date(2009, 4, 12), holidays) # Easter Sunday
self.assertIn(date(2009, 4, 13), holidays) # Easter Monday
self.assertIn(date(2009, 4, 28), holidays) # National Heroes Day
self.assertIn(date(2009, 5, 1), holidays) # Labour Day
self.assertIn(date(2009, 6, 1), holidays) # Whit Monday
self.assertIn(date(2009, 8, 1), holidays) # Emancipation Day
        self.assertIn(date(2009, 8, 3), holidays)  # Kadooment Day
        self.assertIn(date(2009, 11, 30), holidays)  # Independence Day
self.assertIn(date(2009, 12, 25), holidays) # Christmas Day
self.assertIn(date(2009, 12, 26), holidays) # Boxing Day
def test_holidays_2018(self):
holidays = self.cal.holidays_set(2018)
self.assertIn(date(2018, 1, 1), holidays)
self.assertIn(date(2018, 1, 21), holidays) # Errol Barrow Day
self.assertIn(date(2018, 1, 22), holidays) # Errol Barrow Day (shift)
self.assertIn(date(2018, 3, 30), holidays) # Good Friday
self.assertIn(date(2018, 4, 1), holidays) # Easter Sunday
self.assertIn(date(2018, 4, 2), holidays) # Easter Monday
self.assertIn(date(2018, 4, 28), holidays) # National Heroes Day
self.assertIn(date(2018, 5, 1), holidays) # Labour Day
self.assertIn(date(2018, 5, 21), holidays) # Whit Monday
self.assertIn(date(2018, 8, 1), holidays) # Emancipation Day
        self.assertIn(date(2018, 8, 6), holidays)  # Kadooment Day
        self.assertIn(date(2018, 11, 30), holidays)  # Independence Day
self.assertIn(date(2018, 12, 25), holidays) # Christmas Day
self.assertIn(date(2018, 12, 26), holidays) # Boxing Day
def test_holidays_2019(self):
holidays = self.cal.holidays_set(2019)
self.assertIn(date(2019, 1, 1), holidays)
self.assertIn(date(2019, 1, 21), holidays) # Errol Barrow Day
self.assertIn(date(2019, 4, 19), holidays) # Good Friday
self.assertIn(date(2019, 4, 21), holidays) # Easter Sunday
self.assertIn(date(2019, 4, 22), holidays) # Easter Monday
# National Heroes Day & shift
self.assertIn(date(2019, 4, 28), holidays)
        self.assertIn(date(2019, 4, 29), holidays)  # shifted
self.assertIn(date(2019, 5, 1), holidays) # Labour Day
self.assertIn(date(2019, 6, 10), holidays) # Whit Monday
self.assertIn(date(2019, 8, 1), holidays) # Emancipation Day
        self.assertIn(date(2019, 8, 5), holidays)  # Kadooment Day
        self.assertIn(date(2019, 11, 30), holidays)  # Independence Day
self.assertIn(date(2019, 12, 25), holidays) # Christmas Day
self.assertIn(date(2019, 12, 26), holidays) # Boxing Day
|
jaraco/calendra
|
calendra/tests/test_america.py
|
Python
|
mit
| 20,047
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('deals', '0002_advertiser_logo'),
]
operations = [
migrations.RemoveField(
model_name='advertiser',
name='logo',
),
]
|
andela/troupon
|
troupon/deals/migrations/0003_remove_advertiser_logo.py
|
Python
|
mit
| 349
|
from networkx import DiGraph
from networkx.readwrite import json_graph
import cantera as ct
import numpy as np
import json
#from src.core.def_tools import *
import os
__author__ = 'Xiang Gao'
""" ----------------------------------------------
construction of the element flux graph
-----------------------------------------------"""
def build_flux_graph(soln, raw, traced_element, path_save=None, overwrite=False, i0=0, i1='eq', constV=False):
"""
    :param soln: type = cantera Solution object, providing the species, reactions, elements, etc
:param raw: type = dict, keys include "mole_fraction", "net_reaction_rate", etc
:param traced_element: type = str
:param i0: type = int, specifying the starting point of the considered interval of the raw data
:param i1: type = int or str, specifying the ending point of the considered interval of the raw data
    :return flux graph: type = networkx DiGraph; also saved as a .json file when path_save is given
"""
element = soln.element_names
species = soln.species
reaction = soln.reaction
n_rxn = soln.n_reactions
""" --------------------------------
check if results already exist, if so, load
-------------------------------- """
if path_save is not None:
if overwrite is False:
try:
data = json.load(open(path_save, 'r'))
flux_graph = json_graph.node_link_graph(data)
return flux_graph
except IOError:
pass
""" --------------------------------
if not, then compute, and save
-------------------------------- """
# ---------------------------------------------
# check if traced_element is legal
if traced_element not in element:
        raise ValueError('traced element ' + traced_element + ' is not listed in mechanism')
# ---------------------------------------------
# find the reaction rate during the considered interval
# unit will be converted to mole/sec
rr = np.reshape(raw['net_reaction_rate'][i0,:],[n_rxn,1])
flux_graph = DiGraph()
# -------------------------------------
# adding edge from reactions
# one edge may contribute from multiple reactions, the list of the contributors will be stored in edge['member']
# note though in .cti id_rxn starts from 1, in soln.reaction, id_rxn starts from 0
for id_rxn in range(n_rxn):
# sp_mu is a dict, where key is species, val is net stoichiometric coefficient
sp_mu = reaction(id_rxn).products
for sp in reaction(id_rxn).reactants.keys():
mu = reaction(id_rxn).reactants[sp]
if sp in sp_mu.keys():
sp_mu[sp] -= mu
else:
sp_mu[sp] = -mu
# -----------------------
# produced is a dict, where key is sp, val is number of traced atoms
# being transferred when this sp is produced
produced = {}
consumed = {}
for sp in sp_mu.keys():
atoms = species(sp).composition
if traced_element in atoms.keys():
n = int(sp_mu[sp] * atoms[traced_element] * np.sign(rr[id_rxn]))
if n > 0:
produced[sp] = abs(n)
elif n < 0:
consumed[sp] = abs(n)
# -----------------------
# consider this reaction only when traced element is transferred
# note "if bool(consumed)" works the same way
if bool(produced):
n_sum = sum(produced.values())
for target in produced.keys():
for source in consumed.keys():
n_i2j = 1.0 * produced[target] * consumed[source] / n_sum
# note that the direction (source-->target) is already assured
# therefore we use abs(RR) here
dw = float(n_i2j * abs(rr[id_rxn]))
try:
flux_graph[source][target]['flux'] += dw
except KeyError:
# if this edge doesn't exist, create it
flux_graph.add_edge(source, target)
flux_graph[source][target]['flux'] = dw
flux_graph[source][target]['member'] = {}
flux_graph[source][target]['member'][str(id_rxn)] = dw
flux_graph[source][target]['1/flux'] = 1.0 / flux_graph[source][target]['flux']
# -------------------------------------
# save the graph using json, which is fast, and human-readable
    if path_save is not None:
        data = json_graph.node_link_data(flux_graph)
        json.dump(data, open(path_save, 'w'))
        # print('graph saved as', path_save)
    return flux_graph
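# A minimal usage sketch (an illustration, not part of the original module).
# Assumptions: Cantera ships the "gri30.yaml" mechanism, and `raw` holds saved
# net reaction rates shaped (n_timesteps, n_reactions); all-zero rates simply
# yield an empty graph.
if __name__ == '__main__':
    soln = ct.Solution('gri30.yaml')
    raw = {'net_reaction_rate': np.zeros((1, soln.n_reactions))}
    graph = build_flux_graph(soln, raw, 'C', path_save='flux_C.json', overwrite=True)
    print(graph.number_of_nodes(), 'nodes,', graph.number_of_edges(), 'edges')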
|
golsun/GPS
|
src/core/def_build_graph.py
|
Python
|
mit
| 4,118
|
import argparse
import datetime
import pathlib
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from mushroom_rl.algorithms.value import AveragedDQN, CategoricalDQN, DQN,\
DoubleDQN, MaxminDQN, DuelingDQN, NoisyDQN, Rainbow
from mushroom_rl.approximators.parametric import TorchApproximator
from mushroom_rl.core import Core, Logger
from mushroom_rl.environments import *
from mushroom_rl.policy import EpsGreedy
from mushroom_rl.utils.dataset import compute_metrics
from mushroom_rl.utils.parameters import LinearParameter, Parameter
from mushroom_rl.utils.replay_memory import PrioritizedReplayMemory
"""
This script runs Atari experiments with DQN, and some of its variants, as
presented in:
"Human-Level Control Through Deep Reinforcement Learning". Mnih V. et al.. 2015.
"""
class Network(nn.Module):
n_features = 512
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[0]
n_output = output_shape[0]
self._h1 = nn.Conv2d(n_input, 32, kernel_size=8, stride=4)
self._h2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self._h3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self._h4 = nn.Linear(3136, self.n_features)
self._h5 = nn.Linear(self.n_features, n_output)
nn.init.xavier_uniform_(self._h1.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h2.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h3.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h4.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h5.weight,
gain=nn.init.calculate_gain('linear'))
def forward(self, state, action=None):
h = F.relu(self._h1(state.float() / 255.))
h = F.relu(self._h2(h))
h = F.relu(self._h3(h))
h = F.relu(self._h4(h.view(-1, 3136)))
q = self._h5(h)
if action is None:
return q
else:
q_acted = torch.squeeze(q.gather(1, action.long()))
return q_acted
class FeatureNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[0]
self._h1 = nn.Conv2d(n_input, 32, kernel_size=8, stride=4)
self._h2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self._h3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self._h4 = nn.Linear(3136, Network.n_features)
nn.init.xavier_uniform_(self._h1.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h2.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h3.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h4.weight,
gain=nn.init.calculate_gain('relu'))
def forward(self, state, action=None):
h = F.relu(self._h1(state.float() / 255.))
h = F.relu(self._h2(h))
h = F.relu(self._h3(h))
h = F.relu(self._h4(h.view(-1, 3136)))
return h
def print_epoch(epoch, logger):
logger.info('################################################################')
logger.info('Epoch: %d' % epoch)
logger.info('----------------------------------------------------------------')
def get_stats(dataset, logger):
score = compute_metrics(dataset)
logger.info(('min_reward: %f, max_reward: %f, mean_reward: %f,'
' games_completed: %d' % score))
return score
def experiment():
np.random.seed()
# Argument parser
parser = argparse.ArgumentParser()
arg_game = parser.add_argument_group('Game')
arg_game.add_argument("--name",
type=str,
default='BreakoutDeterministic-v4',
help='Gym ID of the Atari game.')
arg_game.add_argument("--screen-width", type=int, default=84,
help='Width of the game screen.')
arg_game.add_argument("--screen-height", type=int, default=84,
help='Height of the game screen.')
arg_mem = parser.add_argument_group('Replay Memory')
arg_mem.add_argument("--initial-replay-size", type=int, default=50000,
help='Initial size of the replay memory.')
arg_mem.add_argument("--max-replay-size", type=int, default=500000,
help='Max size of the replay memory.')
arg_mem.add_argument("--prioritized", action='store_true',
help='Whether to use prioritized memory or not.')
arg_net = parser.add_argument_group('Deep Q-Network')
arg_net.add_argument("--optimizer",
choices=['adadelta',
'adam',
'rmsprop',
'rmspropcentered'],
default='adam',
help='Name of the optimizer to use.')
arg_net.add_argument("--learning-rate", type=float, default=.0001,
help='Learning rate value of the optimizer.')
arg_net.add_argument("--decay", type=float, default=.95,
                         help='Discount factor for the history coming from the '
                              'gradient momentum in rmspropcentered and '
                              'rmsprop.')
arg_net.add_argument("--epsilon", type=float, default=1e-8,
                         help='Epsilon term used in rmspropcentered and '
                              'rmsprop.')
arg_alg = parser.add_argument_group('Algorithm')
arg_alg.add_argument("--algorithm", choices=['dqn', 'ddqn', 'adqn', 'mmdqn',
'cdqn', 'dueldqn', 'ndqn', 'rainbow'],
default='dqn',
                         help='Name of the algorithm. dqn is for standard '
                              'DQN, ddqn is for Double DQN and adqn is for '
                              'Averaged DQN.')
arg_alg.add_argument("--n-approximators", type=int, default=1,
help="Number of approximators used in the ensemble for"
"AveragedDQN or MaxminDQN.")
arg_alg.add_argument("--batch-size", type=int, default=32,
help='Batch size for each fit of the network.')
arg_alg.add_argument("--history-length", type=int, default=4,
help='Number of frames composing a state.')
arg_alg.add_argument("--target-update-frequency", type=int, default=10000,
                         help='Number of collected samples before each update '
                              'of the target network.')
arg_alg.add_argument("--evaluation-frequency", type=int, default=250000,
                         help='Number of collected samples before each '
                              'evaluation. An epoch ends after this number of '
                              'steps.')
arg_alg.add_argument("--train-frequency", type=int, default=4,
                         help='Number of collected samples before each fit of '
                              'the neural network.')
arg_alg.add_argument("--max-steps", type=int, default=50000000,
help='Total number of collected samples.')
arg_alg.add_argument("--final-exploration-frame", type=int, default=1000000,
                         help='Number of collected samples until the exploration '
                              'rate stops decreasing.')
arg_alg.add_argument("--initial-exploration-rate", type=float, default=1.,
help='Initial value of the exploration rate.')
arg_alg.add_argument("--final-exploration-rate", type=float, default=.1,
                         help='Final value of the exploration rate. When it '
                              'reaches this value, it stays constant.')
arg_alg.add_argument("--test-exploration-rate", type=float, default=.05,
help='Exploration rate used during evaluation.')
arg_alg.add_argument("--test-samples", type=int, default=125000,
                         help='Number of collected samples for each '
                              'evaluation.')
arg_alg.add_argument("--max-no-op-actions", type=int, default=30,
                         help='Maximum number of no-op actions performed at the '
                              'beginning of the episodes.')
arg_alg.add_argument("--alpha-coeff", type=float, default=.6,
help='Prioritization exponent for prioritized experience replay.')
arg_alg.add_argument("--n-atoms", type=int, default=51,
help='Number of atoms for Categorical DQN.')
arg_alg.add_argument("--v-min", type=int, default=-10,
help='Minimum action-value for Categorical DQN.')
arg_alg.add_argument("--v-max", type=int, default=10,
help='Maximum action-value for Categorical DQN.')
arg_alg.add_argument("--n-steps-return", type=int, default=3,
help='Number of steps for n-step return for Rainbow.')
arg_alg.add_argument("--sigma-coeff", type=float, default=.5,
                         help='Sigma0 coefficient for noise initialization in '
                              'NoisyDQN and Rainbow.')
arg_utils = parser.add_argument_group('Utils')
arg_utils.add_argument('--use-cuda', action='store_true',
help='Flag specifying whether to use the GPU.')
arg_utils.add_argument('--save', action='store_true',
help='Flag specifying whether to save the model.')
arg_utils.add_argument('--load-path', type=str,
help='Path of the model to be loaded.')
arg_utils.add_argument('--render', action='store_true',
help='Flag specifying whether to render the game.')
arg_utils.add_argument('--quiet', action='store_true',
                           help='Flag specifying whether to hide the progress '
                                'bar.')
arg_utils.add_argument('--debug', action='store_true',
                           help='Flag specifying whether the script has to be '
                                'run in debug mode.')
args = parser.parse_args()
scores = list()
optimizer = dict()
if args.optimizer == 'adam':
optimizer['class'] = optim.Adam
optimizer['params'] = dict(lr=args.learning_rate,
eps=args.epsilon)
elif args.optimizer == 'adadelta':
optimizer['class'] = optim.Adadelta
optimizer['params'] = dict(lr=args.learning_rate,
eps=args.epsilon)
elif args.optimizer == 'rmsprop':
optimizer['class'] = optim.RMSprop
optimizer['params'] = dict(lr=args.learning_rate,
alpha=args.decay,
eps=args.epsilon)
elif args.optimizer == 'rmspropcentered':
optimizer['class'] = optim.RMSprop
optimizer['params'] = dict(lr=args.learning_rate,
alpha=args.decay,
eps=args.epsilon,
centered=True)
else:
raise ValueError
# Summary folder
folder_name = './logs/atari_' + args.algorithm + '_' + args.name +\
'_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
pathlib.Path(folder_name).mkdir(parents=True)
# Settings
if args.debug:
initial_replay_size = 50
max_replay_size = 500
train_frequency = 5
target_update_frequency = 10
test_samples = 20
evaluation_frequency = 50
max_steps = 1000
else:
initial_replay_size = args.initial_replay_size
max_replay_size = args.max_replay_size
train_frequency = args.train_frequency
target_update_frequency = args.target_update_frequency
test_samples = args.test_samples
evaluation_frequency = args.evaluation_frequency
max_steps = args.max_steps
# MDP
mdp = Atari(args.name, args.screen_width, args.screen_height,
ends_at_life=True, history_length=args.history_length,
max_no_op_actions=args.max_no_op_actions)
if args.load_path:
logger = Logger(DQN.__name__, results_dir=None)
logger.strong_line()
logger.info('Experiment Algorithm: ' + DQN.__name__)
# Agent
agent = DQN.load(args.load_path)
epsilon_test = Parameter(value=args.test_exploration_rate)
agent.policy.set_epsilon(epsilon_test)
# Algorithm
core_test = Core(agent, mdp)
# Evaluate model
dataset = core_test.evaluate(n_steps=args.test_samples,
render=args.render,
quiet=args.quiet)
get_stats(dataset, logger)
else:
# Policy
epsilon = LinearParameter(value=args.initial_exploration_rate,
threshold_value=args.final_exploration_rate,
n=args.final_exploration_frame)
epsilon_test = Parameter(value=args.test_exploration_rate)
epsilon_random = Parameter(value=1)
pi = EpsGreedy(epsilon=epsilon_random)
# Approximator
approximator_params = dict(
network=Network if args.algorithm not in ['dueldqn', 'cdqn', 'ndqn', 'rainbow'] else FeatureNetwork,
input_shape=mdp.info.observation_space.shape,
output_shape=(mdp.info.action_space.n,),
n_actions=mdp.info.action_space.n,
n_features=Network.n_features,
optimizer=optimizer,
use_cuda=args.use_cuda
)
if args.algorithm not in ['cdqn', 'rainbow']:
approximator_params['loss'] = F.smooth_l1_loss
approximator = TorchApproximator
if args.prioritized:
replay_memory = PrioritizedReplayMemory(
initial_replay_size, max_replay_size, alpha=args.alpha_coeff,
beta=LinearParameter(.4, threshold_value=1,
n=max_steps // train_frequency)
)
else:
replay_memory = None
# Agent
algorithm_params = dict(
batch_size=args.batch_size,
target_update_frequency=target_update_frequency // train_frequency,
replay_memory=replay_memory,
initial_replay_size=initial_replay_size,
max_replay_size=max_replay_size
)
if args.algorithm == 'dqn':
alg = DQN
agent = alg(mdp.info, pi, approximator,
approximator_params=approximator_params,
**algorithm_params)
elif args.algorithm == 'ddqn':
alg = DoubleDQN
agent = alg(mdp.info, pi, approximator,
approximator_params=approximator_params,
**algorithm_params)
elif args.algorithm == 'adqn':
alg = AveragedDQN
agent = alg(mdp.info, pi, approximator,
approximator_params=approximator_params,
n_approximators=args.n_approximators,
**algorithm_params)
elif args.algorithm == 'mmdqn':
alg = MaxminDQN
agent = alg(mdp.info, pi, approximator,
approximator_params=approximator_params,
n_approximators=args.n_approximators,
**algorithm_params)
elif args.algorithm == 'dueldqn':
alg = DuelingDQN
agent = alg(mdp.info, pi, approximator_params=approximator_params,
**algorithm_params)
elif args.algorithm == 'cdqn':
alg = CategoricalDQN
agent = alg(mdp.info, pi, approximator_params=approximator_params,
n_atoms=args.n_atoms, v_min=args.v_min,
v_max=args.v_max, **algorithm_params)
elif args.algorithm == 'ndqn':
alg = NoisyDQN
agent = alg(mdp.info, pi, approximator_params=approximator_params,
sigma_coeff=args.sigma_coeff, **algorithm_params)
elif args.algorithm == 'rainbow':
alg = Rainbow
beta = LinearParameter(.4, threshold_value=1, n=max_steps // train_frequency)
agent = alg(mdp.info, pi, approximator_params=approximator_params,
n_atoms=args.n_atoms, v_min=args.v_min,
v_max=args.v_max, n_steps_return=args.n_steps_return,
alpha_coeff=args.alpha_coeff, beta=beta,
sigma_coeff=args.sigma_coeff, **algorithm_params)
logger = Logger(alg.__name__, results_dir=None)
logger.strong_line()
logger.info('Experiment Algorithm: ' + alg.__name__)
# Algorithm
core = Core(agent, mdp)
# RUN
# Fill replay memory with random dataset
print_epoch(0, logger)
core.learn(n_steps=initial_replay_size,
n_steps_per_fit=initial_replay_size, quiet=args.quiet)
if args.save:
agent.save(folder_name + '/agent_0.msh')
# Evaluate initial policy
pi.set_epsilon(epsilon_test)
mdp.set_episode_end(False)
dataset = core.evaluate(n_steps=test_samples, render=args.render,
quiet=args.quiet)
scores.append(get_stats(dataset, logger))
np.save(folder_name + '/scores.npy', scores)
for n_epoch in range(1, max_steps // evaluation_frequency + 1):
print_epoch(n_epoch, logger)
logger.info('- Learning:')
# learning step
pi.set_epsilon(epsilon)
mdp.set_episode_end(True)
core.learn(n_steps=evaluation_frequency,
n_steps_per_fit=train_frequency, quiet=args.quiet)
if args.save:
agent.save(folder_name + '/agent_' + str(n_epoch) + '.msh')
logger.info('- Evaluation:')
# evaluation step
pi.set_epsilon(epsilon_test)
mdp.set_episode_end(False)
dataset = core.evaluate(n_steps=test_samples, render=args.render,
quiet=args.quiet)
scores.append(get_stats(dataset, logger))
np.save(folder_name + '/scores.npy', scores)
return scores
if __name__ == '__main__':
experiment()
|
carloderamo/mushroom
|
examples/atari_dqn.py
|
Python
|
mit
| 19,062
|
"""
WSGI config for django-example project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
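# A minimal sketch of the middleware pattern described above (the class name
# and environ key are illustrative, not a Django or project convention):
class ExampleMiddleware(object):
    """Wrap a WSGI application and annotate each request's environ."""
    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        # Add a marker, then delegate the request to the wrapped application.
        environ.setdefault('example.note', 'set by WSGI middleware')
        return self.application(environ, start_response)

# application = ExampleMiddleware(application)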
|
gen1us2k/django-example
|
config/wsgi.py
|
Python
|
mit
| 1,453
|
"""
@package ssw_wrap
@brief Simple python wrapper for SSW align library
To use the dynamic library libssw.so you may need to modify the LD_LIBRARY_PATH environment
variable to include the library directory (export LD_LIBRARY_PATH=$PWD) or, for a permanent
setup, edit /etc/ld.so.conf to add the directory containing the library and update the cache
by running /sbin/ldconfig as root
@copyright [The MIT licence](http://opensource.org/licenses/MIT)
@author Clement & Adrien Leger - 2014
"""
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
# Standard library packages
from ctypes import *
import os
def _get_libssw_path():
base = os.path.dirname(__file__)
    matches = [x for x in os.listdir(base) if (x.startswith("libssw") and x.endswith(".so"))]
if len(matches) < 1:
raise Exception("Couldn't find libssw.so in this directory: '{}'".format(base))
return os.path.join(base, matches[0])
libssw = cdll.LoadLibrary(_get_libssw_path())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class CAlignRes(Structure):
"""
@class SSWAlignRes
    @brief ctypes Structure mapping the s_align struct returned by the SSWAligner.Align func
    Corresponds to the alignment result structure
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~Ctype Structure~~~~~~~#
_fields_ = [('score', c_uint16),
('score2', c_uint16),
('ref_begin', c_int32),
('ref_end', c_int32),
('query_begin', c_int32),
('query_end', c_int32),
('ref_end2', c_int32),
('cigar', POINTER(c_uint32)),
('cigarLen', c_int32)]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class Aligner(object):
"""
@class SSWAligner
@brief Wrapper for SSW align library
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~CLASS VARIABLES~~~~~~~#
    # Dictionary mapping nucleotides to int as expected by the SSW C library
base_to_int = { 'A':0, 'C':1, 'G':2, 'T':3, 'N':4, 'a':0, 'c':1, 'g':2, 't':3, 'n':4}
int_to_base = { 0:'A', 1:'C', 2:'G', 3:'T', 4:'N'}
    # Init and set up the function pointers to map the ones specified in the SSW lib
# ssw_init method
ssw_init = libssw.ssw_init
ssw_init.restype = c_void_p
ssw_init.argtypes = [POINTER(c_int8), c_int32, POINTER(c_int8), c_int32, c_int8]
# init_destroy function
init_destroy = libssw.init_destroy
init_destroy.restype = None
init_destroy.argtypes = [c_void_p]
# ssw_align function
ssw_align = libssw.ssw_align
ssw_align.restype = POINTER(CAlignRes)
ssw_align.argtypes = [c_void_p, POINTER(c_int8), c_int32, c_uint8, c_uint8, c_uint8, c_uint16, c_int32, c_int32]
# align_destroy function
align_destroy = libssw.align_destroy
align_destroy.restype = None
align_destroy.argtypes = [POINTER(CAlignRes)]
    #~~~~~~~FUNDAMENTAL METHODS~~~~~~~#
def __repr__(self):
msg = self.__str__()
msg += "SCORE PARAMETERS:\n"
msg += " Gap Weight Open: {} Extension: {}\n".format(-self.gap_open, -self.gap_extend)
msg += " Align Weight Match: {} Mismatch: {}\n\n".format(self.match, -self.mismatch)
msg += " Match/mismatch Score matrix\n"
msg += " \tA\tC\tG\tT\tN\n"
msg += " A\t{}\t{}\t{}\t{}\t{}\n".format(self.match, -self.mismatch, -self.mismatch, -self.mismatch, 0)
msg += " C\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, self.match, -self.mismatch, -self.mismatch, 0)
msg += " G\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, self.match, -self.mismatch, 0)
msg += " T\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, -self.mismatch, self.match, 0)
msg += " N\t{}\t{}\t{}\t{}\t{}\n\n".format(0,0,0,0,0)
msg += "RESULT PARAMETERS:\n"
msg += " Report cigar {}\n".format(self.report_cigar)
msg += " Report secondary match {}\n\n".format(self.report_secondary)
msg += "REFERENCE SEQUENCE :\n"
if self.ref_len <= 50:
msg += "".join([self.int_to_base[i] for i in self.ref_seq])+"\n"
else:
msg += "".join([self.int_to_base[self.ref_seq[i]] for i in range(50)])+"...\n"
msg += " Lenght :{} nucleotides\n".format(self.ref_len)
return msg
def __str__(self):
return "\n<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)
def __init__(self,
ref_seq="",
match=2,
mismatch=2,
gap_open=3,
gap_extend=1,
report_secondary=False,
report_cigar=False):
"""
        Initialize the object by creating an interface with the ssw library functions
A reference sequence is also assigned to the object for multiple alignment against queries
with the align function
@param ref_seq Reference sequence as a python string (case insensitive)
@param match Weight for a match
@param mismatch Absolute value of mismatch penalty
@param gap_open Absolute value of gap open penalty
@param gap_extend Absolute value of gap extend penalty
        @param report_secondary Report the 2nd best alignment if true
@param report_cigar Report cigar string if true
"""
# Store overall alignment parameters
self.report_secondary = report_secondary
self.report_cigar = report_cigar
# Set gap penalties
self.set_gap(gap_open, gap_extend)
# Set the cost matrix
self.set_mat(match, mismatch)
# Set the reference sequence
self.set_ref(ref_seq)
#~~~~~~~SETTERS METHODS~~~~~~~#
def set_gap(self, gap_open=3, gap_extend=1):
"""
        Store gap open and gap extension penalties
"""
self.gap_open = gap_open
self.gap_extend = gap_extend
def set_mat(self, match=2, mismatch=2):
"""
Store match and mismatch scores then initialize a Cost matrix and fill it with match and
mismatch values. Ambiguous base: no penalty
"""
self.match = match
self.mismatch = mismatch
mat_decl = c_int8 * 25
self.mat = mat_decl(match, -mismatch, -mismatch, -mismatch, 0,
-mismatch, match, -mismatch, -mismatch, 0,
-mismatch, -mismatch, match, -mismatch, 0,
-mismatch, -mismatch, -mismatch, match, 0,
0, 0, 0, 0, 0)
def set_ref(self, ref_seq):
"""
Determine the size of the ref sequence and cast it in a c type integer matrix
"""
if ref_seq:
self.ref_len = len(ref_seq)
self.ref_seq = self._DNA_to_int_mat (ref_seq, self.ref_len)
else:
self.ref_len = 0
self.ref_seq = ""
#~~~~~~~PUBLIC METHODS~~~~~~~#
def align(self, query_seq, min_score=0, min_len=0):
"""
Perform the alignment of query against the object reference sequence
@param query_seq Query sequence as a python string (case insensitive)
        @param min_score Minimal score of match. None will be returned if filtered out
        @param min_len Minimal length of match. None will be returned if filtered out
        @return A PyAlignRes object containing information about the alignment.
"""
        # Determine the size of the query sequence and cast it in a c type integer matrix
query_len = len(query_seq)
query_seq = self._DNA_to_int_mat (query_seq, query_len)
# Create the query profile using the query sequence
profile = self.ssw_init(query_seq, # Query seq in c type integers
                                c_int32(query_len), # Length of the query seq in bases
self.mat, # Score matrix
5, # Square root of the number of elements in mat
2) # flag = no estimation of the best alignment score
# Setup the mask_len parameters = distance between the optimal and suboptimal alignment
# if < 15, the function will NOT return the suboptimal alignment information
if query_len > 30:
mask_len = query_len//2
else:
mask_len = 15
c_result = self.ssw_align (profile, # Query profile
self.ref_seq, # Ref seq in c type integers
                                   c_int32(self.ref_len), # Length of the ref seq in bases
self.gap_open, # Absolute value of gap open penalty
self.gap_extend, # absolute value of gap extend penalty
1, # Bitwise FLAG for output values = return all
0, # Score filter = return all
0, # Distance filter = return all
mask_len) # Distance between the optimal and suboptimal alignment
        # Transform the C structure into a python object if score and length match the requirements
score = c_result.contents.score
match_len = c_result.contents.query_end - c_result.contents.query_begin + 1
if score >= min_score and match_len >= min_len:
py_result = PyAlignRes(c_result, query_len, self.report_secondary, self.report_cigar)
else:
py_result = None
        # Free the memory allocated by the ssw_init and ssw_align calls
self._init_destroy(profile)
self._align_destroy(c_result)
# Return the object
return py_result
#~~~~~~~PRIVATE METHODS~~~~~~~#
def _DNA_to_int_mat (self, seq, len_seq):
"""
Cast a python DNA string into a Ctype int8 matrix
"""
# Declare the matrix
query_num_decl = c_int8 * len_seq
query_num = query_num_decl()
# for each letters in ATCGN transform in integers thanks to self.base_to_int
for i in range(len_seq):
try:
value = self.base_to_int[seq[i]]
            # if the base is not in the canonical DNA bases assign 4 as for N
except KeyError:
value = 4
finally:
query_num[i] = value
return query_num
def _init_destroy(self, profile):
"""
        Free the space allocated for the matrix used by init
"""
self.init_destroy(profile)
def _align_destroy(self, align):
"""
        Free the space allocated for the matrix used by align
"""
self.align_destroy(align)
# Init and set up the function pointers to map the ones specified in the SSW lib
# cigar_int_to_len function
cigar_int_to_len = libssw.cigar_int_to_len
cigar_int_to_len.restype = c_int32
cigar_int_to_len.argtypes = [c_int32]
# cigar_int_to_op function
cigar_int_to_op = libssw.cigar_int_to_op
cigar_int_to_op.restype = c_char
cigar_int_to_op.argtypes = [c_int32]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class PyAlignRes(object):
"""
@class PyAlignRes
@brief Extract and verify result from a CAlignRes structure. A comprehensive python
object is created according to user requirements (+- cigar string and secondary alignment)
"""
def __repr__(self):
msg = self.__str__()
msg += "OPTIMAL MATCH\n"
msg += "Score {}\n".format(self.score)
msg += "Reference begin {}\n".format(self.ref_begin)
msg += "Reference end {}\n".format(self.ref_end)
msg += "Query begin {}\n".format(self.query_begin)
msg += "Query end {}\n".format(self.query_end)
if self.cigar_string:
msg += "Cigar_string {}\n".format(self.cigar_string)
if self.score2:
msg += "SUB-OPTIMAL MATCH\n"
msg += "Score 2 {}\n".format(self.score2)
msg += "Ref_end2 {}\n".format(self.ref_end2)
return msg
def __str__(self):
return "\n<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)
def __init__ (self, Res, query_len, report_secondary=False, report_cigar=False):
"""
Parse CAlignRes structure and copy its values in object variables
@param Res A CAlignRes structure
@param query_len length of the query sequence
        @param report_secondary Report the 2nd best alignment if true
@param report_cigar Report cigar string if true
"""
# Parse value in the C type structure pointer
# Minimal mandatory parameters
self.score = Res.contents.score
self.ref_begin = Res.contents.ref_begin
self.ref_end = Res.contents.ref_end
self.query_begin = Res.contents.query_begin
self.query_end = Res.contents.query_end
        # Information for sub-optimal match if required and available
score2 = Res.contents.score2
if report_secondary and score2 != 0:
self.score2 = score2
self.ref_end2 = Res.contents.ref_end2
else:
self.score2 = None
self.ref_end2 = None
        # CIGAR string information if required and available
cigar_len = Res.contents.cigarLen
if report_cigar and cigar_len > 0:
self.cigar_string = self._cigar_string (Res.contents.cigar, cigar_len, query_len)
else:
self.cigar_string = None
def _cigar_string(self, cigar, cigar_len, query_len):
"""
        Convert cigar and cigarLen into a human-readable CIGAR string as in SAM files
"""
# Empty string for iterative writing of the cigar string
cigar_string = []
        # If the query match does not start at its first base,
        # introduce a softclip at the beginning
if self.query_begin > 0:
op_len = self.query_begin
op_char = "S"
cigar_string.append('{}{}'.format(op_len, op_char))
# Iterate over the cigar (pointer to a vector of int)
for i in range(cigar_len):
op_len = cigar_int_to_len(cigar[i])
op_char = cigar_int_to_op(cigar[i]).decode("utf-8")
cigar_string.append('{}{}'.format(op_len, op_char))
        # If the number of aligned bases is shorter than the overall query length,
        # introduce a softclip at the end
end_len = query_len - self.query_end - 1
if end_len != 0:
op_len = end_len
op_char = "S"
cigar_string.append('{}{}'.format(op_len, op_char))
return "".join(cigar_string)
|
svviz/svviz
|
src/ssw/ssw_wrap.py
|
Python
|
mit
| 15,367
|
import os
import re
import string
from itertools import chain
from .detector_morse import Detector
from .detector_morse import slurp
# from .penn_treebank_tokenizer import word_tokenize
import nlup
from pug.nlp.constant import DATA_PATH
from pug.nlp.util import generate_files
# regex namespace only conflicts with the regex kwarg in the Tokenizer constructor
from pug.nlp.regex import CRE_TOKEN, RE_NONWORD
def list_ngrams(token_list, n=1, join=' '):
"""Return a list of n-tuples, one for each possible sequence of n items in the token_list
Arguments:
        join (bool or str): if str, then join ngram tuples on it before returning
True is equivalent to join=' '
default = True
See: http://stackoverflow.com/a/30609050/623735
>>> list_ngrams('goodbye cruel world'.split(), join=False)
[('goodbye',), ('cruel',), ('world',)]
>>> list_ngrams('goodbye cruel world'.split(), 2, join=False)
[('goodbye', 'cruel'), ('cruel', 'world')]
"""
join = ' ' if join is True else join
if isinstance(join, basestring):
return [join.join(ng) for ng in list_ngrams(token_list, n=n, join=False)]
return zip(*[token_list[i:] for i in range(n)])
def list_ngram_range(token_list, *args, **kwargs):
"""Return a list of n-tuples, one for each possible sequence of n items in the token_list
Arguments:
        join (bool or str): if str, then join ngram tuples on it before returning
True is equivalent to join=' '
default = True
>>> list_ngram_range('goodbye cruel world'.split(), 0, 2, join=False)
[('goodbye',), ('cruel',), ('world',), ('goodbye', 'cruel'), ('cruel', 'world')]
>>> list_ngram_range('goodbye cruel world'.split(), 2, join=False)
[('goodbye',), ('cruel',), ('world',), ('goodbye', 'cruel'), ('cruel', 'world')]
>>> list_ngram_range('goodbye cruel world'.split(), 0, 2, join='|')
['goodbye', 'cruel', 'world', 'goodbye|cruel', 'cruel|world']
>>> list_ngram_range('goodbye cruel world'.split(), 0, 2, join=True)
['goodbye', 'cruel', 'world', 'goodbye cruel', 'cruel world']
"""
m, n = (args if len(args) > 1 else ((0, args[0]) if args else (0, 1)))
join = args[2] if len(args) > 2 else kwargs.pop('join', True)
return list(chain(*(list_ngrams(token_list, i + 1, join=join) for i in range(0, n))))
def generate_sentences(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):
"""Generate sentences from a sequence of characters (text)
Thin wrapper for Kyle Gorman's "DetectorMorse" module
Arguments:
        case_sensitive (bool): whether to consider case to make decisions about sentence boundaries
epochs (int): number of epochs (iterations for classifier training)
"""
if train_path:
generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)
# generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)
return iter(generate_sentences.detector.segments(text))
generate_sentences.detector = nlup.decorators.IO(Detector.load)(os.path.join(DATA_PATH, 'wsj_detector_morse_model.json.gz'))
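# Usage sketch (illustrative): segment a paragraph with the pre-trained WSJ
# model loaded above; Detector.segments() is assumed to yield sentence strings:
#
#   for sentence in generate_sentences("I came. I saw. I conquered."):
#       print(sentence)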
def str_strip(s, strip_chars=string.punctuation + ' \t\n\r'):
return s.strip(strip_chars)
def str_lower(s):
return s.lower()
def to_ascii(s, filler='-'):
if not s:
return ''
if not isinstance(s, basestring): # e.g. np.nan
return to_ascii(repr(s))
try:
return s.encode('utf8')
except:
return ''.join(c if c < chr(128) else filler for c in s if c)
stringify = to_ascii
def passthrough(s):
return s
class Tokenizer(object):
"""Callable and iterable class that yields substrings split on spaces or other configurable delimitters.
For both __init__ and __call__, doc is the first arg.
TODO: All args and functionality of __init__() and __call__() should be the same.
FIXME: Implement the `nltk.tokenize.TokenizerI` interface
Is it at all pythonic to make a class callable and iterable?
Is it pythonic to have to instantiate a TokenizerI instance and then call that instance's `tokenize` method?
>>> abc = (chr(ord('a') + (i % 26)) for i in xrange(1000))
>>> tokenize = Tokenizer(ngrams=5)
>>> ans = list(tokenize(' '.join(abc)))
>>> ans[:7]
['a', 'b', 'c', 'd', 'e', 'f', 'g']
>>> ans[1000:1005]
['a b', 'b c', 'c d', 'd e', 'e f']
>>> ans[1999:2004]
['a b c', 'b c d', 'c d e', 'd e f', 'e f g']
>>> tokenize = Tokenizer(stem='Porter')
>>> doc = "Here're some stemmable words provided to you for your stemming pleasure."
>>> sorted(set(tokenize(doc)) - set(Tokenizer(doc, stem='Lancaster')))
[u"Here'r", u'pleasur', u'some', u'stemmabl', u'your']
>>> sorted(set(Tokenizer(doc, stem='WordNet')) - set(Tokenizer(doc, stem='Lancaster')))
["Here're", 'pleasure', 'provided', 'some', 'stemmable', 'stemming', 'your']
"""
def __init__(self, doc=None, regex=CRE_TOKEN, strip=True, nonwords=False, nonwords_set=None, nonwords_regex=RE_NONWORD,
lower=None, stem=None, ngrams=1):
# specific set of characters to strip
self.strip_chars = None
if isinstance(strip, basestring):
self.strip_chars = strip
# strip_chars takes care of the stripping config, so no need for strip function anymore
self.strip = None
elif strip is True:
self.strip_chars = '-_*`()"' + '"'
strip = strip or None
# strip whitespace, overrides strip() method
self.strip = strip if callable(strip) else (str_strip if strip else None)
self.doc = to_ascii(doc)
self.regex = regex
if isinstance(self.regex, basestring):
self.regex = re.compile(self.regex)
self.nonwords = nonwords # whether to use the default REGEX for nonwords
self.nonwords_set = nonwords_set or set()
self.nonwords_regex = nonwords_regex
self.lower = lower if callable(lower) else (str_lower if lower else None)
self.stemmer_name, self.stem = 'passthrough', passthrough # stem can be a callable Stemmer instance or just a function
        self.ngrams = ngrams or 1  # ngram degree, the number of ngrams per token
if isinstance(self.nonwords_regex, basestring):
self.nonwords_regex = re.compile(self.nonwords_regex)
elif self.nonwords:
try:
self.nonwords_set = set(self.nonwords)
except TypeError:
self.nonwords_set = set(['None', 'none', 'and', 'but'])
            # if a set of nonwords has been provided, don't use the internal nonwords REGEX
self.nonwords = not bool(self.nonwords)
def __call__(self, doc):
"""Lazily tokenize a new document (tokens aren't generated until the class instance is iterated)
>>> list(Tokenizer()('new string to parse'))
['new', 'string', 'to', 'parse']
"""
# tokenization doesn't happen until you try to iterate through the Tokenizer instance or class
self.doc = to_ascii(doc)
# need to return self so that this will work: Tokenizer()('doc (str) to parse even though default doc is None')
return self
# to conform to this part of the nltk.tokenize.TokenizerI interface
tokenize = __call__
def __reduce__(self):
"""Unpickling constructor and args so that pickling can be done efficiently without any bound methods, etc"""
return (Tokenizer, (None, self.regex, self.strip, self.nonwords, self.nonwords_set, self.nonwords_regex,
self.lower, self.stemmer_name, self.ngrams))
def span_tokenize(self, s):
"""Identify the tokens using integer offsets `(start_i, end_i)` rather than copying them to a new sequence
The sequence of tokens (strings) can be generated with
`s[start_i:end_i] for start_i, end_i in span_tokenize(s)`
Returns:
generator of 2-tuples of ints, like ((int, int) for token in s)
"""
return
# raise NotImplementedError("span_tokenizer interface not yet implemented, so just suck it up and use RAM to tokenize() ;)")
def tokenize_sents(self, strings):
"""NTLK.
Apply ``self.tokenize()`` to each element of ``strings``. I.e.:
return [self.tokenize(s) for s in strings]
:rtype: list(list(str))
"""
return [self.tokenize(s) for s in strings]
def span_tokenize_sents(self, strings):
"""
Apply ``self.span_tokenize()`` to each element of ``strings``. I.e.:
return iter((self.span_tokenize(s) for s in strings))
:rtype: iter(list(tuple(int, int)))
"""
for s in strings:
yield list(self.span_tokenize(s))
def __iter__(self, ngrams=None):
r"""Generate a sequence of words or tokens, using a re.match iteratively through the str
TODO:
- need two different self.lower and lemmatize transforms, 1 before and 1 after nonword detection
- each of 3 nonword filters on a separate line, setting w=None when nonword "hits"
- refactor `nonwords` arg/attr to `ignore_stopwords` to be more explicit
>>> doc = "John D. Rock\n\nObjective: \n\tSeeking a position as Software --Architect-- / _Project Lead_ that can utilize my expertise and"
>>> doc += " experiences in business application development and proven records in delivering 90's software. "
>>> doc += "\n\nSummary: \n\tSoftware Architect"
>>> doc += " who has gone through several full product-delivery life cycles from requirements gathering to deployment / production, and"
>>> doc += " skilled in all areas of software development from client-side JavaScript to database modeling. With strong experiences in:"
>>> doc += " \n\tRequirements gathering and analysis."
The python splitter will produce 2 tokens that are only punctuation ("/")
>>> len([s for s in doc.split() if s])
72
        The built-in nonword REGEX ignores all-punctuation words, so there are 2 fewer here:
>>> len(list(Tokenizer(doc, strip=False, nonwords=False)))
70
In addition, punctuation at the end of tokens is stripped so "D. Rock" doesn't tokenize to "D." but rather "D"
>>> run_together_tokens = ''.join(list(Tokenizer(doc, strip=False, nonwords=False)))
>>> '/' in run_together_tokens or ':' in ''.join(run_together_tokens)
False
But you can turn off stripping when instantiating the object.
>>> all(t in Tokenizer(doc, strip=False, nonwords=True) for t in ('D', '_Project', 'Lead_', "90's", "product-delivery"))
True
"""
ngrams = ngrams or self.ngrams
# FIXME: Improve memory efficiency by making this ngram tokenizer an actual generator
if ngrams > 1:
original_tokens = list(self.__iter__(ngrams=1))
for tok in original_tokens:
yield tok
for i in range(2, ngrams + 1):
for tok in list_ngrams(original_tokens, n=i, join=' '):
yield tok
else:
for w in self.regex.finditer(self.doc):
if w:
w = w.group()
w = w if not self.strip_chars else str_strip(w, self.strip_chars)
w = w if not self.strip else self.strip(w)
w = w if not self.stem else self.stem(w)
w = w if not self.lemmatize else self.lemmatize(w)
w = w if not self.lower else self.lower(w)
                    # FIXME: nonword check before and after preprocessing? (lower, lemmatize, strip, stem)
                    # 1. check if the default nonwords REGEX filter is requested; if so, use it.
                    # 2. check if a customized nonwords REGEX filter is provided; if so, use it.
# 3. make sure the word isn't in the provided (or empty) set of nonwords
if w and (not self.nonwords or not re.match(r'^' + RE_NONWORD + '$', w)) and (
not self.nonwords_regex or not self.nonwords_regex.match(w)) and (
w not in self.nonwords_set):
yield w
# can these all just be left to default assignments in __init__ or as class methods assigned to global `passthrough()`
def strip(self, s):
"""Strip punctuation surrounding a token"""
return s
def stem(self, s):
"""Find the lexial root of a word, e.g. convert 'running' to 'run'"""
return s
def lemmatize(self, s):
"""Find the semantic root of a word, e.g. convert 'was' to 'be'"""
return s
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
class PassageIter(object):
"""Passage (document, sentence, line, phrase) generator for files at indicated path
Walks all the text files it finds in the indicated path,
segmenting sentences and yielding them one at a time
References:
Radim's [word2vec tutorial](http://radimrehurek.com/2014/02/word2vec-tutorial/)
"""
def __init__(self, path='', ext='', level=None, dirs=False, files=True,
sentence_segmenter=generate_sentences, word_segmenter=string.split, verbosity=0):
        self.file_generator = generate_files(path=path, ext=ext, level=level, dirs=dirs, files=files,
                                             verbosity=verbosity)
    def __iter__(self):
        # generate_files is assumed to yield file paths; read each one line by line
        for fname in self.file_generator:
            for line in open(fname):
                yield line.split()
|
hobson/pug-nlp
|
pug/nlp/segmentation.py
|
Python
|
mit
| 13,806
|
from .base import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!i%7s@1+v&293zcy*kljuke=_l176nqpj2-3dtms()pw^et!we'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
|
acdh-oeaw/dig_ed_cat
|
digital_editions/settings/dev.py
|
Python
|
mit
| 533
|
""" """
from __future__ import unicode_literals, division, print_function, absolute_import
import argparse
import codecs
import sys
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from sqlacodegen.codegen import CodeGenerator
import sqlacodegen
def main():
parser = argparse.ArgumentParser(description='Generates SQLAlchemy model code from an existing database.')
parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
parser.add_argument('--version', action='store_true', help="print the version number and exit")
parser.add_argument('--schema', help='load tables from an alternate schema')
parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
parser.add_argument('--noviews', action='store_true', help="ignore views")
parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
parser.add_argument('--nojoined', action='store_true', help="don't autodetect joined table inheritance")
parser.add_argument('--noinflect', action='store_true', help="don't try to convert tables names to singular form")
parser.add_argument('--noclasses', action='store_true', help="don't generate classes, only tables")
parser.add_argument('--alwaysclasses', action='store_true', help="always generate classes")
parser.add_argument('--nosequences', action='store_true', help="don't auto-generate postgresql sequences")
parser.add_argument('--outfile', help='file to write output to (default: stdout)')
args = parser.parse_args()
if args.version:
print(sqlacodegen.version)
return
if not args.url:
print('You must supply a url\n', file=sys.stderr)
parser.print_help()
return
engine = create_engine(args.url)
metadata = MetaData(engine)
tables = args.tables.split(',') if args.tables else None
metadata.reflect(engine, args.schema, not args.noviews, tables)
outfile = codecs.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined, args.noinflect,
args.noclasses, args.alwaysclasses, args.nosequences)
generator.render(outfile)
|
rflynn/sqlacodegen
|
sqlacodegen/main.py
|
Python
|
mit
| 2,382
|
import re
import string
import sys
sys.path.append('/Users/exu/PlayGround/readinglists/')
from key.keys import *
from amazon.api import AmazonAPI
from html2text import html2text
pattern = re.compile("https?://.*amazon.com/gp/product/([0-9]+)/.*")
amazon = AmazonAPI(AMAZON_ACCESS_KEY_ID, AMAZON_SECRET_ACCESS_KEY, AMAZON_ASSOC_TAG, MaxQPS=0.9)
def uprint(s):
print s.encode('utf-8')
def get_asin(url):
global pattern
m = pattern.match(url)
if m and len(m.groups()) > 0:
return m.groups()[0]
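# Example (URL is hypothetical): the pattern captures the numeric product id,
# e.g. get_asin("http://www.amazon.com/gp/product/0143127748/ref=x")
# returns "0143127748".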
def read_file():
    if len(sys.argv) < 2:
print "Please provide a file that includes a list of Amazon links."
sys.exit(-1)
fname = sys.argv[1]
f = open(fname, 'r')
products = []
for l in f.readlines():
product = amazon.lookup(ItemId=get_asin(l))
products.append([product.title, product.editorial_review, product.large_image_url, product.offer_url])
print "Got product", product.title
return products
rtitle = re.compile('(.*)(\(.*\))')
def normalize_title(title):
""" Book titles are long. We crop out the last part that is in (part)"""
splits = re.findall(rtitle, title)
if splits:
new_title = splits[0][0]
else:
new_title = title
return new_title
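# Example: normalize_title("Thinking in Systems (A Primer)") returns
# "Thinking in Systems " -- the parenthesised suffix is cropped, though
# the trailing space before it is kept.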
def sanitize_text(t):
s = html2text(t)
s = string.replace(s, "'", "’")
s = string.replace(s, "**", "*")
return s
if __name__ == '__main__':
import os.path
import cPickle
pickle_file = 'products.pickle'
products = None
if os.path.isfile(pickle_file):
products = cPickle.load(open(pickle_file, 'r'))
else:
products = read_file()
f = open(pickle_file, "wb")
cPickle.dump(products, f)
for product in products:
title = normalize_title(product[0])
uprint(title)
print '=' * len(title)
review = sanitize_text(product[1])
uprint(review)
print
|
xuy/readinglists
|
md_gen/parse.py
|
Python
|
mit
| 1,942
|
from PyQt4 import QtCore
from PyQt4 import QtGui
from Action import Speech
from UI.ActionPushButton import ActionPushButton
class BaseStudy(QtGui.QWidget):
def __init__(self):
super(BaseStudy, self).__init__()
self._actionQueue = None
self._nao = None
self._widgets = None
self._buttons = None
#END __init__()
def LEDActive(self):
if self._nao is not None:
self._nao.LEDrandomEyes(1.0, True)
#END if
#END LEDActive()
def LEDNormal(self):
if self._nao is not None:
self._nao.LEDNormal()
#END if
#END LEDNormal()
def setActionQueue(self, actionQueue):
self._actionQueue = actionQueue
#END setActionQueue()
def setNao(self, nao):
if self._nao is not None:
self._nao.connected.disconnect(self.on_nao_connected)
self._nao.disconnected.disconnect(self.on_nao_disconnected)
#END if
self._nao = nao
if self._nao is not None:
self._nao.connected.connect(self.on_nao_connected)
self._nao.disconnected.connect(self.on_nao_disconnected)
#END if
#END setNao()
def speech(self, txt, speed, shaping):
return None
#END speech()
def on_button_clicked(self):
if self._actionQueue is not None:
self._actionQueue.addActions(self.sender().getRobotActions())
#END if
#END on_button_clicked()
def on_nao_connected(self):
pass
#END on_nao_connected()
def on_nao_disconnected(self):
pass
#END on_nao_disconnected()
def on_runSpeech_clicked(self):
if self._actionQueue is not None:
self._actionQueue.addActions(self.sender().getRobotActions())
#END if
#END on_runSpeech_clicked()
def _setupUi(self, general_panel = True, custom_widget = None):
wgtGeneral = None
if general_panel:
wgtGeneral = QtGui.QWidget()
wgtGeneral.setMaximumHeight(80)
wgtGeneral.setMinimumHeight(80)
##################################################
# General Speech
##################################################
self._speechs = [
ActionPushButton(None, "Hello", Speech("Hello")),
ActionPushButton(None, "Thanks", Speech("Thank you")),
ActionPushButton(None, "Sorry", Speech("I'm sorry")),
ActionPushButton(None, "Good", Speech("Good!")),
ActionPushButton(None, "Okay", Speech("Okay")),
ActionPushButton(None, "Yes", Speech("Yes")),
ActionPushButton(None, "No", Speech("No")),
ActionPushButton(None, "Hmmm", Speech("Heum,")),
None,
ActionPushButton(None, "Louder", Speech("Please speak louder")),
ActionPushButton(None, "Say again?", Speech("Can you say one more time?")),
ActionPushButton(None, "Repeat?", Speech("Would you like me to repeat that?")),
ActionPushButton(None, "Understood?", Speech("Do you understand?")),
ActionPushButton(None, "Don't Understand", Speech("I don't understand")),
ActionPushButton(None, "Greeting", Speech("Hello, my name is NAO, nice to meet you")),
ActionPushButton(None, "End Experiment", Speech("Thank you for participating in our experiment")),
]
self._grpSpeech = QtGui.QGroupBox(wgtGeneral)
self._grpSpeech.setTitle("General Speech")
layoutSpeech = QtGui.QVBoxLayout(self._grpSpeech)
layoutSpeech.setMargin(6)
layoutSpeech.addSpacing(3)
widget = QtGui.QWidget(self._grpSpeech)
layout = QtGui.QHBoxLayout(widget)
layout.setMargin(0)
for item in self._speechs:
if item is None:
layoutSpeech.addWidget(widget)
widget = QtGui.QWidget(self._grpSpeech)
layout = QtGui.QHBoxLayout(widget)
layout.setMargin(0)
else:
item.setParent(widget)
item.clicked.connect(self.on_runSpeech_clicked)
layout.addWidget(item)
#END if
#END for
layoutSpeech.addWidget(widget)
#END if
wgtButtons = None
if self._widgets is not None and self._buttons is not None:
wgtButtons = QtGui.QWidget()
layout = QtGui.QHBoxLayout(wgtButtons)
layout.setMargin(0)
for i in range(len(self._widgets)):
layoutButtons = QtGui.QVBoxLayout(self._widgets[i])
layoutButtons.setMargin(0)
for button in self._buttons[i]:
if isinstance(button, ActionPushButton):
button.clicked.connect(self.on_button_clicked)
#END if
layoutButtons.addWidget(button)
#END for
scroll = QtGui.QScrollArea()
scroll.setAlignment(QtCore.Qt.AlignCenter)
scroll.setWidget(self._widgets[i])
layoutScroll = QtGui.QHBoxLayout()
layoutScroll.setMargin(0)
layoutScroll.addWidget(scroll)
layout.addLayout(layoutScroll)
#END for
#END if
if wgtGeneral is not None or wgtButtons is not None or custom_widget is not None:
splitter = QtGui.QSplitter(self)
splitter.setOrientation(QtCore.Qt.Vertical)
layout = QtGui.QHBoxLayout(self)
layout.setMargin(0)
layout.addWidget(splitter)
if wgtGeneral is not None:
wgtGeneral.setParent(splitter)
#END if
if wgtButtons is not None:
wgtButtons.setParent(splitter)
#END if
if custom_widget is not None:
custom_widget.setParent(splitter)
#END if
#END if
#END _setupUi()
#END BaseStudy
|
mattBrzezinski/Hydrogen
|
robot-controller/Study/BaseStudy.py
|
Python
|
mit
| 6,141
|
class CodeBlock:
"""Code fragment for the readable format.
"""
def __init__(self, head, codes):
self._head = '' if head == '' else head + ' '
self._codes = codes
def _to_str_list(self, indent_width=0):
codes = []
codes.append(' ' * indent_width + self._head + '{')
for code in self._codes:
next_indent_width = indent_width + 2
if isinstance(code, str):
codes.append(' ' * next_indent_width + code)
elif isinstance(code, CodeBlock):
codes += code._to_str_list(indent_width=next_indent_width)
else:
assert False
codes.append(' ' * indent_width + '}')
return codes
def __str__(self):
"""Emit CUDA program like the following format.
<<head>> {
<<begin codes>>
...;
<<end codes>>
}
"""
return '\n'.join(self._to_str_list())
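# A minimal usage sketch (not part of the original module): nested blocks
# indent by two spaces per level, so
#
#   print(CodeBlock('__global__ void k()', ['int i = 0;', CodeBlock('for (;;)', ['break;'])]))
#
# emits:
#
#   __global__ void k() {
#     int i = 0;
#     for (;;) {
#       break;
#     }
#   }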
|
cupy/cupy
|
cupy/_core/_codeblock.py
|
Python
|
mit
| 966
|
import redis
from app.config import get_config_obj
from app.util.httputil import Http_util
class Component_access_token():
def __init__(self):
self.component_appid = get_config_obj().component_appid
self.component_appsecret = get_config_obj().component_secret
self.r = redis.Redis(host='localhost', port=6379, db=0)
def get_component_verify_ticket(self):
        # TODO: read the saved ticket
component_verify_ticket = self.r.get('component_verify_ticket')
return component_verify_ticket
def get_commponent_access_token(self):
token_json_data = Http_util().post_get_component_access_token(self.get_component_verify_ticket())
        # TODO: save the token
return token_json_data.get("component_access_token")
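# Usage sketch (illustrative): the component_verify_ticket is expected to have
# been written to redis beforehand (e.g. by the WeChat authorization-event
# handler), so fetching a fresh token reduces to:
#
#   token = Component_access_token().get_commponent_access_token()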
|
CoderHito/wx_demo
|
app/util/component_access_token.py
|
Python
|
mit
| 772
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Reference.year'
db.add_column(u'citations_reference', 'year',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Reference.year'
db.delete_column(u'citations_reference', 'year')
models = {
u'citations.reference': {
'Meta': {'object_name': 'Reference'},
'abstract': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'edition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '17', 'null': 'True', 'blank': 'True'}),
'place': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'series': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'BK'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'volume': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['citations']
|
will-hart/django-citations
|
citations/migrations/0006_auto__add_field_reference_year.py
|
Python
|
mit
| 2,201
|
from selenium import webdriver
from time import sleep
driver = webdriver.Firefox()
driver.get("https://www.baidu.com/")
# Locate the element by absolute XPath
# driver.find_element_by_xpath("/html/body/div[1]/div[1]/div/div[1]/div/form/span[1]/input").send_keys("51zxw")
# a. Locate the element by the id attribute of the input tag
driver.find_element_by_xpath("//input[@id='kw']").send_keys("51zxw")
# b. Locate the element by the name attribute of the input tag
driver.find_element_by_xpath("//input[@name='wd']").send_keys("51zxw")
# c. Locate the element by the class attribute of the input tag
driver.find_element_by_xpath("//*[@class='s_ipt']").send_keys("51zxw")
driver.find_element_by_id("su").click()
sleep(3)
driver.quit()
|
1065865483/0python_script
|
four/Webdriver/FindElement/By_xpath_p1.py
|
Python
|
mit
| 674
|
import unittest
from datetime import date
from binder.col import *
from binder.table import Table, SqlCondition, SqlSort, AND, OR
from bindertest.tabledefs import Foo, Bar
class TableTest(unittest.TestCase):
def test_init_2_AutoIdCols(self):
# Table can have only 1 AutoIdCol
try:
Table("xyz", AutoIdCol("id1"), IntCol("x"), AutoIdCol("id2"))
except AssertionError, e:
self.assertEquals("Table 'xyz' has more than one AutoIdCol", str(e))
else:
self.fail()
def test_init_duplicate_col_name(self):
try:
Table("xyz", AutoIdCol("id1"), IntCol("x"), UnicodeCol("x", 20))
except AssertionError, e:
self.assertEquals("Table 'xyz' has more than one column with name 'x'", str(e))
else:
self.fail()
def test_cols(self):
expected = ["foo_id", "i1", "s1", "d1"]
actual = [col.col_name for col in Foo.cols]
self.assertEquals(expected, actual)
expected = ["bi", "bs", "bd", "bdt1", "bb"]
actual = [col.col_name for col in Bar.cols]
self.assertEquals(expected, actual)
def test_auto_id_col(self):
# AutoIdCol field identified by __init__
self.assert_(Foo.auto_id_col is Foo.cols[0])
self.assert_(Bar.auto_id_col is None)
def test_new_parse_defaults(self):
expected = {
"foo_id": None,
"i1": 0,
"s1": "",
"d1": None,
}
actual = Foo.new()
self.assertEquals(expected, actual)
actual = Foo.parse()
self.assertEquals(expected, actual)
expected = {
"bi": None,
"bs": "",
"bd": None,
"bdt1": None,
"bb": False,
}
actual = Bar.new()
self.assertEquals(expected, actual)
actual = Bar.parse()
self.assertEquals(expected, actual)
def test_parse_auto_id(self):
expected = {
"foo_id": None,
"i1": 0,
"s1": "",
"d1": None,
}
actual = Foo.parse(foo_id=None)
self.assertEquals(expected, actual)
def test_new_parse_all(self):
expected = {
"foo_id": 42,
"i1": 101,
"s1": "alpha",
"d1": date(2006,6,6),
}
actual = Foo.new(foo_id=42, i1=101, s1="alpha", d1=date(2006,6,6))
self.assertEquals(expected, actual)
actual = Foo.parse(foo_id="42", i1="101", s1="alpha", d1="2006-06-06")
self.assertEquals(expected, actual)
        # parse with only some of the fields given as strings
actual = Foo.parse(foo_id="42", i1=101, s1="alpha", d1=date(2006,6,6))
self.assertEquals(expected, actual)
def test_new_parse_some_fields(self):
expected = {
"foo_id": 42,
"i1": 0,
"s1": "alpha",
"d1": None,
}
actual = Foo.new(foo_id=42, s1="alpha")
self.assertEquals(expected, actual)
actual = Foo.parse(foo_id="42", s1="alpha")
self.assertEquals(expected, actual)
def test_new_parse_clone(self):
# new() and parse() should return a new dictionary
expected = {
"foo_id": 42,
"i1": 0,
"s1": "alpha",
"d1": None,
}
actual = Foo.new(**expected)
self.assertEquals(expected, actual)
self.assertFalse(actual is expected)
actual = Foo.parse(**expected)
self.assertEquals(expected, actual)
self.assertFalse(actual is expected)
def test_new_parse_unkown_cols(self):
        # DON'T copy unknown columns
expected = {
"foo_id": None,
"i1": 16,
"s1": "",
"d1": None,
}
actual = Foo.new(i1=16, s2="beta")
self.assertEquals(expected, actual)
actual = Foo.parse(i1="16", s2="beta")
self.assertEquals(expected, actual)
def test_parse_empty_string(self):
# parse() replaces empty strings with default value
expected = {
"foo_id": None,
"i1": 0,
"s1": "",
"d1": None,
}
actual = Foo.parse(foo_id="", i1="", s1="", d1="")
self.assertEquals(expected, actual)
expected = {
"bi": None,
"bs": "",
"bd": None,
"bdt1": None,
"bb": False,
}
actual = Bar.parse(bi="", bs="", bd="", bdt1="", bb="")
self.assertEquals(expected, actual)
def test_new_bad_values(self):
# new() does not allow bad values
try:
Foo.new(i1="bar", s2=1.1)
except TypeError, e:
self.assertEquals("IntCol 'i1': int expected, got str", str(e))
else:
self.fail()
def test_parse_bad_values(self):
# parse() does not allow non-string bad values
try:
Foo.parse(i1=2.3, s2=1.1)
except TypeError, e:
self.assertEquals("IntCol 'i1': int expected, got float", str(e))
else:
self.fail()
def test_parse_error(self):
# parse() gives parse error for bad strings
try:
Foo.parse(i1="2.3", s2=1.1)
except ValueError, e:
self.assert_(
str(e) in [
"invalid literal for int(): 2.3",
"invalid literal for int() with base 10: '2.3'",
]
)
else:
self.fail()
def test_check_values(self):
# defaults / None
foo = Foo.new()
auto_id = Foo.check_values(foo)
self.assert_(auto_id)
# given values / no None
foo = {
"foo_id": 42,
"i1": 101,
"s1": "alpha",
"d1": date(2006,6,6),
}
auto_id = Foo.check_values(foo)
self.assertFalse(auto_id)
# bad value
foo = Foo.new()
foo["i1"] = "bar"
try:
Foo.check_values(foo)
except TypeError, e:
self.assertEquals("IntCol 'i1': int expected, got str", str(e))
else:
self.fail()
# bad value
foo = Foo.new()
foo["s1"] = 1.1
try:
Foo.check_values(foo)
except TypeError, e:
self.assertEquals("UnicodeCol 's1': unicode expected, got float", str(e))
else:
self.fail()
# unknown columns ignored
foo = Foo.new(s2=None)
foo["s3"] = 1.2
auto_id = Foo.check_values(foo)
        self.assert_(auto_id)
def test_q(self):
q = Foo.q
# existing columns
q_foo_id = Foo.q.foo_id
q_i1 = Foo.q.i1
# non-existing column
try:
Foo.q.i2
except AttributeError, e:
self.assertEquals("QueryCols instance has no attribute 'i2'", str(e))
else:
self.fail()
def test_q_ops(self):
qexpr = Foo.q.foo_id == 1
self.assert_(isinstance(qexpr, SqlCondition))
qexpr = Foo.q.d1 == None
self.assert_(isinstance(qexpr, SqlCondition))
qexpr = Foo.q.d1 > date(2007, 5, 22)
self.assert_(isinstance(qexpr, SqlCondition))
qexpr = Foo.q.d1 >= date(2007, 5, 22)
self.assert_(isinstance(qexpr, SqlCondition))
qexpr = Foo.q.d1 < date(2007, 5, 22)
self.assert_(isinstance(qexpr, SqlCondition))
qexpr = Foo.q.d1 <= date(2007, 5, 22)
self.assert_(isinstance(qexpr, SqlCondition))
def test_q_ops_assign(self):
try:
Foo.q.foo_id = "xyz"
except AttributeError:
pass
else:
self.fail()
def test_q_ops_check_value(self):
try:
Foo.q.foo_id == "xyz"
except TypeError, e:
self.assertEquals("AutoIdCol 'foo_id': int expected, got str", str(e))
else:
self.fail()
try:
Foo.q.s1 > 23
except TypeError, e:
self.assertEquals("UnicodeCol 's1': unicode expected, got int", str(e))
else:
self.fail()
def test_q_ops_auto_id(self):
try:
Foo.q.foo_id == None
except AssertionError, e:
self.assertEquals("SqlCondition: cannot use None for AutoIdCol", str(e))
else:
self.fail()
def test_AND(self):
qexpr1 = Foo.q.foo_id == 1
qexpr2 = Foo.q.s1 == 'x'
qexpr3 = Foo.q.d1 == None
AND(qexpr1, qexpr2)
AND(qexpr1, qexpr2, qexpr3)
try:
AND(qexpr1, "xyz")
except AssertionError, e:
self.assertEquals("AND: conditions must be SqlCondition", str(e))
else:
self.fail()
try:
AND(qexpr1)
except AssertionError, e:
self.assertEquals("AND: must have at least 2 conditions", str(e))
else:
self.fail()
def test_OR(self):
qexpr1 = Foo.q.foo_id == 1
qexpr2 = Foo.q.s1 == 'x'
qexpr3 = Foo.q.d1 == None
OR(qexpr1, qexpr2)
OR(qexpr1, qexpr2, qexpr3)
try:
OR(qexpr1, "xyz")
except AssertionError, e:
self.assertEquals("OR: conditions must be SqlCondition", str(e))
else:
self.fail()
try:
OR(qexpr1)
except AssertionError, e:
self.assertEquals("OR: must have at least 2 conditions", str(e))
else:
self.fail()
def test_q_sort(self):
qexpr = Foo.q.foo_id.ASC
self.assert_(isinstance(qexpr, SqlSort))
qexpr = Foo.q.d1.DESC
self.assert_(isinstance(qexpr, SqlSort))
if __name__ == '__main__':
unittest.main()
|
divtxt/binder
|
bindertest/test_table.py
|
Python
|
mit
| 9,815
|
""":mod:`cliche.celery` --- Celery_-backed task queue worker
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes a web app should provide time-consuming features that cannot
respond to the user immediately (and we define "immediately" as "within
a second or two" here). Such work should be queued and then processed
by background workers. Celery_ does that in a natural way.
We use this at several points, like resampling images to make thumbnails
or crawling ontology data from other services. Such tasks definitely
cannot respond "immediately".
.. seealso::
:ref:`faq-when-to-use` --- Celery FAQ
        An answer to what kinds of benefits Celery offers.
`Queue everything and delight everyone`__
This article describes why you should use a queue in a web application.
__ http://decafbad.com/blog/2008/07/04/queue-everything-and-delight-everyone
.. _Celery: http://celeryproject.org/
How to define tasks
-------------------
In order to defer some types of work, you have to turn the corresponding
functions into tasks. It's not a big deal: just attach a decorator to them::
    @app.task(ignore_result=True)
def do_heavy_work(some, inputs):
'''Do something heavy work.'''
...
How to defer tasks
------------------
It's similar to ordinary function calls except it uses :meth:`delay()
<celery.app.task.Task.delay>` method (or :meth:`apply_async()
<celery.app.task.Task.apply_async>` method) instead of calling operator::
do_heavy_work.delay('some', inputs='...')
That command will be queued and sent to one of the distributed workers.
That means the argument values are serialized using :mod:`json`.
If any argument value isn't serializable, it will raise an error.
Simple objects like numbers, strings, tuples, lists, and dictionaries are
safe to serialize.
On the other hand, entity objects (that is, instances of :class:`cliche.orm.Base`
and its subtypes) mostly fail to serialize, so use primary key values like
the entity id instead of the object itself.
What things are ready for task?
-------------------------------
Every deferred task call shares an equivalent initial state:
- You can get a database session using :func:`get_session()`.
- You also can get a database engine using :func:`get_database_engine()`.
Several things, however, are not ready:
- Flask's request context isn't ready for each task. You should explicitly
deal with it using :meth:`~flask.Flask.request_context()` method
to use context locals like :class:`flask.request`.
See also :ref:`request-context`.
- Physical machines may differ from the web environment. Total memory,
  CPU capacity, the number of processors, IP address, operating system,
  Python VM (PyPy or CPython), and many other properties can vary.
  Assume nothing about these variables.
- Hence global state (e.g. module-level global variables) is completely
  isolated from the web environment that called the task. Don't depend on
  such global state.
How to run Celery worker
------------------------
:program:`celery worker` (formerly :program:`celeryd`) takes Celery app object
as its endpoint, and Cliche's endpoint is :data:`cliche.celery.app`.
You can omit the latter variable name and module name: :mod:`cliche`.
Execute the following command in the shell:
.. sourcecode:: console
$ celery worker -A cliche --config dev.cfg.yml
-------------- celery@localhost v3.1.13 (Cipater)
---- **** -----
--- * *** * -- Darwin-13.3.0-x86_64-i386-64bit
-- * - **** ---
- ** ---------- [config]
- ** ---------- .> app: cliche.celery:0x1... (cliche.celery.Loader)
- ** ---------- .> transport: redis://localhost:6379/5
- ** ---------- .> results: disabled
- *** --- * --- .> concurrency: 4 (prefork)
-- ******* ----
--- ***** ----- [queues]
-------------- .> celery exchange=celery(direct) key=celery
[2014-09-12 00:31:25,150: WARNING/MainProcess] celery@localhost ready.
Note that you should pass the same configuration file (``--config`` option)
to the WSGI application. It should contain ``DATABASE_URL`` and so on.
References
----------
"""
import os
import pathlib
from celery import Celery, current_app, current_task
from celery.loaders.base import BaseLoader
from celery.signals import celeryd_init, task_failure, task_postrun
from raven import Client
from raven.conf import setup_logging
from raven.handlers.logging import SentryHandler
from sqlalchemy.engine import Engine, create_engine
from .config import ConfigDict, read_config
from .orm import Session, import_all_modules
__all__ = (
'Loader',
'get_database_engine',
'get_session',
'get_raven_client',
'app',
)
app = Celery(__name__, loader=__name__ + ':Loader')
class Loader(BaseLoader):
"""The loader used by Cliche app."""
def read_configuration(self):
config = ConfigDict()
config_path = os.environ.get(
'CELERY_CONFIG_MODULE',
os.environ.get('CLICHE_CONFIG')
)
if config_path is not None:
config = read_config(pathlib.Path(config_path))
config['CELERY_IMPORTS'] = import_all_modules()
config['CELERY_ACCEPT_CONTENT'] = ['pickle', 'json']
return config
def get_database_engine() -> Engine:
"""Get a database engine.
:returns: a database engine
:rtype: :class:`sqlalchemy.engine.base.Engine`
"""
config = current_app.conf
if 'DATABASE_ENGINE' not in config:
db_url = config['DATABASE_URL']
config['DATABASE_ENGINE'] = create_engine(db_url)
if 'BROKER_URL' not in config:
config['BROKER_URL'] = 'sqla+' + db_url
if 'CELERY_RESULT_BACKEND' not in config and \
'CELERY_RESULT_DBURI' not in config:
config['CELERY_RESULT_BACKEND'] = 'database'
config['CELERY_RESULT_DBURI'] = db_url
return config['DATABASE_ENGINE']
def get_session() -> Session:
"""Get a database session.
:returns: a database session
:rtype: :class:`~.orm.Session`
"""
task = current_task._get_current_object()
request = task.request
if getattr(request, 'db_session', None) is None:
request.db_session = Session(bind=get_database_engine())
return request.db_session
@task_postrun.connect
def close_session(task_id, task, *args, **kwargs):
"""Close the session if there's the opened session."""
session = getattr(task.request, 'db_session', None)
if session is not None:
session.close()
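# A minimal sketch of a task that relies on the helpers above (the task name
# and body are illustrative only):
#
#   @app.task(ignore_result=True)
#   def touch_rows():
#       session = get_session()   # lazily opened, bound to the current task
#       ...                       # do some database work with the session
#       # no explicit cleanup: close_session() runs via the task_postrun signal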
def get_raven_client() -> Client:
"""Get a raven client.
:returns: a raven client
:rtype: :class:`raven.Client`
"""
config = current_app.conf
if 'SENTRY_DSN' in config:
if 'RAVEN_CLIENT' not in config:
sentry_dsn = config['SENTRY_DSN']
config['RAVEN_CLIENT'] = Client(
dsn=sentry_dsn,
include_paths=[
'cliche',
],
)
return config['RAVEN_CLIENT']
else:
return None
@celeryd_init.connect
def setup_raven_logging(conf=None, **kwargs):
client = get_raven_client()
if client is not None:
handler = SentryHandler(client)
setup_logging(handler)
@task_failure.connect
def report_task_failure(task_id, exception, args, kwargs,
traceback, einfo, sender):
client = get_raven_client()
client.captureException(einfo.exc_info)
|
clicheio/cliche
|
cliche/celery.py
|
Python
|
mit
| 7,480
|
from __future__ import absolute_import
from sqlalchemy import *
from migrate import *
meta = MetaData()
vieworderings = Table('vieworderings', meta,
Column('id', Integer, primary_key=True),
Column('tagset', Text()),
Column('timestamp', Float, index=True),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
vieworderings.c.tagset.alter(name="norm_query")
def downgrade(migrate_engine):
raise NotImplementedError
|
inducer/synoptic
|
synoptic/schema_ver_repo/versions/001_Rename_tagset_column.py
|
Python
|
mit
| 467
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="showexponent", parent_name="parcats.line.colorbar", **kwargs
):
super(ShowexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/parcats/line/colorbar/_showexponent.py
|
Python
|
mit
| 569
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20141029_1945'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('name', models.CharField(max_length=32)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='article',
name='tags',
field=models.ManyToManyField(related_name='articles', to='blog.Tag', blank=True),
preserve_default=True,
),
]
|
bobisjan/django-shanghai
|
tests/project/blog/migrations/0003_auto_20141104_2232.py
|
Python
|
mit
| 823
|
from distutils.core import setup
setup(
name='flashback',
packages=['flashback'],
version='0.4',
description='The handiest Flashback scraper in the game',
author='Robin Linderborg',
author_email='robin.linderborg@gmail.com',
install_requires=[
'beautifulsoup4==4.4.1',
'requests==2.8.0'
],
url='https://github.com/miroli/flashback',
download_url='https://github.com/miroli/flashback/tarball/0.4',
keywords=['flashback', 'scraping'],
classifiers=[],
)
|
vienno/flashback
|
setup.py
|
Python
|
mit
| 515
|
from ..gitpub import gitpub
def sort_repos(repo_list):
"""
Sort the repo_list using quicksort
Parameters
----------------------------------
repo_list : [gitpub.Repository()]
        List of repositories to sort
=================================================
Returns:
-----------------------------------
repo_list : [gitpub.Repository()]
        List of repositories sorted by number of stars, ascending
"""
if repo_list == []:
return []
else:
pivot = repo_list[0]
lesser = sort_repos([repo for repo in repo_list[1:] if repo.stargazers_count < pivot.stargazers_count])
greater = sort_repos([repo for repo in repo_list[1:] if repo.stargazers_count >= pivot.stargazers_count])
return lesser + [pivot] + greater
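# A minimal sketch of the sort (the stand-in class is hypothetical, since
# gitpub.Repository's constructor isn't shown here):
#
#   class FakeRepo(object):
#       def __init__(self, stars):
#           self.stargazers_count = stars
#
#   repos = [FakeRepo(5), FakeRepo(42), FakeRepo(1)]
#   assert [r.stargazers_count for r in sort_repos(repos)] == [1, 5, 42]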
def main(username='defunkt'):
"""
Main module to put it all together
Loading Profile > Fetch public repos > Generating sorted list of repos
Parameters
----------------------------------------------------------------------
    username : str
        GitHub username whose public repositories should be fetched
==================================
Returns
---------------------------------------------------
sorted_repos : [gitpub.Repository()]
Array of repositories sorted by number of stars
"""
profile = gitpub.Profile()
profile.load_gh_profile(username)
profile.get_public_repos()
sorted_repos = reversed(sort_repos(profile.public_repos))
print ("%s(%s)'s most popular repositories by stargazers count are:" % (profile.name, profile.username))
for repo in sorted_repos:
print ("%s (%d stars)" % (repo.name, repo.stargazers_count))
return sorted_repos
if __name__ == '__main__':
sorted_repos = main()
|
Demfier/GitPub
|
build/lib/samples/most_popular_repo.py
|
Python
|
mit
| 1,767
|
ACCESS_KEY = 'twitter_access_token'
REQUEST_KEY = 'twitter_request_token'
SUCCESS_URL_KEY = 'twitter_success_url'
USERINFO_KEY = 'twitter_user_info'
|
callowayproject/django-tweeter
|
django_oauth_twitter/__init__.py
|
Python
|
mit
| 149
|
# The scripts begin here
|
odarbelaeze/dummy-project
|
firtFile.py
|
Python
|
mit
| 26
|
# -*- coding:utf-8 -*-
import platform
import asyncio
import json
from base.logger import LOG
def singleton(cls, *args, **kw):
instances = {}
def _singleton(*args, **kw):
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
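# A minimal usage sketch (the class name is illustrative only):
#
#   @singleton
#   class Config(object):
#       pass
#
#   assert Config() is Config()  # every call returns the same instance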
def func_coroutine(func):
"""make the decorated function run in EventLoop
"""
def wrapper(*args, **kwargs):
LOG.debug("In func_coroutine: before call ")
LOG.debug("function name is : " + func.__name__)
APP_EVENT_LOOP = asyncio.get_event_loop()
APP_EVENT_LOOP.call_soon(func, *args)
LOG.debug("In func_coroutine: after call ")
return wrapper
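# Usage sketch (illustrative): the wrapped call is only scheduled, so it
# executes once the event loop runs, e.g.
#
#   @func_coroutine
#   def greet(name):
#       print(name)
#
#   greet("world")  # queued via call_soon
#   asyncio.get_event_loop().run_until_complete(asyncio.sleep(0))  # prints "world"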
def write_json_into_file(data_json, filepath):
try:
with open(filepath, "w") as f:
data_str = json.dumps(data_json, indent=4)
f.write(data_str)
return True
except Exception as e:
LOG.error(str(e))
LOG.error("Write json into file failed")
return False
|
JanlizWorldlet/FeelUOwn
|
src/base/utils.py
|
Python
|
mit
| 1,039
|
##################################################################################################
# Copyright (c) 2012 Brett Dixon
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
from django.core.management.base import BaseCommand
from frog.models import Video, VideoQueue
class Command(BaseCommand):
    help = 'Queue videos for processing'
def add_arguments(self, parser):
parser.add_argument(
'guids',
nargs='*',
default=[],
)
def handle(self, *args, **options):
for guid in options['guids']:
video = Video.objects.get(guid=guid)
item = VideoQueue.objects.get_or_create(video=video)[0]
item.video = video
item.status = VideoQueue.QUEUED
item.save()
self.stdout.write('Added: {}'.format(video))
|
theiviaxx/Frog
|
frog/management/commands/frog_queue_videos.py
|
Python
|
mit
| 2,015
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPAddressesOperations:
"""PublicIPAddressesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
public_ip_address_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
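    # Usage sketch (client construction is illustrative only): from an async
    # NetworkManagementClient instance this would look like
    #
    #   poller = await client.public_ip_addresses.begin_delete(
    #       resource_group_name="my-rg",
    #       public_ip_address_name="my-ip",
    #   )
    #   await poller.result()  # wait until the long-running delete completes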
async def get(
self,
resource_group_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.PublicIPAddress":
"""Gets the specified public IP address in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
        :param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "_models.PublicIPAddress",
**kwargs: Any
) -> "_models.PublicIPAddress":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPAddress')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "_models.PublicIPAddress",
**kwargs: Any
) -> AsyncLROPoller["_models.PublicIPAddress"]:
"""Creates or updates a static or dynamic public IP address.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to the create or update public IP address operation.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPAddress or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_12_01.models.PublicIPAddress]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_address_name=public_ip_address_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
public_ip_address_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.PublicIPAddress":
"""Updates public IP address tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_address_name: The name of the public IP address.
:type public_ip_address_name: str
:param parameters: Parameters supplied to update public IP address tags.
:type parameters: ~azure.mgmt.network.v2019_12_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets all the public IP addresses in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets all public IP addresses in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-12-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses'} # type: ignore
def list_virtual_machine_scale_set_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses on a virtual machine scale set level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/publicipaddresses'} # type: ignore
def list_virtual_machine_scale_set_vm_public_ip_addresses(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PublicIPAddressListResult"]:
"""Gets information about all public IP addresses in a virtual machine IP configuration in a
virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The network interface name.
:type network_interface_name: str
:param ip_configuration_name: The IP configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPAddressListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_12_01.models.PublicIPAddressListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddressListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_public_ip_addresses.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPAddressListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_public_ip_addresses.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses'} # type: ignore
async def get_virtual_machine_scale_set_public_ip_address(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
virtualmachine_index: str,
network_interface_name: str,
ip_configuration_name: str,
public_ip_address_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.PublicIPAddress":
"""Get the specified public IP address in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the IP configuration.
:type ip_configuration_name: str
:param public_ip_address_name: The name of the public IP Address.
:type public_ip_address_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPAddress, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_12_01.models.PublicIPAddress
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPAddress"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get_virtual_machine_scale_set_public_ip_address.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'publicIpAddressName': self._serialize.url("public_ip_address_name", public_ip_address_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPAddress', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_public_ip_address.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipconfigurations/{ipConfigurationName}/publicipaddresses/{publicIpAddressName}'} # type: ignore
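
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated client): one way to drive
# these async operations end to end. Assumes the azure-identity package and a
# valid subscription; "my-rg" and "my-ip" are placeholder names.
#
#   import asyncio
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async def main():
#       async with DefaultAzureCredential() as credential:
#           async with NetworkManagementClient(credential, "<subscription-id>") as client:
#               poller = await client.public_ip_addresses.begin_create_or_update(
#                   "my-rg", "my-ip",
#                   {"location": "westus", "sku": {"name": "Standard"}},
#               )
#               ip = await poller.result()  # AsyncLROPoller: wait for the LRO
#               print(ip.ip_address)
#               async for addr in client.public_ip_addresses.list("my-rg"):
#                   print(addr.name)
#
#   asyncio.run(main())
# ---------------------------------------------------------------------------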
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_12_01/aio/operations/_public_ip_addresses_operations.py
|
Python
|
mit
| 40,646
|
"""dryorm URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.views.generic import TemplateView
from core import views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='index.html')),
url(r'^save', views.save, name='save'),
url(r'^(?P<pk>[0-9a-zA-Z\-]+)', views.detail, name='detail'),
]
|
omaraboumrad/djanground
|
backend/dryorm/urls.py
|
Python
|
mit
| 971
|
import json
from feature_ramp import redis
class Feature(object):
"""
A class to control ramping features to a percentage of users
without needing to deploy to change the ramp.
Usage:
Feature("on_off_toggled").activate()
Feature("on_off_toggled").is_active
Feature("on_off_toggled").deactivate()
Feature("all_functionality").set_percentage(5)
Feature("all_functionality").add_to_whitelist(identifier)
Feature("all_functionality").is_visible(identifier)
Feature("all_functionality").remove_from_whitelist(identifier)
Feature("all_functionality").deactivate()
Feature("go_away").reset_settings()
Feature("go_away").delete()
"""
REDIS_NAMESPACE = 'feature'
REDIS_VERSION = 1
REDIS_SET_KEY = 'active_features'
def __init__(self, feature_name, feature_group_name=None, default_percentage=0):
self.feature_name = feature_name # set here so redis_key() works
self.feature_group_name = feature_group_name
key = self._get_redis_key()
redis_raw = redis.get(key)
redis_data = self._deserialize(redis_raw)
self.whitelist = redis_data.get('whitelist', [])
self.blacklist = redis_data.get('blacklist', [])
self.percentage = redis_data.get('percentage', default_percentage)
def is_visible(self, identifier):
""" Returns true if the feature is visible to the given identifier.
Whitelisted users are always on even if they are also blacklisted.
Blacklisted users are always off unless whitelisted.
        For users neither whitelisted nor blacklisted, the ramp percentage is respected.
"""
if self.is_whitelisted(identifier):
return True
if self.is_blacklisted(identifier):
return False
return self._is_ramped(identifier)
@property
def is_active(self):
""" Returns true if a single-toggle feature is on or off.
Similar to is_visible() but does not require an identifier.
"""
return self.percentage > 0
def is_whitelisted(self, identifier):
""" Given a identifier, returns true if the id is present in the whitelist. """
return identifier in self.whitelist
def is_blacklisted(self, identifier):
""" Given a identifier, returns true if the id is present in the blacklist. """
return identifier in self.blacklist
def _is_ramped(self, identifier):
"""
Checks whether ``identifier`` is ramped for this feature or not.
``identifier`` can be a user_id, email address, etc
Warning: This method ignores white- and blacklists. For
completeness, you probably want to use is_visible().
Users are ramped for features by this method in a deterministic
way, such that the same set of users will be ramped
consistently for the same feature across multiple requests.
However, different features will have different sets of users
ramped, so that the same set of users aren't always the ones
getting the first percent of experimental changes (e.g.,
user.id in {1, 101, 202, ...}). To achieve this, whether or not
this user is ramped is computed by hashing the feature name and
combining this hash with the user's integer id, using the
modulus operator to distribute the results evenly on a scale
of 0 to 100.
Returns True if the feature is ramped high enough that the
feature should be visible to the user with that id, and False
if not.
"""
consistent_offset = hash(self.feature_name) % 100 if not self.feature_group_name else hash(self.feature_group_name)
identifier = identifier if isinstance(identifier, basestring) else str(identifier)
ramp_ranking = (consistent_offset + hash(identifier)) % 100
return ramp_ranking < self.percentage
def activate(self):
""" Ramp feature to 100%. This is a convenience method useful for single-toggle features. """
self.set_percentage(100)
def deactivate(self):
""" Ramp feature to 0%. This is a convenience method useful for single-toggle features. """
self.set_percentage(0)
def reset_settings(self):
""" Clears all settings for the feature. The feature is deactivated and
the whitelist and blacklist are emptied.
"""
self.percentage = 0
self.whitelist = []
self.blacklist = []
self._save()
def delete(self):
""" Deletes the feature settings from Redis entirely. """
key = self._get_redis_key()
redis.delete(key)
redis.srem(Feature._get_redis_set_key(), key)
def set_percentage(self, percentage):
""" Ramps the feature to the given percentage.
If percentage is not a number between 0 and 100 inclusive, ValueError is raised.
Calls int() on percentage because we are using modulus to select the users
        being shown the feature in _is_ramped(); floats will be truncated.
"""
percentage = int(float(percentage))
if (percentage < 0 or percentage > 100):
raise ValueError("Percentage is not a valid integer")
self.percentage = percentage
self._save()
def add_to_whitelist(self, identifier):
""" Whitelist the given identifier to always see the feature regardless of ramp. """
self.whitelist.append(identifier)
self._save()
def remove_from_whitelist(self, identifier):
""" Remove the given identifier from the whitelist to respect ramp percentage. """
self.whitelist.remove(identifier)
self._save()
def add_to_blacklist(self, identifier):
""" Blacklist the given identifier to never see the feature regardless of ramp. """
self.blacklist.append(identifier)
self._save()
def remove_from_blacklist(self, identifier):
""" Remove the given identifier from the blacklist to respect ramp percentage. """
self.blacklist.remove(identifier)
self._save()
@classmethod
def all_features(cls, include_data=False):
"""
Returns a list of all active feature names.
With an optional flag, this method will instead return a dict with
ramping data for the feature included.
Example ramping data:
{ 'feature_name':
{ 'percentage': 50, 'whitelist': [3], 'blacklist': [4,5] }
}
"""
key = cls._get_redis_set_key()
features = [cls._get_feature_name_from_redis_key(rkey) for rkey in redis.smembers(key)]
if not include_data:
return features
# we intentionally do not use pipelining here, since that would lock Redis and
# this does not need to be atomic
features_with_data = dict()
for feature in features:
data = cls(feature)
features_with_data[feature] = {'percentage': data.percentage}
if data.whitelist:
features_with_data[feature]['whitelist'] = data.whitelist
if data.blacklist:
features_with_data[feature]['blacklist'] = data.blacklist
return features_with_data
def _save(self):
""" Saves the feature settings to Redis in a dictionary. """
key = self._get_redis_key()
value = json.dumps(self._get_redis_data())
redis.set(key, value)
# store feature key in a set so we know what's turned on without
# needing to search all Redis keys with a * which is slow.
set_key = Feature._get_redis_set_key()
redis.sadd(set_key, key)
def _get_redis_key(self):
""" Returns the key used in Redis to store a feature's information, with namespace. """
return '{0}.{1}.{2}'.format(Feature.REDIS_NAMESPACE,
Feature.REDIS_VERSION,
self.feature_name)
@classmethod
    def _get_feature_name_from_redis_key(cls, key):
""" Returns the feature name given the namespaced key used in Redis. """
return key.split('.')[-1]
@classmethod
def _get_redis_set_key(cls):
""" Returns the key used in Redis to store a feature's information, with namespace. """
return '{0}.{1}'.format(Feature.REDIS_NAMESPACE,
Feature.REDIS_SET_KEY)
def _get_redis_data(self):
""" Returns the dictionary representation of this object for storage in Redis. """
return {
'whitelist': self.whitelist,
'blacklist': self.blacklist,
'percentage': self.percentage
}
def _deserialize(self, redis_obj):
""" Deserializes the serialized JSON representation of this object's dictionary
from Redis. If no object is provided, it returns an empty dictionary.
"""
if redis_obj is None:
return {}
return json.loads(redis_obj)
def __str__(self):
""" Pretty print the feature and some stats """
stats = self._get_redis_data()
return "Feature: {0}\nwhitelisted: {1}\nblacklisted: {2}\npercentage: {3}\n".format(self.feature_name, stats['whitelist'], stats['blacklist'], stats['percentage'])
|
venmo/feature_ramp
|
feature_ramp/Feature.py
|
Python
|
mit
| 9,312
|
# coding: utf-8
from unittest import TestCase
#import string
import json
import re
from grab import Grab, GrabMisuseError
from test.util import TMP_FILE, GRAB_TRANSPORT, get_temp_file
from test.server import SERVER
from grab.proxy import ProxyList
DEFAULT_PLIST_DATA = \
'1.1.1.1:8080\n'\
'1.1.1.2:8080\n'
class GrabProxyTestCase(TestCase):
def setUp(self):
SERVER.reset()
def generate_plist_file(self, data=DEFAULT_PLIST_DATA):
path = get_temp_file()
with open(path, 'w') as out:
out.write(data)
return path
def test_basic(self):
g = Grab(transport=GRAB_TRANSPORT)
self.assertEqual(0, len(g.proxylist.proxy_list))
class ProxyListTestCase(TestCase):
def setUp(self):
SERVER.reset()
def test_basic(self):
pl = ProxyList()
self.assertEqual(0, len(pl.proxy_list))
def generate_plist_file(self, data=DEFAULT_PLIST_DATA):
path = get_temp_file()
with open(path, 'w') as out:
out.write(data)
return path
def test_file_source(self):
pl = ProxyList()
path = self.generate_plist_file()
pl.set_source('file', location=path)
self.assertEqual(2, len(pl.proxy_list))
def test_remote_load(self):
pl = ProxyList()
SERVER.RESPONSE['get'] = DEFAULT_PLIST_DATA
pl.set_source('url', url=SERVER.BASE_URL)
self.assertEqual(2, len(pl.proxy_list))
def test_accumulate_updates_basic(self):
        # test that everything works with the accumulate_updates feature disabled
pl = ProxyList()
path = self.generate_plist_file()
pl.setup(accumulate_updates=False)
pl.set_source('file', location=path)
self.assertEqual(2, len(pl.proxy_list))
# enable accumulate updates
pl = ProxyList()
pl.setup(accumulate_updates=True)
path = self.generate_plist_file()
pl.set_source('file', location=path)
self.assertEqual(2, len(pl.proxy_list))
    # distinct name: a second test_accumulate_updates_basic would silently
    # shadow the first test of that name above and it would never run
    def test_accumulate_updates_merge(self):
pl = ProxyList()
pl.setup(accumulate_updates=True)
# load initial list
path = self.generate_plist_file('foo:1\nbar:1')
pl.set_source('file', location=path)
self.assertEqual(2, len(pl.proxy_list))
# load list with one new and one old proxies
with open(path, 'w') as out:
out.write('foo:1\nbaz:1')
pl.reload(force=True)
self.assertEqual(3, len(pl.proxy_list))
def test_get_next_proxy(self):
pl = ProxyList()
path = self.generate_plist_file('foo:1\nbar:1')
pl.set_source('file', location=path)
self.assertEqual(pl.get_next_proxy().server, 'foo')
self.assertEqual(pl.get_next_proxy().server, 'bar')
self.assertEqual(pl.get_next_proxy().server, 'foo')
pl.set_source('file', location=path)
self.assertEqual(pl.get_next_proxy().server, 'foo')
def test_get_next_proxy_in_accumulate_mode(self):
pl = ProxyList()
pl.setup(accumulate_updates=True)
path = self.generate_plist_file('foo:1\nbar:1')
pl.set_source('file', location=path)
self.assertEqual(pl.get_next_proxy().server, 'foo')
path = self.generate_plist_file('baz:1')
pl.set_source('file', location=path)
self.assertEqual(pl.get_next_proxy().server, 'bar')
self.assertEqual(pl.get_next_proxy().server, 'baz')
self.assertEqual(pl.get_next_proxy().server, 'foo')
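
# Illustrative sketch (assumption, not part of the test suite): how ProxyList
# is typically used outside the tests.
#
#   pl = ProxyList()
#   pl.set_source('file', location='/path/to/proxies.txt')
#   proxy = pl.get_next_proxy()   # rotates through the list, as tested above
#   print proxy.server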
|
subeax/grab
|
test/case/proxy.py
|
Python
|
mit
| 3,521
|
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the Nichts-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Nichts-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
|
eysho/BestKnownGame-Coins---Source
|
share/qt/clean_mac_info_plist.py
|
Python
|
mit
| 922
|
# Implementation of radix sort in Python (base-10, least significant digit first).
def RadixSort(A):
    RADIX = 10
    maxLength = False
    tmp, placement = -1, 1
    while not maxLength:
        maxLength = True
        # one bucket per digit value
        buckets = [list() for _ in range(RADIX)]
        for i in A:
            tmp = i // placement  # integer division keeps the bucket index an int
            buckets[tmp % RADIX].append(i)
            if maxLength and tmp > 0:
                maxLength = False
        # collect the buckets back into A, preserving order within each bucket
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                A[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return A  # A is sorted in place; returning it lets print() show the result

A = [534, 246, 933, 127, 277, 321, 454, 565, 220]
print(RadixSort(A))
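
# Illustrative trace of the three passes above (base-10, least significant
# digit first):
#   after units pass:    [220, 321, 933, 534, 454, 565, 246, 127, 277]
#   after tens pass:     [220, 321, 127, 933, 534, 246, 454, 565, 277]
#   after hundreds pass: [127, 220, 246, 277, 321, 454, 534, 565, 933]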
|
applecool/Practice
|
Python/Sorting/RadixSort.py
|
Python
|
mit
| 556
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Resources to make it easier and faster to implement and test game of life
#
# @author Mikael Wikström
# https://github.com/leakim/GameOfLifeKata
#
import pygame
class GameOfLife:
# still
BLOCK_0 = set([(0, 0), (0, 1), (1, 0), (1, 1)])
BLOCK_1 = BLOCK_0
# oscillators
THREE_0 = set([(0, 1), (0, 0), (0, 2)])
THREE_1 = set([(0, 1), (-1, 1), (1, 1)])
# spaceships (moves)
GLIDER_0 = set([(0, 1), (1, 2), (0, 0), (0, 2), (2, 1)])
GLIDER_1 = set([(0, 1), (1, 2), (-1, 1), (1, 0), (0, 2)])
    @staticmethod
    def move(state, (x, y)):
newstate = set()
for (u, v) in state:
newstate.add((x + u, y + v))
return newstate
#WINDOW_SIZE = [2*255, 2*255]
#screen = pygame.display.set_mode(WINDOW_SIZE)
    @staticmethod
    def draw(screen, state, rows=25, cols=25, MARGIN=1):
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
(w, h) = screen.get_size()
width, height = w/cols, h/rows
screen.fill(BLACK)
for (x, y) in state:
pygame.draw.rect(screen, WHITE, [
(MARGIN + width) * x + MARGIN,
(MARGIN + height) * y + MARGIN,
width, height])
pygame.display.flip()
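
# Illustrative sketch (not in the original file): wiring the pieces together.
# Assumes pygame is installed; evolving the state is left to the caller, since
# this module only provides the patterns, move() and draw().
if __name__ == '__main__':
    pygame.init()
    screen = pygame.display.set_mode([2 * 255, 2 * 255])
    state = GameOfLife.move(GameOfLife.GLIDER_0, (10, 10))  # glider at (10, 10)
    GameOfLife.draw(screen, state)
    pygame.time.wait(2000)  # keep the window open long enough to see the glider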
|
leakim/GameOfLifeKata
|
python/resources.py
|
Python
|
mit
| 1,181
|
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
def coefficient(slipValue, extremumValue, extremumSlip, asymptoteValue, asymptoteSlip):
    coefficient = asymptoteValue
    absoluteSlip = abs(slipValue)
    if absoluteSlip <= extremumSlip:
        coefficient = (extremumValue / extremumSlip) * absoluteSlip
    elif absoluteSlip > extremumSlip and absoluteSlip < asymptoteSlip:
        coefficient = ((asymptoteValue - extremumValue) / (asymptoteSlip - extremumSlip)) \
            * (absoluteSlip - extremumSlip) + extremumValue
    return coefficient
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(0, 0.4, 0.01)
Y = np.arange(0, 40, 1)
X, Y = np.meshgrid(X, Y)
longfunc = np.vectorize(lambda t: coefficient(t, 1, 0.2, 0.75, 0.4))
lateralfunc = np.vectorize(lambda t: coefficient(t, 1.0, 20.0, 0.75, 40))
Z = np.sqrt(longfunc(X)**2 + lateralfunc(Y)**2)
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.set_zlim(0.0, 2.0)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
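
# Worked example (illustrative) with the longitudinal parameters used above
# (extremum 1.0 at slip 0.2, asymptote 0.75 from slip 0.4):
#   coefficient(0.1, 1, 0.2, 0.75, 0.4) = (1 / 0.2) * 0.1                       = 0.500
#   coefficient(0.3, 1, 0.2, 0.75, 0.4) = ((0.75 - 1) / 0.2) * (0.3 - 0.2) + 1  = 0.875
#   coefficient(0.5, 1, 0.2, 0.75, 0.4) = asymptote value                       = 0.750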
|
tommiseppanen/visualizations
|
tyre-model/old-plots/combined-slip-basic.py
|
Python
|
mit
| 1,320
|
# -*- coding: utf-8 -*-
import os
import logging
from dotenv import find_dotenv, load_dotenv
from constants import *
import json
from utils import json_from_file, merge_json
import shutil
from settings import *
def prepare_train_data():
""" Runs data processing scripts to turn traning raw data from (../raw) into
interim data to be analyzed (saved in ../interim).
"""
logger = logging.getLogger(__name__)
logger.info('Making interim train data set from raw data')
# Init absolute path of folders
raw_input_folder_path = os.path.join(DATA_RAW_ROOT, DATASET_NAME, RAW_INPUT_FOLDER)
raw_output_folder_path = os.path.join(DATA_RAW_ROOT, DATASET_NAME, RAW_OUTPUT_FOLDER)
interim_folder_path = os.path.join(DATA_INTERIM_ROOT, DATASET_NAME)
# Read veracities from both test and dev files
veracity_labels = merge_json(
json_from_file(os.path.join(raw_output_folder_path, VERACITY_LABEL_FILE[0])),
json_from_file(os.path.join(raw_output_folder_path, VERACITY_LABEL_FILE[1])))
# Read stances from both test and dev files
stance_labels = merge_json(
json_from_file(os.path.join(raw_output_folder_path, STANCE_LABEL_FILE[0])),
json_from_file(os.path.join(raw_output_folder_path, STANCE_LABEL_FILE[1])))
# If interim data existed, delete and create a new one
if os.path.exists(interim_folder_path):
shutil.rmtree(interim_folder_path)
os.makedirs(interim_folder_path)
for event_name in DATASET_EVENTS:
interim_event_folder_path = os.path.join(interim_folder_path, event_name)
os.makedirs(interim_event_folder_path)
event_folder_path = os.path.join(raw_input_folder_path, event_name)
list_tweet_ids = [name for name in os.listdir(event_folder_path) if os.path.isdir(os.path.join(event_folder_path,name))]
for index, id in enumerate(list_tweet_ids):
# thread conversation folder in raw
source_tweet_folder_path = os.path.join(event_folder_path, id)
# read source tweet
source_tweet_file = open(os.path.join(source_tweet_folder_path,'source-tweet', id + '.json'), 'r')
source_tweet_content = source_tweet_file.read()
source_tweet_file.close()
source_tweet = json.loads(source_tweet_content)
source_tweet_replies = []
# read replies
replies_folder_path = os.path.join(source_tweet_folder_path, 'replies')
list_reply_ids = [name for name in os.listdir(replies_folder_path) if os.path.isfile(os.path.join(replies_folder_path, name))]
for reply_id in list_reply_ids:
reply_file = open(os.path.join(replies_folder_path, reply_id), "r")
reply_content = reply_file.read()
reply_file.close()
reply = json.loads(reply_content)
reply['stance'] = stance_labels[reply['id_str']]
source_tweet_replies.append(reply)
source_tweet['replies'] = source_tweet_replies
# read structure
structure_file = open(os.path.join(source_tweet_folder_path,'structure.json'), "r")
structure_content = structure_file.read()
structure_file.close()
structure = json.loads(structure_content)
source_tweet['structure'] = structure
source_tweet['veracity'] = veracity_labels[source_tweet['id_str']]
source_tweet['stance'] = stance_labels[source_tweet['id_str']]
# create tweet file in interim to write
interim_tweet_file = open(os.path.join(interim_event_folder_path, str(index) + '.json'), "w")
# write tweet to interim
interim_tweet_file.write(json.dumps(source_tweet, indent = 4))
interim_tweet_file.close()
def prepare_test_data():
""" Runs data processing scripts to turn testing raw data from (../raw) into
interim data to be analyzed (saved in ../interim).
"""
logger = logging.getLogger(__name__)
logger.info('Making interim test data set from raw data')
# Init absolute path of folders
raw_input_folder_path = os.path.join(DATA_RAW_ROOT, TESTSET_NAME)
raw_output_folder_path = os.path.join(DATA_RAW_ROOT, TESTSET_NAME)
interim_folder_path = os.path.join(DATA_INTERIM_ROOT, TESTSET_NAME)
# Read veracities from both test and dev files
veracity_labels = json_from_file(os.path.join(raw_output_folder_path, VERACITY_LABEL_TEST_FILE[0]))
# Read stances from both test and dev files
stance_labels = json_from_file(os.path.join(raw_output_folder_path, STANCE_LABEL_TEST_FILE[0]))
# If interim data existed, delete and create a new one
if os.path.exists(interim_folder_path):
shutil.rmtree(interim_folder_path)
os.makedirs(interim_folder_path)
list_tweet_ids = [name for name in os.listdir(raw_input_folder_path) if
os.path.isdir(os.path.join(raw_input_folder_path, name))]
for index, id in enumerate(list_tweet_ids):
# thread conversation folder in raw
source_tweet_folder_path = os.path.join(raw_input_folder_path, id)
# read source tweet
source_tweet_file = open(os.path.join(source_tweet_folder_path, 'source-tweet', id + '.json'), 'r')
source_tweet_content = source_tweet_file.read()
source_tweet_file.close()
source_tweet = json.loads(source_tweet_content)
source_tweet_replies = []
# read replies
replies_folder_path = os.path.join(source_tweet_folder_path, 'replies')
list_reply_ids = [name for name in os.listdir(replies_folder_path) if
os.path.isfile(os.path.join(replies_folder_path, name))]
for reply_id in list_reply_ids:
reply_file = open(os.path.join(replies_folder_path, reply_id), "r")
reply_content = reply_file.read()
reply_file.close()
reply = json.loads(reply_content)
reply['stance'] = stance_labels[reply['id_str']]
source_tweet_replies.append(reply)
source_tweet['replies'] = source_tweet_replies
# read structure
structure_file = open(os.path.join(source_tweet_folder_path, 'structure.json'), "r")
structure_content = structure_file.read()
structure_file.close()
structure = json.loads(structure_content)
source_tweet['structure'] = structure
source_tweet['veracity'] = veracity_labels[source_tweet['id_str']]
source_tweet['stance'] = stance_labels[source_tweet['id_str']]
# create tweet file in interim to write
interim_tweet_file = open(os.path.join(interim_folder_path, str(index) + '.json'), "w")
# write tweet to interim
interim_tweet_file.write(json.dumps(source_tweet, indent=4))
interim_tweet_file.close()
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
prepare_train_data()
prepare_test_data()
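
# Illustrative sketch (assumption): shape of one interim tweet file written by
# the functions above. The keys mirror the assignments made on source_tweet.
#
# {
#     "id_str": "...",
#     "veracity": "<label from veracity_labels>",
#     "stance": "<label from stance_labels>",
#     "structure": { ... },
#     "replies": [ { ..., "stance": "<label>" }, ... ]
# }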
|
iamhuy/rumour-veracity-verification
|
src/data/make_interim.py
|
Python
|
mit
| 7,273
|
# mailstat.utils
# Utilities and functions for the mailstat package
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sun Dec 29 17:27:38 2013 -0600
#
# Copyright (C) 2013 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: __init__.py [] benjamin@bengfort.com $
"""
Utilities and functions for the mailstat package
"""
##########################################################################
## Imports
##########################################################################
|
bbengfort/email-analysis
|
mailstat/utils/__init__.py
|
Python
|
mit
| 513
|
import unittest
from .layer import *
class L(Layer):
L1 = LayerSpec("l1", "layer-1")
L2 = LayerSpec("l2", "layer-2", [L1])
class TestLayer(unittest.TestCase):
def testFromId(self):
self.assertEqual(L.FromId(L.L1.id_), L.L1)
self.assertEqual(L.FromId(L.L2.id_), L.L2)
def testFromName(self):
self.assertEqual(L.FromName(L.L1.name), L.L1)
self.assertEqual(L.FromName(L.L2.name), L.L2)
def testAllLayers(self):
self.assertEqual(set(L.AllLayers()), set((L.L2, L.L1)))
def testIsSublayer(self):
self.assertTrue(L.IsSublayer(L.L1, L.L2))
self.assertFalse(Layer.IsSublayer(L.L2, L.L1))
def testTopLayer(self):
self.assertEqual(L.TopLayer(), L.L2)
if __name__ == '__main__':
unittest.main()
|
mthomure/glimpse-project
|
glimpse/models/base/layer_test.py
|
Python
|
mit
| 741
|
import OOMP
newPart = OOMP.oompItem(9439)
newPart.addTag("oompType", "RESE")
newPart.addTag("oompSize", "0805")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "O202")
newPart.addTag("oompIndex", "01")
OOMP.parts.append(newPart)
|
oomlout/oomlout-OOMP
|
old/OOMPpart_RESE_0805_X_O202_01.py
|
Python
|
cc0-1.0
| 243
|
def circle(cx, cy, diameter):
radius = diameter / 2
oval(cx - radius, cy - radius, diameter, diameter)
# diameter = 254
# radius = diameter / 2
# cx, cy = (420, 532)
# oval(cx - radius, cy - radius, diameter, diameter)
circle(420, 532, 254)
# diameter = 154
# radius = diameter / 2
# cx, cy = (728, 414)
# oval(cx - radius, cy - radius, diameter, diameter)
circle(728, 414, 154)
circle(510, 258, 306)
|
shannpersand/cooper-type
|
workshops/Python Workshop/Just/2016-06-21 Cooper workshop day 2/14 circle function.py
|
Python
|
cc0-1.0
| 416
|
def countup(n):
if n >= 10:
print "Blastoff!"
else:
print n
countup(n+1)
def main():
countup(1)
main()
def countdown_from_to(start,stop):
if start == stop:
print "Blastoff!"
elif start <= stop:
print "Invalid pair"
else:
print start
countdown_from_to(start - 1,stop)
def main():
countdown_from_to(89,53)
main()
def adder(sum_):
    number = raw_input("Next Number")
    if number == "":
        print "The Sum Is {}".format(sum_)
    else:
        try:
            sum_ += float(number)
            print "Running total: {}".format(sum_)
        except ValueError:
            # non-numeric input: report it and keep asking
            print "Not a number: {}".format(number)
        adder(sum_)
def main():
sum_ = 0
adder(sum_)
main()
|
tonsom1592-cmis/tonsom1592-cmis-cs2
|
recursion.py
|
Python
|
cc0-1.0
| 624
|
#Music Class and support functions
import pygame
import parameters
from filemanager import filemanager
from pygame.locals import *
from pygame import *
from pygame.mixer import *
#Pygame Module for Music and Sound
pigmusic = None
currentStdMusic=None
currentMenuMusic=None
currentType = None
def initmusic():
global pigmusic
#Init pygame mixer and music
print "music init GO"
try:
if pygame.mixer and not pygame.mixer.get_init():
pygame.mixer.init()
if not pygame.mixer:
print 'Warning, sound disabled'
else:
pigmusic=pygame.mixer.music
except (pygame.error):
print 'Warning, unable to init music'
print "music init OUT ",pigmusic
def upmusic():
global pigmusic
if not pigmusic:
return
vol=pigmusic.get_volume()
if vol <= 0.9:
pigmusic.set_volume(vol+0.1)
def downmusic():
global pigmusic
if not pigmusic:
return
vol=pigmusic.get_volume()
if vol > 0.0:
pigmusic.set_volume(vol-0.1)
def stopmusic():
global pigmusic
if not pygame.mixer.get_init():
return
if not pigmusic:
return
if pigmusic.get_busy():
pigmusic.stop()
def setvolume(vol):
global pigmusic
pigmusic.set_volume(vol)
def getcurrentStdMusic():
global currentStdMusic
return currentStdMusic
def getcurrentMenuMusic():
global currentMenuMusic
return currentMenuMusic
def returtostdmusic():
#called when we want to force the music to play std music
cur=currentStdMusic
cur.playmusic()
class Music:
def __init__(self, name, filename, musictype='std', vol=0.5):
self._name=name
self._file=filename
self._type=musictype
self._vol=vol
    def playmusic(self,loop=-1):
        global pigmusic,currentStdMusic,currentMenuMusic,currentType
        print "music play",self._file
        if not pigmusic:
            initmusic()
        if self._type == 'std':
            if not currentStdMusic:
                # first standard music ever requested becomes the current one
                currentStdMusic=self
            if pigmusic.get_busy():
                if currentType == 'std':
                    if currentStdMusic.getfile()==self._file:
                        # same standard music is already playing, nothing to do
                        return
                    # a different standard music: remember it and load it below
                    currentStdMusic=self
                else:
                    # menu music is playing, switch back to standard music
                    currentType='std'
            else:
                # nothing is playing, start this standard music
                currentType='std'
                currentStdMusic=self
        else:
            if not currentMenuMusic:
                currentMenuMusic=self
            if pigmusic.get_busy():
                if currentType == 'menu':
                    if currentMenuMusic.getfile()==self._file:
                        # same menu music is already playing, nothing to do
                        return
                    currentMenuMusic=self
                else:
                    # standard music is playing, switch to menu music
                    currentType='menu'
            else:
                currentType='menu'
                currentMenuMusic=self
        pigmusic.load(filemanager.find_music(self._file))
        pigmusic.set_volume(self._vol)
        pigmusic.play(loop)
def getfile(self):
return self._file
def getname(self):
return self._name
def stopmusic(self):
print "we stop music!!!!! ",self._file
global pigmusic
if not pigmusic:
return
if pigmusic.get_busy():
if self._type == 'std':
if currentStdMusic.getfile()==self._file:
pigmusic.stop()
else:
if currentMenuMusic.getfile()==self._file:
pigmusic.stop()
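
# Illustrative sketch (assumption, not in the original file): typical usage.
#
#   theme = Music('theme', 'theme.ogg', musictype='std', vol=0.7)
#   theme.playmusic()   # loads the file via filemanager and loops it
#   theme.stopmusic()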
|
ilathid/ilathidEngine
|
engine/music.py
|
Python
|
epl-1.0
| 6,871
|
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Basic simplified data test functions - NOT FOR XML """
from invenio.bibworkflow_config import CFG_OBJECT_STATUS
def task_a(a):
def _task_a(obj, eng):
"""Function task_a docstring"""
eng.log.info("executing task a " + str(a))
obj.data += a
return _task_a
def task_b(obj, eng):
"""Function task_b docstring"""
eng.log.info("executing task b")
if obj.data < 20:
obj.change_status(CFG_OBJECT_STATUS.ERROR)
eng.log.info("Object status %s" % (obj.db_obj.status,))
eng.log.info("data < 20")
obj.add_task_result("task_b", {'a': 12, 'b': 13, 'c': 14})
eng.halt("Value of filed: data in object is too small.")
|
kntem/webdeposit
|
modules/bibworkflow/lib/tasks/simplified_data_tasks.py
|
Python
|
gpl-2.0
| 1,455
|
from pik.flights import Flight
import csv
import sys
reader = csv.reader(sys.stdin)
for flight in Flight.generate_from_csv(reader):
print flight
|
tazle/pik-laskutin
|
import-flights.py
|
Python
|
gpl-2.0
| 151
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
#===============================================================================
#
# Dependencies
#
#-------------------------------------------------------------------------------
from layman.utils import path
from layman.overlays.source import OverlaySource
#===============================================================================
#
# Class StubOverlay
#
#-------------------------------------------------------------------------------
class StubOverlay(OverlaySource):
    ''' Handles overlays with missing modules. '''
    type = 'N/A'
    type_key = 'n/a'
    def __init__(self, parent, config, _location, ignore = 0):
        super(StubOverlay, self).__init__(parent,
            config, _location, ignore)
        self.branch = self.parent.branch
        self.info = {'name': self.parent.name, 'type': self.parent.ovl_type}
        self.missing_msg = 'Overlay "%(name)s" is missing "%(type)s" module!'\
            % self.info
        self.hint = 'Did you install layman with "%(type)s" support?'\
            % self.info
    def add(self, base):
        '''Add overlay.'''
        self.output.error(self.missing_msg)
        self.output.warn(self.hint)
        return True
    def update(self, base, src):
        '''
        Updates overlay src-url.
        '''
        self.output.error(self.missing_msg)
        self.output.warn(self.hint)
        return True
    def sync(self, base):
        '''Sync overlay.'''
        self.output.error(self.missing_msg)
        self.output.warn(self.hint)
        return True
    def supported(self):
        '''Overlay type supported?'''
        return False
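# Behaviour sketch (assumed arguments): any action on a stub overlay only
# reports the missing module instead of doing work, e.g.
#   StubOverlay(parent, config, '/var/lib/layman').sync('/var/lib/layman')
# logs the error/hint pair and returns True.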
|
gentoo/layman
|
layman/overlays/modules/stub/stub.py
|
Python
|
gpl-2.0
| 1,731
|
import os
from flask import Flask
from flask import request
import requests
import random
import codecs
import message_callback  # assumed sibling module that handles parsed messages (used below)
#API id
#move this to a config file
bot_id = ''
app = Flask(__name__)
#encode string as ASCII, replacing characters that cannot be encoded
def stripped(text):
    text = text.lower()
    return text.encode('ascii', 'replace')
def send(text):
    message = {
        'text' : text,
        'bot_id' : bot_id
    }
    r = requests.post("https://api.groupme.com/v3/bots/post", params = message)
@app.route('/', methods=['POST'])
def message():
    if not request.json or not 'text' in request.json:
        return ''
    user_id = request.json['user_id']
    nick = request.json['name'].lower()
    message = request.json['text'].lower()
    message = stripped(message).strip()
    print 'Got message: ' + message
    message_callback.got_message(message, nick)
    return ''
if __name__ == "__main__":
    app.run(port = 8080, host = '0.0.0.0', debug = True)
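# The inbound GroupMe callback payload includes at least 'text', 'name' and
# 'user_id', which is all message() reads above. Run with
#   python groupme_bot.py
# after filling in bot_id (assumed deployment; no config file exists yet).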
|
marcb1/groupme_bot
|
src/groupme_bot.py
|
Python
|
gpl-2.0
| 891
|
from django.conf.urls import patterns, url
from django.contrib import admin
import views
admin.autodiscover()
urlpatterns = patterns(
    '',
    url(r'^$', views.HomeView.as_view(), name="home"),
    url(r'^clues/$', views.CluesView.as_view(), name="clues"),
    url(r'^test$', views.TestView.as_view(), name="test"),
    url(r'^userlog/post$', views.userlog_post, name="userlog_post"),
    url(r'^clues/cat/(?P<cat>.+)$', views.CluesView.as_view(), name="clues-by-cat"),
)
|
hillscottc/quest
|
questapp/urls.py
|
Python
|
gpl-2.0
| 484
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Convolve MTSS rotamers with MD trajectory.
# Copyright (c) 2011-2017 Philip Fowler and AUTHORS
# Published under the GNU Public Licence, version 2 (or higher)
#
# Includes a rotamer library for MTSS at 298 K by Gunnar Jeschke,
# which is published under the same licence by permission.
"""\
Rotamer library handling
========================
:mod:`rotamers.library` contains the data (:data:`LIBRARIES`) to load
a rotamer library, represented by a :class:`RotamerLibrary`.
"""
from __future__ import absolute_import, division, print_function
import MDAnalysis, MDAnalysis.lib.util
import logging
logger = logging.getLogger("MDAnalysis.app")
import numpy as np
import os.path
import pkg_resources
#: Name of the directory in the package that contains the library data.
LIBDIR = "data"
# This could be turned into a YAML file.
#: Registry of libraries, indexed by name.
LIBRARIES = {
    'MTSSL 298K 2011': {
        'topology': "rotamer1_R1A_298K_2011.pdb",
        'ensemble': "rotamer1_R1A_298K_2011.dcd",
        'populations': "R1A_298K_populations_2011.dat",
        'author': "Gunnar Jeschke",
        'licence': "GPL v2",
        'citation': "Polyhach Y, Bordignon E, Jeschke G. "
                    "Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
    },
    'MTSSL 298K 2015': {
        'topology': "rotamer1_R1A_298K_2015.pdb",
        'ensemble': "rotamer1_R1A_298K_2015.dcd",
        'populations': "R1A_298K_populations_2015.dat",
        'author': "Gunnar Jeschke",
        'licence': "GPL v2",
        'citation': "Polyhach Y, Bordignon E, Jeschke G. "
                    "Phys Chem Chem Phys. 2011; 13(6):2356-2366. doi: 10.1039/c0cp01865a",
        'information': "updated version of the MTSSL rotamer library from 2015"
    },
}
def find_file(filename, pkglibdir=LIBDIR):
    """Return full path to file *filename*.
    1) If the *filename* exists, return rooted canonical path.
    2) Otherwise, create a path to file in the installed *pkglibdir*.
    .. note::
       A file name is *always* returned, even if the file does not
       exist (because this is how :func:`pkg_resources.resource_filename`
       works).
    """
    if os.path.exists(filename):
        return MDAnalysis.lib.util.realpath(filename)
    return pkg_resources.resource_filename(__name__, os.path.join(pkglibdir, filename))
class RotamerLibrary(object):
    """Rotamer library
    The library makes available the attributes :attr:`rotamers`, and :attr:`weights`.
    .. attribute:: rotamers
       :class:`MDAnalysis.core.AtomGroup.Universe` instance that
       records all rotamers as a trajectory
    .. attribute:: weights
       NumPy array containing the population of each rotamer.
    .. attribute:: name
       Name of the library.
    .. attribute:: lib
       Dictionary containing the file names and meta data for the library :attr:`name`.
    """
    def __init__(self, name):
        """RotamerLibrary(name)
        :Arguments:
           *name*
              name of the library (must exist in the registry of libraries, :data:`LIBRARIES`)
        """
        self.name = name
        self.lib = {}
        try:
            self.lib.update(LIBRARIES[name])  # make a copy
        except KeyError:
            raise ValueError("No rotamer library with name {0} known: must be one of {1}".format(name,
                             LIBRARIES.keys()))
        logger.info("Using rotamer library '{0}' by {1[author]}".format(self.name, self.lib))
        logger.info("Please cite: {0[citation]}".format(self.lib))
        # adjust paths
        for k in 'ensemble', 'topology', 'populations':
            self.lib[k] = find_file(self.lib[k])
        logger.debug("[rotamers] ensemble = {0[ensemble]} with topology = {0[topology]}".format(self.lib))
        logger.debug("[rotamers] populations = {0[populations]}".format(self.lib))
        self.rotamers = MDAnalysis.Universe(self.lib['topology'], self.lib['ensemble'])
        self.weights = self.read_rotamer_weights(self.lib['populations'])
        if len(self.rotamers.trajectory) != len(self.weights):
            err_msg = "Discrepancy between number of rotamers ({0}) and weights ({1})".format(
                len(self.rotamers.trajectory), len(self.weights))
            logger.critical(err_msg)
            raise ValueError(err_msg)
    def read_rotamer_weights(self, filename):
        """read in the rotamer weights from *filename*
        There is one weight per conformer (frame) in the trajectory.
        """
        return np.loadtxt(filename)
    def __repr__(self):
        return "<RotamerLibrary '{0}' by {1} with {2} rotamers>".format(self.name, self.lib['author'],
                                                                        len(self.weights))
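# Minimal usage sketch (assumes the packaged data files are installed):
#   lib = RotamerLibrary('MTSSL 298K 2011')
#   print(repr(lib))  # <RotamerLibrary 'MTSSL 298K 2011' by Gunnar Jeschke with N rotamers>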
|
MDAnalysis/RotamerConvolveMD
|
rotcon/library.py
|
Python
|
gpl-2.0
| 5,002
|
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from omeroweb.webgateway.views import getBlitzConnection, _session_logout
from omeroweb.webgateway import views as webgateway_views
import settings
import logging
import traceback
import omero
# use the webclient's gateway connection wrapper
from webclient.webclient_gateway import OmeroWebGateway
import webmobile_util
logger = logging.getLogger('webmobile')
def isUserConnected (f):
    """
    connection decorator (wraps methods that require connection) - adapted from webclient.views
    retrieves the connection and passes it to the wrapped method in kwargs
    TODO: would be nice to refactor isUserConnected from webclient to be usable from here.
    """
    def wrapped (request, *args, **kwargs):
        #this checks the connection exists; if not it will redirect to the login page
        url = request.REQUEST.get('url')
        if url is None or len(url) == 0:
            if request.META.get('QUERY_STRING'):
                url = '%s?%s' % (request.META.get('PATH_INFO'), request.META.get('QUERY_STRING'))
            else:
                url = '%s' % (request.META.get('PATH_INFO'))
        conn = None
        loginUrl = reverse("webmobile_login")
        try:
            conn = getBlitzConnection(request, useragent="OMERO.webmobile")
        except Exception, x:
            logger.error(traceback.format_exc())
            return HttpResponseRedirect("%s?error=%s&url=%s" % (loginUrl, str(x), url))
        # if we failed to connect - redirect to login page, passing the destination url
        if conn is None:
            return HttpResponseRedirect("%s?url=%s" % (loginUrl, url))
        # if we got a connection, pass it to the wrapped method in kwargs
        kwargs["error"] = request.REQUEST.get('error')
        kwargs["conn"] = conn
        kwargs["url"] = url
        return f(request, *args, **kwargs)
    return wrapped
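# Usage sketch: any view wrapped below receives the live connection in kwargs:
#   @isUserConnected
#   def my_view(request, **kwargs):   # my_view is a hypothetical example
#       conn = kwargs["conn"]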
def groups_members(request):
    """
    List the users of the current group - if permitted
    """
    conn = getBlitzConnection (request, useragent="OMERO.webmobile")
    if conn is None or not conn.isConnected():
        return HttpResponseRedirect(reverse('webmobile_login'))
    groupId = conn.getEventContext().groupId
    showMembers = True
    if str(conn.getEventContext().groupPermissions) == "rw----":
        showMembers = False
    members = conn.containedExperimenters(groupId)
    groups = []
    perms = {"rw----":'private', "rwr---":'read-only', "rwrw--":'collaborative'}
    for g in conn.getGroupsMemberOf():
        try:
            p = perms[str(g.getDetails().permissions)]
        except KeyError:
            p = ""
        groups.append({
            "id": g.id,
            "name": g.getName(),
            "permissions": p
        })
    return render_to_response('webmobile/groups_members.html', {'client': conn, 'showMembers': showMembers,
                              'members': members, 'groups': groups})
def switch_group(request, groupId):
    """
    Switch to the specified group, then redirect to index.
    """
    conn = getBlitzConnection (request, useragent="OMERO.webmobile")
    if conn is None or not conn.isConnected():
        return HttpResponseRedirect(reverse('webmobile_login'))
    from webclient.views import change_active_group
    try:
        #change_active_group(request, kwargs={'conn': conn})
        conn.changeActiveGroup(long(groupId))  # works except after viewing thumbnails in private group!
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    return HttpResponseRedirect(reverse('webmobile_index'))
@isUserConnected
def change_active_group(request, groupId, **kwargs):
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return handlerInternalError("Connection is not available. Please contact your administrator.")
    url = reverse('webmobile_index')
    server = request.session.get('server')
    username = request.session.get('username')
    password = request.session.get('password')
    ssl = request.session.get('ssl')
    version = request.session.get('version')
    webgateway_views._session_logout(request, request.session.get('server'))
    blitz = settings.SERVER_LIST.get(pk=server)
    request.session['server'] = blitz.id
    request.session['host'] = blitz.host
    request.session['port'] = blitz.port
    request.session['username'] = username
    request.session['password'] = password
    request.session['ssl'] = (True, False)[request.REQUEST.get('ssl') is None]
    request.session['clipboard'] = {'images': None, 'datasets': None, 'plates': None}
    request.session['shares'] = dict()
    request.session['imageInBasket'] = set()
    blitz_host = "%s:%s" % (blitz.host, blitz.port)
    request.session['nav'] = {"error": None, "blitz": blitz_host, "menu": "start", "view": "icon", "basket": 0, "experimenter": None, 'callback': dict()}
    #conn = getBlitzConnection(request, useragent="OMERO.webmobile")
    if conn.changeActiveGroup(groupId):
        request.session.modified = True
    else:
        error = 'You cannot change your group because the data is currently processing. You can force it by logging out and logging in again.'
        url = reverse("webindex") + ("?error=%s" % error)
        if request.session.get('nav')['experimenter'] is not None:
            url += "&experimenter=%s" % request.session.get('nav')['experimenter']
    request.session['version'] = conn.getServerVersion()
    return HttpResponseRedirect(url)
def viewer(request, imageId):
    conn = getBlitzConnection (request, useragent="OMERO.webmobile")
    if conn is None or not conn.isConnected():
        return HttpResponseRedirect(reverse('webmobile_login'))
    image = conn.getObject("Image", imageId)
    w = image.getSizeX()
    h = image.getSizeY()
    return render_to_response('webmobile/viewers/viewer_iphone.html', {'image':image})
@isUserConnected
def viewer_big(request, imageId, **kwargs):
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    image = conn.getImage(imageId)
    w = image.getWidth()
    h = image.getHeight()
    z = image.z_count() / 2
    return render_to_response('webmobile/viewers/big_iphone.html', {'image':image, 'w':w, 'h': h, 'z':z})
@isUserConnected
def projects (request, eid=None, **kwargs):
    """ List the projects owned by the current user, or another user specified by eId """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    #projects = filter(lambda x: x.isOwned(), conn.listProjects())
    #eId = request.REQUEST.get('experimenter', None)
    experimenter = None
    if eid is not None:
        experimenter = conn.getObject("Experimenter", eid)
    else:
        # show current user's projects by default
        eid = conn.getEventContext().userId
    projs = conn.listProjects(eid=eid)
    projs = list(projs)
    if request.REQUEST.get('sort', None) == 'recent':
        projs.sort(key=lambda x: x.creationEventDate())
        projs.reverse()
    else:
        projs.sort(key=lambda x: x.getName().lower())
    ods = conn.listOrphans("Dataset", eid=eid)
    orphanedDatasets = list(ods)
    return render_to_response('webmobile/browse/projects.html',
                              { 'client':conn, 'projects':projs, 'datasets':orphanedDatasets, 'experimenter':experimenter })
@isUserConnected
def project(request, id, **kwargs):
    """ Show datasets belonging to the specified project """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    prj = conn.getObject("Project", id)
    return render_to_response('webmobile/browse/project.html', {'client':conn, 'project':prj})
@isUserConnected
def object_details(request, obj_type, id, **kwargs):
    """ Show project/dataset details: Name, description, owner, annotations etc """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    if obj_type == 'project':
        obj = conn.getObject("Project", id)
        title = 'Project'
    elif obj_type == 'dataset':
        obj = conn.getObject("Dataset", id)
        title = 'Dataset'
    anns = getAnnotations(obj)
    parent = obj.getParent()
    return render_to_response('webmobile/browse/object_details.html', {'client': conn, 'object': obj, 'title': title,
                              'annotations':anns, 'obj_type': obj_type})
@isUserConnected
def dataset(request, id, **kwargs):
    """ Show images in the specified dataset """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    ds = conn.getObject("Dataset", id)
    return render_to_response('webmobile/browse/dataset.html', {'client': conn, 'dataset': ds})
@isUserConnected
def image(request, imageId, **kwargs):
    """ Show image summary: Name, dimensions, large thumbnail, description, annotations """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    img = conn.getObject("Image", imageId)
    anns = getAnnotations(img)
    return render_to_response('webmobile/browse/image.html', {'client': conn, 'object':img, 'obj_type':'image',
                              'annotations': anns})
@isUserConnected
def orphaned_images(request, eid, **kwargs):
    """ List the specified user's images that are not contained in any dataset """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    orphans = conn.listOrphans("Image", eid=eid)
    return render_to_response('webmobile/browse/orphaned_images.html', {'client': conn, 'orphans':orphans})
@isUserConnected
def screens(request, eid=None, **kwargs):
    """ List screens for the current user, or another user specified by eid """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    experimenter = None
    if eid is not None:
        experimenter = conn.getObject("Experimenter", eid)
    else:
        # show current user's screens by default
        eid = conn.getEventContext().userId
    scrs = conn.listScreens(eid=eid)
    if request.REQUEST.get('sort', None) == 'recent':
        scrs = list(scrs)
        scrs.sort(key=lambda x: x.creationEventDate())
        scrs.reverse()
    ops = conn.listOrphans("Plate", eid=eid)
    orphanedPlates = list(ops)
    return render_to_response('webmobile/browse/screens.html',
                              {'client':conn, 'screens':scrs, 'orphans':orphanedPlates, 'experimenter':experimenter })
@isUserConnected
def screen(request, id, **kwargs):
    """ Show plates in the specified screen """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    scrn = conn.getObject("Screen", id)
    return render_to_response('webmobile/browse/screen.html', {'client': conn, 'screen': scrn})
@isUserConnected
def plate(request, id, **kwargs):
    """ Show plate - grid of thumbs? """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    # NOTE: this mirrors screen() above and still fetches the Screen; it reads
    # like a copy-paste placeholder until a dedicated plate view/template exists.
    scrn = conn.getObject("Screen", id)
    return render_to_response('webmobile/browse/screen.html', {'client': conn, 'screen': scrn})
def getAnnotations(obj):
    """ List the annotations and sort into comments, tags, ratings, files etc """
    comments = list()
    ratings = list()
    files = list()
    tags = list()
    from omero.model import CommentAnnotationI, LongAnnotationI, TagAnnotationI, FileAnnotationI
    for ann in obj.listAnnotations():
        if isinstance(ann._obj, CommentAnnotationI):
            comments.append(ann)
        elif isinstance(ann._obj, LongAnnotationI):
            ratings.append(ann)
        elif isinstance(ann._obj, FileAnnotationI):
            files.append(ann)
        elif isinstance(ann._obj, TagAnnotationI):
            tags.append(ann)
    comments.sort(key=lambda x: x.creationEventDate())
    comments.reverse()
    return {"comments":comments, "ratings":ratings, "files":files, "tags":tags}
@isUserConnected
def edit_object(request, obj_type, obj_id, **kwargs):
    """
    Display a page for editing Name and Description of Project/Dataset/Image etc
    Page 'submit' redirects here with 'name' and 'description' in POST, which
    will do the edit and return to the object_details page.
    """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    if obj_type == 'image':
        obj = conn.getObject("Image", obj_id)
        title = 'Image'
        redirect = reverse('webmobile_image', kwargs={'imageId':obj_id})
    elif obj_type == 'dataset':
        obj = conn.getObject("Dataset", obj_id)
        title = 'Dataset'
        redirect = reverse('webmobile_dataset_details', kwargs={'id':obj_id})
    elif obj_type == 'project':
        obj = conn.getObject("Project", obj_id)
        title = 'Project'
        redirect = reverse('webmobile_project_details', kwargs={'id':obj_id})
    # if name, description in request, edit and redirect to object_details
    name = request.REQUEST.get('name', None)
    if name:
        obj.setName(name)
        description = request.REQUEST.get('description', '').strip()
        if len(description) == 0:
            description = None
        obj.setDescription(description)
        obj.save()
        return HttpResponseRedirect(redirect)
    return render_to_response('webmobile/browse/edit_object.html', {'client': conn, 'title':title, 'object':obj})
@isUserConnected
def add_comment(request, obj_type, obj_id, **kwargs):
    """
    Adds a comment (from request 'comment') to object 'project', 'dataset', 'image' then
    redirects to the 'details' page for that object: E.g. project_details page etc.
    """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    from omero.rtypes import rstring
    redirect = reverse('webmobile_index')  # default
    if obj_type == 'image':
        l = omero.model.ImageAnnotationLinkI()
        parent = omero.model.ImageI(obj_id, False)  # use unloaded object to avoid update conflicts
        redirect = reverse('webmobile_image', kwargs={'imageId':obj_id})
    elif obj_type == 'dataset':
        l = omero.model.DatasetAnnotationLinkI()
        parent = omero.model.DatasetI(obj_id, False)
        redirect = reverse('webmobile_dataset_details', kwargs={'id':obj_id})
    elif obj_type == 'project':
        l = omero.model.ProjectAnnotationLinkI()
        parent = omero.model.ProjectI(obj_id, False)
        redirect = reverse('webmobile_project_details', kwargs={'id':obj_id})
    comment = request.REQUEST.get('comment', None)
    if comment is None or (len(comment.strip()) == 0):
        return HttpResponseRedirect(redirect)
    updateService = conn.getUpdateService()
    ann = omero.model.CommentAnnotationI()
    comment = unicode(comment).encode("utf-8").strip()
    ann.setTextValue(rstring(comment))
    ann = updateService.saveAndReturnObject(ann)
    l.setParent(parent)
    l.setChild(ann)
    updateService.saveObject(l)
    return HttpResponseRedirect(redirect)
def login (request):
    if request.method == 'POST' and request.REQUEST['server']:
        blitz = settings.SERVER_LIST.get(pk=request.REQUEST['server'])
        request.session['server'] = blitz.id
        request.session['host'] = blitz.host
        request.session['port'] = blitz.port
    conn = getBlitzConnection (request, useragent="OMERO.webmobile")
    logger.debug(conn)
    url = request.REQUEST.get("url")
    if conn is None:
        return render_to_response('webmobile/login.html', {'gw':settings.SERVER_LIST, 'url': url})
    if url is not None and len(url) != 0:
        return HttpResponseRedirect(url)
    else:
        return HttpResponseRedirect(reverse('webmobile_index'))
def logout (request):
    _session_logout(request, request.session['server'])
    try:
        del request.session['username']
    except KeyError:
        logger.error(traceback.format_exc())
    try:
        del request.session['password']
    except KeyError:
        logger.error(traceback.format_exc())
    #request.session.set_expiry(1)
    return HttpResponseRedirect(reverse('webmobile_login'))
@isUserConnected
def index (request, eid=None, **kwargs):
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    experimenter = None
    if eid is not None:
        experimenter = conn.getObject("Experimenter", eid)
    return render_to_response('webmobile/index.html', {'client': conn, 'experimenter': experimenter})
@isUserConnected
def recent (request, obj_type, eid=None, **kwargs):
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    experimenter = None
    if eid:
        experimenter = conn.getObject("Experimenter", eid)
    # By default, get 3 each of Projects, Datasets, Images, Ratings, Comments, Tags
    obj_count = 3
    obj_types = None
    if obj_type == 'images':  # Get the last 12 images
        obj_types = ['Image']
        obj_count = 12
    elif obj_type == 'anns':  # 4 each of Tags, Comments, Rating
        obj_types = ['Annotation']
        obj_count = 4
    if obj_type == 'rois':
        recentResults = webmobile_util.listRois(conn, eid)
    else:
        recentItems = webmobile_util.listMostRecentObjects(conn, obj_count, obj_types, eid)
        recentResults = [ webmobile_util.RecentEvent(r) for r in recentItems ]
    # list members for links to other's recent activity
    groupId = conn.getEventContext().groupId
    members = conn.containedExperimenters(groupId)
    return render_to_response('webmobile/timeline/recent.html', {'client':conn, 'recent':recentResults,
                              'exp':experimenter, 'members':members, 'obj_type':str(obj_type) })
@isUserConnected
def recent_full_page (request, **kwargs):
    """
    Mock-up full page for Usability testing of recent views.
    """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    exp = conn.getObject("Experimenter", conn.getEventContext().userId)
    return render_to_response('webmobile/timeline/recent_full_page.html', {'client':conn, 'exp':exp })
@isUserConnected
def collab_annotations (request, myData=True, **kwargs):
    """
    Page displays recent annotations of OTHER users on MY data (myData=True) or
    MY annotations on data belonging to OTHER users.
    """
    conn = None
    try:
        conn = kwargs["conn"]
    except:
        logger.error(traceback.format_exc())
        return HttpResponse(traceback.format_exc())
    collabAnns = webmobile_util.listCollabAnnotations(conn, myData)
    return render_to_response('webmobile/timeline/recent_collab.html', {'client':conn, 'recent':collabAnns, 'myData':myData })
def image_viewer (request, iid, **kwargs):
    """ This view is responsible for showing pixel data as images """
    conn = getBlitzConnection (request, useragent="OMERO.webmobile")
    if conn is None or not conn.isConnected():
        return HttpResponseRedirect(reverse('webmobile_login'))
    kwargs['viewport_server'] = '/webclient'
    return webgateway_views.full_viewer(request, iid, _conn=conn, **kwargs)
|
joshmoore/openmicroscopy
|
components/tools/OmeroWeb/omeroweb/webmobile/views.py
|
Python
|
gpl-2.0
| 20,846
|
from netfilterqueue import NetfilterQueue
from dpkt import ip, icmp, tcp, udp
from scapy.all import *
import socket
def print_and_accept(pkt):
    data = pkt.get_payload()
    res = ip.IP(data)
    res2 = IP(data)
    i = ICMP(data)
    t = TCP(data)
    u = UDP(data)
    print "SOURCE IP: %s\tDESTINATION IP: %s" % (socket.inet_ntoa(res.src), socket.inet_ntoa(res.dst))
    print res2.show2()
    resp = srp1(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst='192.168.0.34'), iface="eth0", timeout=2)
    print resp.dst
    eth_dst = resp.src
    eth_src = resp.dst
    eth = Ether(src=eth_src, dst=eth_dst)
    eth.type = 2048
    sendp(eth/res2/res2, iface="eth0")
    pkt.accept()
nfqueue = NetfilterQueue()
nfqueue.bind(6, print_and_accept)
try:
    nfqueue.run()
except KeyboardInterrupt, ex:
    print ex
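# Packets only reach this handler if they are first directed into queue 6,
# e.g. with an iptables rule such as (assumed setup, not part of this script):
#   iptables -I FORWARD -j NFQUEUE --queue-num 6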
|
rafaelsilvag/pyNFRouter
|
test/teste.py
|
Python
|
gpl-2.0
| 798
|
########################################################################
# #
# Anomalous Diffusion #
# #
########################################################################
import steps.interface
########################################################################
# Create Model
from steps.model import *
from steps.geom import *
from steps.rng import *
from steps.sim import *
from steps.saving import *
from steps.visual import *
import time
mdl = Model()
r = ReactionManager()
with mdl:
    X = Species.Create()
    vsys = VolumeSystem.Create()
    with vsys:
        dif_X = Diffusion.Create(X, 2e-09)
########################################################################
# Create Geometry
tetmesh = TetMesh.LoadAbaqus('2_20_0.7.inp', scale=1e-06, ebs=None, shadow_mesh="2_20_0.7_conf")
########################################################################
# Create Random number generator
rng = RNG('mt19937', 512, int(time.time()%4294967295))
########################################################################
# Initialize simulation
sim = Simulation('Tetexact', mdl, tetmesh, rng)
sim.injection.X.Count = 2000
########################################################################
# Visualization
rs = ResultSelector(sim)
# Create control
sc = SimControl(end_time = 1.0, upd_interval = 0.00001)
with sc:
    with SimDisplay('Show Spine Species'):
        # Static mesh element
        ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
        # Dynamic element
        ElementDisplay(rs.LIST('dend', 'shaft').X, color=[1.0, 0.0, 0.0, 1.0], spec_size=0.1)
    with SimDisplay('Hide Spine Species'):
        ElementDisplay(rs.dend, color=[0, 0, 1, 0.2])
        ElementDisplay(rs.shaft.X, color=[1.0, 0.0, 0.0, 1.0], spec_size=0.1)
    with PlotDisplay('Plots'):
        SpatialPlot(rs.TETS(tetmesh.shaft.tets).X.Count, axis=[0, 0, 1], nbins=100)
# Enter visualization loop
sc.run()
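# Note: steps.visual opens interactive windows, so this script is assumed to be
# run in a graphical session, with the mesh files ('2_20_0.7.inp' and its
# shadow-mesh config) sitting next to the script.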
|
CNS-OIST/STEPS_Example
|
publication_models/API_2/Chen_FNeuroinf_2014/AD/AD_single.py
|
Python
|
gpl-2.0
| 2,125
|
from django.contrib.auth import authenticate, login, get_user_model
from django.utils import timezone  # needed for the failure log below (assumed intent of the original)
from mailin import Mailin
from string import punctuation
def authorize(request):
    email = request.POST.get('Email')
    password = request.POST.get('Password')
    if len(email) < 6 or len(password) < 10:
        return {'ERROR' : 'Too short'}
    else:
        user = authenticate(username = email, password = password)
        if user is not None:
            login(request, user)
            return {'VALID' : 'Logged in successfully'}
        else:
            return {'ERROR' : 'Username or password incorrect!'}
    return {'ERROR' : 'An unknown error occurred'}
def check_password(password, confirm):
    if password != confirm:
        return {'ERROR' : 'The two passwords do not match.'}
    elif len(password) < 10:
        return {'ERROR' : 'The password is too short.'}
    security_combo = [0, 0, 0]
    for c in password:
        if c.isupper():
            security_combo[0] = 1
        elif c.isalpha():
            security_combo[1] = 1
        elif c.isdigit():
            security_combo[2] = 1
        elif c in punctuation:
            security_combo[2] = 1
    if 0 in security_combo:
        return {'ERROR' : 'Password is not complex enough. Password requires 1 lower, 1 upper and 1 symbol or number.'}
    return {'VALID' : 'Good'}
def register(request):
    email = request.POST.get('Email')
    password = request.POST.get('Password')
    confirm = request.POST.get('confirmPassword')
    security_check = check_password(password, confirm)
    if 'ERROR' in security_check:
        return security_check
    else:
        user = get_user_model().objects.create_user(email=email, password=password)
        if 'VALID' in user:
            #return user;
            user_object = user['VALID']
            Send_registration_email(user_object.user_email, user_object.activation_url)
            return user
        else:
            return user
def Send_registration_email(emailAddress, activation_url):
    file = open('/var/www/html/ShotForTheHeart/ShotForTheHeart/Credentials').read()
    credentials = eval(file)
    mailSystem = Mailin("https://api.sendinblue.com/v2.0", credentials['email'])
    message = {
        'to' : {'knoop.rick@gmail.com':'Rick Knoop'},
        'from' : ['sftheart@uoguelph.ca' , 'Shot for the heart Guelph'],
        'subject' : 'Activate your account',
        'html' : 'Hello<br>You recently decided to register for an account at the Shot for the Heart website. Please click the link below to activate your account.<br><br>http://shotfortheheart.ca/register/'+activation_url+'<br><br>Thanks,<br>Shot for the Heart system administrator.',
    }
    result = mailSystem.send_email(message)
    if 'failure' in result['code']:
        try:
            file = open('/var/www/html/ShotForTheHeart/emailError.log', 'w+')
            file.write(str(timezone.now()) + ' email address: ' + str(emailAddress) + ' Error information: ' + str(result) + '\n\n')
            file.close()
        except:
            pass
        return {'ERROR': 'Your account was created correctly, but the email failed. Please contact sftheart@uoguelph.ca'}
    else:
        return {'VALID': 'Everything worked successfully'}
|
knoopr/ShotForTheHeart
|
ShotForTheHeart/utils.py
|
Python
|
gpl-2.0
| 2,874
|
# -*- mode: python; indent-tabs-mode: nil; tab-width: 3 -*-
# vim: set tabstop=3 shiftwidth=3 expandtab:
#
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom,
# and Karl MacMillan
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# TODO: These are fixed values. We need an intelligent way to vary them.
# This whole approach to fuzziness is syntactically convenient, but maybe
# not very efficient.
FUDGE_AMOUNT = 3
FUDGE_AMOUNT_2 = 6
from gamera.core import Rect, Point, Dim
# This is a factory function that looks like a constructor
def Fudge(o, amount=FUDGE_AMOUNT):
    # For rectangles, just return a new rectangle that is slightly larger
    if isinstance(o, Rect):
        return Rect(Point(int(o.ul_x - amount), int(o.ul_y - amount)), Dim(int(o.ncols + amount * 2), int(o.nrows + amount * 2)))
    # For integers, return one of our "fudge number proxies"
    elif isinstance(o, int):
        return FudgeInt(o, amount)
    elif isinstance(o, float):
        return FudgeFloat(o, amount)
F = Fudge
class FudgeNumber(object):
    def __lt__(self, other):
        return self.below < other
    def __le__(self, other):
        return self.below <= other
    def __eq__(self, other):
        return self.below <= other and self.above >= other
    def __ne__(self, other):
        return other < self.below or other > self.above
    def __gt__(self, other):
        return self.above > other
    def __ge__(self, other):
        return self.above >= other
class FudgeInt(FudgeNumber, int):
    def __init__(self, value, amount=FUDGE_AMOUNT):
        int.__init__(self, value)
        self.below = int(value - amount)
        self.above = int(value + amount)
class FudgeFloat(FudgeNumber, float):
    def __init__(self, value, amount=FUDGE_AMOUNT):
        float.__init__(self, value)
        self.below = float(value - amount)
        self.above = float(value + amount)
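# Behaviour sketch: a fudged number compares equal to anything within
# +/- amount of it, e.g.
#   F(100) == 102  # True, since 102 lies within [97, 103]
#   F(100) == 104  # False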
|
DDMAL/Gamera
|
gamera/fudge.py
|
Python
|
gpl-2.0
| 2,585
|
from tictactoe import game, player
import unittest
from unittest import mock
class GameTest(unittest.TestCase):
    def setUp(self):
        self.num_of_players = 2
        self.width = 3
        self.height = 3
        self.game = game.Game(2, 3, 3)
    def test_init(self):
        self.assertEqual(self.game.board, None)
        self.assertEqual(self.game.width, self.width)
        self.assertEqual(self.game.height, self.height)
        self.assertEqual(self.game.num_of_players, self.num_of_players)
        self.assertEqual(self.game.players, [])
        self.assertEqual(self.game.round_counter, 0)
        self.assertEqual(self.game.on_turn, 0)
    def test_setup(self):
        input_seq = ['Luke', 'x', 'Leia', 'o']
        with mock.patch('builtins.input', side_effect=input_seq):
            self.game.setup()
        expected = [('Luke', 'x'), ('Leia', 'o')]
        for e, p in zip(expected, self.game.players):
            self.assertEqual(p.name, e[0])
            self.assertEqual(p.symbol, e[1])
    def test_play_round(self):
        # setup
        input_seq = ['Luke', 'x', 'Leia', 'o']
        with mock.patch('builtins.input', side_effect=input_seq):
            self.game.setup()
        input_seq = ['2', '5', '3', '1', '9', '6', '7', '4']
        with mock.patch('builtins.input', side_effect=input_seq):
            self.game.play_round()
        finished, winner = self.game.board.finished()
        self.assertTrue(finished)
        self.assertEqual(winner, 1)
        expected_board = [[1, 0, 0], [1, 1, 1], [0, None, 0]]
        self.assertEqual(self.game.board.grid, expected_board)
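# To run this module directly (assumed layout; the project may use a runner):
#   python -m unittest test.test_game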
|
jureslak/racunalniske-delavnice
|
fmf/python_v_divjini/projekt/test/test_game.py
|
Python
|
gpl-2.0
| 1,623
|
from django.template import RequestContext
from django.shortcuts import render_to_response, HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.core.urlresolvers import reverse
import re
import json
from ..models import WorkOrder, Shipment
@login_required
def work_orders (request, status = 'incomplete'):
    context = RequestContext(request)
    context_dict = dict()
    open_orders = WorkOrder.objects.exclude(status = 4).exclude(status = 999)
    finished_orders = WorkOrder.objects.filter(status = 4)
    terminated_orders = WorkOrder.objects.filter(status = 999)
    unmatched_orders = WorkOrder.objects.exclude(status = 999) \
        .exclude(shipment__isnull = False)
    header_list = ['Order ID', 'Shipment', 'Owner', 'Create Date', 'Status', '']
    if status == 'incomplete':
        context_dict['orders'] = open_orders
        context_dict['count'] = open_orders.count()
    elif status == 'complete':
        context_dict['orders'] = finished_orders
        header_list.pop()  # Remove the blank column header over the Delete buttons
        header_list.insert(3, 'Finish Date')
        context_dict['count'] = finished_orders.count()
    elif status == 'terminated':
        context_dict['orders'] = terminated_orders
        header_list.pop()
        header_list.insert(4, 'Termination Date')
        context_dict['count'] = terminated_orders.count()
    elif status == 'unmatched':
        context_dict['orders'] = unmatched_orders
        context_dict['count'] = unmatched_orders.count()
    else:
        context_dict['orders'] = open_orders
        context_dict['count'] = open_orders.count()
    context_dict['status'] = status
    context_dict['headers'] = header_list
    return render_to_response('tracker/workorder_list.html', context_dict, context)
@login_required
def work_order_detail (request, id):
    context = RequestContext(request)
    context_dict = dict()
    order = WorkOrder.objects.get(id = id)
    header_list = ['Owner', 'Acct', 'Create Date', 'Shipment', 'Quantity', 'Status']
    if order.status == 4:
        header_list.insert(3, 'Complete Date')
    context_dict['headers'] = header_list
    context_dict['order'] = order
    context_dict['orderop_headers'] = ['Op ID', 'Time', 'Status', 'User']
    return render_to_response('tracker/workorder_detail.html', context_dict, context)
@login_required
def remove_work_order (request, id):
    try:
        order = WorkOrder.objects.get(id = id)
        order.remove_order()
        messages.add_message(request, messages.SUCCESS, "Order {} removed.".format(order.id))
    except WorkOrder.DoesNotExist:
        messages.add_message(request, messages.ERROR, "Can't find any Work Order with ID {}".format(id))
    return HttpResponseRedirect(reverse('work_order_list', args = ['incomplete']))
@login_required
def link_work_order (request, orderid):
    """
    Function to handle linking WorkOrder and Shipment objects
    """
    if request.method != 'POST':
        return HttpResponseRedirect(reverse('work_orders'))
    else:
        # TODO: Alert the user to discrepancies b/w the Work Order and the Shipment (i.e. different quantity)
        order = WorkOrder.objects.get(id = orderid)
        ship_desc = request.POST.get('shipid')
        ship_id = re.findall('#(\d+):', ship_desc)[0]
        shipment = Shipment.objects.get(shipid = ship_id)
        order.shipment = shipment
        order.save()
        messages.add_message(request, messages.SUCCESS, "Order {0} and Shipment {1} linked successfully.".format(
            order.id, shipment.shipid
        ))
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@csrf_exempt
def get_unmatched_shipments (request, order_id):
    """
    AJAX function to list Shipments that have no associated Work Order.
    Returns shipments belonging to a particular Customer (order owner)
    that are unmatched and still in storage (redundant?)
    """
    context_dict = dict()
    order = WorkOrder.objects.get(id = order_id)
    _owner = order.owner
    _list = Shipment.objects.exclude(status = 4) \
        .filter(owner = _owner) \
        .exclude(_workorder__isnull = False)
    context_dict['list'] = [str(shipment) for shipment in _list]
    return HttpResponse(json.dumps(context_dict), content_type = 'application/json')
@csrf_exempt
def get_unmatched_orders (request, ship_id):
    """
    AJAX function to list Work Orders that have no associated Shipment.
    Returns Work Orders belonging to a particular Customer (shipment owner)
    that are unmatched and not deleted
    """
    # TODO: Method to get unmatched orders by Acct ID
    context_dict = dict()
    shipment = Shipment.objects.get(shipid = ship_id)
    _owner = shipment.owner
    _list = WorkOrder.objects.exclude(status = 999) \
        .filter(owner = _owner) \
        .exclude(shipment__isnull = False)
    context_dict['list'] = [str(order) for order in _list]
    return HttpResponse(json.dumps(context_dict), content_type = 'application/json')
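# Both AJAX views above respond with JSON of the form {"list": ["...", ...]}
# (shape taken from the code); a client-side call that populates the linking
# dropdowns from this response is assumed, with URL patterns defined elsewhere.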
|
dannysellers/django_orders
|
tracker/views/workorder_views.py
|
Python
|
gpl-2.0
| 5,164
|
#These values must be replaced with your own server parameters: users, passwords, etc.
userDb = "userDb"
passDb = "passDb"
mail = "*********@gmail.com"
passMail = "passMail"
nameDb = "domotics_db"
urlDb = "urlDb"
serverPort = 8080
#Security Code Device
updateCode = "UPDATE device SET code = '%s' WHERE id = '%s' AND (code = '%s' OR connectionStatus = 0)"
updateCodeRemote = "UPDATE device SET code = '%s' WHERE idDevice = '%s'"
#manage Port
selectGetPort = "SELECT port FROM device WHERE id = '%s' AND code ='%s'"
#Remotes
selectGetDevicesRemote = "SELECT deviceRemote.id AS id, deviceRemote.pipeSend AS pipeSend, deviceRemote.pipeRecv AS pipeRecv, deviceRemote.type AS type FROM device deviceRemote, device deviceCentral WHERE deviceRemote.idDevice = deviceCentral.id AND deviceCentral.id = '%s' AND deviceCentral.code = '%s'"
#Type device
selectGetTypeDevice = "SELECT type FROM device WHERE id = '%s' AND code ='%s'"
#Get User id
selectUserId = "SELECT id FROM user WHERE login = '%s' AND password = '%s'"
#Check users and mails
selectUserExists = "SELECT login FROM user WHERE login = '%s'"
selectMailExists = "SELECT login FROM user WHERE mail = '%s'"
selectUserExistsCheck = "SELECT login FROM user WHERE login = '%s' AND active = '1'"
selectMailExistsWithoutCheck = "SELECT login FROM user WHERE mail = '%s' AND active != '1'"
#SignIn user
insertSignIn = "INSERT INTO user (login, name, mail, password, active) VALUES ('%s', '%s', '%s', '%s', '%d')"
updateSignIn = "UPDATE user SET login = '%s', name = '%s', password = '%s', active = '%d' WHERE mail = '%s'"
#Check SignIn
updateCheckSignIn = "UPDATE user SET active = 1 WHERE login = '%s' AND password = '%s' AND active = '%s'"
#LogIn
selectLogIn = "SELECT id, name, active FROM user WHERE login = '%s' AND password = '%s' AND active = '1'"
#List locations of user
selectLocationsUser = "SELECT location.id AS id, location.name AS name, location.security AS security FROM user, location, userLocation WHERE userLocation.idUser = user.id AND userLocation.idLocation = location.id AND user.id = '%s'"
#Check Device User
checkDeviceUser = "SELECT device.id AS idDevice FROM user, device, userLocation, locationDevice WHERE device.id = locationDevice.idDevice AND locationDevice.idLocation = userLocation.idLocation AND userLocation.idUser = user.id AND user.id = '%s' AND device.id = '%s'"
#Check Location User
checkLocationUser = "SELECT userLocation.idLocation AS idLocation FROM userLocation WHERE userLocation.idUser = '%s' AND userLocation.idLocation = '%s'"
#list devices of locations and user
selectDevicesLocation = "SELECT device.id AS id, device.name AS name, device.publicIp AS publicIp, device.privateIp AS privateIp, device.port AS port, DATE_FORMAT(device.timeStamp,'%%d/%%m/%%Y %%H:%%i:%%s') AS timeStamp, device.connectionStatus AS connectionStatus, device.RIPMotion AS RIPMotion, device.alarm AS alarm, device.type AS type, device.idDevice AS idDevice, device.pipeSend AS pipeSend, device.pipeRecv AS pipeRecv, device.code AS code, device.connectionMode AS connectionMode, device.version AS version FROM user, location, device, userLocation, locationDevice WHERE device.id = locationDevice.idDevice AND locationDevice.idLocation = location.id AND location.id = '%s' AND location.id = userLocation.idLocation AND userLocation.idUser = user.id AND user.id = '%s'"
#create new location
selectCheckLocationUser = "SELECT location.name AS name FROM user, location, userLocation WHERE userLocation.idUser = user.id AND userLocation.idLocation = location.id AND user.id = '%s' AND location.name = '%s'"
insertLocation = "INSERT INTO location (name, security) VALUES ('%s','1')"
insertLocationUser = "INSERT INTO userLocation (idUser, idLocation) VALUES ('%s','%s')"
#edit location
selectCheckUpdateLocationUser = "SELECT location.name AS name FROM user, location, userLocation WHERE userLocation.idUser = user.id AND userLocation.idLocation = location.id AND user.id = '%s' AND location.name = '%s' AND location.id != '%s'"
updateLocation = "UPDATE location SET name = '%s' WHERE id = '%s'"
updateLocationSecurity = "UPDATE location SET security = '%s' WHERE id = '%s'"
#delete location
deleteUserLocation = "DELETE FROM userLocation WHERE idLocation = '%s'"
deleteLocation = "DELETE FROM location WHERE id = '%s'"
#insert device
insertDeviceServer = "INSERT INTO device (name, port, timeStamp, type, idDevice) VALUES ('%s', '%s', NOW(), '%s', '%s')"
insertLocationDevice = "INSERT INTO locationDevice (idLocation, idDevice) VALUES ('%s', '%s')"
#Update Devices
updateDevice = "UPDATE device SET name = '%s', port = '%s', connectionMode = '%s', RIPMotion = '%s' WHERE id = '%s'"
updateDevicePipes = "UPDATE device SET pipeSend = '%s', pipeRecv = '%s' WHERE id = '%s'"
updateIpDevice = "UPDATE device SET publicIp = '%s', privateIp = '%s' WHERE id = '%s' AND code = '%s'"
updateNotOnline = "UPDATE device SET connectionStatus = '0' WHERE connectionStatus != '0' AND TIMEDIFF(NOW(), device.timeStamp) > TIME('00:01:00')"
updateOnline = "UPDATE device SET connectionStatus = '%s', device.timeStamp = NOW() WHERE id = '%s' AND code = '%s'"
#Check Device Remote for Delete
checkDeviceRemote = "SELECT id FROM device WHERE idDevice = '%s'"
#Delete devices
deleteTimerDevice = "DELETE FROM timer WHERE idDevice = '%s'"
deleteAlertDevice = "DELETE FROM alert WHERE idDevice = '%s'"
deleteSensorsData = "DELETE FROM sensors WHERE idDevice = '%s'"
deleteLocationDevice = "DELETE FROM locationDevice WHERE idDevice = '%s'"
deleteDevice = "DELETE FROM device WHERE id = '%s'"
#Security
selectLocationSecurity = "SELECT user.mail AS email, user.name AS nameUser, location.id AS idLocation, location.security AS security, location.name AS nameLocation, device.name AS nameDevice, device.RIPMotion AS RIPMotion, device.alarm AS alarm FROM location, device, locationDevice, userLocation, user WHERE device.id = locationDevice.idDevice AND locationDevice.idLocation = location.id AND device.id ='%s' AND device.code = '%s' AND userLocation.idLocation = location.id AND userLocation.idUser = user.id"
updateAlarm = "UPDATE device SET alarm = '%s' WHERE id = '%s'"
selectDevicesLocationOpenPort = "SELECT device.id AS id, device.publicIp AS publicIp, device.port AS port, device.name AS name, device.code AS code FROM device, locationDevice WHERE locationDevice.idLocation = '%s' AND locationDevice.idDevice = device.id AND device.connectionStatus = '1' AND device.RIPMotion = '1'"
selectDevicesLocationUserOpenPort = "SELECT device.publicIp AS publicIp, device.port AS port, device.name AS name, device.code AS code FROM device, locationDevice, userLocation WHERE locationDevice.idLocation = '%s' AND locationDevice.idDevice = device.id AND device.connectionStatus = '1' AND userLocation.idLocation = locationDevice.idLocation AND userLocation.idUser = '%s'"
selectDevicesOtherLocationOpenPort = "SELECT device.publicIp AS publicIp, device.port AS port, device.name AS name, device.code AS code FROM device, locationDevice WHERE locationDevice.idLocation <> '%s' AND locationDevice.idDevice = device.id AND device.connectionStatus = '1'"
selectDevicesLocationOpenPortCameras = "SELECT device.publicIp AS publicIp, device.port AS port, device.name AS name, device.code AS code FROM device, locationDevice WHERE locationDevice.idLocation = '%s' AND locationDevice.idDevice = device.id AND device.connectionStatus = '1' AND device.type = '2'"
checkDeviceAlarmStatus = "SELECT alarm FROM device WHERE id = '%s' AND code ='%s'"
#Alert
insertAlert = "INSERT INTO alert (date, time, type, idDevice) VALUES (CURRENT_DATE(), CURRENT_TIME(), '%s', '%s')"
checkInsertAlert = "SELECT id FROM alert WHERE alert.type = '%s' AND alert.idDevice = '%s' AND alert.date = CURRENT_DATE() AND CURRENT_TIME()-alert.time < TIME('00:02:00')"
selectAlert = "SELECT DATE_FORMAT(alert.date,'%%d/%%m/%%Y') AS date, DATE_FORMAT(alert.time,'%%H:%%i') AS time, alert.type AS type FROM device, alert, locationDevice, userLocation WHERE device.id = alert.idDevice AND device.id = '%s' AND alert.date = STR_TO_DATE('%s','%%d/%%m/%%Y') AND locationDevice.idDevice = device.id AND locationDevice.idLocation = userLocation.idLocation AND userLocation.idUser = '%s' ORDER BY alert.id DESC"
#Sensors
insertSensors = "INSERT INTO sensors (temperature, humidity, pressure, brightness, date, time, idDevice) VALUES ('%s', '%s', '%s', '%s', CURRENT_DATE(), CURRENT_TIME(), '%s')"
selectSensors = "SELECT temperature, humidity, pressure, brightness, DATE_FORMAT(sensors.time,'%%H:%%i') AS time FROM device, sensors, locationDevice, userLocation WHERE device.id = sensors.idDevice AND device.id = '%s' AND sensors.date = STR_TO_DATE('%s','%%d/%%m/%%Y') AND locationDevice.idDevice = device.id AND locationDevice.idLocation = userLocation.idLocation AND userLocation.idUser = '%s' ORDER BY sensors.id DESC"
#Timer
selectTimer = "SELECT id, name, active, DATE_FORMAT(time,'%%H:%%i') AS time, action FROM timer WHERE idDevice = '%s' ORDER BY time"
insertTimer = "INSERT INTO timer (name, active, time, action, idDevice) VALUES ('%s', '1', '%s', '%s', '%s')"
updateTimer = "UPDATE timer SET name = '%s', active = '%s', time = '%s', action = '%s' WHERE id = '%s' and idDevice = '%s'"
deleteTimer = "DELETE FROM timer WHERE id = '%s' and idDevice = '%s'"
selectTimerAutomation = "SELECT timer.action AS action, CURRENT_TIME()-timer.time AS diff FROM timer, device WHERE timer.idDevice = '%s' AND timer.idDevice = device.id AND device.code = '%s' AND timer.active = '1' AND CURRENT_TIME()-timer.time < TIME('00:01:00') AND CURRENT_TIME > timer.time ORDER BY 1"
#SoftwareUpdate
selectDeviceVersion = "SELECT version FROM device WHERE id = '%s' AND code ='%s'"
updateVersionDevice = "UPDATE device SET version = '%s' WHERE id = '%s' AND code = '%s'"
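# Usage sketch (assumed): these templates are filled with Python's % operator,
# e.g. cursor.execute(selectUserId % (login, hashed_password)). Note that
# %-interpolating user input is injection-prone; DB-API parameterized queries
# (cursor.execute(sql, args)) would be the safer pattern here.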
|
PascualArroyo/Domotics
|
Server/myconfig.py
|
Python
|
gpl-2.0
| 9,785
|
from sys import exit
from random import randint
class Scene(object):
    def enter(self):
        print "This scene is not yet configured. Subclass it and implement enter()."
        exit(1)
class Engine(object):
    def __init__(self, scene_map):
        self.scene_map = scene_map
    def play(self):
        current_scene = self.scene_map.opening_scene()
        last_scene = self.scene_map.next_scene('finished')
        while current_scene != last_scene:
            next_scene_name = current_scene.enter()
            current_scene = self.scene_map.next_scene(next_scene_name)
        # be sure to print out the last scene
        current_scene.enter()
class Death(Scene):
    quips = [
        "You died. You kinda suck at this.",
        "Your mum would be proud..if she were smarter.",
        "Such a luser.",
        "I have a small puppy that's better at this."
    ]
    def enter(self):
        print Death.quips[randint(0, len(self.quips)-1)]
        exit(1)
class CentralCorridor(Scene):
    def enter(self):
        print "The Gothons of Planet #25 have invaded your ship and destroyed your entire crew. You are the last surviving member and your last mission is to get the neutron destruct bomb from the Weapons Armory, put it in the bridge, and blow the ship up after getting into an escape pod."
        print "\n"
        print "You're running down the central corridor to the Weapons Armory when a Gothon jumps out, red scaly skin, dark grimy teeth, and evil clown costumes flowing around his hate filled body. He's blocking the door to the Armory and about to pull a weapon to blast you."
        action = raw_input("> ")
        if action == "shoot!":
            print "Quick on the draw you yank out your blaster and fire it at the Gothon. His clown costume is flowing and moving around his body, which throws off your aim. Your laser hits his costume but misses him entirely. This makes him fly into an insane rage and blast you repeatedly in the face until you are dead. Then he eats you."
            return 'death'
        elif action == "dodge!":
            print "Like a world class boxer you dodge, weave, slip and slide right as the Gothon's blaster cracks a laser past your head. In the middle of your artful dodge your foot slips and you bang your head on the metal wall and pass out. You wake up shortly after only to die as the Gothon stomps on your head and eats you."
            return 'death'
        elif action == "tell a joke":
            print "Lucky for you they made you learn Gothon insults in the academy. You tell the one Gothon joke you know: \nLbhe zbgure vf fb sbg, jura fur fvgf nebhaq gur ubhfr, fur fvgf nebhaq gur. \n The Gothon stops, tries not to laugh, then busts out laughing and can't stop. While he's laughing you run up and shoot him square in the head putting him down, then jump through the Weapon Armory door."
            return 'laser_weapon_armory'
        else:
            print "DOES NOT COMPUTE!"
            return 'central_corridor'
class LaserWeaponArmory(Scene):
    def enter(self):
        print "A lot of things happen in here. Blablabla."
        code = "%d%d%d" % (randint(1,9), randint(1,9), randint(1,9))
        guess = raw_input("[keypad]> ")
        guesses = 0
        while guess != code and guesses < 10:
            print "BZZZZZEED!"
            guesses += 1
            guess = raw_input("[keypad]> ")
        if guess == code:
            print "Go to the bridge."
            return 'the_bridge'
        else:
            print "Oops. You die."
            return 'death'
class TheBridge(Scene):
    def enter(self):
        print "You have a bomb under your arm and haven't pulled your weapon yet as more Gothons emerge."
        action = raw_input("> ")
        if action == "throw the bomb":
            print "You die."
            return 'death'
        elif action == "slowly place the bomb":
            print "You run to the escape pod to get off this tin can."
            return 'escape_pod'
        else:
            print "DOES NOT COMPUTE!"
            return 'the_bridge'
class EscapePod(Scene):
    def enter(self):
        print "There's 5 pods, which one do you take?"
        good_pod = randint(1,5)
        guess = raw_input("[pod #]> ")
        if int(guess) != good_pod:
            print "You die."
            return 'death'
        else:
            print "You won!"
            return 'finished'
class Finished(Scene):
    def enter(self):
        print "You won! Good job!"
        return 'finished'
class Map(object):
    scenes = {
        'central_corridor': CentralCorridor(),
        'laser_weapon_armory': LaserWeaponArmory(),
        'the_bridge': TheBridge(),
        'escape_pod': EscapePod(),
        'death': Death(),
        'finished': Finished(),
    }
    def __init__(self, start_scene):
        self.start_scene = start_scene
    def next_scene(self, scene_name):
        val = Map.scenes.get(scene_name)
        return val
    def opening_scene(self):
        return self.next_scene(self.start_scene)
a_map = Map('central_corridor')
a_game = Engine(a_map)
a_game.play()
|
tridvaodin/Assignments-Valya-Maskaliova
|
LPTHW/ex43.py
|
Python
|
gpl-2.0
| 5,149
|
#!/usr/bin/env python
import rospy
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import PoseStamped
import yaml
import os
import os.path
class GetLocals:
    def __init__(self):
        rospy.Subscriber('move_base_simple/goal', PoseStamped, self.goal_callback)
        print('\nPLEASE, SEND A GOAL WITH NAV 2D GOAL USING GRAPHIC USER INTERFACE TO SAVE A POINT!\n')
    def goal_callback(self, data):
        self.fname = os.path.expanduser('~') + '/catkin_ws/src/approach_control/approach_control_config/config/locals.yaml'
        stream = open(self.fname, 'r')
        self.data = yaml.load(stream)
        self.keys = self.data.keys()
        local = raw_input("Please, write the location for this point (if it doesn't exist it will be created): \n options: " + str(self.keys))
        if [x for x in self.keys if x == local]:
            self.data[local] = [[data.pose.position.x, data.pose.position.y], [0.0, 0.0, data.pose.orientation.z, data.pose.orientation.w]]
            with open(self.fname, 'w') as yaml_file:
                yaml_file.write(yaml.dump(self.data, default_flow_style = False))
            rospy.loginfo('Point Saved!')
            rospy.loginfo('\nPLEASE, SEND A GOAL WITH NAV 2D GOAL USING GRAPHIC USER INTERFACE TO SAVE A POINT!\n')
        else:
            c = raw_input('Save as a new place? (Y/N)')
            if c.lower() == 'y':
                self.data[local] = [[data.pose.position.x, data.pose.position.y], [0.0, 0.0, data.pose.orientation.z, data.pose.orientation.w]]
                with open(self.fname, 'w') as yaml_file:
                    yaml_file.write(yaml.dump(self.data, default_flow_style = False))
                rospy.loginfo('Point Saved!')
                rospy.loginfo('\nPLEASE, SEND A GOAL WITH NAV 2D GOAL USING GRAPHIC USER INTERFACE TO SAVE A POINT!\n')
            else:
                rospy.logerr('Point not Saved!')
                rospy.loginfo('\nPLEASE, SEND A GOAL WITH NAV 2D GOAL USING GRAPHIC USER INTERFACE TO SAVE A POINT!\n')
if __name__ == '__main__':
    rospy.init_node('getlocals', anonymous = True)
    GetLocals()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        rospy.loginfo('Shutting down!')
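# Typical use (assumed): start the node, then send a goal from RViz's
# "2D Nav Goal" tool (which publishes on move_base_simple/goal); the node then
# prompts on the terminal for the location name to store in locals.yaml.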
|
amasiero/approach_control
|
approach_control_navigation/nodes/get_locals.py
|
Python
|
gpl-2.0
| 1,989
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## formulacion_geotextiles.py - HARDCODED formulation of geotextiles
###################################################################
## NOTES:
## Window thrown together quickly to create the predefined discounts.
## It does not allow defining more material for the automatic discount
## nor changing the formulations, only the quantities and the purchase
## products used in each "category".
## ----------------------------------------------------------------
##
###################################################################
## Changelog:
##
##
###################################################################
#
###################################################################
from ventana import Ventana
from formularios import utils
import re
import pygtk
pygtk.require('2.0')
import gtk
from framework import pclases
from utils import _float as float
try:
    from psycopg import ProgrammingError as psycopg_ProgrammingError
except ImportError:
    from psycopg2 import ProgrammingError as psycopg_ProgrammingError
class FormulacionGeotextiles(Ventana):
def __init__(self, objeto = None, usuario = None):
"""
Constructor. objeto puede ser un objeto de pclases con el que
comenzar la ventana (en lugar del primero de la tabla, que es
el que se muestra por defecto).
"""
Ventana.__init__(self, 'formulacion_geotextiles.glade', objeto, usuario = usuario)
connections = {'b_salir/clicked': self.salir,
'_b_guardar/clicked': self.guardar,
'b_ensimaje/clicked': self.buscar_mp,
'b_antiestatico/clicked': self.buscar_mp,
'b_tubos/clicked': self.buscar_mp,
'b_plastico/clicked': self.buscar_mp,
'b_casquillos/clicked': self.buscar_mp,
'b_precinto/clicked': self.buscar_mp,
'b_grapas/clicked': self.buscar_mp,
'b_add_consumo/clicked': self.add_consumo_por_producto,
'b_drop_consumo/clicked': self.drop_consumo_por_producto,
'b_cambiar_producto_compra/clicked':
self.cambiar_producto_compra,
'b_add_producto_a_consumo/clicked':
self.add_producto_a_consumo
}
self.add_connections(connections)
cols = (("Descripción (opcional)", "gobject.TYPE_STRING",
True, True, True, None),
("Material", "gobject.TYPE_STRING", False, True, False, None),
("Cantidad", "gobject.TYPE_STRING",
True, True, True, self.cambiar_cantidad),
("Unidad", "gobject.TYPE_STRING",
True, True, True, self.cambiar_unidad),
("ID", "gobject.TYPE_STRING", False, False, False, None))
        # Unidad: it should be something like:
        #  % for a percentage of the weight.
        #  something/u to discount m, kg or whatever the purchase product is measured in, per manufactured unit.
        #  something/kg to discount m, kg or whatever the purchase product is measured in, per manufactured kg (m² for rolls).
        #  something/x m to discount m, kg or whatever the purchase product is measured in, per x metres of width of each
        #   manufactured roll (only for geotextiles and geocomposites).
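        # Illustrative examples of unit strings in the format above (taken
        # from the help text in pedir_unidad; annotation added for clarity,
        # not upstream): "%", "ud / 5 ud", "m / kg", "kg / 5.5 m".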
utils.preparar_treeview(self.wids['tv_consumos'], cols, multi = True)
        # In the treeview, each parent node is a raw material with its discount and so on. The child nodes hold the
        # sales products that the automatic discount applies to.
self.wids['tv_consumos'].connect("row-activated", self.abrir_producto)
self.comprobar_registro()
self.rellenar_widgets()
gtk.main()
def abrir_producto(self, tv, path, view_column):
"""
Abre el producto de compra si la línea marcada es de
consumo y el de venta si es un "contenido" de consumo.
"""
model = tv.get_model()
ide = model[path][-1]
if model[path].parent == None:
from formularios import productos_compra
consumo = pclases.ConsumoAdicional.get(ide)
producto_compra = consumo.productoCompra
v = productos_compra.ProductosCompra(producto_compra) # @UnusedVariable
else:
from formularios import productos_de_venta_rollos
v = productos_de_venta_rollos.ProductosDeVentaRollos(pclases.ProductoVenta.get(ide)) # @UnusedVariable
def guardar(self, b):
"""
Guarda los valores para la cantidad en los campos de los registros correspondientes.
"""
try:
self.cas['ensimaje'].cantidad = float(self.wids['e_censimaje'].get_text())
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "Corrija el formato numérico usado y vuelva a intentarlo.")
try:
self.cas['antiestatico'].cantidad = float(self.wids['e_cantiestatico'].get_text())
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "Corrija el formato numérico usado y vuelva a intentarlo.")
try:
self.cas['tubos'].cantidad = float(self.wids['e_ctubos'].get_text())
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "Corrija el formato numérico usado y vuelva a intentarlo.")
try:
self.cas['plastico'].cantidad = float(self.wids['e_cplastico'].get_text())
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "Corrija el formato numérico usado y vuelva a intentarlo.")
try:
self.cas['precinto'].cantidad = float(self.wids['e_cprecinto'].get_text())
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "Corrija el formato numérico usado y vuelva a intentarlo.")
try:
self.cas['casquillos'].cantidad = float(self.wids['e_ccasquillos'].get_text())
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "Corrija el formato numérico usado y vuelva a intentarlo.")
try:
self.cas['grapas'].cantidad = float(self.wids['e_cgrapas'].get_text())
except:
utils.dialogo_info(titulo = "ERROR DE FORMATO", texto = "Corrija el formato numérico usado y vuelva a intentarlo.")
self.rellenar_widgets()
def rellenar_widgets(self):
"""
Introduce los valores actuales de la formulación.
"""
#self.wids['e_censimaje'].set_text('%.5f' % self.cas['ensimaje'].cantidad)
#self.wids['e_ensimaje'].set_text(self.cas['ensimaje'].productoCompra and self.cas['ensimaje'].productoCompra.descripcion or '')
#self.wids['e_cantiestatico'].set_text('%.5f' % self.cas['antiestatico'].cantidad)
#self.wids['e_antiestatico'].set_text(self.cas['antiestatico'].productoCompra and self.cas['antiestatico'].productoCompra.descripcion or '')
#self.wids['e_ctubos'].set_text('%.5f' % self.cas['tubos'].cantidad)
#self.wids['e_tubos'].set_text(self.cas['tubos'].productoCompra and self.cas['tubos'].productoCompra.descripcion or '')
#self.wids['e_cplastico'].set_text('%.5f' % self.cas['plastico'].cantidad)
#self.wids['e_plastico'].set_text(self.cas['plastico'].productoCompra and self.cas['plastico'].productoCompra.descripcion or '')
#self.wids['e_cprecinto'].set_text('%.5f' % self.cas['precinto'].cantidad)
#self.wids['e_precinto'].set_text(self.cas['precinto'].productoCompra and self.cas['precinto'].productoCompra.descripcion or '')
#self.wids['e_ccasquillos'].set_text('%.5f' % self.cas['casquillos'].cantidad)
#self.wids['e_casquillos'].set_text(self.cas['casquillos'].productoCompra and self.cas['casquillos'].productoCompra.descripcion or '')
#self.wids['e_cgrapas'].set_text('%.5f' % self.cas['grapas'].cantidad)
#self.wids['e_grapas'].set_text(self.cas['grapas'].productoCompra and self.cas['grapas'].productoCompra.descripcion or '')
self.rellenar_consumos_adicionales_por_producto()
def rellenar_consumos_adicionales_por_producto(self):
"""
Rellena los consumos adicionales específicos por
producto fabricado.
"""
model = self.wids['tv_consumos'].get_model()
model.clear()
self.wids['tv_consumos'].freeze_child_notify()
self.wids['tv_consumos'].set_model(None)
consumos = pclases.ConsumoAdicional.select("""
id IN (SELECT consumo_adicional__producto_venta.consumo_adicional_id
FROM consumo_adicional__producto_venta
WHERE producto_venta_id IN (SELECT id
FROM producto_venta
WHERE campos_especificos_rollo_id IS NOT NULL))
""", orderBy = "id")
for consumo in consumos:
if consumo.productoCompra and consumo.productoCompra.obsoleto:
continue
padre = model.append(None,
(consumo.nombre,
consumo.productoCompra
and consumo.productoCompra.descripcion
or "-",
utils.float2str(consumo.cantidad, 5),
consumo.unidad,
consumo.id))
for producto in consumo.productosVenta:
model.append(padre, ("",
producto.descripcion,
"",
"",
producto.id))
self.wids['tv_consumos'].set_model(model)
self.wids['tv_consumos'].thaw_child_notify()
def comprobar_registro(self):
"""
Comprueba si existe el registro de la formulación de la línea
de geotextiles y los registros de descuento automático relacionados.
Si no existen, los crea con valores por defecto.
(De geocompuestos todavía no se ha dicho nada.)
"""
try:
linea = pclases.LineaDeProduccion.select(pclases.LineaDeProduccion.q.nombre.contains("de geotextiles"))[0]
        except IndexError:
utils.dialogo_info(titulo = "ERROR GRAVE", texto = "No se encontró la línea de geotextiles en la BD.\nCierre la ventana y contacte con el administrador de la aplicación.", padre = self.wids['ventana'])
return
self.objeto = linea.formulacion
if self.objeto == None:
self.objeto = pclases.Formulacion(nombre = "GEOTEXTILES", observaciones = "Generado automáticamente.")
pclases.Auditoria.nuevo(self.objeto, self.usuario, __file__)
linea.formulacion = self.objeto
nombres_ca_existentes = [ca.nombre for ca in self.objeto.consumosAdicionales]
nombres_ca = {'ensimaje': (0.3, ' %'),
# 'antiestatico': (0.3, ' %'),
'tubos': (1, ' ud / 5.5 m'),
'plastico': (0.414, ' k / 5.5 m'),
'casquillos': (2, ' ud / ud'),
# 'precinto': (10, ' m / ud'),
'grapas': (6, ' ud / ud'),
'agujas': (1, 'ud / 5.5 m'),
}
self.cas = {}
for nombre in nombres_ca:
if nombre not in nombres_ca_existentes:
ca = pclases.ConsumoAdicional(nombre = nombre,
cantidad = nombres_ca[nombre][0],
unidad = nombres_ca[nombre][1],
formulacionID = self.objeto.id,
productoCompraID = None)
pclases.Auditoria.nuevo(ca, self.usuario, __file__)
for productoVenta in pclases.ProductoVenta.select(pclases.ProductoVenta.q.camposEspecificosRolloID != None):
ca.addProductoVenta(productoVenta)
self.cas[nombre] = ca
else:
self.cas[nombre] = [ca for ca in self.objeto.consumosAdicionales if ca.nombre == nombre][0]
def refinar_resultados_busqueda(self, resultados):
"""
Muestra en una ventana de resultados todos los
registros de "resultados".
Devuelve el id (primera columna de la ventana
de resultados) de la fila seleccionada o None
si se canceló.
"""
filas_res = []
for r in resultados:
filas_res.append((r.id, r.codigo, r.descripcion))
idproducto = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione producto',
cabeceras = ('ID Interno', 'Código', 'Descripción'))
if idproducto < 0:
return None
else:
return idproducto
def buscar_producto(self):
"""
Muestra una ventana de búsqueda y a continuación los
resultados. El objeto seleccionado se devolverá
a no ser que se pulse en Cancelar en
la ventana de resultados, en cuyo caso se deveulve
None.
"""
a_buscar = utils.dialogo_entrada("Introduzca código o descripción de producto:")
if a_buscar != None:
try:
ida_buscar = int(a_buscar)
except ValueError:
ida_buscar = -1
criterio = pclases.OR(
pclases.ProductoCompra.q.codigo.contains(a_buscar),
pclases.ProductoCompra.q.descripcion.contains(a_buscar),
pclases.ProductoCompra.q.id == ida_buscar)
resultados = pclases.ProductoCompra.select(pclases.AND(
criterio,
pclases.ProductoCompra.q.obsoleto == False,
pclases.ProductoCompra.q.controlExistencias == True))
if resultados.count() > 1:
                ## Refine the results
idproducto = self.refinar_resultados_busqueda(resultados)
if idproducto == None:
return None
resultados = [pclases.ProductoCompra.get(idproducto)]
elif resultados.count() < 1:
                ## No search results
utils.dialogo_info('SIN RESULTADOS',
'La búsqueda no produjo resultados.\n'
'Pruebe a cambiar el texto buscado o '
'déjelo en blanco para ver una lista '
'completa.\n(Atención: Ver la lista '
'completa puede resultar lento si el '
'número de elementos es muy alto)',
padre = self.wids['ventana'])
return None
            ## A single result
return resultados[0]
else:
return "CANCELAR"
def buscar_mp(self, b):
"""
Muestra un cuadro de búsqueda de productos de compra.
Relaciona el seleccionado con el registro correspondiente
en función del botón pulsado.
"""
producto = self.buscar_producto()
if producto == "CANCELAR":
return
        nombre = b.name.replace('b_', '')
        if nombre not in self.cas:
            # Buttons for consumptions disabled in comprobar_registro
            # ("antiestatico", "precinto") have no record to update.
            return
        self.cas[nombre].productoCompra = producto
self.rellenar_widgets()
def add_consumo_por_producto(self, boton):
"""
Añade un consumo automático por producto.
"""
productos = self.pedir_productos_venta()
if productos:
producto_compra = self.pedir_producto_compra()
            if producto_compra != None:
nombre = self.pedir_nombre()
if nombre == None:
return
cantidad = self.pedir_cantidad()
if cantidad == None:
return
unidad = self.pedir_unidad(producto_compra)
if unidad == None:
return
nuevo_consumo_adicional = pclases.ConsumoAdicional(
formulacionID = self.objeto.id,
productoCompraID = producto_compra.id,
nombre = nombre,
cantidad = cantidad,
unidad = unidad)
pclases.Auditoria.nuevo(nuevo_consumo_adicional, self.usuario,
__file__)
for producto in productos:
nuevo_consumo_adicional.addProductoVenta(producto)
self.rellenar_consumos_adicionales_por_producto()
def drop_consumo_por_producto(self, boton):
"""
Elimina el consumo o consumos seleccionados en el TreeView.
"""
texto = """
Si ha seleccionado un consumo se eliminará el consumo completo.
Si seleccionó uno o varios productos, se eliminarán del consumo al
que pertenece, por lo que ya no empleará el material relacionado
cuando se fabriquen artículos del mismo.
¿Está seguro de querer continuar?
"""
model, paths = self.wids['tv_consumos'].get_selection().get_selected_rows()
if paths and utils.dialogo(titulo = "¿ELIMINAR?", texto = texto, padre = self.wids['ventana']):
for path in paths:
if model[path].parent == None:
id_consumo = model[path][-1]
consumo_adicional_por_producto = pclases.ConsumoAdicional.get(id_consumo)
try:
for p in consumo_adicional_por_producto.productosVenta:
consumo_adicional_por_producto.removeProductoVenta(p)
consumo_adicional_por_producto.destroy(ventana = __file__)
except psycopg_ProgrammingError, msg:
utils.dialogo_info(titulo = "ERROR: INFORME A LOS DESARROLLADORES",
texto = "Ocurrió un error al eliminar el consumo.\nDEBUG: Traza de la excepción:\n%s" % (msg),
padre = self.wids['ventana'])
else:
id_consumo = model[path].parent[-1]
idproductov = model[path][-1]
consumo_adicional_por_producto = pclases.ConsumoAdicional.get(id_consumo)
productov = pclases.ProductoVenta.get(idproductov)
consumo_adicional_por_producto.removeProductoVenta(productov)
self.rellenar_consumos_adicionales_por_producto()
def cambiar_cantidad(self, cell, path, texto):
"""
Cambia la cantidad del descuento adicional por producto.
"""
try:
cantidad = utils._float(texto)
except ValueError:
utils.dialogo_info(titulo = "FORMATO INCORRECTO",
texto = "El texto %s no es válido." % (texto),
padre = self.wids['ventana'])
return
model = self.wids['tv_consumos'].get_model()
if model[path].parent == None:
idconsumo = model[path][-1]
consumo = pclases.ConsumoAdicional.get(idconsumo)
consumo.cantidad = cantidad
self.rellenar_consumos_adicionales_por_producto()
def cambiar_unidad(self, cell, path, texto):
"""
Cambia la unidad de descuento para el descuento adicional por producto
"""
model = self.wids['tv_consumos'].get_model()
if model[path].parent == None:
idconsumo = model[path][-1]
consumo = pclases.ConsumoAdicional.get(idconsumo)
if comprobar_unidad(texto, consumo.cantidad):
consumo.unidad = texto
self.rellenar_consumos_adicionales_por_producto()
def pedir_producto(self):
"""
Solicita un código, nombre o descripcicón
de producto, muestra una ventana de resultados
coincidentes con la búsqueda y devuelve una
lista de ids de productos o [] si se cancela o
no se encuentra.
"""
productos = None
txt = utils.dialogo_entrada(texto = 'Introduzca código, nombre o descripción del geotextil:',
titulo = 'BUSCAR PRODUCTO VENTA',
padre = self.wids['ventana'])
if txt != None:
criterio = pclases.OR(pclases.ProductoVenta.q.codigo.contains(txt),
pclases.ProductoVenta.q.nombre.contains(txt),
pclases.ProductoVenta.q.descripcion.contains(txt))
criterio = pclases.AND(criterio, pclases.ProductoVenta.q.camposEspecificosRolloID != None)
prods = pclases.ProductoVenta.select(criterio)
productos = [p for p in prods]
return productos
def pedir_productos_venta(self):
"""
Muestra una ventana de búsqueda de un producto y
devuelve uno o varios objetos productos dentro de
una tupla o None si se cancela.
"""
productos = self.pedir_producto()
if productos == None:
return
if productos == []:
utils.dialogo_info(titulo = "NO ENCONTRADO",
texto = "Producto no encontrado",
padre = self.wids['ventana'])
return
filas = [(p.id, p.codigo, p.descripcion) for p in productos]
idsproducto = utils.dialogo_resultado(filas,
titulo = "SELECCIONE UNO O VARIOS PRODUCTOS",
padre = self.wids['ventana'],
cabeceras = ("ID", "Código", "Descripción"),
multi = True)
        if idsproducto and idsproducto != [-1]:
return [pclases.ProductoVenta.get(ide) for ide in idsproducto]
def pedir_producto_compra(self):
"""
Devuelve UN producto de compra obtenido a partir
de una búsqueda, etc.
"""
producto = None
a_buscar = utils.dialogo_entrada(titulo = "BUSCAR MATERIAL",
texto = "Introduzca texto a buscar en productos de compra:",
padre = self.wids['ventana'])
if a_buscar != None:
resultados = utils.buscar_productos_compra(a_buscar)
if resultados.count() > 1:
                ## Refine the results:
filas_res = []
for r in resultados:
filas_res.append((r.id, r.codigo, r.descripcion))
idproducto = utils.dialogo_resultado(filas_res,
titulo = 'Seleccione producto',
cabeceras = ('ID Interno', 'Código', 'Descripción'),
padre = self.wids['ventana'])
if idproducto < 0:
return
producto = pclases.ProductoCompra.get(idproducto)
                # id is the primary key, so this lookup should return a single product
elif resultados.count() < 1:
                ## The search produced no results.
utils.dialogo_info('SIN RESULTADOS',
'La búsqueda no produjo ningún resultado.\nIntente una '
'búsqueda menos restrictiva usando un texto más corto.',
padre = self.wids['ventana'])
return None
else:
producto = resultados[0]
return producto
def pedir_nombre(self):
"""
Pide un texto y lo devuelve. Sin más.
"""
return utils.dialogo_entrada(titulo = "NOMBRE CONSUMO",
texto = "Introduzca un nombre identificativo si lo desea:",
padre = self.wids['ventana'])
def pedir_cantidad(self):
"""
Pide una cantidad que debe ser un número float.
"""
res = utils.dialogo_entrada(titulo = "CANTIDAD",
texto = "Introduzca la cantidad a consumir del producto de compra (sin unidades):",
padre = self.wids['ventana'])
try:
res = utils._float(res)
except ValueError:
utils.dialogo_info(titulo = "CANTIDAD INCORRECTA",
texto = "El texto introducido %s no es correcto." % (res),
padre = self.wids['ventana'])
res = None
return res
def pedir_unidad(self, productoCompra):
"""
Pide la unidad del descuento y comprueba que sea correcta.
Recibe el producto de compra para mostrar el valor por defecto.
"""
txt = """
Introduzca las unidades para el descuento de materiales.
Por ejemplo:
% (porcentaje en las unidades del material
por peso de producto terminado).
ud / 5 ud (unidad del material por cada 5 unidades
de producto terminado).
m / kg (metro de material por kilo de producto).
kg / 5.5 m (kg de material por cada 5.5 metros de producto).
        NOTA: La unidad del material que se descuenta debe ser la misma
que consta en catálogo, pedidos de compra, etc.
No use puntos en las unidades de medida.
"""
defecto = "%s / ud" % (productoCompra.unidad.replace(".", " "))
res = utils.dialogo_entrada(titulo = "INTRODUZCA UNIDAD",
texto = txt,
padre = self.wids['ventana'],
valor_por_defecto = defecto)
seguir = True
while seguir and not comprobar_unidad(res):
seguir = utils.dialogo(titulo = "FORMATO INCORRECTO",
texto = "El texto introducido %s no tiene el formato correcto.\n\n\n¿Desea volver a intentarlo?" % (res),
padre = self.wids['ventana'])
if seguir == False:
res = None
return res
def cambiar_producto_compra(self, boton):
"""
Cambia por otro el producto que se consume en el registro de
consumo adicional seleccionado.
"""
producto_compra = self.buscar_producto()
if producto_compra == "CANCELAR":
return
sel = self.wids['tv_consumos'].get_selection()
model, paths = sel.get_selected_rows()
for path in paths:
if model[path].parent == None:
id_consumo = model[path][-1]
else:
id_consumo = model[path].parent[-1]
consumo_adicional_por_producto = pclases.ConsumoAdicional.get(
id_consumo)
consumo_adicional_por_producto.productoCompra = producto_compra
self.rellenar_consumos_adicionales_por_producto()
def add_producto_a_consumo(self, boton):
"""
Añade un producto de venta a un consumo existente.
"""
model, paths = self.wids['tv_consumos'].get_selection().get_selected_rows()
if paths != []:
productos = self.pedir_productos_venta()
if productos:
for path in paths:
if model[path].parent == None:
id_consumo = model[path][-1]
else:
id_consumo = model[path].parent[-1]
consumo_adicional_por_producto = pclases.ConsumoAdicional.get(id_consumo)
for producto in productos:
if producto not in consumo_adicional_por_producto.productosVenta:
consumo_adicional_por_producto.addProductoVenta(producto)
else:
utils.dialogo_info(titulo = "YA EXISTE",
texto = "El producto %s ya consume según la fórmula de %s.\n\nPulse «Aceptar» para continuar." % (producto.descripcion, consumo_adicional_por_producto.nombre),
padre = self.wids['ventana'])
self.rellenar_consumos_adicionales_por_producto()
else:
utils.dialogo_info(titulo = "SELECCIONE UN CONSUMO",
texto = "Debe seleccionar un consumo existente.",
padre = self.wids['ventana'])
def comprobar_unidad(txt, cantidadpc = 1.0):
"""
Comprueba si la unidad de descuento "txt" cumple con
alguna de las unidades interpretables por el programa.
cantidadpc se usa para agregarlo a la parte "unidad" y
chequear todo el conjunto en el proceso.
"""
res = False
txt = "%s %s" % (utils.float2str(cantidadpc, 5), txt)
txt = txt.strip()
    # TODO: For now it is done this way because I do not know how or where else to store it:
    regexp_porcentaje = re.compile(r"^-?\d+[\.,]?\d*\s*%$")
    regexp_fraccion = re.compile(r"-?\d+[\.,]?\d*\s*\w*\s*/\s*-?\d*[\.,]?\d*\s*\w+")
if regexp_porcentaje.findall(txt) != []:
cantidad = parsear_porcentaje(txt) # @UnusedVariable
res = True
elif regexp_fraccion.findall(txt) != []:
cantidad, unidad, cantidad_pv, unidad_pv = parsear_fraccion(txt) # @UnusedVariable
res = True
return res
def parsear_porcentaje(txt):
"""
Devuelve la cantidad del porcentaje como fracción de 1.
"""
    regexp_float = re.compile(r"^-?\d+[\.,]?\d*")
num = regexp_float.findall(txt)[0]
return utils._float(num)
def parsear_fraccion(txt):
"""
Devuelve la cantidad de producto compra y unidad que hay que descontar
por cada cantidad de producto venta y unidad (que también se devuelven).
Es necesario que venga la cantidadpc aunque en el registro, en el campo
"unidad" no aparece.
"""
    regexp_float = re.compile(r"-?\d+[\.,]?\d*")
    regexp_unidad = re.compile(r"\w+")
cantidades = regexp_float.findall(txt)
if len(cantidades) == 1:
cantidadpc = cantidades[0]
cantidadpv = '1'
txt = txt.replace(cantidadpc, "")
elif len(cantidades) == 2:
cantidadpc, cantidadpv = cantidades[0:2]
txt = txt.replace(cantidadpc, "")
txt = txt.replace(cantidadpv, "")
else:
cantidadpc = '1'
cantidadpv = '1'
txt = txt.replace("/", "")
unidadpc, unidadpv = regexp_unidad.findall(txt)[0:2]
cantidadpc = utils._float(cantidadpc)
cantidadpv = utils._float(cantidadpv)
return cantidadpc, unidadpc, cantidadpv, unidadpv
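# Illustrative check (a sketch, not part of the upstream module) of how the
# parsers above read typical unit strings; assumes utils._float accepts both
# '.' and ',' as decimal separators.
def _demo_parseo():
    assert parsear_porcentaje("0.3 %") == 0.3
    assert parsear_fraccion("0.414 k / 5.5 m") == (0.414, "k", 5.5, "m")
    assert parsear_fraccion("2 ud / ud") == (2.0, "ud", 1.0, "ud")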
if __name__ == '__main__':
f = FormulacionGeotextiles()
|
pacoqueen/ginn
|
ginn/formularios/formulacion_geotextiles.py
|
Python
|
gpl-2.0
| 34,430
|
#!/usr/bin/python
# plot interval CSV output from perf/toplev
# perf stat -I1000 -x, -o file ...
# toplev -I1000 -x, -o file ...
# compareintervalplot.py file1 file2 ... (two or more files)
# delimiter must be ,
# this is for data that is not normalized
# TODO: move legend somewhere else where it doesn't overlap?
from __future__ import division
import csv
import os
import sys
import matplotlib.pyplot as plt
import collections
import argparse
import shutil
import numpy
import time
import config
csv_file_handle = {}
value = dict()
def plot(files, pstyle = 'ggplot', output=None, seq=None, xkcd=False):
global csv_file_handle
global value
csv_file_handle ={}
# op_sum = {'1':['L1-dcache-loads','L1-dcache-stores','L1-dcache-prefetches','L1-icache-loads'],
# '2':['L1-dcache-load-misses','L1-dcache-store-misses','L1-dcache-prefetch-misses','L1-icache-load-misses'],
# '3':[ 'LLC-loads','LLC-stores','LLC-prefetches'],
# '4':['LLC-load-misses','LLC-store-misses','LLC-prefetch-misses'],
# '5':['dTLB-loads','dTLB-stores','iTLB-loads'],
# '6':['dTLB-load-misses','dTLB-store-misses','iTLB-load-misses'],
# 'Bandwidth':['offcore_response_corewb_local_dram_0','offcore_response_prefetch_any_llc_miss_0','LLC-prefetches','cache-misses']}
#
# op_div = [['cache-references','uops_retired_any'],['cache-misses','uops_retired_any'], ['instructions','cycles'],
# ['cache-misses','cache-references']]
#enable for i7
op_sum = {
'contention': ['cache-misses'],
'band': ['cache-references', 'cache-misses'],
'total_bandwidth': ['cache-references'],
}
op_diff ={}
op_div= [['cache-references','uops_retired_any'],['cache-misses','uops_retired_any'],
['instructions','cycles'],['cache-misses','cache-references'],['cache-references','cycles'], ['cache-misses','cycles']]
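    # Annotation (added for clarity, not in the original source): in op_div,
    # instructions/cycles is IPC, cache-misses/cache-references is the overall
    # miss ratio, and cache-misses/cycles approximates misses per cycle.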
print pstyle
if pstyle:
try:
from mpltools import style
style.use( pstyle)
except ImportError:
print "Need mpltools for setting styles (pip install mpltools)"
import gen_level
try:
import brewer2mpl
all_colors = brewer2mpl.get_map('Paired', 'Qualitative', 12).hex_colors
except ImportError:
print "Install brewer2mpl for better colors (pip install brewer2mpl)"
all_colors = ('green','orange','red','blue',
'black','olive','purple','#6960EC', '#F0FFFF',
'#728C00', '#827B60', '#F87217', '#E55451', # 16
'#F88017', '#C11B17', '#17BFC2', '#C48793') # 20
cur_colors = collections.defaultdict(lambda: all_colors)
assigned = dict() # assigned= {'mbw-cache-references': [0,2345,..], 'soplex-cache-references': [32,532,12,..], ..} Events and values for all processes
    if len(files) < 2:
        print "More than one file needed. Exiting!"
        sys.exit(1)
    for fname in files:
        processname = fname.split("/")[-1]
        if fname:
            try:
                inf = open(fname, "r")
            except IOError:
                return
        else:
            inf = sys.stdin
csv_file_handle[processname] = csv.reader(inf)
timestamps = dict()
val = ""
first_time = True
event_list = [] # event_list= [cache-references, instructions,..]
for processname,rc in csv_file_handle.items():
for r in rc:
            if not config.burst or len(r) != 2:
                # only two-column burst rows (value, event) are handled here;
                # anything else would leave val/event undefined below
                continue
            ts = 0
            val, event = r
            if first_time and event not in event_list:
                event_list.append(event)
            event = str(processname)+"-"+event
if event not in assigned:
level = gen_level.get_level(event)
assigned[event] = cur_colors[level][0]
cur_colors[level] = cur_colors[level][1:]
if len(cur_colors[level]) == 0:
cur_colors[level] = all_colors
value[event] = []
timestamps[event] = []
timestamps[event].append(float(ts))
try:
value[event].append(float(val.replace("%","")))
except ValueError:
value[event].append(0.0)
first_time = False
levels = dict()
for j in assigned.keys():
levels[gen_level.get_level(j)] = True
if xkcd:
try:
plt.xkcd()
except NameError:
print "Please update matplotlib. Cannot enable xkcd mode."
#print value
if config.normalize:
for key in value:
entries= value[key]
normalized_values = [numpy.float64(entry)/max(entries) for entry in entries]
value[key] = normalized_values
if seq:
os.umask(0000)
if os.path.exists(seq):
shutil.rmtree(seq)
os.makedirs(seq)
else:
os.makedirs(seq)
n = 1
print "Assigned Keys: ", assigned.keys()
#print "event list: ", event_list
for l in levels.keys():
ax = plt.subplot(len(levels), 1, n)
if val.find('%') >= 0:
ax.set_ylim(0, 100)
t = []
for j in event_list:
print j, gen_level.get_level(j), l
for processname in csv_file_handle:
if gen_level.get_level(j) == l:
t.append(j)
ax.plot(value[str(processname)+"-"+j], label = str(processname)+"-"+j )
if seq:
leg = ax.legend( loc='upper left')
leg.get_frame().set_alpha(0.5)
plt.savefig(seq+"/"+j)
plt.cla()
leg = ax.legend(t, loc='upper left')
leg.get_frame().set_alpha(0.5)
n += 1
if len(op_diff) > 0:
for key, components in op_diff.items():
print components
#print [(value[component]) for component in components]
#print [len(value[component]) for component in components]
diff_value={}
if key =='contention' :
print "KEY: ", key
ax1 = plt.subplot(2,1,1)
ax2 = plt.subplot(2,1,2)
else:
ax = plt.subplot(1, 1, 1)
for processname in csv_file_handle:
diff_value[processname]=[x-y for x,y in zip(value[str(processname)+"-"+components[0]],value[str(processname)+"-"+components[1]])]
#print sum_value
#print "DONE!!"
# print len(sum_value)
# print len(timestamps[components[0]])
                if key != 'contention':
ax.plot(diff_value[processname], label = str(processname)+"-"+'-'.join(components))
else:
ax1.plot(diff_value[processname], label = str(processname)+"-"+'-'.join(components))
if seq:
                if key != 'contention':
leg = ax.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
else:
leg = ax1.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
if key =='contention':
#plot the drop in performance of each process:
perf_drop = compute_contention(diff_value)
for process, drop in perf_drop.items():
ax2.plot(drop, label="Drop in perf of "+str(process))
#change to a function later
avg_perf_drop = sum(drop)/len(drop)
                    f_handle = open(config.execution_time_dir+'/estimateddrop-'+process+'-'+
                            ''.join([p if p != process else '' for p,d in perf_drop.items()])+'.log','w+')
f_handle.write(str(avg_perf_drop))
f_handle.close()
leg=ax2.legend(loc= 'upper left')
leg.get_frame().set_alpha(0.5)
plt.savefig(seq+"/"+'+'.join(components))
plt.cla()
if len(op_sum) > 0:
for key, components in op_sum.items():
print components
#print [(value[component]) for component in components]
#print [len(value[component]) for component in components]
sum_value={}
if key =='contention' :
print "KEY: ", key
ax1 = plt.subplot(2,1,1)
ax2 = plt.subplot(2,1,2)
else:
ax = plt.subplot(1, 1, 1)
for processname in csv_file_handle:
sum_value[processname]=sum(map(numpy.array, [value[str(processname)+"-"+component] for component in components]))
#print sum_value
#print "DONE!!"
# print len(sum_value)
# print len(timestamps[components[0]])
                if key != 'contention':
ax.plot(sum_value[processname], label = str(processname)+"-"+'+'.join(components))
else:
ax1.plot(sum_value[processname], label = str(processname)+"-"+'+'.join(components))
if seq:
                if key != 'contention':
leg = ax.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
else:
leg = ax1.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
if key =='contention':
#plot the drop in performance of each process:
perf_drop = compute_contention(sum_value)
#print perf_drop
for process, drop in perf_drop.items():
ax2.plot(drop, label="Drop in perf of "+str(process))
#change to a function later
if len(drop)>0:
avg_perf_drop = sum(drop)/len(drop)
                        f_handle = open(config.execution_time_dir+'/estimateddrop-'+process+'-'+
                                ''.join([p if p != process else '' for p,d in perf_drop.items()])+'.log','w+')
f_handle.write(str(avg_perf_drop))
f_handle.close()
leg=ax2.legend(loc= 'upper left')
leg.get_frame().set_alpha(0.5)
elif key =='total_bandwidth':
plt.cla()
ax = plt.subplot(1, 1, 1)
total_bw = total_bandwidth(sum_value)
ax.plot(total_bw['total'], label = 'Total Bandwidth')
leg = ax.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
plt.savefig(seq+"/"+key+": "+'+'.join(components))
plt.cla()
if len(op_div) > 0:
ax = plt.subplot(1, 1, 1)
for components in op_div:
print components
for processname in csv_file_handle:
ax.plot([numpy.float64(x)/y for x,y in zip(value[str(processname)+"-"+components[0]],value[str(processname)+"-"+components[1]])], label= str(processname)+"-"+'/'.join(components))
if seq:
leg = ax.legend( loc='upper left')
leg.get_frame().set_alpha(0.5)
plt.savefig(seq+"/"+'_'.join(components))
plt.cla()
plt.xlabel('Time')
if val.find('%') >= 0:
plt.ylabel('Bottleneck %')
else:
plt.ylabel("Counter value")
if output:
plt.savefig(output)
else:
if not seq:
plt.show()
def compute_drop(all_bw):
    '''
    :param all_bw: dictionary containing the bandwidth usage of each process, e.g. {'process1': [..], 'process2': [..]}
    :return: a dictionary with the estimated drop in performance of each application
    '''
max_Bandwidth = 4.3*10**8
drop_in_performance = {}
for processname in csv_file_handle:
#In each iteration, compute the drop in performance for processname
percentage_share= {}
for current_process,bw_usage_list in all_bw.items():
# For processname, compute the percentage share of unused bandwidth by all other processes
if current_process == processname:
continue
percentage_share[current_process] = []
for i in xrange(0, len(all_bw[processname])):
if i< len(bw_usage_list):
percentage_share[current_process].append(bw_usage_list[i]/(max_Bandwidth - all_bw[processname][i]))
#else:
# percentage_share[current_process].append(0)
drop_in_performance[processname] = sum(map(numpy.array, [percentage_share[process] for process in percentage_share]))
return drop_in_performance
def total_bandwidth(all_bw):
bw_used = {}
list_total_bw_used = []
for processname in csv_file_handle:
for i in xrange(0, len(all_bw[processname])):
list_total_bw_used.append (sum([all_bw[remaining_process][i]
if i< len(all_bw[remaining_process])
else 0 for remaining_process in csv_file_handle]))
break
bw_used['total'] = list_total_bw_used
return bw_used
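# Worked example for total_bandwidth() above (hypothetical numbers; which
# process fixes the interval count depends on Python 2 dict ordering):
#   csv_file_handle = {'mbw': ..., 'soplex': ...}   # only the keys are used
#   all_bw = {'mbw': [10, 20], 'soplex': [5]}
#   -> iterating 'mbw' first gives {'total': [15, 20]}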
def compute_contention(all_bw):
    '''
    :param all_bw: dictionary containing the bandwidth usage of each process, e.g. {'process1': [..], 'process2': [..]}
    :return: a dictionary with the estimated bandwidth pressure experienced by each application
    '''
max_Bandwidth = 9*10**8 #for a 5 second sampling
increase_in_performance = {}
no_of_corunners = len(csv_file_handle)
# print "Co Runners: ", no_of_corunners
pressure_experienced= {}
for processname in csv_file_handle:
#In each iteration, compute the drop in performance for processname
pressure_experienced[processname] = []
for i in xrange(0, len(all_bw[processname])):
current_no_of_corunners = no_of_corunners
list_total_bw_used = [all_bw[remaining_process][i] if i< len(all_bw[remaining_process]) else 0 for remaining_process in csv_file_handle]
for val in list_total_bw_used:
if val == 0:
current_no_of_corunners -=1
total_bw_used=sum(list_total_bw_used)
bw_free = max_Bandwidth -total_bw_used
normalized_total_intensity_of_pressure_in_bw= total_bw_used/max_Bandwidth
normalized_intensity_of_my_bw_usage = all_bw[processname][i]/max_Bandwidth
total_cache_misses = sum([value[str(remaining_process)+"-"+'cache-misses'][i] if i< len(value[str(remaining_process)+"-"+'cache-misses']) else 0 for remaining_process in csv_file_handle])
normalized_cache_misses = total_cache_misses/max_Bandwidth
if processname == 'memcached':
pressure_experienced[processname].append( normalized_total_intensity_of_pressure_in_bw )
#else:
# percentage_share[current_process].append(0)
#print pressure_experienced
return pressure_experienced
if __name__ =='__main__':
p = argparse.ArgumentParser(
usage='plot interval CSV output from perf stat/toplev',
description='''
perf stat -I1000 -x, -o file ...
toplev -I1000 -x, -o file ...
intervalplot.py file (or stdin)
delimiter must be ,
this is for data that is not normalized.''')
p.add_argument('--xkcd', action='store_true', help='enable xkcd mode')
p.add_argument('--style', help='set mpltools style (e.g. ggplot)')
    p.add_argument('files', help='CSV files to plot (or stdin)', nargs='*')
p.add_argument('--output', '-o', help='Output to file. Otherwise show.',
nargs='?')
p.add_argument('--seq', help = 'Save the plots as individual images', nargs='?')
p.add_argument('--auto', action='store_true', help='enable auto mode and plot from config files automatically')
args = p.parse_args()
#print args
if args.auto:
for key,values in config.map_pid_filename.items():
print "Plot and Store: ", values['filename']
filename = config.csv_dir+values['filename']
result_folder = config.result_dir+values['filename']
print result_folder
plot(filename, seq=result_folder)
else:
plot(args.files, args.style, args.output, args.seq, args.xkcd)
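# Illustrative invocation (flag names from the parser above; the file names
# are hypothetical):
#   python compareintervalplot.py mbw.csv soplex.csv --seq results --style ggplot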
__author__ = 'navaneeth'
|
navaneethrameshan/PMU-burst
|
compareintervalplot.py
|
Python
|
gpl-2.0
| 16,603
|
import os
from twisted.python.compat import iteritems
from landscape.lib.fs import read_text_file
from landscape.constants import APT_PREFERENCES_SIZE_LIMIT
from landscape.client.monitor.plugin import DataWatcher
class AptPreferences(DataWatcher):
"""
Report the system APT preferences configuration.
"""
persist_name = "apt-preferences"
message_type = "apt-preferences"
message_key = "data"
run_interval = 900 # 15 minutes
scope = "package"
size_limit = APT_PREFERENCES_SIZE_LIMIT
def __init__(self, etc_apt_directory="/etc/apt"):
self._etc_apt_directory = etc_apt_directory
def get_data(self):
"""Return a C{dict} mapping APT preferences files to their contents.
If no APT preferences configuration is set at all on the system, then
simply return C{None}
"""
data = {}
preferences_filename = os.path.join(self._etc_apt_directory,
u"preferences")
if os.path.exists(preferences_filename):
data[preferences_filename] = read_text_file(preferences_filename)
preferences_directory = os.path.join(self._etc_apt_directory,
u"preferences.d")
if os.path.isdir(preferences_directory):
for entry in os.listdir(preferences_directory):
filename = os.path.join(preferences_directory, entry)
if os.path.isfile(filename):
data[filename] = read_text_file(filename)
if data == {}:
return None
item_size_limit = self.size_limit // len(data.keys())
for filename, contents in iteritems(data):
if len(filename) + len(contents) > item_size_limit:
truncated_contents_size = item_size_limit - len(filename)
data[filename] = data[filename][0:truncated_contents_size]
return data
def run(self):
return self.exchange(urgent=True)
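# Minimal usage sketch (illustrative only; in a real deployment the Landscape
# monitor registers and schedules this plugin rather than calling it directly):
#   plugin = AptPreferences(etc_apt_directory="/etc/apt")
#   print(plugin.get_data())   # dict of file paths to contents, or None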
|
CanonicalLtd/landscape-client
|
landscape/client/monitor/aptpreferences.py
|
Python
|
gpl-2.0
| 2,007
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from argparse import ArgumentParser
from .core import Core
def getopt(argv):
parser = ArgumentParser(description='Another webui for youtube-dl')
parser.add_argument('-c', '--config', metavar="CONFIG_FILE", help="config file")
parser.add_argument('--host', metavar="ADDR", help="the address server listens on")
parser.add_argument('--port', metavar="PORT", help="the port server listens on")
    return vars(parser.parse_args(argv))
def main(argv=None):
from os import getpid
print("pid is {}".format(getpid()))
print("-----------------------------------")
cmd_args = getopt(argv)
core = Core(cmd_args=cmd_args)
core.start()
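# Illustrative invocation (option names from getopt() above; assumes the
# package is wired to call main(), e.g. via a __main__ module):
#   python -m youtube_dl_webui -c config.json --host 0.0.0.0 --port 8080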
|
d0u9/youtube-dl-webui
|
youtube_dl_webui/__init__.py
|
Python
|
gpl-2.0
| 755
|
# Standard Modules
import apt
from datetime import datetime
import decimal
import json
import os
import Queue
import random
import socket
import subprocess
import sys
import traceback
# Kodi Modules
import xbmc
import xbmcaddon
import xbmcgui
# Custom modules
__libpath__ = xbmc.translatePath(os.path.join(xbmcaddon.Addon().getAddonInfo('path'), 'resources','lib'))
sys.path.append(__libpath__)
import comms
import simple_scheduler as sched
import OSMC_Backups
from CompLogger import comprehensive_logger as clog
__addon__ = xbmcaddon.Addon()
__addonid__ = __addon__.getAddonInfo('id')
__scriptPath__ = __addon__.getAddonInfo('path')
__setting__ = __addon__.getSetting
__image_file__ = os.path.join(__scriptPath__,'resources','media','update_available.png')
DIALOG = xbmcgui.Dialog()
def lang(id):
san = __addon__.getLocalizedString(id).encode( 'utf-8', 'ignore' )
return san
def log(message, label = ''):
logmsg = '%s : %s - %s ' % (__addonid__ , str(label), str(message.encode( 'utf-8', 'ignore' )))
xbmc.log(msg = logmsg, level=xbmc.LOGDEBUG)
@clog(log)
def exit_osmc_settings_addon():
address = '/var/tmp/osmc.settings.sockfile'
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(address)
sock.sendall('exit')
sock.close()
return 'OSMC Settings addon called to exit'
def get_hardware_prefix():
''' Returns the prefix for the hardware type. rbp, rbp2, etc '''
    with open('/proc/cmdline', 'r') as f:
        # strip the trailing newline so the prefix can be safely concatenated
        # into package names later on
        line = f.readline().rstrip('\n')
    settings = line.split(' ')
    for setting in settings:
        if setting.startswith('osmcdev='):
            return setting[len('osmcdev='):]
    return None
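# Illustrative example (hypothetical kernel command line): for
#   'coherent_pool=2M osmcdev=rbp2 console=ttyAMA0'
# get_hardware_prefix() returns 'rbp2'.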
class Monitah(xbmc.Monitor):
def __init__(self, **kwargs):
super(Monitah, self).__init__()
self.parent_queue = kwargs['parent_queue']
@clog(log)
def onAbortRequested(self):
msg = json.dumps(('kill_yourself', {}))
self.parent_queue.put(msg)
@clog(log)
def onSettingsChanged(self):
msg = json.dumps(('update_settings', {}))
self.parent_queue.put(msg)
class Main(object):
''' This service allows for the checking for new updates, then:
- posts a notification on the home screen to say there is an update available, or
- calls for the download of the updates
- calls for the installation of the updates
- restarts Kodi to implement changes
The check for updates is done using the python-apt module. This module must be run as root, so is being called in
external scripts from the command line using sudo. The other script communicates with the update service using a socket file.
'''
# MAIN METHOD
def __init__(self):
self.first_run = True
# set the hardware prefix
self.hw_prefix = get_hardware_prefix()
# list of packages that require an external update
self.EXTERNAL_UPDATE_REQUIRED_LIST = [
"mediacenter",
"lirc-osmc",
"eventlircd-osmc",
"libcec-osmc",
"dbus",
"dbus-x11"
]
# list of packages that may break compatibility with addons and databases.
self.UPDATE_WARNING = False
self.UPDATE_WARNING_LIST = [
"-mediacenter-osmc",
]
# Items that start with a hyphen should have the hardware prefix attached
self.UPDATE_WARNING_LIST = [(str(self.hw_prefix) + x) if x[0] =='-' else x for x in self.UPDATE_WARNING_LIST]
log('UPDATE_WARNING_LIST: %s' % self.UPDATE_WARNING_LIST)
# the time that the service started
self.service_start = datetime.now()
        # dictionary containing the permissible actions (communicated from the child apt scripts)
        # and the corresponding methods in the parent
self.action_dict = {
'apt_cache update complete' : self.apt_update_complete,
'apt_cache update_manual complete' : self.apt_update_manual_complete,
'apt_cache commit complete' : self.apt_commit_complete,
'apt_cache fetch complete' : self.apt_fetch_complete,
'progress_bar' : self.progress_bar,
'update_settings' : self.update_settings,
'update_now' : self.update_now,
'user_update_now' : self.user_update_now,
'kill_yourself' : self.kill_yourself,
'settings_command' : self.settings_command,
'apt_error' : self.apt_error,
'apt_action_list_error' : self.apt_action_list_error,
'action_list' : self.action_list,
'apt_cache action_list complete' : self.action_list_complete,
'pre_backup_complete' : self.pre_backup_complete,
}
# queue for communication with the comm and Main
self.parent_queue = Queue.Queue()
self.randomid = random.randint(0,1000)
self.EXTERNAL_UPDATE_REQUIRED = 1
# create socket, listen for comms
self.listener = comms.communicator(self.parent_queue, socket_file='/var/tmp/osmc.settings.update.sockfile')
self.listener.start()
        # grab the settings, saving them into a dict called self.s
self.update_settings()
# a class to handle scheduling update checks
self.scheduler = sched.SimpleScheduler(self.s)
log(self.scheduler.trigger_time, 'trigger_time')
# this holding pattern holds a function that represents the completion of a process that was put on hold
# while the user was watching media or the system was active
self.function_holding_pattern = False
# monitor for identifying addon settings updates and kodi abort requests
self.monitor = Monitah(parent_queue = self.parent_queue)
# window onto which to paste the update notification
self.window = xbmcgui.Window(10000)
# property which determines whether the notification should be pasted to the window
self.window.setProperty('OSMC_notification','false')
# ControlImage(x, y, width, height, filename[, aspectRatio, colorDiffuse])
self.update_image = xbmcgui.ControlImage(50, 1695, 175, 75, __image_file__)
self.try_image_position_again = False
self.try_count = 0
self.position_icon()
self.window.addControl(self.update_image)
self.update_image.setVisibleCondition('[SubString(Window(Home).Property(OSMC_notification), true, left)]')
# self.window.setProperty('OSMC_notification', 'true') # USE THIS TO TEST THE UPDATE_ICON
# this flag is present when updates have been downloaded but the user wants to choose when to install using
# the manual control in the settings
self.block_update_file = '/var/tmp/.suppress_osmc_update_checks'
# if the file is present, then suppress further update checks and show the notification
if os.path.isfile(self.block_update_file):
self.skip_update_check = True
            # if the user has suppressed icon notification of updates and has chosen not to install the updates,
            # it's their own damned fault if OSMC never gets updated
if not self.s['suppress_icon']:
self.window.setProperty('OSMC_notification', 'true')
else:
self.skip_update_check = False
# check for the external update failed
fail_check_file = '/var/tmp/.osmc_failed_update'
if os.path.isfile(fail_check_file):
with open(fail_check_file, 'r') as f:
package = f.readline()
ok = DIALOG.ok(lang(32087), lang(32088) % package, '', lang(32089))
            try:
                os.remove(fail_check_file)
            except OSError:
                pass
self.freespace_supressor = 172200
self.freespace_remedy = 'reboot' # change this to 'apt' to give the user the option to clean the apt files
# keep alive method
self._daemon()
# MAIN METHOD
@clog(log, nowait=True)
def _daemon(self):
self.keep_alive = True
count = 0 # FOR TESTING ONLY
while self.keep_alive:
# periodic announcement to confirm the service is alive
# FOR TESTING ONLY
if not count % 100: # FOR TESTING ONLY
xml = xbmc.getInfoLabel('Window.Property(xmlfile)')
log('blurp %s - %s' % (self.randomid, xml)) # FOR TESTING ONLY
count += 1 # FOR TESTING ONLY
# FOR TESTING ONLY
# freespace checker, (runs 5 minutes after boot)
self.automatic_freespace_checker()
# check the scheduler for the update trigger
if self.scheduler.check_trigger():
self.update_now()
log(self.scheduler.trigger_time, 'trigger_time')
# check the action queue
self.check_action_queue()
# check the holding pattern, call item in holding pattern
if self.function_holding_pattern:
self.function_holding_pattern()
# try to position the icon again, ubiquifonts may not have had time to post the screen height and width
# to Home yet.
if self.try_image_position_again:
self.position_icon()
# check for an early exit
if not self.keep_alive: break
# this controls the frequency of the instruction processing
xbmc.sleep(500)
self.exit_procedure()
# HOLDING PATTERN METHOD
@clog(log, nowait=True)
def holding_pattern_update(self):
check, _ = self.check_update_conditions()
if check:
self.function_holding_pattern = False
self.user_update_now()
# HOLDING PATTERN METHOD
@clog(log)
def holding_pattern_fetched(self, bypass=False):
# stay in the holding pattern until the user returns to the Home screen
if 'Home.xml' in xbmc.getInfoLabel('Window.Property(xmlfile)') or bypass:
# if there is an update warning (for a major version change in Kodi) then alert the user
if self.UPDATE_WARNING:
confirm_update = self.display_update_warning()
if not confirm_update:
# remove the function from the holding pattern
self.function_holding_pattern = False
# skip all future update checks (the user will have to run the check for updates manually.)
self.skip_future_update_checks()
return 'User declined to update major version of Kodi, skipping future update checks'
self.function_holding_pattern = False
if not self.EXTERNAL_UPDATE_REQUIRED:
install_now = DIALOG.yesno(lang(32072), lang(32073), lang(32074))
if install_now:
self.call_child_script('commit')
return 'Called child script - commit'
else:
exit_install = DIALOG.yesno(lang(32072), lang(32075), lang(32076))
if exit_install:
exit_osmc_settings_addon()
xbmc.sleep(1000)
subprocess.Popen(['sudo', 'systemctl', 'start', 'manual-update'])
                    return 'Running external update procedure'
# if the code reaches this far, the user has elected not to install right away
# so we will need to suppress further update checks until the update occurs
# we put a file there to make sure the suppression carries over after a reboot
self.skip_future_update_checks()
if not self.s['suppress_icon']:
self.window.setProperty('OSMC_notification', 'true')
return 'skip_update_check= %s' % self.skip_update_check
# MAIN METHOD
def skip_future_update_checks(self):
''' Sets the conditions for future update checks to be blocked. '''
# create the file that will prevent further update checks until the updates have been installed
with open(self.block_update_file, 'w') as f:
f.write('d')
# trigger the flag to skip update checks
self.skip_update_check = True
# MAIN METHOD
@clog(log)
def exit_procedure(self):
# stop the listener
self.listener.stop()
# del self.listener
# log('listener cleaned up')
# del self.monitor
# log('del self.monitor')
# del self.update_image
# log('del self.update_image')
# del self.window
# log('del self.window')
# self.takedown_notification()
# log('notification control removed from window(10000)')
# MAIN METHOD
def check_action_queue(self):
''' Checks the queue for data, if present it calls the appropriate method and supplies the data '''
try:
# the only thing the script should be sent is a tuple ('instruction as string', data as dict),
# everything else is ignored
raw_comm_from_script = self.parent_queue.get(False)
# tell the queue that we are done with the task at hand
self.parent_queue.task_done()
# de-serialise the message into its original tuple
comm_from_script = json.loads(raw_comm_from_script)
log(comm_from_script, 'comm_from_script')
# process the information from the child scripts
if comm_from_script:
# retrieve the relevant method
method = self.action_dict.get(comm_from_script[0], False)
if method:
# call the appropriate method with the data
method(**comm_from_script[1])
else:
log(comm_from_script, 'instruction has no assigned method')
except Queue.Empty:
# the only exception that should be handled is when the queue is empty
pass
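    # Example of the queued wire format handled above (hypothetical payload):
    # a child script sends json.dumps(('progress_bar', {'percent': 40})),
    # which check_action_queue() dispatches as self.progress_bar(percent=40).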
# MAIN METHOD
@clog(log)
def check_update_conditions(self, connection_only=False):
''' Checks the users update conditions are met.
Checks for:
- active player
- idle time
- internet connectivity
connection_only, limits the check to just the internet connection
'''
if not connection_only:
result_raw = xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Player.GetActivePlayers", "id": 1 }')
result = json.loads(result_raw)
log(result, 'result of Player.GetActivePlayers')
players = result.get('result', False)
if players:
log('Update CONDITION : player playing')
return False, 'Update CONDITION : player playing'
idle = xbmc.getGlobalIdleTime()
        if self.s['update_on_idle'] and idle < 60:
            msg = 'Update CONDITION : idle time = %s' % idle
            return False, msg
return True, ''
# MAIN METHOD
@clog(log)
def takedown_notification(self):
try:
self.window.removeControl(self.update_image)
except Exception as e:
log(e, 'an EXCEPTION occurred')
# MAIN METHOD
@clog(log)
def call_child_script(self, action):
# check whether the install is an alpha version
if self.check_for_unsupported_version() == 'alpha': return
subprocess.Popen(['sudo', 'python','%s/apt_cache_action.py' % __libpath__, action])
# MAIN METHOD
def position_icon(self):
''' Sets the position of the icon.
Original image dimensions are 175 wide and 75 tall. This is for 1080p '''
self.try_image_position_again = False
pos_horiz = self.s['pos_x'] / 100.0
pos_vertic = self.s['pos_y'] / 100.0
width = 175 # as % of 1920: 0.0911458333333
height = 75 # as % of 1080: 0.0694444444444
width_pct = 0.0911458333333
height_pct = 0.0694444444444
# retrieve the skin height and width (supplied by ubiquifonts and stored in Home)
try:
screen_height = self.window.getProperty("SkinHeight")
screen_width = self.window.getProperty("SkinWidth")
except:
screen_height = 1080
screen_width = 1920
if screen_height == '':
if self.try_count >= 50:
self.try_count = 0
screen_height = 1080
screen_width = 1920
else:
self.try_image_position_again = True
self.try_count += 1
return
screen_height = int(screen_height)
screen_width = int(screen_width)
# determine new dimensions of the image
img_height = int(round(decimal.Decimal(screen_height * height_pct), 0))
img_width = int(round(decimal.Decimal(screen_width * width_pct), 0))
# determine the new coordinates of the image
adj_height = screen_height - img_height
adj_width = screen_width - img_width
pos_top = int(round(decimal.Decimal(adj_height * pos_vertic), 0))
pos_left = int(round(decimal.Decimal(adj_width * pos_horiz), 0))
log('=============================')
log(screen_height)
log(screen_width)
log(adj_height)
log(adj_width)
log(img_height)
log(img_width)
log(pos_top)
log(pos_left)
log('=============================')
# reposition the image
self.update_image.setPosition(pos_left, pos_top)
# rescale the image
self.update_image.setWidth(img_width)
self.update_image.setHeight(img_height)
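    # Worked example of the scaling above (assumed 1280x720 skin with pos_x
    # and pos_y both 50): img_width = round(1280 * 0.0911...) = 117 and
    # img_height = round(720 * 0.0694...) = 50, so the icon lands at roughly
    # (pos_left, pos_top) = (582, 335).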
# MAIN METHOD
@clog(log, maxlength=1000)
def update_settings(self):
''' Updates the settings for the service while the service is still running '''
if self.first_run:
''' Construct the settings dicionary '''
self.first_run = False
self.scheduler_settings = ['check_freq', 'check_weekday', 'check_day', 'check_time', 'check_hour', 'check_minute']
self.icon_settings = ['pos_x', 'pos_y']
self.on_upd = [lang(x) for x in [32057,32058,32095,32060,32061]]
# self.on_upd = [lang(x) for x in [32059,32061]] # 2==> 0, 4 ==> 1
self.s = {}
self.s['on_upd_detected'] = int( __setting__('on_upd_detected') )
# this is to deprecate the automatic installation of non-system updates
# changed to Download, and Prompt
if self.s['on_upd_detected'] == 4:
__addon__.setSetting('on_upd_detected', '2')
self.s['on_upd_detected'] = 2
self.s['check_freq'] = int( __setting__('check_freq') )
self.s['check_weekday'] = int(float( __setting__('check_weekday') ))
self.s['check_day'] = int(float( __setting__('check_day') ))
self.s['check_time'] = int(float( __setting__('check_time') ))
self.s['check_hour'] = int(float( __setting__('check_hour') ))
self.s['check_minute'] = int(float( __setting__('check_minute') ))
self.s['pos_x'] = int(float( __setting__('pos_x') ))
self.s['pos_y'] = int(float( __setting__('pos_y') ))
self.s['suppress_progress'] = True if __setting__('suppress_progress') == 'true' else False
self.s['suppress_icon'] = True if __setting__('suppress_icon') == 'true' else False
self.s['update_on_idle'] = True if __setting__('update_on_idle') == 'true' else False
self.s['home_prompts_only'] = True if __setting__('home_prompts_only') == 'true' else False
# self.s['export_library'] = True if __setting__('export_library') == 'true' else False
# self.s['export_video'] = True if __setting__('export_video') == 'true' else False
# self.s['multifile_vid_export'] = True if __setting__('multifile_vid_export') == 'true' else False
# self.s['export_music'] = True if __setting__('export_music') == 'true' else False
# self.s['create_tarball'] = True if __setting__('create_tarball') == 'true' else False
self.s['location_selection'] = __setting__('location_selection')
self.s['backup_location'] = __setting__('backup_location')
self.s['backup_location_typed'] = __setting__('backup_location_typed')
self.s['tarball_count'] = int(float( __setting__('tarball_count') ))
self.s['backup_on_update'] = True if __setting__('backup_on_update') == 'true' else False
self.s['backup_addons'] = True if __setting__('backup_addons') == 'true' else False
self.s['backup_addon_data'] = True if __setting__('backup_addon_data') == 'true' else False
self.s['backup_Database'] = True if __setting__('backup_Database') == 'true' else False
self.s['backup_keymaps'] = True if __setting__('backup_keymaps') == 'true' else False
self.s['backup_library'] = True if __setting__('backup_library') == 'true' else False
self.s['backup_playlists'] = True if __setting__('backup_playlists') == 'true' else False
self.s['backup_Thumbnails'] = True if __setting__('backup_Thumbnails') == 'true' else False
self.s['backup_favourites'] = True if __setting__('backup_favourites') == 'true' else False
self.s['backup_keyboard'] = True if __setting__('backup_keyboard') == 'true' else False
self.s['backup_remote'] = True if __setting__('backup_remote') == 'true' else False
self.s['backup_LCD'] = True if __setting__('backup_LCD') == 'true' else False
self.s['backup_profiles'] = True if __setting__('backup_profiles') == 'true' else False
self.s['backup_RssFeeds'] = True if __setting__('backup_RssFeeds') == 'true' else False
self.s['backup_sources'] = True if __setting__('backup_sources') == 'true' else False
self.s['backup_upnpserver'] = True if __setting__('backup_upnpserver') == 'true' else False
self.s['backup_peripheral_data'] = True if __setting__('backup_peripheral_data') == 'true' else False
self.s['backup_guisettings'] = True if __setting__('backup_guisettings') == 'true' else False
self.s['backup_advancedsettings'] = True if __setting__('backup_advancedsettings') == 'true' else False
return "initial run", self.s
else:
''' Construct a temporary dictionary for comparison with the existing settings dict '''
tmp_s = {}
tmp_s['on_upd_detected'] = int( __setting__('on_upd_detected') )
tmp_s['check_freq'] = int( __setting__('check_freq') )
tmp_s['check_weekday'] = int(float( __setting__('check_weekday') ))
tmp_s['check_day'] = int(float( __setting__('check_day') ))
tmp_s['check_time'] = int(float( __setting__('check_time') ))
tmp_s['check_hour'] = int(float( __setting__('check_hour') ))
tmp_s['check_minute'] = int(float( __setting__('check_minute') ))
tmp_s['pos_x'] = int(float( __setting__('pos_x') ))
tmp_s['pos_y'] = int(float( __setting__('pos_y') ))
tmp_s['suppress_progress'] = True if __setting__('suppress_progress') == 'true' else False
tmp_s['suppress_icon'] = True if __setting__('suppress_icon') == 'true' else False
tmp_s['update_on_idle'] = True if __setting__('update_on_idle') == 'true' else False
tmp_s['home_prompts_only'] = True if __setting__('home_prompts_only') == 'true' else False
# tmp_s['export_library'] = True if __setting__('export_library') == 'true' else False
# tmp_s['export_video'] = True if __setting__('export_video') == 'true' else False
# tmp_s['multifile_vid_export'] = True if __setting__('multifile_vid_export') == 'true' else False
# tmp_s['export_music'] = True if __setting__('export_music') == 'true' else False
# tmp_s['create_tarball'] = True if __setting__('create_tarball') == 'true' else False
tmp_s['location_selection'] = __setting__('location_selection')
tmp_s['backup_location'] = __setting__('backup_location')
tmp_s['backup_location_typed'] = __setting__('backup_location_typed')
tmp_s['tarball_count'] = int(float( __setting__('tarball_count') ))
tmp_s['backup_on_update'] = True if __setting__('backup_on_update') == 'true' else False
tmp_s['backup_addons'] = True if __setting__('backup_addons') == 'true' else False
tmp_s['backup_addon_data'] = True if __setting__('backup_addon_data') == 'true' else False
tmp_s['backup_Database'] = True if __setting__('backup_Database') == 'true' else False
tmp_s['backup_keymaps'] = True if __setting__('backup_keymaps') == 'true' else False
tmp_s['backup_library'] = True if __setting__('backup_library') == 'true' else False
tmp_s['backup_playlists'] = True if __setting__('backup_playlists') == 'true' else False
tmp_s['backup_Thumbnails'] = True if __setting__('backup_Thumbnails') == 'true' else False
tmp_s['backup_favourites'] = True if __setting__('backup_favourites') == 'true' else False
tmp_s['backup_keyboard'] = True if __setting__('backup_keyboard') == 'true' else False
tmp_s['backup_remote'] = True if __setting__('backup_remote') == 'true' else False
tmp_s['backup_LCD'] = True if __setting__('backup_LCD') == 'true' else False
tmp_s['backup_profiles'] = True if __setting__('backup_profiles') == 'true' else False
tmp_s['backup_RssFeeds'] = True if __setting__('backup_RssFeeds') == 'true' else False
tmp_s['backup_sources'] = True if __setting__('backup_sources') == 'true' else False
tmp_s['backup_upnpserver'] = True if __setting__('backup_upnpserver') == 'true' else False
tmp_s['backup_peripheral_data'] = True if __setting__('backup_peripheral_data') == 'true' else False
tmp_s['backup_guisettings'] = True if __setting__('backup_guisettings') == 'true' else False
tmp_s['backup_advancedsettings'] = True if __setting__('backup_advancedsettings') == 'true' else False
# flags to determine whether the update scheduler needs to be reconstructed or icon repositioned
update_scheduler = False
reposition_icon = False
# check each item in the temp dict; if it differs from the current settings, update the current settings,
# and prompt follow-up action when certain settings are changed (like the scheduler settings)
for k, v in tmp_s.iteritems():
if v == self.s[k]:
continue
else:
self.s[k] = v
if k in self.scheduler_settings:
update_scheduler = True
elif k in self.icon_settings:
reposition_icon = True
# if the user has elected to type the backup location, then overwrite the backup_location with the typed version
if self.s['location_selection'] == '1':
self.s['backup_location'] = self.s['backup_location_typed']
# reconstruct the scheduler if needed
if update_scheduler:
self.scheduler = sched.SimpleScheduler(self.s)
# reposition the icon on the home screen
if reposition_icon:
self.position_icon()
log(self.scheduler.trigger_time, 'trigger_time')
return self.s
# ACTION METHOD
def apt_error(self, **kwargs):
package = kwargs.get('package','not provided')
log('apt_updater encountered an error: \nException : %s \nPackage : %s \nError : %s' % (kwargs.get('exception','not provided'),package,kwargs.get('error','not provided')))
# kill the progress bar
self.progress_bar(kill=True)
# specifically handle a failure to connect to the apt server
if 'Unable to connect to' in kwargs.get('exception', ''):
ok = DIALOG.ok(lang(32087), lang(32131), lang(32132))
else:
# generic error handling
# notify the user that an error has occurred with an update
ok = DIALOG.ok(lang(32087), lang(32088) % package, '', lang(32089))
# ACTION METHOD
def apt_action_list_error(self, **kwargs):
package = kwargs.get('package','not provided')
log('apt_updater encountered an error: \nException : %s \nPackages : %s \nError : %s' % (kwargs.get('exception','not provided'),package,kwargs.get('error','not provided')))
# kill the progress bar
self.progress_bar(kill=True)
# notify the user that an error has occurred with an update
ok = DIALOG.ok(lang(32087), lang(32112), '', lang(32113))
# ACTION METHOD
def action_list(self, action):
# check whether the install is an alpha version
if self.check_for_unsupported_version() == 'alpha': return
# check for sufficient space, only proceed if it is available
root_space, _ = self.check_target_location_for_size(location='/', requirement=300)
if root_space:
subprocess.Popen(['sudo', 'python','%s/apt_cache_action.py' % __libpath__, 'action_list', action])
else:
okey_dokey = DIALOG.ok(lang(32077), lang(32129), lang(32130))
def action_list_complete(self):
# notify the user that the installation or uninstall of their desired apps has completed successfully
# prompt for immediate reboot if needed.
if any([os.path.isfile('/tmp/reboot-needed'), os.path.isfile('/var/run/reboot-required')]):
reboot = DIALOG.yesno(lang(32090), lang(32091), lang(32133), yeslabel=lang(32081), nolabel=lang(32082))
if reboot:
exit_osmc_settings_addon()
xbmc.sleep(1000)
xbmc.executebuiltin('Reboot')
else:
ok = DIALOG.ok(lang(32090), lang(32091))
# ACTION METHOD
# @clog(log, maxlength=2500)
def progress_bar(self, **kwargs):
''' Controls the creation and updating of the background progress bar in kodi.
The data gets sent from the apt_cache_action script via the socket
percent, must be an integer
heading, string containing the running total of items, bytes and speed
message, string containing the name of the package or the active process.
'''
# return immediately if the user has suppressed on-screen progress updates or kwargs is empty
if self.s['suppress_progress'] or not kwargs: return
# check for kill order in kwargs
kill = kwargs.get('kill', False)
if kill:
# if it is present, kill the dialog and delete it
try:
self.pDialog.close()
del self.pDialog
return 'Killed pDialog'
except:
pass
return 'Failed to kill pDialog'
# retrieve the necessary data for the progress dialog; if the data isn't supplied, then use 'nix' in its place
# the progress dialog update has 3 optional arguments
percent = kwargs.get('percent','nix')
heading = kwargs.get('heading','nix')
message = kwargs.get('message', 'nix')
# create a dict of the actionable arguments
keys = ['percent', 'heading', 'message']
args = [percent, heading, message]
update_args = {k:v for k, v in zip(keys, args) if v != 'nix'}
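# e.g. kwargs of {'percent': 42} yields update_args of {'percent': 42}; any 'nix' placeholders are filtered out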
# try to update the progress dialog
try:
log(update_args, 'update_args')
self.pDialog.update(**update_args)
except AttributeError:
# on an AttributeError create the dialog and start showing it; the AttributeError will be raised if pDialog doesn't exist
self.pDialog = xbmcgui.DialogProgressBG()
self.pDialog.create(lang(32077), lang(32078))
self.pDialog.update(**update_args)
except Exception as e:
# on any other error, just log it and try to remove the dialog from the screen
log(e, 'pDialog has encountered an error')
try:
self.pDialog.close()
del self.pDialog
return 'Killed pDialog'
except:
pass
return 'Failed to kill pDialog'
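# Usage sketch (illustrative values only; the real calls arrive over the socket from apt_cache_action.py):
#   self.progress_bar(percent=42, heading='3 of 10 items, 1.2MB/s', message='mediacenter')
#   self.progress_bar(kill=True)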
# ACTION METHOD
@clog(log)
def kill_yourself(self):
self.keep_alive = False
# ACTION METHOD
@clog(log, nowait=True)
def update_now(self):
''' Calls for an update check via the external script. This method checks if media is playing or whether the system has
been idle for two minutes before allowing the update. If an update is requested, but media is playing or the system
isn't idle, then the update request is put into a loop, with the daemon checking periodically to see if the situation
has changed. '''
# do not do anything while there is something in the holding pattern
if self.function_holding_pattern: return
# check whether the install is an alpha version
if self.check_for_unsupported_version() == 'alpha': return
check, _ = self.check_update_conditions()
if check:
if self.s['backup_on_update']:
# run the backup, once the backup is completed the script calls pre_backup_complete to continue with the update
# that is the reason for the "else"
self.update_settings()
bckp = OSMC_Backups.osmc_backup(self.s, self.progress_bar, self.parent_queue)
try:
bckp.start_backup()
except Exception as e:
# if there is an error, then abort the update. We don't want to run the update unless the user has backed up
log('Backup Error Type and Args: %s : %s \n\n %s' % (type(e).__name__, e.args, traceback.format_exc()))
else:
# run the update
self.call_child_script('update')
else:
self.function_holding_pattern = self.holding_pattern_update
# ACTION METHOD
@clog(log)
def user_update_now(self):
''' Similar to update_now, but as this is a user's request, forgo all the player and idle checks. '''
# check whether the install is an alpha version
if self.check_for_unsupported_version() == 'alpha': return
self.call_child_script('update')
# ACTION METHOD
@clog(log)
def pre_backup_complete(self):
''' This method is called when the pre-update backup is completed. No need to worry about checking the
update conditions, just run the update. '''
self.call_child_script('update')
# ACTION METHOD
@clog(log)
def apt_commit_complete(self):
# on commit complete, remove the notification from the Home window
self.window.setProperty('OSMC_notification', 'false')
# remove the file that blocks further update checks
try:
os.remove(self.block_update_file)
except:
pass
# run an apt-get clean
self.clean_apt_cache()
if self.check_if_reboot_required():
# the files flagging that an installed package needs a reboot are present
# 0 "Prompt for all actions" -- PROMPT
# 1 "Display icon on home screen only" -- PROMPT
# 2 "Download updates, then prompt" -- PROMPT
# 3 "Download and display icon" -- PROMPT
# 4 "Download, install, prompt if restart needed" -- PROMPT
# display dialogue saying that osmc needs to reboot
reboot = DIALOG.yesno(lang(32077), lang(32079), lang(32080), yeslabel=lang(32081), nolabel=lang(32082))
if reboot:
exit_osmc_settings_addon()
xbmc.sleep(1000)
xbmc.executebuiltin('Reboot')
else:
# skip further update checks until osmc has rebooted
self.skip_update_check = True
# ACTION METHOD
@clog(log)
def apt_fetch_complete(self):
# Download and display icon
if self.s['on_upd_detected'] == 3:
# create the file that will prevent further update checks until the updates have been installed
with open(self.block_update_file, 'w') as f:
f.write('d')
# turn on the "install now" setting in Settings.xml
__addon__.setSetting('install_now_visible', 'true')
return 'Download complete, leaving icon displayed'
else:
# Download updates, then prompt
# Download, install, prompt if restart needed (restart is needed)
# Prompt for all actions
if self.s['home_prompts_only']:
self.function_holding_pattern = self.holding_pattern_fetched
return 'Download complete, putting into holding pattern'
else:
self.holding_pattern_fetched(bypass=True)
return 'Download complete, prompting user'
# ACTION METHOD
@clog(log)
def settings_command(self, action):
''' Dispatch user call from the addon's settings. '''
if action == 'update':
result = self.settings_command_action()
elif action == 'backup':
result = self.settings_command_backup()
elif action == 'restore':
result = self.settings_command_restore()
elif action == 'install':
result = self.settings_command_install()
else:
result = 'Unrecognised settings command: %s' % action
return result
#ACTION METHOD
def settings_command_action(self):
''' User called for a manual update '''
check_connection, _ = self.check_update_conditions(connection_only=True)
if not check_connection:
DIALOG.ok('OSMC', 'Update not permitted.', 'Unable to reach the internet.')
return 'manual update cancelled, no connection'
else:
self.call_child_script('update_manual')
return 'Called child action - update_manual'
#ACTION METHOD
def settings_command_backup(self):
''' User called to initiate a backup '''
self.update_settings()
bckp = OSMC_Backups.osmc_backup(self.s, self.progress_bar)
try:
bckp.start_backup()
except Exception as e:
log('Backup Error Type and Args: %s : %s \n\n %s' % (type(e).__name__, e.args, traceback.format_exc()))
ok = DIALOG.ok(lang(32096), lang(32097))
return 'Called BACKUP script complete'
#ACTION METHOD
def settings_command_restore(self):
''' User called to initiate a restore '''
self.update_settings()
bckp = OSMC_Backups.osmc_backup(self.s, self.progress_bar)
try:
bckp.start_restore()
restart_required = bckp.restoring_guisettings
if bckp.success != 'Full':
ok = DIALOG.ok('Restore','Some items failed to restore.','See log for details.')
for x in bckp.success:
if x.endswith('userdata/guisettings.xml'):
restart_required = False
if restart_required:
user_input_restart_now = DIALOG.yesno(lang(32110), lang(32098), lang(32099), yeslabel=lang(32100), nolabel=lang(32101))
if user_input_restart_now:
subprocess.Popen(['sudo', 'systemctl', 'restart', 'mediacenter'])
except Exception as e:
log('Backup Error Type and Args: %s : %s \n\n %s' % (type(e).__name__, e.args, traceback.format_exc()))
ok = DIALOG.ok(lang(32096), lang(32097))
return 'Called RESTORE script complete'
#ACTION METHOD
def settings_command_install(self):
''' User called to install updates '''
# check, _ = self.check_for_legit_updates()
# if check == 'bail':
# return 'Update not legit, bail'
# if not self.EXTERNAL_UPDATE_REQUIRED:
# __addon__.setSetting('install_now_visible', 'false')
# self.call_child_script('commit')
# return 'Called child action - commit'
# else:
# warn the user if there is a major Kodi update that will be installed
# bail if they decide not to proceed
if self.UPDATE_WARNING:
confirm = self.display_update_warning()
if not confirm: return
ans = DIALOG.yesno(lang(32072), lang(32075), lang(32076))
if ans:
__addon__.setSetting('install_now_visible', 'false')
exit_osmc_settings_addon()
xbmc.sleep(1000)
subprocess.Popen(['sudo', 'systemctl', 'start', 'manual-update'])
return "Calling external update"
#ACTION METHOD
@clog(log)
def check_for_broken_installs(self):
try:
apt.apt_pkg.init_config()
apt.apt_pkg.init_system()
self.cache = apt.apt_pkg.Cache()
except apt.cache.LockFailedException:
return 'bail', 'global lock placed on package system'
except:
return 'bail', 'apt_pkg cache failed to open'
dirty_states = {apt.apt_pkg.CURSTATE_HALF_CONFIGURED, apt.apt_pkg.CURSTATE_HALF_INSTALLED, apt.apt_pkg.CURSTATE_UNPACKED}
try:
for pkg in self.cache.packages:
if pkg.current_state in dirty_states:
log(' found in a partially installed state', pkg.name)
self.EXTERNAL_UPDATE_REQUIRED = 1
return 'broken install found', 'EXTERNAL_UPDATE_REQUIRED set to 1'
else:
return 'passed', 'no broken packages found'
except:
return 'bail', 'check for partially installed packages failed'
# ACTION METHOD
@clog(log)
def check_for_legit_updates(self):
self.UPDATE_WARNING = False
self.EXTERNAL_UPDATE_REQUIRED = 1
# check for sufficient disk space, requirement in MB
root_space, _ = self.check_target_location_for_size(location='/', requirement=300)
boot_space, _ = self.check_target_location_for_size(location='/boot', requirement=30)
if not root_space or not boot_space:
okey_dokey = DIALOG.ok(lang(32077), lang(32129), lang(32130))
return 'bail', 'Insufficient free space: root=%s, boot=%s' % (root_space, boot_space)
check, msg = self.check_for_broken_installs()
if check == 'bail':
return check, msg
try:
self.cache = apt.Cache()
self.cache.open(None)
except apt.cache.LockFailedException:
return 'bail', 'global lock placed on package system'
except:
return 'bail', 'apt cache failed to open'
try:
self.cache.upgrade(True)
except:
return 'bail', 'apt cache failed to upgrade'
available_updates = []
log('The following packages have newer versions and are upgradable: ')
for pkg in self.cache.get_changes():
if pkg.is_upgradable:
log(' is upgradeable', pkg.shortname)
available_updates.append(pkg.shortname.lower())
# check whether the package is one that should be monitored for significant version change
if pkg.shortname in self.UPDATE_WARNING_LIST:
#send the package for a major update check
self.UPDATE_WARNING = self.check_for_major_release(pkg)
# if 'osmc' isn't in the name of any available updates, then return without doing anything
if not any(['osmc' in x for x in available_updates]):
# suppress the on-screen update notification
self.window.setProperty('OSMC_notification', 'false')
# delete the block_update_file if it exists, so that the icon doesn't display on next boot
try:
os.remove(self.block_update_file)
except:
pass
return 'bail', 'There are no osmc packages'
if not any([bl in av for bl in self.EXTERNAL_UPDATE_REQUIRED_LIST for av in available_updates]):
# self.EXTERNAL_UPDATE_REQUIRED = 0 ##### changed to force all updates to occur with Kodi closed.
self.EXTERNAL_UPDATE_REQUIRED = 1
# display update available notification
if not self.s['suppress_icon']:
self.window.setProperty('OSMC_notification', 'true')
# display a warning to the user
if self.UPDATE_WARNING:
if self.s['on_upd_detected'] not in [1, 2, 3, 4]:
confirm_update = self.display_update_warning()
if not confirm_update:
return 'bail', 'User declined to update major version of Kodi'
return 'passed', 'legit updates available'
def display_update_warning(self):
''' Displays a modal warning to the user that a major update is available, but that this could potentially cause
addon or database incompatibility.'''
user_confirm = DIALOG.yesno(lang(32077), lang(32128), lang(32127), lang(32126), yeslabel=lang(32125), nolabel=lang(32124))
return user_confirm
# ACTION METHOD
@clog(log)
def apt_update_manual_complete(self):
self.apt_update_complete(data='manual_update_complete')
# ACTION METHOD
@clog(log)
def apt_update_complete(self, data=None):
check, result = self.check_for_legit_updates()
if check == 'bail':
if 'Insufficient free space:' in result:
# send kill message to progress bar
self.progress_bar(kill=True)
elif data == 'manual_update_complete':
okey_dokey = DIALOG.ok(lang(32077), lang(32092))
# send kill message to progress bar
self.progress_bar(kill=True)
return 'Updates not legit, bail'
# The following section implements the procedure that the user has chosen to take place when updates are detected
if self.s['on_upd_detected'] == 0 or data == 'manual_update_complete':
# show all prompts (default)
if self.EXTERNAL_UPDATE_REQUIRED == 1:
# Downloading all the debs at once requires su access, so we call an external script to download the updates
# to the default apt_cache. That other script provides a progress update to this parent script,
# which is displayed as a background progress bar
self.call_child_script('fetch')
return "We can't upgrade from within Kodi as it needs updating itself"
else:
install = DIALOG.yesno(lang(32072), lang(32083), lang(32084))
if install:
self.call_child_script('commit') # Actually installs
self.window.setProperty('OSMC_notification', 'false')
else:
okey_dokey = DIALOG.ok(lang(32072), lang(32085), lang(32086))
# send kill message to progress bar
self.progress_bar(kill=True)
# create the file that will prevent further update checks until the updates have been installed
with open(self.block_update_file, 'w') as f:
f.write('d')
# trigger the flag to skip update checks
self.skip_update_check = True
return "Updates are available, no reboot is required"
elif self.s['on_upd_detected'] == 1:
# Display icon on home screen only
return 'Displaying icon on home screen only'
elif (self.s['on_upd_detected'] in [2, 3]) or (self.s['on_upd_detected'] == 4 and self.EXTERNAL_UPDATE_REQUIRED):
# Download updates, then prompt
# Download and display icon
# Download, install, prompt if restart needed (restart is needed)
# Download, install, auto-restart if needed
self.call_child_script('fetch')
return 'Downloading updates'
elif self.s['on_upd_detected'] == 4 and not self.EXTERNAL_UPDATE_REQUIRED:
# Download, install, prompt if restart needed (restart is not needed)
if self.UPDATE_WARNING:
confirm = self.display_update_warning()
if not confirm: return 'user declined to do a major version update'
self.call_child_script('commit')
return 'Download, install, prompt if restart needed'
@clog(log)
def check_for_major_release(self, pkg):
''' Checks a package to see whether it is a major release. This should trigger a warning to users that things might break'''
dig = '1234567890'
log('Checking package (%s) for major version change.' % pkg.shortname)
# get version of current package, raw_local_version_string
rlv = subprocess.check_output(["/usr/bin/dpkg-query", "-W", "-f", "'${version}\n'", pkg.shortname])
log('dpkg query results: %s' % rlv)
lv = ''.join([x for x in rlv[:rlv.index(".")] if x in list(dig)])
log('Local version number: %s' % lv)
# get version of updating package, raw_remote_version_string
versions = pkg.versions
log('Versions available: %s' % versions)
if not versions: return False
rrv = versions[0].version
log('First version selected: %s' % rrv)
rv = ''.join([x for x in rrv[:rrv.index(".")] if x in list(dig)])
log('Available version string: %s' % rv)
try:
if int(lv) < int(rv):
return True
except:
pass
return False
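# Worked example of the version parsing above (hypothetical values): a local version of
# '17.8-2' yields lv='17' and an available version of '18.1-0' yields rv='18', so
# int('17') < int('18') flags a major release and triggers the update warning.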
@clog(log)
def check_if_reboot_required(self):
''' Checks for the existence of two specific files that indicate an installed package mandates a reboot. '''
flag_files = ['/tmp/reboot-needed', '/var/run/reboot-required']
if any([os.path.isfile(x) for x in flag_files]):
return True
else:
return False
def clean_apt_cache(self):
try:
os.system('sudo apt-get clean')
except:
pass
def check_for_unsupported_version(self):
''' Checks if this version is an Alpha; if so, prevents updates. '''
fnull = open(os.devnull, 'w')
process = subprocess.call(['/usr/bin/dpkg-query', '-l', 'rbp-mediacenter-osmc'], stderr=fnull, stdout=fnull)
fnull.close()
if process == 0:
ok = DIALOG.ok(lang(32102), lang(32103), lang(32104))
return 'alpha'
else:
return 'proceed'
def check_target_location_for_size(self, location, requirement):
''' Checks the target location to see if there is sufficient space for the update.
Returns a tuple: a boolean indicating whether there is sufficient disk space, and the actual free space recorded. '''
mb_to_b = requirement * 1048576.0
try:
st = os.statvfs(location)
if st.f_frsize:
available = st.f_frsize * st.f_bavail
else:
available = st.f_bsize * st.f_bavail
# available = st.f_bfree/float(st.f_blocks) * 100 * st.f_bsize
log('local required disk space: %s' % mb_to_b)
log('local available disk space: %s' % available)
return mb_to_b < available, available / 1048576
except:
return False, 0
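# Worked example of the arithmetic above: a 300MB requirement becomes 300 * 1048576 = 314572800
# bytes, which is compared against f_frsize * f_bavail; the free space returned is in MB.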
def automatic_freespace_checker(self):
''' Daily checker of free space on /. Notifies the user in the Home window when free space falls below the threshold (250MB). '''
if self.freespace_supressor > 172800:
self.freespace_supressor = 0
freespace, value = self.check_target_location_for_size(location='/', requirement=250)
if not freespace:
if 'Home.xml' in xbmc.getInfoLabel('Window.Property(xmlfile)'):
if self.freespace_remedy == 'apt':
# THIS SECTION IS CURRENTLY DISABLED
# TO ENABLE IT CHANGE THE INIT FREESPACE_REMEDY TO 'apt'
resp = DIALOG.yesno( 'OSMC',
'Your system is running out of storage (<%sMB left).' % int(value),
'Would you like to try and clear unused system files?'
)
if resp:
# '&&' is shell syntax, so run the chained clean-up as a single shell command
subprocess.Popen('sudo apt-get autoremove && sudo apt-get clean', shell=True)
self.freespace_remedy = 'reboot'
# wait 10 minutes before next space check
self.freespace_supressor = 171600
else: # self.freespace_remedy == 'reboot'
resp = DIALOG.ok( 'OSMC',
'Your system is running out of storage (<%sMB left).' % int(value),
'Try rebooting a couple times to clear out temporary files.'
)
|
fernandog/osmc
|
package/mediacenter-addon-osmc/src/script.module.osmcsetting.updates/resources/lib/update_service.py
|
Python
|
gpl-2.0
| 47,883
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "scripts"))
from Util import runTests
runTests()
|
ljx0305/ice
|
allTests.py
|
Python
|
gpl-2.0
| 476
|
#!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
Created on Jun 9, 2015
@author: dwalker
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
@change: 2016/04/06 eball Updated name to ConfigureProfileManagement
@change: 2016/11/02 eball Updated name to ConfigurePasswordPolicy
'''
import unittest
import sys, os
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.ConfigurePasswordPolicy import ConfigurePasswordPolicy
from src.stonix_resources.CommandHelper import CommandHelper
from src.stonix_resources.KVEditorStonix import KVEditorStonix
class zzzTestRuleConfigurePasswordPolicy(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = ConfigurePasswordPolicy(self.config, self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
'''@author: dwalker
@note: This unit test will install two incorrect profiles on purpose
to force system non-compliance
'''
success = True
goodprofiles = {}
pwprofile = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]))) + \
"/src/stonix_resources/files/stonix4macPasscodeProfileFor" + \
"OSXElCapitan10.11.mobileconfig"
secprofile = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]))) + \
"/src/stonix_resources/files/stonix4macSecurity&Privacy" + \
"ForOSXElcapitan10.11.mobileconfig"
pwprofiledict = {"com.apple.mobiledevice.passwordpolicy":
{"allowSimple": ["1", "bool"],
"forcePIN": ["1", "bool"],
"maxFailedAttempts": ["5", "int", "less"],
"maxPINAgeInDays": ["180", "int", "more"],
"minComplexChars": ["1", "int", "more"],
"minLength": ["8", "int", "more"],
"minutesUntilFailedLoginReset":
["15", "int", "more"],
"pinHistory": ["5", "int", "more"],
"requireAlphanumeric": ["1", "bool"]}}
spprofiledict = {"com.apple.screensaver": "",
"com.apple.loginwindow": "",
"com.apple.systempolicy.managed": "",
"com.apple.SubmitDiagInfo": "",
"com.apple.preference.security": "",
"com.apple.MCX": "",
"com.apple.applicationaccess": "",
"com.apple.systempolicy.control": ""}
self.rule.pwprofile = pwprofile
self.rule.secprofile = secprofile
goodprofiles[pwprofile] = pwprofiledict
goodprofiles[secprofile] = spprofiledict
cmd = ["/usr/sbin/system_profiler", "SPConfigurationProfileDataType"]
if self.ch.executeCommand(cmd):
output = self.ch.getOutput()
if output:
for item, values in list(goodprofiles.items()):
self.editor = KVEditorStonix(self.statechglogger,
self.logdispatch, "profiles", "",
"", values, "", "", output)
if self.editor.report():
cmd = ["/usr/bin/profiles", "-R", "-F", item]
if not self.ch.executeCommand(cmd):
success = False
else:
cmd = ["/usr/bin/profiles", "-I", "-F,", item + "fake"]
if not self.ch.executeCommand(cmd):
success = False
else:
success = False
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
'''check on whether report was correct
:param self: essential if you override this definition
:param pCompliance: the self.iscompliant value of rule
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " +
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
def checkFixForRule(self, pRuleSuccess):
'''check on whether fix was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
def checkUndoForRule(self, pRuleSuccess):
'''check on whether undo was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
CSD-Public/stonix
|
src/tests/rules/unit_tests/zzzTestRuleConfigurePasswordPolicy.py
|
Python
|
gpl-2.0
| 7,397
|
#encoding=utf-8
import pymysql
import json
class MysqlHelper:
"""mysql 帮助类"""
@staticmethod
def insert(word,asymbol,esymbol,explain,cizu,liju,xiangguancihui,aspoken,espoken):
db=pymysql.connect(host="192.168.180.187",user="root",password="123456",db="lytest",charset="utf8")
cursor=db.cursor()
print(word.encode("utf8"))
print("--------------------------------insert into mysql db")
cursor.execute("insert into mfg_t_wordtest (f_word,f_asymbol,f_esymbol,f_explain,f_cizu,f_liju,f_xiangguancihui,f_aspoken,f_espoken,f_biaoji,f_type) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,0,0)",(word,asymbol,esymbol,"{"+json.dumps(explain,ensure_ascii=False,indent=2)+"}",json.dumps(cizu,ensure_ascii=False,indent=2),json.dumps(liju,ensure_ascii=False,indent=2),json.dumps(xiangguancihui,ensure_ascii=False,indent=2),aspoken,espoken))
db.commit()
db.close()
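# Minimal usage sketch (hypothetical values; assumes the MySQL host above is reachable):
# MysqlHelper.insert('apple', "'aepl", "'aepl", {'n.': 'a kind of fruit'}, [], [], [], 'a.mp3', 'e.mp3')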
|
skymyyang/YouDaoWord
|
MysqlHelper.py
|
Python
|
gpl-2.0
| 913
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"Fully test this module's functionality through the use of fixtures."
from megacosm.generators import Govt, Country, City
import unittest2 as unittest
import fakeredis
import fixtures
from config import TestConfiguration
class TestGovt(unittest.TestCase):
def setUp(self):
""" """
self.redis = fakeredis.FakeRedis()
fixtures.govt.import_fixtures(self)
fixtures.city.import_fixtures(self)
fixtures.region.import_fixtures(self)
fixtures.country.import_fixtures(self)
fixtures.organization.import_fixtures(self)
fixtures.business.import_fixtures(self)
fixtures.leader.import_fixtures(self)
fixtures.npc.import_fixtures(self)
fixtures.motivation.import_fixtures(self)
fixtures.phobia.import_fixtures(self)
self.redis.lpush('npc_race','gnome')
def tearDown(self):
self.redis.flushall()
def test_random_govt(self):
""" """
govt = Govt(self.redis)
self.assertEqual('far longer than should be allowed', govt.age['name'])
def test_static_body(self):
""" """
country=Country(self.redis)
govt = Govt(self.redis,{'body':country})
self.assertIn('Central Afkil', str(govt.body))
self.assertEqual(type(govt.body), Country)
def test_static_body_country(self):
""" """
govt = Govt(self.redis,{'kind':'country'})
self.assertIn('Central Afkil', str(govt.body))
self.assertEqual(type(govt.body), Country)
def test_str(self):
""" """
govt = Govt(self.redis,{'kind':'country'})
self.assertIn('absolute monarchy', str(govt))
def test_static_body_tacos(self):
""" What happens if you pass in an unsupported kind? it defaults to country."""
govt = Govt(self.redis,{'kind':'tacos'})
self.assertIn('Central Afkil', str(govt.body))
self.assertEqual(type(govt.body), Country)
def test_static_body_city(self):
""" """
self.redis.lpush('govt_kind', 'city')
self.redis.hset('govtcity_govttype_description', 'councilmanager', '{ "name":"council/manager", "description":"things are run by a council, which selects a manager for administrative tasks"}')
self.redis.lpush('govtcity_govttype', 'councilmanager')
govt = Govt(self.redis,{'kind':'city'})
self.assertIn('Alta DeAllentle Gate', str(govt.body))
self.assertEqual(type(govt.body), City)
|
CityGenerator/Megacosm-Generator
|
tests/test_govt.py
|
Python
|
gpl-2.0
| 2,530
|
# coding=utf-8
"""Writers test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'riccardo.klinger@geolicious.de'
__date__ = '2015-03-26'
__copyright__ = 'Copyright 2015, Riccardo Klinger / Geolicious'
import os
import difflib
from collections import OrderedDict
# This import is to enable SIP API V2
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
from qgis.core import QgsProject, QgsCoordinateReferenceSystem
from qgis2web.olwriter import OpenLayersWriter
from qgis2web.leafletWriter import LeafletWriter
from qgis2web.utils import tempFolder
from osgeo import gdal
from qgis2web.test.utilities import get_test_data_path, load_layer
from qgis.testing import unittest, start_app
from qgis.testing.mocked import get_iface
print("test_qgis2web_writers")
start_app()
def GDAL_COMPUTE_VERSION(maj, min, rev):
return maj * 1000000 + min * 10000 + rev * 100
def isLtrRepo():
"""
Returns true if using the LTR repository
"""
return 'QGIS_REPO' in os.environ and os.environ["QGIS_REPO"] == "http://qgis.org/debian-ltr"
class qgis2web_WriterTest(unittest.TestCase):
"""Test writers"""
maxDiff = None
def setUp(self):
"""Runs before each test"""
QgsProject.instance().writeEntryBool("ScaleBar", "/Enabled", False)
self.iface = get_iface()
def tearDown(self):
"""Runs after each test"""
QgsProject.instance().removeAllMapLayers()
def defaultParams(self):
return {'Data export': {'Minify GeoJSON files': True,
'Exporter': 'Export to folder',
'Precision': 'maintain'},
'Scale/Zoom': {'Min zoom level': '1',
'Restrict to extent': False,
'Extent': 'Fit to layers extent',
'Max zoom level': '28'},
'Appearance': {
'Add address search': False,
'Geolocate user': False,
'Base layer': [],
'Search layer': None,
'Add layers list': 'None',
'Attribute filter': [],
'Add abstract': 'None',
'Measure tool': 'None',
'Match project CRS': False,
'Template': 'full-screen',
'Widget Background': '#000000',
'Widget Icon': '#ffffff',
'Layer search': 'None',
'Highlight on hover': False,
'Show popups on hover': False
}}
def test01_LeafletWriterResults(self):
""" Test writer results from a leaflet writer"""
layer_path = get_test_data_path('layer', 'airports.shp')
layer = load_layer(layer_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder())
self.assertTrue(result.index_file)
self.assertTrue(len(result.files) > 1)
self.assertTrue(result.folder)
def test02_OpenLayersWriterResults(self):
""" Test writer results from a OL writer"""
layer_path = get_test_data_path('layer', 'airports.shp')
layer = load_layer(layer_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder())
self.assertTrue(result.index_file)
self.assertTrue(len(result.files) > 1)
self.assertTrue(result.folder)
def test09_Leaflet_json_pnt_single(self):
"""Leaflet JSON point single"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
_, _ = layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path('control', 'leaflet_json_point_single.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test10_Leaflet_wfs_pnt_single(self):
# """Leaflet WFS point single"""
# layer_url = (
# 'http://balleter.nationalparks.gov.uk/geoserver/wfs?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME=dnpa_inspire:tpo_points&SRSNAME=EPSG:27700&BBOX=233720,53549,297567,96689')
# layer_style = get_test_data_path('style', 'point_single.qml')
# layer = load_wfs_layer(layer_url, 'point')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(
# get_test_data_path('control', 'leaflet_wfs_point_single.html'), 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'ref', u'no label'), (u'tpo_name', u'no label'), (u'area_ha', u'no label'), (u'digitised', u'no label'), (u'objtype', u'no label')])
# ]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# # Open the test file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test11_Leaflet_json_line_single(self):
"""Leaflet JSON line single"""
layer_path = get_test_data_path('layer', 'pipelines.shp')
style_path = get_test_data_path('style', 'pipelines_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path('control', 'leaflet_json_line_single.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict([(u'cat', u'no label'), (u'LOCDESC', u'no label'), (u'F_CODE', u'no label'), (u'F_CODEDESC', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test12_Leaflet_wfs_line_single(self):
# """Leaflet WFS line single"""
# layer_url = ('http://balleter.nationalparks.gov.uk/geoserver/wfs?'
# 'SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME'
# '=broads_inspire:centreline&SRSNAME=EPSG:27700')
# layer_style = get_test_data_path('style', 'line_single.qml')
# layer = load_wfs_layer(layer_url, 'centreline')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(
# get_test_data_path('control', 'leaflet_wfs_line_single.html'), 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'objecttype', u'no label'), (u'name', u'no label'), (u'navigable', u'no label'), (u'responsibleparty', u'no label'), (u'broad', u'no label'), (u'from_', u'no label'), (u'to_', u'no label'), (u'reachid', u'no label'), (u'globalid', u'no label'), (u'route', u'no label'), (u'shape_stlength__', u'no label')])
# ]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test13_Leaflet_json_poly_single(self):
"""Leaflet JSON polygon single"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'lakes_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path('control', 'leaflet_json_polygon_single.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict([(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'), (u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test14_Leaflet_wfs_poly_single(self):
# """Leaflet WFS polygon single"""
# layer_url = ('http://balleter.nationalparks.gov.uk/geoserver/wfs?'
# 'SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME'
# '=dnpa_inspire:con_areas&SRSNAME=EPSG:27700')
# layer_style = get_test_data_path('style', 'polygon_single.qml')
# control_path = get_test_data_path(
# 'control', 'leaflet_wfs_polygon_single.html')
# layer = load_wfs_layer(layer_url, 'polygon')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(control_path, 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'name', u'no label'), (u'details', u'no label'), (u'date', u'no label'), (u'area_ha', u'no label'), (u'web_page', u'no label')])
# ]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test15_Leaflet_json_pnt_categorized(self):
"""Leaflet JSON point categorized"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_categorized.qml')
control_path = get_test_data_path(
'control', 'leaflet_json_point_categorized.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test16_Leaflet_wfs_pnt_categorized(self):
# """Leaflet WFS point categorized"""
# layer_url = (
# 'http://balleter.nationalparks.gov.uk/geoserver/wfs?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME=dnpa_inspire:tpo_points&SRSNAME=EPSG:27700&BBOX=233720,53549,297567,96689')
# layer_style = get_test_data_path('style', 'wfs_point_categorized.qml')
# control_path = get_test_data_path(
# 'control', 'leaflet_wfs_point_categorized.html')
# layer = load_wfs_layer(layer_url, 'point')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(control_path, 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'ref', u'no label'), (u'tpo_name', u'no label'), (u'area_ha', u'no label'), (u'digitised', u'no label'), (u'objtype', u'no label')])
# ]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test17_Leaflet_json_line_categorized(self):
"""Leaflet JSON line categorized"""
layer_path = get_test_data_path('layer', 'pipelines.shp')
style_path = get_test_data_path('style', 'pipelines_categorized.qml')
control_path = get_test_data_path(
'control', 'leaflet_json_line_categorized.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'LOCDESC', u'no label'), (u'F_CODE', u'no label'),
(u'F_CODEDESC', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test19_Leaflet_json_poly_categorized(self):
"""Leaflet JSON polygon categorized"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'lakes_categorized.qml')
control_path = get_test_data_path(
'control', 'leaflet_json_polygon_categorized.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test20_Leaflet_wfs_poly_categorized(self):
# """Leaflet WFS polygon categorized"""
# layer_url = ('http://balleter.nationalparks.gov.uk/geoserver/wfs?'
# 'SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME'
# '=dnpa_inspire:con_areas&SRSNAME=EPSG:27700')
# layer_style = get_test_data_path('style', 'wfs_polygon_categorized.qml')
# control_path = get_test_data_path(
# 'control', 'leaflet_wfs_polygon_categorized.html')
# layer = load_wfs_layer(layer_url, 'polygon')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(control_path, 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'name', u'no label'), (u'details', u'no label'), (
# u'date', u'no label'), (u'area_ha', u'no label'), (u'web_page', u'no label')])]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test21_Leaflet_json_pnt_graduated(self):
"""Leaflet JSON point graduated"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_graduated.qml')
control_path = get_test_data_path(
'control', 'leaflet_json_point_graduated.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test22_Leaflet_wfs_pnt_graduated(self):
# """Leaflet WFS point graduated"""
# layer_url = (
# 'http://balleter.nationalparks.gov.uk/geoserver/wfs?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME=dnpa_inspire:tpo_points&SRSNAME=EPSG:27700&BBOX=233720,53549,297567,96689')
# layer_style = get_test_data_path('style', 'wfs_point_graduated.qml')
# control_path = get_test_data_path(
# 'control', 'leaflet_wfs_point_graduated.html')
# layer = load_wfs_layer(layer_url, 'point')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(control_path, 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'ref', u'no label'), (u'tpo_name', u'no label'), (u'area_ha', u'no label'), (u'digitised', u'no label'), (u'objtype', u'no label')])
# ]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test23_Leaflet_json_line_graduated(self):
"""Leaflet JSON line graduated"""
layer_path = get_test_data_path('layer', 'pipelines.shp')
layer_style = get_test_data_path('style', 'pipelines_graduated.qml')
control_path = get_test_data_path(
'control', 'leaflet_json_line_graduated.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(layer_style)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [
OrderedDict([(u'cat', u'no label'), (u'LOCDESC', u'no label'), (u'F_CODE', u'no label'), (u'F_CODEDESC', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test24_Leaflet_wfs_line_graduated(self):
# """Leaflet WFS line graduated"""
# layer_url = ('http://balleter.nationalparks.gov.uk/geoserver/wfs?'
# 'SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME'
# '=broads_inspire:centreline&SRSNAME=EPSG:27700')
# layer_style = get_test_data_path('style', 'wfs_line_graduated.qml')
# control_path = get_test_data_path(
# 'control', 'leaflet_wfs_line_graduated.html')
# layer = load_wfs_layer(layer_url, 'centreline')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(control_path, 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'objecttype', u'no label'), (u'name', u'no label'), (u'navigable', u'no label'), (u'responsibleparty', u'no label'), (u'broad', u'no label'), (u'from_', u'no label'), (u'to_', u'no label'), (u'reachid', u'no label'), (u'globalid', u'no label'), (u'route', u'no label'), (u'shape_stlength__', u'no label')])
# ]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test25_Leaflet_json_poly_graduated(self):
"""Leaflet JSON polygon graduated"""
layer_path = get_test_data_path('layer', 'lakes.shp')
layer_style = get_test_data_path('style', 'lakes_graduated.qml')
control_path = get_test_data_path(
'control', 'leaflet_json_polygon_graduated.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(layer_style)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test26_Leaflet_wfs_poly_graduated(self):
# """Leaflet WFS polygon graduated"""
# layer_url = ('http://balleter.nationalparks.gov.uk/geoserver/wfs?'
# 'SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME'
# '=dnpa_inspire:con_areas&SRSNAME=EPSG:27700')
# layer_style = get_test_data_path('style', 'wfs_polygon_graduated.qml')
# control_path = get_test_data_path(
# 'control', 'leaflet_wfs_polygon_graduated.html')
# layer = load_wfs_layer(layer_url, 'polygon')
# layer.loadNamedStyle(layer_style)
# QgsProject.instance().addMapLayer(layer)
# control_file = open(control_path, 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict(
# [(u'name', u'no label'), (u'details', u'no label'), (u'date', u'no label'),
# (u'area_ha', u'no label'), (u'web_page', u'no label')])
# ]
# writer.json = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# test_file = open(result)
# test_output = test_file.read()
# test_file.close()
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test27_OL3_pnt_single(self):
"""OL3 point single"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
control_path = get_test_data_path(
'control', 'ol3_json_point_single.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
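# The OL3 writer emits a per-layer style file; append it to the index HTML
# so that a single control file covers both outputs.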
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/airports_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test28_OL3_line_single(self):
"""OL3 line single"""
layer_path = get_test_data_path('layer', 'pipelines.shp')
style_path = get_test_data_path('style', 'pipelines_single.qml')
control_path = get_test_data_path(
'control', 'ol3_json_line_single.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'LOCDESC', u'no label'), (u'F_CODE', u'no label'),
(u'F_CODEDESC', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/pipelines_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test29_OL3_poly_single(self):
"""OL3 polygon single"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'lakes_single.qml')
control_path = get_test_data_path(
'control', 'ol3_json_polygon_single.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/lakes_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test30_OL3_pnt_categorized(self):
"""OL3 point categorized"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_categorized.qml')
control_path = get_test_data_path(
'control', 'ol3_json_point_categorized.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/airports_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test31_OL3_line_categorized(self):
"""OL3 line categorized"""
layer_path = get_test_data_path('layer', 'pipelines.shp')
style_path = get_test_data_path('style', 'pipelines_categorized.qml')
control_path = get_test_data_path(
'control', 'ol3_json_line_categorized.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'LOCDESC', u'no label'), (u'F_CODE', u'no label'),
(u'F_CODEDESC', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(
result.replace(
'index.html', 'styles/pipelines_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test32_OL3_poly_categorized(self):
"""OL3 polygon categorized"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'lakes_categorized.qml')
control_path = get_test_data_path(
'control', 'ol3_json_polygon_categorized.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(result.replace(
'index.html', 'styles/lakes_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test33_OL3_pnt_graduated(self):
"""OL3 point graduated"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_graduated.qml')
control_path = get_test_data_path(
'control', 'ol3_json_point_graduated.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(result.replace(
'index.html', 'styles/airports_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test34_OL3_line_graduated(self):
"""OL3 line graduated"""
layer_path = get_test_data_path('layer', 'pipelines.shp')
style_path = get_test_data_path('style', 'pipelines_graduated.qml')
control_path = get_test_data_path(
'control', 'ol3_json_line_graduated.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'LOCDESC', u'no label'), (u'F_CODE', u'no label'),
(u'F_CODEDESC', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(result.replace(
'index.html', 'styles/pipelines_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test35_OL3_poly_graduated(self):
"""OL3 polygon graduated"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'lakes_graduated.qml')
control_path = get_test_data_path(
'control', 'ol3_json_polygon_graduated.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
test_style_file = open(result.replace(
'index.html', 'styles/lakes_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output += test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test36_OL3_layer_list(self):
"""OL3 A layer list is present when selected"""
layer_path = get_test_data_path('layer', 'airports.shp')
layer = load_layer(layer_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Add layers list'] = 'Collapsed'
writer.params['Appearance']['Template'] = 'canvas-size'
writer.params['Scale/Zoom']['Extent'] = 'Canvas extent'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
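# read_output() (a test helper) returns the contents of the named file
# from the exported web map folder.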
test_qgis2web_output = read_output(result, 'resources/qgis2web.js')
assert 'new ol.control.LayerSwitcher' in test_qgis2web_output
test_layers_output = read_output(result, 'layers/layers.js')
assert """title: '<img src="styles/legend/airports_0.png" /> airports'""" in test_layers_output
def test40_Leaflet_scalebar(self):
"""Leaflet scale bar"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_scalebar.html'), 'r')
control_output = control_file.read()
control_file.close()
# Check the 'Add scale bar' checkbox
QgsProject.instance().writeEntryBool("ScaleBar", "/Enabled", True)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Reset scale bar setting so it does not leak into later tests (cf. test41)
QgsProject.instance().writeEntryBool("ScaleBar", "/Enabled", False)
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test41_OL3_scalebar(self):
"""OL3 scale bar"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'ol3_scalebar.js'), 'r')
control_output = control_file.read()
control_file.close()
# Check the 'Add scale bar' checkbox
QgsProject.instance().writeEntryBool("ScaleBar", "/Enabled", True)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Reset scale bar setting
QgsProject.instance().writeEntryBool("ScaleBar", "/Enabled", False)
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test42_Leaflet_measure(self):
"""Leaflet measure"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_measure.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Measure tool'] = 'Metric'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test43_OL3_measure(self):
"""OL3 measure control"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Measure tool'] = 'Metric'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_measure.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'index.html')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
control_file = open(
get_test_data_path(
'control', 'ol3_measure.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test44_Leaflet_address(self):
"""Leaflet address search"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_address.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Add address search'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test45_OL3_address(self):
"""OL3 address search"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Add address search'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_address.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'index.html')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
control_file = open(
get_test_data_path(
'control', 'ol3_address.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test46_Leaflet_geolocate(self):
"""Leaflet geolocate user"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_geolocate.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Geolocate user'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test47_OL3_geolocate(self):
"""OL3 geolocate user"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Geolocate user'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_geolocate.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test48_Leaflet_highlight(self):
"""Leaflet highlight on hover"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_highlight.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Highlight on hover'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test49_OL3_highlight(self):
"""OL3 highlight on hover"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Highlight on hover'] = True
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_highlight.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test50_Leaflet_CRS(self):
"""Leaflet match CRS"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
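# Reproject the canvas to a non-default CRS (EPSG:2964) so that
# 'Match project CRS' has an observable effect.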
crs = QgsCoordinateReferenceSystem("EPSG:2964")
self.iface.mapCanvas().setDestinationCrs(crs)
control_file = open(
get_test_data_path(
'control', 'leaflet_crs.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Match project CRS'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test51_OL3_CRS(self):
"""OL3 match CRS"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
crs = QgsCoordinateReferenceSystem("EPSG:2964")
self.iface.mapCanvas().setDestinationCrs(crs)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Match project CRS'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_crs.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
control_file = open(
get_test_data_path(
'control', 'ol3_crs.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'layers/layers.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test52_Leaflet_layerslist(self):
"""Leaflet add layers list"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_layerslist.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Add layers list'] = 'Collapsed'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test53_Leaflet_visible(self):
"""Leaflet visible"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_visible.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [False]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test54_OL3_visible(self):
"""OL3 visible"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [False]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_visible.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'layers/layers.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test55_Leaflet_cluster(self):
"""Leaflet cluster"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_cluster.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [True]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test56_OL3_cluster(self):
"""OL3 cluster"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [True]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_cluster.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'layers/layers.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
@unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) >= GDAL_COMPUTE_VERSION(2, 0, 0), 'Test requires updating for GDAL 2.0')
def test62_leaflet_precision(self):
"""Leaflet precision"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Template'] = 'canvas-size'
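# 'Precision' limits the number of decimal places written for coordinates
# in the exported GeoJSON data files.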
writer.params['Data export']['Precision'] = '3'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [True]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_precision.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'data/airports_0.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
@unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) >= GDAL_COMPUTE_VERSION(2, 0, 0), 'Test requires updating for GDAL 2.0')
def test63_ol3_precision(self):
"""OL3 precision"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Template'] = 'canvas-size'
writer.params['Data export']['Precision'] = '2'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [True]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_precision.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'layers/airports_0.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
@unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) >= GDAL_COMPUTE_VERSION(2, 0, 0), 'Test requires updating for GDAL 2.0')
def test67_leaflet_minify(self):
"""Leaflet minify"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Data export']['Precision'] = '6'
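# 'Minify GeoJSON files' strips whitespace/newlines from the exported data
# files; the control file reflects the minified form.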
writer.params['Data export']['Minify GeoJSON files'] = True
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_minify.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'data/airports_0.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
@unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) >= GDAL_COMPUTE_VERSION(2, 0, 0), 'Test requires updating for GDAL 2.0')
def test68_ol3_minify(self):
"""OL3 minify"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Data export']['Precision'] = '2'
writer.params['Data export']['Minify GeoJSON files'] = True
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_minify.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'layers/airports_0.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test69_Leaflet_canvasextent(self):
"""Leaflet canvas extent"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Scale/Zoom']['Extent'] = 'Canvas extent'
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
assert "}).fitBounds([[" in test_output
def test70_Leaflet_maxzoom(self):
"""Leaflet max zoom"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Scale/Zoom']['Max zoom level'] = '20'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_maxzoom.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test71_ol3_maxzoom(self):
"""OL3 max zoom"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Scale/Zoom']['Max zoom level'] = '20'
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_maxzoom.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test72_Leaflet_minzoom(self):
"""Leaflet min zoom"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Scale/Zoom']['Min zoom level'] = '6'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_minzoom.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test73_ol3_minzoom(self):
"""OL3 min zoom"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Scale/Zoom']['Min zoom level'] = '6'
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_minzoom.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test74_Leaflet_restricttoextent(self):
"""Leaflet restrict to extent"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Scale/Zoom']['Restrict to extent'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_restricttoextent.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test75_ol3_restricttoextent(self):
"""OL3 restrict to extent"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.params['Scale/Zoom']['Restrict to extent'] = True
writer.params['Appearance']['Template'] = 'canvas-size'
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_output = read_output(result, 'resources/qgis2web.js')
# Test for expected output
assert "extent: [" in test_output
def test76_Leaflet_25d(self):
"""Leaflet 2.5d"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', '25d.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_25d.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test77_OL3_25d(self):
"""OL3 2.5d"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', '25d.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])
]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_25d.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test78_Leaflet_raster(self):
"""Leaflet raster"""
layer_path = get_test_data_path('layer', 'test.png')
# style_path = get_test_data_path('style', '25d.qml')
layer = load_layer(layer_path)
# layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict()]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_raster.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# test for exported raster file
assert os.path.exists(result.replace('index.html', 'data/test_0.png'))
def test79_OL3_raster(self):
"""OL3 raster"""
layer_path = get_test_data_path('layer', 'test.png')
# style_path = get_test_data_path('style', '25d.qml')
layer = load_layer(layer_path)
# layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict()]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'ol3_raster.js'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_output = read_output(result, 'layers/layers.js')
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# test for exported raster file
assert os.path.exists(result.replace('index.html', 'layers/test_0.png'))
def test80_OL3_heatmap(self):
"""OL3 heatmap"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'heatmap.qml')
control_path = get_test_data_path(
'control', 'ol3_heatmap.js')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
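# OL3 renders heatmaps as a dedicated layer type, so the output to compare
# is layers/layers.js rather than a per-layer style file.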
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'layers/layers.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output = test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test81_Leaflet_heatmap(self):
"""Leaflet heatmap"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'heatmap.qml')
control_path = get_test_data_path(
'control', 'leaflet_heatmap.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
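# NOTE: test83/test84 below are kept commented out; they depend on a remote
# BGS WMS service (ogc.bgs.ac.uk), which may be unavailable.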
# def test83_OL3_WMS(self):
# """OL3 WMS"""
# layer_url = (
# 'contextualWMSLegend=0&crs=EPSG:3857&dpiMode=all&featureCount=10&format=image/png&layers=GBR_BGS_625k_BLT&styles=&url=http://ogc.bgs.ac.uk/cgi-bin/BGS_Bedrock_and_Superficial_Geology/wms?')
# layer = load_wms_layer(layer_url, 'wms')
# QgsProject.instance().addMapLayer(layer)
# control_file = open(
# get_test_data_path('control', 'ol3_wms.js'), 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = OpenLayersWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict(
# [(u'ref', u'no label'), (u'tpo_name', u'no label'),
# (u'area_ha', u'no label'), (u'digitised', u'no label'),
# (u'objtype', u'no label')])]
# writer.json = [False]
# writer.getFeatureInfo = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# # Open the test file
# test_style_file = open(
# result.replace(
# 'file://', '').replace(
# 'index.html', 'layers/layers.js'))
# test_style_output = test_style_file.read()
# test_style_file.close()
# test_output = test_style_output
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
# def test84_Leaflet_WMS(self):
# """Leaflet WMS"""
# layer_url = (
# 'contextualWMSLegend=0&crs=EPSG:3857&dpiMode=all&featureCount=10&format=image/png&layers=GBR_BGS_625k_BLT&styles=&url=http://ogc.bgs.ac.uk/cgi-bin/BGS_Bedrock_and_Superficial_Geology/wms?')
# layer = load_wms_layer(layer_url, 'wms')
# QgsProject.instance().addMapLayer(layer)
# control_file = open(
# get_test_data_path('control', 'leaflet_wms.html'), 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict(
# [(u'ref', u'no label'), (u'tpo_name', u'no label'),
# (u'area_ha', u'no label'), (u'digitised', u'no label'),
# (u'objtype', u'no label')])]
# writer.json = [False]
# writer.getFeatureInfo = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# # Open the test file
# test_style_file = open(
# result.replace(
# 'file://', ''))
# test_style_output = test_style_file.read()
# test_style_file.close()
# test_output = test_style_output
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test85_Leaflet_rulebased(self):
"""Leaflet rule-based"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_rule-based.qml')
control_path = get_test_data_path(
'control', 'leaflet_rule-based.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test86_OL3_rulebased(self):
"""OL3 rule-based"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_rule-based.qml')
control_path = get_test_data_path(
'control', 'ol3_rule-based.js')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/airports_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output = test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test87_Leaflet_labels(self):
"""Leaflet labels"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_labels.qml')
control_path = get_test_data_path(
'control', 'leaflet_labels.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test88_OL3_labels(self):
"""OL3 labels"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_labels.qml')
control_path = get_test_data_path(
'control', 'ol3_labels.js')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/airports_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output = test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# def test89_OL3_WMTS(self):
# """OL3 WMTS"""
# layer_url = (
# 'contextualWMSLegend=0&crs=EPSG:3857&dpiMode=7&featureCount=10&format=image/jpeg&layers=EMAP8&styles=default&tileMatrixSet=GoogleMapsCompatible&url=http://wmts.nlsc.gov.tw/wmts')
# layer = load_wms_layer(layer_url, 'wms')
# QgsProject.instance().addMapLayer(layer)
# control_file = open(
# get_test_data_path('control', 'ol3_wmts.js'), 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = OpenLayersWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'ref', u'no label'), (u'tpo_name', u'no label'), (u'area_ha', u'no label'), (u'digitised', u'no label'), (u'objtype', u'no label')])
# ]
# writer.json = [False]
# writer.getFeatureInfo = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# # Open the test file
# test_style_file = open(
# result.replace(
# 'file://', '').replace(
# 'index.html', 'layers/layers.js'))
# test_style_output = test_style_file.read()
# test_style_file.close()
# test_output = test_style_output
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
# def test90_Leaflet_WMTS(self):
# """Leaflet WMTS"""
# layer_url = (
# 'contextualWMSLegend=0&crs=EPSG:3857&dpiMode=7&featureCount=10&format=image/jpeg&layers=EMAP8&styles=default&tileMatrixSet=GoogleMapsCompatible&url=http://wmts.nlsc.gov.tw/wmts')
# layer = load_wms_layer(layer_url, 'wms')
# QgsProject.instance().addMapLayer(layer)
# control_file = open(
# get_test_data_path('control', 'leaflet_wmts.html'), 'r')
# control_output = control_file.read()
# control_file.close()
# # Export to web map
# writer = LeafletWriter()
# writer.params = self.defaultParams()
# writer.groups = {}
# writer.layers = [layer]
# writer.visible = [True]
# writer.cluster = [False]
# writer.popup = [OrderedDict([(u'ref', u'no label'), (u'tpo_name', u'no label'), (u'area_ha', u'no label'), (u'digitised', u'no label'), (u'objtype', u'no label')])
# ]
# writer.json = [False]
# writer.getFeatureInfo = [False]
# result = writer.write(self.iface, tempFolder()).index_file
# # Open the test file
# test_style_file = open(
# result.replace(
# 'file://', ''))
# test_style_output = test_style_file.read()
# test_style_file.close()
# test_output = test_style_output
# self.assertEqual(
# test_output, control_output, diff(control_output, test_output))
def test91_Leaflet_scaledependent(self):
"""Leaflet scale-dependent"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_scaledependent.qml')
control_path = get_test_data_path(
'control', 'leaflet_scaledependent.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test92_Leaflet_categorized_25d(self):
"""Leaflet categorized 2.5D"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'categorized_25d.qml')
control_path = get_test_data_path(
'control', 'leaflet_categorized_25d.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict([(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'), (u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test93_Leaflet_graduated_25d(self):
"""Leaflet graduated 2.5D"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'graduated_25d.qml')
control_path = get_test_data_path(
'control', 'leaflet_graduated_25d.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict([(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'), (u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test94_Leaflet_svg(self):
"""Leaflet SVG"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'svg.qml')
control_path = get_test_data_path(
'control', 'leaflet_svg.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# test for exported marker file
assert os.path.exists(result.replace('index.html', 'markers/qgis2web.svg'))
def test95_OL3_svg(self):
"""OL3 SVG"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'svg.qml')
control_path = get_test_data_path(
'control', 'ol3_svg.js')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/airports_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output = test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# test for exported marker file
assert os.path.exists(result.replace('index.html', 'styles/qgis2web.svg'))
def test96_Leaflet_layer_groups(self):
"""Leaflet layer groups"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
control_path = get_test_data_path(
'control', 'leaflet_groups.html')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {'group1': [layer]}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [{}]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
test_file = open(result)
test_output = test_file.read()
test_file.close()
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test97_OL3_layergroups(self):
"""OL3 layer groups"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
control_path = get_test_data_path(
'control', 'ol3_groups.js')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(control_path, 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {'group1': [layer]}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [{}]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'layers/layers.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output = test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test98_Leaflet_shapes(self):
"""Leaflet shapes"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_shapes.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_shapes.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test99_OL3_shapes(self):
"""OL3 shapes"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_shapes.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'ol3_shapes.js'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = OpenLayersWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[('ID', 'no label'), ('fk_region', 'no label'), ('ELEV', 'no label'), ('NAME', 'no label'),
('USE', 'no label')])]
writer.json = [False]
writer.getFeatureInfo = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_style_file = open(
result.replace(
'file://', '').replace(
'index.html', 'styles/airports_0_style.js'))
test_style_output = test_style_file.read()
test_style_file.close()
test_output = test_style_output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test100_Leaflet_line_pattern_fill(self):
"""Leaflet line pattern fill"""
layer_path = get_test_data_path('layer', 'lakes.shp')
style_path = get_test_data_path('style', 'lakes_linepatternfill.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_linepatternfill.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'cat', u'no label'), (u'NAMES', u'no label'), (u'AREA_MI', u'no label'),
(u'xlabel', u'no label'), (u'ylabel', u'no label'), (u'rotation', u'no label')])]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def test101_Leaflet_raster_crs(self):
"""Leaflet raster with original CRS"""
layer_path = get_test_data_path('layer', 'test.png')
# style_path = get_test_data_path('style', '25d.qml')
layer = load_layer(layer_path)
# layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
crs = QgsCoordinateReferenceSystem("EPSG:27700")
self.iface.mapCanvas().setDestinationCrs(crs)
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.params['Appearance']['Match project CRS'] = True
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [True]
writer.cluster = [False]
writer.popup = [OrderedDict()]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
control_file = open(
get_test_data_path(
'control', 'leaflet_raster_crs.html'), 'r')
control_output = control_file.read()
control_file.close()
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Test for expected output
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
# test for exported raster file
assert os.path.exists(result.replace('index.html', 'data/test_0.png'))
def test102_Leaflet_interactive(self):
"""Leaflet interactive"""
layer_path = get_test_data_path('layer', 'airports.shp')
style_path = get_test_data_path('style', 'airports_single.qml')
layer = load_layer(layer_path)
layer.loadNamedStyle(style_path)
QgsProject.instance().addMapLayer(layer)
control_file = open(
get_test_data_path(
'control', 'leaflet_interactive.html'), 'r')
control_output = control_file.read()
control_file.close()
# Export to web map
writer = LeafletWriter()
writer.params = self.defaultParams()
writer.groups = {}
writer.layers = [layer]
writer.visible = [True]
writer.interactive = [False]
writer.cluster = [False]
writer.popup = [OrderedDict(
[(u'ID', u'no label'), (u'fk_region', u'no label'), (u'ELEV', u'no label'),
(u'NAME', u'no label'), (u'USE', u'no label')])
]
writer.json = [False]
result = writer.write(self.iface, tempFolder()).index_file
# Open the test file
test_file = open(result)
test_output = test_file.read()
test_file.close()
# Compare with control file
self.assertEqual(
test_output, control_output, diff(control_output, test_output))
def read_output(url, path):
""" Given a url for the index.html file of a preview or export and the
relative path to an output file open the file and return it's contents as a
string """
abs_path = url.replace('file://', '').replace('index.html', path)
with open(abs_path) as f:
return f.read()
def diff(control_output, test_output):
""" Produce a unified diff given two strings splitting on newline """
    return '\n'.join(difflib.unified_diff(control_output.split('\n'), test_output.split('\n'), lineterm=''))
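# A minimal usage sketch for the two helpers above (values are hypothetical;
# a real index.html url comes from writer.write(...).index_file):
#
#   layers_js = read_output('file:///tmp/export/index.html', 'layers/layers.js')
#   print(diff(expected_layers_js, layers_js))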
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(qgis2web_WriterTest))
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
tomchadwin/qgis2web
|
qgis2web/test/test_qgis2web_writers.py
|
Python
|
gpl-2.0
| 124,843
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#
# module_dumper.py - WIDS/WIPS framework file dumper module
# Copyright (C) 2009 Peter Krebs, Herbert Haas
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the
# Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see http://www.gnu.org/licenses/gpl-2.0.html
"""Dumper module
Test module which writes any input values to a file.
"""
# Imports
#
# Custom modules
import fw_modules.module_template
from fw_modules.module_exceptions import *
# Standard modules
import time
# Third-party modules
class DumperClass(fw_modules.module_template.ModuleClass):
"""DumperClass
    Receives messages and dumps them into a file.
"""
def __init__(self, controller_reference, parameter_dictionary, module_logger):
"""Constructor
"""
fw_modules.module_template.ModuleClass.__init__(self, controller=controller_reference, param_dict=parameter_dictionary, logger=module_logger)
# Default values.
try:
self.dumpfile_path = self.param_dict['dumpfile']
except KeyError:
            # Log the problem, then abort module setup; the raise must come
            # last or the log call is unreachable.
            self.module_logger.error("No dumpfile specified")
            raise FwModuleSetupError, self.module_identifier + ": ERROR: No dumpfile specified"
# Helper values.
self.DUMPFILE = None
def after_run(self):
"""after_run()
Closes dumpfile.
"""
try:
self.DUMPFILE.close()
except IOError:
self.module_logger.warning("Couldn't close dumpfile properly")
def before_run(self):
"""before_run()
Opens dumpfile.
"""
try:
self.DUMPFILE = open(self.dumpfile_path, "w")
except IOError:
self.module_logger.error("Couldn't open file " + str(self.dumpfile_path))
return False
else:
return True
def dump_to_file(self, data):
"""dump_to_file()
Dumps input to file.
"""
self.module_logger.debug("Dumped data: " + str(data))
try:
self.DUMPFILE.write(data + "\n")
self.DUMPFILE.flush()
except IOError as err:
self.module_logger.warning("Couldn't dump to file; details: " + err.__str__())
def process(self, input):
"""process()
Main action.
"""
self.module_logger.debug("Raw input: " + str(input))
self.dump_to_file(input)
def main(controller_reference, parameter_dictionary, module_logger):
dumper_class = DumperClass(controller_reference, parameter_dictionary, module_logger)
return dumper_class
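# A minimal wiring sketch (assumptions: 'controller' comes from the WIDS/WIPS
# framework and a plain logging.Logger is an acceptable module_logger):
#
#   import logging
#   dumper = main(controller, {'dumpfile': '/tmp/widps_dump.txt'},
#                 logging.getLogger('dumper'))
#   if dumper.before_run():
#       dumper.process("test message")
#       dumper.after_run()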
if __name__ == "__main__":
print "Warning: This module is not intended to be executed directly. Only do this for test purposes."
|
pkrebs/WIDPS
|
fw_modules/module_dumper.py
|
Python
|
gpl-2.0
| 3,313
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Prudhvi Miryala<mprudhvi@linux.vnet.ibm.com>
#
# test multicasting
# to test we need to enable multicast option on host
# then ping from peer to multicast group
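# A hypothetical avocado parameter file for this test (key names match the
# self.params.get() calls below; all values are placeholders):
#
#   multicast.yaml:
#     peer_ip: 192.168.100.2
#     user_name: root
#     peer_password: password
#     interface: eth1
#     host_ip: 192.168.100.1
#     netmask: 255.255.255.0
#     count: 500000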
import netifaces
from avocado import Test
from avocado.utils.software_manager import SoftwareManager
from avocado.utils.ssh import Session
from avocado.utils import process
from avocado.utils import distro
from avocado.utils.network.interfaces import NetworkInterface
from avocado.utils.network.hosts import LocalHost
class ReceiveMulticastTest(Test):
'''
check multicast receive
using ping tool
'''
def setUp(self):
'''
To check and install dependencies for the test
'''
self.peer = self.params.get("peer_ip", default="")
self.user = self.params.get("user_name", default="root")
self.peer_password = self.params.get("peer_password",
'*', default="None")
interfaces = netifaces.interfaces()
self.iface = self.params.get("interface", default="")
if self.iface not in interfaces:
self.cancel("%s interface is not available" % self.iface)
self.ipaddr = self.params.get("host_ip", default="")
self.netmask = self.params.get("netmask", default="")
local = LocalHost()
self.networkinterface = NetworkInterface(self.iface, local)
try:
self.networkinterface.add_ipaddr(self.ipaddr, self.netmask)
self.networkinterface.save(self.ipaddr, self.netmask)
except Exception:
self.networkinterface.save(self.ipaddr, self.netmask)
self.networkinterface.bring_up()
self.session = Session(self.peer, user=self.user,
password=self.peer_password)
if not self.session.connect():
self.cancel("failed connecting to peer")
self.count = self.params.get("count", default="500000")
smm = SoftwareManager()
pkgs = ["net-tools"]
detected_distro = distro.detect()
if detected_distro.name == "Ubuntu":
pkgs.extend(["openssh-client", "iputils-ping"])
elif detected_distro.name == "SuSE":
pkgs.extend(["openssh", "iputils"])
else:
pkgs.extend(["openssh-clients", "iputils"])
for pkg in pkgs:
if not smm.check_installed(pkg) and not smm.install(pkg):
self.cancel("%s package is need to test" % pkg)
if self.peer == "":
self.cancel("peer ip should specify in input")
cmd = "ip addr show | grep %s" % self.peer
output = self.session.cmd(cmd)
result = ""
result = result.join(output.stdout.decode("utf-8"))
self.peerif = result.split()[-1]
if self.peerif == "":
self.cancel("unable to get peer interface")
cmd = "ip -f inet -o addr show %s | awk '{print $4}' | cut -d / -f1"\
% self.iface
self.local_ip = process.system_output(cmd, shell=True).strip()
if self.local_ip == "":
self.cancel("unable to get local ip")
def test_multicast(self):
'''
ping to peer machine
'''
cmd = "echo 0 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts"
if process.system(cmd, shell=True, verbose=True,
ignore_status=True) != 0:
self.fail("unable to set value to icmp_echo_ignore_broadcasts")
cmd = "ip link set %s allmulticast on" % self.iface
if process.system(cmd, shell=True, verbose=True,
ignore_status=True) != 0:
self.fail("unable to set all mulicast option to test interface")
cmd = "ip route add 224.0.0.0/4 dev %s" % self.peerif
output = self.session.cmd(cmd)
if not output.exit_status == 0:
self.fail("Unable to add route for Peer interafce")
cmd = "timeout 600 ping -I %s 224.0.0.1 -c %s -f" % (self.peerif,
self.count)
output = self.session.cmd(cmd)
if not output.exit_status == 0:
self.fail("multicast test failed")
def tearDown(self):
'''
delete multicast route and turn off multicast option
'''
cmd = "ip route del 224.0.0.0/4"
output = self.session.cmd(cmd)
if not output.exit_status == 0:
self.log.info("Unable to delete multicast route added for peer")
cmd = "echo 1 > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts"
if process.system(cmd, shell=True, verbose=True,
ignore_status=True) != 0:
self.log.info("unable to unset all mulicast option")
cmd = "ip link set %s allmulticast off" % self.iface
if process.system(cmd, shell=True, verbose=True,
ignore_status=True) != 0:
self.log.info("unable to unset all mulicast option")
self.networkinterface.remove_ipaddr(self.ipaddr, self.netmask)
try:
self.networkinterface.restore_from_backup()
except Exception:
self.log.info("backup file not availbale, could not restore file.")
self.session.quit()
|
narasimhan-v/avocado-misc-tests-1
|
io/net/multicast.py
|
Python
|
gpl-2.0
| 5,757
|
import subprocess
import smtplib
import socket
from email.mime.text import MIMEText
import datetime
# Change to your own account information
to = 'rk.ryan.king@gmail.com'
gmail_user = 'rk.ryan.king@gmail.com'
gmail_password = 'nzwaahcmdzjchxsz'
smtpserver = smtplib.SMTP('smtp.gmail.com', 587)
smtpserver.ehlo()
smtpserver.starttls()
smtpserver.ehlo()
smtpserver.login(gmail_user, gmail_password)
today = datetime.date.today()
# Very Linux Specific
arg='ip route list'
p=subprocess.Popen(arg,shell=True,stdout=subprocess.PIPE)
data = p.communicate()
split_data = data[0].split()
ipaddr = split_data[split_data.index('src')+1]
my_ip = 'Your ip is %s' % ipaddr
msg = MIMEText(my_ip)
msg['Subject'] = 'IP For RaspberryPi on %s' % today.strftime('%b %d %Y')
msg['From'] = gmail_user
msg['To'] = to
smtpserver.sendmail(gmail_user, [to], msg.as_string())
smtpserver.quit()
|
rkryan/seniordesign
|
pi/utils/startup_ip.py
|
Python
|
gpl-2.0
| 865
|
from django.contrib import admin
from Blogs.models import(Post, Comment)
admin.site.register(Post)
admin.site.register(Comment)
|
aliunsal/blog
|
Blogs/admin.py
|
Python
|
gpl-2.0
| 128
|
#===============================================================================
# Copyright 2012 NetApp, Inc. All Rights Reserved,
# contribution by Jorge Mora <mora@netapp.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#===============================================================================
"""
ETHERNET module
Decode ethernet layer (RFC 894) Ethernet II.
"""
import nfstest_config as c
from baseobj import BaseObj
from macaddr import MacAddr
from packet.internet.ipv4 import IPv4
from packet.internet.ipv6 import IPv6
from packet.internet.arp import ARP,RARP
# Module constants
__author__ = "Jorge Mora (%s)" % c.NFSTEST_AUTHOR_EMAIL
__copyright__ = "Copyright (C) 2012 NetApp, Inc."
__license__ = "GPL v2"
__version__ = "1.1"
_ETHERNET_map = {
0x0800: 'IPv4',
0x86dd: 'IPv6',
0x0806: 'ARP',
0x8035: 'RARP',
}
class ETHERNET(BaseObj):
"""Ethernet object
Usage:
from packet.link.ethernet import ETHERNET
x = ETHERNET(pktt)
Object definition:
ETHERNET(
dst = MacAddr(), # destination MAC address
src = MacAddr(), # source MAC address
type = int, # payload type
data = string, # raw data of payload if type is not supported
)
"""
# Class attributes
_attrlist = ("dst", "src", "type", "data")
def __init__(self, pktt):
"""Constructor
Initialize object's private data.
pktt:
Packet trace object (packet.pktt.Pktt) so this layer has
access to the parent layers.
"""
unpack = pktt.unpack
ulist = unpack.unpack(14, "!6s6sH")
self.dst = MacAddr(ulist[0].encode('hex'))
self.src = MacAddr(ulist[1].encode('hex'))
self.type = ulist[2]
pktt.pkt.ethernet = self
if self.type == 0x0800:
# Decode IPv4 packet
IPv4(pktt)
elif self.type == 0x86dd:
# Decode IPv6 packet
IPv6(pktt)
elif self.type == 0x0806:
# Decode ARP packet
ARP(pktt)
elif self.type == 0x8035:
# Decode RARP packet
RARP(pktt)
else:
self.data = unpack.getbytes()
def __str__(self):
"""String representation of object
The representation depends on the verbose level set by debug_repr().
If set to 0 the generic object representation is returned.
If set to 1 the representation of the object is condensed:
'00:0c:29:54:09:ef -> 60:33:4b:29:6e:9d '
If set to 2 the representation of the object also includes the type
of payload:
'00:0c:29:54:09:ef -> 60:33:4b:29:6e:9d, type: 0x800(IPv4)'
"""
rdebug = self.debug_repr()
if rdebug == 1:
out = "%s -> %s " % (self.src, self.dst)
elif rdebug == 2:
etype = _ETHERNET_map.get(self.type, None)
etype = hex(self.type) if etype is None else "%s(%s)" % (hex(self.type), etype)
out = "%s -> %s, type: %s" % (self.src, self.dst, etype)
else:
out = BaseObj.__str__(self)
return out
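# A hedged usage sketch (assumes a packet trace opened via packet.pktt.Pktt,
# as referenced in the constructor docstring; 'trace.cap' is a placeholder):
#
#   from packet.pktt import Pktt
#   pktt = Pktt('trace.cap')
#   eth = ETHERNET(pktt)          # decodes the link layer and its payload
#   print str(eth)                # formatting depends on debug_repr() level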
|
rasky/nfstest
|
packet/link/ethernet.py
|
Python
|
gpl-2.0
| 3,670
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
bkr job-cancel: Cancel running Beaker jobs
==========================================
.. program:: bkr job-cancel
Synopsis
--------
:program:`bkr job-cancel` [--msg <message>] [*options*] <taskspec>...
Description
-----------
Specify one or more <taskspec> arguments to be cancelled.
The <taskspec> arguments follow the same format as in other :program:`bkr`
subcommands (for example, ``J:1234``). See :ref:`Specifying tasks <taskspec>`
in :manpage:`bkr(1)`.
Only jobs and recipe sets may be cancelled. It does not make sense to cancel
individual recipes within a recipe set, or tasks within a recipe, so Beaker
does not permit this.
Options
-------
.. option:: --msg <message>
Optionally you can provide a message describing the reason for the
cancellation. This message will be recorded against all outstanding tasks in
the cancelled recipe set, and will be visible in the Beaker web UI.
Common :program:`bkr` options are described in the :ref:`Options
<common-options>` section of :manpage:`bkr(1)`.
Exit status
-----------
Non-zero on error, otherwise zero.
Examples
--------
Cancel job 1234 with a helpful message::
bkr job-cancel --msg "Selected wrong distro, resubmitting job" J:1234
See also
--------
:manpage:`bkr(1)`
"""
from __future__ import print_function
from bkr.client import BeakerCommand
class Job_Cancel(BeakerCommand):
"""
Cancel Jobs/Recipes
"""
enabled = True
def options(self):
self.parser.add_option(
"--msg",
default=None,
help="Optional message to record as to why you cancelled",
)
self.parser.usage = "%%prog %s [options] [J:<id> | RS:<id> ...]" % self.normalized_name
def run(self, *args, **kwargs):
if len(args) < 1:
self.parser.error('Please specify a taskspec to cancel')
self.check_taskspec_args(args, permitted_types=['J', 'RS', 'T'])
msg = kwargs.pop("msg", None)
self.set_hub(**kwargs)
for task in args:
self.hub.taskactions.stop(task, 'cancel', msg)
print('Cancelled %s' % task)
|
beaker-project/beaker
|
Client/src/bkr/client/commands/cmd_job_cancel.py
|
Python
|
gpl-2.0
| 2,402
|
import pytest
from spinner import transforms
from spinner import coordinates
from spinner import cabinet
from example_cabinet_params import exact
def test_hex_to_cartesian():
h = coordinates.Hexagonal
c = coordinates.Cartesian2D
# Test single element cases
o0 = "o0"
o1 = "o1"
o2 = "o2"
assert transforms.hex_to_cartesian(
[(o0, h(0,0,0)), (o1, h(0,1,0)), (o2, h(1,1,0))]) == \
[(o0, c(0,0)), (o1, c(0,2)), (o2, c(1,1))]
def test_hex_to_skew_cartesian():
h = coordinates.Hexagonal
c = coordinates.Cartesian2D
# Test single element cases
o0 = "o0"
o1 = "o1"
o2 = "o2"
assert transforms.hex_to_skewed_cartesian(
[(o0, h(0,0,0)), (o1, h(0,1,0)), (o2, h(1,1,0))]) == \
[(o0, c(0,0)), (o1, c(1,2)), (o2, c(2,1))]
def test_rhombus_to_rect():
c2 = coordinates.Cartesian2D
c3 = coordinates.Cartesian3D
o0 = "o0"
o1 = "o1"
o2 = "o2"
assert transforms.rhombus_to_rect([]) == []
assert transforms.rhombus_to_rect(
[(o0, c2(-1,0)), (o1, c2(0,0)), (o2, c2(1,1))]) == \
[(o0, c2(1,0)), (o1, c2(0,0)), (o2, c2(1,1))]
assert transforms.rhombus_to_rect(
[(o0, c3(-1,-1,-1)), (o1, c3(0,1,1)), (o2, c3(1,1,0))]) == \
[(o0, c3(1,1,1)), (o1, c3(0,1,1)), (o2, c3(1,1,0))]
def test_compress():
c = coordinates.Cartesian2D
o0 = "o0"
o1 = "o1"
o2 = "o2"
o3 = "o3"
o4 = "o4"
o5 = "o5"
assert transforms.compress([(o0, c(0,0)), (o1, c(1,1)), (o2, c(2,0)),
(o3, c(0,2)), (o4, c(1,3)), (o5, c(2,2))]) == \
[(o0, c(0,0)), (o1, c(1,0)), (o2, c(2,0)),
(o3, c(0,1)), (o4, c(1,1)), (o5, c(2,1))]
def test_flip_axes():
c = coordinates.Cartesian2D
o0 = "o0"
o1 = "o1"
assert transforms.flip_axes([(o0, c(1,2)), (o1, c(3,4))]) == \
[(o0, c(2,1)), (o1, c(4,3))]
def test_folds():
c = coordinates.Cartesian2D
o0 = "o0"
o1 = "o1"
o2 = "o2"
o3 = "o3"
assert transforms.fold([], (1,1)) == []
# No folding
assert transforms.fold(
[(o0, c(0,0)), (o1, c(1,0)), (o2, c(2,0)), (o3, c(3,0))], (1,1)) == \
[(o0, c(0,0)), (o1, c(1,0)), (o2, c(2,0)), (o3, c(3,0))]
# Fold on X
assert transforms.fold(
[(o0, c(0,0)), (o1, c(1,0)), (o2, c(2,0)), (o3, c(3,0))], (2,1)) == \
[(o0, c(0,0)), (o1, c(2,0)), (o2, c(3,0)), (o3, c(1,0))]
# Fold on Y
assert transforms.fold(
[(o0, c(0,0)), (o1, c(0,1)), (o2, c(0,2)), (o3, c(0,3))], (1,2)) == \
[(o0, c(0,0)), (o1, c(0,2)), (o2, c(0,3)), (o3, c(0,1))]
def test_cabinetise():
c = coordinates.Cartesian2D
s = coordinates.Cabinet
o0 = "o0"
o1 = "o1"
o2 = "o2"
o3 = "o3"
assert transforms.cabinetise([], num_cabinets=0, frames_per_cabinet=0) == []
assert transforms.cabinetise(
[(o0, c(0,0)), (o1, c(1,0)), (o2, c(0,1)), (o3, c(1,1))],
num_cabinets=2, frames_per_cabinet=2, boards_per_frame=1) == \
[(o0, s(0,0,0)), (o1, s(1,0,0)), (o2, s(0,1,0)), (o3, s(1,1,0))]
def test_remove_gaps():
c = coordinates.Cabinet
o0 = "o0"
o1 = "o1"
o2 = "o2"
# Empty case
assert transforms.remove_gaps([]) == []
# Singletons (with and without need to move)
assert transforms.remove_gaps([(o0, c(0,0,0))]) == [(o0, c(0,0,0))]
assert transforms.remove_gaps([(o0, c(1,2,0))]) == [(o0, c(1,2,0))]
assert transforms.remove_gaps([(o0, c(1,2,3))]) == [(o0, c(1,2,0))]
# With and without gaps
assert set(transforms.remove_gaps(
[(o0, c(0,0,0)), (o1, c(0,0,1))])) ==\
set([(o0, c(0,0,0)), (o1, c(0,0,1))])
assert set(transforms.remove_gaps(
[(o0, c(0,0,0)), (o1, c(0,0,2))])) ==\
set([(o0, c(0,0,0)), (o1, c(0,0,1))])
assert set(transforms.remove_gaps(
[(o0, c(0,0,5)), (o1, c(0,0,2))])) ==\
set([(o0, c(0,0,1)), (o1, c(0,0,0))])
# Independent frames with restructuring needs
assert set(transforms.remove_gaps(
[(o0, c(1,0,5)), (o1, c(0,1,2))])) ==\
set([(o0, c(1,0,0)), (o1, c(0,1,0))])
assert set(transforms.remove_gaps(
[(o0, c(0,0,0)), (o1, c(0,0,3)), (o2, c(1,0,3))])) ==\
set([(o0, c(0,0,0)), (o1, c(0,0,1)), (o2, c(1,0,0))])
def test_cabinet_to_physical():
c = cabinet.Cabinet(**exact)
o0 = "o0"
o1 = "o1"
o2 = "o2"
o3 = "o3"
boards = transforms.cabinet_to_physical([(o0, coordinates.Cabinet(0, 0, 0)),
(o1, coordinates.Cabinet(0, 0, 1)),
(o2, coordinates.Cabinet(0, 1, 1)),
(o3, coordinates.Cabinet(1, 1, 1)),
], c)
b2c = dict(boards)
# Make sure all boards make it through
assert len(boards) == len(b2c)
assert set([o0, o1, o2, o3]) == set(b2c)
# Check all board positions
assert b2c[o0] == (42.0, 2.0, 2.0)
assert b2c[o1] == (40.5, 2.0, 2.0)
assert b2c[o2] == (40.5, 5.0, 2.0)
assert b2c[o3] == (14.0, 5.0, 2.0)
|
SpiNNakerManchester/SpiNNer
|
tests/test_transforms.py
|
Python
|
gpl-2.0
| 4,699
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 21:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('statusboard', '0015_merge_20170222_2058'),
]
operations = [
migrations.AddField(
model_name='service',
name='position',
field=models.PositiveIntegerField(default=0),
),
]
|
edigiacomo/django-statusboard
|
statusboard/migrations/0016_service_position.py
|
Python
|
gpl-2.0
| 464
|
import json
import sys
import urllib
keyword = sys.argv[1]
def getTimes(query,num):
"Questa funzione fa una ricerca su NY Times"
url = "http://query.nytimes.com/svc/cse/v2/sitesearch.json?query="+query.replace(" ","%20")+"&pt=article&page="+str(num)
jtext = urllib.urlopen(url)
return jtext
def search(term):
page_number = 0
meta = 1
while meta > 0 and page_number<1:
gt = getTimes(term,page_number)
resp = json.load(gt)
meta = int(resp['results']['meta']['payload'])
for res in resp['results']['results']:
print res['snippet']
headline = res['hdl']
# snippet = res['snippet']
# author = res['cre']
url = res['url']
print headline.encode('utf-8')
print url
page_number+=1
search(keyword)
|
exedre/webscraping-course-2014
|
esempi/esempio0_json.py
|
Python
|
gpl-2.0
| 845
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
from math import radians  # radians() is used below for the friction angle
## PhysicalParameters
Density=2400
frictionAngle=radians(35)
tc = 0.001
en = 0.3
es = 0.3
## Import wall's geometry
params=utils.getViscoelasticFromSpheresInteraction(tc,en,es)
facetMat=O.materials.append(ViscElMat(frictionAngle=frictionAngle,**params)) # **params sets kn, cn, ks, cs
sphereMat=O.materials.append(ViscElMat(density=Density,frictionAngle=frictionAngle,**params))
from woo import ymport
fctIds=O.bodies.append(ymport.stl('baraban.stl',color=(1,0,0),material=facetMat))
## Spheres
sphereRadius = 0.2
nbSpheres = (10,10,10)
#nbSpheres = (50,50,50)
for i in range(nbSpheres[0]):
for j in range(nbSpheres[1]):
for k in range(nbSpheres[2]):
x = (i*2 - nbSpheres[0])*sphereRadius*1.1
y = (j*2 - nbSpheres[1])*sphereRadius*1.1
z = (k*2 - nbSpheres[2])*sphereRadius*1.1
s=utils.sphere([x,y,z],sphereRadius,material=sphereMat)
O.bodies.append(s)
## Timestep
O.dt=.2*tc
## Engines
O.engines=[
## Resets forces and momenta the act on bodies
ForceResetter(),
## Using bounding boxes find possible body collisions.
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Facet_Aabb()]),
## Interactions
InteractionLoop(
## Create geometry information about each potential collision.
[Ig2_Sphere_Sphere_ScGeom(), Ig2_Facet_Sphere_ScGeom()],
## Create physical information about the interaction.
[Ip2_ViscElMat_ViscElMat_ViscElPhys()],
## Constitutive law
[Law2_ScGeom_ViscElPhys_Basic()],
),
## Apply gravity
GravityEngine(gravity=[0,-9.81,0]),
## Cundall damping must been disabled!
NewtonIntegrator(damping=0),
## Saving results
#VTKRecorder(virtPeriod=0.04,fileName='/tmp/stlimp-',recorders=['spheres','facets']),
## Apply kinematics to walls
RotationEngine(ids=fctIds,rotationAxis=[0,0,1],rotateAroundZero=True,angularVelocity=0.5)
]
from woo import qt
qt.View()
#O.saveTmp()
#O.run()
|
woodem/woo
|
obsolete/examples/baraban/baraban.py
|
Python
|
gpl-2.0
| 2,018
|
#!/usr/bin/python
import os
import subprocess
#http://178.62.51.54:13930/event=CREATE&login_name=henrik&pathway_name=test_commit.pml
def peos_notify(patient_id):
EXECUTION_PATH = "../peos/os/kernel/"
#Error constants
ERROR_USER_NOT_EXIST = 1
ERROR_SCRIPT_FAIL = 2
os.chdir(os.path.dirname(os.path.realpath(__file__)))
os.chdir(EXECUTION_PATH)
process = subprocess.Popen(["./peos", "-l", str(patient_id), "-u" ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
return output, error
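# Hypothetical invocation sketch (the patient id value is a placeholder):
#
#   output, error = peos_notify(42)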
|
thysol/CS4098
|
backend/peos_notify.py
|
Python
|
gpl-2.0
| 701
|