repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
Gagaro/django | refs/heads/master | django/contrib/postgres/aggregates/statistics.py | 493 | from django.db.models import FloatField, IntegerField
from django.db.models.aggregates import Aggregate
# Public aggregate classes exported by django.contrib.postgres.aggregates.
__all__ = [
    'CovarPop', 'Corr', 'RegrAvgX', 'RegrAvgY', 'RegrCount', 'RegrIntercept',
    'RegrR2', 'RegrSlope', 'RegrSXX', 'RegrSXY', 'RegrSYY', 'StatAggregate',
]
class StatAggregate(Aggregate):
    """Base class for PostgreSQL two-argument statistical aggregates.

    Wraps a pair of expressions (y, x); the output defaults to FloatField.
    """
    def __init__(self, y, x, output_field=None):
        if not x or not y:
            raise ValueError('Both y and x must be provided.')
        # Create the default output field lazily instead of sharing one
        # FloatField() instance (a mutable default argument evaluated once
        # at import time) between every aggregate ever constructed.
        if output_field is None:
            output_field = FloatField()
        super(StatAggregate, self).__init__(y=y, x=x, output_field=output_field)
        self.x = x
        self.y = y
        self.source_expressions = self._parse_expressions(self.y, self.x)

    def get_source_expressions(self):
        return self.y, self.x

    def set_source_expressions(self, exprs):
        self.y, self.x = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Deliberately bypasses Aggregate.resolve_expression (note the
        # super(Aggregate, ...)) -- presumably to skip Aggregate's own
        # source-expression handling; confirm against the Django version
        # in use before changing.
        return super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
class Corr(StatAggregate):
    """CORR: correlation coefficient of the (y, x) pairs."""
    function = 'CORR'
class CovarPop(StatAggregate):
    """Covariance aggregate: population by default, sample when requested."""

    def __init__(self, y, x, sample=False):
        # Pick the SQL function name before delegating to the base class.
        if sample:
            self.function = 'COVAR_SAMP'
        else:
            self.function = 'COVAR_POP'
        super(CovarPop, self).__init__(y, x)
class RegrAvgX(StatAggregate):
    """REGR_AVGX: average of the independent variable x."""
    function = 'REGR_AVGX'
class RegrAvgY(StatAggregate):
    """REGR_AVGY: average of the dependent variable y."""
    function = 'REGR_AVGY'
class RegrCount(StatAggregate):
    """REGR_COUNT: number of rows where both expressions are non-null."""
    function = 'REGR_COUNT'
    def __init__(self, y, x):
        # Row counts are integers, not floats like the other statistics.
        super(RegrCount, self).__init__(y=y, x=x, output_field=IntegerField())
    def convert_value(self, value, expression, connection, context):
        # The database returns NULL when no rows qualify; report 0 instead.
        if value is None:
            return 0
        return int(value)
class RegrIntercept(StatAggregate):
    """REGR_INTERCEPT: y-intercept of the least-squares fit."""
    function = 'REGR_INTERCEPT'
class RegrR2(StatAggregate):
    """REGR_R2: square of the correlation coefficient."""
    function = 'REGR_R2'
class RegrSlope(StatAggregate):
    """REGR_SLOPE: slope of the least-squares fit."""
    function = 'REGR_SLOPE'
class RegrSXX(StatAggregate):
    """REGR_SXX: sum of squares of the independent variable."""
    function = 'REGR_SXX'
class RegrSXY(StatAggregate):
    """REGR_SXY: sum of products of independent times dependent variable."""
    function = 'REGR_SXY'
class RegrSYY(StatAggregate):
    """REGR_SYY: sum of squares of the dependent variable."""
    function = 'REGR_SYY'
|
oleg-cherednik/hackerrank | refs/heads/master | Python/Basic Data Types/Nested Lists/solution.py | 1 | #!/bin/python3
if __name__ == '__main__':
    # Read one (name, score) record per student from stdin.
    data = []
    for _ in range(int(input())):
        name = input()
        score = float(input())
        data.append([name, score])
    # Second lowest *distinct* score.  A set comprehension replaces the
    # redundant sorted(list(set([...]))) chain of the original.
    mark = sorted({marks for name, marks in data})[1]
    # Records are [name, score], so sorting the list orders names
    # alphabetically; print every student with the second-lowest score.
    data = sorted(data)
    for item in data:
        if item[1] == mark:
            print(item[0])
|
eJRF/ejrf | refs/heads/master | questionnaire/features/pages/sections.py | 1 | from questionnaire.features.pages.base import PageObject
class CreateSectionPage(PageObject):
    """Page object for the questionnaire "new section" form."""

    def __init__(self, browser, questionnaire):
        super(CreateSectionPage, self).__init__(browser)
        self.url = '/questionnaire/entry/%s/section/new/' % questionnaire.id

    def verify_current_position_of_section(self, position):
        # IE serializes the selected <option> with a different attribute
        # order than the other browsers, so match both renderings.
        if self.browser.driver.name == 'internet explorer':
            self.assert_page_html_contains(
                "<option selected=\"selected\" value=\"%s\">%s</option>" % (position, position))
        else:
            self.assert_page_html_contains(
                "<option value=\"%s\" selected=\"selected\">%s</option>" % (position, position))
class CreateSubSectionPage(PageObject):
    """Page object for the "new subsection" form of a questionnaire section."""

    def __init__(self, browser, questionnaire, section):
        super(CreateSubSectionPage, self).__init__(browser)
        self.url = '/questionnaire/entry/%s/section/%s/subsection/new/' % (questionnaire.id, section.id)
aequitas/home-assistant | refs/heads/dev | homeassistant/components/picotts/tts.py | 7 | """Support for the Pico TTS speech service."""
import logging
import os
import shutil
import subprocess
import tempfile
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
_LOGGER = logging.getLogger(__name__)
# Languages accepted by the pico2wave binary.
SUPPORT_LANGUAGES = ['en-US', 'en-GB', 'de-DE', 'es-ES', 'fr-FR', 'it-IT']
DEFAULT_LANG = 'en-US'
# Extend the shared TTS platform schema with a validated language option.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
})
def get_engine(hass, config):
    """Set up Pico speech component."""
    # pico2wave must be on PATH; abort platform setup otherwise.
    if shutil.which("pico2wave") is None:
        _LOGGER.error("'pico2wave' was not found")
        return False
    return PicoProvider(config[CONF_LANG])
class PicoProvider(Provider):
    """The Pico TTS API provider."""

    def __init__(self, lang):
        """Initialize Pico TTS provider."""
        self._lang = lang
        self.name = 'PicoTTS'

    @property
    def default_language(self):
        """Return the default language."""
        return self._lang

    @property
    def supported_languages(self):
        """Return list of supported languages."""
        return SUPPORT_LANGUAGES

    def get_tts_audio(self, message, language, options=None):
        """Load TTS using pico2wave."""
        # Reserve a temp filename; delete=False so the file survives the
        # with-block for pico2wave to write into.
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmpf:
            fname = tmpf.name
        cmd = ['pico2wave', '--wave', fname, '-l', language, message]
        # NOTE(review): the subprocess return code is not checked; failure
        # is only detected indirectly via empty output data below.
        subprocess.call(cmd)
        data = None
        try:
            with open(fname, 'rb') as voice:
                data = voice.read()
        except OSError:
            _LOGGER.error("Error trying to read %s", fname)
            return (None, None)
        finally:
            # NOTE(review): if open() failed because the file was never
            # created, this remove() will itself raise -- confirm pico2wave
            # always creates the file.
            os.remove(fname)
        if data:
            return ("wav", data)
        return (None, None)
|
faircoin/faircoin | refs/heads/master | share/rpcuser/rpcuser.py | 115 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
    sys.stderr.write('Please include username as an argument.\n')
    sys.exit(0)
username = sys.argv[1]
# SystemRandom uses os.urandom() underneath
cryptogen = SystemRandom()
# Create 16 byte hex salt.  Each byte is zero-padded to two hex digits:
# the previous hex()/x[2:] approach dropped leading zeros, producing
# salts of varying (shorter) length for byte values < 0x10.
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
salt = "".join('{0:02x}'.format(b) for b in salt_sequence)
# Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
    # On Python 3 hmac.new() accepts the digest by name and the password
    # must be text before being re-encoded below.
    password = password.decode('utf-8')
    digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to bitcoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
|
sbtlaarzc/vispy | refs/heads/master | vispy/visuals/image.py | 3 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ..gloo import Texture2D, VertexBuffer
from ..color import get_colormap
from .shaders import Function, FunctionChain
from .transforms import NullTransform
from .visual import Visual
from ..ext.six import string_types
VERT_SHADER = """
uniform int method; // 0=subdivide, 1=impostor
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = $transform(vec4(a_position, 0., 1.));
}
"""
FRAG_SHADER = """
uniform vec2 image_size;
uniform int method; // 0=subdivide, 1=impostor
uniform sampler2D u_texture;
varying vec2 v_texcoord;
vec4 map_local_to_tex(vec4 x) {
// Cast ray from 3D viewport to surface of image
// (if $transform does not affect z values, then this
// can be optimized as simply $transform.map(x) )
vec4 p1 = $transform(x);
vec4 p2 = $transform(x + vec4(0, 0, 0.5, 0));
p1 /= p1.w;
p2 /= p2.w;
vec4 d = p2 - p1;
float f = p2.z / d.z;
vec4 p3 = p2 - d * f;
// finally map local to texture coords
return vec4(p3.xy / image_size, 0, 1);
}
void main()
{
vec2 texcoord;
if( method == 0 ) {
texcoord = v_texcoord;
}
else {
// vertex shader ouptuts clip coordinates;
// fragment shader maps to texture coordinates
texcoord = map_local_to_tex(vec4(v_texcoord, 0, 1)).xy;
}
gl_FragColor = $color_transform($get_data(texcoord));
}
""" # noqa
_null_color_transform = 'vec4 pass(vec4 color) { return color; }'
_c2l = 'float cmap(vec4 color) { return (color.r + color.g + color.b) / 3.; }'
_texture_lookup = """
vec4 texture_lookup(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return texture2D($texture, texcoord);
}"""
class ImageVisual(Visual):
    """Visual subclass displaying an image.

    Parameters
    ----------
    data : ndarray
        ImageVisual data. Can be shape (M, N), (M, N, 3), or (M, N, 4).
    method : str
        Selects method of rendering image in case of non-linear transforms.
        Each method produces similar results, but may trade efficiency
        and accuracy. If the transform is linear, this parameter is ignored
        and a single quad is drawn around the area of the image.

        * 'auto': Automatically select 'impostor' if the image is drawn
          with a nonlinear transform; otherwise select 'subdivide'.
        * 'subdivide': ImageVisual is represented as a grid of triangles
          with texture coordinates linearly mapped.
        * 'impostor': ImageVisual is represented as a quad covering the
          entire view, with texture coordinates determined by the
          transform. This produces the best transformation results, but may
          be slow.
    grid : tuple (rows, cols)
        If method='subdivide', this tuple determines the number of rows and
        columns in the image grid.
    cmap : str | ColorMap
        Colormap to use for luminance images.
    clim : str | tuple
        Limits to use for the colormap. Can be 'auto' to auto-set bounds to
        the min and max of the data.
    **kwargs : dict
        Keyword arguments to pass to `Visual`.

    Notes
    -----
    The colormap functionality through ``cmap`` and ``clim`` are only used
    if the data are 2D.
    """

    def __init__(self, data=None, method='auto', grid=(1, 1),
                 cmap='cubehelix', clim='auto', **kwargs):
        self._data = None
        self._interpolation = 'nearest'
        self._method = method
        self._grid = grid
        # Dirty flags consumed by _prepare_draw(); property setters flip
        # these instead of rebuilding GPU state eagerly.
        self._need_texture_upload = True
        self._need_vertex_update = True
        self._need_colortransform_update = True
        self._texture = Texture2D(np.zeros((1, 1, 4)),
                                  interpolation=self._interpolation)
        self._subdiv_position = VertexBuffer()
        self._subdiv_texcoord = VertexBuffer()
        # impostor quad covers entire viewport
        vertices = np.array([[-1, -1], [1, -1], [1, 1],
                             [-1, -1], [1, 1], [-1, 1]],
                            dtype=np.float32)
        self._impostor_coords = VertexBuffer(vertices)
        self._null_tr = NullTransform()
        # The visual acts as its own first "view".
        self._init_view(self)
        super(ImageVisual, self).__init__(vcode=VERT_SHADER, fcode=FRAG_SHADER)
        self.set_gl_state('translucent', cull_face=False)
        self._draw_mode = 'triangles'
        # by default, this visual pulls data from a texture
        self._data_lookup_fn = Function(_texture_lookup)
        self.shared_program.frag['get_data'] = self._data_lookup_fn
        self._data_lookup_fn['texture'] = self._texture
        self.clim = clim
        self.cmap = cmap
        if data is not None:
            self.set_data(data)
        self.freeze()

    def set_data(self, image):
        """Set the data

        Parameters
        ----------
        image : array-like
            The image data.
        """
        data = np.asarray(image)
        if self._data is None or self._data.shape != data.shape:
            # A shape change moves the quad grid, so vertices must be rebuilt.
            self._need_vertex_update = True
        self._data = data
        self._need_texture_upload = True

    def view(self):
        """Return a new view of this visual with per-view state attached."""
        v = Visual.view(self)
        self._init_view(v)
        return v

    def _init_view(self, view):
        # Store some extra variables per-view
        view._need_method_update = True
        view._method_used = None

    @property
    def clim(self):
        """Color limits: 'auto' or a (min, max) tuple."""
        return (self._clim if isinstance(self._clim, string_types) else
                tuple(self._clim))

    @clim.setter
    def clim(self, clim):
        if isinstance(clim, string_types):
            if clim != 'auto':
                raise ValueError('clim must be "auto" if a string')
        else:
            clim = np.array(clim, float)
            if clim.shape != (2,):
                raise ValueError('clim must have two elements')
        self._clim = clim
        # clim is applied to the data on the CPU in _build_texture(), so a
        # texture re-upload is required for a new clim to take effect.
        # (The old code flagged _need_vertex_update here, which rebuilt the
        # vertex grid but left the texture -- and thus the displayed
        # limits -- stale.)
        self._need_texture_upload = True
        self.update()

    @property
    def cmap(self):
        """Colormap applied to 2-D (luminance) data."""
        return self._cmap

    @cmap.setter
    def cmap(self, cmap):
        self._cmap = get_colormap(cmap)
        self._need_colortransform_update = True
        self.update()

    @property
    def method(self):
        """Rendering method: 'auto', 'subdivide' or 'impostor'."""
        return self._method

    @method.setter
    def method(self, m):
        if self._method != m:
            self._method = m
            self._need_vertex_update = True
            self.update()

    @property
    def size(self):
        """Image size as (width, height) in pixels."""
        return self._data.shape[:2][::-1]

    def _build_vertex_data(self):
        """Rebuild the vertex buffers used for rendering the image when using
        the subdivide method.
        """
        grid = self._grid
        w = 1.0 / grid[1]
        h = 1.0 / grid[0]
        # One cell (two triangles) in normalized [0, 1] texture space.
        quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
                         [0, 0, 0], [w, h, 0], [0, h, 0]],
                        dtype=np.float32)
        quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
        quads[:] = quad
        # Offset each cell to its position in the grid.
        mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
        mgrid = mgrid[:, :, np.newaxis, :]
        mgrid[..., 0] *= w
        mgrid[..., 1] *= h
        quads[..., :2] += mgrid
        tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
        tex_coords = np.ascontiguousarray(tex_coords[:, :2])
        # Vertex positions are the texture coordinates scaled to pixel size.
        vertices = tex_coords * self.size
        self._subdiv_position.set_data(vertices.astype('float32'))
        self._subdiv_texcoord.set_data(tex_coords.astype('float32'))

    def _update_method(self, view):
        """Decide which method to use for *view* and configure it accordingly.
        """
        method = self._method
        if method == 'auto':
            if view.transforms.get_transform().Linear:
                method = 'subdivide'
            else:
                method = 'impostor'
        view._method_used = method
        if method == 'subdivide':
            view.view_program['method'] = 0
            view.view_program['a_position'] = self._subdiv_position
            view.view_program['a_texcoord'] = self._subdiv_texcoord
        elif method == 'impostor':
            view.view_program['method'] = 1
            view.view_program['a_position'] = self._impostor_coords
            view.view_program['a_texcoord'] = self._impostor_coords
        else:
            raise ValueError("Unknown image draw method '%s'" % method)
        self.shared_program['image_size'] = self.size
        view._need_method_update = False
        self._prepare_transforms(view)

    def _build_color_transform(self):
        """Attach the fragment color transform: colormap lookup for 2-D
        data, pass-through for RGB(A)."""
        data = self._data
        if data.ndim == 2 or data.shape[2] == 1:
            # luminance -> colormap
            fun = FunctionChain(None, [Function(_c2l),
                                       Function(self._cmap.glsl_map)])
        else:
            fun = Function(_null_color_transform)
        self.shared_program.frag['color_transform'] = fun
        self._need_colortransform_update = False

    def _build_texture(self):
        """Normalize 2-D data by clim on the CPU and upload to the texture."""
        data = self._data
        if data.dtype == np.float64:
            data = data.astype(np.float32)
        if data.ndim == 2 or data.shape[2] == 1:
            # deal with clim on CPU b/c of texture depth limits :(
            # can eventually do this by simulating 32-bit float... maybe
            clim = self._clim
            if isinstance(clim, string_types) and clim == 'auto':
                clim = np.min(data), np.max(data)
            clim = np.asarray(clim, dtype=np.float32)
            data = data - clim[0]  # not inplace so we don't modify orig data
            if clim[1] - clim[0] > 0:
                data /= clim[1] - clim[0]
            else:
                # degenerate clim: map constant data to all-0 or all-1
                data[:] = 1 if data[0, 0] != 0 else 0
            self._clim = np.array(clim)
        self._texture.set_data(data)
        self._need_texture_upload = False

    def _compute_bounds(self, axis, view):
        """Image bounds along *axis* in local (pixel) coordinates."""
        if axis > 1:
            return (0, 0)
        else:
            return (0, self.size[axis])

    def _prepare_transforms(self, view):
        """Wire the vertex/fragment transforms for the active method."""
        trs = view.transforms
        prg = view.view_program
        method = view._method_used
        if method == 'subdivide':
            # Vertices are in local coords; fragment stage needs no mapping.
            prg.vert['transform'] = trs.get_transform()
            prg.frag['transform'] = self._null_tr
        else:
            # Impostor quad is already in clip coords; the fragment shader
            # maps clip -> local via the inverse transform.
            prg.vert['transform'] = self._null_tr
            prg.frag['transform'] = trs.get_transform().inverse

    def _prepare_draw(self, view):
        """Flush pending rebuilds before drawing; abort if there is no data."""
        if self._data is None:
            return False
        if self._need_texture_upload:
            self._build_texture()
        if self._need_colortransform_update:
            self._build_color_transform()
        if self._need_vertex_update:
            self._build_vertex_data()
        if view._need_method_update:
            self._update_method(view)
|
rhertzog/django | refs/heads/master | tests/defer/models.py | 282 | """
Tests for defer() and only().
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Secondary(models.Model):
    """Target model for Primary.related."""
    first = models.CharField(max_length=50)
    second = models.CharField(max_length=50)

@python_2_unicode_compatible
class Primary(models.Model):
    """Main model exercised by the defer()/only() tests."""
    name = models.CharField(max_length=50)
    value = models.CharField(max_length=50)
    related = models.ForeignKey(Secondary, models.CASCADE)

    def __str__(self):
        return self.name

class Child(Primary):
    """Multi-table-inheritance child with no extra fields."""
    pass

class BigChild(Primary):
    """Multi-table-inheritance child adding one field."""
    other = models.CharField(max_length=50)

class ChildProxy(Child):
    """Proxy of Child (no new table)."""
    class Meta:
        proxy = True
class RefreshPrimaryProxy(Primary):
    """Proxy whose refresh_from_db() also reloads deferred fields."""
    class Meta:
        proxy = True

    def refresh_from_db(self, using=None, fields=None, **kwargs):
        # Reloads all deferred fields if any of the fields is deferred.
        if fields is not None:
            fields = set(fields)
            deferred_fields = self.get_deferred_fields()
            if fields.intersection(deferred_fields):
                fields = fields.union(deferred_fields)
        super(RefreshPrimaryProxy, self).refresh_from_db(using, fields, **kwargs)
|
rastrexando-eu/rastrexando-eu | refs/heads/master | web/tests/test_views.py | 1 | import datetime as dt
import pytest
from core import factories
@pytest.mark.django_db
def test_landing(client):
    # Landing page lists accepted rastrexos and embeds JSON-LD metadata.
    factories.RastrexoFactory(name='I Rastrexo de Couso', date=dt.date.today(), status='ACCEPTED')
    response = client.get('/')
    assert 200 == response.status_code
    assert 'I Rastrexo de Couso' in response.rendered_content
    assert '<script type="application/ld+json">[{"name":"I Rastrexo de Couso"' in response.rendered_content

@pytest.mark.django_db
def test_sitemap(client):
    # Sitemap contains accepted (and cancelled) events but never rejected ones.
    # NOTE(review): asserting r4 (CANCELLED) *is* present looks intentional --
    # confirm cancelled events should stay in the sitemap.
    year = dt.datetime.now().year
    r1 = factories.RastrexoFactory(name='Rastrexo Star Wars', date=dt.date(year - 1, 1, 1), status='ACCEPTED')
    r2 = factories.RastrexoFactory(name='I Rastrexo de Couso', date=dt.date(year, 1, 1), status='ACCEPTED')
    r3 = factories.RastrexoFactory(name='Rastrexo Rexeitado', date=dt.date(year, 1, 1), status='REJECTED')
    r4 = factories.RastrexoFactory(name='Rastrexo Cancelado', date=dt.date(year, 1, 1), status='CANCELLED')
    response = client.get('/sitemap.xml')
    assert 200 == response.status_code
    assert '/buscar/' in response.rendered_content
    assert r1.slug in response.rendered_content
    assert r2.slug in response.rendered_content
    assert r3.slug not in response.rendered_content
    assert r4.slug in response.rendered_content

@pytest.mark.django_db
def test_rastrexo_detail_view_404_with_invalid_status(client):
    # Rejected rastrexos must not be reachable at their detail URL.
    r = factories.RastrexoFactory(name='Rastrexo Rexeitado', date=dt.date(2017, 1, 1), status='REJECTED')
    response = client.get(r.get_absolute_url())
    assert 404 == response.status_code

@pytest.mark.django_db
def test_rastrexo_detail_view(client):
    # Accepted rastrexos render their detail page with the event name.
    r = factories.RastrexoFactory(name='Rastrexo de Couso', date=dt.date(2017, 1, 1), status='ACCEPTED')
    response = client.get(r.get_absolute_url())
    assert 200 == response.status_code
    assert r.name in response.rendered_content
@pytest.mark.django_db
def test_search_view(client):
    # Search matches the city case-insensitively and excludes other cities.
    today = dt.date.today()
    factories.RastrexoFactory(name='Rastrexo de Couso', city='Gondomar', date=today)
    factories.RastrexoFactory(name='Rastrexo Infantil', city='Gondomar', date=today)
    factories.RastrexoFactory(name='Rastrexo Cristelos', city='Tomiño', date=today)
    # Plain string literal: the previous f-string had no placeholders (F541).
    response = client.get('/buscar/?q=gondomar')
    assert 200 == response.status_code
    assert 'Rastrexo de Couso' in response.rendered_content
    assert 'Rastrexo Infantil' in response.rendered_content
    assert 'Rastrexo Cristelos' not in response.rendered_content
|
SGCreations/Flask | refs/heads/master | Work/Trivia - Module 5/env/Lib/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py | 1731 | from __future__ import absolute_import, division, unicode_literals
from . import _base
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class Filter(_base.Filter):
    """Emit each start/empty tag with its attributes sorted alphabetically."""

    def __iter__(self):
        for token in _base.Filter.__iter__(self):
            if token["type"] in ("StartTag", "EmptyTag"):
                # Rebuild the attribute mapping in key order.
                ordered = OrderedDict()
                data = token["data"]
                for key in sorted(data):
                    ordered[key] = data[key]
                token["data"] = ordered
            yield token
|
sih4sing5hong5/environment_education | refs/heads/master | 題庫/介面.py | 1 |
import json
from django.contrib.auth.decorators import login_required
from django.forms.models import model_to_dict
from django.http.response import JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from 題庫.models import xls檔案表
from 題庫.models import 作答紀錄表
@login_required(login_url='/accounts/login')
def 練習(request):
    # Practice view: render the quiz page with a random pick of questions
    # from the newest uploaded xls file.
    網址 = '題庫/作答.html'
    return render(request, 網址, {
        '名': request.user.last_name + request.user.first_name,
        '題目陣列': xls檔案表.上新的檔案().隨機揀題號()
    })
@login_required(login_url='/accounts/login')
def 送出答案(request):
    # Grade the submitted answers against the newest question file, record
    # the result, then redirect to the results page.
    xls檔案 = xls檔案表.上新的檔案()
    答對 = []
    答錯 = []
    for (題號, 選的答案) in _提出題號佮答案(request.POST):
        if xls檔案.題號(題號).答案 == int(選的答案):
            答對.append(題號)
        else:
            答錯.append(題號)
    作答紀錄表.試驗結果(request.user, xls檔案, 答錯, 答對)
    return redirect('看作答紀錄')
@login_required(login_url='/accounts/login')
def 看作答紀錄(request):
    # Show the current user's answer history (admins also see everyone's).
    網址 = '題庫/作答結果.html'
    return render(request, 網址, {
        '名': request.user.last_name + request.user.first_name,
        '作答狀況陣列': _管理員看著的作答狀況(request.user),
        '作答紀錄陣列': 作答紀錄表.揣出作答紀錄(request.user),
    })
@login_required(login_url='/accounts/login')
def 看解釋(request, 題號):
    # Show the explanation page for one question.
    xls檔案 = xls檔案表.上新的檔案()
    網址 = '題庫/解釋.html'
    return render(request, 網址, {
        '名': request.user.last_name + request.user.first_name,
        '題目': xls檔案.題號(題號),
    })
@login_required(login_url='/accounts/login')
def 搶答題目(request):
    # JSON endpoint: all questions (randomized) for the quick-answer game.
    全部 = []
    for 題目 in xls檔案表.上新的檔案().隨機揀題號():
        全部.append(model_to_dict(題目))
    return JsonResponse({'全部題目': 全部})
@csrf_exempt
@login_required(login_url='/accounts/login')
def 送出搶答(request):
    # JSON endpoint: record quick-answer results posted by the client.
    # NOTE(review): csrf_exempt on a state-changing endpoint -- confirm
    # this is intentional.
    xls檔案 = xls檔案表.上新的檔案()
    答對=json.loads(request.POST['答對'])
    答錯=json.loads(request.POST['答錯'])
    作答紀錄表.試驗結果(request.user, xls檔案, 答錯, 答對)
    return JsonResponse({'結果': '好'})
def _管理員看著的作答狀況(user):
    # Only the two hard-coded administrator accounts see global results.
    if (user.email) in ('ecologist0721@yahoo.com.tw', 'ihcaoe@gmail.com'):
        return 作答紀錄表.揣出全部作答狀況()
    return []
def _提出題號佮答案(POST):
    """Yield (question id, chosen answer) pairs from the POSTed form data.

    Missing form fields yield -1 so the grader counts them as wrong.
    Catches KeyError specifically (Django's MultiValueDictKeyError is a
    subclass) instead of the previous bare ``except:``, which would have
    hidden unrelated errors such as KeyboardInterrupt.
    """
    for 第幾個 in range(xls檔案表.揀題目數量):
        try:
            題號 = POST['id[{}]'.format(第幾個)]
        except KeyError:
            題號 = -1
        try:
            選的答案 = POST['ans[{}]'.format(第幾個)]
        except KeyError:
            選的答案 = -1
        yield (題號, 選的答案)
|
nylas/sync-engine | refs/heads/master | migrations/versions/068_outlook.py | 9 | """outlook
Revision ID: 1ceff61ec112
Revises: 2e6120c97485
Create Date: 2014-07-22 10:17:33.115621
"""
# revision identifiers, used by Alembic.
revision = '1ceff61ec112'
# NOTE(review): the module docstring says this revises 2e6120c97485, but
# down_revision points at 322c2800c401 -- likely rebased; confirm which
# parent is correct.
down_revision = '322c2800c401'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the outlookaccount table and widen secret.secret to 2048."""
    # Argument order is preserved exactly: it determines the DDL column order.
    op.create_table(
        'outlookaccount',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['id'], [u'imapaccount.id'],
                                ondelete='CASCADE'),
        sa.Column('refresh_token_id', sa.Integer(), nullable=True),
        sa.Column('scope', sa.String(length=512), nullable=True),
        sa.Column('locale', sa.String(length=8), nullable=True),
        sa.Column('client_id', sa.String(length=256), nullable=True),
        sa.Column('client_secret', sa.String(length=256), nullable=True),
        sa.Column('o_id', sa.String(length=32), nullable=True),
        sa.Column('o_id_token', sa.String(length=1024), nullable=True),
        sa.Column('link', sa.String(length=256), nullable=True),
        sa.Column('name', sa.String(length=256), nullable=True),
        sa.Column('gender', sa.String(length=16), nullable=True),
        sa.Column('family_name', sa.String(length=256), nullable=True),
        sa.Column('given_name', sa.String(length=256), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
    op.alter_column('secret', 'secret', type_=sa.String(length=2048))
def downgrade():
    """Drop the outlookaccount table (secret.secret width is not reverted)."""
    op.drop_table('outlookaccount')
|
dan-blanchard/thriftpy | refs/heads/master | thriftpy/rpc.py | 22 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import contextlib
import warnings
from thriftpy.protocol import TBinaryProtocolFactory
from thriftpy.server import TThreadedServer
from thriftpy.thrift import TProcessor, TClient
from thriftpy.transport import (
TBufferedTransportFactory,
TServerSocket,
TSocket,
)
def make_client(service, host="localhost", port=9090, unix_socket=None,
                proto_factory=TBinaryProtocolFactory(),
                trans_factory=TBufferedTransportFactory(),
                timeout=None):
    """Create, open and return a TClient for *service*.

    Connects via TCP (host/port) or a unix domain socket; raises
    ValueError when neither endpoint is given.
    NOTE(review): the factory defaults are evaluated once at import time,
    so every call shares the same factory instances -- confirm the
    factories are stateless.
    """
    if unix_socket:
        socket = TSocket(unix_socket=unix_socket)
    elif host and port:
        socket = TSocket(host, port, socket_timeout=timeout)
    else:
        raise ValueError("Either host/port or unix_socket must be provided.")
    transport = trans_factory.get_transport(socket)
    protocol = proto_factory.get_protocol(transport)
    transport.open()
    return TClient(service, protocol)
def make_server(service, handler,
                host="localhost", port=9090, unix_socket=None,
                proto_factory=TBinaryProtocolFactory(),
                trans_factory=TBufferedTransportFactory()):
    """Build (but do not start) a TThreadedServer for *service*/*handler*.

    Listens on TCP (host/port) or a unix domain socket; raises ValueError
    when neither endpoint is given.
    """
    processor = TProcessor(service, handler)
    if unix_socket:
        server_socket = TServerSocket(unix_socket=unix_socket)
    elif host and port:
        server_socket = TServerSocket(host=host, port=port)
    else:
        raise ValueError("Either host/port or unix_socket must be provided.")
    server = TThreadedServer(processor, server_socket,
                             iprot_factory=proto_factory,
                             itrans_factory=trans_factory)
    return server
@contextlib.contextmanager
def client_context(service, host="localhost", port=9090, unix_socket=None,
                   proto_factory=TBinaryProtocolFactory(),
                   trans_factory=TBufferedTransportFactory(),
                   timeout=3000, socket_timeout=3000, connect_timeout=None):
    """Context manager yielding a connected TClient; closes it on exit.

    Prefer *socket_timeout*/*connect_timeout*; the legacy *timeout*
    argument overrides both and triggers a deprecation warning.
    """
    if timeout:
        warnings.warn("`timeout` deprecated, use `socket_timeout` and "
                      "`connect_timeout` instead.")
        socket_timeout = connect_timeout = timeout
    if unix_socket:
        socket = TSocket(unix_socket=unix_socket,
                         connect_timeout=connect_timeout,
                         socket_timeout=socket_timeout)
    elif host and port:
        socket = TSocket(host, port,
                         connect_timeout=connect_timeout,
                         socket_timeout=socket_timeout)
    else:
        raise ValueError("Either host/port or unix_socket must be provided.")
    # Build transport/protocol *outside* the try block: previously, if
    # get_transport()/get_protocol() raised, the ``finally`` clause hit a
    # NameError on the unbound ``transport`` and masked the real error.
    transport = trans_factory.get_transport(socket)
    protocol = proto_factory.get_protocol(transport)
    try:
        transport.open()
        yield TClient(service, protocol)
    finally:
        transport.close()
|
scorphus/thefuck | refs/heads/master | tests/rules/test_grep_arguments_order.py | 5 | import pytest
from thefuck.rules.grep_arguments_order import get_new_command, match
from thefuck.types import Command
# Builds grep's "No such file or directory" message for a given path.
output = 'grep: {}: No such file or directory'.format

@pytest.fixture(autouse=True)
def os_path(monkeypatch):
    # Pretend every non-option argument is an existing file.
    monkeypatch.setattr('os.path.isfile', lambda x: not x.startswith('-'))

@pytest.mark.parametrize('script, file', [
    ('grep test.py test', 'test'),
    ('grep -lir . test', 'test'),
    ('egrep test.py test', 'test'),
    ('egrep -lir . test', 'test')])
def test_match(script, file):
    assert match(Command(script, output(file)))

# NOTE: the `output` parameter below shadows the module-level helper; the
# parametrize lists still reference the helper because they are evaluated
# at collection time.
@pytest.mark.parametrize('script, output', [
    ('cat test.py', output('test')),
    ('grep test test.py', ''),
    ('grep -lir test .', ''),
    ('egrep test test.py', ''),
    ('egrep -lir test .', '')])
def test_not_match(script, output):
    assert not match(Command(script, output))

@pytest.mark.parametrize('script, output, result', [
    ('grep test.py test', output('test'), 'grep test test.py'),
    ('grep -lir . test', output('test'), 'grep -lir test .'),
    ('grep . test -lir', output('test'), 'grep test -lir .'),
    ('egrep test.py test', output('test'), 'egrep test test.py'),
    ('egrep -lir . test', output('test'), 'egrep -lir test .'),
    ('egrep . test -lir', output('test'), 'egrep test -lir .')])
def test_get_new_command(script, output, result):
    assert get_new_command(Command(script, output)) == result
|
Glasgow2015/team-10 | refs/heads/master | env/lib/python2.7/site-packages/pip/commands/search.py | 344 | import sys
import textwrap
import pip.download
from pip.basecommand import Command, SUCCESS
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from pip.status_codes import NO_MATCHES_FOUND
from pip._vendor import pkg_resources
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
    """Search for PyPI packages whose name or summary contains <query>."""
    name = 'search'
    usage = """
      %prog [options] <query>"""
    summary = 'Search PyPI for packages.'

    def __init__(self, *args, **kw):
        super(SearchCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='https://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')
        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # args is the raw list of query terms; at least one is required.
        if not args:
            raise CommandError('Missing required argument (search query).')
        query = args
        index_url = options.index
        pypi_hits = self.search(query, index_url)
        hits = transform_hits(pypi_hits)
        # Only wrap the output when attached to a terminal.
        terminal_width = None
        if sys.stdout.isatty():
            terminal_width = get_terminal_size()[0]
        print_results(hits, terminal_width=terminal_width)
        if pypi_hits:
            return SUCCESS
        return NO_MATCHES_FOUND

    def search(self, query, index_url):
        # XML-RPC search against the index; matches name OR summary.
        pypi = xmlrpclib.ServerProxy(index_url)
        hits = pypi.search({'name': query, 'summary': query}, 'or')
        return hits
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        summary = hit['summary']
        version = hit['version']
        score = hit['_pypi_ordering']
        # Treat a missing ordering as the lowest possible score.
        if score is None:
            score = 0
        # Membership test on the dict itself: the old ``packages.keys()``
        # built a list on Python 2, making each lookup O(n).
        if name not in packages:
            packages[name] = {'name': name, 'summary': summary,
                              'versions': [version], 'score': score}
        else:
            packages[name]['versions'].append(version)
            # if this is the highest version, replace summary and score
            if version == highest_version(packages[name]['versions']):
                packages[name]['summary'] = summary
                packages[name]['score'] = score
    # each record has a unique name now, so we will convert the dict into a
    # list sorted by score, highest first
    package_list = sorted(packages.values(), key=lambda x: x['score'],
                          reverse=True)
    return package_list
def print_results(hits, name_column_width=25, terminal_width=None):
    """Print one line per hit, annotating packages installed locally."""
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        if terminal_width is not None:
            # wrap and indent summary to fit terminal
            summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
            summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%s - %s' % (name.ljust(name_column_width), summary)
        try:
            logger.notify(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                logger.indent += 2
                try:
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.notify('INSTALLED: %s (latest)' % dist.version)
                    else:
                        logger.notify('INSTALLED: %s' % dist.version)
                        logger.notify('LATEST: %s' % latest)
                finally:
                    logger.indent -= 2
        except UnicodeEncodeError:
            # Skip lines the terminal's encoding cannot represent.
            pass
def compare_versions(version1, version2):
    """cmp()-style comparison of two version strings.

    Tries strict (PEP 386) ordering first, then degrades gracefully to
    loose ordering and finally to plain string comparison.
    """
    try:
        return cmp(StrictVersion(version1), StrictVersion(version2))
    # in case of abnormal version number, fall back to LooseVersion
    except ValueError:
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # certain LooseVersion comparions raise due to unorderable types,
        # fallback to string comparison
        return cmp([str(v) for v in LooseVersion(version1).version],
                   [str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
    """Return the highest of *versions* according to compare_versions().

    Uses a real conditional expression instead of the old
    ``cond and v1 or v2`` idiom, which wrongly selected the second
    operand whenever the winning version string was falsy (e.g. '').
    """
    return reduce(lambda v1, v2: v1 if compare_versions(v1, v2) == 1 else v2,
                  versions)
|
rpiotti/glances | refs/heads/master | glances/plugins/glances_docker.py | 11 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <nicolas@nicolargo.com>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Docker plugin."""
import numbers
import os
import re
# Import Glances libs
from glances.core.glances_logging import logger
from glances.core.glances_timer import getTimeSinceLastUpdate
from glances.plugins.glances_plugin import GlancesPlugin
# Docker-py library (optional and Linux-only)
# https://github.com/docker/docker-py
try:
    import docker
    import requests
except ImportError as e:
    logger.debug("Docker library not found (%s). Glances cannot grab Docker info." % e)
    docker_tag = False
else:
    # Flag read by the plugin to disable itself gracefully when the
    # optional docker-py dependency is missing.
    docker_tag = True
class Plugin(GlancesPlugin):
    """Glances Docker plugin.

    Grabs version, container list, and per-container CPU/MEM stats from
    the local Docker daemon through the docker-py library.

    stats is a dict: {'version': {...}, 'containers': [...]}
    """

    def __init__(self, args=None):
        """Init the plugin."""
        GlancesPlugin.__init__(self, args=args)
        # The plugin can be disabled using: args.disable_docker
        self.args = args
        # We want to display the stat in the curse interface
        self.display_curse = True
        # Init the Docker API client lazily; ``False`` means "not tried yet",
        # ``None`` (set later) means "tried and failed".
        self.docker_client = False

    def connect(self, version=None):
        """Connect to the Docker server.

        Returns a docker.Client on success, or None when the server is
        unreachable / the docker lib is missing (plugin then disables itself).
        """
        # Init connection to the Docker API
        try:
            if version is None:
                ret = docker.Client(base_url='unix://var/run/docker.sock')
            else:
                ret = docker.Client(base_url='unix://var/run/docker.sock',
                                    version=version)
        except NameError:
            # docker lib not found (the import at module level failed)
            return None
        try:
            ret.version()
        except requests.exceptions.ConnectionError as e:
            # Connection error (Docker not detected)
            # Let this message in debug mode
            logger.debug("Can't connect to the Docker server (%s)" % e)
            return None
        except docker.errors.APIError as e:
            if version is None:
                # API error (Version mismatch ?)
                logger.debug("Docker API error (%s)" % e)
                # Try the connection with the server version.
                # NOTE(review): the server version is scraped out of the error
                # message text -- fragile if docker-py changes its wording.
                version = re.search('server\:\ (.*)\)\".*\)', str(e))
                if version:
                    logger.debug("Try connection with Docker API version %s" % version.group(1))
                    ret = self.connect(version=version.group(1))
                else:
                    logger.debug("Can not retreive Docker server version")
                    ret = None
            else:
                # API error even with the explicit server version: give up
                logger.error("Docker API error (%s)" % e)
                ret = None
        except Exception as e:
            # Others exceptions...
            # Connection error (Docker not detected)
            logger.error("Can't connect to the Docker server (%s)" % e)
            ret = None
        # Log an info if Docker plugin is disabled
        if ret is None:
            logger.debug("Docker plugin is disable because an error has been detected")
        return ret

    def reset(self):
        """Reset/init the stats."""
        self.stats = {}

    @GlancesPlugin._log_result_decorator
    def update(self):
        """Update Docker stats using the input method.

        Returns self.stats (empty dict when the plugin is disabled).
        """
        # Reset stats
        self.reset()
        # Get the current Docker API client
        if not self.docker_client:
            # First time, try to connect to the server
            self.docker_client = self.connect()
            if self.docker_client is None:
                # Connection failed: flip the module-level flag so we never retry
                global docker_tag
                docker_tag = False
        # The Docker-py lib is mandatory
        if not docker_tag or (self.args is not None and self.args.disable_docker):
            return self.stats
        if self.input_method == 'local':
            # Update stats
            # Example: {
            # "KernelVersion": "3.16.4-tinycore64",
            # "Arch": "amd64",
            # "ApiVersion": "1.15",
            # "Version": "1.3.0",
            # "GitCommit": "c78088f",
            # "Os": "linux",
            # "GoVersion": "go1.3.3"
            # }
            self.stats['version'] = self.docker_client.version()
            # Example: [{u'Status': u'Up 36 seconds',
            # u'Created': 1420378904,
            # u'Image': u'nginx:1',
            # u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443},
            # {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}],
            # u'Command': u"nginx -g 'daemon off;'",
            # u'Names': [u'/webstack_nginx_1'],
            # u'Id': u'b0da859e84eb4019cf1d965b15e9323006e510352c402d2f442ea632d61faaa5'}]
            self.stats['containers'] = self.docker_client.containers()
            # Get stats for all containers
            for c in self.stats['containers']:
                if not hasattr(self, 'docker_stats'):
                    # Create a dict with all the containers' stats instance
                    self.docker_stats = {}
                # TODO: Find a way to correct this
                # The following optimization is not compatible with the network stats
                # The self.docker_client.stats method should be call every time in order to have network stats refreshed
                # Nevertheless, if we call it every time, Glances is slow...
                if c['Id'] not in self.docker_stats:
                    # Create the stats instance (a streaming generator) for the
                    # current container -- created once and reused on refresh.
                    try:
                        self.docker_stats[c['Id']] = self.docker_client.stats(c['Id'], decode=True)
                        logger.debug("Create Docker stats object for container {}".format(c['Id']))
                    except Exception as e:
                        # Correct Issue #602
                        logger.error("Can not call Docker stats method {}".format(e))
                # Get the docker stats
                try:
                    # self.docker_stats[c['Id']] = self.docker_client.stats(c['Id'], decode=True)
                    # NOTE(review): generator ``.next()`` is Python 2 only;
                    # ``next(...)`` would be needed for Python 3.
                    all_stats = self.docker_stats[c['Id']].next()
                except Exception:
                    # Any failure (missing stream, decode error...) -> no stats
                    all_stats = {}
                c['cpu'] = self.get_docker_cpu(c['Id'], all_stats)
                c['memory'] = self.get_docker_memory(c['Id'], all_stats)
                # c['network'] = self.get_docker_network(c['Id'], all_stats)
        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass
        return self.stats

    def get_docker_cpu_old(self, container_id):
        """Return the container CPU usage by reading /sys/fs/cgroup/.

        Input: id is the full container id
        Output: a dict {'total': 1.49, 'user': 0.65, 'system': 0.84}
        (values are raw tick counts here; the caller divides by USER_HZ)
        """
        ret = {}
        # Read the stats
        try:
            with open('/sys/fs/cgroup/cpuacct/docker/' + container_id + '/cpuacct.stat', 'r') as f:
                for line in f:
                    m = re.search(r"(system|user)\s+(\d+)", line)
                    if m:
                        ret[m.group(1)] = int(m.group(2))
        except IOError as e:
            logger.error("Can not grab container CPU stat ({0})".format(e))
            return ret
        # NOTE(review): assumes both 'system' and 'user' lines were found when
        # the file was readable; a partial file would raise KeyError here.
        if isinstance(ret["system"], numbers.Number) and isinstance(ret["user"], numbers.Number):
            ret["total"] = ret["system"] + ret["user"]
        # Return the stats
        return ret

    def get_docker_cpu(self, container_id, all_stats):
        """Return the container CPU usage.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'total': 1.49}  (percentage across all cores)
        """
        cpu_new = {}
        ret = {'total': 0.0}
        # Read the stats
        # For each container, you will find a pseudo-file cpuacct.stat,
        # containing the CPU usage accumulated by the processes of the container.
        # Those times are expressed in ticks of 1/USER_HZ of a second.
        # On x86 systems, USER_HZ is 100.
        try:
            cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
            cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
            cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'])
        except KeyError as e:
            # all_stats do not have CPU information
            logger.debug("Can not grab CPU usage for container {0} ({1}). Trying fallback method.".format(container_id, e))
            # Trying fallback to old grab method
            ret = self.get_docker_cpu_old(container_id)
            # Get the user ticks
            ticks = self.get_user_ticks()
            for k in ret.keys():
                ret[k] = float(ret[k]) / ticks
        else:
            # Previous CPU stats stored in the cpu_old variable
            if not hasattr(self, 'cpu_old'):
                # First call, we init the cpu_old variable
                self.cpu_old = {}
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass
            if container_id not in self.cpu_old:
                # First sample for this container: nothing to diff against yet
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass
            else:
                # Usage percentage = (container delta / host delta) * cores * 100
                cpu_delta = float(cpu_new['total'] - self.cpu_old[container_id]['total'])
                system_delta = float(cpu_new['system'] - self.cpu_old[container_id]['system'])
                if cpu_delta > 0.0 and system_delta > 0.0:
                    ret['total'] = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100
                # Save stats to compute next stats
                self.cpu_old[container_id] = cpu_new
        # Return the stats
        return ret

    def get_docker_memory_old(self, container_id):
        """Return the container MEMORY usage by reading /sys/fs/cgroup/.

        Input: id is the full container id
        Output: a dict {'rss': 1015808, 'cache': 356352}
        """
        ret = {}
        # Read the stats
        try:
            with open('/sys/fs/cgroup/memory/docker/' + container_id + '/memory.stat', 'r') as f:
                for line in f:
                    m = re.search(r"(rss|cache)\s+(\d+)", line)
                    if m:
                        ret[m.group(1)] = int(m.group(2))
        except IOError as e:
            logger.error("Can not grab container MEM stat ({0})".format(e))
            return ret
        # Return the stats
        return ret

    def get_docker_memory(self, container_id, all_stats):
        """Return the container MEMORY.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
        """
        ret = {}
        # Read the stats
        try:
            ret['rss'] = all_stats['memory_stats']['stats']['rss']
            ret['cache'] = all_stats['memory_stats']['stats']['cache']
            ret['usage'] = all_stats['memory_stats']['usage']
            ret['max_usage'] = all_stats['memory_stats']['max_usage']
        except KeyError as e:
            # all_stats do not have MEM information
            logger.debug("Can not grab MEM usage for container {0} ({1}). Trying fallback method.".format(container_id, e))
            # Trying fallback to old grab method
            ret = self.get_docker_memory_old(container_id)
        # Return the stats
        return ret

    def get_docker_network(self, container_id, all_stats):
        """Return the container network usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
        (empty on the first sample for a container -- no delta available yet)
        """
        # Init the returned dict
        network_new = {}
        # Read the rx/tx stats (in bytes)
        try:
            netiocounters = all_stats["network"]
        except KeyError as e:
            # all_stats do not have NETWORK information
            logger.debug("Can not grab NET usage for container {0} ({1})".format(container_id, e))
            # No fallback available...
            return network_new
        # Previous network interface stats are stored in the network_old variable
        if not hasattr(self, 'netiocounters_old'):
            # First call, we init the network_old var
            self.netiocounters_old = {}
            try:
                self.netiocounters_old[container_id] = netiocounters
            except (IOError, UnboundLocalError):
                pass
        if container_id not in self.netiocounters_old:
            try:
                self.netiocounters_old[container_id] = netiocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
            network_new['rx'] = netiocounters["rx_bytes"] - self.netiocounters_old[container_id]["rx_bytes"]
            network_new['tx'] = netiocounters["tx_bytes"] - self.netiocounters_old[container_id]["tx_bytes"]
            network_new['cumulative_rx'] = netiocounters["rx_bytes"]
            network_new['cumulative_tx'] = netiocounters["tx_bytes"]
            # Save stats to compute next bitrate
            self.netiocounters_old[container_id] = netiocounters
        # Return the stats
        return network_new

    def get_user_ticks(self):
        """Return the number of clock ticks per second (USER_HZ) via sysconf."""
        return os.sysconf(os.sysconf_names['SC_CLK_TCK'])

    def msg_curse(self, args=None):
        """Return the list of curse lines to display in the curse interface."""
        # Init the return message
        ret = []
        # Only process if stats exist (and non null) and display plugin enable...
        # NOTE(review): assumes args is not None here -- confirm with callers.
        if not self.stats or args.disable_docker or len(self.stats['containers']) == 0:
            return ret
        # Build the string message
        # Title
        msg = '{0}'.format('CONTAINERS')
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = ' {0}'.format(len(self.stats['containers']))
        ret.append(self.curse_add_line(msg))
        msg = ' ({0} {1})'.format('served by Docker',
                                  self.stats['version']["Version"])
        ret.append(self.curse_add_line(msg))
        ret.append(self.curse_new_line())
        # Header
        ret.append(self.curse_new_line())
        msg = '{0:>14}'.format('Id')
        ret.append(self.curse_add_line(msg))
        msg = ' {0:20}'.format('Name')
        ret.append(self.curse_add_line(msg))
        msg = '{0:>26}'.format('Status')
        ret.append(self.curse_add_line(msg))
        msg = '{0:>6}'.format('CPU%')
        ret.append(self.curse_add_line(msg))
        msg = '{0:>7}'.format('MEM')
        ret.append(self.curse_add_line(msg))
        # msg = '{0:>6}'.format('Rx/s')
        # ret.append(self.curse_add_line(msg))
        # msg = '{0:>6}'.format('Tx/s')
        # ret.append(self.curse_add_line(msg))
        msg = ' {0:8}'.format('Command')
        ret.append(self.curse_add_line(msg))
        # Data
        for container in self.stats['containers']:
            ret.append(self.curse_new_line())
            # Id (short 12-char form)
            msg = '{0:>14}'.format(container['Id'][0:12])
            ret.append(self.curse_add_line(msg))
            # Name
            name = container['Names'][0]
            if len(name) > 20:
                # NOTE(review): keeps all but the last 19 chars behind a '_';
                # looks like name[:19] or name[-19:] may have been intended.
                name = '_' + name[:-19]
            else:
                name = name[:20]
            msg = ' {0:20}'.format(name)
            ret.append(self.curse_add_line(msg))
            # Status (colored via container_alert)
            status = self.container_alert(container['Status'])
            msg = container['Status'].replace("minute", "min")
            msg = '{0:>26}'.format(msg[0:25])
            ret.append(self.curse_add_line(msg, status))
            # CPU
            try:
                msg = '{0:>6.1f}'.format(container['cpu']['total'])
            except KeyError:
                msg = '{0:>6}'.format('?')
            ret.append(self.curse_add_line(msg))
            # MEM
            try:
                msg = '{0:>7}'.format(self.auto_unit(container['memory']['usage']))
            except KeyError:
                msg = '{0:>7}'.format('?')
            ret.append(self.curse_add_line(msg))
            # NET RX/TX
            # for r in ['rx', 'tx']:
            #     try:
            #         value = self.auto_unit(int(container['network'][r] // container['network']['time_since_update'] * 8)) + "b"
            #         msg = '{0:>6}'.format(value)
            #     except KeyError:
            #         msg = '{0:>6}'.format('?')
            #     ret.append(self.curse_add_line(msg))
            # Command
            msg = ' {0}'.format(container['Command'])
            ret.append(self.curse_add_line(msg))
        return ret

    def container_alert(self, status):
        """Analyse the container status string and return an alert level."""
        if "Paused" in status:
            return 'CAREFUL'
        else:
            return 'OK'
|
facebookexperimental/eden | refs/heads/master | eden/hg-server/tests/heredoctest.py | 2 | from __future__ import absolute_import, print_function
import sys
# Shared namespace for all executed snippets, so later snippets can use
# names defined by earlier ones (mimics one interactive session).
globalvars = {}
lines = sys.stdin.readlines()
while lines:
    l = lines.pop(0)
    if l.startswith("SALT"):
        # Test-harness delimiter line: echo it back without the newline.
        print(l[:-1])
    elif l.startswith(">>> "):
        # Start of a doctest-style snippet; gather "... " continuation lines.
        snippet = l[4:]
        while lines and lines[0].startswith("... "):
            l = lines.pop(0)
            snippet += l[4:]
        # "single" mode prints expression results, like the interactive REPL.
        c = compile(snippet, "<heredoc>", "single")
        try:
            exec(c, globalvars)
        except Exception as inst:
            # Print the exception repr instead of aborting, so the harness
            # can compare it against expected output and keep going.
            print(repr(inst))
|
telminov/personnel-testing | refs/heads/master | core/views/user.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import redirect
from core.forms import UserCreateForm, UserSearchForm, UserUpdateForm
from core.models import User
from core.views.base import CreateOrUpdateView, ListView, DeleteView
from django.core.urlresolvers import reverse_lazy, reverse
class UserListView(ListView):
    """List of users, optionally filtered by department via the query string."""
    model = User
    context_object_name = 'users'
    template_name = 'core/management/users.html'
    title = 'Управление пользователями'

    def get_queryset(self):
        """Narrow the queryset to a department when one is requested."""
        queryset = super(UserListView, self).get_queryset()
        department = self.request.GET.get('department')
        if department:
            queryset = queryset.filter(departments__in=[department])
        return queryset

    def get_context_data(self, **kwargs):
        """Add the department search form, bound to the current GET data."""
        context = super(UserListView, self).get_context_data(**kwargs)
        context['form'] = UserSearchForm(self.request.GET or None)
        return context
user_list_view = UserListView.as_view()
class UserDeletedListView(ListView):
    """List users that have been soft-deleted (``deleted_at`` is set)."""
    model = User
    context_object_name = 'users'
    template_name = 'core/management/deleted_users.html'
    title = 'Управление удалёнными пользователями'

    def get_queryset(self):
        # default_objects bypasses the default manager's "not deleted" filter.
        return self.model.default_objects.filter(deleted_at__isnull=False)

    def post(self, request, *args, **kwargs):
        # Bug fix: Django's View.dispatch() invokes handlers as
        # post(request, *args, **kwargs); the previous ``def post(self)``
        # signature raised a TypeError on every POST request.
        # TODO: implement and return an HttpResponse (currently a stub).
        pass
user_deleted_list_view = UserDeletedListView.as_view()
class UserCreateOrUpdateView(CreateOrUpdateView):
    """Create a new user or edit an existing one.

    A different form class is used for create vs. update mode; department
    memberships are fully re-assigned on every successful save.
    """
    model = User
    form_class_create = UserCreateForm
    form_class_update = UserUpdateForm
    template_name = 'core/management/user_edit.html'
    pk_url_kwarg = 'user_id'
    success_url = reverse_lazy('user_list_view')

    def get_initial(self):
        """Pre-populate departments and user id when editing."""
        initial = {}
        if self.is_update():
            initial['departments'] = self.get_object().departments.all()
            initial['user_id'] = self.object.id
        return initial

    def form_valid(self, form, commit=True):
        """Save the user (hashing the password on create) and sync departments.

        Fixes two issues in the original: ``self.object.save()`` was called
        twice in a row on the create path, and the local variable shadowed
        the module-level ``redirect`` import.
        """
        response = super(UserCreateOrUpdateView, self).form_valid(form, commit=False)
        if self.is_create():
            # set_password() hashes the raw password before it is stored.
            self.object.set_password(form.cleaned_data['password'])
        self.object.save()
        # Replace the user's department memberships with the submitted set.
        self.object.departments.clear()
        for department in form.cleaned_data.get('departments', []):
            self.object.departments.add(department)
        return response

    def get_title(self):
        """Return the page title for the current mode."""
        if self.is_create():
            return 'Создание пользователя'
        else:
            user = self.get_object()
            return 'Редактирование пользователя %s (%s)' % (user.username, user.email or 'email отсутствует')
user_create_or_update_view = UserCreateOrUpdateView.as_view()
class UserDeleteView(DeleteView):
    """Confirmation view for deleting a single user."""
    model = User
    pk_url_kwarg = 'user_id'
    success_url = reverse_lazy('user_list_view')
    template_name = 'core/management/user_delete.html'
    title = 'Удаление пользователя'
user_delete_view = UserDeleteView.as_view()
def user_undelete_view(request, user_id):
    """Restore a soft-deleted user, then return to the user list."""
    # default_objects can see soft-deleted rows, unlike the default manager.
    restored = User.default_objects.get(id=user_id)
    restored.deleted_at = None
    restored.save()
    return redirect(reverse('user_list_view'))
|
anthgur/servo | refs/heads/master | tests/wpt/web-platform-tests/fetch/api/resources/echo-content.py | 80 | def main(request, response):
    """wptserve handler: echo the request body back to the client.

    The request method, Content-Length and Content-Type are reflected in
    X-Request-* response headers ("NO" when a header is absent).
    """
    headers = [("X-Request-Method", request.method),
               ("X-Request-Content-Length", request.headers.get("Content-Length", "NO")),
               ("X-Request-Content-Type", request.headers.get("Content-Type", "NO"))]
    content = request.body
    return headers, content
|
excellalabs/face-off | refs/heads/main | core/features/steps/helper.py | 4 | from selenium.webdriver.support.wait import WebDriverWait
import random
import time
from selenium.webdriver.common.action_chains import ActionChains
# Helper methods to consolidate steps.py
def login(context, username, password):
    """Sign in through the UI using the given credentials."""
    browser = context.browser
    browser.find_element_by_name('signIn').click()
    browser.find_element_by_name('login').send_keys(username)
    browser.find_element_by_name('password').send_keys(password)
    browser.find_element_by_class_name('yj-btn').click()
def click_card(driver, index):
    """Click the colleague card at position ``index``."""
    driver.find_element_by_id('colleague{}'.format(index)).click()
def get_card(driver, index):
    """Return the colleague card element at position ``index``."""
    return driver.find_element_by_id('colleague{}'.format(index))
def get_share_icon(driver, index):
    """Return the share ("turn flag") icon element for card ``index``."""
    # The space before the index is part of the element id in the page.
    return driver.find_element_by_id('turnFlag {}'.format(index))
def get_hover(driver, index):
    """Move the mouse over the colleague card at position ``index``."""
    card = driver.find_element_by_id('colleague{}'.format(index))
    ActionChains(driver).move_to_element(card).perform()
def get_user(driver):
    """Return the element showing the logged-in user's name."""
    return driver.find_element_by_id('userName')
def click_next_round(driver):
    """Advance the game to the next round."""
    driver.find_element_by_id('nextRound').click()
def play_through_education_mode(driver):
    """Play one full education-mode game with a fixed sequence of picks."""
    driver.find_element_by_id('easyGame').click()
    # Four rounds with a "next round" click after each pick...
    for card_index in (0, 2, 3, 1):
        click_card(driver, card_index)
        click_next_round(driver)
    # ...then a final pick and the results submission.
    click_card(driver, 0)
    driver.find_element_by_id('resultsSubmit').click()
def play_through_competitive_mode(driver):
    """Play five competitive-mode rounds, clicking a random card each round.

    The chosen card index is printed for debugging; a pause after each pick
    lets the round resolve before the next one starts.

    Refactor: the original repeated the identical round body five times
    (copy-paste duplication); a loop expresses the same behavior once.
    """
    for _ in range(5):
        random_card = random.randint(1, 3)
        print(random_card)
        click_card(driver, random_card)
        # Give the app time to process the pick and advance the round.
        time.sleep(5)
|
mrunge/openstack_horizon | refs/heads/master | openstack_horizon/dashboards/project/data_processing/job_binaries/panel.py | 1 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon_lib
from openstack_horizon.dashboards.project import dashboard
class JobBinariesPanel(horizon_lib.Panel):
    """Horizon panel listing data-processing job binaries."""
    name = _("Job Binaries")  # user-visible panel title (translatable)
    slug = 'data_processing.job_binaries'
    # Only shown when the data processing service is available.
    permissions = ('openstack.services.data_processing',)
# Register the panel under the Project dashboard.
dashboard.Project.register(JobBinariesPanel)
|
ovnicraft/odoo | refs/heads/8.0 | addons/website_mail/models/mail_message.py | 264 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
from openerp.tools.translate import _
from openerp.osv import osv, fields, expression
class MailMessage(osv.Model):
    """Extend mail.message with website publication support.

    Adds a ``website_published`` flag and restricts what public (portal)
    users can search for and read to published messages only.
    """
    _inherit = 'mail.message'

    def _get_description_short(self, cr, uid, ids, name, arg, context=None):
        """Function field getter: the subject, or the first 30 chars of the
        plain-text body followed by ' [...]' when truncated."""
        res = dict.fromkeys(ids, False)
        for message in self.browse(cr, uid, ids, context=context):
            if message.subject:
                res[message.id] = message.subject
            else:
                plaintext_ct = '' if not message.body else html2plaintext(message.body)
                res[message.id] = plaintext_ct[:30] + '%s' % (' [...]' if len(plaintext_ct) >= 30 else '')
        return res

    _columns = {
        'description': fields.function(
            _get_description_short, type='char',
            help='Message description: either the subject, or the beginning of the body'
        ),
        'website_published': fields.boolean(
            'Published', help="Visible on the website as a comment", copy=False,
        ),
    }

    def default_get(self, cr, uid, fields_list, context=None):
        """Default ``website_published`` to True for new messages."""
        defaults = super(MailMessage, self).default_get(cr, uid, fields_list, context=context)
        # Note: explicitly implemented in default_get() instead of _defaults,
        # to avoid setting to True for all existing messages during upgrades.
        # TODO: this default should probably be dynamic according to the model
        # on which the messages are attached, thus moved to create().
        if 'website_published' in fields_list:
            defaults.setdefault('website_published', True)
        return defaults

    def _search(self, cr, uid, args, offset=0, limit=None, order=None,
                context=None, count=False, access_rights_uid=None):
        """ Override that adds specific access rights of mail.message, to restrict
            messages to published messages for public users. """
        if uid != SUPERUSER_ID:
            group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
            group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
            if group_user_id in [group.id for group in group_ids]:
                # Public user: force the published filter onto the domain.
                args = expression.AND([[('website_published', '=', True)], list(args)])
        return super(MailMessage, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
                                                context=context, count=count, access_rights_uid=access_rights_uid)

    def check_access_rule(self, cr, uid, ids, operation, context=None):
        """ Add Access rules of mail.message for non-employee user:
            - read:
                - raise if the type is comment and subtype NULL (internal note)
        """
        if uid != SUPERUSER_ID:
            group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
            group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
            if group_user_id in [group.id for group in group_ids]:
                # Public user: deny access when any requested message is
                # unpublished (table name is trusted, ids are parameterized).
                cr.execute('SELECT id FROM "%s" WHERE website_published IS FALSE AND id = ANY (%%s)' % (self._table), (ids,))
                if cr.fetchall():
                    raise osv.except_osv(
                        _('Access Denied'),
                        _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % (self._description, operation))
        return super(MailMessage, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
|
PythonNut/servo | refs/heads/master | python/mach/mach/commands/commandinfo.py | 96 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, # You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function, unicode_literals
from mach.decorators import (
CommandProvider,
Command,
CommandArgument,
)
@CommandProvider
class BuiltinCommands(object):
    """Built-in mach commands for introspecting the command registry."""

    def __init__(self, context):
        # ``context.commands.command_handlers`` maps command name -> handler.
        self.context = context

    @Command('mach-commands', category='misc',
             description='List all mach commands.')
    def commands(self):
        """Print the name of every registered mach command, one per line."""
        print("\n".join(self.context.commands.command_handlers.keys()))

    @Command('mach-debug-commands', category='misc',
             description='Show info about available mach commands.')
    @CommandArgument('match', metavar='MATCH', default=None, nargs='?',
                     help='Only display commands containing given substring.')
    def debug_commands(self, match=None):
        """Print source file, class and method for each (matching) command."""
        import inspect
        handlers = self.context.commands.command_handlers
        for command in sorted(handlers.keys()):
            if match and match not in command:
                continue
            handler = handlers[command]
            cls = handler.cls
            method = getattr(cls, getattr(handler, 'method'))
            print(command)
            print('=' * len(command))
            print('')
            print('File: %s' % inspect.getsourcefile(method))
            print('Class: %s' % cls.__name__)
            print('Method: %s' % handler.method)
            print('')
|
titasakgm/brc-stock | refs/heads/master | openerp/addons/account_asset/wizard/account_asset_change_duration.py | 40 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
class asset_modify(osv.osv_memory):
    """Wizard to change an asset's depreciation duration, keeping history."""
    _name = 'asset.modify'
    _description = 'Modify Asset'
    _columns = {
        'name': fields.char('Reason', size=64, required=True),
        'method_number': fields.integer('Number of Depreciations', required=True),
        'method_period': fields.integer('Period Length'),
        'method_end': fields.date('Ending date'),
        'note': fields.text('Notes'),
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """ Returns views and fields for current model.
        @param cr: A database cursor
        @param user: ID of the user currently logged in
        @param view_id: list of fields, which required to read signatures
        @param view_type: defines a view type. it can be one of (form, tree, graph, calender, gantt, search, mdx)
        @param context: context arguments, like lang, time zone
        @param toolbar: contains a list of reports, wizards, and links related to current model
        @return: Returns a dictionary that contains definition for fields, views, and toolbars
        """
        if not context:
            context = {}
        asset_obj = self.pool.get('account.asset.asset')
        result = super(asset_modify, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
        asset_id = context.get('active_id', False)
        active_model = context.get('active_model', '')
        if active_model == 'account.asset.asset' and asset_id:
            asset = asset_obj.browse(cr, uid, asset_id, context=context)
            # Hide whichever field does not apply to the asset's time method:
            # 'number' assets have no end date, 'end' assets no count.
            doc = etree.XML(result['arch'])
            if asset.method_time == 'number':
                node = doc.xpath("//field[@name='method_end']")[0]
                node.set('invisible', '1')
            elif asset.method_time == 'end':
                node = doc.xpath("//field[@name='method_number']")[0]
                node.set('invisible', '1')
            result['arch'] = etree.tostring(doc)
        return result

    def default_get(self, cr, uid, fields, context=None):
        """ To get default values for the object.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param fields: List of fields for which we want default values
        @param context: A standard dictionary
        @return: A dictionary which of fields with values.
        """
        if not context:
            context = {}
        asset_obj = self.pool.get('account.asset.asset')
        res = super(asset_modify, self).default_get(cr, uid, fields, context=context)
        # Seed the wizard with the asset's current depreciation settings.
        asset_id = context.get('active_id', False)
        asset = asset_obj.browse(cr, uid, asset_id, context=context)
        if 'name' in fields:
            res.update({'name': asset.name})
        if 'method_number' in fields and asset.method_time == 'number':
            res.update({'method_number': asset.method_number})
        if 'method_period' in fields:
            res.update({'method_period': asset.method_period})
        if 'method_end' in fields and asset.method_time == 'end':
            res.update({'method_end': asset.method_end})
        return res

    def modify(self, cr, uid, ids, context=None):
        """ Modifies the duration of asset for calculating depreciation
        and maintains the history of old values.
        @param self: The object pointer.
        @param cr: A database cursor
        @param uid: ID of the user currently logged in
        @param ids: List of Ids
        @param context: A standard dictionary
        @return: Close the wizard.
        """
        if not context:
            context = {}
        asset_obj = self.pool.get('account.asset.asset')
        history_obj = self.pool.get('account.asset.history')
        asset_id = context.get('active_id', False)
        asset = asset_obj.browse(cr, uid, asset_id, context=context)
        data = self.browse(cr, uid, ids[0], context=context)
        # Record the PREVIOUS values in the history table before changing.
        history_vals = {
            'asset_id': asset_id,
            'name': data.name,
            'method_time': asset.method_time,
            'method_number': asset.method_number,
            'method_period': asset.method_period,
            'method_end': asset.method_end,
            'user_id': uid,
            'date': time.strftime('%Y-%m-%d'),
            'note': data.note,
        }
        history_obj.create(cr, uid, history_vals, context=context)
        # Apply the new settings and recompute the depreciation board.
        asset_vals = {
            'method_number': data.method_number,
            'method_period': data.method_period,
            'method_end': data.method_end,
        }
        asset_obj.write(cr, uid, [asset_id], asset_vals, context=context)
        asset_obj.compute_depreciation_board(cr, uid, [asset_id], context=context)
        return {'type': 'ir.actions.act_window_close'}
# Old-style OpenERP model registration (instantiation registers the model).
asset_modify()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
yarbelk/django-admin2 | refs/heads/develop | djadmin2/viewmixins.py | 2 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
import os
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, reverse_lazy
from django.forms.models import modelform_factory
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
from django.utils.text import get_text_list
from django.utils.translation import ugettext as _
# braces 1.3 views exported AccessMixin
# in braces 1.4 this was moved views._access and not exported in views
# not sure if this was the intent of braces or an oversight
# if intent - should look at AccessMixin vs. using a more specific mixin
try:
from braces.views import AccessMixin
except ImportError:
from braces.views._access import AccessMixin
from . import settings, permissions
from .utils import admin2_urlname, model_options
class PermissionMixin(AccessMixin):
    """View mixin enforcing djadmin2 permission classes on dispatch."""
    # Prevent Django templates from calling the class when resolving it.
    do_not_call_in_templates = True
    permission_classes = (permissions.IsStaffPermission,)
    login_url = reverse_lazy('admin2:dashboard')

    def __init__(self, **kwargs):
        # Instantiate each configured permission class once per view instance.
        self.permissions = [
            permission_class()
            for permission_class in self.permission_classes]
        super(PermissionMixin, self).__init__(**kwargs)

    def has_permission(self, obj=None):
        '''
        Return ``True`` if the permission for this view shall be granted,
        ``False`` otherwise. Supports object-level permission by passing the
        related object as first argument.
        '''
        # All permission classes must agree; the first refusal wins.
        for permission in self.permissions:
            if not permission.has_permission(self.request, self, obj):
                return False
        return True

    def dispatch(self, request, *args, **kwargs):
        # Raise exception or redirect to login if user doesn't have
        # permissions.
        if not self.has_permission():
            if self.raise_exception:
                raise PermissionDenied  # return a forbidden response
            else:
                return redirect_to_login(
                    request.get_full_path(),
                    self.get_login_url(),
                    self.get_redirect_field_name())
        return super(PermissionMixin, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Expose a template-side permission checker as ``permissions``."""
        context = super(PermissionMixin, self).get_context_data(**kwargs)
        permission_checker = permissions.TemplatePermissionChecker(
            self.request, self.model_admin)
        context.update({
            'permissions': permission_checker,
        })
        return context
class Admin2Mixin(PermissionMixin):
    """Base mixin for admin2 views.

    ``model_admin``, ``model_name`` and ``app_label`` are set in the
    ModelAdmin2 class when creating the view via ``.as_view(...)``.
    """
    model_admin = None
    model_name = None
    app_label = None
    index_path = reverse_lazy('admin2:dashboard')

    def get_template_names(self):
        # Resolve templates relative to the configured admin2 theme.
        return [os.path.join(
            settings.ADMIN2_THEME_DIRECTORY, self.default_template_name)]

    def get_model(self):
        return self.model

    def get_queryset(self):
        return self.get_model()._default_manager.all()

    def get_form_class(self):
        # An explicit form_class on the view wins over a generated ModelForm.
        if self.form_class is not None:
            return self.form_class
        # Consistency fix: match AdminModel2Mixin.get_form_class below —
        # modelform_factory without ``fields``/``exclude`` raises
        # ImproperlyConfigured on Django >= 1.8.
        return modelform_factory(self.get_model(), fields='__all__')

    def is_user(self, request):
        # True when the request carries a user that is NOT an active staff
        # member, i.e. someone who must authenticate before using the admin.
        return hasattr(request, 'user') and not (request.user.is_active and
                                                 request.user.is_staff)

    def dispatch(self, request, *args, **kwargs):
        if self.is_user(request):
            from .views import LoginView
            if request.path == reverse('admin2:logout'):
                return HttpResponseRedirect(self.index_path)
            # Bug fix: ``extra`` was previously only bound when the dashboard
            # index was requested, so any other path reached the
            # LoginView().dispatch(...) call below with ``extra`` unbound and
            # raised NameError. Default to an empty context instead.
            extra = {}
            if request.path == self.index_path:
                extra = {
                    'next': request.GET.get('next', self.index_path)
                }
            return LoginView().dispatch(request, extra_context=extra,
                                        *args, **kwargs)
        return super(Admin2Mixin, self).dispatch(request, *args, **kwargs)
class AdminModel2Mixin(Admin2Mixin):
    """Mixin for views bound to a single model registered on a ModelAdmin2."""
    model_admin = None
    def get_context_data(self, **kwargs):
        context = super(AdminModel2Mixin, self).get_context_data(**kwargs)
        model = self.get_model()
        model_meta = model_options(model)
        # Human-readable app names are registered on the admin site instance.
        app_verbose_names = self.model_admin.admin.app_verbose_names
        context.update({
            'app_label': model_meta.app_label,
            'app_verbose_name': app_verbose_names.get(model_meta.app_label),
            'model_name': model_meta.verbose_name,
            'model_name_pluralized': model_meta.verbose_name_plural
        })
        return context
    def get_model(self):
        return self.model
    def get_queryset(self):
        return self.get_model()._default_manager.all()
    def get_form_class(self):
        # An explicit form_class on the view wins over a generated ModelForm.
        if self.form_class is not None:
            return self.form_class
        return modelform_factory(self.get_model(), fields='__all__')
class Admin2ModelFormMixin(object):
    """Mixin for create/update form views: routes the success URL based on
    the submit button used and builds human-readable change-log messages.
    """
    def get_success_url(self):
        """Return the URL to redirect to after a successful form submission.

        ``_continue`` -> stay on this object's update view;
        ``_addanother`` -> a fresh create view; otherwise the index view.
        """
        if '_continue' in self.request.POST:
            view_name = admin2_urlname(self, 'update')
            return reverse(view_name, kwargs={'pk': self.object.pk})
        if '_addanother' in self.request.POST:
            return reverse(admin2_urlname(self, 'create'))
        # default to index view
        return reverse(admin2_urlname(self, 'index'))

    def construct_change_message(self, request, form, formsets):
        """ Construct a change message from a changed object.

        i18n fix: the constant template string must be passed to ``_()``
        *before* interpolation — translating the already-formatted string
        produces a msgid that can never match an entry in the translation
        catalog, so these messages were untranslatable.
        """
        change_message = []
        if form.changed_data:
            change_message.append(
                _('Changed {0}.').format(
                    get_text_list(form.changed_data, _('and'))))
        if formsets:
            for formset in formsets:
                for added_object in formset.new_objects:
                    change_message.append(
                        _('Added {0} "{1}".').format(
                            force_text(added_object._meta.verbose_name),
                            force_text(added_object)))
                for changed_object, changed_fields in formset.changed_objects:
                    change_message.append(
                        _('Changed {0} for {1} "{2}".').format(
                            get_text_list(changed_fields, _('and')),
                            force_text(changed_object._meta.verbose_name),
                            force_text(changed_object)))
                for deleted_object in formset.deleted_objects:
                    change_message.append(
                        _('Deleted {0} "{1}".').format(
                            force_text(deleted_object._meta.verbose_name),
                            force_text(deleted_object)))
        change_message = ' '.join(change_message)
        return change_message or _('No fields changed.')
|
Zkin/pf-kernel-updates | refs/heads/zk-4.4 | tools/perf/scripts/python/check-perf-trace.py | 1997 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events that have no dedicated handler.
unhandled = autodict()
def trace_begin():
	# Called once by perf before the first event is processed.
	print "trace_begin"
	pass
def trace_end():
	# Called once by perf after the last event; dump unhandled-event counts.
	print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, vec):
		# Handler for irq:softirq_entry: print the common header fields,
		# then the softirq vector decoded to its symbolic name.
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		print "vec=%s\n" % \
		(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	common_callchain, call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
		# Handler for kmem:kmalloc: print the common header fields, then the
		# allocation details with gfp_flags decoded via flag_str().
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		print "call_site=%u, ptr=%u, bytes_req=%u, " \
		"bytes_alloc=%u, gfp_flags=%s\n" % \
		(call_site, ptr, bytes_req, bytes_alloc,
		flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
	# Count events with no dedicated handler. autodict auto-vivifies
	# missing keys; the first increment on a fresh entry raises TypeError,
	# which seeds the counter at 1.
	try:
		unhandled[event_name] += 1
	except TypeError:
		unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# Print the fields common to every event, without a trailing newline.
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
	# These fields are fetched back from perf via the context accessors.
	print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
		% (common_pc(context), trace_flag_str(common_flags(context)), \
		common_lock_depth(context))
def print_unhandled():
	# Print a name/count table of all events that had no handler; silent
	# when every event was handled.
	keys = unhandled.keys()
	if not keys:
		return
	print "\nunhandled events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
		"-----------"),
	for event_name in keys:
		print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
TheKK/Shedskin | refs/heads/master | tests/122.py | 6 |
# (c) Mark Dufour
# --- mark.dufour@gmail.com
import random
random.seed(42)
from math import sqrt, e
# Logistic activation 1/(1+e^-x) and its first derivative.
sigmoid = lambda x: pow((1+pow(e,-x)),-1) # [lambda0]
deriv = lambda x: pow(e,-x) * pow((1+pow(e,-x)),-2) # [lambda0]
class link:
    """Weighted, directed connection between two network nodes."""
    def __init__(self, in_node, out_node):
        # Remember both endpoints of the connection.
        self.in_node = in_node
        self.out_node = out_node
        # Start from a small random weight in [-0.25, 0.25).
        self.weight = (random.random() - 0.5) / 2
class node:
    """Single neuron holding lists of incoming and outgoing links."""
    def __init__(self, input_nodes):
        # Links feeding into this node, and links leaving it.
        self.input = []
        self.output = []
        # Connect every predecessor to this node with a fresh link,
        # registering the link on both endpoints.
        for predecessor in input_nodes:
            connection = link(predecessor, self)
            self.input.append(connection)
            predecessor.output.append(connection)
def incoming(node):
    """Weighted sum of the activations feeding into *node*."""
    total = 0
    for connection in node.input:
        total += connection.in_node.activation * connection.weight
    return total
def neural_network_output(network, input):
    """Forward-propagate the pattern *input* through *network*.
    Updates every node's ``activation`` in place and returns the list of
    activations of the final (output) layer.
    """
    # The input layer simply takes on the supplied pattern.
    for index, unit in enumerate(network[0]):
        unit.activation = input[index]
    # Hidden and output layers: squash the weighted input sum.
    for layer in network[1:]:
        for unit in layer:
            unit.activation = sigmoid(incoming(unit))
    return [unit.activation for unit in network[-1]]
def back_propagate_error(network, answer): # network: [list(list(node))], answer: [list(int)]
    """Run one backpropagation pass: compute per-node deltas from the
    target *answer* and apply gradient-descent weight updates (using the
    module-global learning rate ``alpha``)."""
    #output = [node.activation for node in network[-1]] # [list(float)]
    # output layer deltas
    for index, node in enumerate(network[-1]): # [tuple(int, node)]
        node.delta = deriv(incoming(node)) * (answer[index] - node.activation) # [float]
    # backward propagate error
    for layer in network[-2::-1]: # [list(list(node))]
        for node in layer: # [list(node)]
            node.delta = deriv(incoming(node)) * sum([link.out_node.delta * link.weight for link in node.output]) # [float]
            # Update each outgoing weight along its error gradient.
            for link in node.output: # [list(nlink)]
                link.weight += alpha * node.activation * link.out_node.delta # [float]
def append_error(network, examples): # network: [list(list(node))], examples: [list(tuple(list(int)))]
    """Append the network's current RMS error over *examples* to the
    module-global ``errors`` list (set up by train_network)."""
    compare = [(neural_network_output(network, example)[0], answer[0]) for example, answer in examples] # [list(tuple(float, int))]
    errors.append(sqrt((1.0/len(examples))*sum([pow(answer-output,2) for output, answer in compare]))) # [tuple(float, int)]
def train_network(network, examples, epochs): # network: [list(list(node))], examples: [list(tuple(list(int)))], epochs: [int]
    """Train *network* on *examples* for *epochs* passes, recording the RMS
    error (in the module-global ``errors``) once before training and once
    after each epoch."""
    global errors
    errors = [] # [list(float)]
    append_error(network, examples) # []
    for epoch in range(epochs): # [list(int)]
        for example, answer in examples: # [tuple(list(int))]
            # Forward pass followed by one backpropagation update.
            output = neural_network_output(network, example) # [list(float)]
            back_propagate_error(network, answer) # []
            #print_weights(network)
        append_error(network, examples) # []
#def print_weights(network):
# for number, layer in enumerate(network[-2::-1]):
# print 'layer', number
# for node in layer:
# print [link.weight for link in node.output]
# Learning rate used by back_propagate_error.
alpha = 0.5 # [float]
# 10-4-1 feed-forward network: input, hidden and single-output layers.
input_layer = [node([]) for n in range(10)] # [list(node)]
hidden_layer = [node(input_layer) for n in range(4)] # [list(node)]
output_layer = [node(hidden_layer) for n in range(1)] # [list(node)]
network = [input_layer, hidden_layer, output_layer] # [list(list(node))]
# Training set: (10-element input pattern, 1-element target output) pairs.
examples = [ ([1,0,0,1,1,2,0,1,0,0], [1]), # [list(tuple(list(int)))]
             ([1,0,0,1,2,0,0,0,2,2], [0]), # [tuple(list(int))]
             ([0,1,0,0,1,0,0,0,3,0], [1]), # [list(int)]
             ([1,0,1,1,2,0,1,0,2,1], [1]), # [tuple(list(int))]
             ([1,0,1,0,2,2,0,1,0,3], [0]), # [tuple(list(int))]
             ([0,1,0,1,1,1,1,1,1,0], [1]), # [tuple(list(int))]
             ([0,1,0,0,0,0,1,0,3,0], [0]), # [list(int)]
             ([0,0,0,1,1,1,1,1,2,0], [1]), # [list(int)]
             ([0,1,1,0,2,0,1,0,3,3], [0]), # [list(int)]
             ([1,1,1,1,2,2,0,1,1,1], [0]), # [list(int)]
             ([0,0,0,0,0,0,0,0,2,0], [0]), # [list(int)]
             ([1,1,1,1,2,0,0,0,3,2], [1]) ] # [list(int)]
epochs = 1000 # [int]
train_network(network, examples, epochs) # []
# Print the trained network's output for every training example.
for hup in [neural_network_output(network, example) for example, answer in examples]:
    print [('%.2f'%hap) for hap in hup]
|
mlperf/training_results_v0.7 | refs/heads/master | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/python/mxnet/gluon/model_zoo/vision/inception.py | 11 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Inception, implemented in Gluon."""
__all__ = ['Inception3', 'inception_v3']
import os
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ...contrib.nn import HybridConcurrent
from .... import base
# Helpers
def _make_basic_conv(**kwargs):
    """Conv2D (no bias) -> BatchNorm(eps=1e-3) -> ReLU, as one sequential block.

    All keyword arguments are forwarded to ``nn.Conv2D``.
    """
    block = nn.HybridSequential(prefix='')
    block.add(nn.Conv2D(use_bias=False, **kwargs))
    block.add(nn.BatchNorm(epsilon=0.001))
    block.add(nn.Activation('relu'))
    return block
def _make_branch(use_pool, *conv_settings):
    """Build one inception branch: optional pooling followed by conv blocks.

    Parameters
    ----------
    use_pool : str or None
        'avg' or 'max' to prepend the matching pooling layer, else nothing.
    *conv_settings : tuple
        Each is ``(channels, kernel_size, strides, padding)``; ``None``
        entries fall back to the ``nn.Conv2D`` defaults.
    """
    branch = nn.HybridSequential(prefix='')
    if use_pool == 'avg':
        branch.add(nn.AvgPool2D(pool_size=3, strides=1, padding=1))
    elif use_pool == 'max':
        branch.add(nn.MaxPool2D(pool_size=3, strides=2))
    names = ('channels', 'kernel_size', 'strides', 'padding')
    for setting in conv_settings:
        conv_kwargs = {name: value
                       for name, value in zip(names, setting)
                       if value is not None}
        branch.add(_make_basic_conv(**conv_kwargs))
    return branch
def _make_A(pool_features, prefix):
    """Inception-A block: parallel 1x1, 5x5, double-3x3 and pooled-1x1 branches
    concatenated along the channel axis."""
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (64, 1, None, None)))
        out.add(_make_branch(None,
                             (48, 1, None, None),
                             (64, 5, None, 2)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, None, 1)))
        out.add(_make_branch('avg',
                             (pool_features, 1, None, None)))
    return out
def _make_B(prefix):
    """Inception-B (grid reduction) block: strided 3x3, strided double-3x3 and
    max-pool branches concatenated along the channel axis."""
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (384, 3, 2, None)))
        out.add(_make_branch(None,
                             (64, 1, None, None),
                             (96, 3, None, 1),
                             (96, 3, 2, None)))
        out.add(_make_branch('max'))
    return out
def _make_C(channels_7x7, prefix):
    """Inception-C block: 7x7 convolutions factorized into 1x7 / 7x1 pairs,
    with `channels_7x7` internal channels, plus 1x1 and pooled branches."""
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None)))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0))))
        out.add(_make_branch(None,
                             (channels_7x7, 1, None, None),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (channels_7x7, (1, 7), None, (0, 3)),
                             (channels_7x7, (7, 1), None, (3, 0)),
                             (192, (1, 7), None, (0, 3))))
        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out
def _make_D(prefix):
    """Inception-D (grid reduction) block: strided 3x3 branch, factorized-7x7
    branch ending in a strided 3x3, and a max-pool branch."""
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (320, 3, 2, None)))
        out.add(_make_branch(None,
                             (192, 1, None, None),
                             (192, (1, 7), None, (0, 3)),
                             (192, (7, 1), None, (3, 0)),
                             (192, 3, 2, None)))
        out.add(_make_branch('max'))
    return out
def _make_E(prefix):
    """Inception-E block: 1x1 branch, two branches whose 3x3 convolutions are
    split into parallel 1x3/3x1 halves, and a pooled-1x1 branch."""
    out = HybridConcurrent(axis=1, prefix=prefix)
    with out.name_scope():
        out.add(_make_branch(None,
                             (320, 1, None, None)))
        # 1x1 reduction followed by a parallel 1x3 / 3x1 split.
        branch_3x3 = nn.HybridSequential(prefix='')
        out.add(branch_3x3)
        branch_3x3.add(_make_branch(None,
                                    (384, 1, None, None)))
        branch_3x3_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3_split.add(_make_branch(None,
                                          (384, (1, 3), None, (0, 1))))
        branch_3x3_split.add(_make_branch(None,
                                          (384, (3, 1), None, (1, 0))))
        branch_3x3.add(branch_3x3_split)
        # 1x1 + 3x3 stack followed by the same 1x3 / 3x1 split.
        branch_3x3dbl = nn.HybridSequential(prefix='')
        out.add(branch_3x3dbl)
        branch_3x3dbl.add(_make_branch(None,
                                       (448, 1, None, None),
                                       (384, 3, None, 1)))
        branch_3x3dbl_split = HybridConcurrent(axis=1, prefix='')
        branch_3x3dbl.add(branch_3x3dbl_split)
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (1, 3), None, (0, 1))))
        branch_3x3dbl_split.add(_make_branch(None,
                                             (384, (3, 1), None, (1, 0))))
        out.add(_make_branch('avg',
                             (192, 1, None, None)))
    return out
def make_aux(classes):
    """Auxiliary classifier head (pool, two conv blocks, dense over *classes*).
    NOTE(review): not referenced elsewhere in this file — presumably kept for
    the commented-out ``use_aux_logits`` path in Inception3.__init__."""
    out = nn.HybridSequential(prefix='')
    out.add(nn.AvgPool2D(pool_size=5, strides=3))
    out.add(_make_basic_conv(channels=128, kernel_size=1))
    out.add(_make_basic_conv(channels=768, kernel_size=5))
    out.add(nn.Flatten())
    out.add(nn.Dense(classes))
    return out
# Net
class Inception3(HybridBlock):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.
    Parameters
    ----------
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self, classes=1000, **kwargs):
        super(Inception3, self).__init__(**kwargs)
        # self.use_aux_logits = use_aux_logits
        with self.name_scope():
            # Stem: three convolutions, max-pool, two convolutions, max-pool.
            self.features = nn.HybridSequential(prefix='')
            self.features.add(_make_basic_conv(channels=32, kernel_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=32, kernel_size=3))
            self.features.add(_make_basic_conv(channels=64, kernel_size=3, padding=1))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            self.features.add(_make_basic_conv(channels=80, kernel_size=1))
            self.features.add(_make_basic_conv(channels=192, kernel_size=3))
            self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
            # Inception trunk: 3x A, reduction B, 4x C, reduction D, 2x E.
            self.features.add(_make_A(32, 'A1_'))
            self.features.add(_make_A(64, 'A2_'))
            self.features.add(_make_A(64, 'A3_'))
            self.features.add(_make_B('B_'))
            self.features.add(_make_C(128, 'C1_'))
            self.features.add(_make_C(160, 'C2_'))
            self.features.add(_make_C(160, 'C3_'))
            self.features.add(_make_C(192, 'C4_'))
            self.features.add(_make_D('D_'))
            self.features.add(_make_E('E1_'))
            self.features.add(_make_E('E2_'))
            # Global pooling, dropout, and the final classifier.
            self.features.add(nn.AvgPool2D(pool_size=8))
            self.features.add(nn.Dropout(0.5))
            self.output = nn.Dense(classes)
    def hybrid_forward(self, F, x):
        # Feature extractor followed by the dense classification head.
        x = self.features(x)
        x = self.output(x)
        return x
# Constructor
def inception_v3(pretrained=False, ctx=cpu(),
                 root=os.path.join(base.data_dir(), 'models'), **kwargs):
    r"""Inception v3 model from
    `"Rethinking the Inception Architecture for Computer Vision"
    <http://arxiv.org/abs/1512.00567>`_ paper.
    Parameters
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    """
    net = Inception3(**kwargs)
    if pretrained:
        # Deferred import keeps model construction free of model-zoo I/O.
        from ..model_store import get_model_file
        net.load_parameters(get_model_file('inceptionv3', root=root), ctx=ctx)
    return net
|
yinsu/grpc | refs/heads/master | src/python/grpcio/grpc/framework/crust/_control.py | 2 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""State and behavior for translating between sync and async control flow."""
import collections
import enum
import sys
import threading
import time
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import callable_util
from grpc.framework.foundation import future
from grpc.framework.foundation import stream
from grpc.framework.interfaces.base import base
from grpc.framework.interfaces.base import utilities
from grpc.framework.interfaces.face import face
# Log and ValueError message texts used by Rendezvous and pool_wrap below.
_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!'
_INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Crust) Internal Error! )-:'
_CANNOT_SET_INITIAL_METADATA = (
    'Could not set initial metadata - has it already been set, or has a ' +
    'payload already been sent?')
_CANNOT_SET_TERMINAL_METADATA = (
    'Could not set terminal metadata - has it already been set, or has RPC ' +
    'completion already been indicated?')
_CANNOT_SET_CODE = (
    'Could not set code - has it already been set, or has RPC completion ' +
    'already been indicated?')
_CANNOT_SET_DETAILS = (
    'Could not set details - has it already been set, or has RPC completion ' +
    'already been indicated?')
class _DummyOperator(base.Operator):
  """No-op operator installed on a Rendezvous once it has terminated, so late
  advance() calls are silently discarded."""
  def advance(
      self, initial_metadata=None, payload=None, completion=None,
      allowance=None):
    pass
# Shared singleton; _DummyOperator is stateless.
_DUMMY_OPERATOR = _DummyOperator()
class _Awaited(
    collections.namedtuple('_Awaited', ('kind', 'value',))):
  """A value expected from the other side of the operation: either not yet
  arrived, or arrived with the received value."""
  @enum.unique
  class Kind(enum.Enum):
    NOT_YET_ARRIVED = 'not yet arrived'
    ARRIVED = 'arrived'
# Shared sentinels for the common _Awaited states.
_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None)
_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None)
class _Transitory(
    collections.namedtuple('_Transitory', ('kind', 'value',))):
  """A value supplied by application code and later consumed: not yet seen,
  present (set but not consumed), or gone (consumed or discarded)."""
  @enum.unique
  class Kind(enum.Enum):
    NOT_YET_SEEN = 'not yet seen'
    PRESENT = 'present'
    GONE = 'gone'
# Shared sentinels for the common _Transitory states.
_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None)
_GONE = _Transitory(_Transitory.Kind.GONE, None)
class _Termination(
    collections.namedtuple(
        '_Termination', ('terminated', 'abortion', 'abortion_error',))):
  """Values indicating whether and how an RPC has terminated.
  Attributes:
    terminated: A boolean indicating whether or not the RPC has terminated.
    abortion: A face.Abortion value describing the RPC's abortion or None if the
      RPC did not abort.
    abortion_error: A face.AbortionError describing the RPC's abortion or None
      if the RPC did not abort.
  """
# Shared sentinel for an RPC that is still in progress.
_NOT_TERMINATED = _Termination(False, None, None)
# Maps a base.Outcome.Kind to a constructor producing the matching
# _Termination; each constructor is called with (initial_metadata,
# terminal_metadata, code, details).
_OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR = {
    base.Outcome.Kind.COMPLETED: lambda *unused_args: _Termination(
        True, None, None),
    base.Outcome.Kind.CANCELLED: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.CANCELLED, *args),
        face.CancellationError(*args)),
    base.Outcome.Kind.EXPIRED: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.EXPIRED, *args),
        face.ExpirationError(*args)),
    base.Outcome.Kind.LOCAL_SHUTDOWN: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.LOCAL_SHUTDOWN, *args),
        face.LocalShutdownError(*args)),
    base.Outcome.Kind.REMOTE_SHUTDOWN: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.REMOTE_SHUTDOWN, *args),
        face.RemoteShutdownError(*args)),
    base.Outcome.Kind.RECEPTION_FAILURE: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args),
        face.NetworkError(*args)),
    base.Outcome.Kind.TRANSMISSION_FAILURE: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args),
        face.NetworkError(*args)),
    base.Outcome.Kind.LOCAL_FAILURE: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.LOCAL_FAILURE, *args),
        face.LocalError(*args)),
    base.Outcome.Kind.REMOTE_FAILURE: lambda *args: _Termination(
        True, face.Abortion(face.Abortion.Kind.REMOTE_FAILURE, *args),
        face.RemoteError(*args)),
}
def _wait_once_until(condition, until):
  """Block once on *condition*, honoring an optional absolute deadline.

  Args:
    condition: A threading.Condition already acquired by the caller.
    until: Absolute deadline (seconds since the epoch) or None for no
      deadline.

  Raises:
    future.TimeoutError: If the deadline has already passed.
  """
  if until is None:
    condition.wait()
    return
  remaining = until - time.time()
  if remaining < 0:
    raise future.TimeoutError()
  condition.wait(timeout=remaining)
def _done_callback_as_operation_termination_callback(
done_callback, rendezvous):
def operation_termination_callback(operation_outcome):
rendezvous.set_outcome(operation_outcome)
done_callback(rendezvous)
return operation_termination_callback
def _abortion_callback_as_operation_termination_callback(
rpc_abortion_callback, rendezvous_set_outcome):
def operation_termination_callback(operation_outcome):
termination = rendezvous_set_outcome(operation_outcome)
if termination.abortion is not None:
rpc_abortion_callback(termination.abortion)
return operation_termination_callback
class Rendezvous(base.Operator, future.Future, stream.Consumer, face.Call):
"""A rendez-vous for the threads of an operation.
Instances of this object present iterator and stream.Consumer interfaces for
interacting with application code and present a base.Operator interface and
maintain a base.Operator internally for interacting with base interface code.
"""
def __init__(self, operator, operation_context):
self._condition = threading.Condition()
self._operator = operator
self._operation_context = operation_context
self._up_initial_metadata = _NOT_YET_ARRIVED
self._up_payload = None
self._up_allowance = 1
self._up_completion = _NOT_YET_ARRIVED
self._down_initial_metadata = _NOT_YET_SEEN
self._down_payload = None
self._down_allowance = 1
self._down_terminal_metadata = _NOT_YET_SEEN
self._down_code = _NOT_YET_SEEN
self._down_details = _NOT_YET_SEEN
self._termination = _NOT_TERMINATED
# The semantics of future.Future.cancel and future.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC
self._cancelled = False
def set_operator_and_context(self, operator, operation_context):
with self._condition:
self._operator = operator
self._operation_context = operation_context
def _down_completion(self):
if self._down_terminal_metadata.kind is _Transitory.Kind.NOT_YET_SEEN:
terminal_metadata = None
self._down_terminal_metadata = _GONE
elif self._down_terminal_metadata.kind is _Transitory.Kind.PRESENT:
terminal_metadata = self._down_terminal_metadata.value
self._down_terminal_metadata = _GONE
else:
terminal_metadata = None
if self._down_code.kind is _Transitory.Kind.NOT_YET_SEEN:
code = None
self._down_code = _GONE
elif self._down_code.kind is _Transitory.Kind.PRESENT:
code = self._down_code.value
self._down_code = _GONE
else:
code = None
if self._down_details.kind is _Transitory.Kind.NOT_YET_SEEN:
details = None
self._down_details = _GONE
elif self._down_details.kind is _Transitory.Kind.PRESENT:
details = self._down_details.value
self._down_details = _GONE
else:
details = None
return utilities.completion(terminal_metadata, code, details)
def _set_outcome(self, outcome):
if not self._termination.terminated:
self._operator = _DUMMY_OPERATOR
self._operation_context = None
self._down_initial_metadata = _GONE
self._down_payload = None
self._down_terminal_metadata = _GONE
self._down_code = _GONE
self._down_details = _GONE
if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
initial_metadata = None
else:
initial_metadata = self._up_initial_metadata.value
if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED:
terminal_metadata = None
else:
terminal_metadata = self._up_completion.value.terminal_metadata
if outcome.kind is base.Outcome.Kind.COMPLETED:
code = self._up_completion.value.code
details = self._up_completion.value.message
else:
code = outcome.code
details = outcome.details
self._termination = _OPERATION_OUTCOME_KIND_TO_TERMINATION_CONSTRUCTOR[
outcome.kind](initial_metadata, terminal_metadata, code, details)
self._condition.notify_all()
return self._termination
def advance(
self, initial_metadata=None, payload=None, completion=None,
allowance=None):
with self._condition:
if initial_metadata is not None:
self._up_initial_metadata = _Awaited(
_Awaited.Kind.ARRIVED, initial_metadata)
if payload is not None:
if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
self._up_initial_metadata = _ARRIVED_AND_NONE
self._up_payload = payload
self._up_allowance -= 1
if completion is not None:
if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED:
self._up_initial_metadata = _ARRIVED_AND_NONE
self._up_completion = _Awaited(
_Awaited.Kind.ARRIVED, completion)
if allowance is not None:
if self._down_payload is not None:
self._operator.advance(payload=self._down_payload)
self._down_payload = None
self._down_allowance += allowance - 1
else:
self._down_allowance += allowance
self._condition.notify_all()
def cancel(self):
with self._condition:
if self._operation_context is not None:
self._operation_context.cancel()
self._cancelled = True
return False
def cancelled(self):
with self._condition:
return self._cancelled
def running(self):
with self._condition:
return not self._termination.terminated
def done(self):
with self._condition:
return self._termination.terminated
def result(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._condition:
while True:
if self._termination.terminated:
if self._termination.abortion is None:
return self._up_payload
elif self._termination.abortion.kind is face.Abortion.Kind.CANCELLED:
raise future.CancelledError()
else:
raise self._termination.abortion_error # pylint: disable=raising-bad-type
else:
_wait_once_until(self._condition, until)
def exception(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._condition:
while True:
if self._termination.terminated:
if self._termination.abortion is None:
return None
else:
return self._termination.abortion_error
else:
_wait_once_until(self._condition, until)
def traceback(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._condition:
while True:
if self._termination.terminated:
if self._termination.abortion_error is None:
return None
else:
abortion_error = self._termination.abortion_error
break
else:
_wait_once_until(self._condition, until)
try:
raise abortion_error
except face.AbortionError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._condition:
if self._operation_context is not None:
outcome = self._operation_context.add_termination_callback(
_done_callback_as_operation_termination_callback(fn, self))
if outcome is None:
return
else:
self._set_outcome(outcome)
fn(self)
def consume(self, value):
with self._condition:
while True:
if self._termination.terminated:
return
elif 0 < self._down_allowance:
self._operator.advance(payload=value)
self._down_allowance -= 1
return
else:
self._condition.wait()
def terminate(self):
with self._condition:
if self._termination.terminated:
return
elif self._down_code.kind is _Transitory.Kind.GONE:
# Conform to specified idempotence of terminate by ignoring extra calls.
return
else:
completion = self._down_completion()
self._operator.advance(completion=completion)
def consume_and_terminate(self, value):
with self._condition:
while True:
if self._termination.terminated:
return
elif 0 < self._down_allowance:
completion = self._down_completion()
self._operator.advance(payload=value, completion=completion)
return
else:
self._condition.wait()
def __iter__(self):
return self
def next(self):
with self._condition:
while True:
if self._termination.abortion_error is not None:
raise self._termination.abortion_error
elif self._up_payload is not None:
payload = self._up_payload
self._up_payload = None
if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED:
self._operator.advance(allowance=1)
return payload
elif self._up_completion.kind is _Awaited.Kind.ARRIVED:
raise StopIteration()
else:
self._condition.wait()
def is_active(self):
with self._condition:
return not self._termination.terminated
def time_remaining(self):
if self._operation_context is None:
return 0
else:
return self._operation_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
with self._condition:
if self._operation_context is None:
return self._termination.abortion
else:
outcome = self._operation_context.add_termination_callback(
_abortion_callback_as_operation_termination_callback(
abortion_callback, self.set_outcome))
if outcome is not None:
return self._set_outcome(outcome).abortion
else:
return self._termination.abortion
def initial_metadata(self):
with self._condition:
while True:
if self._up_initial_metadata.kind is _Awaited.Kind.ARRIVED:
return self._up_initial_metadata.value
elif self._termination.terminated:
return None
else:
self._condition.wait()
def terminal_metadata(self):
with self._condition:
while True:
if self._up_completion.kind is _Awaited.Kind.ARRIVED:
return self._up_completion.value.terminal_metadata
elif self._termination.terminated:
return None
else:
self._condition.wait()
def code(self):
with self._condition:
while True:
if self._up_completion.kind is _Awaited.Kind.ARRIVED:
return self._up_completion.value.code
elif self._termination.terminated:
return None
else:
self._condition.wait()
def details(self):
with self._condition:
while True:
if self._up_completion.kind is _Awaited.Kind.ARRIVED:
return self._up_completion.value.message
elif self._termination.terminated:
return None
else:
self._condition.wait()
def set_initial_metadata(self, initial_metadata):
with self._condition:
if (self._down_initial_metadata.kind is not
_Transitory.Kind.NOT_YET_SEEN):
raise ValueError(_CANNOT_SET_INITIAL_METADATA)
else:
self._down_initial_metadata = _GONE
self._operator.advance(initial_metadata=initial_metadata)
def set_terminal_metadata(self, terminal_metadata):
    """Record terminal metadata for later emission; allowed at most once.

    Raises:
      ValueError: if terminal metadata has already been set.
    """
    with self._condition:
        if (self._down_terminal_metadata.kind is not
                _Transitory.Kind.NOT_YET_SEEN):
            raise ValueError(_CANNOT_SET_TERMINAL_METADATA)
        self._down_terminal_metadata = _Transitory(
            _Transitory.Kind.PRESENT, terminal_metadata)
def set_code(self, code):
    """Record the status code for later emission; allowed at most once.

    Raises:
      ValueError: if the code has already been set.
    """
    with self._condition:
        if self._down_code.kind is not _Transitory.Kind.NOT_YET_SEEN:
            raise ValueError(_CANNOT_SET_CODE)
        self._down_code = _Transitory(_Transitory.Kind.PRESENT, code)
def set_details(self, details):
    """Record the status details for later emission; allowed at most once.

    Raises:
      ValueError: if the details have already been set.
    """
    with self._condition:
        if self._down_details.kind is not _Transitory.Kind.NOT_YET_SEEN:
            raise ValueError(_CANNOT_SET_DETAILS)
        self._down_details = _Transitory(_Transitory.Kind.PRESENT, details)
def set_outcome(self, outcome):
    """Record *outcome* as the operation's termination, under the lock."""
    with self._condition:
        return self._set_outcome(outcome)
def pool_wrap(behavior, operation_context):
    """Wrap an operation-related behavior so that it may be called in a pool.

    Args:
      behavior: A callable related to carrying out an operation.
      operation_context: A base_interfaces.OperationContext for the operation.

    Returns:
      A callable that when called carries out the behavior of the given
      callable and routes any exception it raises into *operation_context*
      rather than letting it propagate.
    """
    # Exceptions that signal an expected end to the operation's lifecycle.
    expected_errors = (
        abandonment.Abandoned,
        face.CancellationError,
        face.ExpirationError,
        face.LocalShutdownError,
        face.RemoteShutdownError,
        face.NetworkError,
        face.RemoteError,
    )

    def translation(*args):
        try:
            behavior(*args)
        except expected_errors as error:
            # Only fail the context if it has not already produced an outcome.
            if operation_context.outcome() is None:
                operation_context.fail(error)
        except Exception as error:
            operation_context.fail(error)

    return callable_util.with_exceptions_logged(
        translation, _INTERNAL_ERROR_LOG_MESSAGE)
|
tomshen/penn-projects | refs/heads/master | projects/views.py | 1 | from django.template import Context, loader
from projects.models import Project
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404, render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils import timezone
from projects.forms import ProjectForm
def index(request):
    """Render the paginated list of approved projects, newest first.

    Shows 20 projects per page.  A non-integer ``page`` query parameter
    falls back to the first page; an out-of-range one shows the last page.
    """
    # Fix: filter in the database instead of fetching every project and
    # filtering in a Python loop (the original iterated all rows).
    approved_projects = Project.objects.filter(approved=True).order_by('-sub_date')
    paginator = Paginator(approved_projects, 20)
    page = request.GET.get('page')
    try:
        page_projects = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page requested: show the first page.
        page_projects = paginator.page(1)
    except EmptyPage:
        # Page number out of range: show the last available page.
        page_projects = paginator.page(paginator.num_pages)
    t = loader.get_template('projects/index.html')
    c = Context({
        'page_projects': page_projects,
    })
    return HttpResponse(t.render(c))
def projectdisplay(request, project_id):
    """Render the detail page for a single project, or 404 if absent."""
    p = get_object_or_404(Project, pk=project_id)
    # Use render() (already imported and used by projectsubmit) instead of
    # the deprecated render_to_response(), for consistency and so the
    # template gets a proper RequestContext.
    return render(request, 'projects/projectdisplay.html', {'project': p})
def projectsubmit(request):
    """Handle the project submission form.

    GET renders an empty form; POST validates it, normalizes the authors
    list and the thumbnail URL, and saves the project unapproved so a
    moderator can review it.
    """
    if request.method == 'POST':
        form = ProjectForm(request.POST)
        if form.is_valid():
            pd = form.save(commit=False)
            pd.sub_date = timezone.now()
            # New submissions always await moderator approval.
            pd.approved = False
            # Normalize the authors list: one author per line, joined into a
            # single human-readable string ("a, b, and c").
            al = pd.authors.strip().replace('\r\n', '*').replace('\r', '*').replace('\n', '*').split('*')
            # Fix: removed a leftover Python 2 debug statement (``print al``)
            # that is both debug residue and a syntax error under Python 3.
            if len(al) > 1:
                al[-1] = 'and ' + al[-1].strip()
            if len(al) > 2:
                pd.authors = ', '.join(al)
            else:
                pd.authors = ' '.join(al)
            # Rewrite imgur page URLs into direct image links if needed.
            if pd.thumbnail_url:
                thumb = pd.thumbnail_url.strip()
                if 'imgur' in thumb and 'i.' not in thumb:
                    pd.thumbnail_url = 'i.' + thumb.replace('https://', '').replace('http://', '')
                    if '.jpg' not in pd.thumbnail_url:
                        pd.thumbnail_url += '.jpg'
            pd.save()
            return render(request, 'projects/projectsubmit.html', {'submitted': True})
    else:
        form = ProjectForm()
    return render(request, 'projects/projectsubmit.html', {'form': form, 'submitted': False})
|
PXke/invenio | refs/heads/dev-pu | invenio/legacy/websubmit/inveniounoconv.py | 1 | #!@OPENOFFICE_PYTHON@
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""
Run-Unoconv-as-nobody wrapper.
"""
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 only
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
### Copyright 2007-2010 Dag Wieers <dag@wieers.com>
from distutils.version import LooseVersion
import getopt
import glob
import os
import subprocess
import sys
import time
import signal
import errno
from invenio.legacy.websubmit.file_converter import CFG_OPENOFFICE_TMPDIR
# Location of the pid file for the soffice process managed by this wrapper.
CFG_SOFFICE_PID = os.path.join(CFG_OPENOFFICE_TMPDIR, 'soffice.pid')
__version__ = "$Revision$"
# $Source$
VERSION = '0.6'
# Document families unoconv knows how to convert between.
doctypes = ('document', 'graphics', 'presentation', 'spreadsheet')
# NOTE(review): ``global`` at module level is a no-op -- these names are
# module globals regardless; kept for documentation value only.
global convertor, office, ooproc, product
# Handle to the spawned office listener subprocess, if any.
ooproc = None
exitcode = 0
class Office:
    """A discovered (Libre/Open)Office installation and its key paths."""

    def __init__(self, basepath, urepath, unopath, pyuno, binary, python, pythonhome):
        # Record every path verbatim; discovery logic lives in find_offices().
        self.basepath = basepath
        self.urepath = urepath
        self.unopath = unopath
        self.pyuno = pyuno
        self.binary = binary
        self.python = python
        self.pythonhome = pythonhome

    def __str__(self):
        return self.basepath

    # An Office displays as its installation root in either representation.
    __repr__ = __str__
### The first thing we ought to do is find a suitable Office installation
### with a compatible pyuno library that we can import.
###
### See: http://user.services.openoffice.org/en/forum/viewtopic.php?f=45&t=36370&p=166783
def find_offices():
    """Locate candidate (Libre/Open)Office installations on this machine.

    Scans UNO_PATH (if set) and a platform-specific list of well-known
    install locations for a matching pyuno library, soffice binary, URE
    directory and bundled python, and returns a list of Office objects
    describing every viable combination found.
    """
    ret = []
    extrapaths = []
    ### Try using UNO_PATH first (in many incarnations, we'll see what sticks)
    if 'UNO_PATH' in os.environ:
        extrapaths += [ os.environ['UNO_PATH'],
                        os.path.dirname(os.environ['UNO_PATH']),
                        os.path.dirname(os.path.dirname(os.environ['UNO_PATH'])) ]
    else:
        if os.name in ( 'nt', 'os2' ):
            if 'PROGRAMFILES' in os.environ.keys():
                extrapaths += glob.glob(os.environ['PROGRAMFILES']+'\\LibreOffice*') + \
                              glob.glob(os.environ['PROGRAMFILES']+'\\OpenOffice.org*')
            if 'PROGRAMFILES(X86)' in os.environ.keys():
                extrapaths += glob.glob(os.environ['PROGRAMFILES(X86)']+'\\LibreOffice*') + \
                              glob.glob(os.environ['PROGRAMFILES(X86)']+'\\OpenOffice.org*')
        elif os.name in ( 'mac', ) or sys.platform in ( 'darwin', ):
            extrapaths += [ '/Applications/LibreOffice.app/Contents',
                            '/Applications/NeoOffice.app/Contents',
                            '/Applications/OpenOffice.org.app/Contents' ]
        else:
            extrapaths += glob.glob('/usr/lib*/libreoffice*') + \
                          glob.glob('/usr/lib*/openoffice*') + \
                          glob.glob('/usr/lib*/ooo*') + \
                          glob.glob('/opt/libreoffice*') + \
                          glob.glob('/opt/openoffice*') + \
                          glob.glob('/opt/ooo*') + \
                          glob.glob('/usr/local/libreoffice*') + \
                          glob.glob('/usr/local/openoffice*') + \
                          glob.glob('/usr/local/ooo*') + \
                          glob.glob('/usr/local/lib/libreoffice*')
    ### Find a working set for python UNO bindings
    for basepath in extrapaths:
        # Per-platform names for the library/binary/python pieces we need.
        if os.name in ( 'nt', 'os2' ):
            officelibraries = ( 'pyuno.pyd', )
            officebinaries = ( 'soffice.exe' ,)
            pythonbinaries = ( 'python.exe', )
            pythonhomes = ()
        elif os.name in ( 'mac', ) or sys.platform in ( 'darwin', ):
            officelibraries = ( 'pyuno.so', 'pyuno.dylib' )
            officebinaries = ( 'soffice.bin', )
            pythonbinaries = ( 'python.bin', 'python' )
            pythonhomes = ( 'OOoPython.framework/Versions/*/lib/python*', )
        else:
            officelibraries = ( 'pyuno.so', )
            officebinaries = ( 'soffice.bin', )
            pythonbinaries = ( 'python.bin', 'python', )
            pythonhomes = ( 'python-core-*', )
        ### Older LibreOffice/OpenOffice and Windows use basis-link/ or basis/
        libpath = 'error'
        for basis in ( 'basis-link', 'basis', '' ):
            for lib in officelibraries:
                if os.path.isfile(os.path.join(basepath, basis, 'program', lib)):
                    libpath = os.path.join(basepath, basis, 'program')
                    officelibrary = os.path.join(libpath, lib)
                    info(3, "Found %s in %s" % (lib, libpath))
                    # Break the inner loop...
                    break
            # Continue if the inner loop wasn't broken.
            else:
                continue
            # Inner loop was broken, break the outer.
            break
        else:
            continue
        ### MacOSX have soffice binaries installed in MacOS subdirectory, not program
        unopath = 'error'
        for basis in ( 'basis-link', 'basis', '' ):
            for bin in officebinaries:
                if os.path.isfile(os.path.join(basepath, basis, 'program', bin)):
                    unopath = os.path.join(basepath, basis, 'program')
                    officebinary = os.path.join(unopath, bin)
                    info(3, "Found %s in %s" % (bin, unopath))
                    # Break the inner loop...
                    break
            # Continue if the inner loop wasn't broken.
            else:
                continue
            # Inner loop was broken, break the outer.
            break
        else:
            continue
        ### Windows/MacOSX does not provide or need a URE/lib directory ?
        urepath = ''
        for basis in ( 'basis-link', 'basis', '' ):
            for ure in ( 'ure-link', 'ure', 'URE', '' ):
                if os.path.isfile(os.path.join(basepath, basis, ure, 'lib', 'unorc')):
                    urepath = os.path.join(basepath, basis, ure)
                    info(3, "Found %s in %s" % ('unorc', os.path.join(urepath, 'lib')))
                    # Break the inner loop...
                    break
            # Continue if the inner loop wasn't broken.
            else:
                continue
            # Inner loop was broken, break the outer.
            break
        pythonhome = None
        for home in pythonhomes:
            if glob.glob(os.path.join(libpath, home)):
                pythonhome = glob.glob(os.path.join(libpath, home))[0]
                info(3, "Found %s in %s" % (home, pythonhome))
                break
#        if not os.path.isfile(os.path.join(basepath, program, officebinary)):
#            continue
#        info(3, "Found %s in %s" % (officebinary, os.path.join(basepath, program)))
#        if not glob.glob(os.path.join(basepath, basis, program, 'python-core-*')):
#            continue
        # NOTE(review): this loop has no ``break``, so the ``else`` clause
        # below always runs -- a system-python Office entry is appended in
        # addition to any bundled-python entries found above.  Presumably
        # intentional (system python as fallback candidate) -- confirm.
        for pythonbinary in pythonbinaries:
            if os.path.isfile(os.path.join(unopath, pythonbinary)):
                info(3, "Found %s in %s" % (pythonbinary, unopath))
                ret.append(Office(basepath, urepath, unopath, officelibrary, officebinary,
                                  os.path.join(unopath, pythonbinary), pythonhome))
        else:
            info(3, "Considering %s" % basepath)
            ret.append(Office(basepath, urepath, unopath, officelibrary, officebinary,
                              sys.executable, None))
    return ret
def office_environ(office):
    """Mutate os.environ and sys.path so pyuno from *office* can be imported."""
    ### Set PATH so that crash_report is found
    os.environ['PATH'] = os.path.join(office.basepath, 'program') + os.pathsep + os.environ['PATH']
    ### Set UNO_PATH so that "officehelper.bootstrap()" can find soffice executable:
    os.environ['UNO_PATH'] = office.unopath
    ### Set URE_BOOTSTRAP so that "uno.getComponentContext()" bootstraps a complete
    ### UNO environment
    if os.name in ( 'nt', 'os2' ):
        os.environ['URE_BOOTSTRAP'] = 'vnd.sun.star.pathname:' + os.path.join(office.basepath, 'program', 'fundamental.ini')
    else:
        os.environ['URE_BOOTSTRAP'] = 'vnd.sun.star.pathname:' + os.path.join(office.basepath, 'program', 'fundamentalrc')
    ### Set LD_LIBRARY_PATH so that "import pyuno" finds libpyuno.so:
    if 'LD_LIBRARY_PATH' in os.environ:
        os.environ['LD_LIBRARY_PATH'] = office.unopath + os.pathsep + \
                                        os.path.join(office.urepath, 'lib') + os.pathsep + \
                                        os.environ['LD_LIBRARY_PATH']
    else:
        os.environ['LD_LIBRARY_PATH'] = office.unopath + os.pathsep + \
                                        os.path.join(office.urepath, 'lib')
    if office.pythonhome:
        # Bundled python: put its lib directories (and the UNO dir) first on
        # sys.path so they win over the system installation.
        for libpath in ( os.path.join(office.pythonhome, 'lib'),
                         os.path.join(office.pythonhome, 'lib', 'lib-dynload'),
                         os.path.join(office.pythonhome, 'lib', 'lib-tk'),
                         os.path.join(office.pythonhome, 'lib', 'site-packages'),
                         office.unopath):
            sys.path.insert(0, libpath)
    else:
        ### Still needed for system python using LibreOffice UNO bindings
        ### Although we prefer to use a system UNO binding in this case
        sys.path.append(office.unopath)
def debug_office():
    """Dump the office-related environment variables to stderr."""
    if 'URE_BOOTSTRAP' in os.environ:
        print('URE_BOOTSTRAP=%s' % os.environ['URE_BOOTSTRAP'], file=sys.stderr)
    if 'UNO_PATH' in os.environ:
        print('UNO_PATH=%s' % os.environ['UNO_PATH'], file=sys.stderr)
    if 'UNO_TYPES' in os.environ:
        print('UNO_TYPES=%s' % os.environ['UNO_TYPES'], file=sys.stderr)
    # Fix: the PATH line previously went to stdout while every other
    # diagnostic line went to stderr; keep all diagnostics on stderr.
    print('PATH=%s' % os.environ['PATH'], file=sys.stderr)
    if 'PYTHONHOME' in os.environ:
        print('PYTHONHOME=%s' % os.environ['PYTHONHOME'], file=sys.stderr)
    if 'PYTHONPATH' in os.environ:
        print('PYTHONPATH=%s' % os.environ['PYTHONPATH'], file=sys.stderr)
    if 'LD_LIBRARY_PATH' in os.environ:
        print('LD_LIBRARY_PATH=%s' % os.environ['LD_LIBRARY_PATH'], file=sys.stderr)
def python_switch(office):
    """Re-execute this script under *office*'s python interpreter.

    Sets up PYTHONHOME/PYTHONPATH for a bundled python, points HOME at the
    OpenOffice temporary directory, then replaces (or spawns and waits on)
    the current process.  Does not return.
    """
    if office.pythonhome:
        os.environ['PYTHONHOME'] = office.pythonhome
        os.environ['PYTHONPATH'] = os.path.join(office.pythonhome, 'lib') + os.pathsep + \
                                   os.path.join(office.pythonhome, 'lib', 'lib-dynload') + os.pathsep + \
                                   os.path.join(office.pythonhome, 'lib', 'lib-tk') + os.pathsep + \
                                   os.path.join(office.pythonhome, 'lib', 'site-packages') + os.pathsep + \
                                   office.unopath
    # Invenio patch: keep office user profile data inside the tmp dir.
    os.environ['HOME'] = CFG_OPENOFFICE_TMPDIR
    os.environ['UNO_PATH'] = office.unopath
    info(3, "-> Switching from %s to %s" % (sys.executable, office.python))
    if os.name in ('nt', 'os2'):
        ### os.execv is broken on Windows and can't properly parse command line
        ### arguments and executable name if they contain whitespaces. subprocess
        ### fixes that behavior.
        ret = subprocess.call([office.python] + sys.argv[0:])
        sys.exit(ret)
    else:
        ### Set LD_LIBRARY_PATH so that "import pyuno" finds libpyuno.so:
        if 'LD_LIBRARY_PATH' in os.environ:
            os.environ['LD_LIBRARY_PATH'] = office.unopath + os.pathsep + \
                                            os.path.join(office.urepath, 'lib') + os.pathsep + \
                                            os.environ['LD_LIBRARY_PATH']
        else:
            os.environ['LD_LIBRARY_PATH'] = office.unopath + os.pathsep + \
                                            os.path.join(office.urepath, 'lib')
        try:
            os.execvpe(office.python, [office.python, ] + sys.argv[0:], os.environ)
        except OSError:
            ### Mac OS X versions prior to 10.6 do not support execv in
            ### a process that contains multiple threads. Instead of
            ### re-executing in the current process, start a new one
            ### and cause the current process to exit. This isn't
            ### ideal since the new process is detached from the parent
            ### terminal and thus cannot easily be killed with ctrl-C,
            ### but it's better than not being able to autoreload at
            ### all.
            ### Unfortunately the errno returned in this case does not
            ### appear to be consistent, so we can't easily check for
            ### this error specifically.
            ret = os.spawnvpe(os.P_WAIT, office.python, [office.python, ] + sys.argv[0:], os.environ)
            sys.exit(ret)
class Fmt:
    """One output format: document family, short name, extension, filter."""

    def __init__(self, doctype, name, extension, summary, filter):
        self.doctype = doctype
        self.name = name
        self.extension = extension
        self.summary = summary
        self.filter = filter

    def __str__(self):
        # Human-readable form used in --show listings.
        return '{0} [.{1}]'.format(self.summary, self.extension)

    def __repr__(self):
        return '{0}/{1}'.format(self.name, self.doctype)
class FmtList:
    """Registry of known conversion formats, searchable by several keys."""

    def __init__(self):
        self.list = []

    def add(self, doctype, name, extension, summary, filter):
        """Register a new format entry."""
        self.list.append(Fmt(doctype, name, extension, summary, filter))

    def byname(self, name):
        """Return every format whose short name equals *name*."""
        return [fmt for fmt in self.list if fmt.name == name]

    def byextension(self, extension):
        """Return every format whose dotted extension equals *extension*."""
        return [fmt for fmt in self.list
                if os.extsep + fmt.extension == extension]

    def bydoctype(self, doctype, name):
        """Return formats matching both *doctype* and short *name*."""
        return [fmt for fmt in self.list
                if fmt.name == name and fmt.doctype == doctype]

    def display(self, doctype):
        """Print the available formats for *doctype* to stderr."""
        print("The following list of %s formats are currently available:\n" % doctype, file=sys.stderr)
        for fmt in self.list:
            if fmt.doctype == doctype:
                print(" %-8s - %s" % (fmt.name, fmt), file=sys.stderr)
        print(file=sys.stderr)
# Global registry of every conversion target unoconv supports, grouped by
# document family.  Each entry: (doctype, short name, extension, summary,
# internal office export-filter name).
fmts = FmtList()
### TextDocument
fmts.add('document', 'bib', 'bib', 'BibTeX', 'BibTeX_Writer') ### 22
fmts.add('document', 'doc', 'doc', 'Microsoft Word 97/2000/XP', 'MS Word 97') ### 29
fmts.add('document', 'doc6', 'doc', 'Microsoft Word 6.0', 'MS WinWord 6.0') ### 24
fmts.add('document', 'doc95', 'doc', 'Microsoft Word 95', 'MS Word 95') ### 28
fmts.add('document', 'docbook', 'xml', 'DocBook', 'DocBook File') ### 39
fmts.add('document', 'docx', 'docx', 'Microsoft Office Open XML', 'Office Open XML Text')
fmts.add('document', 'docx7', 'docx', 'Microsoft Office Open XML', 'MS Word 2007 XML')
fmts.add('document', 'fodt', 'fodt', 'OpenDocument Text (Flat XML)', 'OpenDocument Text Flat XML')
fmts.add('document', 'html', 'html', 'HTML Document (OpenOffice.org Writer)', 'HTML (StarWriter)') ### 3
fmts.add('document', 'latex', 'ltx', 'LaTeX 2e', 'LaTeX_Writer') ### 31
fmts.add('document', 'mediawiki', 'txt', 'MediaWiki', 'MediaWiki')
fmts.add('document', 'odt', 'odt', 'ODF Text Document', 'writer8') ### 10
fmts.add('document', 'ooxml', 'xml', 'Microsoft Office Open XML', 'MS Word 2003 XML') ### 11
fmts.add('document', 'ott', 'ott', 'Open Document Text', 'writer8_template') ### 21
fmts.add('document', 'pdb', 'pdb', 'AportisDoc (Palm)', 'AportisDoc Palm DB')
fmts.add('document', 'pdf', 'pdf', 'Portable Document Format', 'writer_pdf_Export') ### 18
fmts.add('document', 'psw', 'psw', 'Pocket Word', 'PocketWord File')
fmts.add('document', 'rtf', 'rtf', 'Rich Text Format', 'Rich Text Format') ### 16
fmts.add('document', 'sdw', 'sdw', 'StarWriter 5.0', 'StarWriter 5.0') ### 23
fmts.add('document', 'sdw4', 'sdw', 'StarWriter 4.0', 'StarWriter 4.0') ### 2
fmts.add('document', 'sdw3', 'sdw', 'StarWriter 3.0', 'StarWriter 3.0') ### 20
fmts.add('document', 'stw', 'stw', 'Open Office.org 1.0 Text Document Template', 'writer_StarOffice_XML_Writer_Template') ### 9
fmts.add('document', 'sxw', 'sxw', 'Open Office.org 1.0 Text Document', 'StarOffice XML (Writer)') ### 1
fmts.add('document', 'text', 'txt', 'Text Encoded', 'Text (encoded)') ### 26
fmts.add('document', 'txt', 'txt', 'Text', 'Text') ### 34
fmts.add('document', 'uot', 'uot', 'Unified Office Format text','UOF text') ### 27
fmts.add('document', 'vor', 'vor', 'StarWriter 5.0 Template', 'StarWriter 5.0 Vorlage/Template') ### 6
fmts.add('document', 'vor4', 'vor', 'StarWriter 4.0 Template', 'StarWriter 4.0 Vorlage/Template') ### 5
fmts.add('document', 'vor3', 'vor', 'StarWriter 3.0 Template', 'StarWriter 3.0 Vorlage/Template') ### 4
fmts.add('document', 'xhtml', 'html', 'XHTML Document', 'XHTML Writer File') ### 33
### WebDocument
fmts.add('web', 'etext', 'txt', 'Text Encoded (OpenOffice.org Writer/Web)', 'Text (encoded) (StarWriter/Web)') ### 14
fmts.add('web', 'html10', 'html', 'OpenOffice.org 1.0 HTML Template', 'writer_web_StarOffice_XML_Writer_Web_Template') ### 11
fmts.add('web', 'html', 'html', 'HTML Document', 'HTML') ### 2
fmts.add('web', 'html', 'html', 'HTML Document Template', 'writerweb8_writer_template') ### 13
fmts.add('web', 'mediawiki', 'txt', 'MediaWiki', 'MediaWiki_Web') ### 9
fmts.add('web', 'pdf', 'pdf', 'PDF - Portable Document Format', 'writer_web_pdf_Export') ### 10
fmts.add('web', 'sdw3', 'sdw', 'StarWriter 3.0 (OpenOffice.org Writer/Web)', 'StarWriter 3.0 (StarWriter/Web)') ### 3
fmts.add('web', 'sdw4', 'sdw', 'StarWriter 4.0 (OpenOffice.org Writer/Web)', 'StarWriter 4.0 (StarWriter/Web)') ### 4
fmts.add('web', 'sdw', 'sdw', 'StarWriter 5.0 (OpenOffice.org Writer/Web)', 'StarWriter 5.0 (StarWriter/Web)') ### 5
fmts.add('web', 'txt', 'txt', 'OpenOffice.org Text (OpenOffice.org Writer/Web)', 'writerweb8_writer') ### 12
fmts.add('web', 'text10', 'txt', 'OpenOffice.org 1.0 Text Document (OpenOffice.org Writer/Web)', 'writer_web_StarOffice_XML_Writer') ### 15
fmts.add('web', 'text', 'txt', 'Text (OpenOffice.org Writer/Web)', 'Text (StarWriter/Web)') ### 8
fmts.add('web', 'vor4', 'vor', 'StarWriter/Web 4.0 Template', 'StarWriter/Web 4.0 Vorlage/Template') ### 6
fmts.add('web', 'vor', 'vor', 'StarWriter/Web 5.0 Template', 'StarWriter/Web 5.0 Vorlage/Template') ### 7
### Spreadsheet
fmts.add('spreadsheet', 'csv', 'csv', 'Text CSV', 'Text - txt - csv (StarCalc)') ### 16
fmts.add('spreadsheet', 'dbf', 'dbf', 'dBASE', 'dBase') ### 22
fmts.add('spreadsheet', 'dif', 'dif', 'Data Interchange Format', 'DIF') ### 5
fmts.add('spreadsheet', 'fods', 'fods', 'OpenDocument Spreadsheet (Flat XML)', 'OpenDocument Spreadsheet Flat XML')
fmts.add('spreadsheet', 'html', 'html', 'HTML Document (OpenOffice.org Calc)', 'HTML (StarCalc)') ### 7
fmts.add('spreadsheet', 'ods', 'ods', 'ODF Spreadsheet', 'calc8') ### 15
fmts.add('spreadsheet', 'ooxml', 'xml', 'Microsoft Excel 2003 XML', 'MS Excel 2003 XML') ### 23
fmts.add('spreadsheet', 'ots', 'ots', 'ODF Spreadsheet Template', 'calc8_template') ### 14
fmts.add('spreadsheet', 'pdf', 'pdf', 'Portable Document Format', 'calc_pdf_Export') ### 34
fmts.add('spreadsheet', 'pxl', 'pxl', 'Pocket Excel', 'Pocket Excel')
fmts.add('spreadsheet', 'sdc', 'sdc', 'StarCalc 5.0', 'StarCalc 5.0') ### 31
fmts.add('spreadsheet', 'sdc4', 'sdc', 'StarCalc 4.0', 'StarCalc 4.0') ### 11
fmts.add('spreadsheet', 'sdc3', 'sdc', 'StarCalc 3.0', 'StarCalc 3.0') ### 29
fmts.add('spreadsheet', 'slk', 'slk', 'SYLK', 'SYLK') ### 35
fmts.add('spreadsheet', 'stc', 'stc', 'OpenOffice.org 1.0 Spreadsheet Template', 'calc_StarOffice_XML_Calc_Template') ### 2
fmts.add('spreadsheet', 'sxc', 'sxc', 'OpenOffice.org 1.0 Spreadsheet', 'StarOffice XML (Calc)') ### 3
fmts.add('spreadsheet', 'uos', 'uos', 'Unified Office Format spreadsheet', 'UOF spreadsheet') ### 9
fmts.add('spreadsheet', 'vor3', 'vor', 'StarCalc 3.0 Template', 'StarCalc 3.0 Vorlage/Template') ### 18
fmts.add('spreadsheet', 'vor4', 'vor', 'StarCalc 4.0 Template', 'StarCalc 4.0 Vorlage/Template') ### 19
fmts.add('spreadsheet', 'vor', 'vor', 'StarCalc 5.0 Template', 'StarCalc 5.0 Vorlage/Template') ### 20
fmts.add('spreadsheet', 'xhtml', 'xhtml', 'XHTML', 'XHTML Calc File') ### 26
fmts.add('spreadsheet', 'xls', 'xls', 'Microsoft Excel 97/2000/XP', 'MS Excel 97') ### 12
fmts.add('spreadsheet', 'xls5', 'xls', 'Microsoft Excel 5.0', 'MS Excel 5.0/95') ### 8
fmts.add('spreadsheet', 'xls95', 'xls', 'Microsoft Excel 95', 'MS Excel 95') ### 10
fmts.add('spreadsheet', 'xlt', 'xlt', 'Microsoft Excel 97/2000/XP Template', 'MS Excel 97 Vorlage/Template') ### 6
fmts.add('spreadsheet', 'xlt5', 'xlt', 'Microsoft Excel 5.0 Template', 'MS Excel 5.0/95 Vorlage/Template') ### 28
fmts.add('spreadsheet', 'xlt95', 'xlt', 'Microsoft Excel 95 Template', 'MS Excel 95 Vorlage/Template') ### 21
### Graphics
fmts.add('graphics', 'bmp', 'bmp', 'Windows Bitmap', 'draw_bmp_Export') ### 21
fmts.add('graphics', 'emf', 'emf', 'Enhanced Metafile', 'draw_emf_Export') ### 15
fmts.add('graphics', 'eps', 'eps', 'Encapsulated PostScript', 'draw_eps_Export') ### 48
fmts.add('graphics', 'fodg', 'fodg', 'OpenDocument Drawing (Flat XML)', 'OpenDocument Drawing Flat XML')
fmts.add('graphics', 'gif', 'gif', 'Graphics Interchange Format', 'draw_gif_Export') ### 30
fmts.add('graphics', 'html', 'html', 'HTML Document (OpenOffice.org Draw)', 'draw_html_Export') ### 37
fmts.add('graphics', 'jpg', 'jpg', 'Joint Photographic Experts Group', 'draw_jpg_Export') ### 3
fmts.add('graphics', 'met', 'met', 'OS/2 Metafile', 'draw_met_Export') ### 43
fmts.add('graphics', 'odd', 'odd', 'OpenDocument Drawing', 'draw8') ### 6
fmts.add('graphics', 'otg', 'otg', 'OpenDocument Drawing Template', 'draw8_template') ### 20
fmts.add('graphics', 'pbm', 'pbm', 'Portable Bitmap', 'draw_pbm_Export') ### 14
fmts.add('graphics', 'pct', 'pct', 'Mac Pict', 'draw_pct_Export') ### 41
fmts.add('graphics', 'pdf', 'pdf', 'Portable Document Format', 'draw_pdf_Export') ### 28
fmts.add('graphics', 'pgm', 'pgm', 'Portable Graymap', 'draw_pgm_Export') ### 11
fmts.add('graphics', 'png', 'png', 'Portable Network Graphic', 'draw_png_Export') ### 2
fmts.add('graphics', 'ppm', 'ppm', 'Portable Pixelmap', 'draw_ppm_Export') ### 5
fmts.add('graphics', 'ras', 'ras', 'Sun Raster Image', 'draw_ras_Export') ## 31
fmts.add('graphics', 'std', 'std', 'OpenOffice.org 1.0 Drawing Template', 'draw_StarOffice_XML_Draw_Template') ### 53
fmts.add('graphics', 'svg', 'svg', 'Scalable Vector Graphics', 'draw_svg_Export') ### 50
fmts.add('graphics', 'svm', 'svm', 'StarView Metafile', 'draw_svm_Export') ### 55
fmts.add('graphics', 'swf', 'swf', 'Macromedia Flash (SWF)', 'draw_flash_Export') ### 23
fmts.add('graphics', 'sxd', 'sxd', 'OpenOffice.org 1.0 Drawing', 'StarOffice XML (Draw)') ### 26
fmts.add('graphics', 'sxd3', 'sxd', 'StarDraw 3.0', 'StarDraw 3.0') ### 40
fmts.add('graphics', 'sxd5', 'sxd', 'StarDraw 5.0', 'StarDraw 5.0') ### 44
fmts.add('graphics', 'sxw', 'sxw', 'StarOffice XML (Draw)', 'StarOffice XML (Draw)')
fmts.add('graphics', 'tiff', 'tiff', 'Tagged Image File Format', 'draw_tif_Export') ### 13
fmts.add('graphics', 'vor', 'vor', 'StarDraw 5.0 Template', 'StarDraw 5.0 Vorlage') ### 36
fmts.add('graphics', 'vor3', 'vor', 'StarDraw 3.0 Template', 'StarDraw 3.0 Vorlage') ### 35
fmts.add('graphics', 'wmf', 'wmf', 'Windows Metafile', 'draw_wmf_Export') ### 8
fmts.add('graphics', 'xhtml', 'xhtml', 'XHTML', 'XHTML Draw File') ### 45
fmts.add('graphics', 'xpm', 'xpm', 'X PixMap', 'draw_xpm_Export') ### 19
### Presentation
fmts.add('presentation', 'bmp', 'bmp', 'Windows Bitmap', 'impress_bmp_Export') ### 15
fmts.add('presentation', 'emf', 'emf', 'Enhanced Metafile', 'impress_emf_Export') ### 16
fmts.add('presentation', 'eps', 'eps', 'Encapsulated PostScript', 'impress_eps_Export') ### 17
fmts.add('presentation', 'fodp', 'fodp', 'OpenDocument Presentation (Flat XML)', 'OpenDocument Presentation Flat XML')
fmts.add('presentation', 'gif', 'gif', 'Graphics Interchange Format', 'impress_gif_Export') ### 18
fmts.add('presentation', 'html', 'html', 'HTML Document (OpenOffice.org Impress)', 'impress_html_Export') ### 43
fmts.add('presentation', 'jpg', 'jpg', 'Joint Photographic Experts Group', 'impress_jpg_Export') ### 19
fmts.add('presentation', 'met', 'met', 'OS/2 Metafile', 'impress_met_Export') ### 20
fmts.add('presentation', 'odg', 'odg', 'ODF Drawing (Impress)', 'impress8_draw') ### 29
fmts.add('presentation', 'odp', 'odp', 'ODF Presentation', 'impress8') ### 9
fmts.add('presentation', 'otp', 'otp', 'ODF Presentation Template', 'impress8_template') ### 38
fmts.add('presentation', 'pbm', 'pbm', 'Portable Bitmap', 'impress_pbm_Export') ### 21
fmts.add('presentation', 'pct', 'pct', 'Mac Pict', 'impress_pct_Export') ### 22
fmts.add('presentation', 'pdf', 'pdf', 'Portable Document Format', 'impress_pdf_Export') ### 23
fmts.add('presentation', 'pgm', 'pgm', 'Portable Graymap', 'impress_pgm_Export') ### 24
fmts.add('presentation', 'png', 'png', 'Portable Network Graphic', 'impress_png_Export') ### 25
fmts.add('presentation', 'potm', 'potm', 'Microsoft PowerPoint 2007/2010 XML Template', 'Impress MS PowerPoint 2007 XML Template')
fmts.add('presentation', 'pot', 'pot', 'Microsoft PowerPoint 97/2000/XP Template', 'MS PowerPoint 97 Vorlage') ### 3
fmts.add('presentation', 'ppm', 'ppm', 'Portable Pixelmap', 'impress_ppm_Export') ### 26
fmts.add('presentation', 'pptx', 'pptx', 'Microsoft PowerPoint 2007/2010 XML', 'Impress MS PowerPoint 2007 XML') ### 36
fmts.add('presentation', 'pps', 'pps', 'Microsoft PowerPoint 97/2000/XP (Autoplay)', 'MS PowerPoint 97 Autoplay') ### 36
fmts.add('presentation', 'ppt', 'ppt', 'Microsoft PowerPoint 97/2000/XP', 'MS PowerPoint 97') ### 36
fmts.add('presentation', 'pwp', 'pwp', 'PlaceWare', 'placeware_Export') ### 30
fmts.add('presentation', 'ras', 'ras', 'Sun Raster Image', 'impress_ras_Export') ### 27
fmts.add('presentation', 'sda', 'sda', 'StarDraw 5.0 (OpenOffice.org Impress)', 'StarDraw 5.0 (StarImpress)') ### 8
fmts.add('presentation', 'sdd', 'sdd', 'StarImpress 5.0', 'StarImpress 5.0') ### 6
fmts.add('presentation', 'sdd3', 'sdd', 'StarDraw 3.0 (OpenOffice.org Impress)', 'StarDraw 3.0 (StarImpress)') ### 42
fmts.add('presentation', 'sdd4', 'sdd', 'StarImpress 4.0', 'StarImpress 4.0') ### 37
fmts.add('presentation', 'sxd', 'sxd', 'OpenOffice.org 1.0 Drawing (OpenOffice.org Impress)', 'impress_StarOffice_XML_Draw') ### 31
fmts.add('presentation', 'sti', 'sti', 'OpenOffice.org 1.0 Presentation Template', 'impress_StarOffice_XML_Impress_Template') ### 5
fmts.add('presentation', 'svg', 'svg', 'Scalable Vector Graphics', 'impress_svg_Export') ### 14
fmts.add('presentation', 'svm', 'svm', 'StarView Metafile', 'impress_svm_Export') ### 13
fmts.add('presentation', 'swf', 'swf', 'Macromedia Flash (SWF)', 'impress_flash_Export') ### 34
fmts.add('presentation', 'sxi', 'sxi', 'OpenOffice.org 1.0 Presentation', 'StarOffice XML (Impress)') ### 41
fmts.add('presentation', 'tiff', 'tiff', 'Tagged Image File Format', 'impress_tif_Export') ### 12
fmts.add('presentation', 'uop', 'uop', 'Unified Office Format presentation', 'UOF presentation') ### 4
fmts.add('presentation', 'vor', 'vor', 'StarImpress 5.0 Template', 'StarImpress 5.0 Vorlage') ### 40
fmts.add('presentation', 'vor3', 'vor', 'StarDraw 3.0 Template (OpenOffice.org Impress)', 'StarDraw 3.0 Vorlage (StarImpress)') ###1
fmts.add('presentation', 'vor4', 'vor', 'StarImpress 4.0 Template', 'StarImpress 4.0 Vorlage') ### 39
fmts.add('presentation', 'vor5', 'vor', 'StarDraw 5.0 Template (OpenOffice.org Impress)', 'StarDraw 5.0 Vorlage (StarImpress)') ### 2
fmts.add('presentation', 'wmf', 'wmf', 'Windows Metafile', 'impress_wmf_Export') ### 11
fmts.add('presentation', 'xhtml', 'xml', 'XHTML', 'XHTML Impress File') ### 33
fmts.add('presentation', 'xpm', 'xpm', 'X PixMap', 'impress_xpm_Export') ### 10
class Options:
def __init__(self, args):
    """Parse the command-line *args* into option attributes.

    May call sys.exit() directly for --help/--version/--show/--kill/
    --remove, for argument errors, and when no filename is given.
    """
    self.connection = None
    self.debug = False
    self.doctype = None
    self.exportfilter = []
    self.filenames = []
    self.format = None
    self.importfilter = ""
    self.listener = False
    self.nolaunch = False
    self.kill = False
    self.output = None
    self.password = None
    self.pipe = None
    self.port = '2002'
    self.server = 'localhost'
    self.showlist = False
    self.stdout = False
    self.template = None
    self.timeout = 6
    self.verbose = 0
    self.remove = None
    ### Get options from the commandline
    try:
        # NOTE(review): several long options whose short form takes a value
        # ('export', 'import', 'template') are declared without a trailing
        # '=', so only -e/-i/-t accept an argument -- confirm intended.
        opts, args = getopt.getopt (args, 'c:Dd:e:f:hi:Llko:np:s:T:t:vr:',
            ['connection=', 'debug', 'doctype=', 'export', 'format=',
             'help', 'import', 'listener', 'kill', 'no-launch', 'output=',
             'outputpath', 'password=', 'pipe=', 'port=', 'server=',
             'timeout=', 'show', 'stdout', 'template', 'verbose',
             'version', 'remove='] )
    except getopt.error as exc:
        print('unoconv: %s, try unoconv -h for a list of all the options' % str(exc))
        sys.exit(255)
    for opt, arg in opts:
        if opt in ['-h', '--help']:
            self.usage()
            print()
            self.help()
            sys.exit(1)
        elif opt in ['-c', '--connection']:
            self.connection = arg
        elif opt in ['--debug']:
            self.debug = True
        elif opt in ['-d', '--doctype']:
            self.doctype = arg
        elif opt in ['-e', '--export']:
            # Export-filter options arrive as name=value pairs and are
            # converted to UNO PropertyValue objects (bool/int/str).
            l = arg.split('=')
            if len(l) == 2:
                (name, value) = l
                if value in ('True', 'true'):
                    self.exportfilter.append( PropertyValue( name, 0, True, 0 ) )
                elif value in ('False', 'false'):
                    self.exportfilter.append( PropertyValue( name, 0, False, 0 ) )
                else:
                    try:
                        self.exportfilter.append( PropertyValue( name, 0, int(value), 0 ) )
                    except ValueError:
                        self.exportfilter.append( PropertyValue( name, 0, value, 0 ) )
            else:
                print('Warning: Option %s cannot be parsed, ignoring.' % arg, file=sys.stderr)
#                self.exportfilter = arg
        elif opt in ['-f', '--format']:
            self.format = arg
        elif opt in ['-i', '--import']:
            self.importfilter = arg
        elif opt in ['-l', '--listener']:
            self.listener = True
        elif opt in ['-k', '--kill']:
            self.kill = True
        elif opt in ['-n', '--no-launch']:
            self.nolaunch = True
        elif opt in ['-o', '--output']:
            self.output = arg
        elif opt in ['--outputpath']:
            print('Warning: This option is deprecated by --output.', file=sys.stderr)
            self.output = arg
        elif opt in ['--password']:
            self.password = arg
        elif opt in ['--pipe']:
            self.pipe = arg
        elif opt in ['-p', '--port']:
            self.port = arg
        elif opt in ['-s', '--server']:
            self.server = arg
        elif opt in ['--show']:
            self.showlist = True
        elif opt in ['--stdout']:
            self.stdout = True
        elif opt in ['-t', '--template']:
            self.template = arg
        elif opt in ['-T', '--timeout']:
            self.timeout = int(arg)
        elif opt in ['-v', '--verbose']:
            self.verbose = self.verbose + 1
        elif opt in ['-r', '--remove']:
            self.remove = arg
        elif opt in ['--version']:
            self.version()
            sys.exit(255)
    ### Enable verbosity
    if self.verbose >= 2:
        print('Verbosity set to level %d' % self.verbose, file=sys.stderr)
    self.filenames = args
    # Invenio patch: -r/--remove deletes a stale file left by OpenOffice
    # and exits immediately.
    if self.remove:
        if os.path.exists(self.remove):
            os.remove(self.remove)
            print("%s file created by OpenOffice was successfully removed." % self.remove, file=sys.stderr)
            sys.stderr.flush()
        sys.exit(0)
    # Invenio patch: -k/--kill terminates any running soffice and exits.
    if self.kill:
        from invenio.utils.shell import run_shell_command
        run_shell_command('killall %s', [os.path.basename(office.binary)])
        time.sleep(1)
        run_shell_command('killall -9 %s', [os.path.basename(office.binary)])
        print('soffice.bin was hopefully already killed.', file=sys.stderr)
        sys.exit(0)
    if not self.listener and not self.showlist and self.doctype != 'list' and not self.filenames:
        print('unoconv: you have to provide a filename as argument', file=sys.stderr)
        print('Try `unoconv -h\' for more information.', file=sys.stderr)
        sys.exit(255)
    ### Set connection string
    if not self.connection:
        if not self.pipe:
            self.connection = "socket,host=%s,port=%s;urp;StarOffice.ComponentContext" % (self.server, self.port)
#            self.connection = "socket,host=%s,port=%s;urp;" % (self.server, self.port)
        else:
            self.connection = "pipe,name=%s;urp;StarOffice.ComponentContext" % (self.pipe)
    ### Make it easier for people to use a doctype (first letter is enough)
    if self.doctype:
        for doctype in doctypes:
            if doctype.startswith(self.doctype):
                self.doctype = doctype
    ### Check if the user request to see the list of formats
    if self.showlist or self.format == 'list':
        if self.doctype:
            fmts.display(self.doctype)
        else:
            for t in doctypes:
                fmts.display(t)
        sys.exit(0)
    ### If no format was specified, probe it or provide it
    if not self.format:
        # Infer the target format from an "xxx2yyy"-style program name.
        l = sys.argv[0].split('2')
        if len(l) == 2:
            self.format = l[1]
        else:
            self.format = 'pdf'
def version(self):
### Get office product information
product = uno.getComponentContext().ServiceManager.createInstance("com.sun.star.configuration.ConfigurationProvider").createInstanceWithArguments("com.sun.star.configuration.ConfigurationAccess", UnoProps(nodepath="/org.openoffice.Setup/Product"))
print('unoconv %s' % VERSION)
print('Written by Dag Wieers <dag@wieers.com>')
print('Patched to run within Invenio by <info@invenio-software.org>')
print('Homepage at http://dag.wieers.com/home-made/unoconv/')
print()
print('platform %s/%s' % (os.name, sys.platform))
print('python %s' % sys.version)
print(product.ooName, product.ooSetupVersion)
print()
print('build revision $Rev$')
def usage(self):
print('usage: unoconv [options] file [file2 ..]', file=sys.stderr)
def help(self):
print('''Convert from and to any format supported by LibreOffice
unoconv options:
-c, --connection=string use a custom connection string
-d, --doctype=type specify document type
(document, graphics, presentation, spreadsheet)
-e, --export=name=value set export filter options
eg. -e PageRange=1-2
-f, --format=format specify the output format
-i, --import=string set import filter option string
eg. -i utf8
-l, --listener start a permanent listener to use by unoconv clients
-k, --kill kill any listener on the local machine (Invenio)
-r, --remove=filename remove a file created by LibreOffice (Invenio)
-n, --no-launch fail if no listener is found (default: launch one)
-o, --output=name output basename, filename or directory
--pipe=name alternative method of connection using a pipe
-p, --port=port specify the port (default: 2002)
to be used by client or listener
--password=string provide a password to decrypt the document
-s, --server=server specify the server address (default: localhost)
to be used by client or listener
--show list the available output formats
--stdout write output to stdout
-t, --template=file import the styles from template (.ott)
-T, --timeout=secs timeout after secs if connection to listener fails
-v, --verbose be more and more verbose (-vvv for debugging)
''', file=sys.stderr)
class Convertor:
def __init__(self):
global exitcode, ooproc, office, product
unocontext = None
### Do the LibreOffice component dance
self.context = uno.getComponentContext()
self.svcmgr = self.context.ServiceManager
resolver = self.svcmgr.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", self.context)
### Test for an existing connection
info(3, 'Connection type: %s' % op.connection)
try:
unocontext = resolver.resolve("uno:%s" % op.connection)
except NoConnectException as e:
# info(3, "Existing listener not found.\n%s" % e)
info(3, "Existing listener not found.")
if op.nolaunch:
die(113, "Existing listener not found. Unable start listener by parameters. Aborting.")
### Start our own OpenOffice instance
info(3, "Launching our own listener using %s." % office.binary)
try:
product = self.svcmgr.createInstance("com.sun.star.configuration.ConfigurationProvider").createInstanceWithArguments("com.sun.star.configuration.ConfigurationAccess", UnoProps(nodepath="/org.openoffice.Setup/Product"))
if product.ooName != "LibreOffice" or LooseVersion(product.ooSetupVersion) <= LooseVersion('3.3'):
ooproc = subprocess.Popen([office.binary, "-headless", "-invisible", "-nocrashreport", "-nodefault", "-nofirststartwizard", "-nologo", "-norestore", "-accept=%s" % op.connection], env=os.environ)
else:
ooproc = subprocess.Popen([office.binary, "--headless", "--invisible", "--nocrashreport", "--nodefault", "--nofirststartwizard", "--nologo", "--norestore", "--accept=%s" % op.connection], env=os.environ)
info(2, '%s listener successfully started. (pid=%s)' % (product.ooName, ooproc.pid))
### Try connection to it for op.timeout seconds (flakky OpenOffice)
timeout = 0
while timeout <= op.timeout:
### Is it already/still running ?
retcode = ooproc.poll()
if retcode != None:
info(3, "Process %s (pid=%s) exited with %s." % (office.binary, ooproc.pid, retcode))
break
try:
unocontext = resolver.resolve("uno:%s" % op.connection)
break
except NoConnectException:
time.sleep(0.5)
timeout += 0.5
except:
raise
else:
error("Failed to connect to %s (pid=%s) in %d seconds.\n%s" % (office.binary, ooproc.pid, op.timeout, e))
except Exception as e:
raise
error("Launch of %s failed.\n%s" % (office.binary, e))
if not unocontext:
die(251, "Unable to connect or start own listener. Aborting.")
### And some more LibreOffice magic
unosvcmgr = unocontext.ServiceManager
self.desktop = unosvcmgr.createInstanceWithContext("com.sun.star.frame.Desktop", unocontext)
self.cwd = unohelper.systemPathToFileUrl( os.getcwd() )
### List all filters
# self.filters = unosvcmgr.createInstanceWithContext( "com.sun.star.document.FilterFactory", unocontext)
# for filter in self.filters.getElementNames():
# print filter
# #print dir(filter), dir(filter.format)
def getformat(self, inputfn):
doctype = None
### Get the output format from mapping
if op.doctype:
outputfmt = fmts.bydoctype(op.doctype, op.format)
else:
outputfmt = fmts.byname(op.format)
if not outputfmt:
outputfmt = fmts.byextension(os.extsep + op.format)
### If no doctype given, check list of acceptable formats for input file ext doctype
### FIXME: This should go into the for-loop to match each individual input filename
if outputfmt:
inputext = os.path.splitext(inputfn)[1]
inputfmt = fmts.byextension(inputext)
if inputfmt:
for fmt in outputfmt:
if inputfmt[0].doctype == fmt.doctype:
doctype = inputfmt[0].doctype
outputfmt = fmt
break
else:
outputfmt = outputfmt[0]
# print >> sys.stderr, 'unoconv: format `%s\' is part of multiple doctypes %s, selecting `%s\'.' % (format, [fmt.doctype for fmt in outputfmt], outputfmt[0].doctype)
else:
outputfmt = outputfmt[0]
### No format found, throw error
if not outputfmt:
if doctype:
print('unoconv: format [%s/%s] is not known to unoconv.' % (op.doctype, op.format), file=sys.stderr)
else:
print('unoconv: format [%s] is not known to unoconv.' % op.format, file=sys.stderr)
die(1)
return outputfmt
def convert(self, inputfn):
global exitcode
document = None
outputfmt = self.getformat(inputfn)
if op.verbose > 0:
print('Input file:', inputfn, file=sys.stderr)
if not os.path.exists(inputfn):
print('unoconv: file `%s\' does not exist.' % inputfn, file=sys.stderr)
exitcode = 1
try:
### Import phase
phase = "import"
### Load inputfile
inputprops = UnoProps(Hidden=True, ReadOnly=True, UpdateDocMode=QUIET_UPDATE, FilterOptions=op.importfilter)
# if op.password:
# info = UnoProps(algorithm-name="PBKDF2", salt="salt", iteration-count=1024, hash="hash")
# inputprops += UnoProps(ModifyPasswordInfo=info)
inputurl = unohelper.absolutize(self.cwd, unohelper.systemPathToFileUrl(inputfn))
# print dir(self.desktop)
document = self.desktop.loadComponentFromURL( inputurl , "_blank", 0, inputprops )
if not document:
raise UnoException("The document '%s' could not be opened." % inputurl, None)
### Import style template
phase = "import-style"
if op.template:
if os.path.exists(op.template):
info(1, "Template file: %s" % op.template)
templateprops = UnoProps(OverwriteStyles=True)
templateurl = unohelper.absolutize(self.cwd, unohelper.systemPathToFileUrl(op.template))
document.StyleFamilies.loadStylesFromURL(templateurl, templateprops)
else:
print('unoconv: template file `%s\' does not exist.' % op.template, file=sys.stderr)
exitcode = 1
### Update document links
phase = "update-links"
try:
document.updateLinks()
except AttributeError:
# the document doesn't implement the XLinkUpdate interface
pass
### Update document indexes
phase = "update-indexes"
try:
document.refresh()
indexes = document.getDocumentIndexes()
except AttributeError:
# the document doesn't implement the XRefreshable and/or
# XDocumentIndexesSupplier interfaces
pass
else:
for i in range(0, indexes.getCount()):
indexes.getByIndex(i).update()
info(1, "Selected output format: %s" % outputfmt)
info(2, "Selected office filter: %s" % outputfmt.filter)
info(2, "Used doctype: %s" % outputfmt.doctype)
### Export phase
phase = "export"
outputprops = UnoProps(FilterName=outputfmt.filter, OutputStream=OutputStream(), Overwrite=True)
# PropertyValue( "FilterData" , 0, ( PropertyValue( "SelectPdfVersion" , 0, 1 , uno.getConstantByName( "com.sun.star.beans.PropertyState.DIRECT_VALUE" ) ) ), uno.getConstantByName( "com.sun.star.beans.PropertyState.DIRECT_VALUE" ) ),
### Cannot use UnoProps for FilterData property
if op.exportfilter:
outputprops += ( PropertyValue( "FilterData", 0, uno.Any("[]com.sun.star.beans.PropertyValue", tuple( op.exportfilter ), ), 0 ), )
if outputfmt.filter == 'Text (encoded)':
outputprops += UnoProps(FilterOptions="UTF8, LF")
elif outputfmt.filter == 'Text':
outputprops += UnoProps(FilterOptions="UTF8")
elif outputfmt.filter == 'Text - txt - csv (StarCalc)':
outputprops += UnoProps(FilterOptions="44,34,0")
elif outputfmt.filter in ('writer_pdf_Export', 'impress_pdf_Export', 'calc_pdf_Export', 'draw_pdf_Export'):
outputprops += UnoProps(SelectPdfVersion=1)
if not op.stdout:
(outputfn, ext) = os.path.splitext(inputfn)
if not op.output:
outputfn = outputfn + os.extsep + outputfmt.extension
elif os.path.isdir(op.output):
outputfn = os.path.join(op.output, os.path.basename(outputfn) + os.extsep + outputfmt.extension)
elif len(op.filenames) > 1:
outputfn = op.output + os.extsep + outputfmt.extension
else:
outputfn = op.output
outputurl = unohelper.absolutize( self.cwd, unohelper.systemPathToFileUrl(outputfn) )
info(1, "Output file: %s" % outputfn)
else:
outputurl = "private:stream"
try:
document.storeToURL(outputurl, tuple(outputprops) )
except IOException as e:
from invenio.ext.logging import get_pretty_traceback
print(get_pretty_traceback(), file=sys.stderr)
raise UnoException("Unable to store document to %s with properties %s. Exception: %s" % (outputurl, outputprops, e), None)
phase = "dispose"
document.dispose()
document.close(True)
except SystemError as e:
error("unoconv: SystemError during %s phase: %s" % (phase, e))
exitcode = 1
except RuntimeException as e:
error("unoconv: RuntimeException during %s phase: Office probably died. %s" % (phase, e))
exitcode = 6
except DisposedException as e:
error("unoconv: DisposedException during %s phase: Office probably died. %s" % (phase, e))
exitcode = 7
except IllegalArgumentException as e:
error("UNO IllegalArgument during %s phase: Source file cannot be read. %s" % (phase, e))
exitcode = 8
except IOException as e:
# for attr in dir(e): print '%s: %s', (attr, getattr(e, attr))
error("unoconv: IOException during %s phase: %s" % (phase, e.Message))
exitcode = 3
except CannotConvertException as e:
# for attr in dir(e): print '%s: %s', (attr, getattr(e, attr))
error("unoconv: CannotConvertException during %s phase: %s" % (phase, e.Message))
exitcode = 4
except UnoException as e:
if hasattr(e, 'ErrCode'):
error("unoconv: UnoException during %s phase in %s (ErrCode %d)" % (phase, repr(e.__class__), e.ErrCode))
exitcode = e.ErrCode
pass
if hasattr(e, 'Message'):
error("unoconv: UnoException during %s phase: %s" % (phase, e.Message))
exitcode = 5
else:
error("unoconv: UnoException during %s phase in %s" % (phase, repr(e.__class__)))
exitcode = 2
pass
class Listener:
def __init__(self):
global product
info(1, "Start listener on %s:%s" % (op.server, op.port))
self.context = uno.getComponentContext()
self.svcmgr = self.context.ServiceManager
try:
resolver = self.svcmgr.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", self.context)
product = self.svcmgr.createInstance("com.sun.star.configuration.ConfigurationProvider").createInstanceWithArguments("com.sun.star.configuration.ConfigurationAccess", UnoProps(nodepath="/org.openoffice.Setup/Product"))
try:
unocontext = resolver.resolve("uno:%s" % op.connection)
except NoConnectException as e:
pass
else:
info(1, "Existing %s listener found, nothing to do." % product.ooName)
return
if product.ooName != "LibreOffice" or LooseVersion(product.ooSetupVersion) <= LooseVersion('3.3'):
subprocess.call([office.binary, "-headless", "-invisible", "-nocrashreport", "-nodefault", "-nologo", "-nofirststartwizard", "-norestore", "-accept=%s" % op.connection], env=os.environ)
else:
subprocess.call([office.binary, "--headless", "--invisible", "--nocrashreport", "--nodefault", "--nologo", "--nofirststartwizard", "--norestore", "--accept=%s" % op.connection], env=os.environ)
except Exception as e:
error("Launch of %s failed.\n%s" % (office.binary, e))
else:
info(1, "Existing %s listener found, nothing to do." % product.ooName)
def error(msg):
"Output error message"
print(msg, file=sys.stderr)
def info(level, msg):
"Output info message"
if 'op' not in globals():
pass
elif op.verbose >= 3 and level >= 3:
print("DEBUG:", msg, file=sys.stderr)
elif not op.stdout and level <= op.verbose:
print(msg, file=sys.stdout)
elif level <= op.verbose:
print(msg, file=sys.stderr)
def die(ret, msg=None):
"Print optional error and exit with errorcode"
global convertor, ooproc, office
if msg:
error('Error: %s' % msg)
### Did we start our own listener instance ?
if not op.listener and ooproc and convertor:
### If there is a GUI now attached to the instance, disable listener
if convertor.desktop.getCurrentFrame():
info(2, 'Trying to stop %s GUI listener.' % product.ooName)
try:
if product.ooName != "LibreOffice" or product.ooSetupVersion <= 3.3:
subprocess.Popen([office.binary, "-headless", "-invisible", "-nocrashreport", "-nodefault", "-nofirststartwizard", "-nologo", "-norestore", "-unaccept=%s" % op.connection], env=os.environ)
else:
subprocess.Popen([office.binary, "--headless", "--invisible", "--nocrashreport", "--nodefault", "--nofirststartwizard", "--nologo", "--norestore", "--unaccept=%s" % op.connection], env=os.environ)
ooproc.wait()
info(2, '%s listener successfully disabled.' % product.ooName)
except Exception as e:
error("Terminate using %s failed.\n%s" % (office.binary, e))
### If there is no GUI attached to the instance, terminate instance
else:
info(3, 'Terminating %s instance.' % product.ooName)
try:
convertor.desktop.terminate()
except DisposedException:
info(2, '%s instance unsuccessfully closed, sending TERM signal.' % product.ooName)
try:
ooproc.terminate()
except AttributeError:
os.kill(ooproc.pid, 15)
info(3, 'Waiting for %s instance to exit.' % product.ooName)
ooproc.wait()
### LibreOffice processes may get stuck and we have to kill them
### Is it still running ?
if ooproc.poll() == None:
info(1, '%s instance still running, please investigate...' % product.ooName)
ooproc.wait()
info(2, '%s instance unsuccessfully terminated, sending KILL signal.' % product.ooName)
try:
ooproc.kill()
except AttributeError:
os.kill(ooproc.pid, 9)
info(3, 'Waiting for %s with pid %s to disappear.' % (ooproc.pid, product.ooName))
ooproc.wait()
# allow Python GC to garbage collect pyuno object *before* exit call
# which avoids random segmentation faults --vpa
convertor = None
sys.exit(ret)
def main():
os.environ['HOME'] = CFG_OPENOFFICE_TMPDIR
exitcode = 0
info(3, 'sysname=%s, platform=%s, python=%s, python-version=%s' % (os.name, sys.platform, sys.executable, sys.version))
for of in find_offices():
if of.python != sys.executable and not sys.executable.startswith(of.basepath):
python_switch(of)
office_environ(of)
# debug_office()
try:
import uno, unohelper
office = of
break
except:
# debug_office()
print("unoconv: Cannot find a suitable pyuno library and python binary combination in %s" % of, file=sys.stderr)
print("ERROR:", sys.exc_info()[1], file=sys.stderr)
print(file=sys.stderr)
else:
# debug_office()
print("unoconv: Cannot find a suitable office installation on your system.", file=sys.stderr)
print("ERROR: Please locate your office installation and send your feedback to:", file=sys.stderr)
print(" http://github.com/dagwieers/unoconv/issues", file=sys.stderr)
sys.exit(1)
### Now that we have found a working pyuno library, let's import some classes
from com.sun.star.beans import PropertyValue
from com.sun.star.connection import NoConnectException
from com.sun.star.document.UpdateDocMode import QUIET_UPDATE
from com.sun.star.lang import DisposedException, IllegalArgumentException
from com.sun.star.io import IOException, XOutputStream
from com.sun.star.script import CannotConvertException
from com.sun.star.uno import Exception as UnoException
from com.sun.star.uno import RuntimeException
### And now that we have those classes, build on them
class OutputStream( unohelper.Base, XOutputStream ):
def __init__( self ):
self.closed = 0
def closeOutput(self):
self.closed = 1
def writeBytes( self, seq ):
sys.stdout.write( seq.value )
def flush( self ):
pass
def UnoProps(**args):
props = []
for key in args:
prop = PropertyValue()
prop.Name = key
prop.Value = args[key]
props.append(prop)
return tuple(props)
op = Options(sys.argv[1:])
info(2, "Using office base path: %s" % office.basepath)
info(2, "Using office binary path: %s" % office.unopath)
try:
main()
except KeyboardInterrupt as e:
die(6, 'Exiting on user request')
except:
from invenio.ext.logging import register_exception
register_exception(alert_admin=True)
die(exitcode)
global convertor, exitcode
convertor = None
try:
if op.listener:
listener = Listener()
if op.filenames:
convertor = Convertor()
for inputfn in op.filenames:
convertor.convert(inputfn)
except NoConnectException as e:
error("unoconv: could not find an existing connection to LibreOffice at %s:%s." % (op.server, op.port))
if op.connection:
info(0, "Please start an LibreOffice instance on server '%s' by doing:\n\n unoconv --listener --server %s --port %s\n\nor alternatively:\n\n soffice -nologo -nodefault -accept=\"%s\"" % (op.server, op.server, op.port, op.connection))
else:
info(0, "Please start an LibreOffice instance on server '%s' by doing:\n\n unoconv --listener --server %s --port %s\n\nor alternatively:\n\n soffice -nologo -nodefault -accept=\"socket,host=%s,port=%s;urp;\"" % (op.server, op.server, op.port, op.server, op.port))
info(0, "Please start an soffice instance on server '%s' by doing:\n\n soffice -nologo -nodefault -accept=\"socket,host=localhost,port=%s;urp;\"" % (op.server, op.port))
exitcode = 1
# except UnboundLocalError:
# die(252, "Failed to connect to remote listener.")
except OSError:
error("Warning: failed to launch Office suite. Aborting.")
|
sameetb-cuelogic/edx-platform-test | refs/heads/master | lms/djangoapps/survey/admin.py | 92 | """
Provide accessors to these models via the Django Admin pages
"""
from django import forms
from django.contrib import admin
from survey.models import SurveyForm
class SurveyFormAdminForm(forms.ModelForm):
"""Form providing validation of SurveyForm content."""
class Meta(object): # pylint: disable=missing-docstring
model = SurveyForm
fields = ('name', 'form')
def clean_form(self):
"""Validate the HTML template."""
form = self.cleaned_data["form"]
SurveyForm.validate_form_html(form)
return form
class SurveyFormAdmin(admin.ModelAdmin):
"""Admin for SurveyForm"""
form = SurveyFormAdminForm
admin.site.register(SurveyForm, SurveyFormAdmin)
|
Drooids/odoo | refs/heads/8.0 | addons/mail/tests/test_mail_features.py | 172 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from ..mail_mail import mail_mail
from ..mail_thread import mail_thread
from .common import TestMail
from openerp.tools import mute_logger, email_split, html2plaintext
from openerp.tools.mail import html_sanitize
class test_mail(TestMail):
def test_000_alias_setup(self):
""" Test basic mail.alias setup works, before trying to use them for routing """
cr, uid = self.cr, self.uid
self.user_valentin_id = self.res_users.create(cr, uid,
{'name': 'Valentin Cognito', 'email': 'valentin.cognito@gmail.com', 'login': 'valentin.cognito', 'alias_name': 'valentin.cognito'})
self.user_valentin = self.res_users.browse(cr, uid, self.user_valentin_id)
self.assertEquals(self.user_valentin.alias_name, self.user_valentin.login, "Login should be used as alias")
self.user_pagan_id = self.res_users.create(cr, uid,
{'name': 'Pagan Le Marchant', 'email': 'plmarchant@gmail.com', 'login': 'plmarchant@gmail.com', 'alias_name': 'plmarchant@gmail.com'})
self.user_pagan = self.res_users.browse(cr, uid, self.user_pagan_id)
self.assertEquals(self.user_pagan.alias_name, 'plmarchant', "If login is an email, the alias should keep only the local part")
self.user_barty_id = self.res_users.create(cr, uid,
{'name': 'Bartholomew Ironside', 'email': 'barty@gmail.com', 'login': 'b4r+_#_R3wl$$', 'alias_name': 'b4r+_#_R3wl$$'})
self.user_barty = self.res_users.browse(cr, uid, self.user_barty_id)
self.assertEquals(self.user_barty.alias_name, 'b4r+_-_r3wl-', 'Disallowed chars should be replaced by hyphens')
def test_00_followers_function_field(self):
""" Tests designed for the many2many function field 'follower_ids'.
We will test to perform writes using the many2many commands 0, 3, 4,
5 and 6. """
cr, uid, user_admin, partner_bert_id, group_pigs = self.cr, self.uid, self.user_admin, self.partner_bert_id, self.group_pigs
# Data: create 'disturbing' values in mail.followers: same res_id, other res_model; same res_model, other res_id
group_dummy_id = self.mail_group.create(cr, uid,
{'name': 'Dummy group'}, {'mail_create_nolog': True})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.thread', 'res_id': self.group_pigs_id, 'partner_id': partner_bert_id})
self.mail_followers.create(cr, uid,
{'res_model': 'mail.group', 'res_id': group_dummy_id, 'partner_id': partner_bert_id})
# Pigs just created: should be only Admin as follower
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Subscribe Bert through a '4' command
group_pigs.write({'message_follower_ids': [(4, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the only Pigs fans')
# Unsubscribe Bert through a '3' command
group_pigs.write({'message_follower_ids': [(3, partner_bert_id)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([user_admin.partner_id.id]), 'Admin should be the only Pigs fan')
# Set followers through a '6' command
group_pigs.write({'message_follower_ids': [(6, 0, [partner_bert_id])]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the only Pigs fan')
# Add a follower created on the fly through a '0' command
group_pigs.write({'message_follower_ids': [(0, 0, {'name': 'Patrick Fiori'})]})
partner_patrick_id = self.res_partner.search(cr, uid, [('name', '=', 'Patrick Fiori')])[0]
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertEqual(follower_ids, set([partner_bert_id, partner_patrick_id]), 'Bert and Patrick should be the only Pigs fans')
# Finally, unlink through a '5' command
group_pigs.write({'message_follower_ids': [(5, 0)]})
group_pigs.refresh()
follower_ids = set([follower.id for follower in group_pigs.message_follower_ids])
self.assertFalse(follower_ids, 'Pigs group should not have fans anymore')
# Test dummy data has not been altered
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.thread'), ('res_id', '=', self.group_pigs_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id]), 'Bert should be the follower of dummy mail.thread data')
fol_obj_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', group_dummy_id)])
follower_ids = set([follower.partner_id.id for follower in self.mail_followers.browse(cr, uid, fol_obj_ids)])
self.assertEqual(follower_ids, set([partner_bert_id, user_admin.partner_id.id]), 'Bert and Admin should be the followers of dummy mail.group data')
def test_05_message_followers_and_subtypes(self):
""" Tests designed for the subscriber API as well as message subtypes """
cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
# Data: message subtypes
self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_def', 'default': True, 'res_model': 'mail.group'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_other_def', 'default': True, 'res_model': 'crm.lead'})
self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_def', 'default': True, 'res_model': False})
mt_mg_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_mg_nodef', 'default': False, 'res_model': 'mail.group'})
mt_all_nodef = self.mail_message_subtype.create(cr, uid, {'name': 'mt_all_nodef', 'default': False, 'res_model': False})
default_group_subtypes = self.mail_message_subtype.search(cr, uid, [('default', '=', True), '|', ('res_model', '=', 'mail.group'), ('res_model', '=', False)])
# ----------------------------------------
# CASE1: test subscriptions with subtypes
# ----------------------------------------
# Do: subscribe Raoul, should have default subtypes
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set(default_group_subtypes),
'message_subscribe: Raoul subscription subtypes are incorrect, should be all default ones')
# Do: subscribe Raoul with specified new subtypes
group_pigs.message_subscribe_users([user_raoul.id], subtype_ids=[mt_mg_nodef])
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: 2 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
])
self.assertEqual(len(fol_ids), 2,
'message_subscribe: subscribing an already-existing follower should not create new entries in mail.followers')
# Test: Raoul follows only specified subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Subscribe Raoul without specified subtypes: should not erase existing subscription subtypes
group_pigs.message_subscribe_users([user_raoul.id, user_raoul.id])
group_pigs.message_subscribe_users([user_raoul.id])
group_pigs.refresh()
# Test: 2 followers (Admin and Raoul)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(set(follower_ids), set([user_raoul.partner_id.id, user_admin.partner_id.id]),
'message_subscribe: Admin and Raoul should be the only 2 Pigs fans')
# Test: Raoul follows default subtypes
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id),
('partner_id', '=', user_raoul.partner_id.id)
])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef]),
'message_subscribe: Raoul subscription subtypes are incorrect, should be only specified')
# Do: Unsubscribe Raoul twice through message_unsubscribe_users
group_pigs.message_unsubscribe_users([user_raoul.id, user_raoul.id])
group_pigs.refresh()
# Test: 1 follower (Admin)
follower_ids = [follower.id for follower in group_pigs.message_follower_ids]
self.assertEqual(follower_ids, [user_admin.partner_id.id], 'Admin must be the only Pigs fan')
# Test: 1 lines in mail.followers (no duplicate for Raoul)
fol_ids = self.mail_followers.search(cr, uid, [
('res_model', '=', 'mail.group'),
('res_id', '=', self.group_pigs_id)
])
self.assertEqual(len(fol_ids), 1,
'message_subscribe: group should have only 1 entry in mail.follower for 1 follower')
# Do: subscribe Admin with subtype_ids
group_pigs.message_subscribe_users([uid], [mt_mg_nodef, mt_all_nodef])
fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id), ('partner_id', '=', user_admin.partner_id.id)])
fol_obj = self.mail_followers.browse(cr, uid, fol_ids)[0]
fol_subtype_ids = set([subtype.id for subtype in fol_obj.subtype_ids])
self.assertEqual(set(fol_subtype_ids), set([mt_mg_nodef, mt_all_nodef]), 'subscription subtypes are incorrect')
# ----------------------------------------
# CASE2: test mail_thread fields
# ----------------------------------------
subtype_data = group_pigs._get_subscription_data(None, None)[group_pigs.id]['message_subtype_data']
self.assertEqual(set(subtype_data.keys()), set(['Discussions', 'mt_mg_def', 'mt_all_def', 'mt_mg_nodef', 'mt_all_nodef']), 'mail.group available subtypes incorrect')
self.assertFalse(subtype_data['Discussions']['followed'], 'Admin should not follow Discussions in pigs')
self.assertTrue(subtype_data['mt_mg_nodef']['followed'], 'Admin should follow mt_mg_nodef in pigs')
self.assertTrue(subtype_data['mt_all_nodef']['followed'], 'Admin should follow mt_all_nodef in pigs')
    def test_11_notification_url(self):
        """ Tests designed to test the URL added in notification emails.

        Checks the partner access link for three recipient situations (no
        partner, partner without a linked user, partner with a user) and
        verifies which query parameters (db, action, login, model/res_id,
        message_id) the generated link carries in each case.
        """
        cr, uid, group_pigs = self.cr, self.uid, self.group_pigs
        # Test URL formatting
        base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        # Partner data
        partner_raoul = self.res_partner.browse(cr, uid, self.partner_raoul_id)
        partner_bert_id = self.res_partner.create(cr, uid, {'name': 'bert'})  # partner with no linked res.users
        partner_bert = self.res_partner.browse(cr, uid, partner_bert_id)
        # Mail data
        mail_mail_id = self.mail_mail.create(cr, uid, {'state': 'exception'})
        mail = self.mail_mail.browse(cr, uid, mail_mail_id)
        # Test: link for nobody -> None
        # NOTE(review): `mail_mail` here is presumably the module-level import of the
        # mail_mail module, called unbound with the model as first argument — confirm
        # against the file header (not visible in this chunk).
        url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail)
        self.assertEqual(url, None,
                         'notification email: mails not send to a specific partner should not have any URL')
        # Test: link for partner -> None
        url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_bert)
        self.assertEqual(url, None,
                         'notification email: mails send to a not-user partner should not have any URL')
        # Test: link for user -> signin
        url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
        self.assertIn(base_url, url,
                      'notification email: link should contain web.base.url')
        self.assertIn('db=%s' % cr.dbname, url,
                      'notification email: link should contain database name')
        self.assertIn('action=mail.action_mail_redirect', url,
                      'notification email: link should contain the redirect action')
        self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
                      'notification email: link should contain the user login')
        # Test: link for user -> with model and res_id (mail not flagged as a notification:
        # the link targets the document through model/res_id)
        mail_mail_id = self.mail_mail.create(cr, uid, {'model': 'mail.group', 'res_id': group_pigs.id})
        mail = self.mail_mail.browse(cr, uid, mail_mail_id)
        url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
        self.assertIn(base_url, url,
                      'notification email: link should contain web.base.url')
        self.assertIn('db=%s' % cr.dbname, url,
                      'notification email: link should contain database name')
        self.assertIn('action=mail.action_mail_redirect', url,
                      'notification email: link should contain the redirect action')
        self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
                      'notification email: link should contain the user login')
        self.assertIn('model=mail.group', url,
                      'notification email: link should contain the model when having not notification email on a record')
        self.assertIn('res_id=%s' % group_pigs.id, url,
                      'notification email: link should contain the res_id when having not notification email on a record')
        # Test: link for user -> notification mail on a record: link is based on the
        # mail_message id instead of model/res_id
        mail_mail_id = self.mail_mail.create(cr, uid, {'notification': True, 'model': 'mail.group', 'res_id': group_pigs.id})
        mail = self.mail_mail.browse(cr, uid, mail_mail_id)
        url = mail_mail._get_partner_access_link(self.mail_mail, cr, uid, mail, partner=partner_raoul)
        self.assertIn(base_url, url,
                      'notification email: link should contain web.base.url')
        self.assertIn('db=%s' % cr.dbname, url,
                      'notification email: link should contain database name')
        self.assertIn('action=mail.action_mail_redirect', url,
                      'notification email: link should contain the redirect action')
        self.assertIn('login=%s' % partner_raoul.user_ids[0].login, url,
                      'notification email: link should contain the user login')
        self.assertIn('message_id=%s' % mail.mail_message_id.id, url,
                      'notification email: link based on message should contain the mail_message id')
        self.assertNotIn('model=mail.group', url,
                         'notification email: link based on message should not contain model')
        self.assertNotIn('res_id=%s' % group_pigs.id, url,
                         'notification email: link based on message should not contain res_id')
    @mute_logger('openerp.addons.mail.mail_thread', 'openerp.models')
    def test_12_inbox_redirection(self):
        """ Tests designed to test the inbox redirection of emails notification URLs.

        message_redirect_action should open the document's form view
        (ir.actions.act_window) when the requesting user has read access to
        it, and fall back to the Inbox client action (ir.actions.client)
        otherwise or when no parameters are given.
        """
        cr, uid, user_admin, group_pigs = self.cr, self.uid, self.user_admin, self.group_pigs
        model, act_id = self.ir_model_data.get_object_reference(cr, uid, 'mail', 'action_mail_inbox_feeds')
        # Data: post a message on pigs
        msg_id = self.group_pigs.message_post(body='My body', partner_ids=[self.partner_bert_id], type='comment', subtype='mail.mt_comment')
        # No specific parameters -> should redirect to Inbox
        # NOTE(review): `mail_thread` is presumably the module-level import, called
        # unbound with the model as first argument — confirm against the file header.
        action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {}})
        self.assertEqual(
            action.get('type'), 'ir.actions.client',
            'URL redirection: action without parameters should redirect to client action Inbox'
        )
        self.assertEqual(
            action.get('id'), act_id,
            'URL redirection: action without parameters should redirect to client action Inbox'
        )
        # Raoul has read access to Pigs -> should redirect to form view of Pigs
        action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'message_id': msg_id}})
        self.assertEqual(
            action.get('type'), 'ir.actions.act_window',
            'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
        )
        self.assertEqual(
            action.get('res_id'), group_pigs.id,
            'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
        )
        # Same check, addressing the document directly through model + res_id
        action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_raoul_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
        self.assertEqual(
            action.get('type'), 'ir.actions.act_window',
            'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
        )
        self.assertEqual(
            action.get('res_id'), group_pigs.id,
            'URL redirection: action with message_id for read-accredited user should redirect to Pigs'
        )
        # Bert has no read access to Pigs -> should redirect to Inbox
        action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'message_id': msg_id}})
        self.assertEqual(
            action.get('type'), 'ir.actions.client',
            'URL redirection: action without parameters should redirect to client action Inbox'
        )
        self.assertEqual(
            action.get('id'), act_id,
            'URL redirection: action without parameters should redirect to client action Inbox'
        )
        # Same for model + res_id addressing: no read access -> Inbox fallback
        action = mail_thread.message_redirect_action(self.mail_thread, cr, self.user_bert_id, {'params': {'model': 'mail.group', 'res_id': group_pigs.id}})
        self.assertEqual(
            action.get('type'), 'ir.actions.client',
            'URL redirection: action without parameters should redirect to client action Inbox'
        )
        self.assertEqual(
            action.get('id'), act_id,
            'URL redirection: action without parameters should redirect to client action Inbox'
        )
    def test_20_message_post(self):
        """ Tests designed for message_post.

        CASE1 posts a comment with explicit recipients and attachments (both
        pre-created attachment_ids and on-the-fly attachments) and checks the
        resulting mail.message, its notifications, followers and the outgoing
        emails captured through the mocked build_email.
        CASE2 posts an email-type reply with a parent_id and checks threading
        behaviour: subject defaulting, references header, parent notification
        propagation and thread flattening.
        """
        cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
        # --------------------------------------------------
        # Data creation
        # --------------------------------------------------
        # 0 - Update existing users-partners
        self.res_users.write(cr, uid, [uid], {'email': 'a@a', 'notify_email': 'always'})
        self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
        # 1 - Bert Tartopoils, with email, should receive emails for comments and emails
        p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
        # 2 - Carine Poilvache, with email, should receive emails for emails
        p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'none'})
        # 3 - Dédé Grosbedon, without email, to test email verification; should receive emails for every message
        # NOTE(review): legacy comment — an email ('d@d') is actually set on this partner
        p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
        # 4 - Attachments, created unlinked (res_id=0) so message_post can re-link them
        attach1_id = self.ir_attachment.create(cr, user_raoul.id, {
            'name': 'Attach1', 'datas_fname': 'Attach1',
            'datas': 'bWlncmF0aW9uIHRlc3Q=',
            'res_model': 'mail.compose.message', 'res_id': 0})
        attach2_id = self.ir_attachment.create(cr, user_raoul.id, {
            'name': 'Attach2', 'datas_fname': 'Attach2',
            'datas': 'bWlncmF0aW9uIHRlc3Q=',
            'res_model': 'mail.compose.message', 'res_id': 0})
        attach3_id = self.ir_attachment.create(cr, user_raoul.id, {
            'name': 'Attach3', 'datas_fname': 'Attach3',
            'datas': 'bWlncmF0aW9uIHRlc3Q=',
            'res_model': 'mail.compose.message', 'res_id': 0})
        # 5 - Mail data
        _subject = 'Pigs'
        _mail_subject = 'Re: %s' % (group_pigs.name)
        _body1 = '<p>Pigs rules</p>'
        _body2 = '<html>Pigs rocks</html>'
        _attachments = [
            ('List1', 'My first attachment'),
            ('List2', 'My second attachment')
        ]
        # --------------------------------------------------
        # CASE1: post comment + partners + attachments
        # --------------------------------------------------
        # Data: set alias_domain to see emails with alias
        self.registry('ir.config_parameter').set_param(self.cr, self.uid, 'mail.catchall.domain', 'schlouby.fr')
        # Data: change Pigs name to test reply_to (name with quotes and special chars)
        self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': '"Pigs" !ù $%-'})
        # Do: subscribe Raoul
        new_follower_ids = [self.partner_raoul_id]
        group_pigs.message_subscribe(new_follower_ids)
        # Test: group followers = Raoul + uid
        group_fids = [follower.id for follower in group_pigs.message_follower_ids]
        test_fids = new_follower_ids + [self.partner_admin_id]
        self.assertEqual(set(test_fids), set(group_fids),
                         'message_subscribe: incorrect followers after subscribe')
        # Do: Raoul message_post on Pigs; outgoing emails are captured by the mock
        self._init_mock_build_email()
        msg1_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
            body=_body1, subject=_subject, partner_ids=[p_b_id, p_c_id],
            attachment_ids=[attach1_id, attach2_id], attachments=_attachments,
            type='comment', subtype='mt_comment')
        msg = self.mail_message.browse(cr, uid, msg1_id)
        msg_message_id = msg.message_id
        msg_pids = [partner.id for partner in msg.notified_partner_ids]
        msg_aids = [attach.id for attach in msg.attachment_ids]
        sent_emails = self._build_email_kwargs_list
        # Test: mail_message: subject and body not modified
        self.assertEqual(_subject, msg.subject, 'message_post: mail.message subject incorrect')
        self.assertEqual(_body1, msg.body, 'message_post: mail.message body incorrect')
        # Test: mail_message: notified_partner_ids = group followers + partner_ids - author
        test_pids = set([self.partner_admin_id, p_b_id, p_c_id])
        self.assertEqual(test_pids, set(msg_pids), 'message_post: mail.message notified partners incorrect')
        # Test: mail_message: attachments (4, attachment_ids + attachments)
        test_aids = set([attach1_id, attach2_id])
        msg_attach_names = set([attach.name for attach in msg.attachment_ids])
        test_attach_names = set(['Attach1', 'Attach2', 'List1', 'List2'])
        self.assertEqual(len(msg_aids), 4,
                         'message_post: mail.message wrong number of attachments')
        self.assertEqual(msg_attach_names, test_attach_names,
                         'message_post: mail.message attachments incorrectly added')
        self.assertTrue(test_aids.issubset(set(msg_aids)),
                        'message_post: mail.message attachments duplicated')
        for attach in msg.attachment_ids:
            # attachments must have been re-linked to the posted-on document
            self.assertEqual(attach.res_model, 'mail.group',
                             'message_post: mail.message attachments were not linked to the document')
            self.assertEqual(attach.res_id, group_pigs.id,
                             'message_post: mail.message attachments were not linked to the document')
            if 'List' in attach.name:
                self.assertIn((attach.name, attach.datas.decode('base64')), _attachments,
                              'message_post: mail.message attachment name / data incorrect')
                dl_attach = self.mail_message.download_attachment(cr, user_raoul.id, id_message=msg.id, attachment_id=attach.id)
                self.assertIn((dl_attach['filename'], dl_attach['base64'].decode('base64')), _attachments,
                              'message_post: mail.message download_attachment is incorrect')
        # Test: followers: same as before (author was already subscribed)
        group_pigs.refresh()
        group_fids = [follower.id for follower in group_pigs.message_follower_ids]
        test_fids = new_follower_ids + [self.partner_admin_id]
        self.assertEqual(set(test_fids), set(group_fids),
                         'message_post: wrong followers after posting')
        # Test: mail_mail: notifications have been deleted
        self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg1_id)]),
                         'message_post: mail.mail notifications should have been auto-deleted!')
        # Test: notifications emails: to a and b, c is email only, r is author
        test_emailto = ['Administrator <a@a>', 'Bert Tartopoils <b@b>']
        # test_emailto = ['"Followers of -Pigs-" <a@a>', '"Followers of -Pigs-" <b@b>']
        self.assertEqual(len(sent_emails), 2,
                         'message_post: notification emails wrong number of send emails')
        self.assertEqual(set([m['email_to'][0] for m in sent_emails]), set(test_emailto),
                         'message_post: notification emails wrong recipients (email_to)')
        for sent_email in sent_emails:
            self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <raoul@schlouby.fr>',
                             'message_post: notification email wrong email_from: should use alias of sender')
            self.assertEqual(len(sent_email['email_to']), 1,
                             'message_post: notification email sent to more than one email address instead of a precise partner')
            self.assertIn(sent_email['email_to'][0], test_emailto,
                          'message_post: notification email email_to incorrect')
            self.assertEqual(sent_email['reply_to'], u'"YourCompany \\"Pigs\\" !ù $%-" <group+pigs@schlouby.fr>',
                             'message_post: notification email reply_to incorrect')
            self.assertEqual(_subject, sent_email['subject'],
                             'message_post: notification email subject incorrect')
            self.assertIn(_body1, sent_email['body'],
                          'message_post: notification email body incorrect')
            self.assertIn('Pigs rules', sent_email['body_alternative'],
                          'message_post: notification email body alternative should contain the body')
            self.assertNotIn('<p>', sent_email['body_alternative'],
                             'message_post: notification email body alternative still contains html')
            self.assertFalse(sent_email['references'],
                             'message_post: references should be False when sending a message that is not a reply')
        # Test: notification linked to this message = group followers = notified_partner_ids
        notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg1_id)])
        notif_pids = set([notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)])
        self.assertEqual(notif_pids, test_pids,
                         'message_post: mail.message created mail.notification incorrect')
        # Data: Pigs name back to normal
        self.mail_group.write(cr, uid, [self.group_pigs_id], {'name': 'Pigs'})
        # --------------------------------------------------
        # CASE2: reply + parent_id + parent notification
        # --------------------------------------------------
        # Data: remove alias_domain to see emails without alias
        param_ids = self.registry('ir.config_parameter').search(cr, uid, [('key', '=', 'mail.catchall.domain')])
        self.registry('ir.config_parameter').unlink(cr, uid, param_ids)
        # Do: Raoul message_post on Pigs, as an email reply to msg1
        self._init_mock_build_email()
        msg2_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id,
            body=_body2, type='email', subtype='mt_comment',
            partner_ids=[p_d_id], parent_id=msg1_id, attachment_ids=[attach3_id],
            context={'mail_post_autofollow': True})
        msg = self.mail_message.browse(cr, uid, msg2_id)
        msg_pids = [partner.id for partner in msg.notified_partner_ids]
        msg_aids = [attach.id for attach in msg.attachment_ids]
        sent_emails = self._build_email_kwargs_list
        # Test: mail_message: subject is False, body, parent_id is msg_id
        self.assertEqual(msg.subject, False, 'message_post: mail.message subject incorrect')
        self.assertEqual(msg.body, html_sanitize(_body2), 'message_post: mail.message body incorrect')
        self.assertEqual(msg.parent_id.id, msg1_id, 'message_post: mail.message parent_id incorrect')
        # Test: mail_message: notified_partner_ids = group followers
        test_pids = [self.partner_admin_id, p_d_id]
        self.assertEqual(set(test_pids), set(msg_pids), 'message_post: mail.message partners incorrect')
        # Test: mail_message: notifications linked to this message = group followers = notified_partner_ids
        notif_ids = self.mail_notification.search(cr, uid, [('message_id', '=', msg2_id)])
        notif_pids = [notif.partner_id.id for notif in self.mail_notification.browse(cr, uid, notif_ids)]
        self.assertEqual(set(test_pids), set(notif_pids), 'message_post: mail.message notification partners incorrect')
        # Test: mail_mail: notifications deleted
        self.assertFalse(self.mail_mail.search(cr, uid, [('mail_message_id', '=', msg2_id)]), 'mail.mail notifications should have been auto-deleted!')
        # Test: emails send by server (to a, b, c, d)
        test_emailto = [u'Administrator <a@a>', u'Bert Tartopoils <b@b>', u'Carine Poilvache <c@c>', u'D\xe9d\xe9 Grosbedon <d@d>']
        # test_emailto = [u'"Followers of Pigs" <a@a>', u'"Followers of Pigs" <b@b>', u'"Followers of Pigs" <c@c>', u'"Followers of Pigs" <d@d>']
        # self.assertEqual(len(sent_emails), 3, 'sent_email number of sent emails incorrect')
        for sent_email in sent_emails:
            self.assertEqual(sent_email['email_from'], 'Raoul Grosbedon <r@r>',
                             'message_post: notification email wrong email_from: should use email of sender when no alias domain set')
            self.assertEqual(len(sent_email['email_to']), 1,
                             'message_post: notification email sent to more than one email address instead of a precise partner')
            self.assertIn(sent_email['email_to'][0], test_emailto,
                          'message_post: notification email email_to incorrect')
            self.assertEqual(email_split(sent_email['reply_to']), ['r@r'],  # was '"Followers of Pigs" <r@r>', but makes no sense
                             'message_post: notification email reply_to incorrect: should have raoul email')
            self.assertEqual(_mail_subject, sent_email['subject'],
                             'message_post: notification email subject incorrect')
            self.assertIn(html_sanitize(_body2), sent_email['body'],
                          'message_post: notification email does not contain the body')
            self.assertIn('Pigs rocks', sent_email['body_alternative'],
                          'message_post: notification email body alternative should contain the body')
            self.assertNotIn('<p>', sent_email['body_alternative'],
                             'message_post: notification email body alternative still contains html')
            self.assertIn(msg_message_id, sent_email['references'],
                          'message_post: notification email references lacks parent message message_id')
        # Test: attachments + download
        for attach in msg.attachment_ids:
            self.assertEqual(attach.res_model, 'mail.group',
                             'message_post: mail.message attachment res_model incorrect')
            self.assertEqual(attach.res_id, self.group_pigs_id,
                             'message_post: mail.message attachment res_id incorrect')
        # Test: Dédé has been notified -> should also have been notified of the parent message
        msg = self.mail_message.browse(cr, uid, msg1_id)
        msg_pids = set([partner.id for partner in msg.notified_partner_ids])
        test_pids = set([self.partner_admin_id, p_b_id, p_c_id, p_d_id])
        self.assertEqual(test_pids, msg_pids, 'message_post: mail.message parent notification not created')
        # Do: reply to last message
        msg3_id = self.mail_group.message_post(cr, user_raoul.id, self.group_pigs_id, body='Test', parent_id=msg2_id)
        msg = self.mail_message.browse(cr, uid, msg3_id)
        # Test: check that its parent will be the first message (thread flattening:
        # replies to replies are re-attached to the thread root)
        self.assertEqual(msg.parent_id.id, msg1_id, 'message_post did not flatten the thread structure')
    def test_25_message_compose_wizard(self):
        """ Tests designed for the mail.compose.message wizard.

        CASE1 posts a comment through the wizard and checks the context keys
        mail_post_autofollow / mail_create_nosubscribe.
        CASE2 replies with attachments and checks default field computation
        (subject, parent_id) plus attachment data.
        CASE3 runs mass_mail mode over two groups (via active_ids, then via
        active_domain) and checks per-record rendering, recipients and
        follower handling.
        """
        cr, uid, user_raoul, group_pigs = self.cr, self.uid, self.user_raoul, self.group_pigs
        mail_compose = self.registry('mail.compose.message')
        # --------------------------------------------------
        # Data creation
        # --------------------------------------------------
        # 0 - Update existing users-partners
        self.res_users.write(cr, uid, [uid], {'email': 'a@a'})
        self.res_users.write(cr, uid, [self.user_raoul_id], {'email': 'r@r'})
        # 1 - Bert Tartopoils, with email, should receive emails for comments and emails
        p_b_id = self.res_partner.create(cr, uid, {'name': 'Bert Tartopoils', 'email': 'b@b'})
        # 2 - Carine Poilvache, with email, should receive emails for emails
        p_c_id = self.res_partner.create(cr, uid, {'name': 'Carine Poilvache', 'email': 'c@c', 'notify_email': 'always'})
        # 3 - Dédé Grosbedon, without email, to test email verification; should receive emails for every message
        # NOTE(review): legacy comment — an email ('d@d') is actually set on this partner
        p_d_id = self.res_partner.create(cr, uid, {'name': 'Dédé Grosbedon', 'email': 'd@d', 'notify_email': 'always'})
        # 4 - Create a Bird mail.group, that will be used to test mass mailing
        group_bird_id = self.mail_group.create(cr, uid,
            {
                'name': 'Bird',
                'description': 'Bird resistance',
            }, context={'mail_create_nolog': True})
        group_bird = self.mail_group.browse(cr, uid, group_bird_id)
        # 5 - Mail data
        _subject = 'Pigs'
        _body = 'Pigs <b>rule</b>'
        _reply_subject = 'Re: %s' % _subject
        _attachments = [
            {'name': 'First', 'datas_fname': 'first.txt', 'datas': 'My first attachment'.encode('base64')},
            {'name': 'Second', 'datas_fname': 'second.txt', 'datas': 'My second attachment'.encode('base64')}
        ]
        _attachments_test = [('first.txt', 'My first attachment'), ('second.txt', 'My second attachment')]
        # 6 - Subscribe Bert to Pigs
        group_pigs.message_subscribe([p_b_id])
        # --------------------------------------------------
        # CASE1: wizard + partners + context keys
        # --------------------------------------------------
        # Do: Raoul wizard-composes on Pigs with auto-follow for partners, not for author
        compose_id = mail_compose.create(cr, user_raoul.id,
            {
                'subject': _subject,
                'body': _body,
                'partner_ids': [(4, p_c_id), (4, p_d_id)],
            }, context={
                'default_composition_mode': 'comment',
                'default_model': 'mail.group',
                'default_res_id': self.group_pigs_id,
            })
        compose = mail_compose.browse(cr, uid, compose_id)
        # Test: mail.compose.message: composition_mode, model, res_id taken from context defaults
        self.assertEqual(compose.composition_mode, 'comment', 'compose wizard: mail.compose.message incorrect composition_mode')
        self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
        self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
        # Do: Post the comment
        mail_compose.send_mail(cr, user_raoul.id, [compose_id], {'mail_post_autofollow': True, 'mail_create_nosubscribe': True})
        group_pigs.refresh()
        message = group_pigs.message_ids[0]
        # Test: mail.group: followers (c and d added by auto follow key; raoul not added by nosubscribe key)
        pigs_pids = [p.id for p in group_pigs.message_follower_ids]
        test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
        self.assertEqual(set(pigs_pids), set(test_pids),
                         'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
        # Test: mail.message: subject, body inside p
        self.assertEqual(message.subject, _subject, 'compose wizard: mail.message incorrect subject')
        self.assertEqual(message.body, '<p>%s</p>' % _body, 'compose wizard: mail.message incorrect body')
        # Test: mail.message: notified_partner_ids = admin + bert (followers) + c + d (recipients)
        msg_pids = [partner.id for partner in message.notified_partner_ids]
        test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
        self.assertEqual(set(msg_pids), set(test_pids),
                         'compose wizard: mail.message notified_partner_ids incorrect')
        # --------------------------------------------------
        # CASE2: reply + attachments
        # --------------------------------------------------
        # Do: Reply with attachments; subject/body defaults computed from parent
        compose_id = mail_compose.create(cr, user_raoul.id,
            {
                'attachment_ids': [(0, 0, _attachments[0]), (0, 0, _attachments[1])]
            }, context={
                'default_composition_mode': 'comment',
                'default_res_id': self.group_pigs_id,
                'default_parent_id': message.id
            })
        compose = mail_compose.browse(cr, uid, compose_id)
        # Test: mail.compose.message: model, res_id, parent_id
        self.assertEqual(compose.model, 'mail.group', 'compose wizard: mail.compose.message incorrect model')
        self.assertEqual(compose.res_id, self.group_pigs_id, 'compose wizard: mail.compose.message incorrect res_id')
        self.assertEqual(compose.parent_id.id, message.id, 'compose wizard: mail.compose.message incorrect parent_id')
        # Test: mail.compose.message: subject as Re:.., body, parent_id
        self.assertEqual(compose.subject, _reply_subject, 'compose wizard: mail.compose.message incorrect subject')
        self.assertFalse(compose.body, 'compose wizard: mail.compose.message body should not contain parent message body')
        self.assertEqual(compose.parent_id and compose.parent_id.id, message.id, 'compose wizard: mail.compose.message parent_id incorrect')
        # Test: mail.compose.message: attachments
        for attach in compose.attachment_ids:
            self.assertIn((attach.datas_fname, attach.datas.decode('base64')), _attachments_test,
                          'compose wizard: mail.message attachment name / data incorrect')
        # --------------------------------------------------
        # CASE3: mass_mail on Pigs and Bird
        # --------------------------------------------------
        # Do: Compose in mass_mail_mode on pigs and bird; body is a per-record template
        compose_id = mail_compose.create(
            cr, user_raoul.id, {
                'subject': _subject,
                'body': '${object.description}',
                'partner_ids': [(4, p_c_id), (4, p_d_id)],
            }, context={
                'default_composition_mode': 'mass_mail',
                'default_model': 'mail.group',
                'default_res_id': False,
                'active_ids': [self.group_pigs_id, group_bird_id],
            })
        compose = mail_compose.browse(cr, uid, compose_id)
        # Do: Post the comment, get created message for each group
        mail_compose.send_mail(cr, user_raoul.id, [compose_id], context={
                'default_res_id': -1,
                'active_ids': [self.group_pigs_id, group_bird_id]
            })
        # check mail_mail
        mail_mail_ids = self.mail_mail.search(cr, uid, [('subject', '=', _subject)])
        for mail_mail in self.mail_mail.browse(cr, uid, mail_mail_ids):
            self.assertEqual(set([p.id for p in mail_mail.recipient_ids]), set([p_c_id, p_d_id]),
                             'compose wizard: mail_mail mass mailing: mail.mail in mass mail incorrect recipients')
        # check logged messages
        group_pigs.refresh()
        group_bird.refresh()
        message1 = group_pigs.message_ids[0]
        message2 = group_bird.message_ids[0]
        # Test: Pigs and Bird did receive their message
        # NOTE(review): unordered search with limit=2 — presumably relies on the model's
        # default ordering returning the two newest messages first; confirm ordering
        test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
        self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
        self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
        # Test: mail.message: subject, body, subtype, notified partners (nobody + specific recipients)
        self.assertEqual(message1.subject, _subject,
                         'compose wizard: message_post: mail.message in mass mail subject incorrect')
        self.assertEqual(message1.body, '<p>%s</p>' % group_pigs.description,
                         'compose wizard: message_post: mail.message in mass mail body incorrect')
        # self.assertEqual(set([p.id for p in message1.notified_partner_ids]), set([p_c_id, p_d_id]),
        #                  'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
        self.assertEqual(message2.subject, _subject,
                         'compose wizard: message_post: mail.message in mass mail subject incorrect')
        self.assertEqual(message2.body, '<p>%s</p>' % group_bird.description,
                         'compose wizard: message_post: mail.message in mass mail body incorrect')
        # self.assertEqual(set([p.id for p in message2.notified_partner_ids]), set([p_c_id, p_d_id]),
        #                  'compose wizard: message_post: mail.message in mass mail incorrect notified partners')
        # Test: mail.group followers: author not added as follower in mass mail mode
        pigs_pids = [p.id for p in group_pigs.message_follower_ids]
        test_pids = [self.partner_admin_id, p_b_id, p_c_id, p_d_id]
        self.assertEqual(set(pigs_pids), set(test_pids),
                         'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
        bird_pids = [p.id for p in group_bird.message_follower_ids]
        test_pids = [self.partner_admin_id]
        self.assertEqual(set(bird_pids), set(test_pids),
                         'compose wizard: mail_post_autofollow and mail_create_nosubscribe context keys not correctly taken into account')
        # Do: Compose in mass_mail, coming from list_view, we have an active_domain that should be supported
        compose_id = mail_compose.create(cr, user_raoul.id,
            {
                'subject': _subject,
                'body': '${object.description}',
                'partner_ids': [(4, p_c_id), (4, p_d_id)],
            }, context={
                'default_composition_mode': 'mass_mail',
                'default_model': 'mail.group',
                'default_res_id': False,
                'active_ids': [self.group_pigs_id],
                'active_domain': [('name', 'in', ['Pigs', 'Bird'])],
            })
        compose = mail_compose.browse(cr, uid, compose_id)
        # Do: Post the comment, get created message for each group
        mail_compose.send_mail(
            cr, user_raoul.id, [compose_id], context={
                'default_res_id': -1,
                'active_ids': [self.group_pigs_id, group_bird_id]
            })
        group_pigs.refresh()
        group_bird.refresh()
        message1 = group_pigs.message_ids[0]
        message2 = group_bird.message_ids[0]
        # Test: Pigs and Bird did receive their message
        # NOTE(review): same unordered limit=2 search as above — see note there
        test_msg_ids = self.mail_message.search(cr, uid, [], limit=2)
        self.assertIn(message1.id, test_msg_ids, 'compose wizard: Pigs did not receive its mass mailing message')
        self.assertIn(message2.id, test_msg_ids, 'compose wizard: Bird did not receive its mass mailing message')
    def test_30_needaction(self):
        """ Tests for mail.message needaction.

        Checks that the needaction counter of mail.message matches the number
        of unread mail.notification records per partner, both globally and
        with a model/res_id domain, after admin and Raoul post on Pigs.
        """
        cr, uid, user_admin, user_raoul, group_pigs = self.cr, self.uid, self.user_admin, self.user_raoul, self.group_pigs
        # Baseline needaction counts before posting anything
        na_admin_base = self.mail_message._needaction_count(cr, uid, domain=[])
        na_demo_base = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
        # Test: number of unread notification = needaction on mail.message
        notif_ids = self.mail_notification.search(cr, uid, [
            ('partner_id', '=', user_admin.partner_id.id),
            ('is_read', '=', False)
        ])
        na_count = self.mail_message._needaction_count(cr, uid, domain=[])
        self.assertEqual(len(notif_ids), na_count, 'unread notifications count does not match needaction count')
        # Do: post 2 message on group_pigs as admin, 3 messages as demo user
        for dummy in range(2):
            group_pigs.message_post(body='My Body', subtype='mt_comment')
        raoul_pigs = group_pigs.sudo(user_raoul)
        for dummy in range(3):
            raoul_pigs.message_post(body='My Demo Body', subtype='mt_comment')
        # Test: admin has 3 new notifications (from demo), and 3 new needaction
        notif_ids = self.mail_notification.search(cr, uid, [
            ('partner_id', '=', user_admin.partner_id.id),
            ('is_read', '=', False)
        ])
        self.assertEqual(len(notif_ids), na_admin_base + 3, 'Admin should have 3 new unread notifications')
        na_admin = self.mail_message._needaction_count(cr, uid, domain=[])
        na_admin_group = self.mail_message._needaction_count(cr, uid, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
        self.assertEqual(na_admin, na_admin_base + 3, 'Admin should have 3 new needaction')
        self.assertEqual(na_admin_group, 3, 'Admin should have 3 needaction related to Pigs')
        # Test: demo has 0 new notifications (not a follower, not receiving its own messages), and 0 new needaction
        notif_ids = self.mail_notification.search(cr, uid, [
            ('partner_id', '=', user_raoul.partner_id.id),
            ('is_read', '=', False)
        ])
        self.assertEqual(len(notif_ids), na_demo_base + 0, 'Demo should have 0 new unread notifications')
        na_demo = self.mail_message._needaction_count(cr, user_raoul.id, domain=[])
        na_demo_group = self.mail_message._needaction_count(cr, user_raoul.id, domain=[('model', '=', 'mail.group'), ('res_id', '=', self.group_pigs_id)])
        self.assertEqual(na_demo, na_demo_base + 0, 'Demo should have 0 new needaction')
        self.assertEqual(na_demo_group, 0, 'Demo should have 0 needaction related to Pigs')
    def test_40_track_field(self):
        """ Testing auto tracking of fields: changes to fields declared in
        cls._track must produce chatter messages on the record, carrying the
        right subtype and including always-tracked fields in the body. """
        def _strip_string_spaces(body):
            # Normalize a message body (drop spaces/newlines) for containment checks.
            return body.replace(' ', '').replace('\n', '')
        # Data: subscribe Raoul to Pigs, because he will change the public attribute and may lose access to the record
        cr, uid = self.cr, self.uid
        self.mail_group.message_subscribe_users(cr, uid, [self.group_pigs_id], [self.user_raoul_id])
        # Data: res.users.group, to test group_public_id automatic logging
        group_system_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_system')
        group_system_id = group_system_ref and group_system_ref[1] or False
        # Data: custom subtypes (registered as ir.model.data so 'mail.mt_*' xml-ids resolve)
        mt_private_id = self.mail_message_subtype.create(cr, uid, {'name': 'private', 'description': 'Private public'})
        self.ir_model_data.create(cr, uid, {'name': 'mt_private', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_private_id})
        mt_name_supername_id = self.mail_message_subtype.create(cr, uid, {'name': 'name_supername', 'description': 'Supername name'})
        self.ir_model_data.create(cr, uid, {'name': 'mt_name_supername', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_name_supername_id})
        mt_group_public_set_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public_set', 'description': 'Group set'})
        self.ir_model_data.create(cr, uid, {'name': 'mt_group_public_set', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_set_id})
        mt_group_public_id = self.mail_message_subtype.create(cr, uid, {'name': 'group_public', 'description': 'Group changed'})
        self.ir_model_data.create(cr, uid, {'name': 'mt_group_public', 'model': 'mail.message.subtype', 'module': 'mail', 'res_id': mt_group_public_id})
        # Data: alter mail_group model for testing purposes (test on classic, selection and many2one fields)
        cls = type(self.mail_group)
        self.assertNotIn('_track', cls.__dict__)
        cls._track = {
            'public': {
                'mail.mt_private': lambda self, cr, uid, obj, ctx=None: obj.public == 'private',
            },
            'name': {
                'mail.mt_name_supername': lambda self, cr, uid, obj, ctx=None: obj.name == 'supername',
            },
            'group_public_id': {
                'mail.mt_group_public_set': lambda self, cr, uid, obj, ctx=None: obj.group_public_id,
                'mail.mt_group_public': lambda self, cr, uid, obj, ctx=None: True,
            },
        }
        visibility = {'public': 'onchange', 'name': 'always', 'group_public_id': 'onchange'}
        for key in visibility:
            self.assertFalse(hasattr(getattr(cls, key), 'track_visibility'))
            getattr(cls, key).track_visibility = visibility[key]
        # Undo the monkey-patching of the model class whatever the test outcome.
        @self.addCleanup
        def cleanup():
            delattr(cls, '_track')
            for key in visibility:
                del getattr(cls, key).track_visibility
        # Test: change name -> always tracked, not related to a subtype
        self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public'})
        self.group_pigs.refresh()
        self.assertEqual(len(self.group_pigs.message_ids), 1, 'tracked: a message should have been produced')
        # Test: first produced message: no subtype, name change tracked
        last_msg = self.group_pigs.message_ids[-1]
        self.assertFalse(last_msg.subtype_id, 'tracked: message should not have been linked to a subtype')
        self.assertIn(u'SelectedGroupOnly\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
        self.assertIn('Pigs', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
        # Test: change name as supername, public as private -> 2 subtypes
        self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'name': 'supername', 'public': 'private'})
        self.group_pigs.refresh()
        self.assertEqual(len(self.group_pigs.message_ids), 3, 'tracked: two messages should have been produced')
        # Test: first produced message: mt_private (public became 'private')
        last_msg = self.group_pigs.message_ids[-2]
        self.assertEqual(last_msg.subtype_id.id, mt_private_id, 'tracked: message should be linked to mt_private subtype')
        self.assertIn('Private public', last_msg.body, 'tracked: message body does not hold the subtype description')
        self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
        # Test: second produced message: mt_name_supername
        last_msg = self.group_pigs.message_ids[-3]
        self.assertEqual(last_msg.subtype_id.id, mt_name_supername_id, 'tracked: message should be linked to mt_name_supername subtype')
        self.assertIn('Supername name', last_msg.body, 'tracked: message body does not hold the subtype description')
        self.assertIn(u'Public\u2192Private', _strip_string_spaces(last_msg.body), 'tracked: message body incorrect')
        self.assertIn(u'Pigs\u2192supername', _strip_string_spaces(last_msg.body), 'tracked feature: message body does not hold always tracked field')
        # Test: change public as public, group_public_id -> 2 subtypes, name always tracked
        self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'public': 'public', 'group_public_id': group_system_id})
        self.group_pigs.refresh()
        self.assertEqual(len(self.group_pigs.message_ids), 5, 'tracked: one message should have been produced')
        # Test: first produced message: mt_group_public_set_id, with name always tracked, public tracked on change
        last_msg = self.group_pigs.message_ids[-4]
        self.assertEqual(last_msg.subtype_id.id, mt_group_public_set_id, 'tracked: message should be linked to mt_group_public_set_id')
        self.assertIn('Group set', last_msg.body, 'tracked: message body does not hold the subtype description')
        self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
        self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
        # Test: second produced message: mt_group_public_id, with name always tracked, public tracked on change
        last_msg = self.group_pigs.message_ids[-5]
        self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
        self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
        self.assertIn(u'Private\u2192Public', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold changed tracked field')
        self.assertIn(u'HumanResources/Employee\u2192Administration/Settings', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
        # Test: change group_public_id to False -> 1 subtype, name always tracked
        self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'group_public_id': False})
        self.group_pigs.refresh()
        self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: one message should have been produced')
        # Test: first produced message: mt_group_public_id (the 'set' subtype no longer applies)
        last_msg = self.group_pigs.message_ids[-6]
        self.assertEqual(last_msg.subtype_id.id, mt_group_public_id, 'tracked: message should be linked to mt_group_public_id')
        self.assertIn('Group changed', last_msg.body, 'tracked: message body does not hold the subtype description')
        self.assertIn(u'Administration/Settings\u2192', _strip_string_spaces(last_msg.body), 'tracked: message body does not hold always tracked field')
        # Test: change not tracked field, no tracking message
        self.mail_group.write(cr, self.user_raoul_id, [self.group_pigs_id], {'description': 'Dummy'})
        self.group_pigs.refresh()
        self.assertEqual(len(self.group_pigs.message_ids), 6, 'tracked: No message should have been produced')
|
holtwick/pyxer | refs/heads/master | tests/runtests.py | 2 | import sys
import os
import glob
import unittest
# Allow us to import the parent module: run from the tests directory and
# put both it and its parent package directory on sys.path.
os.chdir(os.path.split(os.path.abspath(__file__))[0])
sys.path.insert(0, os.path.abspath(os.curdir))
sys.path.insert(0, os.path.abspath(os.pardir))
# sys.path.insert(0, os.path.join(os.path.abspath(os.pardir), "src"))
def buildTestSuite():
    """Collect the suites of every test_*.py module in the current directory.

    Each test module is expected to expose its own buildTestSuite()
    function; its result is merged into one combined unittest.TestSuite.

    :return: the combined unittest.TestSuite.
    """
    suite = unittest.TestSuite()
    for testcase in glob.glob('test_*.py'):
        print("*** %s" % testcase)  # progress marker; works on py2 and py3
        module = os.path.splitext(testcase)[0]
        # Bug fix: previously this always imported "test_routing" regardless
        # of which file the glob found; import the discovered module instead.
        suite.addTest(__import__(module).buildTestSuite())
    return suite
def main():
    """Build the full suite and run it under a text test runner.

    :return: the unittest result object, so callers can check success.
    """
    runner = unittest.TextTestRunner()
    suite = buildTestSuite()
    return runner.run(suite)
if __name__ == "__main__":
    results = main()
    # Propagate failures to the shell so CI scripts can detect them.
    if not results.wasSuccessful():
        sys.exit(1)
|
vikas17a/Algorithm-Implementations | refs/heads/master | Vigenere_Cipher/Python/Valery S/VigenereCipher_test.py | 20 | #Tests for vigenere_cipher.py
from VigenereCipher import VigenereCipher
# One shared cipher instance is enough for all checks below.
VC = VigenereCipher()
# Round-trip a simple word/key pair (the cipher upper-cases its output).
assert(VC.cipher('take', 'care') == 'VABI')
assert(VC.decipher('VABI', 'care') == 'TAKE')
# Full-alphabet round trip: key letter i shifts plaintext letter i by i.
assert(VC.cipher('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'ABCDEFGHIJKLMNOPQRSTUVWXYZ') == 'ACEGIKMOQSUWYACEGIKMOQSUWY')
assert(VC.decipher('ACEGIKMOQSUWYACEGIKMOQSUWY', 'ABCDEFGHIJKLMNOPQRSTUVWXYZ') == 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
eddiehung/python-escpos | refs/heads/master | escpos/constants.py | 1 | """ ESC/POS Commands (Constants) """
# Feed control sequences
CTL_LF = '\x0a' # Print and line feed
CTL_FF = '\x0c' # Form feed
CTL_CR = '\x0d' # Carriage return
CTL_HT = '\x09' # Horizontal tab
CTL_SET_HT = '\x1b\x44' # Set horizontal tab positions
CTL_VT = '\x1b\x64\x04' # Vertical tab
# Printer hardware
HW_INIT = '\x1b\x40' # Clear data in buffer and reset modes
HW_SELECT = '\x1b\x3d\x01' # Printer select
HW_RESET = '\x1b\x3f\x0a\x00' # Reset printer hardware
# Cash Drawer
CD_KICK_2 = '\x1b\x70\x00' # Sends a pulse to drawer kick-out connector pin 2
CD_KICK_5 = '\x1b\x70\x01' # Sends a pulse to drawer kick-out connector pin 5
# Paper
PAPER_FULL_CUT = '\x1d\x56\x00' # Full cut paper
PAPER_PART_CUT = '\x1d\x56\x01' # Partial cut paper
# Text format
TXT_NORMAL = '\x1b\x21\x00' # Normal text
TXT_2HEIGHT = '\x1b\x21\x10' # Double height text
TXT_2WIDTH = '\x1b\x21\x20' # Double width text
TXT_4SQUARE = '\x1b\x21\x30' # Quad area text (double width and height)
TXT_UNDERL_OFF = '\x1b\x2d\x00' # Underline font OFF
TXT_UNDERL_ON = '\x1b\x2d\x01' # Underline font 1-dot ON
TXT_UNDERL2_ON = '\x1b\x2d\x02' # Underline font 2-dot ON
TXT_BOLD_OFF = '\x1b\x45\x00' # Bold font OFF
TXT_BOLD_ON = '\x1b\x45\x01' # Bold font ON
TXT_FONT_A = '\x1b\x4d\x00' # Font type A
TXT_FONT_B = '\x1b\x4d\x01' # Font type B
TXT_ALIGN_LT = '\x1b\x61\x00' # Left justification
TXT_ALIGN_CT = '\x1b\x61\x01' # Centering
TXT_ALIGN_RT = '\x1b\x61\x02' # Right justification
TXT_UPSIDEDOWN_OFF = '\x1b\x7b\x00' # Upside down mode OFF
TXT_UPSIDEDOWN_ON = '\x1b\x7b\x01' # Upside down mode ON
# Char code table (ESC t n selects code table n)
CHARCODE_PC437 = '\x1b\x74\x00' # USA: Standard Europe
CHARCODE_JIS = '\x1b\x74\x01' # Japanese Katakana
CHARCODE_PC850 = '\x1b\x74\x02' # Multilingual
CHARCODE_PC860 = '\x1b\x74\x03' # Portuguese
CHARCODE_PC863 = '\x1b\x74\x04' # Canadian-French
CHARCODE_PC865 = '\x1b\x74\x05' # Nordic
CHARCODE_WEU = '\x1b\x74\x06' # Western European (per name); NOTE(review): original comment said 'Simplified Kanji, Hirakana' and Epson maps ESC t 6 to Hiragana -- verify against the printer's manual
CHARCODE_GREEK = '\x1b\x74\x07' # Greek (per name); NOTE(review): original comment said 'Simplified Kanji' -- verify code table 7
CHARCODE_HEBREW = '\x1b\x74\x08' # Hebrew (per name); NOTE(review): original comment said 'Simplified Kanji' -- verify code table 8
CHARCODE_PC1252 = '\x1b\x74\x11' # Western European Windows Code Set
CHARCODE_PC866 = '\x1b\x74\x12' # Cyrillic #2
CHARCODE_PC852 = '\x1b\x74\x13' # Latin 2
CHARCODE_PC858 = '\x1b\x74\x14' # Euro
CHARCODE_THAI42 = '\x1b\x74\x15' # Thai character code 42
CHARCODE_THAI11 = '\x1b\x74\x16' # Thai character code 11
CHARCODE_THAI13 = '\x1b\x74\x17' # Thai character code 13
CHARCODE_THAI14 = '\x1b\x74\x18' # Thai character code 14
CHARCODE_THAI16 = '\x1b\x74\x19' # Thai character code 16
CHARCODE_THAI17 = '\x1b\x74\x1a' # Thai character code 17
CHARCODE_THAI18 = '\x1b\x74\x1b' # Thai character code 18
# Barcode format
BARCODE_TXT_OFF = '\x1d\x48\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = '\x1d\x48\x01' # HRI barcode chars above
BARCODE_TXT_BLW = '\x1d\x48\x02' # HRI barcode chars below
BARCODE_TXT_BTH = '\x1d\x48\x03' # HRI barcode chars both above and below
BARCODE_FONT_A = '\x1d\x66\x00' # Font type A for HRI barcode chars
BARCODE_FONT_B = '\x1d\x66\x01' # Font type B for HRI barcode chars
BARCODE_HEIGHT = '\x1d\x68\x64' # Barcode Height [1-255]; default here is 0x64 = 100 dots
BARCODE_WIDTH = '\x1d\x77\x03' # Barcode Width [2-6]; default here is 3
BARCODE_UPC_A = '\x1d\x6b\x00' # Barcode type UPC-A
BARCODE_UPC_E = '\x1d\x6b\x01' # Barcode type UPC-E
BARCODE_EAN13 = '\x1d\x6b\x02' # Barcode type EAN13
BARCODE_EAN8 = '\x1d\x6b\x03' # Barcode type EAN8
BARCODE_CODE39 = '\x1d\x6b\x04' # Barcode type CODE39
BARCODE_ITF = '\x1d\x6b\x05' # Barcode type ITF
BARCODE_NW7 = '\x1d\x6b\x06' # Barcode type NW7
# Image format
S_RASTER_N = '\x1d\x76\x30\x00' # Set raster image normal size
S_RASTER_2W = '\x1d\x76\x30\x01' # Set raster image double width
S_RASTER_2H = '\x1d\x76\x30\x02' # Set raster image double height
S_RASTER_Q = '\x1d\x76\x30\x03' # Set raster image quadruple
# Printing Density
PD_N50 = '\x1d\x7c\x00' # Printing Density -50%
PD_N37 = '\x1d\x7c\x01' # Printing Density -37.5%
PD_N25 = '\x1d\x7c\x02' # Printing Density -25%
PD_N12 = '\x1d\x7c\x03' # Printing Density -12.5%
PD_0 = '\x1d\x7c\x04' # Printing Density 0%
PD_P50 = '\x1d\x7c\x08' # Printing Density +50%
PD_P37 = '\x1d\x7c\x07' # Printing Density +37.5%
PD_P25 = '\x1d\x7c\x06' # Printing Density +25%
PD_P12 = '\x1d\x7c\x05' # Printing Density +12.5%
|
denisov-vlad/redash | refs/heads/master | tests/query_runner/test_databricks.py | 3 | from unittest import TestCase
from redash.query_runner.databricks import split_sql_statements
class TestSplitMultipleSQLStatements(TestCase):
    """Unit tests for split_sql_statements (Databricks query runner)."""

    def _assertSplitSql(self, sql, expected_stmt):
        """Split *sql* and compare against *expected_stmt*, ignoring each
        statement's leading and trailing whitespace."""
        stmt = split_sql_statements(sql)
        # ignore leading and trailing whitespaces when comparing
        self.assertListEqual([s.strip() for s in stmt], [s.strip() for s in expected_stmt])

    # - it should split statements by semicolon
    # - it should keep semicolon in string literals
    # - it should keep semicolon in quoted names (tables, columns, aliases)
    # - it should keep semicolon in comments
    # - it should remove semicolon after the statement
    def test_splits_multiple_statements_by_semicolon(self):
        self._assertSplitSql(
            """
            select 1 as "column", 'a;b;c' as "column ; 2"
            from "table;";
            select 2 as column, if(true, x, "y;z") from table2 as "alias ; 2";
            select 3 -- comment with ; semicolon
            from table3
            """,
            [
                """
            select 1 as "column", 'a;b;c' as "column ; 2"
            from "table;"
                """,
                """
            select 2 as column, if(true, x, "y;z") from table2 as "alias ; 2"
                """,
                """
            select 3 -- comment with ; semicolon
            from table3
                """
            ]
        )

    # - it should keep whitespaces
    # - it should keep letter case
    # - it should keep all unknown characters/symbols/etc.
    def test_keeps_original_syntax(self):
        self._assertSplitSql(
            """
            selECT #TesT#;
            INSERT LoReM
            IPSUM %^&*()
            """,
            [
                """
            selECT #TesT#
                """,
                """
            INSERT LoReM
            IPSUM %^&*()
                """
            ]
        )
        self._assertSplitSql(
            """
            set test_var = 'hello';
            select ${test_var}, 123 from table;
            select 'qwerty' from ${test_var};
            select now()
            """,
            [
                "set test_var = 'hello'",
                "select ${test_var}, 123 from table",
                "select 'qwerty' from ${test_var}",
                "select now()"
            ]
        )

    # - it should keep all comments to semicolon after statement
    # - it should remove comments after semicolon after statement
    def test_keeps_comments(self):
        self._assertSplitSql(
            """
            -- comment 1
            SELECT x -- comment 2
            -- comment 3
            ; -- comment 4
            -- comment 5
            DELETE FROM table -- comment 6
            """,
            [
                """
            -- comment 1
            SELECT x -- comment 2
            -- comment 3
                """,
                """
            -- comment 5
            DELETE FROM table
                """,
            ]
        )

    # - it should skip empty statements
    # - it should skip comment-only statements
    def test_skips_empty_statements(self):
        self._assertSplitSql(
            """
            ;
            -- comment 1
            ;
            SELECT * FROM table;
            -- comment 2
            ;
            """,
            [
                """
            SELECT * FROM table
                """
            ]
        )
        # special case - if all statements were empty it should return the only empty statement
        self._assertSplitSql(
            ";; -- comment 1",
            [""]
        )
|
ghdk/networkx | refs/heads/master | examples/algorithms/krackhardt_centrality.py | 44 | #!/usr/bin/env python
"""
Centrality measures of Krackhardt social network.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__date__ = "$Date: 2005-05-12 14:33:11 -0600 (Thu, 12 May 2005) $"
__credits__ = """"""
__revision__ = "$Revision: 998 $"
# Copyright (C) 2004 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from networkx import *
# Build Krackhardt's 10-node "kite" social network, then compute and print
# three node-centrality measures over it.
G=krackhardt_kite_graph()
print("Betweenness")
b=betweenness_centrality(G)
for v in G.nodes():
    print("%0.2d %5.3f"%(v,b[v]))
print("Degree centrality")
d=degree_centrality(G)
for v in G.nodes():
    print("%0.2d %5.3f"%(v,d[v]))
print("Closeness centrality")
c=closeness_centrality(G)
for v in G.nodes():
    print("%0.2d %5.3f"%(v,c[v]))
|
alexnikolov/German-dictionary | refs/heads/master | german_dictionary/quiz.py | 1 | from german_dictionary.db_handler import DatabaseHandler
from german_dictionary.word import Word
from random import randint
class Quiz:
    """Vocabulary quiz over words stored in the dictionary database.

    On construction, all words of the requested parts of speech are loaded
    and one is picked at random as the current word.  Each call to guess()
    scores the user's suggestions for the fields to be guessed, folds the
    word score into the running average and advances to a new random word.
    """

    def __init__(self, database, parts_of_speech, fields_to_be_guessed):
        self.database = database
        self.parts_of_speech = parts_of_speech
        self.fields_to_be_guessed = fields_to_be_guessed
        self.score = 0      # running average score over all answered words
        self.answers = 0    # number of words answered so far
        self.words_to_guess = DatabaseHandler.\
            extract_parts_of_speech(parts_of_speech, self.database)
        self.words_to_guess = [Word(raw) for raw in self.words_to_guess]
        self.current_word = self.\
            words_to_guess[randint(0, len(self.words_to_guess) - 1)]

    def guess(self, suggestions):
        """Score *suggestions* (one per guessed field) against the current
        word, update the running score and advance to a new word.

        Returns a list of [field_score, feedback] pairs where feedback is
        either the string 'Correct' or the set of correct answers.
        """
        suggestions_with_fields = zip(suggestions, self.fields_to_be_guessed)
        guess_results = []
        for suggestion in suggestions_with_fields:
            correct_answer = self.current_word.word_hash[suggestion[1]]
            guess_results.append(self.evaluate_answer(
                suggestion, self.split_answers_to_set(correct_answer)))
        self.update_score(guess_results)
        self.pick_new_current_word()
        return guess_results

    def pick_new_current_word(self):
        """Drop the current word and pick a new random one, if any remain."""
        self.words_to_guess.remove(self.current_word)
        if len(self.words_to_guess) > 0:
            self.current_word = self.\
                words_to_guess[randint(0, len(self.words_to_guess) - 1)]

    def update_score(self, guess_results):
        """Fold one word's average field score into the running mean."""
        field_scores = [result[0] for result in guess_results]
        word_score = sum(field_scores) / len(field_scores)
        self.score = (self.answers * self.score + word_score) / \
            (self.answers + 1)
        self.answers += 1

    def split_answers_to_set(self, answers):
        """Split a comma-separated answer string into a set of trimmed parts."""
        return {part.strip() for part in answers.split(',')}

    def evaluate_answer(self, suggestion_with_field, correct_answers):
        """Compare one (suggestion, field) pair against *correct_answers*.

        Returns [1, 'Correct'] on an exact match, [0, correct_answers]
        when nothing matches, or [partial_score, correct_answers] when
        only some of the correct answers were given.
        """
        suggested_answers = self.split_answers_to_set(suggestion_with_field[0])
        if suggested_answers == correct_answers:
            return [1, 'Correct']
        # Bug fix: use ==, not 'is' -- identity comparison with an int
        # literal only worked by accident of CPython's small-int caching.
        elif len(suggested_answers & correct_answers) == 0:
            return [0, correct_answers]
        else:
            return [len(correct_answers & suggested_answers) /
                    len(correct_answers), correct_answers]

    def answer_statements(self, guess_results):
        """Render per-field feedback lines for one word's guess results."""
        answer_statement = ''
        for index, field in enumerate(guess_results):
            # Bug fix: compare strings with ==, not 'is' (identity), which
            # only worked by accident of CPython's string interning.
            if field[1] == 'Correct':
                answer_statement += '{} guessed correctly\n'.\
                    format(self.fields_to_be_guessed[index])
            elif field[0] > 0:
                answer_statement += 'Almost, this is the full answer: {}\n'.\
                    format(', '.join(field[1]))
            else:
                answer_statement += 'Nope, this is the correct answer: {}\n'.\
                    format(', '.join(field[1]))
        return answer_statement

    def hint(self, field):
        """Return a first-letter hint for the Meaning field, if available."""
        if field == 'Meaning':
            meaning = self.current_word.word_hash['Meaning']
            if len(self.split_answers_to_set(meaning)) > 1:
                return "Hint: One meaning starts with '{}'".format(meaning[0])
            return "Hint: It starts with '{}'".format(meaning[0])
        return 'Hints are not available for this quiz.'
|
mibanescu/pulp | refs/heads/master | server/pulp/server/db/fields.py | 8 | """
This defines custom fields to be stored in the database.
Each field class is inherited from one or more mongoengine fields
and it provides it's own validation code.
"""
from isodate import ISO8601Error
from mongoengine import StringField
from pulp.common import dateutils
class ISO8601StringField(StringField):
    """
    This represents a string which is an ISO8601 representation of datetime.datetime.
    """
    def __init__(self, **kwargs):
        """Forward all keyword arguments unchanged to StringField."""
        super(ISO8601StringField, self).__init__(**kwargs)

    def validate(self, value):
        """Validate that value is a parsable ISO8601 datetime string.

        StringField's own validation runs first; then the value must
        parse as an ISO8601 datetime, otherwise the parse error text is
        reported through self.error().
        """
        super(ISO8601StringField, self).validate(value)
        try:
            dateutils.parse_iso8601_datetime(value)
        # 'except X as e' works on Python 2.6+ and Python 3, unlike the
        # legacy 'except X, e' comma form used before.
        except ISO8601Error as e:
            self.error(str(e))
|
RJVB/audacity | refs/heads/master | lib-src/lv2/lv2/plugins/eg03-metro.lv2/waflib/Options.py | 330 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,tempfile,optparse,sys,re
from waflib import Logs,Utils,Context
# Default command names understood by the build system.
cmds='distclean configure build install clean uninstall check dist distcheck'.split()
# Parsed option values and leftover positional commands (filled by parse_args).
options={}
commands=[]
# Per-platform lock file guarding concurrent waf runs; overridable via WAFLOCK.
lockfile=os.environ.get('WAFLOCK','.lock-waf_%s_build'%sys.platform)
# Optional shared build cache directory (empty string disables it).
try:cache_global=os.path.abspath(os.environ['WAFCACHE'])
except KeyError:cache_global=''
platform=Utils.unversioned_sys_platform()
class opt_parser(optparse.OptionParser):
    """Command-line option parser for waf (compact, generated waf style).

    Registers waf's standard option groups (configure, build/install,
    step, install/uninstall) and builds the usage text from the known
    command classes and wscript functions.
    """
    def __init__(self,ctx):
        """Register all standard waf options; *ctx* is the OptionsContext."""
        optparse.OptionParser.__init__(self,conflict_handler="resolve",version='waf %s (%s)'%(Context.WAFVERSION,Context.WAFREVISION))
        self.formatter.width=Logs.get_term_cols()
        p=self.add_option  # shorthand used for the global options below
        self.ctx=ctx
        jobs=ctx.jobs()
        p('-j','--jobs',dest='jobs',default=jobs,type='int',help='amount of parallel jobs (%r)'%jobs)
        p('-k','--keep',dest='keep',default=0,action='count',help='keep running happily even if errors are found')
        p('-v','--verbose',dest='verbose',default=0,action='count',help='verbosity level -v -vv or -vvv [default: 0]')
        p('--nocache',dest='nocache',default=False,action='store_true',help='ignore the WAFCACHE (if set)')
        p('--zones',dest='zones',default='',action='store',help='debugging zones (task_gen, deps, tasks, etc)')
        gr=optparse.OptionGroup(self,'configure options')
        self.add_option_group(gr)
        gr.add_option('-o','--out',action='store',default='',help='build dir for the project',dest='out')
        gr.add_option('-t','--top',action='store',default='',help='src dir for the project',dest='top')
        # Default install prefix: $PREFIX if set, else a drive-rooted temp
        # dir on win32 or /usr/local/ elsewhere.
        default_prefix=os.environ.get('PREFIX')
        if not default_prefix:
            if platform=='win32':
                d=tempfile.gettempdir()
                default_prefix=d[0].upper()+d[1:]
            else:
                default_prefix='/usr/local/'
        gr.add_option('--prefix',dest='prefix',default=default_prefix,help='installation prefix [default: %r]'%default_prefix)
        gr.add_option('--download',dest='download',default=False,action='store_true',help='try to download the tools if missing')
        gr=optparse.OptionGroup(self,'build and install options')
        self.add_option_group(gr)
        gr.add_option('-p','--progress',dest='progress_bar',default=0,action='count',help='-p: progress bar; -pp: ide output')
        gr.add_option('--targets',dest='targets',default='',action='store',help='task generators, e.g. "target1,target2"')
        gr=optparse.OptionGroup(self,'step options')
        self.add_option_group(gr)
        gr.add_option('--files',dest='files',default='',action='store',help='files to process, by regexp, e.g. "*/main.c,*/test/main.o"')
        default_destdir=os.environ.get('DESTDIR','')
        gr=optparse.OptionGroup(self,'install/uninstall options')
        self.add_option_group(gr)
        gr.add_option('--destdir',help='installation root [default: %r]'%default_destdir,default=default_destdir,dest='destdir')
        gr.add_option('-f','--force',dest='force',default=False,action='store_true',help='force file installation')
        gr.add_option('--distcheck-args',help='arguments to pass to distcheck',default=None,action='store')
    def get_usage(self):
        """Build the usage text: one line per known command with its doc.

        Commands come from the registered Context classes plus any public
        function defined in the top-level wscript module.
        """
        cmds_str={}
        for cls in Context.classes:
            if not cls.cmd or cls.cmd=='options':
                continue
            s=cls.__doc__ or''
            cmds_str[cls.cmd]=s
        if Context.g_module:
            for(k,v)in Context.g_module.__dict__.items():
                if k in['options','init','shutdown']:
                    continue
                if type(v)is type(Context.create_context):
                    if v.__doc__ and not k.startswith('_'):
                        cmds_str[k]=v.__doc__
        # Left-justify command names to the longest one for alignment.
        just=0
        for k in cmds_str:
            just=max(just,len(k))
        lst=[' %s: %s'%(k.ljust(just),v)for(k,v)in cmds_str.items()]
        lst.sort()
        ret='\n'.join(lst)
        return'''waf [commands] [options]
Main commands (example: ./waf build -j4)
%s
'''%ret
class OptionsContext(Context.Context):
    """Context for the 'options' command: owns the option parser and
    fills the module-level 'options' and 'commands' globals."""
    cmd='options'
    fun='options'
    def __init__(self,**kw):
        super(OptionsContext,self).__init__(**kw)
        self.parser=opt_parser(self)
        # Cache of option groups by name, so repeated add_option_group
        # calls with the same title reuse one group.
        self.option_groups={}
    def jobs(self):
        """Guess a sensible default for -j: $JOBS, then CPU count from the
        environment/sysconf/sysctl, clamped to the range [1, 1024]."""
        count=int(os.environ.get('JOBS',0))
        if count<1:
            if'NUMBER_OF_PROCESSORS'in os.environ:
                count=int(os.environ.get('NUMBER_OF_PROCESSORS',1))
            else:
                if hasattr(os,'sysconf_names'):
                    if'SC_NPROCESSORS_ONLN'in os.sysconf_names:
                        count=int(os.sysconf('SC_NPROCESSORS_ONLN'))
                    elif'SC_NPROCESSORS_CONF'in os.sysconf_names:
                        count=int(os.sysconf('SC_NPROCESSORS_CONF'))
                if not count and os.name not in('nt','java'):
                    # Last resort on BSD-like systems: ask sysctl.
                    try:
                        tmp=self.cmd_and_log(['sysctl','-n','hw.ncpu'],quiet=0)
                    except Exception:
                        pass
                    else:
                        if re.match('^[0-9]+$',tmp):
                            count=int(tmp)
        if count<1:
            count=1
        elif count>1024:
            count=1024
        return count
    def add_option(self,*k,**kw):
        """Forward to the underlying optparse parser."""
        return self.parser.add_option(*k,**kw)
    def add_option_group(self,*k,**kw):
        """Return the cached group named k[0], creating it on first use."""
        try:
            gr=self.option_groups[k[0]]
        except KeyError:
            gr=self.parser.add_option_group(*k,**kw)
            self.option_groups[k[0]]=gr
        return gr
    def get_option_group(self,opt_str):
        """Find a group by cached name or by optparse title; None if absent."""
        try:
            return self.option_groups[opt_str]
        except KeyError:
            for group in self.parser.option_groups:
                if group.title==opt_str:
                    return group
            return None
    def parse_args(self,_args=None):
        """Parse the command line into the module globals 'options' and
        'commands'; normalize --destdir and enable errcheck when verbose."""
        global options,commands
        (options,leftover_args)=self.parser.parse_args(args=_args)
        commands=leftover_args
        if options.destdir:
            options.destdir=os.path.abspath(os.path.expanduser(options.destdir))
        if options.verbose>=1:
            self.load('errcheck')
    def execute(self):
        """Run the user's options() function, then parse the command line."""
        super(OptionsContext,self).execute()
        self.parse_args()
|
drummonds/galleria | refs/heads/master | galleria/contacts/admin.py | 1 | from django.contrib import admin
from .models import ContactType, Contact, PhoneNumber, Address, Note
class ContactTypeAdmin(admin.ModelAdmin):
    """Changelist for contact types; the name is editable in the list."""
    list_display = ('__str__', 'name')
    list_editable = ('name',)
class NoteInline(admin.TabularInline):
    """Tabular inline for a contact's notes; styles the note widget."""
    model = Note

    def formfield_for_dbfield(self, db_field, **kwargs):
        """Attach the 'h3noteclass' CSS class and a width to the note field."""
        field = super(NoteInline, self).formfield_for_dbfield(db_field, **kwargs)
        if db_field.name == 'note':
            # Prepend our class, preserving any class Django already set.
            field.widget.attrs['class'] = 'h3noteclass ' + field.widget.attrs.get('class', '')
            field.widget.attrs['width'] = '800px'
        return field
class AddressInline(admin.StackedInline):
    """Stacked inline editor for a contact's addresses."""
    model = Address
class PhoneNumberInline(admin.TabularInline):
    """Tabular inline editor for a contact's phone numbers."""
    model = PhoneNumber
class ContactAdmin(admin.ModelAdmin):
    """Contact admin: inline notes/addresses/phones, searchable by name."""
    list_display = ('__str__', 'name_first', 'name_last')
    inlines = [NoteInline, AddressInline, PhoneNumberInline]
    search_fields = ['name_first', 'name_last']
    filter_horizontal = ('categories',)

    class Media:
        # Extra stylesheet for the note widgets, resolved under STATIC_URL.
        css = {
            'all': ('contacts/notes.css',) # appended to static root
        }
# Register the model admins with the default admin site.
admin.site.register(ContactType, ContactTypeAdmin)
admin.site.register(Contact, ContactAdmin)
|
labordoc/labordoc-next | refs/heads/labordoc-next | modules/websubmit/lib/functions/Print_Success_Approval_Request.py | 39 | ## This file is part of Invenio.
## Copyright (C) 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is Print_Success_Approval_Request. It creates a "success
message" that is shown to the user to indicate that their approval
request has successfully been registered.
"""
__revision__ = "$Id$"
def Print_Success_Approval_Request(parameters, curdir, form, user_info=None):
    """
    Create the "success message" that is shown to the user to indicate
    that their approval request has successfully been registered.

    All arguments follow the standard WebSubmit function signature; none
    of them is actually used here.
    @param parameters: (dictionary) - parameters of this WebSubmit
       function (unused).
    @param curdir: (string) - the current submission directory (unused).
    @param form: (dictionary) - the fields of the submitted form (unused).
    @param user_info: (dictionary) - information about the submitting
       user (unused).
    @return: (string) - the HTML "success" message for the user.
    """
    text = """<br />
<div>
The approval request for your document has successfully been
registered and the referee has been informed.<br />
You will be notified by email when a decision has been made.
</div>
<br />"""
    return text
|
NewpTone/stacklab-nova | refs/heads/master | nova/network/l3.py | 7 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova.network import linux_net
from nova.openstack.common import log as logging
from nova import utils
# Module-level logger and flag registry (standard nova module setup).
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class L3Driver(object):
    """Abstract class that defines a generic L3 API.

    Concrete drivers (see LinuxNetL3, NullL3 below) implement gateway
    setup, floating-IP binding and VPN forwarding; every method here
    raises NotImplementedError.
    """
    def __init__(self, l3_lib=None):
        raise NotImplementedError()

    def initialize(self, **kwargs):
        """Set up basic L3 networking functionality"""
        raise NotImplementedError()

    def initialize_network(self, network):
        """Enable rules for a specific network"""
        raise NotImplementedError()

    def initialize_gateway(self, network):
        """Set up a gateway on this network"""
        raise NotImplementedError()

    def remove_gateway(self, network_ref):
        """Remove an existing gateway on this network"""
        raise NotImplementedError()

    def is_initialized(self):
        """:returns: True/False (whether the driver is initialized)"""
        raise NotImplementedError()

    def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
        """Add a floating IP bound to the fixed IP with an optional
        l3_interface_id. Some drivers won't care about the
        l3_interface_id so just pass None in that case"""
        raise NotImplementedError()

    def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
        """Remove the binding between a floating IP and its fixed IP."""
        raise NotImplementedError()

    def add_vpn(self, public_ip, port, private_ip):
        """Forward VPN traffic arriving at public_ip:port to private_ip."""
        raise NotImplementedError()

    def remove_vpn(self, public_ip, port, private_ip):
        """Remove a forwarding rule added by add_vpn()."""
        raise NotImplementedError()

    def teardown(self):
        """Undo whatever initialize() set up."""
        raise NotImplementedError()
class LinuxNetL3(L3Driver):
    """L3 driver that uses linux_net as the backend"""
    def __init__(self):
        # Deferred setup: nothing happens until initialize() is called.
        self.initialized = False

    def initialize(self, **kwargs):
        """Idempotently set up host networking and metadata forwarding."""
        if self.initialized:
            return
        LOG.debug("Initializing linux_net L3 driver")
        linux_net.init_host()
        linux_net.ensure_metadata_ip()
        linux_net.metadata_forward()
        self.initialized = True

    def is_initialized(self):
        """:returns: True once initialize() has completed."""
        return self.initialized

    def initialize_network(self, cidr):
        """Add the SNAT rule for the given network CIDR."""
        linux_net.add_snat_rule(cidr)

    def initialize_gateway(self, network_ref):
        """Plug a gateway device with a fresh MAC and configure it."""
        mac_address = utils.generate_mac_address()
        dev = linux_net.plug(network_ref, mac_address,
                             gateway=(network_ref['gateway'] is not None))
        linux_net.initialize_gateway_device(dev, network_ref)

    def remove_gateway(self, network_ref):
        """Unplug the network's gateway device."""
        linux_net.unplug(network_ref)

    def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
        """Bind the floating IP to the interface and forward it to fixed_ip."""
        linux_net.bind_floating_ip(floating_ip, l3_interface_id)
        linux_net.ensure_floating_forward(floating_ip, fixed_ip,
                                          l3_interface_id)

    def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
        """Undo both the bind and the forward added by add_floating_ip()."""
        linux_net.unbind_floating_ip(floating_ip, l3_interface_id)
        linux_net.remove_floating_forward(floating_ip, fixed_ip,
                                          l3_interface_id)

    def add_vpn(self, public_ip, port, private_ip):
        """Forward VPN traffic from public_ip:port to private_ip."""
        linux_net.ensure_vpn_forward(public_ip, port, private_ip)

    def remove_vpn(self, public_ip, port, private_ip):
        # Linux net currently doesn't implement any way of removing
        # the VPN forwarding rules
        pass

    def teardown(self):
        pass
class NullL3(L3Driver):
    """The L3 driver that doesn't do anything. This class can be used when
    nova-network should not manipulate L3 forwarding at all (e.g., in a Flat
    or FlatDHCP scenario)."""
    def __init__(self):
        pass

    def initialize(self, **kwargs):
        pass

    def is_initialized(self):
        # Always ready: there is nothing to set up.
        return True

    def initialize_network(self, cidr):
        pass

    def initialize_gateway(self, network_ref):
        pass

    def remove_gateway(self, network_ref):
        pass

    def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
        pass

    def remove_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
        pass

    def add_vpn(self, public_ip, port, private_ip):
        pass

    def remove_vpn(self, public_ip, port, private_ip):
        pass

    def teardown(self):
        pass
|
joram/sickbeard-orange | refs/heads/ThePirateBay | lib/unidecode/x00c.py | 252 | data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'e', # 0x0e
'ee', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'o', # 0x12
'oo', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'rr', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'v', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'RR', # 0x44
'[?]', # 0x45
'e', # 0x46
'ee', # 0x47
'ai', # 0x48
'[?]', # 0x49
'o', # 0x4a
'oo', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'+', # 0x55
'+', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'R', # 0x8b
'L', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'kh', # 0x96
'g', # 0x97
'gh', # 0x98
'ng', # 0x99
'c', # 0x9a
'ch', # 0x9b
'j', # 0x9c
'jh', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'tth', # 0xa0
'dd', # 0xa1
'ddh', # 0xa2
'nn', # 0xa3
't', # 0xa4
'th', # 0xa5
'd', # 0xa6
'dh', # 0xa7
'n', # 0xa8
'[?]', # 0xa9
'p', # 0xaa
'ph', # 0xab
'b', # 0xac
'bh', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'[?]', # 0xb4
'v', # 0xb5
'sh', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'R', # 0xc3
'RR', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'+', # 0xd5
'+', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'lll', # 0xde
'[?]', # 0xdf
'RR', # 0xe0
'LL', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
Toshakins/wagtail | refs/heads/master | wagtail/contrib/settings/views.py | 5 | from __future__ import absolute_import, unicode_literals
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.lru_cache import lru_cache
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from wagtail.wagtailcore.models import Site
from .forms import SiteSwitchForm
from .permissions import user_can_edit_setting_type
from .registry import registry
def get_model_from_url_params(app_name, model_name):
    """Return the setting model registered under *app_name* / *model_name*.

    Raises Http404 when the combination does not name a registered
    setting type.
    """
    setting_model = registry.get_by_natural_key(app_name, model_name)
    if setting_model is not None:
        return setting_model
    raise Http404
@lru_cache()
def get_setting_edit_handler(model):
    """Return an edit handler bound to *model*, memoized per model class.

    A model-defined ``edit_handler`` attribute takes precedence; otherwise
    an ObjectList is built from panel definitions extracted from the model
    class (passing ``['site']`` through exactly as the original call did —
    presumably to exclude the site field; confirm against
    extract_panel_definitions_from_model_class).
    """
    if not hasattr(model, 'edit_handler'):
        panels = extract_panel_definitions_from_model_class(model, ['site'])
        return ObjectList(panels).bind_to_model(model)
    return model.edit_handler.bind_to_model(model)
def edit_current_site(request, app_name, model_name):
    # Redirect the user to the edit page for the current site
    # (or, if the current request does not correspond to a site,
    # the first site in the list).
    site = request.site or Site.objects.first()
    return redirect('wagtailsettings:edit', app_name, model_name, site.pk)
def edit(request, app_name, model_name, site_pk):
    """Display and process the edit form for one setting type on one site."""
    model = get_model_from_url_params(app_name, model_name)
    if not user_can_edit_setting_type(request.user, model):
        raise PermissionDenied
    site = get_object_or_404(Site, pk=site_pk)

    setting_type_name = model._meta.verbose_name
    instance = model.for_site(site)
    edit_handler_class = get_setting_edit_handler(model)
    form_class = edit_handler_class.get_form_class(model)

    if request.method == 'POST':
        form = form_class(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            form.save()
            messages.success(
                request,
                _("{setting_type} updated.").format(
                    setting_type=capfirst(setting_type_name),
                    instance=instance
                )
            )
            return redirect('wagtailsettings:edit', app_name, model_name, site.pk)
        messages.error(request, _("The setting could not be saved due to errors."))
    else:
        form = form_class(instance=instance)

    # Reached only when the form still needs rendering (GET, or POST with
    # validation errors) — build the handler once for both cases.
    edit_handler = edit_handler_class(instance=instance, form=form)

    # Offer a site switcher only when there is more than one site.
    site_switcher = SiteSwitchForm(site, model) if Site.objects.count() > 1 else None

    return render(request, 'wagtailsettings/edit.html', {
        'opts': model._meta,
        'setting_type_name': setting_type_name,
        'instance': instance,
        'edit_handler': edit_handler,
        'form': form,
        'site': site,
        'site_switcher': site_switcher,
        'tabbed': edit_handler_class.__name__ == '_TabbedInterface',
    })
|
zlfben/gem5 | refs/heads/develop | src/arch/x86/isa/insts/simd64/integer/compare/compare_and_write_minimum_or_maximum.py | 91 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PMINUB_MMX_MMX {
mmini mmx, mmx, mmxm, size=1, ext=0
};
def macroop PMINUB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmini mmx, mmx, ufp1, size=1, ext=0
};
def macroop PMINUB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmini mmx, mmx, ufp1, size=1, ext=0
};
def macroop PMINSW_MMX_MMX {
mmini mmx, mmx, mmxm, size=2, ext=Signed
};
def macroop PMINSW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmini mmx, mmx, ufp1, size=2, ext=Signed
};
def macroop PMINSW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmini mmx, mmx, ufp1, size=2, ext=Signed
};
def macroop PMAXUB_MMX_MMX {
mmaxi mmx, mmx, mmxm, size=1, ext=0
};
def macroop PMAXUB_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmaxi mmx, mmx, ufp1, size=1, ext=0
};
def macroop PMAXUB_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmaxi mmx, mmx, ufp1, size=1, ext=0
};
def macroop PMAXSW_MMX_MMX {
mmaxi mmx, mmx, mmxm, size=2, ext=Signed
};
def macroop PMAXSW_MMX_M {
ldfp ufp1, seg, sib, disp, dataSize=8
mmaxi mmx, mmx, ufp1, size=2, ext=Signed
};
def macroop PMAXSW_MMX_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
mmaxi mmx, mmx, ufp1, size=2, ext=Signed
};
'''
|
aleGpereira/libcloud | refs/heads/trunk | libcloud/test/dns/test_pointdns.py | 17 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.types import RecordType
from libcloud.dns.types import ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.pointdns import PointDNSDriver
from libcloud.dns.drivers.pointdns import PointDNSException
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_POINTDNS
class PointDNSTests(unittest.TestCase):
def setUp(self):
PointDNSDriver.connectionCls.conn_classes = (
None, PointDNSMockHttp)
PointDNSMockHttp.type = None
self.driver = PointDNSDriver(*DNS_PARAMS_POINTDNS)
def assertHasKeys(self, dictionary, keys):
for key in keys:
self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
(key))
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 10)
self.assertTrue(RecordType.A in record_types)
self.assertTrue(RecordType.AAAA in record_types)
self.assertTrue(RecordType.ALIAS in record_types)
self.assertTrue(RecordType.CNAME in record_types)
self.assertTrue(RecordType.MX in record_types)
self.assertTrue(RecordType.NS in record_types)
self.assertTrue(RecordType.PTR in record_types)
self.assertTrue(RecordType.SRV in record_types)
self.assertTrue(RecordType.SSHFP in record_types)
self.assertTrue(RecordType.TXT in record_types)
def test_list_zones_success(self):
PointDNSMockHttp.type = 'GET'
zones = self.driver.list_zones()
self.assertEqual(len(zones), 2)
zone1 = zones[0]
self.assertEqual(zone1.id, '1')
self.assertEqual(zone1.type, 'master')
self.assertEqual(zone1.domain, 'example.com')
self.assertEqual(zone1.ttl, 3600)
self.assertHasKeys(zone1.extra, ['group', 'user-id'])
zone2 = zones[1]
self.assertEqual(zone2.id, '2')
self.assertEqual(zone2.type, 'master')
self.assertEqual(zone2.domain, 'example2.com')
self.assertEqual(zone2.ttl, 3600)
self.assertHasKeys(zone2.extra, ['group', 'user-id'])
def test_list_records_success(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 2)
record1 = records[0]
self.assertEqual(record1.id, '141')
self.assertEqual(record1.name, 'site.example.com')
self.assertEqual(record1.type, RecordType.A)
self.assertEqual(record1.data, '1.2.3.4')
self.assertHasKeys(record1.extra, ['ttl', 'zone_id', 'aux'])
record2 = records[1]
self.assertEqual(record2.id, '150')
self.assertEqual(record2.name, 'site.example1.com')
self.assertEqual(record2.type, RecordType.A)
self.assertEqual(record2.data, '1.2.3.6')
self.assertHasKeys(record2.extra, ['ttl', 'zone_id', 'aux'])
def test_get_zone_success(self):
PointDNSMockHttp.type = 'GET'
zone1 = self.driver.get_zone(zone_id='1')
self.assertEqual(zone1.id, '1')
self.assertEqual(zone1.type, 'master')
self.assertEqual(zone1.domain, 'example.com')
self.assertEqual(zone1.ttl, 3600)
self.assertHasKeys(zone1.extra, ['group', 'user-id'])
def test_get_zone_zone_not_exists(self):
PointDNSMockHttp.type = 'GET_ZONE_NOT_EXIST'
try:
self.driver.get_zone(zone_id='1')
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_record_success(self):
PointDNSMockHttp.type = 'GET'
record = self.driver.get_record(zone_id='1',
record_id='141')
self.assertEqual(record.id, '141')
self.assertEqual(record.name, 'site.example.com')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '1.2.3.4')
self.assertHasKeys(record.extra, ['ttl', 'zone_id', 'aux'])
def test_get_record_record_not_exists(self):
PointDNSMockHttp.type = 'GET_RECORD_NOT_EXIST'
try:
self.driver.get_record(zone_id='1',
record_id='141')
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_zone_success(self):
PointDNSMockHttp.type = 'CREATE'
zone = self.driver.create_zone(domain='example.com')
self.assertEqual(zone.id, '2')
self.assertEqual(zone.domain, 'example.com')
self.assertEqual(zone.ttl, 3600)
self.assertEqual(zone.type, 'master')
self.assertHasKeys(zone.extra, ['group', 'user-id'])
def test_create_zone_with_error(self):
PointDNSMockHttp.type = 'CREATE_ZONE_WITH_ERROR'
try:
self.driver.create_zone(domain='example.com')
except PointDNSException:
pass
else:
self.fail('Exception was not thrown')
def test_create_record_success(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'CREATE'
record = self.driver.create_record(name='site.example.com', zone=zone,
type=RecordType.A,
data='1.2.3.4')
self.assertEqual(record.id, '143')
self.assertEqual(record.name, 'site.example.com')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '1.2.3.4')
self.assertHasKeys(record.extra, ['ttl', 'zone_id', 'aux'])
def test_create_record_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'CREATE_WITH_ERROR'
try:
self.driver.create_record(name='site.example.com',
zone=zone, type=RecordType.A,
data='1.2.3.4')
except PointDNSException:
pass
else:
self.fail('Exception was not thrown')
def test_update_zone_success(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'ZONE_UPDATE'
extra = {'user-id': 6}
_zone = self.driver.update_zone(zone, zone.domain, zone.ttl,
extra=extra)
self.assertEqual(_zone.extra.get('user-id'), 6)
def test_update_zone_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'UPDATE_ZONE_WITH_ERROR'
extra = {'user-id': 6}
try:
self.driver.update_zone(zone, zone.domain, zone.ttl, extra=extra)
except PointDNSException:
pass
else:
self.fail('Exception was not thrown')
def test_update_record_success(self):
PointDNSMockHttp.type = 'GET'
record = self.driver.get_record(zone_id='1',
record_id='141')
PointDNSMockHttp.type = 'UPDATE'
extra = {'ttl': 4500}
record1 = self.driver.update_record(record=record, name='updated.com',
type=RecordType.A, data='1.2.3.5',
extra=extra)
self.assertEqual(record.data, '1.2.3.4')
self.assertEqual(record.extra.get('ttl'), 3600)
self.assertEqual(record1.data, '1.2.3.5')
self.assertEqual(record1.extra.get('ttl'), 4500)
def test_update_record_with_error(self):
PointDNSMockHttp.type = 'GET'
record = self.driver.get_record(zone_id='1',
record_id='141')
PointDNSMockHttp.type = 'UPDATE_RECORD_WITH_ERROR'
extra = {'ttl': 4500}
try:
self.driver.update_record(record=record, name='updated.com',
type=RecordType.A, data='1.2.3.5',
extra=extra)
except PointDNSException:
pass
else:
self.fail('Exception was not thrown')
def test_delete_zone_success(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'DELETE'
status = self.driver.delete_zone(zone=zone)
self.assertTrue(status)
def test_delete_zone_zone_not_exists(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'DELETE_ZONE_NOT_EXIST'
try:
self.driver.delete_zone(zone=zone)
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_delete_record_success(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 2)
record = records[1]
PointDNSMockHttp.type = 'DELETE'
status = self.driver.delete_record(record=record)
self.assertTrue(status)
def test_delete_record_record_not_exists(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 2)
record = records[1]
PointDNSMockHttp.type = 'DELETE_RECORD_NOT_EXIST'
try:
self.driver.delete_record(record=record)
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_ex_list_redirects_success(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'LIST'
redirects = self.driver.ex_list_redirects(zone)
self.assertEqual(len(redirects), 2)
redirect1 = redirects[0]
self.assertEqual(redirect1.id, '36843229')
self.assertEqual(redirect1.name, 'redirect2.domain1.com.')
self.assertEqual(redirect1.type, '302')
self.assertEqual(redirect1.data, 'http://other.com')
self.assertEqual(redirect1.iframe, None)
self.assertEqual(redirect1.query, False)
self.assertEqual(zone, redirect1.zone)
redirect2 = redirects[1]
self.assertEqual(redirect2.id, '36843497')
self.assertEqual(redirect2.name, 'redirect1.domain1.com.')
self.assertEqual(redirect2.type, '302')
self.assertEqual(redirect2.data, 'http://someother.com')
self.assertEqual(redirect2.iframe, None)
self.assertEqual(redirect2.query, False)
self.assertEqual(zone, redirect1.zone)
def test_ex_list_mail_redirects(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'LIST'
mail_redirects = self.driver.ex_list_mail_redirects(zone)
self.assertEqual(len(mail_redirects), 2)
mail_redirect1 = mail_redirects[0]
self.assertEqual(mail_redirect1.id, '5')
self.assertEqual(mail_redirect1.source, 'admin')
self.assertEqual(mail_redirect1.destination, 'user@example-site.com')
self.assertEqual(zone, mail_redirect1.zone)
mail_redirect2 = mail_redirects[1]
self.assertEqual(mail_redirect2.id, '7')
self.assertEqual(mail_redirect2.source, 'new_admin')
self.assertEqual(mail_redirect2.destination,
'second.user@example-site.com')
self.assertEqual(zone, mail_redirect2.zone)
def test_ex_create_redirect(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'CREATE'
redirect = self.driver.ex_create_redirect('http://other.com',
'redirect2', '302', zone,
iframe='An Iframe',
query=True)
self.assertEqual(redirect.id, '36843229')
self.assertEqual(redirect.name, 'redirect2.domain1.com.')
self.assertEqual(redirect.type, '302')
self.assertEqual(redirect.data, 'http://other.com')
self.assertEqual(redirect.iframe, 'An Iframe')
self.assertEqual(redirect.query, True)
self.assertEqual(zone.id, redirect.zone.id)
def test_ex_create_redirect_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'CREATE_WITH_ERROR'
try:
self.driver.ex_create_redirect('http://other.com', 'redirect2',
'302', zone, iframe='An Iframe',
query=True)
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_create_mail_redirect(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'CREATE'
mail_redirect = self.driver.ex_create_mail_redirect(
'user@example-site.com', 'admin', zone)
self.assertEqual(mail_redirect.id, '5')
self.assertEqual(mail_redirect.source, 'admin')
self.assertEqual(mail_redirect.destination, 'user@example-site.com')
self.assertEqual(zone.id, mail_redirect.zone.id)
def test_ex_create_mail_redirect_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'CREATE_WITH_ERROR'
try:
self.driver.ex_create_mail_redirect('user@example-site.com',
'admin', zone)
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_get_redirect(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
redirect = self.driver.ex_get_redirect(zone.id, '36843229')
self.assertEqual(redirect.id, '36843229')
self.assertEqual(redirect.name, 'redirect2.domain1.com.')
self.assertEqual(redirect.type, '302')
self.assertEqual(redirect.data, 'http://other.com')
self.assertEqual(redirect.iframe, None)
self.assertEqual(redirect.query, False)
self.assertEqual(zone.id, redirect.zone.id)
def test_ex_get_redirect_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'GET_WITH_ERROR'
try:
self.driver.ex_get_redirect(zone.id, '36843229')
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_get_redirect_not_found(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'GET_NOT_FOUND'
try:
self.driver.ex_get_redirect(zone.id, '36843229')
except PointDNSException:
e = sys.exc_info()[1]
self.assertEqual(e.http_code, httplib.NOT_FOUND)
self.assertEqual(e.value, "Couldn't found redirect")
else:
self.fail('Exception was not thrown')
def test_ex_get_mail_redirects(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
mail_redirect = self.driver.ex_get_mail_redirects(zone.id, '5')
self.assertEqual(mail_redirect.id, '5')
self.assertEqual(mail_redirect.source, 'admin')
self.assertEqual(mail_redirect.destination, 'user@example-site.com')
self.assertEqual(zone.id, mail_redirect.zone.id)
def test_ex_get_mail_redirects_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
PointDNSMockHttp.type = 'GET_WITH_ERROR'
try:
self.driver.ex_get_mail_redirects(zone.id, '5')
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_update_redirect(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
redirect = self.driver.ex_get_redirect(zone.id, '36843229')
PointDNSMockHttp.type = 'UPDATE'
_redirect = self.driver.ex_update_redirect(
redirect, 'http://updatedother.com', 'redirect3', '302')
self.assertEqual(_redirect.id, '36843229')
self.assertEqual(_redirect.name, 'redirect3.domain1.com.')
self.assertEqual(_redirect.type, '302')
self.assertEqual(_redirect.data, 'http://updatedother.com')
self.assertEqual(_redirect.iframe, None)
self.assertEqual(_redirect.query, False)
self.assertEqual(zone.id, _redirect.zone.id)
def test_ex_update_redirect_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
redirect = self.driver.ex_get_redirect(zone.id, '36843229')
PointDNSMockHttp.type = 'UPDATE_WITH_ERROR'
try:
self.driver.ex_update_redirect(
redirect, 'http://updatedother.com', 'redirect3', '302')
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_update_mail_redirect(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
mailredirect = self.driver.ex_get_mail_redirects(zone.id, '5')
PointDNSMockHttp.type = 'UPDATE'
_mailredirect = self.driver.ex_update_mail_redirect(
mailredirect, 'new_user@example-site.com', 'new_admin')
self.assertEqual(_mailredirect.id, '5')
self.assertEqual(_mailredirect.source, 'new_admin')
self.assertEqual(_mailredirect.destination,
'new_user@example-site.com')
self.assertEqual(zone.id, _mailredirect.zone.id)
def test_ex_update_mail_redirect_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
mailredirect = self.driver.ex_get_mail_redirects(zone.id, '5')
PointDNSMockHttp.type = 'UPDATE_WITH_ERROR'
try:
self.driver.ex_update_mail_redirect(
mailredirect, 'new_user@example-site.com', 'new_admin')
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_delete_redirect(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
redirect = self.driver.ex_get_redirect(zone.id, '36843229')
PointDNSMockHttp.type = 'DELETE'
status = self.driver.ex_delete_redirect(redirect)
self.assertTrue(status)
def test_ex_delete_redirect_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
redirect = self.driver.ex_get_redirect(zone.id, '36843229')
PointDNSMockHttp.type = 'DELETE_WITH_ERROR'
try:
self.driver.ex_delete_redirect(redirect)
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_delete_redirect_not_found(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
redirect = self.driver.ex_get_redirect(zone.id, '36843229')
PointDNSMockHttp.type = 'DELETE_NOT_FOUND'
try:
self.driver.ex_delete_redirect(redirect)
except PointDNSException:
e = sys.exc_info()[1]
self.assertEqual(e.http_code, httplib.NOT_FOUND)
self.assertEqual(e.value, "Couldn't found redirect")
else:
self.fail('Exception was not thrown')
def test_ex_delete_mail_redirect(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
mailredirect = self.driver.ex_get_mail_redirects(zone.id, '5')
PointDNSMockHttp.type = 'DELETE'
status = self.driver.ex_delete_mail_redirect(mailredirect)
self.assertTrue(status)
def test_ex_delete_mail_redirect_with_error(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
mailredirect = self.driver.ex_get_mail_redirects(zone.id, '5')
PointDNSMockHttp.type = 'DELETE_WITH_ERROR'
try:
self.driver.ex_delete_mail_redirect(mailredirect)
except PointDNSException:
e = sys.exc_info()[1]
# The API actually responds with httplib.UNPROCESSABLE_ENTITY code,
# but httplib.responses doesn't have it.
self.assertEqual(e.http_code, httplib.METHOD_NOT_ALLOWED)
else:
self.fail('Exception was not thrown')
def test_ex_delete_mail_redirect_not_found(self):
PointDNSMockHttp.type = 'GET'
zone = self.driver.list_zones()[0]
mailredirect = self.driver.ex_get_mail_redirects(zone.id, '5')
PointDNSMockHttp.type = 'DELETE_NOT_FOUND'
try:
self.driver.ex_delete_mail_redirect(mailredirect)
except PointDNSException:
e = sys.exc_info()[1]
self.assertEqual(e.http_code, httplib.NOT_FOUND)
self.assertEqual(e.value, "Couldn't found mail redirect")
else:
self.fail('Exception was not thrown')
class PointDNSMockHttp(MockHttp):
fixtures = DNSFileFixtures('pointdns')
def _zones_GET(self, method, url, body, headers):
body = self.fixtures.load('_zones_GET.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_CREATE(self, method, url, body, headers):
body = self.fixtures.load('_zones_CREATE.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_CREATE_ZONE_WITH_ERROR(self, method, url, body, headers):
body = self.fixtures.load('error.json')
return (httplib.PAYMENT_REQUIRED, body, {},
httplib.responses[httplib.PAYMENT_REQUIRED])
def _zones_1_GET(self, method, url, body, headers):
body = self.fixtures.load('_zones_GET_1.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_ZONE_UPDATE(self, method, url, body, headers):
body = self.fixtures.load('_zones_1_ZONE_UPDATE.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_UPDATE_ZONE_WITH_ERROR(self, method, url, body, headers):
body = self.fixtures.load('error.json')
return (httplib.PAYMENT_REQUIRED, body, {},
httplib.responses[httplib.PAYMENT_REQUIRED])
def _zones_1_GET_ZONE_NOT_EXIST(self, method, url, body, headers):
body = self.fixtures.load('not_found.json')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _zones_example_com_UPDATE(self, method, url, body, headers):
body = self.fixtures.load('_zones_example_com_UPDATE.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_DELETE(self, method, url, body, headers):
body = self.fixtures.load('_zones_1_DELETE.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_DELETE_ZONE_NOT_EXIST(self, method, url, body, headers):
body = self.fixtures.load('not_found.json')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _zones_1_records_CREATE(self, method, url, body, headers):
body = self.fixtures.load('_zones_example_com_records_CREATE.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_records_CREATE_WITH_ERROR(self, method, url, body, headers):
body = self.fixtures.load('error.json')
return (httplib.PAYMENT_REQUIRED, body, {},
httplib.responses[httplib.PAYMENT_REQUIRED])
def _zones_1_records_GET(self, method, url, body, headers):
body = self.fixtures.load('_zones_1_records_GET.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_records_141_GET_RECORD_NOT_EXIST(self, method, url, body,
headers):
body = self.fixtures.load('not_found.json')
return (httplib.NOT_FOUND, body, {},
httplib.responses[httplib.NOT_FOUND])
def _zones_1_records_141_GET(self, method, url, body, headers):
    # Mock: GET record 141 -> 200 OK with record fixture.
    body = self.fixtures.load('_zones_1_records_141_GET.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_records_141_UPDATE(self, method, url, body, headers):
    # Mock: update record 141 -> 200 OK with updated-record fixture.
    body = self.fixtures.load('_zones_1_records_141_UPDATE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_records_141_UPDATE_RECORD_WITH_ERROR(self, method, url, body,
                                                  headers):
    # Mock: record update failure -> 402 PAYMENT_REQUIRED.
    body = self.fixtures.load('error.json')
    return (httplib.PAYMENT_REQUIRED, body, {},
            httplib.responses[httplib.PAYMENT_REQUIRED])
def _zones_1_records_150_DELETE(self, method, url, body, headers):
    # Mock: delete record 150 -> 200 OK with delete fixture.
    body = self.fixtures.load('_zones_1_records_150_DELETE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_records_150_DELETE_RECORD_NOT_EXIST(self, method, url, body,
                                                 headers):
    # Mock: delete missing record -> 404 NOT_FOUND.
    body = self.fixtures.load('not_found.json')
    return (httplib.NOT_FOUND, body, {},
            httplib.responses[httplib.NOT_FOUND])
def _zones_1_redirects_LIST(self, method, url, body, headers):
    # Mock: list redirects of zone 1 -> 200 OK.
    body = self.fixtures.load('_zones_1_redirects_LIST.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_mail_redirects_LIST(self, method, url, body, headers):
    # Mock: list mail redirects of zone 1 -> 200 OK.
    body = self.fixtures.load('_zones_1_mail_redirects_LIST.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_redirects_CREATE(self, method, url, body, headers):
    # Mock: create redirect -> 200 OK.
    body = self.fixtures.load('_zones_1_redirects_CREATE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_redirects_CREATE_WITH_ERROR(self, method, url, body, headers):
    # Mock: redirect creation failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_mail_redirects_CREATE(self, method, url, body, headers):
    # Mock: create mail redirect -> 200 OK.
    body = self.fixtures.load('_zones_1_mail_redirects_CREATE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_mail_redirects_CREATE_WITH_ERROR(self, method, url, body,
                                              headers):
    # Mock: mail redirect creation failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_redirects_36843229_GET_WITH_ERROR(self, method, url, body,
                                               headers):
    # Mock: GET redirect failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_redirects_36843229_GET(self, method, url, body, headers):
    # Mock: GET redirect 36843229 -> 200 OK.
    body = self.fixtures.load('_zones_1_redirects_GET.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_redirects_36843229_GET_NOT_FOUND(self, method, url, body,
                                              headers):
    # Mock: GET missing redirect -> 404 NOT_FOUND.
    body = self.fixtures.load('not_found.json')
    return (httplib.NOT_FOUND, body, {},
            httplib.responses[httplib.NOT_FOUND])
def _zones_1_mail_redirects_5_GET(self, method, url, body, headers):
    # Mock: GET mail redirect 5 -> 200 OK.
    body = self.fixtures.load('_zones_1_mail_redirects_GET.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_mail_redirects_5_GET_WITH_ERROR(self, method, url, body,
                                             headers):
    # Mock: GET mail redirect failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_redirects_36843229_UPDATE(self, method, url, body, headers):
    # Mock: update redirect 36843229 -> 200 OK.
    body = self.fixtures.load('_zones_1_redirects_UPDATE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_redirects_36843229_UPDATE_WITH_ERROR(self, method, url, body,
                                                  headers):
    # Mock: redirect update failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_mail_redirects_5_UPDATE(self, method, url, body, headers):
    # Mock: update mail redirect 5 -> 200 OK.
    body = self.fixtures.load('_zones_1_mail_redirects_UPDATE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_mail_redirects_5_UPDATE_WITH_ERROR(self, method, url, body,
                                                headers):
    # Mock: mail redirect update failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_redirects_36843229_DELETE(self, method, url, body, headers):
    # Mock: delete redirect 36843229 -> 200 OK.
    body = self.fixtures.load('_zones_1_redirects_DELETE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_mail_redirects_5_DELETE(self, method, url, body, headers):
    # Mock: delete mail redirect 5 -> 200 OK.
    body = self.fixtures.load('_zones_1_mail_redirects_DELETE.json')
    return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _zones_1_redirects_36843229_DELETE_WITH_ERROR(self, method, url, body,
                                                  headers):
    # Mock: redirect delete failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_mail_redirects_5_DELETE_WITH_ERROR(self, method, url, body,
                                                headers):
    # Mock: mail redirect delete failure -> 405 METHOD_NOT_ALLOWED.
    body = self.fixtures.load('redirect_error.json')
    return (httplib.METHOD_NOT_ALLOWED, body, {},
            httplib.responses[httplib.METHOD_NOT_ALLOWED])
def _zones_1_redirects_36843229_DELETE_NOT_FOUND(self, method, url, body,
                                                 headers):
    # Mock: delete missing redirect -> 404 NOT_FOUND.
    body = self.fixtures.load('not_found.json')
    return (httplib.NOT_FOUND, body, {},
            httplib.responses[httplib.NOT_FOUND])
def _zones_1_mail_redirects_5_DELETE_NOT_FOUND(self, method, url, body,
                                               headers):
    # Mock: delete missing mail redirect -> 404 NOT_FOUND.
    body = self.fixtures.load('not_found.json')
    return (httplib.NOT_FOUND, body, {},
            httplib.responses[httplib.NOT_FOUND])
if __name__ == '__main__':
    # Allow running this test module directly.
    sys.exit(unittest.main())
|
krintoxi/NoobSec-Toolkit | refs/heads/master | NoobSecToolkit - MAC OSX/tools/inject/tamper/randomcase.py | 10 | #!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import re
from lib.core.common import randomRange
from lib.core.data import kb
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def dependencies():
    # This tamper script has no extra runtime dependencies to announce.
    pass
def tamper(payload, **kwargs):
    """
    Replaces each keyword character with random case value

    Tested against:
        * Microsoft SQL Server 2005
        * MySQL 4, 5.0 and 5.5
        * Oracle 10g
        * PostgreSQL 8.3, 8.4, 9.0

    Notes:
        * Useful to bypass very weak and bespoke web application firewalls
          that has poorly written permissive regular expressions
        * This tamper script should work against all (?) databases

    >>> import random
    >>> random.seed(0)
    >>> tamper('INSERT')
    'INseRt'
    """

    retVal = payload

    if payload:
        # Only identifier-like tokens can be SQL keywords.
        for match in re.finditer(r"[A-Za-z_]+", retVal):
            word = match.group()

            if word.upper() in kb.keywords:
                while True:
                    _ = ""

                    for i in xrange(len(word)):
                        # Flip each character to upper or lower case at random.
                        _ += word[i].upper() if randomRange(0, 1) else word[i].lower()

                    # Retry until the result is genuinely mixed-case, so the
                    # keyword is actually mutated.
                    # NOTE(review): a single-character keyword could never pass
                    # this check and would loop forever -- presumably
                    # kb.keywords only contains multi-character keywords.
                    if len(_) > 1 and _ not in (_.lower(), _.upper()):
                        break

                retVal = retVal.replace(word, _)

    return retVal
|
Gadal/sympy | refs/heads/master | sympy/polys/tests/test_polyutils.py | 46 | """Tests for useful utilities for higher level polynomial classes. """
from sympy import S, Integer, sin, cos, sqrt, symbols, pi, Eq, Integral, exp
from sympy.utilities.pytest import raises
from sympy.polys.polyutils import (
_nsort,
_sort_gens,
_unify_gens,
_analyze_gens,
_sort_factors,
parallel_dict_from_expr,
dict_from_expr,
)
from sympy.polys.polyerrors import (
GeneratorsNeeded,
PolynomialError,
)
from sympy.polys.domains import ZZ
# Generator symbols shared by all tests below; A and B are noncommutative
# and are used to trigger PolynomialError paths.
x, y, z, p, q, r, s, t, u, v, w = symbols('x,y,z,p,q,r,s,t,u,v,w')
A, B = symbols('A,B', commutative=False)
def test__nsort():
    # issue 6137
    # Four symbolic quartic roots that cannot be ordered purely symbolically;
    # _nsort must order them numerically.
    r = S('''[3/2 + sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) - 4/sqrt(-7/3 +
    61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3)) -
    61/(18*(-415/216 + 13*I/12)**(1/3)))/2 - sqrt(-7/3 + 61/(18*(-415/216
    + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3))/2, 3/2 - sqrt(-7/3
    + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3))/2 - sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) -
    4/sqrt(-7/3 + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3)) - 61/(18*(-415/216 + 13*I/12)**(1/3)))/2, 3/2 +
    sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) + 4/sqrt(-7/3 +
    61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3)) -
    61/(18*(-415/216 + 13*I/12)**(1/3)))/2 + sqrt(-7/3 + 61/(18*(-415/216
    + 13*I/12)**(1/3)) + 2*(-415/216 + 13*I/12)**(1/3))/2, 3/2 + sqrt(-7/3
    + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3))/2 - sqrt(-14/3 - 2*(-415/216 + 13*I/12)**(1/3) +
    4/sqrt(-7/3 + 61/(18*(-415/216 + 13*I/12)**(1/3)) + 2*(-415/216 +
    13*I/12)**(1/3)) - 61/(18*(-415/216 + 13*I/12)**(1/3)))/2]''')
    ans = [r[1], r[0], r[-1], r[-2]]
    assert _nsort(r) == ans
    # separated=True returns (reals, others); all these roots are complex.
    assert len(_nsort(r, separated=True)[0]) == 0
    b, c, a = exp(-1000), exp(-999), exp(-1001)
    # Tiny exponentials must still be ordered by magnitude.
    assert _nsort((b, c, a)) == [a, b, c]
def test__sort_gens():
    # Default ordering, then the effect of the wrt= and sort= options.
    assert _sort_gens([]) == ()

    assert _sort_gens([x]) == (x,)
    assert _sort_gens([p]) == (p,)
    assert _sort_gens([q]) == (q,)

    assert _sort_gens([x, p]) == (x, p)
    assert _sort_gens([p, x]) == (x, p)
    assert _sort_gens([q, p]) == (p, q)

    assert _sort_gens([q, p, x]) == (x, p, q)

    # wrt= moves the named generator(s) to the front.
    assert _sort_gens([x, p, q], wrt=x) == (x, p, q)
    assert _sort_gens([x, p, q], wrt=p) == (p, x, q)
    assert _sort_gens([x, p, q], wrt=q) == (q, x, p)

    assert _sort_gens([x, p, q], wrt='x') == (x, p, q)
    assert _sort_gens([x, p, q], wrt='p') == (p, x, q)
    assert _sort_gens([x, p, q], wrt='q') == (q, x, p)

    assert _sort_gens([x, p, q], wrt='x,q') == (x, q, p)
    assert _sort_gens([x, p, q], wrt='q,x') == (q, x, p)
    assert _sort_gens([x, p, q], wrt='p,q') == (p, q, x)
    assert _sort_gens([x, p, q], wrt='q,p') == (q, p, x)

    assert _sort_gens([x, p, q], wrt='x, q') == (x, q, p)
    assert _sort_gens([x, p, q], wrt='q, x') == (q, x, p)
    assert _sort_gens([x, p, q], wrt='p, q') == (p, q, x)
    assert _sort_gens([x, p, q], wrt='q, p') == (q, p, x)

    # wrt= also accepts mixed lists of symbols and strings.
    assert _sort_gens([x, p, q], wrt=[x, 'q']) == (x, q, p)
    assert _sort_gens([x, p, q], wrt=[q, 'x']) == (q, x, p)
    assert _sort_gens([x, p, q], wrt=[p, 'q']) == (p, q, x)
    assert _sort_gens([x, p, q], wrt=[q, 'p']) == (q, p, x)

    assert _sort_gens([x, p, q], wrt=['x', 'q']) == (x, q, p)
    assert _sort_gens([x, p, q], wrt=['q', 'x']) == (q, x, p)
    assert _sort_gens([x, p, q], wrt=['p', 'q']) == (p, q, x)
    assert _sort_gens([x, p, q], wrt=['q', 'p']) == (q, p, x)

    # sort= specifies a full or partial order directly.
    assert _sort_gens([x, p, q], sort='x > p > q') == (x, p, q)
    assert _sort_gens([x, p, q], sort='p > x > q') == (p, x, q)
    assert _sort_gens([x, p, q], sort='p > q > x') == (p, q, x)

    assert _sort_gens([x, p, q], wrt='x', sort='q > p') == (x, q, p)
    assert _sort_gens([x, p, q], wrt='p', sort='q > x') == (p, q, x)
    assert _sort_gens([x, p, q], wrt='q', sort='p > x') == (q, p, x)

    # Numbered symbols sort naturally (x2 before x10).
    X = symbols('x0,x1,x2,x10,x11,x12,x20,x21,x22')

    assert _sort_gens(X) == X
def test__unify_gens():
    # Merge two generator tuples preserving relative order where possible.
    assert _unify_gens([], []) == ()

    assert _unify_gens([x], [x]) == (x,)
    assert _unify_gens([y], [y]) == (y,)

    assert _unify_gens([x, y], [x]) == (x, y)
    assert _unify_gens([x], [x, y]) == (x, y)

    assert _unify_gens([x, y], [x, y]) == (x, y)
    assert _unify_gens([y, x], [y, x]) == (y, x)

    assert _unify_gens([x], [y]) == (x, y)
    assert _unify_gens([y], [x]) == (y, x)

    assert _unify_gens([x], [y, x]) == (y, x)
    assert _unify_gens([y, x], [x]) == (y, x)

    assert _unify_gens([x, y, z], [x, y, z]) == (x, y, z)
    assert _unify_gens([z, y, x], [x, y, z]) == (z, y, x)
    assert _unify_gens([x, y, z], [z, y, x]) == (x, y, z)
    assert _unify_gens([z, y, x], [z, y, x]) == (z, y, x)

    assert _unify_gens([x, y, z], [t, x, p, q, z]) == (t, x, y, p, q, z)
def test__analyze_gens():
    # Generators may be given as a tuple, a list, or a 1-tuple of either.
    assert _analyze_gens((x, y, z)) == (x, y, z)
    assert _analyze_gens([x, y, z]) == (x, y, z)

    assert _analyze_gens(([x, y, z],)) == (x, y, z)
    assert _analyze_gens(((x, y, z),)) == (x, y, z)
def test__sort_factors():
    # Factors sort by length, then content; multiple=True sorts
    # (factor, multiplicity) pairs.
    assert _sort_factors([], multiple=True) == []
    assert _sort_factors([], multiple=False) == []

    F = [[1, 2, 3], [1, 2], [1]]
    G = [[1], [1, 2], [1, 2, 3]]

    assert _sort_factors(F, multiple=False) == G

    F = [[1, 2], [1, 2, 3], [1, 2], [1]]
    G = [[1], [1, 2], [1, 2], [1, 2, 3]]

    assert _sort_factors(F, multiple=False) == G

    F = [[2, 2], [1, 2, 3], [1, 2], [1]]
    G = [[1], [1, 2], [2, 2], [1, 2, 3]]

    assert _sort_factors(F, multiple=False) == G

    F = [([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
    G = [([1], 1), ([1, 2], 1), ([1, 2, 3], 1)]

    assert _sort_factors(F, multiple=True) == G

    F = [([1, 2], 1), ([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
    G = [([1], 1), ([1, 2], 1), ([1, 2], 1), ([1, 2, 3], 1)]

    assert _sort_factors(F, multiple=True) == G

    F = [([2, 2], 1), ([1, 2, 3], 1), ([1, 2], 1), ([1], 1)]
    G = [([1], 1), ([1, 2], 1), ([2, 2], 1), ([1, 2, 3], 1)]

    assert _sort_factors(F, multiple=True) == G

    # Multiplicity takes part in the ordering as well.
    F = [([2, 2], 1), ([1, 2, 3], 1), ([1, 2], 2), ([1], 1)]
    G = [([1], 1), ([2, 2], 1), ([1, 2], 2), ([1, 2, 3], 1)]

    assert _sort_factors(F, multiple=True) == G
def test__dict_from_expr_if_gens():
    # With explicit generators, constants map to the zero monomial.
    assert dict_from_expr(Integer(17), gens=(x,)) == ({(0,): Integer(17)}, (x,))
    assert dict_from_expr(Integer(17), gens=(x, y)) == ({(0, 0): Integer(17)}, (x, y))
    assert dict_from_expr(Integer(17), gens=(x, y, z)) == ({(0, 0, 0): Integer(17)}, (x, y, z))

    assert dict_from_expr(Integer(-17), gens=(x,)) == ({(0,): Integer(-17)}, (x,))
    assert dict_from_expr(Integer(-17), gens=(x, y)) == ({(0, 0): Integer(-17)}, (x, y))
    assert dict_from_expr(Integer(-17), gens=(x, y, z)) == ({(0, 0, 0): Integer(-17)}, (x, y, z))

    assert dict_from_expr(Integer(17)*x, gens=(x,)) == ({(1,): Integer(17)}, (x,))
    assert dict_from_expr(Integer(17)*x, gens=(x, y)) == ({(1, 0): Integer(17)}, (x, y))
    assert dict_from_expr(Integer(17)*x, gens=(x, y, z)) == ({(1, 0, 0): Integer(17)}, (x, y, z))

    assert dict_from_expr(Integer(17)*x**7, gens=(x,)) == ({(7,): Integer(17)}, (x,))
    assert dict_from_expr(Integer(17)*x**7*y, gens=(x, y)) == ({(7, 1): Integer(17)}, (x, y))
    assert dict_from_expr(Integer(17)*x**7*y*z**12, gens=(
        x, y, z)) == ({(7, 1, 12): Integer(17)}, (x, y, z))

    # Symbols outside the generator set fold into the coefficients.
    assert dict_from_expr(x + 2*y + 3*z, gens=(x,)) == \
        ({(1,): Integer(1), (0,): 2*y + 3*z}, (x,))
    assert dict_from_expr(x + 2*y + 3*z, gens=(x, y)) == \
        ({(1, 0): Integer(1), (0, 1): Integer(2), (0, 0): 3*z}, (x, y))
    assert dict_from_expr(x + 2*y + 3*z, gens=(x, y, z)) == \
        ({(1, 0, 0): Integer(1), (0, 1, 0): Integer(2), (0, 0, 1): Integer(3)}, (x, y, z))

    assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x,)) == \
        ({(1,): y + 2*z, (0,): 3*y*z}, (x,))
    assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x, y)) == \
        ({(1, 1): Integer(1), (1, 0): 2*z, (0, 1): 3*z}, (x, y))
    assert dict_from_expr(x*y + 2*x*z + 3*y*z, gens=(x, y, z)) == \
        ({(1, 1, 0): Integer(1), (1, 0, 1): Integer(2), (0, 1, 1): Integer(3)}, (x, y, z))

    assert dict_from_expr(2**y*x, gens=(x,)) == ({(1,): 2**y}, (x,))
    # Non-symbol generators (here an unevaluated Integral) also work.
    assert dict_from_expr(Integral(x, (x, 1, 2)) + x) == (
        {(0, 1): 1, (1, 0): 1}, (x, Integral(x, (x, 1, 2))))
    # 2**y is not polynomial in the generators (x, y).
    raises(PolynomialError, lambda: dict_from_expr(2**y*x, gens=(x, y)))
def test__dict_from_expr_no_gens():
    # With no generators supplied, they must be inferable from the expression.
    raises(GeneratorsNeeded, lambda: dict_from_expr(Integer(17)))

    assert dict_from_expr(x) == ({(1,): Integer(1)}, (x,))
    assert dict_from_expr(y) == ({(1,): Integer(1)}, (y,))

    assert dict_from_expr(x*y) == ({(1, 1): Integer(1)}, (x, y))
    assert dict_from_expr(x + y) == ({(1, 0): Integer(1), (0, 1): Integer(1)}, (x, y))

    # Greedy inference treats sqrt(2) itself as a generator.
    assert dict_from_expr(sqrt(2)) == ({(1,): Integer(1)}, (sqrt(2),))
    raises(GeneratorsNeeded, lambda: dict_from_expr(sqrt(2), greedy=False))

    # The ground domain decides which symbols become coefficients.
    assert dict_from_expr(x*y, domain=ZZ[x]) == ({(1,): x}, (y,))
    assert dict_from_expr(x*y, domain=ZZ[y]) == ({(1,): y}, (x,))

    # extension= controls whether algebraic numbers join the coefficients.
    assert dict_from_expr(3*sqrt(2)*pi*x*y, extension=None) == ({(1, 1, 1, 1): 3}, (x, y, pi, sqrt(2)))
    assert dict_from_expr(3*sqrt(2)*pi*x*y, extension=True) == ({(1, 1, 1): 3*sqrt(2)}, (x, y, pi))
    assert dict_from_expr(3*sqrt(2)*pi*x*y, extension=True) == ({(1, 1, 1): 3*sqrt(2)}, (x, y, pi))

    f = cos(x)*sin(x) + cos(x)*sin(y) + cos(y)*sin(x) + cos(y)*sin(y)

    assert dict_from_expr(f) == ({(0, 1, 0, 1): 1, (0, 1, 1, 0): 1,
                                  (1, 0, 0, 1): 1, (1, 0, 1, 0): 1}, (cos(x), cos(y), sin(x), sin(y)))
def test__parallel_dict_from_expr_if_gens():
    # Several expressions converted against one shared generator tuple.
    assert parallel_dict_from_expr([x + 2*y + 3*z, Integer(7)], gens=(x,)) == \
        ([{(1,): Integer(1), (0,): 2*y + 3*z}, {(0,): Integer(7)}], (x,))
def test__parallel_dict_from_expr_no_gens():
    # Generators are inferred jointly across all expressions.
    assert parallel_dict_from_expr([x*y, Integer(3)]) == \
        ([{(1, 1): Integer(1)}, {(0, 0): Integer(3)}], (x, y))
    assert parallel_dict_from_expr([x*y, 2*z, Integer(3)]) == \
        ([{(1, 1, 0): Integer(1)}, {(0, 0, 1): Integer(2)}, {(0, 0, 0): Integer(3)}], (x, y, z))
def test_parallel_dict_from_expr():
    # Equations are converted via lhs - rhs.
    assert parallel_dict_from_expr([Eq(x, 1), Eq(
        x**2, 2)]) == ([{(0,): -Integer(1), (1,): Integer(1)},
                        {(0,): -Integer(2), (2,): Integer(1)}], (x,))
    # Noncommutative expressions are rejected.
    raises(PolynomialError, lambda: parallel_dict_from_expr([A*B - B*A]))
def test_dict_from_expr():
    # Equations are converted via lhs - rhs; noncommutative input is rejected.
    assert dict_from_expr(Eq(x, 1)) == \
        ({(0,): -Integer(1), (1,): Integer(1)}, (x,))
    raises(PolynomialError, lambda: dict_from_expr(A*B - B*A))
|
Peddle/hue | refs/heads/master | desktop/core/ext-py/boto-2.38.0/boto/ec2/cloudwatch/listelement.py | 152 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class ListElement(list):
    """A list that doubles as an XML parse handler: it collects the value
    of every completed 'member' element and ignores everything else."""

    def startElement(self, name, attrs, connection):
        # Opening tags carry nothing we need to record.
        pass

    def endElement(self, name, value, connection):
        # Only 'member' elements contribute values to the list.
        if name != 'member':
            return
        self.append(value)
|
KaranToor/MA450 | refs/heads/master | google-cloud-sdk/platform/gsutil/third_party/rsa/rsa/pem.py | 216 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Functions that load and write PEM-encoded files.'''
import base64
from rsa._compat import b, is_bytes
def _markers(pem_marker):
    '''
    Returns the start and end PEM markers
    '''

    # Accept the marker as bytes or text: normalize to text first, then
    # b() (from rsa._compat) turns the full marker lines back into bytes.
    if is_bytes(pem_marker):
        pem_marker = pem_marker.decode('utf-8')

    return (b('-----BEGIN %s-----' % pem_marker),
            b('-----END %s-----' % pem_marker))
def load_pem(contents, pem_marker):
    '''Loads a PEM file.

    @param contents: the contents of the file to interpret
    @param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
        when your file has '-----BEGIN RSA PRIVATE KEY-----' and
        '-----END RSA PRIVATE KEY-----' markers.

    @return the base64-decoded content between the start and end markers.

    @raise ValueError: when the content is invalid, for example when the start
        marker cannot be found.
    '''

    (pem_start, pem_end) = _markers(pem_marker)

    pem_lines = []
    in_pem_part = False

    for line in contents.splitlines():
        line = line.strip()

        # Skip empty lines
        if not line:
            continue

        # Handle start marker
        if line == pem_start:
            if in_pem_part:
                raise ValueError('Seen start marker "%s" twice' % pem_start)

            in_pem_part = True
            continue

        # Skip stuff before first marker
        if not in_pem_part:
            continue

        # Handle end marker
        if in_pem_part and line == pem_end:
            in_pem_part = False
            break

        # Load fields: lines containing ':' are RFC 1421-style headers,
        # not base64 payload, so they are skipped.
        if b(':') in line:
            continue

        pem_lines.append(line)

    # Do some sanity checks
    if not pem_lines:
        raise ValueError('No PEM start marker "%s" found' % pem_start)

    if in_pem_part:
        raise ValueError('No PEM end marker "%s" found' % pem_end)

    # Base64-decode the contents
    # NOTE(review): base64.decodestring is the legacy (pre-3.1) name; this
    # module targets old Pythons, so it is kept as-is.
    pem = b('').join(pem_lines)
    return base64.decodestring(pem)
def save_pem(contents, pem_marker):
    '''Saves a PEM file.

    @param contents: the contents to encode in PEM format
    @param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
        when your file has '-----BEGIN RSA PRIVATE KEY-----' and
        '-----END RSA PRIVATE KEY-----' markers.

    @return the base64-encoded content between the start and end markers.
    '''

    (pem_start, pem_end) = _markers(pem_marker)

    # encodestring inserts its own newlines; strip them and re-wrap the
    # payload at the conventional 64 characters per line below.
    b64 = base64.encodestring(contents).replace(b('\n'), b(''))
    pem_lines = [pem_start]

    for block_start in range(0, len(b64), 64):
        block = b64[block_start:block_start + 64]
        pem_lines.append(block)

    pem_lines.append(pem_end)
    # Trailing empty entry gives the output a final newline after join.
    pem_lines.append(b(''))

    return b('\n').join(pem_lines)
|
fengmk2/node-gyp | refs/heads/master | gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
    """Split a 'path/to/file.gyp:target#toolset' spec into its three parts.

    Components that are absent come back as empty strings.
    """
    remainder, _, toolset = target.partition('#')
    build_file, _, target_name = remainder.partition(':')
    return build_file, target_name, toolset
def LoadEdges(filename, targets):
  """Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents.

  Fix: the original ignored |filename| and always opened 'dump.json';
  it also left the file handle to be closed by the GC.
  """
  with open(filename) as f:
    edges = json.load(f)

  # Copy out only the edges we're interested in from the full edge list,
  # walking the dependency graph outward from the requested targets.
  target_edges = {}
  to_visit = targets[:]
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])

  return target_edges
def WriteGraph(edges):
  """Print a graphviz graph to stdout.
  |edges| is a map of target to a list of other targets it depends on."""
  # NOTE: Python 2 print statements; this tool is not Python-3 compatible.

  # Bucket targets by file.
  files = collections.defaultdict(list)
  for src, dst in edges.items():
    build_file, target_name, toolset = ParseTarget(src)
    files[build_file].append(src)

  print 'digraph D {'
  print ' fontsize=8' # Used by subgraphs.
  print ' node [fontsize=8]'

  # Output nodes by file. We must first write out each node within
  # its file grouping before writing out any edges that may refer
  # to those nodes.
  for filename, targets in files.items():
    if len(targets) == 1:
      # If there's only one node for this file, simplify
      # the display by making it a box without an internal node.
      target = targets[0]
      build_file, target_name, toolset = ParseTarget(target)
      print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                    target_name)
    else:
      # Group multiple nodes together in a subgraph.
      print ' subgraph "cluster_%s" {' % filename
      print ' label = "%s"' % filename
      for target in targets:
        build_file, target_name, toolset = ParseTarget(target)
        print ' "%s" [label="%s"]' % (target, target_name)
      print ' }'

  # Now that we've placed all the nodes within subgraphs, output all
  # the edges between nodes.
  for src, dsts in edges.items():
    for dst in dsts:
      print ' "%s" -> "%s"' % (src, dst)

  print '}'
def main():
  # Print usage to stderr and fail when no targets are given.
  if len(sys.argv) < 2:
    print >>sys.stderr, __doc__
    print >>sys.stderr
    print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
    return 1

  edges = LoadEdges('dump.json', sys.argv[1:])

  WriteGraph(edges)
  return 0
if __name__ == '__main__':
  # Script entry point.
  sys.exit(main())
|
fyfcauc/android_external_chromium-org | refs/heads/du44 | tools/json_schema_compiler/memoize.py | 128 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def memoize(fn):
  '''Decorates |fn| to memoize.

  Results are cached per unique combination of positional and keyword
  arguments, so |fn| runs at most once per distinct call signature.
  All argument values must be hashable.
  '''
  memory = {}
  def impl(*args, **optargs):
    # Fix: items() instead of the Python-2-only iteritems(); it yields the
    # same (key, value) pairs on Python 2 and also works on Python 3.
    # The tuple of pairs makes the kwargs part of the hashable cache key.
    full_args = args + tuple(optargs.items())
    if full_args not in memory:
      memory[full_args] = fn(*args, **optargs)
    return memory[full_args]
  return impl
|
cryptoproofinfo/webapp | refs/heads/master | popsite/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
taige/PyTools | refs/heads/master | tsproxy/httphelper2.py | 1 | import asyncio
import collections
import logging
import time
from http.client import responses
from urllib.parse import urlparse
from urllib.parse import urlunparse
import aiohttp
from aiohttp import hdrs
from aiohttp import http_exceptions as errors
from aiohttp import http_parser
from tsproxy import common
# Status codes whose responses carry no message body (see parse_headers).
NO_CONTENT = 204
NOT_MODIFIED = 304

logger = logging.getLogger(__name__)
# Parsed HTTP request: request-line parts, headers, connection flags, the
# decomposed URL, body bytes, parse error (if any) and receive timestamp.
RequestMessage = collections.namedtuple(
    'RequestMessage',
    ['method', 'path', 'version', 'headers', 'raw_headers',
     'should_close', 'compression', 'request_line', 'url', 'body', 'error', 'request_time'])

# Decomposed request URL: reassembled full forms plus individual components.
RequestURL = collections.namedtuple(
    'RequestURL',
    ['full_url', 'full_path', 'scheme', 'netloc', 'hostname', 'port', 'path', 'query'])

# Parsed HTTP response: status-line parts, headers, body-framing information
# (chunked / content_length), raw bytes, parse error and receive timestamp.
ResponseMessage = collections.namedtuple(
    'ResponseMessage',
    ['version', 'code', 'reason', 'headers', 'raw_headers',
     'should_close', 'compression', 'chunked', 'content_length', 'response_line',
     'head_length', 'body', 'raw_data', 'error', 'response_time'])
class HttpParser(http_parser.HttpParser):
    """aiohttp parser subclass with larger default limits, version-based
    keep-alive defaults and content-length post-processing."""

    def __init__(self, max_line_size=10240, max_headers=32768,
                 max_field_size=10240, **kwargs):
        super().__init__(max_line_size=max_line_size, max_headers=max_headers, max_field_size=max_field_size, **kwargs)

    def _parse_version(self, version):
        # Parse 'HTTP/x.y' and return the default close flag for that
        # version: close for <= HTTP/1.0, keep-alive for HTTP/1.1+.
        try:
            if version.startswith('HTTP/'):
                n1, n2 = version[5:].split('.', 1)
                obj_version = aiohttp.HttpVersion(int(n1), int(n2))
            else:
                raise errors.BadStatusLine(version)
        # NOTE(review): bare except also swallows the BadStatusLine raised
        # just above and re-raises an equivalent one, so behavior is the same.
        except:
            raise errors.BadStatusLine(version)
        if obj_version <= aiohttp.HttpVersion10: # HTTP 1.0 must asks to not close
            close = True
        else: # HTTP 1.1 must ask to close.
            close = False
        return close

    def parse_headers(self, lines, status=200, request_method='GET', default_close=True):
        headers, raw_headers, close, compression, _, chunked = super().parse_headers(lines)
        # are we using the chunked-style of transfer encoding?
        # tr_enc = headers.get(hdrs.TRANSFER_ENCODING)
        # if tr_enc and tr_enc.lower() == "chunked":
        #     chunked = True
        # else:
        #     chunked = False
        # do we have a Content-Length?
        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
        content_length = headers.get(hdrs.CONTENT_LENGTH)
        # are we using the chunked-style of transfer encoding?
        if content_length and not chunked:
            try:
                length = int(content_length)
            except ValueError:
                length = None
            else:
                if length < 0: # ignore nonsensical negative lengths
                    length = None
        else:
            length = None
        # does the body have a fixed length? (of zero)
        if length is None \
                and (status == NO_CONTENT or status == NOT_MODIFIED or 100 <= status < 200 or request_method == "HEAD"):
            length = 0
        # then the headers weren't set in the request
        return headers, raw_headers, default_close if close is None else close, compression, chunked, length
def bad_request(error=None, request_line=None, method=None, version=None, url=None, close=True, timeout=None, request_time=None):
    # Build a RequestMessage describing a failed (or timed-out) request
    # parse; a timeout without an explicit error synthesizes one.
    if timeout and not error:
        error = errors.BadHttpMessage('read request timeout(%d)' % timeout)
    return RequestMessage(
        method, None if not url else url.full_path, version, None, None,
        close, None, request_line, url, None, error, request_time if request_time else time.time())
# ['version', 'code', 'reason', 'headers', 'raw_headers',
# 'should_close', 'compression', 'chunked', 'content_length', 'response_line',
# 'head_length', 'var', 'body', 'raw_data', 'error'])
def bad_response(error, response_line=None, raw_data=b'', timeout=None):
    # Build a ResponseMessage describing a failed (or timed-out) response
    # parse; a timeout without an explicit error synthesizes one.
    if timeout and not error:
        error = errors.BadHttpMessage('read response timeout(%d)' % timeout)
    return ResponseMessage(
        None, error.code, error.message, None, None,
        None, None, False, None, response_line,
        len(raw_data), None, raw_data, error, time.time())
class HttpRequestParser(HttpParser):
    """Incremental HTTP request parser driven as a coroutine generator."""

    def parse_request(self, reader, raw_data=b'', read_timeout=common.default_timeout):
        # read HTTP message (request line + headers); returns a
        # RequestMessage, a bad_request(...) message on errors, or None
        # on EOF. Any bytes already buffered may be passed via raw_data.
        request_time = time.time()
        request_line = method = version = url = close = None
        try:
            with common.Timeout(read_timeout):
                # Read until the request line is complete.
                if raw_data and raw_data.find(b'\r\n') > 0:
                    pass
                else:
                    raw_data += yield from reader.readuntil(b'\r\n')
                request_line, _ = raw_data.split(b'\r\n', 1)
                request_line = request_line.decode('utf-8', 'surrogateescape')
                method, version, url, close = self._parse_requestline(request_line)
                # Keep reading until the blank line ending the header block.
                while True:
                    end_index = raw_data.find(b'\r\n\r\n')
                    if raw_data and end_index > 0:
                        header_lines = raw_data[:end_index+4]
                        body = raw_data[end_index+4:]
                        break
                    else:
                        raw_data += yield from reader.readuntil(b'\r\n')
            _request = self._parse_request(header_lines, request_line, method, version, url, close, request_time)
        except EOFError:
            return None
        except errors.HttpProcessingError as bad_req:
            return bad_request(bad_req, request_line, request_time=request_time)
        except (TimeoutError, asyncio.TimeoutError):
            return bad_request(errors.BadHttpMessage('read request timeout(%d)' % read_timeout), request_line, method, version, url, close, request_time=request_time)
        except asyncio.LimitOverrunError as exc:
            return bad_request(errors.LineTooLong('%s' % raw_data, exc.consumed), request_line, method, version, url, close, request_time=request_time)
        # Drain whatever body bytes are already buffered in the reader.
        chunk_len = len(reader)
        if chunk_len > 0:
            body += yield from reader.read_bytes(size=chunk_len)
        if len(body) > 0:
            _request = _request._replace(body=body)
        return _request

    def _parse_requestline(self, line):
        # request line
        # line = request_line.decode('utf-8', 'surrogateescape')
        try:
            method, path, version = line.split(None, 2)
        except ValueError:
            raise errors.BadStatusLine(line) from None

        # method
        method = method.upper()
        if not http_parser.METHRE.match(method):
            raise errors.BadStatusLine(method)

        # version
        close = self._parse_version(version)

        # path
        url = self.parse_path(path, method)
        return method, version, url, close

    def _parse_request(self, raw_data, line, method, version, url, default_close=True, request_time=None):
        # Parse the header block into a RequestMessage (body filled in later).
        lines = raw_data.split(b'\r\n')

        # read headers
        headers, raw_headers, close, compression, _, _ = self.parse_headers(lines, default_close=default_close)
        if close is None: # then the headers weren't set in the request
            close = default_close

        return RequestMessage(
            method, url.full_path, version, headers, raw_headers,
            close, compression, line, url, b'', None, request_time if request_time else time.time())

    @staticmethod
    def parse_path(path, method):
        """ ['full_url', 'full_path', 'scheme', 'netloc', 'hostname', 'port', 'path', 'query']) """
        url = urlparse(path)
        result = {'scheme': url.scheme if url.scheme else 'http' if url.netloc else '',
                  'netloc': url.netloc,
                  'path': url.path,
                  'query': url.query}
        result['full_url'] = urlunparse((result['scheme'], result['netloc'], url.path, url.params, url.query, url.fragment))
        result['full_path'] = urlunparse(('', '', url.path, url.params, url.query, url.fragment))
        if method == 'CONNECT':
            # CONNECT targets are 'host:port'; port defaults to 443.
            hostname, port = url.path.split(':')
            result['hostname'] = hostname
            result['port'] = int(port) if port else 443
        else:
            # Plain requests default to port 80 when none is given.
            result['hostname'] = url.hostname if url.hostname else None
            result['port'] = int(url.port) if url.port else 80
        return RequestURL(**result)
class HttpResponseParser(HttpParser):
    """Read response status line and headers.

    BadStatusLine could be raised in case of any errors in status line.
    Returns RawResponseMessage"""

    def parse_response(self, raw_data, request_method='GET'):
        # Parse as much of an HTTP response as is present in raw_data.
        # Returns (ResponseMessage, consumed_bytes); (None, 0) when the
        # buffered data does not yet contain a complete header block.
        # read HTTP message (response line + headers)
        try:
            if raw_data and raw_data.find(b'\r\n') > 0:
                pass
            else:
                return None, 0
            response_line, _ = raw_data.split(b'\r\n', 1)
            response_line = response_line.decode('utf-8', 'surrogateescape')
            version, status, reason, default_close = self._parse_responseline(response_line)
            if raw_data and raw_data.find(b'\r\n\r\n') > 0:
                consumed = raw_data.find(b'\r\n\r\n') + 4
            else:
                return None, 0
            _response = self._parse_response(raw_data, response_line, version, status, reason, request_method, default_close)
        except errors.HttpProcessingError as bad_req:
            return bad_response(bad_req, response_line, raw_data), 0
        # Attach whatever body bytes follow the headers, capped at the
        # declared Content-Length when one is known.
        body_len = len(raw_data) - consumed
        if body_len > 0:
            if _response.content_length is not None and body_len > _response.content_length:
                body_len = _response.content_length
            body = raw_data[consumed: consumed + body_len]
            _response = _response._replace(body=body)
            consumed += body_len
        return _response, consumed

    def _parse_response(self, raw_data, line, version, status, reason, request_method='GET', default_close=True):
        # Parse the header block into a ResponseMessage (body filled later).
        lines = raw_data.split(b'\r\n')

        # read headers
        headers, raw_headers, close, compression, chunked, length = self.parse_headers(lines, status, request_method, default_close)

        return ResponseMessage(
            version, status, reason.strip(), headers, raw_headers,
            close, compression, chunked, length, line,
            len(raw_data), b'', raw_data, None, time.time())

    def _parse_responseline(self, line):
        # response line
        try:
            version, status = line.split(None, 1)
        except ValueError:
            raise errors.BadStatusLine(line) from None
        else:
            try:
                status, reason = status.split(None, 1)
            except ValueError:
                reason = ''

        # version
        close = self._parse_version(version)

        # The status code is a three-digit number
        try:
            status = int(status)
        except ValueError:
            raise errors.BadStatusLine(line) from None

        if status < 100 or status > 999:
            raise errors.BadStatusLine(line)

        return version, status, reason, close
# ['version', 'code', 'reason', 'headers', 'raw_headers',
# 'should_close', 'compression', 'chunked', 'content_length', 'response_line',
# 'head_length', 'var', 'body', 'raw_data', 'error'])
def https_proxy_response(version=None, headers=None):
    """Build the canned "200 Connection established" reply that is sent to a
    client after a CONNECT tunnel has been set up.

    Optional *headers* (a mapping) are appended after the Proxy-Agent line.
    """
    version = version if version else 'HTTP/1.1'
    response_line = '%s 200 Connection established' % version
    parts = [response_line.encode(),
             b'\r\nProxy-Agent: taige-Smart-Proxy/0.1.0\r\n']
    raw_headers = []
    if headers:
        for name in headers:
            value = headers[name]
            parts.append(('%s: %s\r\n' % (name, value)).encode())
            raw_headers.append((name, value))
    # Blank line terminates the header section.
    parts.append(b'\r\n')
    raw_data = b''.join(parts)
    return ResponseMessage(
        version, 200, 'Connection established', headers, raw_headers,
        True, False, False, None, response_line,
        len(raw_data), b'', raw_data, None, time.time())
def http_response(version=None, status=200, reason=None, headers=None, content=None):
    """Build a synthetic HTTP response message.

    *content*, when given, is a str; it is encoded to the body and a
    Content-Length header is emitted for it.  *reason* defaults to the
    standard phrase for *status* plus a "(TSP)" marker.
    """
    if not version:
        version = 'HTTP/1.1'
    if not reason:
        reason = responses[status] + '(TSP)'
    response_line = '%s %d %s' % (version, status, reason)
    raw_data = ('%s\r\n' % response_line).encode()
    raw_headers = []
    if headers:
        for key in headers:
            raw_data += ('%s: %s\r\n' % (key, headers[key])).encode()
            raw_headers.append((key, headers[key]))
    body = content.encode() if content else b''
    if content:
        # BUGFIX: Content-Length must count encoded octets, not characters
        # (the original used len(content) on the str).
        raw_data += ('Content-Length: %d\r\n' % len(body)).encode()
    # BUGFIX: the blank-line header terminator is required even for bodyless
    # responses; the original only emitted it when content was present.
    raw_data += b'\r\n' + body
    return ResponseMessage(
        version, status, reason, headers, raw_headers,
        True, False, False, None if not content else len(body), response_line,
        len(raw_data), body, raw_data, None, time.time())
def test():
    """Ad-hoc smoke test: parse a hard-coded proxy request and print it."""
    request_text = (
        b'GET http://user:pass@pki.google.com/add?jp3.iss.tf&us1&hk2 HTTP/1.1\r\n'
        # b'GET http://user:pass@pki.google.com/GIAG2.crt;some_par?sdsf=sdf#some_fra HTTP/1.1\r\n'
        # b'HEAD /sdsdfs HTTP/1.1\r\n'
        # b'GET http://pki.google.com/GIAG2.crt;some_par?sdsf=sdf#some_fra HTTP/1.1\r\n'
        # b'CONNECT photos-thumb.dropbox.com:443 HTTP/1.1\r\n'
        b'Host: pki.google.com\r\n'
        b'Proxy-Connection: keep-alive\r\n'
        b'Accept: */*\r\n'
        b'User-Agent: ocspd/1.0.3\r\n'
        b'Accept-Language: zh-cn\r\n'
        b'Content-Length: 15\r\n'
        b'Accept-Encoding: gzip, deflate\r\n'
        b'Connection: keep-alive\r\n\r\n'
        b'safasdfa;jd;afd'
    )
    # NOTE(review): response_text is currently unused (the response-parsing
    # calls below are commented out elsewhere in this module).
    response_text = (
        b'HTTP/1.1 400 Bad Request\r\n'
        b'Server: bfe/1.0.8.14\r\n'
        b'Date: Sat, 19 Mar 2016 05:07:02 GMT\r\n\r\n'
        b'AAAsdfsdfsdf'
    )
    parser = HttpRequestParser()
    # res = parser.request_parse(request_text, hostname='www.google.com', port=80)
    res = parser._parse_request(request_text, None, None, None, None)
    test_parse(res)
def test_parse(res):
print(res)
# print(res.error_code) # None (check this first)
# print(res.command) # "GET"
print(res.path) # "/who/ken/trust.html"
print(res.version) # "HTTP/1.1"
print(len(res.headers)) # 3
# # print(request.headers.keys()) # ['accept-charset', 'host', 'accept']
key = 'Proxy-Connection'
if key in res.headers:
print('del %s => %s' % (key, res.headers[key]))
del res.headers[key]
for key in res.headers:
print('%s => %s' % (key, res.headers[key]))
# print(res.headers['host']) # "cm.bell-labs.com"
def test_unparse(res, parser):
unrequest = parser.unparse_request(res)
print("'" + unrequest.decode() + "'")
# res = parser.read_response(response_text, 'GET')
# print(res)
# print("'%s' '%d' '%s' '%d' '%s'" % (res.version, res.status, res.reason, res.head_length, res.headers))
# print("body='%s'" % response_text[res.head_length:].decode())
# request_text = (
# # b'GET http://pki.google.com/GIAG2.crt HTTP/1.1\r\n'
# b'CONNECT photos-thumb.dropbox.com:443 HTTP/1.1\r\n'
# b'Host: pki.google.com\r\n'
# b'Proxy-Connection: keep-alive\r\n'
# b'Accept: */*\r\n'
# b'User-Agent: ocspd/1.0.3\r\n'
# b'Accept-Language: zh-cn\r\n'
# b'Accept-Encoding: gzip, deflate\r\n'
# b'Connection: keep-alive\r\n\r\n'
# b'safasdfa;jd;afd'
# )
#
# request.do_parse(request_text)
#
# print(request.error_code) # None (check this first)
# print(request.command) # "GET"
# print(request.path) # "/who/ken/trust.html"
# print(request.request_version) # "HTTP/1.1"
# print(len(request.headers)) # 3
# # print(request.headers.keys()) # ['accept-charset', 'host', 'accept']
# for key in request.headers:
# print('%s => %s' % (key, request.headers[key]))
# print(request.headers['host']) # "cm.bell-labs.com"
if __name__ == '__main__':
test()
|
felipebetancur/numpy | refs/heads/master | numpy/core/tests/test_print.py | 63 | from __future__ import division, absolute_import, print_function
import sys
import locale
import nose
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal
)
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
# Expected str() output for the IEEE special values used by these tests.
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
def check_float_type(tp):
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(float(x)),
err_msg='Failed str formatting for type %s' % tp)
if tp(1e10).itemsize > 4:
assert_equal(str(tp(1e10)), str(float('1e10')),
err_msg='Failed str formatting for type %s' % tp)
else:
ref = '1e+10'
assert_equal(str(tp(1e10)), ref,
err_msg='Failed str formatting for type %s' % tp)
def test_float_types():
    """ Check formatting.

    This is only for the str function, and only for simple types.
    The precision of np.float and np.longdouble aren't the same as the
    python float precision.

    """
    for scalar_type in (np.float32, np.double, np.longdouble):
        yield check_float_type, scalar_type
def check_nan_inf_float(tp):
    """Assert nan/inf stringification of *tp* matches the _REF table."""
    msg = 'Failed str formatting for type %s' % tp
    for special in (np.inf, -np.inf, np.nan):
        assert_equal(str(tp(special)), _REF[special], err_msg=msg)
def test_nan_inf_float():
    """ Check formatting of nan & inf.

    This is only for the str function, and only for simple types.
    The precision of np.float and np.longdouble aren't the same as the
    python float precision.

    """
    for scalar_type in (np.float32, np.double, np.longdouble):
        yield check_nan_inf_float, scalar_type
def check_complex_type(tp):
    """Assert str() of complex scalar type *tp* matches builtin complex."""
    msg = 'Failed str formatting for type %s' % tp
    for x in (0, 1, -1, 1e20):
        # Real-only, imaginary-only, and mixed values, in that order.
        for sample in (x, x * 1j, x + x * 1j):
            assert_equal(str(tp(sample)), str(complex(sample)), err_msg=msg)
    # complex64 stores float32 components, which print 1e10 in exponent form.
    if tp(1e10).itemsize > 8:
        expected = str(complex(1e10))
    else:
        expected = '(1e+10+0j)'
    assert_equal(str(tp(1e10)), expected, err_msg=msg)
def test_complex_types():
    """Check formatting of complex types.

    This is only for the str function, and only for simple types.
    The precision of np.float and np.longdouble aren't the same as the
    python float precision.

    """
    for scalar_type in (np.complex64, np.cdouble, np.clongdouble):
        yield check_complex_type, scalar_type
def test_complex_inf_nan():
    """Check inf/nan formatting of complex types."""
    TESTS = {
        complex(np.inf, 0): "(inf+0j)",
        complex(0, np.inf): "inf*j",
        complex(-np.inf, 0): "(-inf+0j)",
        complex(0, -np.inf): "-inf*j",
        complex(np.inf, 1): "(inf+1j)",
        complex(1, np.inf): "(1+inf*j)",
        complex(-np.inf, 1): "(-inf+1j)",
        complex(1, -np.inf): "(1-inf*j)",
        complex(np.nan, 0): "(nan+0j)",
        complex(0, np.nan): "nan*j",
        complex(-np.nan, 0): "(nan+0j)",
        complex(0, -np.nan): "nan*j",
        complex(np.nan, 1): "(nan+1j)",
        complex(1, np.nan): "(1+nan*j)",
        complex(-np.nan, 1): "(nan+1j)",
        complex(1, -np.nan): "(1+nan*j)",
    }
    for scalar_type in [np.complex64, np.cdouble, np.clongdouble]:
        for value, expected in TESTS.items():
            yield _check_complex_inf_nan, value, expected, scalar_type
def _check_complex_inf_nan(c, s, dtype):
    """Assert that *dtype*(c) stringifies exactly as *s*."""
    actual = str(dtype(c))
    assert_equal(actual, s)
# print tests
def _test_redirected_print(x, tp, ref=None):
    """Capture print() of tp(x) and of the reference value; assert equality.

    If *ref* is given it is printed as the expected text, otherwise *x*
    itself is printed.
    """
    expected_buf = StringIO()
    actual_buf = StringIO()
    saved_stdout = sys.stdout
    try:
        sys.stdout = actual_buf
        print(tp(x))
        sys.stdout = expected_buf
        print(ref if ref else x)
    finally:
        # Always restore the real stdout, even if printing raised.
        sys.stdout = saved_stdout
    assert_equal(expected_buf.getvalue(), actual_buf.getvalue(),
                 err_msg='print failed for type%s' % tp)
def check_float_type_print(tp):
    """Check print() output of float type *tp* against plain Python floats."""
    for value in (0, 1, -1, 1e20):
        _test_redirected_print(float(value), tp)
    for special in (np.inf, -np.inf, np.nan):
        _test_redirected_print(float(special), tp, _REF[special])
    # float32 prints 1e10 in exponent form; wider types match Python's float.
    if tp(1e10).itemsize > 4:
        _test_redirected_print(float(1e10), tp)
    else:
        _test_redirected_print(float(1e10), tp, '1e+10')
def check_complex_type_print(tp):
    """Check print() output of complex type *tp* against builtin complex."""
    # We do not create complex with inf/nan directly because the feature is
    # missing in python < 2.6
    for value in (0, 1, -1, 1e20):
        _test_redirected_print(complex(value), tp)
    if tp(1e10).itemsize > 8:
        _test_redirected_print(complex(1e10), tp)
    else:
        _test_redirected_print(complex(1e10), tp, '(1e+10+0j)')
    _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
    _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
    _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
def test_float_type_print():
    """Check formatting when using print """
    for scalar_type in (np.float32, np.double, np.longdouble):
        yield check_float_type_print, scalar_type
def test_complex_type_print():
    """Check formatting when using print """
    for scalar_type in (np.complex64, np.cdouble, np.clongdouble):
        yield check_complex_type_print, scalar_type
def test_scalar_format():
    """Test the str.format method with NumPy scalar types"""
    tests = [('{0}', True, np.bool_),
             ('{0}', False, np.bool_),
             ('{0:d}', 130, np.uint8),
             ('{0:d}', 50000, np.uint16),
             ('{0:d}', 3000000000, np.uint32),
             ('{0:d}', 15000000000000000000, np.uint64),
             ('{0:d}', -120, np.int8),
             ('{0:d}', -30000, np.int16),
             ('{0:d}', -2000000000, np.int32),
             ('{0:d}', -7000000000000000000, np.int64),
             ('{0:g}', 1.5, np.float16),
             ('{0:g}', 1.5, np.float32),
             ('{0:g}', 1.5, np.float64),
             ('{0:g}', 1.5, np.longdouble)]

    # Python 2.6 doesn't implement complex.__format__
    if sys.version_info[:2] > (2, 6):
        tests += [('{0:g}', 1.5+0.5j, np.complex64),
                  ('{0:g}', 1.5+0.5j, np.complex128),
                  ('{0:g}', 1.5+0.5j, np.clongdouble)]

    for fmat, val, valtype in tests:
        try:
            # Formatting the numpy scalar must agree with the plain value.
            plain = fmat.format(val)
            scalar = fmat.format(valtype(val))
        except ValueError as e:
            assert_(False,
                    "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
                    (fmat, repr(val), repr(valtype), str(e)))
        else:
            assert_equal(plain, scalar,
                         "failed with val %s, type %s" % (val, valtype))
# Locale tests: scalar types formatting should be independent of the locale
def in_foreign_locale(func):
    """
    Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.'
    If not possible, raise nose.SkipTest
    """
    # Candidate locales whose decimal separator is ','.
    if sys.platform == 'win32':
        locales = ['FRENCH']
    else:
        locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']

    def wrapper(*args, **kwargs):
        curloc = locale.getlocale(locale.LC_NUMERIC)
        try:
            # Try candidates in order; for-else fires only when none worked.
            for loc in locales:
                try:
                    locale.setlocale(locale.LC_NUMERIC, loc)
                    break
                except locale.Error:
                    pass
            else:
                raise nose.SkipTest("Skipping locale test, because "
                                    "French locale not found")
            return func(*args, **kwargs)
        finally:
            # Restore the caller's locale even on failure/skip.
            locale.setlocale(locale.LC_NUMERIC, locale=curloc)
    return nose.tools.make_decorator(func)(wrapper)
@in_foreign_locale
def test_locale_single():
    """str(np.float32) must use '.' regardless of LC_NUMERIC."""
    expected = str(float(1.2))
    assert_equal(str(np.float32(1.2)), expected)
@in_foreign_locale
def test_locale_double():
    """str(np.double) must use '.' regardless of LC_NUMERIC."""
    expected = str(float(1.2))
    assert_equal(str(np.double(1.2)), expected)
@in_foreign_locale
def test_locale_longdouble():
    """str(np.longdouble) must use '.' regardless of LC_NUMERIC."""
    expected = str(float(1.2))
    assert_equal(str(np.longdouble(1.2)), expected)
# Allow running this test module directly (nose-style runner).
if __name__ == "__main__":
    run_module_suite()
|
FeiZhan/Algo-Collection | refs/heads/master | answers/leetcode/Intersection of Two Linked Lists/Intersection of Two Linked Lists.py | 1 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def getIntersectionNode(self, headA, headB):
        """
        :type head1, head1: ListNode
        :rtype: ListNode

        Measure both lists, skip the length difference on the longer one,
        then walk in lockstep until the pointers meet (or both hit None).
        """
        def length(head):
            # Number of nodes reachable from *head*.
            count = 0
            while head:
                count += 1
                head = head.next
            return count

        len_a = length(headA)
        len_b = length(headB)

        node_a, node_b = headA, headB
        # Advance the pointer of the longer list so both have the same
        # number of nodes remaining.
        for _ in range(abs(len_a - len_b)):
            if len_a > len_b:
                node_a = node_a.next
            else:
                node_b = node_b.next

        # Pointers now meet at the first shared node, or both reach None.
        while node_a and node_b and node_a != node_b:
            node_a = node_a.next
            node_b = node_b.next
        return node_a
|
agrista/odoo-saas | refs/heads/master | addons/l10n_fr_hr_payroll/report/__init__.py | 424 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fiche_paye
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pasv/Empire | refs/heads/master | lib/modules/situational_awareness/network/mapdomaintrusts.py | 10 | from lib.common import helpers
class Module:
    """Empire module wrapper for PowerView's Invoke-MapDomainTrusts."""

    # NOTE: Python 2 code (print statements below); keep it py2-compatible.
    def __init__(self, mainMenu, params=[]):
        # Static metadata consumed by the Empire framework UI.
        self.info = {
            'Name': 'Invoke-MapDomainTrusts',

            'Author': ['@harmj0y'],

            'Description': ('Maps all reachable domain trusts with .CSV output. Part of PowerView.'),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'MinPSVersion' : '2',

            'Comments': [
                'https://github.com/PowerShellEmpire/PowerTools/tree/master/PowerView'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'LDAP' : {
                'Description'   :   'Switch. Use LDAP for domain queries (less accurate).',
                'Required'      :   False,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Apply caller-supplied option overrides; unknown names are ignored.
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Return the PowerShell script for the agent, or '' on read failure."""
        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-MapDomainTrusts.ps1"

        try:
            f = open(moduleSource, 'r')
        except:
            print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
            return ""

        moduleCode = f.read()
        f.close()

        script = moduleCode

        # Append the invocation; the LDAP switch selects the LDAP variant.
        if self.options['LDAP']['Value'].lower() == "true":
            script += "Invoke-MapDomainTrustsLDAP | ConvertTo-Csv -NoTypeInformation"
            script += '| Out-String | %{$_ + \"`n\"};"`nInvoke-MapDomainTrustsLDAP completed"'
        else:
            script += "Invoke-MapDomainTrusts | ConvertTo-Csv -NoTypeInformation"
            script += '| Out-String | %{$_ + \"`n\"};"`nInvoke-MapDomainTrusts completed"'

        return script
1tush/sentry | refs/heads/master | src/sentry/api/endpoints/organization_projects.py | 5 | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.models import Project
class OrganizationProjectsEndpoint(OrganizationEndpoint):
    doc_section = DocSection.ORGANIZATIONS

    def get(self, request, organization):
        """
        List an organization's projects

        Return a list of projects bound to a organization.

            {method} {path}

        """
        # Project-scoped auth tokens only see their own project/team.
        if request.auth and hasattr(request.auth, 'project'):
            team_list = [request.auth.project.team]
            project_list = [request.auth.project]
        else:
            team_list = list(request.access.teams)
            project_list = list(Project.objects.filter(
                team__in=team_list,
            ).order_by('name'))

        # BUGFIX: the original passed a bare generator expression to dict()
        # followed by a trailing comma, which is a SyntaxError ("Generator
        # expression must be parenthesized if not sole argument") and made
        # this module unimportable.
        team_map = {
            team.id: team_data
            for team, team_data in zip(team_list, serialize(team_list, request.user))
        }

        context = []
        for project, pdata in zip(project_list, serialize(project_list, request.user)):
            pdata['team'] = team_map[project.team_id]
            context.append(pdata)

        return Response(context)
|
CLOUGH/info3180-project-3 | refs/heads/master | lib/werkzeug/_compat.py | 448 | import sys
import operator
import functools
try:
import builtins
except ImportError:
import __builtin__ as builtins
# True when running under Python 2; selects the legacy branch below.
PY2 = sys.version_info[0] == 2
# No-op pass-through used where the Python 3 branch needs no adaptation.
_identity = lambda x: x
if PY2:
    # Python 2 branch: aliases and helpers bridging the 2.x string/iterator
    # model to the common API used by the rest of the package.
    unichr = unichr
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)

    int_to_byte = chr

    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)

    iter_bytes = lambda x: iter(x)

    # py2 three-argument raise is a SyntaxError on py3, so hide it in exec().
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    def fix_tuple_repr(obj):
        # Give namedtuple-like classes a field=value repr.
        def __repr__(self):
            cls = self.__class__
            return '%s(%s)' % (cls.__name__, ', '.join(
                '%s=%r' % (field, self[index])
                for index, field in enumerate(cls._fields)
            ))
        obj.__repr__ = __repr__
        return obj

    def implements_iterator(cls):
        # Map the py3 __next__ protocol onto py2's next().
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    def native_string_result(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs).encode('utf-8')
        return functools.update_wrapper(wrapper, func)

    def implements_bool(cls):
        cls.__nonzero__ = cls.__bool__
        del cls.__bool__
        return cls

    from itertools import imap, izip, ifilter
    range_type = xrange

    from StringIO import StringIO
    from cStringIO import StringIO as BytesIO
    NativeStringIO = BytesIO

    def make_literal_wrapper(reference):
        return lambda x: x

    def normalize_string_tuple(tup):
        """Normalizes a string tuple to a common type. Following Python 2
        rules, upgrades to unicode are implicit.
        """
        if any(isinstance(x, text_type) for x in tup):
            return tuple(to_unicode(x) for x in tup)
        return tup

    def try_coerce_native(s):
        """Try to coerce a unicode string to native if possible. Otherwise,
        leave it as unicode.
        """
        try:
            return str(s)
        except UnicodeError:
            return s

    wsgi_get_bytes = _identity

    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        return s.decode(charset, errors)

    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        if isinstance(s, bytes):
            return s
        return s.encode(charset, errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, buffer)):
            return bytes(x)
        if isinstance(x, unicode):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None or isinstance(x, str):
            return x
        return x.encode(charset, errors)

else:
    # Python 3 branch: mostly straight aliases; WSGI helpers do the
    # PEP 3333 latin1 round-trip dance.
    unichr = chr
    text_type = str
    string_types = (str, )
    integer_types = (int, )

    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))

    int_to_byte = operator.methodcaller('to_bytes', 1, 'big')

    def iter_bytes(b):
        return map(int_to_byte, b)

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    fix_tuple_repr = _identity
    implements_iterator = _identity
    implements_to_string = _identity
    implements_bool = _identity
    native_string_result = _identity

    imap = map
    izip = zip
    ifilter = filter
    range_type = range

    from io import StringIO, BytesIO
    NativeStringIO = StringIO

    def make_literal_wrapper(reference):
        if isinstance(reference, text_type):
            return lambda x: x
        return lambda x: x.encode('latin1')

    def normalize_string_tuple(tup):
        """Ensures that all types in the tuple are either strings
        or bytes.
        """
        tupiter = iter(tup)
        is_text = isinstance(next(tupiter, None), text_type)
        for arg in tupiter:
            if isinstance(arg, text_type) != is_text:
                raise TypeError('Cannot mix str and bytes arguments (got %s)'
                                % repr(tup))
        return tup

    try_coerce_native = _identity

    def wsgi_get_bytes(s):
        return s.encode('latin1')

    def wsgi_decoding_dance(s, charset='utf-8', errors='replace'):
        return s.encode('latin1').decode(charset, errors)

    def wsgi_encoding_dance(s, charset='utf-8', errors='replace'):
        if isinstance(s, bytes):
            return s.decode('latin1', errors)
        return s.encode(charset).decode('latin1', errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, memoryview)):
            return bytes(x)
        if isinstance(x, str):
            return x.encode(charset, errors)
        raise TypeError('Expected bytes')

    def to_native(x, charset=sys.getdefaultencoding(), errors='strict'):
        if x is None or isinstance(x, str):
            return x
        return x.decode(charset, errors)
def to_unicode(x, charset=sys.getdefaultencoding(), errors='strict',
               allow_none_charset=False):
    """Convert *x* to unicode text.

    ``None`` passes through unchanged.  Bytes are decoded with
    *charset*/*errors*, except that a ``None`` charset combined with
    *allow_none_charset* returns the bytes untouched.  Anything else is
    converted via ``text_type``.
    """
    if x is None:
        return None
    if isinstance(x, bytes):
        if charset is None and allow_none_charset:
            return x
        return x.decode(charset, errors)
    return text_type(x)
|
bionikspoon/ebay | refs/heads/develop | docs/source/conf.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from os.path import abspath, relpath
import sphinx.environment
def _warn_node(func):
    """Wrap BuildEnvironment.warn_node to silence 'nonlocal image URI' warnings."""
    def wrapper(self, msg, node):
        # Swallow only the nonlocal-image warning; forward everything else.
        if msg.startswith('nonlocal image URI found:'):
            return None
        return func(self, msg, node)
    return wrapper
# Monkey-patch Sphinx so builds do not warn about remote (nonlocal) images.
sphinx.environment.BuildEnvironment.warn_node = _warn_node(
    sphinx.environment.BuildEnvironment.warn_node)
# Make the package importable for autodoc.
# NOTE(review): abspath(relpath('../', __file__)) is resolved against the
# current working directory - presumably builds always run from docs/source;
# confirm, otherwise use os.path.dirname(__file__) instead.
sys.path.insert(0, abspath(relpath('../', __file__)))
import ebay
# Sphinx extensions enabled for this project.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
              'sphinx.ext.viewcode', ]

templates_path = ['_templates']
source_suffix = '.rst'
source_encoding = 'utf-8-sig'
master_doc = 'index'

# General information about the project.
project = u'Ebay API'
copyright = u'2015, Manu Phatak'
author = ebay.__author__
# Version/release are pulled from the package itself so docs stay in sync.
version = ebay.__version__
release = ebay.__version__
#language = None
#today = ''
#today_fmt = '%B %d, %Y'
exclude_patterns = ['build']
#default_role = None
#add_function_parentheses = True
#add_module_names = True
#show_authors = False
pygments_style = 'sphinx'
#modindex_common_prefix = []
#keep_warnings = False
viewcode_import = True

# -- Options for HTML output -------------------------------------------
html_theme = 'sphinx_rtd_theme'
#html_theme_options = {}
#html_theme_path = []
#html_title = None
#html_short_title = None
#html_logo = None
#html_favicon = None
html_static_path = ['_static']
#html_last_updated_fmt = '%b %d, %Y'
#html_use_smartypants = True
#html_sidebars = {}
#html_additional_pages = {}
#html_domain_indices = True
#html_use_index = True
#html_split_index = False
#html_show_sourcelink = True
#html_show_sphinx = True
#html_show_copyright = True
#html_use_opensearch = ''
#html_file_suffix = None
htmlhelp_basename = 'ebaydoc'

# -- Options for LaTeX output ------------------------------------------
latex_elements = {}
#'papersize': 'letterpaper',
#'pointsize': '10pt',
#'preamble': '',
latex_documents = [
    ('index', 'ebay.tex',
     u'Ebay API Documentation',
     u'Manu Phatak', 'manual'),
]
#latex_logo = None
#latex_use_parts = False
#latex_show_pagerefs = False
#latex_show_urls = False
#latex_appendices = []
#latex_domain_indices = True

# -- Options for manual page output ------------------------------------
man_pages = [
    ('index', 'ebay',
     u'Ebay API Documentation',
     [u'Manu Phatak'], 1)
]
#man_show_urls = False

# -- Options for Texinfo output ----------------------------------------
texinfo_documents = [
    ('index', 'ebay',
     u'Ebay API Documentation',
     u'Manu Phatak',
     'ebay',
     'One line description of project.',
     'Miscellaneous'),
]
#texinfo_appendices = []
#texinfo_domain_indices = True
#texinfo_show_urls = 'footnote'
#texinfo_no_detailmenu = False
leereilly/django-1 | refs/heads/master | django/contrib/gis/gdal/prototypes/__init__.py | 12133432 | |
spacy-io/spaCy | refs/heads/master | spacy/tests/lang/de/__init__.py | 12133432 | |
nikhilsaraf/Twitter-Analytics | refs/heads/master | venv/lib/python2.7/site-packages/requests/packages/urllib3/poolmanager.py | 359 | from __future__ import absolute_import
import collections
import functools
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
from .request import RequestMethods
from .util.url import parse_url
from .util.retry import Retry
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']

# Module-level logger used for redirect messages below.
log = logging.getLogger(__name__)

# Keyword arguments that only apply to HTTPS pools; stripped for plain HTTP.
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
                'ssl_version', 'ca_cert_dir')
# The base fields to use when determining what pool to get a connection from;
# these do not rely on the ``connection_pool_kw`` and can be determined by the
# URL and potentially the ``urllib3.connection.port_by_scheme`` dictionary.
#
# All custom key schemes should include the fields in this key at a minimum.
BasePoolKey = collections.namedtuple('BasePoolKey', ('scheme', 'host', 'port'))

# The fields to use when determining what pool to get a HTTP and HTTPS
# connection from. All additional fields must be present in the PoolManager's
# ``connection_pool_kw`` instance variable.
HTTPPoolKey = collections.namedtuple(
    'HTTPPoolKey', BasePoolKey._fields + ('timeout', 'retries', 'strict',
                                          'block', 'source_address')
)
# HTTPS keys additionally distinguish pools by their SSL configuration.
HTTPSPoolKey = collections.namedtuple(
    'HTTPSPoolKey', HTTPPoolKey._fields + SSL_KEYWORDS
)
def _default_key_normalizer(key_class, request_context):
    """
    Create a pool key of type ``key_class`` for a request.

    According to RFC 3986, both the scheme and host are case-insensitive.
    Therefore, this function normalizes both before constructing the pool
    key for an HTTPS request. If you wish to change this behaviour, provide
    alternate callables to ``key_fn_by_scheme``.

    :param key_class:
        The class to use when constructing the key. This should be a namedtuple
        with the ``scheme`` and ``host`` keys at a minimum.

    :param request_context:
        A dictionary-like object that contain the context for a request.
        It should contain a key for each field in the :class:`HTTPPoolKey`
    """
    # Missing fields default to None so every key slot is populated.
    normalized = {field: request_context.get(field)
                  for field in key_class._fields}
    normalized['scheme'] = normalized['scheme'].lower()
    normalized['host'] = normalized['host'].lower()
    return key_class(**normalized)
# A dictionary that maps a scheme to a callable that creates a pool key.
# This can be used to alter the way pool keys are constructed, if desired.
# Each PoolManager makes a copy of this dictionary so they can be configured
# globally here, or individually on the instance.
key_fn_by_scheme = {
    'http': functools.partial(_default_key_normalizer, HTTPPoolKey),
    'https': functools.partial(_default_key_normalizer, HTTPSPoolKey),
}

# ConnectionPool implementation chosen per scheme when a new pool is created.
pool_classes_by_scheme = {
    'http': HTTPConnectionPool,
    'https': HTTPSConnectionPool,
}
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
proxy = None
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
# Locally set the pool classes and keys so other PoolManagers can
# override them.
self.pool_classes_by_scheme = pool_classes_by_scheme
self.key_fn_by_scheme = key_fn_by_scheme.copy()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.clear()
# Return False to re-raise any potential exceptions
return False
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = self.pool_classes_by_scheme[scheme]
kwargs = self.connection_pool_kw
if scheme == 'http':
kwargs = self.connection_pool_kw.copy()
for kw in SSL_KEYWORDS:
kwargs.pop(kw, None)
return pool_cls(host, port, **kwargs)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
if not host:
raise LocationValueError("No host specified.")
request_context = self.connection_pool_kw.copy()
request_context['scheme'] = scheme or 'http'
if not port:
port = port_by_scheme.get(request_context['scheme'].lower(), 80)
request_context['port'] = port
request_context['host'] = host
return self.connection_from_context(request_context)
def connection_from_context(self, request_context):
"""
Get a :class:`ConnectionPool` based on the request context.
``request_context`` must at least contain the ``scheme`` key and its
value must be a key in ``key_fn_by_scheme`` instance variable.
"""
scheme = request_context['scheme'].lower()
pool_key_constructor = self.key_fn_by_scheme[scheme]
pool_key = pool_key_constructor(request_context)
return self.connection_from_pool_key(pool_key)
def connection_from_pool_key(self, pool_key):
"""
Get a :class:`ConnectionPool` based on the provided pool key.
``pool_key`` should be a namedtuple that only contains immutable
objects. At a minimum it must have the ``scheme``, ``host``, and
``port`` fields.
"""
with self.pools.lock:
# If the scheme, host, or port doesn't match existing open
# connections, open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(pool_key.scheme, pool_key.host, pool_key.port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
    """Parse ``url`` and return the matching :class:`ConnectionPool`.

    Similar to :func:`urllib3.connectionpool.connection_from_url`, except
    that the extra pool parameters come from this :class:`.PoolManager`'s
    constructor rather than being passed here.
    """
    parsed = parse_url(url)
    return self.connection_from_host(parsed.host, port=parsed.port,
                                     scheme=parsed.scheme)
def urlopen(self, method, url, redirect=True, **kw):
    """
    Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
    with custom cross-host redirect logic and only sends the request-uri
    portion of the ``url``.

    The given ``url`` parameter must be absolute, such that an appropriate
    :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
    """
    u = parse_url(url)
    conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

    # Redirects are handled here (so they can cross hosts) rather than by
    # the pool: disable the pool-level redirect and same-host checks.
    kw['assert_same_host'] = False
    kw['redirect'] = False
    if 'headers' not in kw:
        kw['headers'] = self.headers

    if self.proxy is not None and u.scheme == "http":
        # Plain-HTTP requests through a proxy carry the absolute URL.
        response = conn.urlopen(method, url, **kw)
    else:
        response = conn.urlopen(method, u.request_uri, **kw)

    redirect_location = redirect and response.get_redirect_location()
    if not redirect_location:
        return response

    # Support relative URLs for redirecting.
    redirect_location = urljoin(url, redirect_location)

    # RFC 7231, Section 6.4.4: a 303 response is followed with GET.
    if response.status == 303:
        method = 'GET'

    retries = kw.get('retries')
    if not isinstance(retries, Retry):
        retries = Retry.from_int(retries, redirect=redirect)

    try:
        retries = retries.increment(method, url, response=response, _pool=conn)
    except MaxRetryError:
        if retries.raise_on_redirect:
            raise
        # Redirect budget exhausted but the caller opted not to raise:
        # hand back the redirect response itself.
        return response

    kw['retries'] = retries
    kw['redirect'] = redirect

    log.info("Redirecting %s -> %s", url, redirect_location)
    # Recurse to follow the redirect with the decremented retry object.
    return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary containing headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3

    """

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):
        # An existing ConnectionPool may be passed in place of a URL string.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            # Fill in a default port for the proxy from its scheme.
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)

        if proxy.scheme not in ("http", "https"):
            raise ProxySchemeUnknown(proxy.scheme)

        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}

        # Propagate the proxy settings into every pool this manager creates.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers

        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)

    def connection_from_host(self, host, port=None, scheme='http'):
        # HTTPS goes through a CONNECT tunnel, so pool per target host;
        # plain HTTP is sent straight to the proxy, so pool on the proxy.
        if scheme == "https":
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme)

        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme)

    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}

        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc

        if headers:
            # Caller-supplied headers win over the defaults above.
            headers_.update(headers)
        return headers_

    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)

        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            headers = kw.get('headers', self.headers)
            kw['headers'] = self._set_proxy_headers(url, headers)

        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
def proxy_from_url(url, **kw):
    """Convenience helper: build a :class:`ProxyManager` for ``url``,
    forwarding any extra keyword arguments to its constructor."""
    return ProxyManager(proxy_url=url, **kw)
|
rutgers-apl/Atomicity-Violation-Detector | refs/heads/master | tdebug-llvm/llvm/utils/lit/lit/formats/googletest.py | 6 | from __future__ import absolute_import
import os
import sys
import lit.Test
import lit.TestRunner
import lit.util
from .base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
class GoogleTest(TestFormat):
    """Lit test format that discovers and runs GoogleTest executables.

    Each gtest binary is expanded into one lit test per gtest case,
    discovered via ``--gtest_list_tests`` and run via ``--gtest_filter``.
    """

    def __init__(self, test_sub_dir, test_suffix):
        """
        Args:
          test_sub_dir: ';'-separated list of sub-directory names that hold
            the test executables.
          test_suffix: filename suffix identifying test executables.
        """
        self.test_sub_dir = os.path.normcase(str(test_sub_dir)).split(';')
        self.test_suffix = str(test_suffix)

        # On Windows, assume tests will also end in '.exe'.
        if kIsWindows:
            self.test_suffix += '.exe'

    def getGTestTests(self, path, litConfig, localConfig):
        """getGTestTests(path) - [name]

        Return the tests available in gtest executable.

        Args:
          path: String path to a gtest executable
          litConfig: LitConfig instance
          localConfig: TestingConfig instance"""
        try:
            lines = lit.util.capture([path, '--gtest_list_tests'],
                                     env=localConfig.environment)
            if kIsWindows:
                lines = lines.replace('\r', '')
            lines = lines.split('\n')
        # Fix: a bare ``except:`` here also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception so those still propagate.
        except Exception:
            litConfig.error("unable to discover google-tests in %r" % path)
            # Fix: ``raise StopIteration`` inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); a plain return ends
            # the generator in the same way.
            return

        nested_tests = []
        for ln in lines:
            if not ln.strip():
                continue

            # Each level of gtest nesting is indented by two spaces; count
            # the depth of this line.
            index = 0
            while ln[index * 2:index * 2 + 2] == '  ':
                index += 1
            while len(nested_tests) > index:
                nested_tests.pop()

            ln = ln[index * 2:]
            if ln.endswith('.'):
                # A trailing '.' marks a test-group prefix, not a test name.
                nested_tests.append(ln)
            elif any(name.startswith('DISABLED_')
                     for name in nested_tests + [ln]):
                # Gtest will internally skip these tests. No need to launch a
                # child process for it.
                continue
            else:
                yield ''.join(nested_tests) + ln

    # Note: path_in_suite should not include the executable name.
    def getTestsInExecutable(self, testSuite, path_in_suite, execpath,
                             litConfig, localConfig):
        """Yield one lit Test per gtest case found in ``execpath``."""
        if not execpath.endswith(self.test_suffix):
            return
        # Only the basename is needed; the unused dirname local was removed.
        basename = os.path.basename(execpath)
        # Discover the tests in this executable.
        for testname in self.getGTestTests(execpath, litConfig, localConfig):
            testPath = path_in_suite + (basename, testname)
            yield lit.Test.Test(testSuite, testPath, localConfig,
                                file_path=execpath)

    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        """Yield the tests from executables below ``path_in_suite``.

        Descends one level into the configured test sub-directories, and
        also looks directly at files in the directory itself when '.' is
        listed as a test sub-directory.
        """
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            filepath = os.path.join(source_path, filename)
            if os.path.isdir(filepath):
                # Iterate over executables in a directory.
                if not os.path.normcase(filename) in self.test_sub_dir:
                    continue
                dirpath_in_suite = path_in_suite + (filename, )
                for subfilename in os.listdir(filepath):
                    execpath = os.path.join(filepath, subfilename)
                    for test in self.getTestsInExecutable(
                            testSuite, dirpath_in_suite, execpath,
                            litConfig, localConfig):
                        yield test
            elif ('.' in self.test_sub_dir):
                for test in self.getTestsInExecutable(
                        testSuite, path_in_suite, filepath,
                        litConfig, localConfig):
                    yield test

    def execute(self, test, litConfig):
        """Run a single discovered gtest case and report its lit result."""
        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

        cmd = [testPath, '--gtest_filter=' + testName]
        if litConfig.useValgrind:
            cmd = litConfig.valgrindArgs + cmd

        if litConfig.noExecute:
            return lit.Test.PASS, ''

        out, err, exitCode = lit.util.executeCommand(
            cmd, env=test.config.environment)
        if exitCode:
            return lit.Test.FAIL, out + err

        # gtest prints exactly this line on a fully passing single-test run
        # (two spaces inside the brackets).
        passing_test_line = '[  PASSED  ] 1 test.'
        if passing_test_line not in out:
            msg = ('Unable to find %r in gtest output:\n\n%s%s' %
                   (passing_test_line, out, err))
            return lit.Test.UNRESOLVED, msg

        return lit.Test.PASS, ''
|
debugger06/MiroX | refs/heads/master | tv/lib/frontends/cli/util.py | 3 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
import textwrap
def print_box(text):
print "-" * 78
leftspace = ' ' * ((78 - 2 - len(text)) / 2)
rightspace = ' ' * ((78 - 2 - len(text) + 1) / 2)
print "*%s%s%s*" % (leftspace, text, rightspace)
print "-" * 78
def print_text(text):
print textwrap.fill(text)
|
otadmor/Open-Knesset | refs/heads/master | persons/migrations/0001_initial.py | 15 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: initial tables for the ``persons`` app.

    Auto-generated. Creates the Title, Person and Role models plus the
    Person<->Title M2M join table; ``models`` is South's frozen ORM
    snapshot used to build the ``orm`` argument.
    """

    def forwards(self, orm):
        # Applied on ``migrate``: create the persons tables.

        # Adding model 'Title'
        db.create_table('persons_title', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ))
        db.send_create_signal('persons', ['Title'])

        # Adding model 'Person'
        db.create_table('persons_person', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('mk', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person', null=True, to=orm['mks.Member'])),
        ))
        db.send_create_signal('persons', ['Person'])

        # Adding M2M table for field titles on 'Person'
        db.create_table('persons_person_titles', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('person', models.ForeignKey(orm['persons.person'], null=False)),
            ('title', models.ForeignKey(orm['persons.title'], null=False))
        ))
        db.create_unique('persons_person_titles', ['person_id', 'title_id'])

        # Adding model 'Role'
        db.create_table('persons_role', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('text', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True)),
            ('person', self.gf('django.db.models.fields.related.ForeignKey')(related_name='roles', to=orm['persons.Person'])),
        ))
        db.send_create_signal('persons', ['Role'])

    def backwards(self, orm):
        # Applied on reverse migration: drop everything forwards() created.

        # Deleting model 'Title'
        db.delete_table('persons_title')

        # Deleting model 'Person'
        db.delete_table('persons_person')

        # Removing M2M table for field titles on 'Person'
        db.delete_table('persons_person_titles')

        # Deleting model 'Role'
        db.delete_table('persons_role')

    # Frozen ORM snapshot (auto-generated by South) -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'mks.member': {
            'Meta': {'object_name': 'Member'},
            'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
            'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'mks.membership': {
            'Meta': {'object_name': 'Membership'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
            'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'mks.party': {
            'Meta': {'object_name': 'Party'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'persons.person': {
            'Meta': {'object_name': 'Person'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"})
        },
        'persons.role': {
            'Meta': {'object_name': 'Role'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'roles'", 'to': "orm['persons.Person']"}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
        },
        'persons.title': {
            'Meta': {'object_name': 'Title'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'planet.blog': {
            'Meta': {'object_name': 'Blog'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
        }
    }

    complete_apps = ['persons']
|
gusyussh/learntosolveit | refs/heads/version1 | languages/python/design_caseinsensitivedict.py | 7 | #!/usr/bin/python
# $Id$
"""
Case Insenstive Dictionary Lookup. Dictionary keys are case sensitive. However
you might want some facilities to do a case-insenstive dictiionary lookup at
times. This provides the facility for the same.
"""
class CaseInsensitiveDict(dict):
    """A dict whose item lookups ignore case for string keys.

    A side table (``_keystore``) maps each lowercased key to the exact key
    stored in the underlying dict; non-string keys behave like normal dict
    keys.

    NOTE(review): only ``__setitem__``/``__getitem__`` are intercepted here;
    bulk operations such as ``update()`` or ``setdefault()`` bypass the side
    table, so keys added that way are not case-insensitive.
    """

    def __init__(self, *args, **kwargs):
        self._keystore = {}
        d = dict(*args, **kwargs)
        for k in d.keys():
            self._keystore[self._get_lower(k)] = k
        super(CaseInsensitiveDict, self).__init__(*args, **kwargs)

    def __setitem__(self, k, v):
        lower = self._get_lower(k)
        # Bug fix: when the same key is re-assigned with different casing
        # (d['Spam'] after d['spam']), drop the previously stored variant so
        # the dict does not accumulate stale duplicate entries.
        if lower in self._keystore:
            old_key = self._keystore[lower]
            if old_key != k and dict.__contains__(self, old_key):
                dict.__delitem__(self, old_key)
        self._keystore[lower] = k
        return super(CaseInsensitiveDict, self).__setitem__(k, v)

    def __getitem__(self, k):
        # Translate through the keystore to whatever casing was stored last.
        return super(CaseInsensitiveDict,
                     self).__getitem__(self._keystore[self._get_lower(k)])

    @staticmethod
    def _get_lower(k):
        """Lowercase string keys; leave every other key type untouched."""
        if isinstance(k, str):
            return k.lower()
        else:
            return k
def test():
    """Smoke-test CaseInsensitiveDict by exercising mixed-case access."""
    obj = CaseInsensitiveDict([('name','senthil')])
    print obj
    obj['Sname']='kumaran'
    obj['spam'] ='eggs'
    obj['SPAM']='ham'
    print obj.items()
    obj1 = dict(fname='FIRST')
    # NOTE(review): dict.update() does not appear to route through
    # __setitem__ here, so 'fname' is presumably missing from the
    # case-insensitive keystore and obj['FNAME'] would fail -- verify
    # whether that is intended.
    obj.update(obj1)
    print obj
    print obj.keys()
    print obj.items()
    # Mixed-case lookups of keys added via __setitem__/__init__.
    print obj['NAME']
    print obj['SNAME']


if __name__ == '__main__':
    test()
|
MrLoick/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/tkinter/test/test_ttk/__init__.py | 12133432 | |
arhik/nupic | refs/heads/master | tests/integration/nupic/algorithms/tp_likelihood_test.py | 5 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Sequence Likelihood Tests
=========================
LI1) Present three sequences
Seq#1: A-B-C-D-E
Seq#2: A-B-C-D-F
Seq#3: A-B-C-D-G
with the relative frequencies, such as [0.1,0.7,0.2]
Test: after presenting A-B-C-D, prediction scores should reflect the transition
probabilities for E, F and G, i.e. Run the test for several different
probability combinations.
LI2) Given a TP trained with LI1, compute the prediction score across a
list of sequences.
LI3) Given the following sequence and a one cell per column TP:
Seq1: a-b-b-c-d
There should be four segments a-b
"""
import numpy
import unittest2 as unittest
from nupic.research.TP import TP
from nupic.research.TP10X2 import TP10X2
from nupic.support.unittesthelpers import testcasebase
# Fixed seed so pattern generation and sequence draws are reproducible.
SEED = 42
# Debug printout level used throughout this module (higher = chattier).
VERBOSITY = 1
# Passed as checkSynapseConsistency for the short test variant below.
LONG = True

# Module-wide RNG; seeded so the frequency-based sequence choices repeat.
_RGEN = numpy.random.RandomState(SEED)
def _getSimplePatterns(numOnes, numPatterns):
"""Very simple patterns. Each pattern has numOnes consecutive
bits on. There are numPatterns*numOnes bits in the vector."""
numCols = numOnes * numPatterns
p = []
for i in xrange(numPatterns):
x = numpy.zeros(numCols, dtype='float32')
x[i*numOnes:(i+1)*numOnes] = 1
p.append(x)
return p
def _buildLikelihoodTrainingSet(numOnes=5, relativeFrequencies=None):
    """Construct three simple high-order debugging sequences.

    Seven base patterns are generated; the three sequences share the same
    four-pattern prefix and diverge only in their fifth (final) element.

    Returns a (trainingSequences, relativeFrequencies, allPatterns) tuple.
    """
    patterns = _getSimplePatterns(numOnes, 7)

    prefix = patterns[:4]
    trainingSequences = [prefix + [patterns[tail]] for tail in (4, 5, 6)]
    return (trainingSequences, relativeFrequencies, patterns)
def _createTPs(numCols, cellsPerColumn=4, checkSynapseConsistency=True):
    """Create TP and TP10X instances with identical parameters.

    Returns a (cppTp, pyTp) pair -- the C++-backed TP10X2 implementation and
    the pure-Python TP, configured identically so their behavior can be
    compared by the tests.
    """

    # Keep these fixed for both TP's:
    minThreshold = 4
    activationThreshold = 4
    newSynapseCount = 5
    initialPerm = 0.6
    connectedPerm = 0.5
    permanenceInc = 0.1
    permanenceDec = 0.001
    globalDecay = 0.0

    if VERBOSITY > 1:
        print "Creating TP10X instance"

    cppTp = TP10X2(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
                   initialPerm=initialPerm, connectedPerm=connectedPerm,
                   minThreshold=minThreshold, newSynapseCount=newSynapseCount,
                   permanenceInc=permanenceInc, permanenceDec=permanenceDec,
                   activationThreshold=activationThreshold,
                   globalDecay=globalDecay, burnIn=1,
                   seed=SEED, verbosity=VERBOSITY,
                   checkSynapseConsistency=checkSynapseConsistency,
                   pamLength=1000)

    if VERBOSITY > 1:
        print "Creating PY TP instance"

    # NOTE: the Python TP takes no checkSynapseConsistency argument here;
    # otherwise the parameter sets are identical.
    pyTp = TP(numberOfCols=numCols, cellsPerColumn=cellsPerColumn,
              initialPerm=initialPerm, connectedPerm=connectedPerm,
              minThreshold=minThreshold, newSynapseCount=newSynapseCount,
              permanenceInc=permanenceInc, permanenceDec=permanenceDec,
              activationThreshold=activationThreshold,
              globalDecay=globalDecay, burnIn=1,
              seed=SEED, verbosity=VERBOSITY,
              pamLength=1000)

    return cppTp, pyTp
def _computeTPMetric(tp=None, sequences=None, useResets=True, verbosity=1):
    """Given a trained TP and a list of sequences, compute the temporal pooler
    performance metric on those sequences.

    Parameters:
    ===========
    tp:             A trained temporal pooler.
    sequences:      A list of sequences. Each sequence is a list of numpy
                    vectors.
    useResets:      If True, the TP's reset method will be called before the
                    the start of each new sequence.
    verbosity:      An integer controlling the level of printouts. The higher
                    the number the more debug printouts.

    Return value:
    ============
    The following pair is returned: (score, numPredictions)

    score:          The average prediction score per pattern.
    numPredictions: The total number of predictions that were made.
    """
    datasetScore = 0
    numPredictions = 0
    # Clear any previously accumulated stats so the metric reflects only
    # the sequences presented below.
    tp.resetStats()
    for seqIdx, seq in enumerate(sequences):

        # Feed in a reset
        if useResets:
            tp.reset()

        seq = numpy.array(seq, dtype='uint32')
        if verbosity > 2:
            print "--------------------------------------------------------"
        for i, inputPattern in enumerate(seq):
            if verbosity > 2:
                print "sequence %d, element %d," % (seqIdx, i),
                print "pattern", inputPattern

            # Feed this input to the TP and get the stats
            y = tp.infer(inputPattern)

            if verbosity > 2:
                stats = tp.getStats()
                if stats['curPredictionScore'] > 0:
                    print " patternConfidence=", stats['curPredictionScore2']

            # Print some diagnostics for debugging
            if verbosity > 3:
                print "\n\n"
                predOut = numpy.sum(tp.predictedState['t'], axis=1)
                actOut = numpy.sum(tp.activeState['t'], axis=1)
                outout = numpy.sum(y.reshape(tp.activeState['t'].shape), axis=1)
                print "Prediction non-zeros: ", predOut.nonzero()
                print "Activestate non-zero: ", actOut.nonzero()
                print "input non-zeros: ", inputPattern.nonzero()
                print "Output non-zeros: ", outout.nonzero()

    # Print and return final stats
    stats = tp.getStats()
    datasetScore = stats['predictionScoreAvg2']
    numPredictions = stats['nPredictions']
    print "Final results: datasetScore=", datasetScore,
    print "numPredictions=", numPredictions
    return datasetScore, numPredictions
def _createDataset(numSequences, originalSequences, relativeFrequencies):
    """Sample ``numSequences`` sequences from ``originalSequences``.

    Each entry is drawn independently; sequence ``i`` is chosen with the
    probability given by ``relativeFrequencies[i]``. Draws come from the
    module-level ``_RGEN`` RandomState, so results are reproducible for a
    fixed SEED.
    """
    dataSet = []
    # The cumulative distribution lets searchsorted map a uniform draw in
    # [0, 1) directly to a sequence index.
    trainingCummulativeFrequencies = numpy.cumsum(relativeFrequencies)
    # ``range`` instead of the Python-2-only ``xrange``: iteration behavior
    # is identical and the helper stays portable to Python 3.
    for _ in range(numSequences):
        # Pick a training sequence to present, based on the given training
        # frequencies.
        whichSequence = numpy.searchsorted(trainingCummulativeFrequencies,
                                           _RGEN.random_sample())
        dataSet.append(originalSequences[whichSequence])

    return dataSet
class TPLikelihoodTest(testcasebase.TestCaseBase):
def _testSequence(self,
trainingSet,
nSequencePresentations=1,
tp=None,
testSequences=None,
doResets=True,
relativeFrequencies=None):
"""Test a single set of sequences once and check that individual
predictions reflect the true relative frequencies. Return a success code
as well as the trained TP. Success code is 1 for pass, 0 for fail.
The trainingSet is a set of 3 sequences that share the same first 4
elements but differ in the 5th element. After feeding in the first 4 elements,
we want to correctly compute the confidences for the 5th element based on
the frequency with which each sequence was presented during learning.
For example:
trainingSequences[0]: (10% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat E: (array([20, 21, 22, 23, 24]),)
trainingSequences[1]: (20% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat F: (array([25, 26, 27, 28, 29]),)
trainingSequences[2]: (70% probable)
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat G: (array([30, 31, 32, 33, 34]),)
allTrainingPatterns:
pat A: (array([0, 1, 2, 3, 4]),)
pat B: (array([5, 6, 7, 8, 9]),)
pat C: (array([10, 11, 12, 13, 14]),)
pat D: (array([15, 16, 17, 18, 19]),)
pat E: (array([20, 21, 22, 23, 24]),)
pat F: (array([25, 26, 27, 28, 29]),)
pat G: (array([30, 31, 32, 33, 34]),)
"""
trainingSequences = trainingSet[0]
trainingFrequencies = trainingSet[1]
allTrainingPatterns = trainingSet[2]
trainingCummulativeFrequencies = numpy.cumsum(trainingFrequencies)
if testSequences == None:
testSequences = trainingSequences
# Learn
if VERBOSITY > 1:
print "============= Learning ================="
for r in xrange(nSequencePresentations):
# Pick a training sequence to present, based on the given training
# frequencies.
whichSequence = numpy.searchsorted(trainingCummulativeFrequencies,
_RGEN.random_sample())
trainingSequence = trainingSequences[whichSequence]
if VERBOSITY > 2:
print "=========Presentation #%d Sequence #%d==============" % \
(r, whichSequence)
if doResets:
tp.reset()
for t, x in enumerate(trainingSequence):
if VERBOSITY > 3:
print "Time step", t
print "Input: ", tp.printInput(x)
tp.learn(x)
if VERBOSITY > 4:
tp.printStates(printPrevious=(VERBOSITY > 4))
print
if VERBOSITY > 4:
print "Sequence finished. Complete state after sequence"
tp.printCells()
print
tp.finishLearning()
if VERBOSITY > 2:
print "Training completed. Complete state:"
tp.printCells()
print
print "TP parameters:"
print tp.printParameters()
# Infer
if VERBOSITY > 1:
print "============= Inference ================="
testSequence = testSequences[0]
slen = len(testSequence)
tp.collectStats = True
tp.resetStats()
if doResets:
tp.reset()
for t, x in enumerate(testSequence):
if VERBOSITY > 2:
print "Time step", t, '\nInput:', tp.printInput(x)
tp.infer(x)
if VERBOSITY > 3:
tp.printStates(printPrevious=(VERBOSITY > 4), printLearnState=False)
print
# We will exit with the confidence score for the last element
if t == slen-2:
tpNonZeros = [pattern.nonzero()[0] for pattern in allTrainingPatterns]
predictionScore2 = tp.checkPrediction2(tpNonZeros)[2]
if VERBOSITY > 0:
print "predictionScore:", predictionScore2
# The following test tests that the prediction scores for each pattern
# are within 10% of the its relative frequency. Here we check only
# the Positive Prediction Score
patternConfidenceScores = numpy.array([x[1] for x in predictionScore2])
# Normalize so that the sum is 1.0. This makes us independent of any
# potential scaling differences in the column confidence calculations of
# various TP implementations.
patternConfidenceScores /= patternConfidenceScores.sum()
msg = ('Prediction failed with predictionScore: %s. Expected %s but got %s.'
% (str(predictionScore2), str(relativeFrequencies),
str(patternConfidenceScores[4:])))
self.assertLess(abs(patternConfidenceScores[4]-relativeFrequencies[0]), 0.1,
msg=msg)
self.assertLess(abs(patternConfidenceScores[5]-relativeFrequencies[1]), 0.1,
msg=msg)
self.assertLess(abs(patternConfidenceScores[6]-relativeFrequencies[2]), 0.1,
msg=msg)
def _likelihoodTest1(self, numOnes=5, relativeFrequencies=None,
checkSynapseConsistency=True):
print "Sequence Likelihood test 1 with relativeFrequencies=",
print relativeFrequencies
trainingSet = _buildLikelihoodTrainingSet(numOnes, relativeFrequencies)
cppTp, pyTp = _createTPs(numCols=trainingSet[0][0][0].size,
checkSynapseConsistency=checkSynapseConsistency)
# Test both TP's. Currently the CPP TP has faster confidence estimation
self._testSequence(trainingSet, nSequencePresentations=200, tp=cppTp,
relativeFrequencies=relativeFrequencies)
self._testSequence(trainingSet, nSequencePresentations=500, tp=pyTp,
relativeFrequencies=relativeFrequencies)
def _likelihoodTest2(self, numOnes=5, relativeFrequencies=None,
checkSynapseConsistency=True):
print "Sequence Likelihood test 2 with relativeFrequencies=",
print relativeFrequencies
trainingSet = _buildLikelihoodTrainingSet(numOnes, relativeFrequencies)
cppTp, pyTp = _createTPs(numCols=trainingSet[0][0][0].size,
checkSynapseConsistency=checkSynapseConsistency)
# Test both TP's
for tp in [cppTp, pyTp]:
self._testSequence(trainingSet, nSequencePresentations=500, tp=tp,
relativeFrequencies=relativeFrequencies)
# Create a dataset with the same relative frequencies for testing the
# metric.
testDataSet = _createDataset(500, trainingSet[0], relativeFrequencies)
tp.collectStats = True
score, _ = _computeTPMetric(tp, testDataSet, verbosity=2)
# Create a dataset with very different relative frequencies
# This score should be lower than the one above.
testDataSet = _createDataset(500, trainingSet[0],
relativeFrequencies = [0.1, 0.1, 0.9])
score2, _ = _computeTPMetric(tp, testDataSet, verbosity=2)
self.assertLessEqual(score2, score)
def testLikelihood1Short(self):
    """Short run of likelihood test 1; synapse consistency checked only in
    LONG mode."""
    self._likelihoodTest1(
        numOnes=5,
        relativeFrequencies=[0.1, 0.7, 0.2],
        checkSynapseConsistency=LONG)
def testLikelihood1Long(self):
    """Long run of likelihood test 1 over several frequency mixes, including
    a degenerate one with a zero frequency."""
    for freqs in ([0.2, 0.5, 0.3], [0.5, 0.5, 0.0], [0.1, 0.5, 0.4]):
        self._likelihoodTest1(numOnes=5, relativeFrequencies=freqs)
def testLikelihood2Short(self):
    """Short run of likelihood test 2; synapse consistency checked only in
    LONG mode."""
    self._likelihoodTest2(
        numOnes=5,
        relativeFrequencies=[0.1, 0.7, 0.2],
        checkSynapseConsistency=LONG)
def testLikelihood2Long(self):
    """Long run of likelihood test 2 over several frequency mixes, including
    a degenerate one with a zero frequency."""
    for freqs in ([0.2, 0.5, 0.3], [0.5, 0.5, 0.0], [0.1, 0.5, 0.4]):
        self._likelihoodTest2(numOnes=5, relativeFrequencies=freqs)
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    unittest.main()
|
w4n9H/PythonSkillTree | refs/heads/master | Distributed/ProcessThread/LocalThread-2.py | 1 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: mango
@contact: w4n9@sina.com
@create: 16/7/4
hail hydra!
"""
__author__ = "mango"
__version__ = "0.1"
import threading, time
from time import sleep, ctime
def now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'.

    Returns:
        str: a 19-character timestamp for the local timezone.
    """
    # time.strftime already returns a str, so the original str() wrapper
    # was redundant and has been dropped.
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
class myThread (threading.Thread):
    """Worker thread that sleeps for a fixed interval, logging when it
    starts and when it finishes."""

    def __init__(self, nloop, nsec):
        super(myThread, self).__init__()
        self.nloop = nloop  # loop number; used only for log output
        self.nsec = nsec    # how many seconds run() sleeps

    def run(self):
        # Log, simulate work by sleeping, log again.
        print 'start loop', self.nloop, 'at:', ctime()
        sleep(self.nsec)
        print 'loop', self.nloop, 'done at:', ctime()
def main():
    """Start ten 2-second worker threads concurrently and wait for all of
    them; total wall time should be ~2s, not 20s."""
    thpool = []
    print 'starting at:', now()
    # Create all workers first so they can be started back to back.
    for i in xrange(10):
        thpool.append(myThread(i, 2))
    for th in thpool:
        th.start()
    # Block until every worker has finished before reporting completion.
    for th in thpool:
        th.join()
    print 'all Done at:', now()


if __name__ == '__main__':
    main()
Red680812/X920D | refs/heads/master | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window showing a scrollable, zoomable timeline of the
    scheduler trace.

    The drawing area is a stack of horizontal bands, one per rectangle
    reported by sched_tracer; the x axis is time in microseconds, scaled
    into pixels by self.zoom.  NOTE: this is Python 2 code — the many `/`
    divisions on ints below are integer divisions.
    """

    Y_OFFSET = 100            # pixels of headroom above the first band
    RECT_HEIGHT = 100         # height of one band
    RECT_SPACE = 50           # vertical gap between consecutive bands
    EVENT_MARKING_WIDTH = 5   # height of the event-marker strip in a band

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5                 # pixels per millisecond (see us_to_px)
        self.scroll_scale = 20          # pixels per scroll unit
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Bind the same handlers on the container too, so events are caught
        # whichever window has them.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None   # summary wx.StaticText, created lazily by update_summary

        self.Show(True)

    def us_to_px(self, val):
        # Microseconds -> pixels at the current zoom.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Pixels -> microseconds at the current zoom (inverse of us_to_px).
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Top-left corner of the visible area, in pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Left edge of the visible area, in microseconds.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw the [start, end] time span (microseconds) in band nr.

        When top_color is given, a thin EVENT_MARKING_WIDTH strip in that
        color is drawn at the top of the band and the main rectangle is
        drawn below it.  Requires self.dc to be set (see on_paint).
        """
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it sits below the marker strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to repaint everything inside [start, end]
        # (relative offsets converted back to absolute timestamps).
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible time window.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel coordinate to a band index; returns -1 when the
        # pixel is above the first band, past the last one, or in the gap
        # between two bands.
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the summary text shown below the drawing area.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Forward clicks inside a band to the tracer as (band, timestamp).
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        # Total drawable width in pixels for the traced interval.
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width for the new zoom factor and keep the
        # time position x at the left edge of the view.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom (checked via the raw key code); arrow keys scroll by
        # one scroll unit in the corresponding direction.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
|
lcnature/brainiak | refs/heads/master | tests/factoranalysis/test_htfa.py | 7 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
def test_R():
    """Constructing HTFA with no arguments must raise TypeError."""
    from brainiak.factoranalysis.htfa import HTFA
    with pytest.raises(TypeError) as err:
        HTFA()
    expected = "missing 2 required positional arguments"
    assert expected in str(err.value)
def test_X():
    """Exercise HTFA.fit() input validation for data (X) and coordinates (R).

    Each stanza below passes one malformed input and asserts that the
    expected exception type and message fragment are produced.
    """
    from brainiak.factoranalysis.htfa import HTFA
    import numpy as np
    n_voxel = 100
    n_tr = 20
    K = 5
    max_global_iter = 3
    max_local_iter = 3
    max_voxel = n_voxel
    max_tr = n_tr
    R = []
    n_subj = 2
    # Valid per-subject coordinate matrices: (n_voxel, 3) integer arrays.
    for s in np.arange(n_subj):
        R.append(np.random.randint(2, high=102, size=(n_voxel, 3)))
    htfa = HTFA(
        K,
        n_subj=n_subj,
        max_global_iter=max_global_iter,
        max_local_iter=max_local_iter,
        max_voxel=max_voxel,
        max_tr=max_tr)

    X = np.random.rand(n_voxel, n_tr)
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Input data should be a list" in str(excinfo.value)

    X = []
    # Check that does NOT run with wrong array dimension
    with pytest.raises(ValueError) as excinfo:
        htfa.fit(X, R=R)
    # NOTE: "at leat" matches a typo in the library's message; do not fix here.
    assert "Need at leat one subject to train the model" in str(excinfo.value)

    X = []
    X.append([1, 2, 3])
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "data should be an array" in str(excinfo.value)

    X = []
    X.append(np.random.rand(n_voxel))
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "subject data should be 2D array" in str(excinfo.value)

    # From here on, X is valid and the R argument is the one being abused.
    X = []
    for s in np.arange(n_subj):
        X.append(np.random.rand(n_voxel, n_tr))

    R = np.random.randint(2, high=102, size=(n_voxel, 3))
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert "Coordinates should be a list" in str(excinfo.value)

    R = []
    R.append([1, 2, 3])
    # Check that does NOT run with wrong data type
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert ("Each scanner coordinate matrix should be an array"
            in str(excinfo.value))

    R = []
    R.append(np.random.rand(n_voxel))
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert ("Each scanner coordinate matrix should be 2D array"
            in str(excinfo.value))

    R = []
    for s in np.arange(n_subj):
        R.append(np.random.rand(n_voxel - 1, 3))
    # Check that does NOT run with wrong array dimension
    with pytest.raises(TypeError) as excinfo:
        htfa.fit(X, R=R)
    assert ("n_voxel should be the same in X[idx] and R[idx]"
            in str(excinfo.value))
def test_can_run():
    """Smoke test: fit HTFA under MPI and sanity-check result shapes.

    Subjects are dealt round-robin across MPI ranks.  Rank 0 (the root)
    checks the global prior/posterior sizes; every other rank checks its
    local weights/posterior sizes.
    """
    import numpy as np
    from brainiak.factoranalysis.htfa import HTFA
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    n_voxel = 100
    n_tr = 20
    K = 5
    max_global_iter = 3
    max_local_iter = 3
    max_voxel = n_voxel
    max_tr = n_tr
    R = []
    n_subj = 2
    for s in np.arange(n_subj):
        R.append(np.random.randint(2, high=102, size=(n_voxel, 3)))
    # Round-robin: this rank keeps the coordinates of subjects idx where
    # idx % size == rank.
    my_R = []
    for idx in np.arange(n_subj):
        if idx % size == rank:
            my_R.append(R[idx])
    htfa = HTFA(
        K,
        n_subj=n_subj,
        max_global_iter=max_global_iter,
        max_local_iter=max_local_iter,
        max_voxel=max_voxel,
        max_tr=max_tr,
        verbose=True)
    assert htfa, "Invalid HTFA instance!"
    X = []
    for s in np.arange(n_subj):
        X.append(np.random.rand(n_voxel, n_tr))
    # Same round-robin split for the data itself.
    my_data = []
    for idx in np.arange(n_subj):
        if idx % size == rank:
            my_data.append(X[idx])
    if rank == 0:
        htfa.fit(my_data, R=my_R)
        assert True, "Root successfully running HTFA"
        assert htfa.global_prior_.shape[0] == htfa.prior_bcast_size,\
            "Invalid result of HTFA! (wrong # element in global_prior)"
        assert htfa.global_posterior_.shape[0] == htfa.prior_bcast_size,\
            "Invalid result of HTFA! (wrong # element in global_posterior)"
    else:
        htfa.fit(my_data, R=my_R)
        assert True, "worker successfully running HTFA"
        print(htfa.local_weights_.shape)
        assert htfa.local_weights_.shape[0] == n_tr * K,\
            "Invalid result of HTFA! (wrong # element in local_weights)"
        assert htfa.local_posterior_.shape[0] == htfa.prior_size,\
            "Invalid result of HTFA! (wrong # element in local_posterior)"
|
masters3d/coursebuilder-masters3d | refs/heads/master | common/tags.py | 4 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for custom HTML tags."""
__author__ = 'John Orr (jorr@google.com)'
import inspect
import logging
import mimetypes
import os
import pkgutil
from xml.etree import cElementTree
import html5lib
import safe_dom
import webapp2
import appengine_config
from common import schema_fields
from extensions import tags
from models import config
# Feature flag registered with the config subsystem; the text below is the
# description shown to course administrators.
CAN_USE_DYNAMIC_TAGS = config.ConfigProperty(
    'gcb_can_use_dynamic_tags', bool, safe_dom.Text(
        'Whether lesson content can make use of custom HTML tags such as '
        '<gcb-youtube videoid="...">. If this is enabled some legacy content '
        'may be rendered differently. '),
    default_value=True)

# Error strings surfaced to content authors by html_to_safe_dom().
DUPLICATE_INSTANCE_ID_MESSAGE = (
    'Error processing custom HTML tag: duplicate tag id')
INVALID_HTML_TAG_MESSAGE = 'Invalid HTML tag'
class BaseTag(object):
    """Base class for the custom HTML tags.

    Subclasses override render() to produce the tag's DOM and get_schema()
    to describe its editable attributes; name()/vendor() identify the tag
    in the editor UI.
    """

    @classmethod
    def name(cls):
        # Display name of the tag; defaults to the implementing class name.
        return cls.__name__

    @classmethod
    def vendor(cls):
        # Vendor identifier; defaults to the module defining the class.
        return cls.__module__

    @classmethod
    def required_modules(cls):
        """Lists the inputEx modules required by the editor."""
        return []

    def render(self, node, handler):  # pylint: disable=W0613
        """Receive a node and return a node.

        Args:
            node: cElementTree.Element. The DOM node for the tag which should be
                rendered.
            handler: controllers.utils.BaseHandler. The server runtime.

        Returns:
            A cElementTree.Element holding the rendered DOM.
        """
        return cElementTree.XML('<div>[Unimplemented custom tag]</div>')

    def get_icon_url(self):
        """Return the URL for the icon to be displayed in the rich text editor.

        Images should be placed in a folder called 'resources' inside the main
        package for the tag definitions.

        Returns:
            the URL for the icon to be displayed in the editor.
        """
        # Default placeholder icon inlined as a base64 data URI so that tags
        # which do not supply their own icon need no resource file.
        return """
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs
4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90EGgAIFHpT6h
8AAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAC30lEQVRo3u1ZP2sqQRCfVVGUXC
FqoZAmbSBYxFikMojBD2ErkgdC/AxpAn4A2wRMKptgCrWwSApBEG2DCidcI0gIxogXnXnFI5I87y6Jd6
seOHDN7LL7+83u/Nk5hoh/wMTCEJHMTMDGGDMzfrCAyWVL4DdCZLy72YwCxhgDIoKXlxcQRREeHx9BFE
WYTqfg9XohGAxCKBSCnZ0dcDqdhlrFEKlWq8QYIwD49ovFYjQajYiICBF17auLACLSbDaj3d3dObizsz
Nqt9v09PRE8Xhck0gul9NtONADnojI7XbPAXW73YV55XJZk8TFxcX6TuDk5GQORBAE1StxeXmpSaJery
99lWBZ69dqtQUgpVJJcW6/39cksL+/v/oTiEajC0DsdjvNZjPF+Q6HQ5PEsrJ0Huj1egs6WZbh+flZcX
4kEtFcr1KprDaRybKsqL++vlbU+/1+zfVEUVwtAZ/Pp6h/f39X1COi5nqBQGC1iaxUKine5eFwqDg/Fo
tx8QFdYfTm5uYLiPv7e0JExZD4OV/8/+3t7a0vkcmyTJIk0Xg8Vs0Dr6+vmta/vb1dbR74rTw8PKiCPz
093V8m/qmEQiFF8IeHh7oLOq4EEJGazaam5ddajf5ElKJPNps1BDxXAohIjUbjC3CPx0OTycTQfbiewO
f3QDKZ5LIHVwIf4PP5vGFXZmUErq6uCAAok8lw9TFuBFKp1LxE4GF53eX0d10KSZLg+Pj4X/+SY/ePCw
HGGIzHYzg6OuLfG+W18MHBAYTDYf7daeLRLtv2RrcE9DdvC4UC5PN5mE6n3DvGhtU+RETn5+cLxVsikT
BHIru7u1N9uKTTaS4EDItCiAhWq1V13OVywWg02lwfGA6HmuNvb2+b7cQWi8XcUUgQBPB6varjWmMbE0
Y7nY5q4VYsFs0RRvv9PgmCMI8+VquVWq0WtzBqaC308bMPAGAwGAAiqvZQt8XcthbaELGZ/AbBX0kdVa
SPB+uxAAAAAElFTkSuQmCC
"""

    def get_schema(self, unused_handler):
        """Return the list of fields which will be displayed in the editor.

        This method assembles the list of fields which will be displayed in
        the rich text editor when a user double-clicks on the icon for the tag.
        The fields are a list of SchemaField objects in a FieldRegistry
        container. Each SchemaField has the actual attribute name as used in
        the tag, the display name for the form, and the type (usually
        string).

        Returns:
            the list of fields to be displayed in the editor.
        """
        reg = schema_fields.FieldRegistry('Unimplemented Custom Tag')
        return reg

    def unavailable_schema(self, message):
        """Utility to generate a schema for a "not available" message."""
        reg = schema_fields.FieldRegistry(self.name())
        # A single read-only field carrying the message; the 'visu' hook
        # disables the editor's Save button for this tag.
        reg.add_property(
            schema_fields.SchemaField(
                'unused_id', '', 'string', optional=True,
                editable=False, extra_schema_dict_values={
                    'value': message,
                    'visu': {
                        'visuType': 'funcName',
                        'funcName': 'disableSave'}}))
        return reg
class ContextAwareTag(BaseTag):
    """A tag which shares a context with other tags of the same type."""

    class Context(object):
        """Carries the environment and other data used by the tag."""

        def __init__(self, handler, env):
            """Initialize the context.

            Args:
                handler: controllers.utils.BaseHandler. The server runtime.
                env: dict. A dict of values shared shared between instances of
                    the tag on the same page. Values stored in this dict will be
                    available to subsequent calls to render() on the same page,
                    and to the call to rollup_header_footer() made at the end of
                    the page. Use this to store things like JS library refs
                    which can be de-dup'd and put in the header or footer.
            """
            self.handler = handler
            self.env = env

    def render(self, node, context):  # pylint: disable=W0613
        """Receive a node and return a node.

        Args:
            node: cElementTree.Element. The DOM node for the tag which should be
                rendered.
            context: Context. The context shared between instances of the tag.

        Returns:
            A cElementTree.Element holding the rendered DOM.
        """
        # Default implementation delegates to the context-free base render.
        return super(ContextAwareTag, self).render(node, context.handler)

    def rollup_header_footer(self, context):
        """Roll up header and footer from data stored in the tag environment.

        This method is called once at the end of page processing. It receives
        the context object, which has been passed to all rendering methods for
        this tag on the page, and which accumulates data stored by the
        renderers.

        Args:
            context: Context. Holds data set in an environment dict by previous
                calls to render, containing, e.g., URLs of CSS or JS resources.

        Returns:
            A pair of cElementTree.Element's (header, footer).
        """
        # Abstract by convention: subclasses must override; the base class
        # returns None.
        pass
class ResourcesHandler(webapp2.RequestHandler):
    """Content handler for resources associated with custom tags."""

    def rebase_path(self, path):
        """Override this method to rebase the path to a different root."""
        return path

    def get(self):
        """Respond to HTTP GET methods.

        Serves the file named by the request path (rebased and normalized
        under BUNDLE_ROOT) with a guessed MIME type and a 10-minute public
        cache policy; responds 404 if the file cannot be read.
        """
        path = self.rebase_path(self.request.path)
        if path.startswith('/'):
            path = path[1:]
        # NOTE(review): os.path.normpath alone does not stop '..' segments
        # from escaping BUNDLE_ROOT -- confirm callers sanitize the path.
        path = os.path.normpath(path)
        resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)

        mimetype = mimetypes.guess_type(resource_file)[0]
        if mimetype is None:
            mimetype = 'application/octet-stream'

        try:
            self.response.status = 200
            self.response.headers['Content-Type'] = mimetype
            self.response.cache_control.no_cache = None
            self.response.cache_control.public = 'public'
            self.response.cache_control.max_age = 600
            # Use a context manager so the file handle is always closed;
            # the previous code leaked the handle on every request.
            with open(resource_file) as stream:
                self.response.write(stream.read())
        except IOError:
            self.error(404)
class EditorBlacklists(object):
    """Lists tags which should not be supported by various editors."""

    # Each scope is a set of tag names hidden from the corresponding editor.
    COURSE_SCOPE = set()
    ASSESSMENT_SCOPE = set()

    @classmethod
    def register(cls, tag_name, editor_set):
        """Adds tag_name to the given blacklist set."""
        editor_set.add(tag_name)

    @classmethod
    def unregister(cls, tag_name, editor_set):
        """Removes tag_name from the given blacklist set; no-op if absent."""
        # set.discard performs the membership check and removal in one step.
        editor_set.discard(tag_name)
class Registry(object):
    """A class that holds all dynamically registered tags."""

    # Maps tag name (e.g. 'gcb-youtube') to the implementing tag class.
    _bindings = {}

    @classmethod
    def add_tag_binding(cls, tag_name, clazz):
        """Registers a tag name to class binding."""
        cls._bindings[tag_name] = clazz

    @classmethod
    def remove_tag_binding(cls, tag_name):
        """Unregisters a tag binding; no-op if the tag is not registered."""
        # dict.pop with a default replaces the check-then-delete two-step.
        cls._bindings.pop(tag_name, None)

    @classmethod
    def get_all_tags(cls):
        """Returns a shallow copy of the bindings, safe for callers to mutate."""
        return dict(cls._bindings)
def get_tag_bindings():
    """Return the bindings of tag names to implementing classes.

    Tag bindings work by looking for classes which extend BaseTag and which
    belong to packages inside extensions/tags. The tag name is then composed
    from the package name and the class name, after lower-casing and separated
    with a dash. E.g., the class

        extensions.tags.gcb.YouTube

    is bound to the tag name gcb-youtube.

    Returns:
        the bindings of tag names to implementing classes.
    """
    bindings = {}
    for loader, name, ispkg in pkgutil.walk_packages(tags.__path__):
        if ispkg:
            mod = loader.find_module(name).load_module(name)
            # NOTE(review): the inner loop reuses 'name', shadowing the
            # package name from walk_packages; harmless here because the
            # outer 'name' is not read again, but confusing.
            for name, clazz in inspect.getmembers(mod, inspect.isclass):
                if issubclass(clazz, BaseTag):
                    tag_name = ('%s-%s' % (mod.__name__, name)).lower()
                    bindings[tag_name] = clazz
    # Python 2 dict merge via list concatenation; dynamically registered
    # tags (Registry) win over discovered ones on a name clash.
    return dict(bindings.items() + Registry.get_all_tags().items())
def html_string_to_element_tree(html_string):
    """Parse an HTML fragment string into a cElementTree element.

    The fragment is wrapped in a <div> so the parser returns a single root.
    """
    tree_builder = html5lib.treebuilders.getTreeBuilder('etree', cElementTree)
    parser = html5lib.HTMLParser(
        tree=tree_builder, namespaceHTMLElements=False)
    wrapped = '<div>%s</div>' % html_string
    return parser.parseFragment(wrapped)[0]
def html_to_safe_dom(html_string, handler):
    """Render HTML text as a tree of safe_dom elements.

    Custom tags found in tag bindings are rendered via their tag class;
    duplicate 'instanceid' attributes and tag exceptions are replaced by
    inline error spans rather than aborting the page.

    Args:
        html_string: str. The HTML to convert; may be empty.
        handler: controllers.utils.BaseHandler. Passed through to tag
            render() methods.

    Returns:
        A safe_dom.NodeList for the whole fragment.
    """
    tag_bindings = get_tag_bindings()

    node_list = safe_dom.NodeList()
    if not html_string:
        return node_list

    # Set of all instance id's used in this dom tree, used to detect duplication
    used_instance_ids = set([])

    # A dictionary of environments, one for each tag type which appears in the
    # page
    tag_contexts = {}

    def _generate_error_message_node_list(elt, error_message):
        """Generates a node_list representing an error message."""
        logging.error(
            '[%s, %s]: %s.', elt.tag, dict(**elt.attrib), error_message)

        node_list = safe_dom.NodeList()
        node_list.append(safe_dom.Element(
            'span', className='gcb-error-tag'
        ).add_text(error_message))
        # Preserve any trailing text that followed the failed element.
        if elt.tail:
            node_list.append(safe_dom.Text(elt.tail))
        return node_list

    def _process_html_tree(elt):
        """Recursively parses an HTML tree into a safe_dom.NodeList()."""
        # Return immediately with an error message if a duplicate instanceid is
        # detected.
        if 'instanceid' in elt.attrib:
            if elt.attrib['instanceid'] in used_instance_ids:
                return _generate_error_message_node_list(
                    elt, DUPLICATE_INSTANCE_ID_MESSAGE)
            used_instance_ids.add(elt.attrib['instanceid'])

        # Otherwise, attempt to parse this tag and all its child tags.
        original_elt = elt
        try:
            if elt.tag in tag_bindings:
                tag = tag_bindings[elt.tag]()
                if isinstance(tag, ContextAwareTag):
                    # Get or initialize a environment dict for this type of tag.
                    # Each tag type gets a separate environment shared by all
                    # instances of that tag.
                    context = tag_contexts.get(elt.tag)
                    if context is None:
                        context = ContextAwareTag.Context(handler, {})
                        tag_contexts[elt.tag] = context
                    # Render the tag
                    elt = tag.render(elt, context)
                else:
                    # Render the tag
                    elt = tag.render(elt, handler)

            if elt.tag.lower() == 'script':
                out_elt = safe_dom.ScriptElement()
            else:
                out_elt = safe_dom.Element(elt.tag)
            out_elt.add_attribute(**elt.attrib)

            if elt.text:
                out_elt.add_text(elt.text)
            for child in elt:
                out_elt.add_children(
                    _process_html_tree(child))

            node_list = safe_dom.NodeList()
            node_list.append(out_elt)
            # The tail belongs to the *original* element: a custom tag's
            # render() returns a new element without it.
            if original_elt.tail:
                node_list.append(safe_dom.Text(original_elt.tail))
            return node_list
        except Exception as e:  # pylint: disable-msg=broad-except
            logging.exception('Error handling tag: %s', elt.tag)
            return _generate_error_message_node_list(
                original_elt, '%s: %s' % (INVALID_HTML_TAG_MESSAGE, e))

    root = html_string_to_element_tree(html_string)
    if root.text:
        node_list.append(safe_dom.Text(root.text))

    for child_elt in root:
        node_list.append(_process_html_tree(child_elt))

    # After the page is processed, rollup any global header/footer data which
    # the environment-aware tags have accumulated in their env's
    for tag_name, context in tag_contexts.items():
        header, footer = tag_bindings[tag_name]().rollup_header_footer(context)
        node_list.insert(0, _process_html_tree(header))
        node_list.append(_process_html_tree(footer))

    return node_list
def get_components_from_html(html):
    """Returns a list of dicts representing the components in a lesson.

    Args:
        html: a block of html that may contain some HTML tags representing
            custom components.

    Returns:
        A list of dicts. Each dict represents one component and has two
        keys:
        - instanceid: the instance id of the component
        - cpt_name: the name of the component tag (e.g. gcb-googlegroup)
    """
    tree_builder = html5lib.treebuilders.getTreeBuilder('etree', cElementTree)
    parser = html5lib.HTMLParser(
        tree=tree_builder, namespaceHTMLElements=False)
    content = parser.parseFragment('<div>%s</div>' % html)[0]
    # Attribute values override 'cpt_name' on a clash, matching the
    # update() precedence of the previous implementation.
    return [
        dict({'cpt_name': node.tag}, **node.attrib)
        for node in content.findall('.//*[@instanceid]')]
|
luckylavish/zamboni | refs/heads/master | mkt/commonplace/tests/test_views.py | 4 | from gzip import GzipFile
import json
from StringIO import StringIO
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import mkt.site.tests
from mkt.commonplace.models import DeployBuildId
class CommonplaceTestMixin(mkt.site.tests.TestCase):
    """Mixin providing a helper that fetches a URL both gzipped and plain."""

    @mock.patch('mkt.commonplace.views.fxa_auth_info')
    def _test_url(self, url, fxa_mock, url_kwargs=None):
        """Test that the given url can be requested, returns a 200, and returns
        a valid gzipped response when requested with Accept-Encoding over ssl.
        Return the result of a regular (non-gzipped) request.

        Note: fxa_mock is injected by the mock.patch decorator above; callers
        invoke this as self._test_url(url[, url_kwargs]).
        """
        fxa_mock.return_value = ('fakestate', 'http://example.com/fakeauthurl')
        if not url_kwargs:
            url_kwargs = {}
        # Gzipped request over https.
        res = self.client.get(url, url_kwargs, HTTP_ACCEPT_ENCODING='gzip',
                              **{'wsgi.url_scheme': 'https'})
        eq_(res.status_code, 200)
        eq_(res['Content-Encoding'], 'gzip')
        eq_(sorted(res['Vary'].split(', ')),
            ['Accept-Encoding', 'Accept-Language', 'Cookie'])
        ungzipped_content = GzipFile('', 'r', 0, StringIO(res.content)).read()
        # Plain request: body must equal the gunzipped body from above.
        res = self.client.get(url, url_kwargs, **{'wsgi.url_scheme': 'https'})
        eq_(res.status_code, 200)
        eq_(sorted(res['Vary'].split(', ')),
            ['Accept-Encoding', 'Accept-Language', 'Cookie'])
        eq_(ungzipped_content, res.content)
        return res
class TestCommonplace(CommonplaceTestMixin):
    """Smoke tests for the single-page-app shell views (fireplace et al.)."""

    def test_fireplace_firefox_accounts(self):
        res = self._test_url('/server.html')
        self.assertTemplateUsed(res, 'commonplace/index.html')
        self.assertEquals(res.context['repo'], 'fireplace')
        self.assertContains(res, 'splash.css')
        self.assertNotContains(res, 'login.persona.org/include.js')
        eq_(res['Cache-Control'], 'max-age=180')
        # The FxA state/url come from the fxa_auth_info mock in _test_url.
        self.assertContains(res, 'fakestate')
        self.assertContains(res, 'http://example.com/fakeauthurl')

    def test_commbadge(self):
        res = self._test_url('/comm/')
        self.assertTemplateUsed(res, 'commonplace/index.html')
        self.assertEquals(res.context['repo'], 'commbadge')
        self.assertNotContains(res, 'splash.css')
        eq_(res['Cache-Control'], 'max-age=180')

    def test_submission(self):
        res = self._test_url('/submission/')
        self.assertTemplateUsed(res, 'commonplace/index_react.html')
        self.assertEquals(res.context['repo'], 'marketplace-submission')
        eq_(res['Cache-Control'], 'max-age=180')

    @mock.patch('mkt.commonplace.views.fxa_auth_info')
    def test_transonic(self, mock_fxa):
        mock_fxa.return_value = ('fakestate', 'http://example.com/fakeauthurl')
        res = self._test_url('/curate/')
        self.assertTemplateUsed(res, 'commonplace/index.html')
        self.assertEquals(res.context['repo'], 'transonic')
        self.assertNotContains(res, 'splash.css')
        eq_(res['Cache-Control'], 'max-age=180')

    @mock.patch('mkt.regions.middleware.RegionMiddleware.region_from_request')
    def test_region_not_included_in_fireplace_if_sim_info(self, mock_region):
        # When SIM info is present in the query string, the page must not
        # carry a geoip region.
        test_region = mock.Mock()
        test_region.slug = 'testoland'
        mock_region.return_value = test_region
        for url in ('/server.html?mccs=blah',
                    '/server.html?mcc=blah&mnc=blah'):
            res = self._test_url(url)
            ok_('geoip_region' not in res.context, url)
            self.assertNotContains(res, 'data-region')

    @mock.patch('mkt.regions.middleware.RegionMiddleware.region_from_request')
    def test_region_included_in_fireplace_if_sim_info(self, mock_region):
        # With no (or incomplete) SIM info, the geoip region is exposed to
        # the page via the data-region attribute.
        test_region = mock.Mock()
        test_region.slug = 'testoland'
        mock_region.return_value = test_region
        for url in ('/server.html?nativepersona=true',
                    '/server.html?mcc=blah',  # Incomplete info from SIM.
                    '/server.html',
                    '/server.html?'):
            res = self._test_url(url)
            self.assertEquals(res.context['geoip_region'], test_region)
            self.assertContains(res, 'data-region="testoland"')
class TestIFrames(CommonplaceTestMixin):
def setUp(self):
self.iframe_install_url = reverse('commonplace.iframe-install')
self.potatolytics_url = reverse('commonplace.potatolytics')
def _test_trailing_slashes(self, allowed_origins):
"""Utility method to test that no origin ends with a trailing slash."""
eq_(filter(lambda v: v.endswith('/'), allowed_origins), [])
@override_settings(DOMAIN='marketplace.firefox.com')
def test_basic(self):
res = self._test_url(self.iframe_install_url)
allowed_origins = json.loads(res.context['allowed_origins'])
self._test_trailing_slashes(allowed_origins)
eq_(allowed_origins,
['app://packaged.marketplace.firefox.com',
'app://marketplace.firefox.com',
'https://marketplace.firefox.com',
'app://tarako.marketplace.firefox.com',
'https://hello.firefox.com',
'https://call.firefox.com'])
res = self._test_url(self.potatolytics_url)
allowed_origins = json.loads(res.context['allowed_origins'])
self._test_trailing_slashes(allowed_origins)
eq_(allowed_origins,
['app://packaged.marketplace.firefox.com',
'app://marketplace.firefox.com',
'https://marketplace.firefox.com',
'app://tarako.marketplace.firefox.com'])
@override_settings(DOMAIN='marketplace.allizom.org')
def test_basic_stage(self):
res = self._test_url(self.iframe_install_url)
allowed_origins = json.loads(res.context['allowed_origins'])
self._test_trailing_slashes(allowed_origins)
eq_(allowed_origins,
['app://packaged.marketplace.allizom.org',
'app://marketplace.allizom.org',
'https://marketplace.allizom.org',
'app://tarako.marketplace.allizom.org',
'https://hello.firefox.com',
'https://call.firefox.com'])
res = self._test_url(self.potatolytics_url)
allowed_origins = json.loads(res.context['allowed_origins'])
self._test_trailing_slashes(allowed_origins)
eq_(allowed_origins,
['app://packaged.marketplace.allizom.org',
'app://marketplace.allizom.org',
'https://marketplace.allizom.org',
'app://tarako.marketplace.allizom.org'])
@override_settings(DOMAIN='marketplace-dev.allizom.org')
def test_basic_dev(self):
res = self._test_url(self.iframe_install_url)
allowed_origins = json.loads(res.context['allowed_origins'])
self._test_trailing_slashes(allowed_origins)
eq_(allowed_origins,
['app://packaged.marketplace-dev.allizom.org',
'app://marketplace-dev.allizom.org',
'https://marketplace-dev.allizom.org',
'app://tarako.marketplace-dev.allizom.org',
'http://localhost:8675',
'https://localhost:8675',
'http://localhost',
'https://localhost',
'http://mp.dev',
'https://mp.dev',
'https://hello.firefox.com',
'https://call.firefox.com',
'https://loop-webapp-dev.stage.mozaws.net',
'https://call.stage.mozaws.net'])
res = self._test_url(self.potatolytics_url)
allowed_origins = json.loads(res.context['allowed_origins'])
self._test_trailing_slashes(allowed_origins)
eq_(allowed_origins,
['app://packaged.marketplace-dev.allizom.org',
'app://marketplace-dev.allizom.org',
'https://marketplace-dev.allizom.org',
'app://tarako.marketplace-dev.allizom.org',
'http://localhost:8675',
'https://localhost:8675',
'http://localhost',
'https://localhost',
'http://mp.dev',
'https://mp.dev'])
@override_settings(DOMAIN='example.com', DEBUG=True)
def test_basic_debug_true(self):
    """DEBUG=True on an arbitrary domain behaves like the dev setup:
    local development origins are allowed in addition to the domain's
    own origins; Hello/Loop origins only for iframe-install."""
    res = self._test_url(self.iframe_install_url)
    allowed_origins = json.loads(res.context['allowed_origins'])
    self._test_trailing_slashes(allowed_origins)
    eq_(allowed_origins,
        ['app://packaged.example.com',
         'app://example.com',
         'https://example.com',
         'app://tarako.example.com',
         'http://localhost:8675',
         'https://localhost:8675',
         'http://localhost',
         'https://localhost',
         'http://mp.dev',
         'https://mp.dev',
         'https://hello.firefox.com',
         'https://call.firefox.com',
         'https://loop-webapp-dev.stage.mozaws.net',
         'https://call.stage.mozaws.net'])
    # potatolytics never gets the Hello/Loop origins.
    res = self._test_url(self.potatolytics_url)
    allowed_origins = json.loads(res.context['allowed_origins'])
    self._test_trailing_slashes(allowed_origins)
    eq_(allowed_origins,
        ['app://packaged.example.com',
         'app://example.com',
         'https://example.com',
         'app://tarako.example.com',
         'http://localhost:8675',
         'https://localhost:8675',
         'http://localhost',
         'https://localhost',
         'http://mp.dev',
         'https://mp.dev'])
class TestOpenGraph(mkt.site.tests.TestCase):
    """Checks the Open Graph (og:*) and description meta tags rendered by
    the Fireplace homepage and the app detail page."""

    def _get_tags(self, res):
        """Returns title, image, description."""
        doc = pq(res.content)
        return (doc('[property="og:title"]').attr('content'),
                doc('[property="og:image"]').attr('content'),
                doc('[name="description"]').attr('content'))

    def test_basic(self):
        # Homepage advertises the marketplace itself.
        res = self.client.get(reverse('commonplace.fireplace'))
        title, image, description = self._get_tags(res)
        eq_(title, 'Firefox Marketplace')
        ok_(description.startswith('The Firefox Marketplace is'))

    def test_detail(self):
        # HTML markup in the app description must be stripped from the
        # meta description, while quoted text is preserved.
        app = mkt.site.tests.app_factory(
            description='Awesome <a href="/">Home</a> "helloareyouthere"')
        res = self.client.get(reverse('detail', args=[app.app_slug]))
        title, image, description = self._get_tags(res)
        eq_(title, app.name)
        eq_(image, app.get_icon_url(64))
        # NOTE(review): the expected markup below looks like it lost HTML
        # entity escaping during extraction -- verify against the template.
        ok_('<meta name="description" '
            'content="Awesome Home "helloareyouthere"">'
            in res.content)
        eq_(description, 'Awesome Home "helloareyouthere"')

    def test_detail_dne(self):
        # Unknown slug falls back to the generic marketplace tags.
        res = self.client.get(reverse('detail', args=['DO NOT EXISTS']))
        title, image, description = self._get_tags(res)
        eq_(title, 'Firefox Marketplace')
        ok_(description.startswith('The Firefox Marketplace is'))
class TestBuildId(CommonplaceTestMixin):
    """Verifies that fireplace <script> URLs are cache-busted with the
    deploy build id (?b=...), sourced from the DB or a build_id file."""

    def test_build_id_from_db(self):
        DeployBuildId.objects.create(repo='fireplace', build_id='0118999')
        res = self._test_url('/server.html')
        doc = pq(res.content)
        scripts = doc('script')
        # NOTE(review): if no script src contains 'fireplace' this loop
        # asserts nothing, and inline scripts (src=None) would raise --
        # consider asserting at least one match.
        for script in scripts:
            src = pq(script).attr('src')
            if 'fireplace' in src:
                ok_(src.endswith('?b=0118999'))

    @mock.patch('mkt.commonplace.views.local_storage')
    def test_fallback_to_build_id_txt(self, storage_mock):
        # With no DeployBuildId row, the view reads the build id from
        # storage (mocked here to return the same id).
        storage_mock.open = mock.mock_open(read_data='0118999')
        res = self._test_url('/server.html')
        doc = pq(res.content)
        scripts = doc('script')
        for script in scripts:
            src = pq(script).attr('src')
            if 'fireplace' in src:
                ok_(src.endswith('?b=0118999'))
class TestLangAttrs(CommonplaceTestMixin):
    """Checks the lang/dir attributes rendered on the <html> element for
    various ?lang= values."""

    def test_lang_en(self):
        # Default language with no ?lang= parameter.
        res = self._test_url('/server.html')
        doc = pq(res.content)
        html = doc('html[lang][dir]')
        eq_(html.attr('lang'), 'en-US')
        eq_(html.attr('dir'), 'ltr')

    def test_lang_fr(self):
        res = self._test_url('/server.html?lang=fr')
        doc = pq(res.content)
        html = doc('html[lang][dir]')
        eq_(html.attr('lang'), 'fr')
        eq_(html.attr('dir'), 'ltr')

    @override_settings(LANGUAGE_URL_MAP={'ar': 'ar'})
    def test_lang_ar(self):
        # Arabic should render right-to-left.
        res = self._test_url('/server.html?lang=ar')
        doc = pq(res.content)
        html = doc('html[lang][dir]')
        eq_(html.attr('lang'), 'ar')
        eq_(html.attr('dir'), 'rtl')

    @override_settings(LANGUAGE_URL_MAP={'rtl': 'rtl'})
    def test_lang_rtl(self):
        # The 'rtl' locale should force right-to-left rendering.
        res = self._test_url('/server.html?lang=rtl')
        doc = pq(res.content)
        html = doc('html[lang][dir]')
        eq_(html.attr('lang'), 'rtl')
        eq_(html.attr('dir'), 'rtl')
|
kdwink/intellij-community | refs/heads/master | python/testData/stubs/MetaClass.py | 80 | class M(type):
pass
__metaclass__ = M
class C(object):
__metaclass__ = type
class D(object):
pass
|
freenas/samba | refs/heads/freenas/master | source4/torture/drs/python/cracknames.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Catalyst .Net Ltd 2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.tests
import ldb
import drs_base
from samba.dcerpc import drsuapi
class DrsCracknamesTestCase(drs_base.DrsBaseTestCase):
    """Tests for the drsuapi DsCrackNames call against DC1.

    setUp creates a throwaway OU and user; the tests exercise name
    translation between the MS-DRSR DS_NAME_FORMAT values and the error
    statuses returned for multi-valued and missing attributes.
    """

    def setUp(self):
        super(DrsCracknamesTestCase, self).setUp()
        (self.drs, self.drs_handle) = self._ds_bind(self.dnsname_dc1)

        self.ou = "ou=Cracknames_ou,%s" % self.ldb_dc1.get_default_basedn()
        self.username = "Cracknames_user"
        self.user = "cn=%s,%s" % (self.username, self.ou)
        self.ldb_dc1.add({
            "dn": self.ou,
            "objectclass": "organizationalUnit"})

        self.user_record = {
            "dn": self.user,
            "objectclass": "user",
            "sAMAccountName": self.username,
            "userPrincipalName": "test@test.com",
            "servicePrincipalName": "test/%s" % self.ldb_dc1.get_default_basedn(),
            "displayName": "test"}

        # NOTE(review): the add/delete/re-add sequence looks deliberate
        # (presumably exercising lookups against a recreated object) --
        # confirm before simplifying.
        self.ldb_dc1.add(self.user_record)
        self.ldb_dc1.delete(self.user_record["dn"])
        self.ldb_dc1.add(self.user_record)

        # The formats specified in MS-DRSR 4.1.4.13; DS_NAME_FORMAT
        # We don't support any of the ones specified in 4.1.4.1.2.
        self.formats = {
            drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
            drsuapi.DRSUAPI_DS_NAME_FORMAT_NT4_ACCOUNT,
            drsuapi.DRSUAPI_DS_NAME_FORMAT_DISPLAY,
            drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
            drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL,
            drsuapi.DRSUAPI_DS_NAME_FORMAT_USER_PRINCIPAL,
            drsuapi.DRSUAPI_DS_NAME_FORMAT_CANONICAL_EX,
            drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL,
            # We currently don't support this
            # drsuapi.DRSUAPI_DS_NAME_FORMAT_SID_OR_SID_HISTORY,
            # This format is not supported by Windows (or us)
            # drsuapi.DRSUAPI_DS_NAME_FORMAT_DNS_DOMAIN,
        }

    def tearDown(self):
        self.ldb_dc1.delete(self.user)
        self.ldb_dc1.delete(self.ou)
        super(DrsCracknamesTestCase, self).tearDown()

    def test_Cracknames(self):
        """
        Verifies that we can cracknames any of the standard formats
        (DS_NAME_FORMAT) to a GUID, and that we can cracknames a
        GUID to any of the standard formats.

        GUID was chosen just so that we don't have to do an n^2 loop.
        """
        # DN -> GUID first, so the GUID can anchor the round trips below.
        (result, ctr) = self._do_cracknames(self.user,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)

        self.assertEqual(ctr.count, 1)
        self.assertEqual(ctr.array[0].status,
                         drsuapi.DRSUAPI_DS_NAME_STATUS_OK)

        user_guid = ctr.array[0].result_name

        for name_format in self.formats:
            # GUID -> name_format ...
            (result, ctr) = self._do_cracknames(user_guid,
                                                drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
                                                name_format)

            self.assertEqual(ctr.count, 1)
            self.assertEqual(ctr.array[0].status,
                             drsuapi.DRSUAPI_DS_NAME_STATUS_OK,
                             "Expected 0, got %s, desired format is %s"
                             % (ctr.array[0].status, name_format))

            # ... and back again: name_format -> GUID.
            (result, ctr) = self._do_cracknames(ctr.array[0].result_name,
                                                name_format,
                                                drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)

            self.assertEqual(ctr.count, 1)
            self.assertEqual(ctr.array[0].status,
                             drsuapi.DRSUAPI_DS_NAME_STATUS_OK,
                             "Expected 0, got %s, offered format is %s"
                             % (ctr.array[0].status, name_format))

    def test_MultiValuedAttribute(self):
        """
        Verifies that, if we try and cracknames with the desired output
        being a multi-valued attribute, it returns
        DRSUAPI_DS_NAME_STATUS_NOT_UNIQUE.
        """
        username = "Cracknames_user_MVA"
        user = "cn=%s,%s" % (username, self.ou)
        # Two SPNs: asking for the SPN of this object is ambiguous.
        user_record = {
            "dn": user,
            "objectclass": "user",
            "sAMAccountName": username,
            "userPrincipalName": "test2@test.com",
            "servicePrincipalName": ["test2/%s" % self.ldb_dc1.get_default_basedn(),
                                     "test3/%s" % self.ldb_dc1.get_default_basedn()],
            "displayName": "test2"}

        self.ldb_dc1.add(user_record)

        (result, ctr) = self._do_cracknames(user,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)

        self.assertEqual(ctr.count, 1)
        self.assertEqual(ctr.array[0].status,
                         drsuapi.DRSUAPI_DS_NAME_STATUS_OK)

        user_guid = ctr.array[0].result_name

        (result, ctr) = self._do_cracknames(user_guid,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL)

        self.assertEqual(ctr.count, 1)
        self.assertEqual(ctr.array[0].status,
                         drsuapi.DRSUAPI_DS_NAME_STATUS_NOT_UNIQUE)

        self.ldb_dc1.delete(user)

    def test_NoSPNAttribute(self):
        """
        Verifies that, if we try and cracknames with the desired output
        being an SPN for a user that has none, it returns
        DRSUAPI_DS_NAME_STATUS_NOT_FOUND.
        """
        username = "Cracknames_no_SPN"
        user = "cn=%s,%s" % (username, self.ou)
        # Note: no servicePrincipalName attribute on this record.
        user_record = {
            "dn": user,
            "objectclass": "user",
            "sAMAccountName": username,
            "userPrincipalName": "test4@test.com",
            "displayName": "test4"}

        self.ldb_dc1.add(user_record)

        (result, ctr) = self._do_cracknames(user,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_FQDN_1779,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID)

        self.assertEqual(ctr.count, 1)
        self.assertEqual(ctr.array[0].status,
                         drsuapi.DRSUAPI_DS_NAME_STATUS_OK)

        user_guid = ctr.array[0].result_name

        (result, ctr) = self._do_cracknames(user_guid,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_GUID,
                                            drsuapi.DRSUAPI_DS_NAME_FORMAT_SERVICE_PRINCIPAL)

        self.assertEqual(ctr.count, 1)
        self.assertEqual(ctr.array[0].status,
                         drsuapi.DRSUAPI_DS_NAME_STATUS_NOT_FOUND)

        self.ldb_dc1.delete(user)

    def _do_cracknames(self, name, format_offered, format_desired):
        """Issue a single-name DsCrackNames request; return (result, ctr)."""
        req = drsuapi.DsNameRequest1()
        names = drsuapi.DsNameString()
        names.str = name

        req.codepage = 1252  # Windows-1252 (Western European); value doesn't really matter here
        req.language = 1033  # en-US
        req.format_flags = 0
        req.format_offered = format_offered
        req.format_desired = format_desired
        req.count = 1
        req.names = [names]

        (result, ctr) = self.drs.DsCrackNames(self.drs_handle, 1, req)
        return (result, ctr)
|
jossef/vmwc | refs/heads/master | examples/virtual-switches-create-new.py | 1 | #!/usr/bin/env python
from vmwc import VMWareClient
def main():
    """Connect to an ESXi host and create a virtual switch whose security
    settings are suitable for traffic recording."""
    esx_host = '192.168.1.1'
    esx_username = '<username>'
    esx_password = '<password>'
    switch_name = 'Custom Network 1'

    with VMWareClient(esx_host, esx_username, esx_password) as client:
        # Good configuration for traffic recording
        client.new_virtual_switch(
            switch_name,
            allow_promiscuous=True,
            allow_mac_changes=True,
            allow_forged_transmits=True,
        )


if __name__ == '__main__':
    main()
|
underbluewaters/marinemap | refs/heads/master | lingcod/raster_stats/tests.py | 3 | from lingcod.common.test_settings_manager import SettingsTestCase as TestCase
from lingcod.raster_stats.models import ZonalStatsCache, RasterDataset, zonal_stats, clear_cache
from django.contrib.gis.gdal.datasource import DataSource
from django.core import serializers
import os
import sys
def test_data():
    """Load the continuous test raster and the four test polygons.

    Returns a (RasterDataset, list of GEOSGeometry) tuple. The shapefile
    must contain exactly 4 features.
    """
    rastpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_data/impact.tif')
    rast, created = RasterDataset.objects.get_or_create(name="test_impact",filepath=rastpath,type='continuous')
    polygons = []
    shp = os.path.join(os.path.dirname(__file__), 'test_data/shapes.shp')
    ds = DataSource(shp)
    lyr = ds[0]
    assert len(lyr) == 4
    for feat in lyr:
        polygons.append(feat.geom.geos)
    # Drop references to the GDAL layer/datasource handles before returning.
    del(lyr)
    del(ds)
    return rast, polygons
def test_categorical_data():
    """Load the categorical test raster and its test polygon(s).

    Returns a (RasterDataset, list of GEOSGeometry) tuple.
    """
    rastpath = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'test_data/landuse.tif')
    rast, created = RasterDataset.objects.get_or_create(name="test_landuse",filepath=rastpath,type='categorical')
    polygons = []
    shp = os.path.join(os.path.dirname(__file__), 'test_data/poly1.shp')
    ds = DataSource(shp)
    lyr = ds[0]
    for feat in lyr:
        polygons.append(feat.geom.geos)
    # Drop references to the GDAL layer/datasource handles before returning.
    del(lyr)
    del(ds)
    return rast, polygons
class ZonalTest(TestCase):
    """Exercises zonal_stats() with polygons that are fully on, fully off,
    and partially off the continuous test raster, plus the cache flags."""

    def setUp(self):
        # Start from an empty cache so cache hits/misses are predictable.
        clear_cache()
        self.rast, self.polygons = test_data()

    def test_zonal_util(self):
        """
        Tests that starspan works and stuff
        """
        # Polygon fully on the raster: shouldnt have any nulls
        zonal = zonal_stats(self.polygons[0], self.rast)
        self.assertEqual(zonal.nulls,0)
        # doesnt even touch the raster, all should be null
        zonal = zonal_stats(self.polygons[1], self.rast)
        self.assertEqual(zonal.pixels,None)
        # Partly on and partly off the raster
        # no nulls but pixel count should be low
        zonal = zonal_stats(self.polygons[2], self.rast)
        self.assertEqual(zonal.nulls,0)
        self.assertEqual(zonal.pixels,225)
        # All on the raster but should have nulls
        zonal = zonal_stats(self.polygons[3], self.rast)
        self.assertEqual(zonal.nulls,279)

    def test_caching(self):
        """
        Test that the caching mechanism works and we can turn it on/off
        """
        clear_cache()
        self.assertEqual( len(ZonalStatsCache.objects.all()), 0)
        # First computation is a cache miss and populates the cache.
        zonal = zonal_stats(self.polygons[0], self.rast)
        self.assertEqual( zonal.from_cache, False)
        self.assertEqual( len(ZonalStatsCache.objects.all()), 1)
        # The same query again is served from the cache.
        zonal = zonal_stats(self.polygons[0], self.rast)
        self.assertEqual( zonal.from_cache, True)
        self.assertEqual( len(ZonalStatsCache.objects.all()), 1)
        # read_cache=False forces recomputation; the row count is unchanged.
        zonal = zonal_stats(self.polygons[0], self.rast, read_cache=False)
        self.assertEqual( zonal.from_cache, False)
        self.assertEqual( len(ZonalStatsCache.objects.all()), 1)
        # write_cache=False computes without storing a new row.
        zonal = zonal_stats(self.polygons[3], self.rast, write_cache=False)
        self.assertEqual( zonal.from_cache, False)
        self.assertEqual( len(ZonalStatsCache.objects.all()), 1)
        # Default flags store the new polygon's stats...
        zonal = zonal_stats(self.polygons[3], self.rast)
        self.assertEqual( zonal.from_cache, False)
        self.assertEqual( len(ZonalStatsCache.objects.all()), 2)
        # ...and subsequent calls hit the cache.
        zonal = zonal_stats(self.polygons[3], self.rast)
        self.assertEqual( zonal.from_cache, True)
        self.assertEqual( len(ZonalStatsCache.objects.all()), 2)
        clear_cache()
        self.assertEqual( len(ZonalStatsCache.objects.all()), 0)
self.assertEqual( len(ZonalStatsCache.objects.all()), 0)
class ZonalWebServiceTest(TestCase):
    """Checks that the raster_stats web service agrees with the
    zonal_stats() utility for the same polygon."""
    urls = 'lingcod.raster_stats.urls'

    def setUp(self):
        clear_cache()
        self.rast, self.polygons = test_data()

    def test_webservice(self):
        data = {'geom_txt': self.polygons[0].wkt}
        #self.settings_manager.set(ROOT_URLCONF = 'lingcod.raster_stats.urls')
        response = self.client.get('/test_impact/', data)
        self.failUnlessEqual(response.status_code, 200)
        # The response is a JSON-serialized queryset; grab the last object.
        for obj in serializers.deserialize("json", response.content):
            web_zonal = obj.object
        # Recompute without the cache and compare averages.
        util_zonal = zonal_stats(self.polygons[0], self.rast, read_cache=False)
        self.failUnlessEqual(web_zonal.avg, util_zonal.avg)
class ZonalCategoriesTest(TestCase):
    """Checks per-category pixel counts for a categorical raster."""

    def setUp(self):
        clear_cache()
        self.rast, self.polygons = test_categorical_data()

    def test_categories(self):
        zonal = zonal_stats(self.polygons[0], self.rast)
        # The per-category counts must add up to the total pixel count.
        sumpix = 0
        for zc in zonal.categories.all():
            sumpix += zc.count
        self.assertEqual(zonal.pixels, sumpix)
|
tuxinhang1989/mezzanine | refs/heads/master | mezzanine/blog/management/__init__.py | 12133432 | |
jeremy-bernon/Lilith | refs/heads/master | lilith/internal/__init__.py | 12133432 | |
GhostThrone/django | refs/heads/master | django/utils/termcolors.py | 46 | """
termcolors.py
"""
from django.utils import six
# The eight base ANSI colors, in standard SGR code order (0-7).
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
# SGR codes: '3x' selects foreground color x, '4x' selects background color x.
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}

RESET = '0'
# SGR codes for the supported display options.
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}


def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # dict.items() behaves identically on Python 2 and 3 here; the six
    # indirection was unnecessary.
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        # Auto-terminate with RESET so later output is unaffected.
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
    """
    Returns a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        # Delegate to colorize() with the pre-bound options and colors.
        return colorize(text, opts, **kwargs)
    return style_func
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'

# Maps palette name -> {role: colorize() keyword arguments}. The 'nocolor'
# palette also serves as the canonical list of valid roles.
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'SUCCESS': {},
        'WARNING': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
        'MIGRATE_HEADING': {},
        'MIGRATE_LABEL': {},
        'MIGRATE_SUCCESS': {},
        'MIGRATE_FAILURE': {},
    },
    DARK_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'yellow'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green'},
        'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'yellow'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
        'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
    },
    LIGHT_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'blue'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
        'HTTP_NOT_MODIFIED': {'fg': 'green'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'red'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
        'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
    }
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_bad_request',
        'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()

            styles = instructions.split(',')
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict)
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
|
xiaoxq/apollo | refs/heads/master | modules/tools/dump_gpsbin/dump_gpsbin.py | 3 | #!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Extract messages of gps topic from data record file,
and save them into specified binary file
Usage:
dump_gpsbin.py --input_file=a.record --output_dir=dir
See the gflags for more optional args.
"""
import os
import sys
import time
import gflags
import glog
from cyber.python.cyber_py3 import cyber
from cyber.python.cyber_py3 import record
from modules.drivers.gnss.proto.gnss_pb2 import RawData
# Required flags.
gflags.DEFINE_string('input_file', None, 'Input record file path.')

# Optional flags.
gflags.DEFINE_string('output_dir', './', 'Output directory path.')

# Stable flags which rarely change.
gflags.DEFINE_string('gps_raw_data_channel',
                     '/apollo/sensor/gnss/raw_data',
                     'gps raw data channel.')
def process_record_file(args):
    """Read record file and extract the message with specified channels

    Args:
        args: parsed gflags namespace; uses input_file, output_dir and
            gps_raw_data_channel.

    Concatenates the raw gnss payloads into <output_dir>/gpsimu.bin.
    """
    freader = record.RecordReader(args.input_file)
    glog.info('#processing record file {}'.format(args.input_file))
    # NOTE(review): purpose of this pause is unclear -- presumably gives the
    # reader time to open the record; confirm before removing.
    time.sleep(1)

    output_file = os.path.join(args.output_dir, 'gpsimu.bin')
    with open(output_file, 'wb') as outfile:
        for channel, message, _type, _timestamp in freader.read_messages():
            if channel == args.gps_raw_data_channel:
                raw_data = RawData()
                raw_data.ParseFromString(message)
                # Only the raw byte payload is written, not the proto wrapper.
                outfile.write(raw_data.data)
def main():
    """Entry point: parse command-line flags and dump the gps raw data."""
    flags = gflags.FLAGS
    flags(sys.argv)
    process_record_file(flags)


if __name__ == '__main__':
    main()
|
antoinecarme/pyaf | refs/heads/master | tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingAverage_Seasonal_MonthOfYear_NoAR.py | 1 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['MovingAverage'] , ['Seasonal_MonthOfYear'] , ['NoAR'] ); |
rismalrv/edx-platform | refs/heads/master | common/test/acceptance/tests/lms/test_library.py | 92 | # -*- coding: utf-8 -*-
"""
End-to-end tests for LibraryContent block in LMS
"""
import ddt
import textwrap
from nose.plugins.attrib import attr
from ..helpers import UniqueCourseTest, TestWithSearchIndexMixin
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.library import StudioLibraryContentEditor, StudioLibraryContainerXBlockWrapper
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.library import LibraryContentXBlockWrapper
from ...pages.common.logout import LogoutPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.library import LibraryFixture
# Display names used when building the course outline fixture.
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
@attr('shard_3')
class LibraryContentTestBase(UniqueCourseTest):
    """ Base class for library content block tests """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    STAFF_USERNAME = "STAFF_TESTER"
    STAFF_EMAIL = "staff101@example.com"

    def populate_library_fixture(self, library_fixture):
        """
        To be overwritten by subclassed tests. Used to install a library to
        run tests on.
        """

    def setUp(self):
        """
        Set up library, course and library content XBlock
        """
        super(LibraryContentTestBase, self).setUp()

        self.courseware_page = CoursewarePage(self.browser, self.course_id)

        self.course_outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        self.library_fixture = LibraryFixture('test_org', self.unique_id, 'Test Library {}'.format(self.unique_id))
        self.populate_library_fixture(self.library_fixture)

        self.library_fixture.install()
        self.library_info = self.library_fixture.library_info
        self.library_key = self.library_fixture.library_key

        # Install a course with library content xblock
        self.course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        # Default: draw 1 random, unscored child from the library.
        library_content_metadata = {
            'source_library_id': unicode(self.library_key),
            'mode': 'random',
            'max_count': 1,
            'has_score': False
        }

        self.lib_block = XBlockFixtureDesc('library_content', "Library Content", metadata=library_content_metadata)

        self.course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
                        self.lib_block
                    )
                )
            )
        )
        self.course_fixture.install()

    def _change_library_content_settings(self, count=1, capa_type=None):
        """
        Performs library block refresh in Studio, configuring it to show {count} children
        """
        unit_page = self._go_to_unit_page(True)
        library_container_block = StudioLibraryContainerXBlockWrapper.from_xblock_wrapper(unit_page.xblocks[1])
        library_container_block.edit()
        editor = StudioLibraryContentEditor(self.browser, library_container_block.locator)
        editor.count = count
        if capa_type is not None:
            editor.capa_type = capa_type
        editor.save()
        # Publish the unit so the change is visible in the LMS.
        self._go_to_unit_page(change_login=False)
        unit_page.wait_for_page()
        unit_page.publish_action.click()
        unit_page.wait_for_ajax()
        self.assertIn("Published and Live", unit_page.publish_title)

    @property
    def library_xblocks_texts(self):
        """
        Gets texts of all xblocks in library
        """
        return frozenset(child.data for child in self.library_fixture.children)

    def _go_to_unit_page(self, change_login=True):
        """
        Open unit page in Studio
        """
        if change_login:
            # Re-authenticate as staff before entering Studio.
            LogoutPage(self.browser).visit()
            self._auto_auth(self.STAFF_USERNAME, self.STAFF_EMAIL, True)
        self.course_outline.visit()
        subsection = self.course_outline.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        return subsection.expand_subsection().unit(UNIT_NAME).go_to()

    def _goto_library_block_page(self, block_id=None):
        """
        Open library page in LMS
        """
        self.courseware_page.visit()
        paragraphs = self.courseware_page.q(css='.course-content p')
        if paragraphs and "You were most recently in" in paragraphs.text[0]:
            # Dismiss the "resume course" interstitial if present.
            paragraphs[0].find_element_by_tag_name('a').click()
        block_id = block_id if block_id is not None else self.lib_block.locator
        #pylint: disable=attribute-defined-outside-init
        self.library_content_page = LibraryContentXBlockWrapper(self.browser, block_id)
        self.library_content_page.wait_for_page()

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.
        """
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()
@ddt.ddt
@attr('shard_3')
class LibraryContentTest(LibraryContentTestBase):
    """
    Test courseware.
    """
    def populate_library_fixture(self, library_fixture):
        """
        Populates library fixture with XBlock Fixtures
        """
        library_fixture.add_children(
            XBlockFixtureDesc("html", "Html1", data='html1'),
            XBlockFixtureDesc("html", "Html2", data='html2'),
            XBlockFixtureDesc("html", "Html3", data='html3'),
        )

    @ddt.data(1, 2, 3)
    def test_shows_random_xblocks_from_configured(self, count):
        """
        Scenario: Ensures that library content shows {count} random xblocks from library in LMS
        Given I have a library, a course and a LibraryContent block in that course
        When I go to studio unit page for library content xblock as staff
        And I set library content xblock to display {count} random children
        And I refresh library content xblock and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see {count} random xblocks from the library
        """
        self._change_library_content_settings(count=count)
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self._goto_library_block_page()
        children_contents = self.library_content_page.children_contents
        self.assertEqual(len(children_contents), count)
        # Shown children must be a subset of the library's blocks.
        self.assertLessEqual(children_contents, self.library_xblocks_texts)

    def test_shows_all_if_max_set_to_greater_value(self):
        """
        Scenario: Ensures that library content shows all xblocks from library in LMS
        Given I have a library, a course and a LibraryContent block in that course
        When I go to studio unit page for library content xblock as staff
        And I set library content xblock to display more children than library have
        And I refresh library content xblock and publish unit
        When I go to LMS courseware page for library content xblock as student
        Then I can see all xblocks from the library
        """
        self._change_library_content_settings(count=10)
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        self._goto_library_block_page()
        children_contents = self.library_content_page.children_contents
        self.assertEqual(len(children_contents), 3)
        self.assertEqual(children_contents, self.library_xblocks_texts)
@ddt.ddt
@attr('shard_3')
class StudioLibraryContainerCapaFilterTest(LibraryContentTestBase, TestWithSearchIndexMixin):
"""
Test Library Content block in LMS
"""
def setUp(self):
    """ SetUp method """
    # The search index must exist before the base class installs fixtures.
    self._create_search_index()
    super(StudioLibraryContainerCapaFilterTest, self).setUp()

def tearDown(self):
    self._cleanup_index_file()
    super(StudioLibraryContainerCapaFilterTest, self).tearDown()

def _get_problem_choice_group_text(self, name, items):
    """ Generates Choice Group CAPA problem XML """
    # items: iterable of (choice_text, is_correct) pairs.
    items_text = "\n".join([
        "<choice correct='{correct}'>{item}</choice>".format(correct=correct, item=item)
        for item, correct in items
    ])

    return textwrap.dedent("""
    <problem>
    <p>{name}</p>
    <multiplechoiceresponse>
    <choicegroup label="{name}" type="MultipleChoice">{items}</choicegroup>
    </multiplechoiceresponse>
    </problem>""").format(name=name, items=items_text)

def _get_problem_select_text(self, name, items, correct):
    """ Generates Select Option CAPA problem XML """
    # items: option labels; correct: the label of the right answer.
    items_text = ",".join(["'{0}'".format(item) for item in items])

    return textwrap.dedent("""
    <problem>
    <p>{name}</p>
    <optionresponse>
    <optioninput label="{name}" options="({options})" correct="{correct}"></optioninput>
    </optionresponse>
    </problem>""").format(name=name, options=items_text, correct=correct)

def populate_library_fixture(self, library_fixture):
    """
    Populates library fixture with XBlock Fixtures
    """
    # Two "Choice Group" and two "Select Option" problems, so the capa_type
    # filter can be exercised per problem type.
    items = (
        XBlockFixtureDesc(
            "problem", "Problem Choice Group 1",
            data=self._get_problem_choice_group_text("Problem Choice Group 1 Text", [("1", False), ('2', True)])
        ),
        XBlockFixtureDesc(
            "problem", "Problem Choice Group 2",
            data=self._get_problem_choice_group_text("Problem Choice Group 2 Text", [("Q", True), ('W', False)])
        ),
        XBlockFixtureDesc(
            "problem", "Problem Select 1",
            data=self._get_problem_select_text("Problem Select 1 Text", ["Option 1", "Option 2"], "Option 1")
        ),
        XBlockFixtureDesc(
            "problem", "Problem Select 2",
            data=self._get_problem_select_text("Problem Select 2 Text", ["Option 3", "Option 4"], "Option 4")
        ),
    )

    library_fixture.add_children(*items)

@property
def _problem_headers(self):
    """ Expected XBLock headers according to populate_library_fixture """
    return frozenset(child.display_name.upper() for child in self.library_fixture.children)

def _set_library_content_settings(self, count=1, capa_type="Any Type"):
    """
    Sets library content XBlock parameters, saves, publishes unit, goes to LMS unit page and
    gets children XBlock headers to assert against them
    """
    self._change_library_content_settings(count=count, capa_type=capa_type)
    self._auto_auth(self.USERNAME, self.EMAIL, False)
    self._goto_library_block_page()
    return self.library_content_page.children_headers
def test_problem_type_selector(self):
"""
Scenario: Ensure setting "Any Type" for Problem Type does not filter out Problems
Given I have a library with two "Select Option" and two "Choice Group" problems, and a course containing
LibraryContent XBlock configured to draw XBlocks from that library
When I set library content xblock Problem Type to "Any Type" and Count to 3 and publish unit
When I go to LMS courseware page for library content xblock as student
Then I can see 3 xblocks from the library of any type
When I set library content xblock Problem Type to "Choice Group" and Count to 1 and publish unit
When I go to LMS courseware page for library content xblock as student
Then I can see 1 xblock from the library of "Choice Group" type
When I set library content xblock Problem Type to "Select Option" and Count to 2 and publish unit
When I go to LMS courseware page for library content xblock as student
Then I can see 2 xblock from the library of "Select Option" type
When I set library content xblock Problem Type to "Matlab" and Count to 2 and publish unit
When I go to LMS courseware page for library content xblock as student
Then I can see 0 xblocks from the library
"""
children_headers = self._set_library_content_settings(count=3, capa_type="Any Type")
self.assertEqual(len(children_headers), 3)
self.assertLessEqual(children_headers, self._problem_headers)
# Choice group test
children_headers = self._set_library_content_settings(count=1, capa_type="Multiple Choice")
self.assertEqual(len(children_headers), 1)
self.assertLessEqual(
children_headers,
set([header.upper() for header in ["Problem Choice Group 1", "Problem Choice Group 2"]])
)
# Choice group test
children_headers = self._set_library_content_settings(count=2, capa_type="Dropdown")
self.assertEqual(len(children_headers), 2)
self.assertEqual(
children_headers,
set([header.upper() for header in ["Problem Select 1", "Problem Select 2"]])
)
# Missing problem type test
children_headers = self._set_library_content_settings(count=2, capa_type="Custom Evaluated Script")
self.assertEqual(children_headers, set())
|
allenp/odoo | refs/heads/9.0 | addons/hw_posbox_homepage/__init__.py | 502 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import controllers
|
aodarc/tennis_club | refs/heads/master | members/migrations/0001_initial.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-20 19:42
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Initial migration: creates the ``Member`` model for tennis-club members.

    Validators are app-level only and do not affect the generated database
    schema, so correcting them in this migration does not desynchronize
    databases that have already applied it.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(upload_to='members/avatars/', verbose_name='Аватарка')),
                ('first_name', models.CharField(max_length=20, verbose_name="Ім'я")),
                ('last_name', models.CharField(max_length=30, verbose_name='Прізвище')),
                # NOTE(review): the default '0' is not among the declared
                # choices ('m', 'f', 'o'), so form validation of an untouched
                # default will fail. Fixing it requires a model change plus a
                # follow-up migration, so it is only flagged here — confirm.
                ('sex', models.CharField(choices=[('m', 'Чоловіча'), ('f', 'Жіноча'), ('o', 'Інша')], default='0', max_length=1, verbose_name='Стать')),
                ('birthday', models.DateField(verbose_name='Дата народження')),
                # BUG FIX: the bounds were swapped (MaxValueValidator(50) with
                # MinValueValidator(230)), which required height >= 230 and
                # <= 50 simultaneously — every value was invalid. The intended
                # range is 50-230 cm.
                ('height', models.SmallIntegerField(help_text='Зріст вказувати в СМ', validators=[django.core.validators.MinValueValidator(50), django.core.validators.MaxValueValidator(230)], verbose_name='Зріст')),
                # Bounds were already coherent (30-150 kg); reordered min-first
                # for consistency with the corrected height field.
                ('weight', models.SmallIntegerField(help_text='Вагу вказувати в КГ', validators=[django.core.validators.MinValueValidator(30), django.core.validators.MaxValueValidator(150)], verbose_name='Вага')),
                ('is_coach', models.BooleanField(default=False, verbose_name='Тренер')),
                ('is_boss', models.BooleanField(default=False, verbose_name='Керівник клубу')),
                ('rating', models.SmallIntegerField(default=0, verbose_name='Рейтинг')),
                ('games', models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0)], verbose_name='Зіграні ігри')),
            ],
        ),
    ]
|
erudit/django-plupload | refs/heads/master | plupload/__init__.py | 12133432 | |
CollabQ/CollabQ | refs/heads/master | .google_appengine/lib/django/tests/regressiontests/httpwrappers/__init__.py | 12133432 | |
sullo/webbies | refs/heads/master | lib/__init__.py | 12133432 | |
dnlcrl/TensorFlow-Playground | refs/heads/master | 2.example-apps/android/__init__.py | 12133432 | |
jagguli/intellij-community | refs/heads/master | python/testData/resolve/multiFile/stackOverflowOnEmptyFile/tornado/options.py | 12133432 | |
KarlTDebiec/md_format_converter | refs/heads/master | Mol2TrajOutput.py | 2 | # -*- coding: utf-8 -*-
# md_format_converter.Mol2TrajOutput.py
#
# Copyright (C) 2012-2016 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Manages addition of mol2 output information to segments.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
from .TrajOutput import TrajOutput
################################### CLASSES ###################################
class Mol2TrajOutput(TrajOutput):
    """
    Manages addition of mol2 output information to segments.
    """

    def __init__(self, manual_bonds=False, **kwargs):
        """
        Initializes.

        Arguments:
          manual_bonds (bool): Write bonds to mol2 manually
          kwargs (dict): additional keyword arguments
        """
        self.manual_bonds = manual_bonds
        # BUG FIX: was ``super(self.__class__, self).__init__(**kwargs)``.
        # ``self.__class__`` is the *runtime* class, so any subclass calling
        # this __init__ would re-enter it forever; name the class explicitly.
        # (Also dropped an ``import os`` that was never used here.)
        super(Mol2TrajOutput, self).__init__(**kwargs)

    def receive_segment(self, **kwargs):
        """
        Receives a trajectory segment and sends to each target.

        Arguments:
          kwargs (dict): Additional keyword arguments
        """
        import os
        while True:
            segment = yield
            segment_mol2 = "{0}/{1:04d}/{1:04d}{2}.mol2".format(self.outpath,
              int(segment.number), self.suffix)
            # Only schedule mol2 output when the file is absent or
            # regeneration is forced.
            if not os.path.isfile(segment_mol2) or self.force:
                segment.outputs.append(
                  dict(
                    format    = "mol2",
                    filename  = segment_mol2,
                    selection = self.selection,
                    first     = 0,
                    last      = 0))
                if self.manual_bonds:
                    segment.outputs[-1]["format"] = "mol2_manual_bonds"
            # Forward the segment downstream regardless of whether output
            # was scheduled, so the pipeline keeps flowing.
            for target in self.targets:
                target.send(segment)

    @staticmethod
    def add_subparser(level1_subparser, level2_subparsers, level3_classes):
        """
        Adds subparser for this input format to nascent parser.

        Arguments:
          level1_subparser (Subparser): Level 1 subparser to which level
            2 subparser will be added
          level2_subparsers (Subparsers): Nascent collection of level 2
            subparsers to which level 2 subparser will be added
          level3_classes (list): Classes for which level 3 subparsers
            will be added

        Returns:
          (*Subparser*, *Subparsers*): New level 2 subparser and
          associated collection of level 3 subparsers
        """
        level2_subparser  = level2_subparsers.add_parser(
          name  = "mol2",
          usage = "convert.py {0} mol2".format(level1_subparser.name),
          help  = "mol2 output")
        setattr(level2_subparser, "name", "mol2")
        level3_subparsers = level2_subparser.add_subparsers(
          title = "Converter")
        for level3_class in level3_classes:
            level3_subparser = level3_class.add_subparser(level1_subparser,
              level2_subparser, level3_subparsers)
            arg_groups = {ag.title: ag
              for ag in level3_subparser._action_groups}
            # --manual-bonds is only offered for the vmd converter
            if level3_subparser.name == "vmd":
                arg_groups["action"].add_argument("--manual-bonds",
                  action = "store_true",
                  dest   = "manual_bonds",
                  help   = "Write bonds to mol2 manually; useful for "
                           "topologies in which atoms are not well-ordered")
            Mol2TrajOutput.add_shared_args(level3_subparser)
            level3_subparser.set_defaults(output_coroutine=Mol2TrajOutput)
        return level2_subparser, level3_subparsers
|
anant-dev/django | refs/heads/master | tests/syndication_tests/tests.py | 90 | from __future__ import unicode_literals
import datetime
from xml.dom import minidom
from django.contrib.sites.models import Site
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.feedgenerator import (
Enclosure, SyndicationFeed, rfc2822_date, rfc3339_date,
)
from .models import Article, Entry
# pytz is an optional third-party dependency; record its absence instead of
# failing at import time (presumably consumed by timezone-dependent tests
# elsewhere in this module — not visible in this chunk).
try:
    import pytz
except ImportError:
    pytz = None

# Local default time zone, used to make the fixtures' naive datetimes aware
# when computing expected RFC 2822 / RFC 3339 date strings.
TZ = timezone.get_default_timezone()
class FeedTestCase(TestCase):
    """Shared fixture entries and XML assertion helpers for the feed tests."""

    @classmethod
    def setUpTestData(cls):
        # (attribute name, title, updated, published) — one row per entry.
        entry_rows = [
            ('e1', 'My first entry',
             datetime.datetime(1980, 1, 1, 12, 30), datetime.datetime(1986, 9, 25, 20, 15, 00)),
            ('e2', 'My second entry',
             datetime.datetime(2008, 1, 2, 12, 30), datetime.datetime(2006, 3, 17, 18, 0)),
            ('e3', 'My third entry',
             datetime.datetime(2008, 1, 2, 13, 30), datetime.datetime(2005, 6, 14, 10, 45)),
            ('e4', 'A & B < C > D',
             datetime.datetime(2008, 1, 3, 13, 30), datetime.datetime(2005, 11, 25, 12, 11, 23)),
            ('e5', 'My last entry',
             datetime.datetime(2013, 1, 20, 0, 0), datetime.datetime(2013, 3, 25, 20, 0)),
        ]
        for attr, title, updated, published in entry_rows:
            entry = Entry.objects.create(title=title, updated=updated, published=published)
            setattr(cls, attr, entry)
        cls.a1 = Article.objects.create(title='My first article', entry=cls.e1)

    def assertChildNodes(self, elem, expected):
        """Assert that elem's direct children are exactly the expected node names."""
        found = {node.nodeName for node in elem.childNodes}
        self.assertEqual(found, set(expected))

    def assertChildNodeContent(self, elem, expected):
        """Assert the text of the first matching descendant tag for every mapping entry."""
        for tag, text in expected.items():
            self.assertEqual(
                elem.getElementsByTagName(tag)[0].firstChild.wholeText, text)

    def assertCategories(self, elem, expected):
        """Assert that the texts of elem's 'category' children equal the expected set."""
        found = {
            node.firstChild.wholeText
            for node in elem.childNodes
            if node.nodeName == 'category'
        }
        self.assertEqual(found, set(expected))
@override_settings(ROOT_URLCONF='syndication_tests.urls')
class SyndicationFeedTest(FeedTestCase):
    """
    Tests for the high-level syndication feed framework.
    """
    @classmethod
    def setUpClass(cls):
        super(SyndicationFeedTest, cls).setUpClass()
        # This cleanup is necessary because contrib.sites cache
        # makes tests interfere with each other, see #11505
        Site.objects.clear_cache()

    def test_rss2_feed(self):
        """
        Test the structure and content of feeds generated by Rss201rev2Feed.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '2.0')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]

        # Find the last build date
        d = Entry.objects.latest('published').published
        last_build_date = rfc2822_date(timezone.make_aware(d, TZ))

        self.assertChildNodes(
            chan, [
                'title', 'link', 'description', 'language', 'lastBuildDate',
                'item', 'atom:link', 'ttl', 'copyright', 'category',
            ]
        )
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'description': 'A more thorough description of my blog.',
            'link': 'http://example.com/blog/',
            'language': 'en',
            'lastBuildDate': last_build_date,
            'ttl': '600',
            'copyright': 'Copyright (c) 2007, Sally Smith',
        })
        self.assertCategories(chan, ['python', 'django'])

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss2/'
        )

        # Find the pubdate of the first feed item. BUG FIX: use the fixture
        # object from setUpTestData instead of Entry.objects.get(pk=1) —
        # primary keys are not guaranteed to start at 1 on every backend.
        d = self.e1.published
        pub_date = rfc2822_date(timezone.make_aware(d, TZ))

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
            'guid': 'http://example.com/blog/1/',
            'pubDate': pub_date,
            'author': 'test@example.com (Sally Smith)',
        })
        self.assertCategories(items[0], ['python', 'testing'])
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
            # Assert that <guid> does not have any 'isPermaLink' attribute
            self.assertIsNone(item.getElementsByTagName(
                'guid')[0].attributes.get('isPermaLink'))

    def test_rss2_feed_guid_permalink_false(self):
        """
        Test if the 'isPermaLink' attribute of <guid> element of an item
        in the RSS feed is 'false'.
        """
        response = self.client.get(
            '/syndication/rss2/guid_ispermalink_false/')
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName(
            'rss')[0].getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        for item in items:
            self.assertEqual(
                item.getElementsByTagName('guid')[0].attributes.get(
                    'isPermaLink').value, "false")

    def test_rss2_feed_guid_permalink_true(self):
        """
        Test if the 'isPermaLink' attribute of <guid> element of an item
        in the RSS feed is 'true'.
        """
        response = self.client.get(
            '/syndication/rss2/guid_ispermalink_true/')
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName(
            'rss')[0].getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        for item in items:
            self.assertEqual(
                item.getElementsByTagName('guid')[0].attributes.get(
                    'isPermaLink').value, "true")

    def test_rss2_single_enclosure(self):
        """An RSS 2.0 item with one enclosure renders exactly one <enclosure>."""
        response = self.client.get('/syndication/rss2/single-enclosure/')
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName('rss')[0].getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')
        for item in items:
            enclosures = item.getElementsByTagName('enclosure')
            self.assertEqual(len(enclosures), 1)

    def test_rss2_multiple_enclosures(self):
        """RSS 2.0 rejects items carrying more than one enclosure."""
        with self.assertRaisesMessage(ValueError, (
                "RSS feed items may only have one enclosure, see "
                "http://www.rssboard.org/rss-profile#element-channel-item-enclosure"
        )):
            self.client.get('/syndication/rss2/multiple-enclosure/')

    def test_rss091_feed(self):
        """
        Test the structure and content of feeds generated by RssUserland091Feed.
        """
        response = self.client.get('/syndication/rss091/')
        doc = minidom.parseString(response.content)

        # Making sure there's only 1 `rss` element and that the correct
        # RSS version was specified.
        feed_elem = doc.getElementsByTagName('rss')
        self.assertEqual(len(feed_elem), 1)
        feed = feed_elem[0]
        self.assertEqual(feed.getAttribute('version'), '0.91')

        # Making sure there's only one `channel` element w/in the
        # `rss` element.
        chan_elem = feed.getElementsByTagName('channel')
        self.assertEqual(len(chan_elem), 1)
        chan = chan_elem[0]
        self.assertChildNodes(
            chan, [
                'title', 'link', 'description', 'language', 'lastBuildDate',
                'item', 'atom:link', 'ttl', 'copyright', 'category',
            ]
        )

        # Ensure the content of the channel is correct
        self.assertChildNodeContent(chan, {
            'title': 'My blog',
            'link': 'http://example.com/blog/',
        })
        self.assertCategories(chan, ['python', 'django'])

        # Check feed_url is passed
        self.assertEqual(
            chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
            'http://example.com/syndication/rss091/'
        )

        items = chan.getElementsByTagName('item')
        self.assertEqual(len(items), Entry.objects.count())
        self.assertChildNodeContent(items[0], {
            'title': 'My first entry',
            'description': 'Overridden description: My first entry',
            'link': 'http://example.com/blog/1/',
        })
        for item in items:
            self.assertChildNodes(item, ['title', 'link', 'description'])
            self.assertCategories(item, [])

    def test_atom_feed(self):
        """
        Test the structure and content of feeds generated by Atom1Feed.
        """
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
        self.assertChildNodes(
            feed,
            ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author']
        )
        for link in feed.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertChildNodes(entry, [
                'title',
                'link',
                'id',
                'summary',
                'category',
                'updated',
                'published',
                'rights',
                'author',
            ])
            summary = entry.getElementsByTagName('summary')[0]
            self.assertEqual(summary.getAttribute('type'), 'html')

    def test_atom_feed_published_and_updated_elements(self):
        """
        Test that the published and updated elements are not
        the same and now adhere to RFC 4287.
        """
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild
        entries = feed.getElementsByTagName('entry')

        published = entries[0].getElementsByTagName('published')[0].firstChild.wholeText
        updated = entries[0].getElementsByTagName('updated')[0].firstChild.wholeText

        self.assertNotEqual(published, updated)

    def test_atom_single_enclosure(self):
        """An Atom entry with one enclosure renders one rel="enclosure" link."""
        response = self.client.get('/syndication/atom/single-enclosure/')
        feed = minidom.parseString(response.content).firstChild
        items = feed.getElementsByTagName('entry')
        for item in items:
            links = item.getElementsByTagName('link')
            links = [link for link in links if link.getAttribute('rel') == 'enclosure']
            self.assertEqual(len(links), 1)

    def test_atom_multiple_enclosures(self):
        """Atom allows several enclosures per entry, one link element each."""
        response = self.client.get('/syndication/atom/multiple-enclosure/')
        feed = minidom.parseString(response.content).firstChild
        items = feed.getElementsByTagName('entry')
        for item in items:
            links = item.getElementsByTagName('link')
            links = [link for link in links if link.getAttribute('rel') == 'enclosure']
            self.assertEqual(len(links), 2)

    def test_latest_post_date(self):
        """
        Test that both the published and updated dates are
        considered when determining the latest post date.
        """
        # this feed has a `published` element with the latest date
        response = self.client.get('/syndication/atom/')
        feed = minidom.parseString(response.content).firstChild
        updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText

        d = Entry.objects.latest('published').published
        latest_published = rfc3339_date(timezone.make_aware(d, TZ))

        self.assertEqual(updated, latest_published)

        # this feed has an `updated` element with the latest date
        response = self.client.get('/syndication/latest/')
        feed = minidom.parseString(response.content).firstChild
        updated = feed.getElementsByTagName('updated')[0].firstChild.wholeText

        # BUG FIX: exclude the fixture entry itself rather than a hard-coded
        # pk=5 — auto primary keys are not guaranteed to start at 1.
        d = Entry.objects.exclude(pk=self.e5.pk).latest('updated').updated
        latest_updated = rfc3339_date(timezone.make_aware(d, TZ))

        self.assertEqual(updated, latest_updated)

    def test_custom_feed_generator(self):
        """A custom SyndicationFeed subclass can add attributes and elements."""
        response = self.client.get('/syndication/custom/')
        feed = minidom.parseString(response.content).firstChild

        self.assertEqual(feed.nodeName, 'feed')
        self.assertEqual(feed.getAttribute('django'), 'rocks')
        self.assertChildNodes(
            feed,
            ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author']
        )

        entries = feed.getElementsByTagName('entry')
        self.assertEqual(len(entries), Entry.objects.count())
        for entry in entries:
            self.assertEqual(entry.getAttribute('bacon'), 'yum')
            self.assertChildNodes(entry, [
                'title',
                'link',
                'id',
                'summary',
                'ministry',
                'rights',
                'author',
                'updated',
                'published',
                'category',
            ])
            summary = entry.getElementsByTagName('summary')[0]
            self.assertEqual(summary.getAttribute('type'), 'html')

    def test_title_escaping(self):
        """
        Tests that titles are escaped correctly in RSS feeds.
        """
        response = self.client.get('/syndication/rss2/')
        doc = minidom.parseString(response.content)
        for item in doc.getElementsByTagName('item'):
            link = item.getElementsByTagName('link')[0]
            if link.firstChild.wholeText == 'http://example.com/blog/4/':
                title = item.getElementsByTagName('title')[0]
                self.assertEqual(title.firstChild.wholeText, 'A &amp; B &lt; C &gt; D')

    def test_naive_datetime_conversion(self):
        """
        Test that datetimes are correctly converted to the local time zone.
        """
        # Naive date times passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
        response = self.client.get('/syndication/naive-dates/')
        doc = minidom.parseString(response.content)
        updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText

        d = Entry.objects.latest('published').published
        latest = rfc3339_date(timezone.make_aware(d, TZ))

        self.assertEqual(updated, latest)

    def test_aware_datetime_conversion(self):
        """
        Test that datetimes with timezones don't get trodden on.
        """
        response = self.client.get('/syndication/aware-dates/')
        doc = minidom.parseString(response.content)
        published = doc.getElementsByTagName('published')[0].firstChild.wholeText
        self.assertEqual(published[-6:], '+00:42')

    @requires_tz_support
    def test_feed_last_modified_time_naive_date(self):
        """
        Tests the Last-Modified header with naive publication dates.
        """
        response = self.client.get('/syndication/naive-dates/')
        self.assertEqual(response['Last-Modified'], 'Tue, 26 Mar 2013 01:00:00 GMT')

    def test_feed_last_modified_time(self):
        """
        Tests the Last-Modified header with aware publication dates.
        """
        response = self.client.get('/syndication/aware-dates/')
        self.assertEqual(response['Last-Modified'], 'Mon, 25 Mar 2013 19:18:00 GMT')

        # No last-modified when feed has no item_pubdate
        response = self.client.get('/syndication/no_pubdate/')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_feed_url(self):
        """
        Test that the feed_url can be overridden.
        """
        response = self.client.get('/syndication/feedurl/')
        doc = minidom.parseString(response.content)
        for link in doc.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')

    def test_secure_urls(self):
        """
        Test URLs are prefixed with https:// when feed is requested over HTTPS.
        """
        response = self.client.get('/syndication/rss2/', **{
            'wsgi.url_scheme': 'https',
        })
        doc = minidom.parseString(response.content)
        chan = doc.getElementsByTagName('channel')[0]
        self.assertEqual(
            chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
            'https'
        )
        atom_link = chan.getElementsByTagName('atom:link')[0]
        self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
        for link in doc.getElementsByTagName('link'):
            if link.getAttribute('rel') == 'self':
                self.assertEqual(link.getAttribute('href')[0:5], 'https')

    def test_item_link_error(self):
        """
        Test that an ImproperlyConfigured is raised if no link could be found
        for the item(s).
        """
        # Context-manager form for consistency with the other assertions in
        # this module (the positional-callable form is harder to read).
        with self.assertRaises(ImproperlyConfigured):
            self.client.get('/syndication/articles/')

    def test_template_feed(self):
        """
        Test that the item title and description can be overridden with
        templates.
        """
        response = self.client.get('/syndication/template/')
        doc = minidom.parseString(response.content)
        feed = doc.getElementsByTagName('rss')[0]
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')

        self.assertChildNodeContent(items[0], {
            'title': 'Title in your templates: My first entry\n',
            'description': 'Description in your templates: My first entry\n',
            'link': 'http://example.com/blog/1/',
        })

    def test_template_context_feed(self):
        """
        Test that custom context data can be passed to templates for title
        and description.
        """
        response = self.client.get('/syndication/template_context/')
        doc = minidom.parseString(response.content)
        feed = doc.getElementsByTagName('rss')[0]
        chan = feed.getElementsByTagName('channel')[0]
        items = chan.getElementsByTagName('item')

        self.assertChildNodeContent(items[0], {
            'title': 'My first entry (foo is bar)\n',
            'description': 'My first entry (foo is bar)\n',
        })

    def test_add_domain(self):
        """
        Test add_domain() prefixes domains onto the correct URLs.
        """
        self.assertEqual(
            views.add_domain('example.com', '/foo/?arg=value'),
            'http://example.com/foo/?arg=value'
        )
        self.assertEqual(
            views.add_domain('example.com', '/foo/?arg=value', True),
            'https://example.com/foo/?arg=value'
        )
        self.assertEqual(
            views.add_domain('example.com', 'http://djangoproject.com/doc/'),
            'http://djangoproject.com/doc/'
        )
        self.assertEqual(
            views.add_domain('example.com', 'https://djangoproject.com/doc/'),
            'https://djangoproject.com/doc/'
        )
        self.assertEqual(
            views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
            'mailto:uhoh@djangoproject.com'
        )
        self.assertEqual(
            views.add_domain('example.com', '//example.com/foo/?arg=value'),
            'http://example.com/foo/?arg=value'
        )
class FeedgeneratorTestCase(TestCase):
    """Direct tests of the low-level feed generator classes."""

    def test_add_item_warns_when_enclosure_kwarg_is_used(self):
        """The deprecated ``enclosure`` kwarg triggers a deprecation warning."""
        expected_message = 'The enclosure keyword argument is deprecated, use enclosures instead.'
        feed = SyndicationFeed(title='Example', link='http://example.com', description='Foo')
        favicon_enclosure = Enclosure('http://example.com/favicon.ico', 0, 'image/png')
        with self.assertRaisesMessage(RemovedInDjango20Warning, expected_message):
            feed.add_item(
                title='Example Item',
                link='https://example.com/item',
                description='bar',
                enclosure=favicon_enclosure,
            )
|
asimonia/pricing-alerts | refs/heads/master | src/common/__init__.py | 12133432 | |
UQ-UQx/edx-platform_lti | refs/heads/master | cms/djangoapps/contentstore/__init__.py | 12133432 | |
kawasaki2013/python-for-android-x86 | refs/heads/master | python3-alpha/extra_modules/gdata/apps/organization/__init__.py | 12133432 | |
andreaso/ansible | refs/heads/devel | lib/ansible/modules/network/iosxr/__init__.py | 12133432 | |
zhoulingjun/django | refs/heads/master | django/core/__init__.py | 12133432 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.