Dataset schema (one row per file):
repo_name — string, 5–100 chars
ref — string, 12–67 chars
path — string, 4–244 chars
copies — string, 1–8 chars
content — string, 0–1.05M chars (may be null ⌀)
|---|---|---|---|---|
jjhelmus/scipy
|
refs/heads/master
|
scipy/_lib/setup.py
|
15
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import os
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the scipy._lib package."""
    from numpy.distutils.misc_util import Configuration

    config = Configuration('_lib', parent_package, top_path)
    config.add_data_files('tests/*.py')

    # Both extensions share the ccallback.h header under src/.
    include_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
    depends = [os.path.join(include_dir, 'ccallback.h')]

    for ext_name, ext_sources in (
            ("_ccallback_c", ["_ccallback_c.c"]),
            ("_test_ccallback", ["src/_test_ccallback.c"])):
        config.add_extension(ext_name,
                             sources=ext_sources,
                             depends=depends,
                             include_dirs=[include_dir])
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
WZQ1397/automatic-repo
|
refs/heads/master
|
project/urlfetchANDcolor/zachcrwallib.py
|
1
|
import gzip
def defeatblock(i):
    """Return the i-th User-Agent string from a pool of browser signatures.

    Used to rotate User-Agent headers so scraping requests are less likely
    to be blocked.

    Raises IndexError if *i* is out of range (0..13).
    """
    # NOTE: the original strings contained stray backslashes ("\(KHTML",
    # "\Version", "\Chrome") -- leftover line-continuation characters that
    # corrupted the User-Agent values and triggered invalid-escape
    # SyntaxWarnings. They are removed here.
    headers = [
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ (KHTML, like Gecko) Element Browser 5.0',
        'IBM WebExplorer /v0.94',
        'Galaxy/1.0 [en] (Mac OS X 10.5.6; U; en)',
        'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
        'Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14',
        'Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25',
        'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.60 Safari/534.24',
        'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)']
    return headers[i]
def ungzip(data, url):
    """Best-effort gzip decompression of a downloaded payload.

    Returns the decompressed bytes, or *data* unchanged when it is not
    gzip-compressed (or is truncated). *url* is only used for progress
    messages.
    """
    try:
        print(url, "正在解压缩...")
        data = gzip.decompress(data)
        print(url, "解压完毕...")
    # Narrowed from a bare except: BadGzipFile is an OSError subclass,
    # EOFError covers truncated streams. Real bugs (e.g. passing a str)
    # now propagate instead of being silently swallowed.
    except (OSError, EOFError):
        print(url, "未经压缩,无需解压...")
    return data
def color(type):
    """Return the ANSI escape sequence for the given color key.

    Raises KeyError for unknown keys.
    """
    palette = {
        'COLOR_GREEN': "\033[01;32m",
        'COLOR_RED': "\033[01;31m",
        'COLOR_YELLOW': "\033[01;33m",
        'COLOR_NONE': "\033[m",
    }
    return palette[type]
def judgepathsurfix(path):
    """Return *path* unchanged, or, when it is empty, the current working
    directory with the platform's trailing separator appended.
    """
    # Local imports: the module header only imports gzip, so these names
    # previously resolved to nothing (NameError as soon as path == "").
    import os
    import platform

    if path == "":
        if platform.system() == "Windows":
            path = os.getcwd() + "\\"
        else:
            path = os.getcwd() + "/"
    return path
def judgefile_whether_exist(filepath):
    """If *filepath* exists, interactively offer to delete it.

    When the file is missing, prints a notice and returns without
    prompting. Returns None in all cases.
    """
    import os

    exists = os.path.exists(filepath)
    if not exists:
        print("NOEXIST! START!")
    print(filepath)
    if exists:
        # ANSI codes inlined: the original referenced COLOR_RED/COLOR_NONE,
        # which are not defined anywhere in this module (NameError whenever
        # the file existed).
        choice = input("\033[01;31m" + "DO YOU WANT TO DELETE FILE?[y/N]" + "\033[m" + ":\n")
        if choice.lower() in ('y', 'yes'):
            os.remove(filepath)
|
draklaw/lair
|
refs/heads/master
|
bin/tmx_to_ldl.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Simon Boyé
#
# This file is part of lair.
#
# lair is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# lair is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lair. If not, see <http://www.gnu.org/licenses/>.
#
from sys import argv, stderr
from math import cos, radians, sin
from collections import OrderedDict
from pathlib import Path, PurePath
import re
from pprint import pprint
from tiled import (
Color as TiledColor,
Loader, Map, Layer, TileLayer, ObjectLayer, GroupLayer, Object,
)
from ldl import LdlWriter, TypedList
from lair import ABox, OBox, Sampler, Texture, Transform, Vector
_vectorize_re = re.compile(r'^(.*)_([xyzw])$')
_vectorize_index = {'x': 0, 'y': 1, 'z': 2, 'w': 3}


def vectorize(properties):
    """Collapse XXX_x, XXX_y, ... keys into one vector-valued XXX key.

    Tiled does not support vector properties (yet), so this is a
    workaround. Keys are processed in sorted order; non-matching keys are
    copied through unchanged.
    """
    out = OrderedDict()
    for key, value in sorted(properties.items(), key=lambda item: item[0]):
        match = _vectorize_re.match(key)
        if not match:
            out[key] = value
            continue
        vector = out.setdefault(match.group(1), Vector())
        index = _vectorize_index[match.group(2)]
        # Grow the coefficient list on demand so components may arrive in
        # any order (x, z without y, ...).
        while len(vector.coeffs) <= index:
            vector.coeffs.append(0)
        vector.coeffs[index] = value
    return out
def rotate(angle, x, y):
"""Rotate the vector x, y by the angle alpha (in radians)."""
c = cos(angle)
s = sin(angle)
return x * c + y * s, x * -s + y * c
_align_attr = {'x': 'halign', 'y': 'valign'}
_align_anchor = {
    'left': 0, 'center': .5, 'right': 1,
    'bottom': 0, 'top': 1
}


def get_anchor(obj, axis, default=None):
    """Find the anchor point of *obj* along *axis* ('x' or 'y').

    Anchor points can be set at different places; they are checked in
    order: the 'anchor_<axis>' property, the 'sprite.anchor_<axis>'
    property, the text alignment of text objects, and finally *default*.
    """
    assert axis in ('x', 'y')
    prop = 'anchor_' + axis
    for candidate in (obj.properties.get(prop),
                      obj.properties.get('sprite.' + prop)):
        if candidate is not None:
            return candidate
    if getattr(obj, 'text', None):
        return _align_anchor[getattr(obj.text, _align_attr[axis])]
    return default
class TiledMapConverter:
    """Convert a Tiled map into a ldl file supported by Lair."""

    def __init__(self, map_, filename, target_dir, sampler, font, loader):
        """
        map_: the tiled Map to convert.
        filename: path of the source map (referenced back by tile layers).
        target_dir: directory that output asset paths are made relative to.
        sampler: sampler used for all generated textures.
        font: default font for text objects.
        loader: tiled Loader, used to resolve object templates.
        """
        self._map = map_
        self._filename = filename
        self._target_dir = target_dir
        self._sampler = sampler
        self._font = font
        self._loader = loader
        # Map metrics, filled in by map(); kept as fields so the
        # per-object helpers can reach them.
        self._height = 0
        self._tile_width = 0
        self._tile_height = 0
        self._tile_layers = []
        self._fetch_tile_layers(self._map.layers)

    def _fetch_tile_layers(self, layers):
        """Collect all tile layers, recursing into groups, so each layer
        gets a stable index (used by object())."""
        for layer in layers:
            if isinstance(layer, TileLayer):
                self._tile_layers.append(layer)
            elif isinstance(layer, GroupLayer):
                self._fetch_tile_layers(layer.children)

    def convert(self):
        """Convert the whole map; returns a dict ready for LdlWriter."""
        return self.map(self._map)

    def map(self, map_):
        """Convert a Tiled map into an OrderedDict."""
        d = OrderedDict()
        self._height = map_.height * map_.tileheight
        self._tile_width = map_.tilewidth
        self._tile_height = map_.tileheight
        d['width'] = map_.width
        d['height'] = map_.height
        d['properties'] = self.properties(map_.properties)
        d['tilesets'] = list(map(self.tile_set, map_.tilesets))
        d['tile_layers'] = list(map(self.tile_layer, self._tile_layers))
        # object() returns None for unsupported layer types; drop those.
        d['objects'] = list(filter(bool, map(self.object, map_.layers)))
        return d

    def tile_set(self, tileset):
        """Convert a tileset: grid dimensions plus the image path."""
        d = OrderedDict()
        d['h_tiles'] = tileset.columns
        d['v_tiles'] = tileset.tilecount // max(1, tileset.columns)
        d['image'] = self.path(tileset.image.source)
        return d

    def tile_layer(self, tile_layer):
        """Convert a tile layer into offset/size/tile-index data."""
        d = OrderedDict()
        tile_layer.convert_chunks_to_tiles()
        d['offset'] = Vector(tile_layer.tile_offset_x,
                             tile_layer.height + tile_layer.tile_offset_y)
        d['size'] = Vector(tile_layer.width, tile_layer.height)
        d['tile_size'] = Vector(self._tile_width, self._tile_height)
        d['tiles'] = TypedList(None, tile_layer.tiles).inline()
        return d

    def object(self, object):
        """Convert an object or a layer into an entity dict.

        Returns None (with a warning) for unsupported object types.
        """
        d = OrderedDict()
        # Test if the object is in fact a layer
        if isinstance(object, TileLayer):
            type_ = 'tile_layer'
        elif isinstance(object, ObjectLayer) or isinstance(object, GroupLayer):
            type_ = 'group'
        elif isinstance(object, Object):
            type_ = object.type
        else:
            # Fixed: previously referenced the undefined name `layer`,
            # raising NameError instead of printing the warning.
            print("Warning: unsupported object type: {}".format(type(object)), file = stderr)
            return None
        properties = self.properties(object.properties)
        template = None
        if getattr(object, 'template', None):
            template = self._loader.load_template(object.template)
        text_object = None
        if hasattr(object, 'text'):
            text_object = object.text
        gid = getattr(object, 'gid', None)
        if gid is None and template:
            gid = getattr(template.object, 'gid', None)
        # Get useful properties; template values act as fallbacks.
        if isinstance(object, Object):
            anchor_x = get_anchor(object, 'x')
            if anchor_x is None and template:
                anchor_x = get_anchor(template.object, 'x')
            if anchor_x is None:
                anchor_x = 0.5
            anchor_y = get_anchor(object, 'y')
            if anchor_y is None and template:
                anchor_y = get_anchor(template.object, 'y')
            if anchor_y is None:
                anchor_y = 0.5
            base_x = getattr(object, 'x', 0.0)
            base_y = getattr(object, 'y', 0.0)
            width = getattr(object, 'width', None)
            if width is None and template:
                width = template.object.width
            height = getattr(object, 'height', None)
            if height is None and template:
                height = template.object.height
        else:
            # Layers: anchored at the origin, offset by the layer offset.
            anchor_x = 0
            anchor_y = 0
            base_x = getattr(object, 'offsetx', 0.0)
            base_y = getattr(object, 'offsety', 0.0)
            width = 0  # self._width
            height = self._height
        z = properties.get('z', 0.0)
        rotation = -getattr(object, 'rotation', 0.0)  # Rotation is inverted
        # For some reason, tile objects origin is bottom-left instead of top-left
        if gid is None:
            # Move base coordinate bottom-left
            base_y += height
        # Tiled origin is top left of the map, Lair is bottom left
        x, y = rotate(-radians(rotation), anchor_x * width, anchor_y * height)
        x += base_x
        y += self._height - base_y
        # Set base properties
        if getattr(object, 'template', None) is not None:
            d['model'] = object.template.stem
        if type_ is not None:
            d['type'] = type_
        d['name'] = object.name
        d['enabled'] = properties.get('enabled', True)
        d['transform'] = Transform([x, y, z], rotation)
        # Group properties in vectors
        properties = vectorize(properties)
        # Separate properties by components: "comp.key" goes into d[comp],
        # bare keys into d['properties'].
        for k, v in properties.items():
            comp_prop = k.split('.', 2)
            if len(comp_prop) < 2:
                comp_prop = ['properties', comp_prop[0]]
            d.setdefault(comp_prop[0], OrderedDict())[comp_prop[1]] = v
        # Set sprite properties if object is a sprite object.
        sprite_enabled = properties.get('sprite.enabled', True)
        if getattr(object, 'gid', 0) and sprite_enabled:
            tileset, tileindex = self._map.tiles[object.gid]
            sprite = d.setdefault('sprite', OrderedDict())
            sprite['texture'] = Texture(self.path(tileset.image.source), self._sampler)
            # Guard against columns == 0, consistent with tile_set().
            sprite['tile_grid'] = Vector(
                tileset.columns,
                tileset.tilecount // max(1, tileset.columns),
            )
            sprite['tile_index'] = tileindex
        # Add default sprite parameters
        if 'sprite' in d:
            sprite = d['sprite']
            # Convert filename to Texture object
            texture = sprite.get('texture')
            if isinstance(texture, str):
                sprite['texture'] = Texture(texture, self._sampler)
            sprite.setdefault('anchor', Vector(anchor_x, anchor_y))
        # Set collisions properties: default shape is an oriented box
        # centered on the object.
        if 'collision' in d:
            collision = d['collision']
            if 'shape' not in collision:
                collision['shape'] = OBox(
                    Vector(
                        object.width * (0.5 - anchor_x),
                        object.height * (0.5 - anchor_y),
                    ),
                    Vector(
                        object.width,
                        object.height,
                    )
                )
        # Support text objects
        if text_object:
            text = d.setdefault('text', OrderedDict())
            text.setdefault('font', self._font)
            text['text'] = text_object.text
            if text_object.wrap:
                text['size'] = Vector(width, height)
            text['color'] = self.property(text_object.color)
            text['anchor'] = Vector(
                _align_anchor[text_object.halign],
                _align_anchor[text_object.valign],
            )
        # Tile map: reference back into this map's layer list.
        if isinstance(object, TileLayer):
            tile_layer = d.setdefault('tile_layer', OrderedDict())
            tile_layer['tile_map'] = self.path(self._filename)
            tile_layer['layer_index'] = self._tile_layers.index(object)
        if isinstance(object, GroupLayer):
            d['children'] = list(map(self.object, object.children))
        elif isinstance(object, ObjectLayer):
            d['children'] = list(map(self.object, object.children))
        return d

    def properties(self, properties):
        """Convert a property mapping (or Tiled property list) into an
        OrderedDict of converted values."""
        if not isinstance(properties, dict):
            # NOTE(review): properties_as_dict is not among this module's
            # visible imports -- presumably provided by the tiled package;
            # confirm.
            properties = properties_as_dict(properties)
        d = OrderedDict()
        for k, v in properties.items():
            d[k] = self.property(v)
        return d

    def property(self, property):
        """Convert one property value: colors become 0-1 vectors, paths
        become target-relative strings, everything else passes through."""
        if isinstance(property, TiledColor):
            color = list(map(lambda c: c / 255, property.color))
            return Vector(color)
        elif isinstance(property, PurePath):
            return self.path(property)
        return property

    def path(self, path):
        """Return *path* relative to the target directory (as a string).

        Raises ValueError if *path* is not under the target directory.
        """
        path = Path(path).resolve()
        target = Path(self._target_dir).resolve()
        return str(path.relative_to(target))
def usage(ret=1):
    """Write the command-line usage text to stderr, then exit with *ret*."""
    message = """Usage: {} <tmx-input> [<ldl-output])
Convert a tmx file into a ldl file compatible with Lair.
""".format(argv[0])
    stderr.write(message)
    exit(ret)
def main(argv):
    """Command-line entry point.

    Accepted arguments: [-d out_dir] [-f font] [-s sampler]
    <tmx-input> [<ldl-output>].
    """
    in_filename = None
    out_dir = None
    out_filename = None
    # Defaults for the generated sprite sampler and text font.
    sampler = 'bilinear_no_mipmap|clamp'
    font = 'droid_sans_24.json'
    arg_it = iter(argv[1:])
    for arg in arg_it:
        if arg[0] == '-':
            # Each option consumes its value from the same iterator.
            if arg == '-d':
                out_dir = PurePath(next(arg_it))
            elif arg == '-f':
                font = next(arg_it)
            elif arg == '-s':
                sampler = next(arg_it)
            else:
                print("Unknown option {}.".format(arg), file=stderr)
                usage()
        elif in_filename is None:
            in_filename = PurePath(arg)
        elif out_filename is None:
            out_filename = PurePath(arg)
        else:
            # More than two positional arguments.
            usage()
    if in_filename is None:
        usage()
    if out_filename is None:
        # Default output: the input name with a .ldl suffix.
        out_filename = in_filename.with_suffix('.ldl')
        # NOTE(review): source indentation was lost; this nesting (prefixing
        # -d only onto the *defaulted* output name) is a reconstruction --
        # confirm against upstream.
        if out_dir:
            out_filename = out_dir / out_filename
    if out_dir is None:
        out_dir = out_filename.parent
    loader = Loader(in_filename)
    tilemap = loader.load_map(in_filename)
    with open(str(out_filename), 'w') as out_file:
        out = LdlWriter(out_file)
        sampler = Sampler(sampler)
        converter = TiledMapConverter(tilemap, out_filename, out_dir, sampler, font, loader)
        tile_map_as_dict = converter.convert()
        out.write(tile_map_as_dict)

if __name__ == '__main__':
    main(argv)
|
Isendir/brython
|
refs/heads/master
|
www/src/Lib/test/badsyntax_pep3120.py
|
181
|
# Test fixture from CPython's test suite (badsyntax_pep3120.py): in its
# original encoding this file deliberately violates PEP 3120 (non-UTF-8
# source without a coding declaration), so the interpreter must reject it.
# Do not "fix" the encoding of the original file.
print("böse")
|
wilebeast/FireFox-OS
|
refs/heads/master
|
B2G/gecko/python/mach/mach/commands/settings.py
|
2
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
from textwrap import TextWrapper
from mozbuild.base import MozbuildObject
from mach.base import CommandProvider
from mach.base import Command
@CommandProvider
class Settings(MozbuildObject):
    """Interact with settings for mach.

    Currently, we only provide functionality to view what settings are
    available. In the future, this module will be used to modify settings,
    help people create configs via a wizard, etc.
    """

    @Command('settings-list', help='Show available config settings.')
    def list_settings(self):
        """List available settings in a concise list."""
        for section in sorted(self.settings):
            for option in sorted(self.settings[section]):
                short, _full = self.settings.option_help(section, option)
                print('%s.%s -- %s' % (section, option, short))

    @Command('settings-create',
        help='Print a new settings file with usage info.')
    def create(self):
        """Create an empty settings file with full documentation."""
        # Long help strings are rendered as '# '-prefixed comment blocks.
        wrapper = TextWrapper(initial_indent='# ', subsequent_indent='# ')
        for section in sorted(self.settings):
            print('[%s]' % section)
            print('')
            for option in sorted(self.settings[section]):
                _short, full = self.settings.option_help(section, option)
                print(wrapper.fill(full))
                # Emit the option commented out (';') so the generated file
                # changes nothing until the user uncomments it.
                print(';%s =' % option)
                print('')
|
Roy1993sun/shadowsocks
|
refs/heads/master
|
tests/graceful_cli.py
|
977
|
#!/usr/bin/python
import socks
import time
SERVER_IP = '127.0.0.1'
SERVER_PORT = 8001

if __name__ == '__main__':
    # Connect to the test server through the local SOCKS5 proxy on port
    # 1081, then hold the connection open long enough to exercise a
    # graceful shutdown.
    sock = socks.socksocket()
    sock.set_proxy(socks.SOCKS5, SERVER_IP, 1081)
    sock.connect((SERVER_IP, SERVER_PORT))
    sock.send(b'test')
    time.sleep(30)
    sock.close()
|
DirtyUnicorns/android_external_chromium_org
|
refs/heads/lollipop
|
third_party/tlslite/tlslite/integration/pop3_tls.py
|
115
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite + poplib."""
import socket
from poplib import POP3, POP3_SSL_PORT
from tlslite.tlsconnection import TLSConnection
from tlslite.integration.clienthelper import ClientHelper
class POP3_TLS(POP3, ClientHelper):
    """This class extends L{poplib.POP3} with TLS support."""

    def __init__(self, host, port = POP3_SSL_PORT,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 username=None, password=None,
                 certChain=None, privateKey=None,
                 checker=None,
                 settings=None):
        """Create a new POP3_TLS.

        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - certChain, privateKey (certificate)

        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        you can do certificate-based server
        authentication with one of these argument combinations:
         - x509Fingerprint

        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication.

        The caller should be prepared to handle TLS-specific
        exceptions. See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.

        @type host: str
        @param host: Server to connect to.

        @type port: int
        @param port: Port to connect to.

        @type username: str
        @param username: SRP username.

        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.

        @type certChain: L{tlslite.x509certchain.X509CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument. Excludes the SRP argument.

        @type privateKey: L{tlslite.utils.rsakey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument. Excludes the SRP argument.

        @type checker: L{tlslite.checker.Checker}
        @param checker: Callable object called after handshaking to
        evaluate the connection and raise an Exception if necessary.

        @type settings: L{tlslite.handshakesettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        self.host = host
        self.port = port
        # Open the raw TCP connection first; TLS is layered on top of it
        # below, before any POP3 traffic is exchanged.
        sock = socket.create_connection((host, port), timeout)
        ClientHelper.__init__(self,
                              username, password,
                              certChain, privateKey,
                              checker,
                              settings)
        connection = TLSConnection(sock)
        ClientHelper._handshake(self, connection)
        # NOTE(review): the lines below mirror poplib.POP3.__init__ with the
        # TLS connection substituted for the plain socket; POP3.__init__ is
        # deliberately NOT called, so no cleartext greeting is read. Confirm
        # against the poplib version this code targets.
        self.sock = connection
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        self.welcome = self._getresp()
|
abramovpav/abraworld_backend
|
refs/heads/master
|
music_library/admin.py
|
1
|
from django.contrib import admin
# Register your models here.
from music_library.models import Album, Track
# Expose the music library models in the Django admin, in the same order
# as before (Album first, then Track).
for model in (Album, Track):
    admin.site.register(model)
|
chenkovsky/recpy
|
refs/heads/master
|
test.py
|
1
|
__author__ = 'chenkovsky'
import pandas as pd
import numpy as np
from . import knn
from sklearn.neighbors import KDTree
class TestRecommender:
def setUp(self):
data = {1: {1: 3.0, 2: 4.0, 3: 3.5, 4: 5.0, 5: 3.0},
2: {1: 3.0, 2: 4.0, 3: 2.0, 4: 3.0, 5: 3.0, 6: 2.0},
3: {2: 3.5, 3: 2.5, 4: 4.0, 5: 4.5, 6: 3.0},
4: {1: 2.5, 2: 3.5, 3: 2.5, 4: 3.5, 5: 3.0, 6: 3.0},
5: {2: 4.5, 3: 1.0, 4: 4.0},
6: {1: 3.0, 2: 3.5, 3: 3.5, 4: 5.0, 5: 3.0, 6: 1.5},
7: {1: 2.5, 2: 3.0, 4: 3.5, 5: 4.0}}
df =pd.DataFrame(data)
m = np.matrix(df)
m = m.transpose()
self.matrix = np.nan_to_num(m)
def testUserBasedKNNRecommender(self):
rec = knn.UserBasedKNNRecommender(self.matrix)
assert(rec.recommend(4)[0] == [4,0,5])
def testUserBasedKNNRecommenderLazy(self):
rec = knn.UserBasedKNNRecommender(self.matrix,lazy = True)
assert(rec.recommend(4)[0] == [4,0,5])
def testUserBasedKNNRecommenderKDTree(self):
rec = knn.UserBasedKNNRecommender(self.matrix,lazy = True, kdt = KDTree(self.matrix, metric= 'euclidean'))
assert(rec.recommend(4)[0] == [4,0,5])
def testItemBasedKNNRecommender(self):
rec = knn.ItemBasedKNNRecommender(self.matrix)
assert(rec.recommend(4)[0] == [4,0,5])
def testItemBasedKNNRecommenderLazy(self):
rec = knn.ItemBasedKNNRecommender(self.matrix, lazy = True)
assert(rec.recommend(4)[0] == [4,0,5])
|
markus-oberhumer/gcc
|
refs/heads/master
|
gcc/testsuite/g++.dg/gcov/gcov.py
|
2
|
import gzip
import json
import os
def gcov_from_env():
    """Return the parsed JSON content of the gcov output file named by the
    GCOV_PATH environment variable (with '.gcov.json.gz' appended).

    Raises KeyError if GCOV_PATH is unset, and OSError if the file cannot
    be read.
    """
    json_filename = os.environ['GCOV_PATH'] + '.gcov.json.gz'
    # Context manager closes the handle promptly; the original leaked the
    # gzip file object.
    with gzip.open(json_filename) as f:
        return json.loads(f.read())
|
AutorestCI/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/capability_information.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CapabilityInformation(Model):
    """Subscription-level properties and limits for Data Lake Analytics.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar subscription_id: the subscription credentials that uniquely
     identifies the subscription.
    :vartype subscription_id: str
    :ivar state: the subscription state. Possible values include:
     'Registered', 'Suspended', 'Deleted', 'Unregistered', 'Warned'
    :vartype state: str or
     ~azure.mgmt.datalake.analytics.account.models.SubscriptionState
    :ivar max_account_count: the maximum supported number of accounts under
     this subscription.
    :vartype max_account_count: int
    :ivar account_count: the current number of accounts under this
     subscription.
    :vartype account_count: int
    :ivar migration_state: the Boolean value of true or false to indicate
     the maintenance state.
    :vartype migration_state: bool
    """

    # Every field is read-only: populated by the service, never sent.
    _validation = {
        'subscription_id': {'readonly': True},
        'state': {'readonly': True},
        'max_account_count': {'readonly': True},
        'account_count': {'readonly': True},
        'migration_state': {'readonly': True},
    }

    # Python attribute -> wire (JSON) key and type, consumed by msrest.
    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'max_account_count': {'key': 'maxAccountCount', 'type': 'int'},
        'account_count': {'key': 'accountCount', 'type': 'int'},
        'migration_state': {'key': 'migrationState', 'type': 'bool'},
    }

    def __init__(self):
        super(CapabilityInformation, self).__init__()
        # All server-populated fields start unset.
        for attr in self._attribute_map:
            setattr(self, attr, None)
|
jessicalucci/NovaOrc
|
refs/heads/master
|
nova/cells/__init__.py
|
28
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells
"""
|
ayoubg/gem5-graphics
|
refs/heads/master
|
gem5/util/minorview/__init__.py
|
55
|
# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andrew Bardsley
|
treejames/erpnext
|
refs/heads/develop
|
erpnext/accounts/doctype/shipping_rule/test_shipping_rule.py
|
97
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.accounts.doctype.shipping_rule.shipping_rule import FromGreaterThanToError, ManyBlankToValuesError, OverlappingConditionError
test_records = frappe.get_test_records('Shipping Rule')
class TestShippingRule(unittest.TestCase):
def test_from_greater_than_to(self):
shipping_rule = frappe.copy_doc(test_records[0])
shipping_rule.name = test_records[0].get('name')
shipping_rule.get("conditions")[0].from_value = 101
self.assertRaises(FromGreaterThanToError, shipping_rule.insert)
def test_many_zero_to_values(self):
shipping_rule = frappe.copy_doc(test_records[0])
shipping_rule.name = test_records[0].get('name')
shipping_rule.get("conditions")[0].to_value = 0
self.assertRaises(ManyBlankToValuesError, shipping_rule.insert)
def test_overlapping_conditions(self):
for range_a, range_b in [
((50, 150), (0, 100)),
((50, 150), (100, 200)),
((50, 150), (75, 125)),
((50, 150), (25, 175)),
((50, 150), (50, 150)),
]:
shipping_rule = frappe.copy_doc(test_records[0])
shipping_rule.name = test_records[0].get('name')
shipping_rule.get("conditions")[0].from_value = range_a[0]
shipping_rule.get("conditions")[0].to_value = range_a[1]
shipping_rule.get("conditions")[1].from_value = range_b[0]
shipping_rule.get("conditions")[1].to_value = range_b[1]
self.assertRaises(OverlappingConditionError, shipping_rule.insert)
|
Doctor-Andonuts/taskwarriorandroid
|
refs/heads/master
|
cli/test/basetest/__init__.py
|
3
|
# -*- coding: utf-8 -*-
from .task import Task
from .taskd import Taskd
from .testing import TestCase, ServerTestCase
# flake8:noqa
# vim: ai sts=4 et sw=4
|
endlessm/chromium-browser
|
refs/heads/master
|
third_party/catapult/telemetry/third_party/modulegraph/modulegraph_tests/testdata/nspkg/src/child/namedpkg/slave.py
|
92
|
""" slave packages """
import os
|
elba7r/builder
|
refs/heads/master
|
frappe/core/doctype/report/__init__.py
|
1829
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
|
bdh1011/wau
|
refs/heads/master
|
venv/lib/python2.7/site-packages/nbconvert/preprocessors/convertfigures.py
|
4
|
"""Module containing a preprocessor that converts outputs in the notebook from
one format to another.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .base import Preprocessor
from traitlets import Unicode
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ConvertFiguresPreprocessor(Preprocessor):
    """
    Converts all of the outputs in a notebook from one format to another.
    """

    from_format = Unicode(config=True, help='Format the converter accepts')
    to_format = Unicode(config=True, help='Format the converter writes')

    def __init__(self, **kw):
        """
        Public constructor
        """
        super(ConvertFiguresPreprocessor, self).__init__(**kw)

    def convert_figure(self, data_format, data):
        # Subclasses implement the actual conversion.
        raise NotImplementedError()

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Apply a transformation on each cell,

        See base.py
        """
        # Convert outputs that carry the source format but not yet the
        # target format; skip stream/error outputs entirely.
        for output in cell.get('outputs', []):
            if output.output_type not in ('execute_result', 'display_data'):
                continue
            data = output.data
            if self.from_format in data and self.to_format not in data:
                data[self.to_format] = self.convert_figure(
                    self.from_format, data[self.from_format])
        return cell, resources
|
40123151ChengYu/2015cd_midterm2
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testhelpers.py
|
737
|
import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
    # Signature fixture: bodies are intentionally empty. Presumably the
    # create_autospec/patch tests elsewhere in this module introspect these
    # method signatures (no-arg, positional, and defaulted parameter).
    def one(self, a, b):
        pass
    def two(self):
        pass
    def three(self, a=None):
        pass
class AnyTest(unittest.TestCase):
    """Tests for mock.ANY: equal to everything, in both directions."""

    def test_any(self):
        self.assertEqual(ANY, object())

        m = Mock()
        m(ANY)
        m.assert_called_with(ANY)

        m = Mock()
        m(foo=ANY)
        m.assert_called_with(foo=ANY)

    def test_repr(self):
        for render in (repr, str):
            self.assertEqual(render(ANY), '<ANY>')

    def test_any_and_datetime(self):
        m = Mock()
        m(datetime.now(), foo=datetime.now())
        m.assert_called_with(ANY, foo=ANY)

    def test_any_mock_calls_comparison_order(self):
        m = Mock()

        class Incomparable(object):
            # Claims inequality with everything; ANY must still match it
            # because ANY's __eq__ takes precedence on the other side.
            def __eq__(self, other):
                return False
            def __ne__(self, other):
                return True

        for value in (datetime.now(), Incomparable()):
            m.reset_mock()
            m(value, foo=value, bar=value)
            m.method(value, zinga=value, alpha=value)
            m().method(a1=value, z99=value)

            expected = [
                call(ANY, foo=ANY, bar=ANY),
                call.method(ANY, zinga=ANY, alpha=ANY),
                call(), call().method(a1=ANY, z99=ANY)
            ]
            # Equality must hold regardless of which side is compared first.
            self.assertEqual(expected, m.mock_calls)
            self.assertEqual(m.mock_calls, expected)
class CallTest(unittest.TestCase):
    """Tests for the ``_Call`` tuple subclass and the ``call`` helper object.

    ``_Call`` instances are tuples of ``(name, args, kwargs)`` (with
    optional members) whose ``__eq__`` treats missing members as wildcards;
    these tests pin down that equality matrix, the repr, and call_list().
    """

    def test_call_with_call(self):
        # An empty _Call equals any other "empty" call, regardless of
        # which of name/args/kwargs it was constructed from.
        kall = _Call()
        self.assertEqual(kall, _Call())
        self.assertEqual(kall, _Call(('',)))
        self.assertEqual(kall, _Call(((),)))
        self.assertEqual(kall, _Call(({},)))
        self.assertEqual(kall, _Call(('', ())))
        self.assertEqual(kall, _Call(('', {})))
        self.assertEqual(kall, _Call(('', (), {})))

        # A name on only one side is ignored when the other side is nameless.
        self.assertEqual(kall, _Call(('foo',)))
        self.assertEqual(kall, _Call(('bar', ())))
        self.assertEqual(kall, _Call(('baz', {})))
        self.assertEqual(kall, _Call(('spam', (), {})))

        kall = _Call(((1, 2, 3),))
        self.assertEqual(kall, _Call(((1, 2, 3),)))
        self.assertEqual(kall, _Call(('', (1, 2, 3))))
        self.assertEqual(kall, _Call(((1, 2, 3), {})))
        self.assertEqual(kall, _Call(('', (1, 2, 3), {})))

        kall = _Call(((1, 2, 4),))
        self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
        self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))

        kall = _Call(('foo', (1, 2, 4),))
        self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
        self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
        self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))

        kall = _Call(({'a': 3},))
        self.assertEqual(kall, _Call(('', (), {'a': 3})))
        self.assertEqual(kall, _Call(('', {'a': 3})))
        self.assertEqual(kall, _Call(((), {'a': 3})))
        self.assertEqual(kall, _Call(({'a': 3},)))

    def test_empty__Call(self):
        # An empty _Call equals any tuple shape that denotes "no call data".
        args = _Call()

        self.assertEqual(args, ())
        self.assertEqual(args, ('foo',))
        self.assertEqual(args, ((),))
        self.assertEqual(args, ('foo', ()))
        self.assertEqual(args, ('foo', (), {}))
        self.assertEqual(args, ('foo', {}))
        self.assertEqual(args, ({},))

    def test_named_empty_call(self):
        # Once *this* side carries a name, the names must agree.
        args = _Call(('foo', (), {}))

        self.assertEqual(args, ('foo',))
        self.assertEqual(args, ('foo', ()))
        self.assertEqual(args, ('foo', (), {}))
        self.assertEqual(args, ('foo', {}))

        self.assertNotEqual(args, ((),))
        self.assertNotEqual(args, ())
        self.assertNotEqual(args, ({},))
        self.assertNotEqual(args, ('bar',))
        self.assertNotEqual(args, ('bar', ()))
        self.assertNotEqual(args, ('bar', {}))

    def test_call_with_args(self):
        args = _Call(((1, 2, 3), {}))

        self.assertEqual(args, ((1, 2, 3),))
        self.assertEqual(args, ('foo', (1, 2, 3)))
        self.assertEqual(args, ('foo', (1, 2, 3), {}))
        self.assertEqual(args, ((1, 2, 3), {}))

    def test_named_call_with_args(self):
        args = _Call(('foo', (1, 2, 3), {}))

        self.assertEqual(args, ('foo', (1, 2, 3)))
        self.assertEqual(args, ('foo', (1, 2, 3), {}))

        self.assertNotEqual(args, ((1, 2, 3),))
        self.assertNotEqual(args, ((1, 2, 3), {}))

    def test_call_with_kwargs(self):
        args = _Call(((), dict(a=3, b=4)))

        self.assertEqual(args, (dict(a=3, b=4),))
        self.assertEqual(args, ('foo', dict(a=3, b=4)))
        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
        self.assertEqual(args, ((), dict(a=3, b=4)))

    def test_named_call_with_kwargs(self):
        args = _Call(('foo', (), dict(a=3, b=4)))

        self.assertEqual(args, ('foo', dict(a=3, b=4)))
        self.assertEqual(args, ('foo', (), dict(a=3, b=4)))

        self.assertNotEqual(args, (dict(a=3, b=4),))
        self.assertNotEqual(args, ((), dict(a=3, b=4)))

    def test_call_with_args_call_empty_name(self):
        args = _Call(((1, 2, 3), {}))

        self.assertEqual(args, call(1, 2, 3))
        self.assertEqual(call(1, 2, 3), args)
        # Membership uses __eq__, so this must hold symmetrically too.
        self.assertTrue(call(1, 2, 3) in [args])

    def test_call_ne(self):
        self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
        self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
        self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))

    def test_call_non_tuples(self):
        kall = _Call(((1, 2, 3),))
        # Comparison against non-tuples must be inequality, not an error.
        for value in 1, None, self, int:
            self.assertNotEqual(kall, value)
            self.assertFalse(kall == value)

    def test_repr(self):
        self.assertEqual(repr(_Call()), 'call()')
        self.assertEqual(repr(_Call(('foo',))), 'call.foo()')

        self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
                         "call(1, 2, 3, a='b')")
        self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
                         "call.bar(1, 2, 3, a='b')")

        self.assertEqual(repr(call), 'call')
        self.assertEqual(str(call), 'call')

        self.assertEqual(repr(call()), 'call()')
        self.assertEqual(repr(call(1)), 'call(1)')
        self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")

        self.assertEqual(repr(call().foo), 'call().foo')
        # Intermediate arguments are not shown in the repr of a chain.
        self.assertEqual(repr(call(1).foo.bar(a=3).bing),
                         'call().foo.bar().bing')
        self.assertEqual(
            repr(call().foo(1, 2, a=3)),
            "call().foo(1, 2, a=3)"
        )
        self.assertEqual(repr(call()()), "call()()")
        self.assertEqual(repr(call(1)(2)), "call()(2)")
        self.assertEqual(
            repr(call()().bar().baz.beep(1)),
            "call()().bar().baz.beep(1)"
        )

    def test_call(self):
        self.assertEqual(call(), ('', (), {}))
        self.assertEqual(call('foo', 'bar', one=3, two=4),
                         ('', ('foo', 'bar'), {'one': 3, 'two': 4}))

        mock = Mock()
        mock(1, 2, 3)
        mock(a=3, b=6)
        self.assertEqual(mock.call_args_list,
                         [call(1, 2, 3), call(a=3, b=6)])

    def test_attribute_call(self):
        self.assertEqual(call.foo(1), ('foo', (1,), {}))
        self.assertEqual(call.bar.baz(fish='eggs'),
                         ('bar.baz', (), {'fish': 'eggs'}))

        mock = Mock()
        mock.foo(1, 2, 3)
        mock.bar.baz(a=3, b=6)
        self.assertEqual(mock.method_calls,
                         [call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])

    def test_extended_call(self):
        result = call(1).foo(2).bar(3, a=4)
        self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))

        mock = MagicMock()
        mock(1, 2, a=3, b=4)
        self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
        self.assertNotEqual(mock.call_args, call(1, 2, 3))

        self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
        self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])

        mock = MagicMock()
        mock.foo(1).bar()().baz.beep(a=6)

        last_call = call.foo(1).bar()().baz.beep(a=6)
        self.assertEqual(mock.mock_calls[-1], last_call)
        self.assertEqual(mock.mock_calls, last_call.call_list())

    def test_call_list(self):
        # call_list() expands a chained call into every intermediate call.
        mock = MagicMock()
        mock(1)
        self.assertEqual(call(1).call_list(), mock.mock_calls)

        mock = MagicMock()
        mock(1).method(2)
        self.assertEqual(call(1).method(2).call_list(),
                         mock.mock_calls)

        mock = MagicMock()
        mock(1).method(2)(3)
        self.assertEqual(call(1).method(2)(3).call_list(),
                         mock.mock_calls)

        mock = MagicMock()
        int(mock(1).method(2)(3).foo.bar.baz(4)(5))
        kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
        self.assertEqual(kall.call_list(), mock.mock_calls)

    def test_call_any(self):
        self.assertEqual(call, ANY)

        m = MagicMock()
        int(m)
        self.assertEqual(m.mock_calls, [ANY])
        self.assertEqual([ANY], m.mock_calls)

    def test_two_args_call(self):
        # two=True builds a two-tuple (args, kwargs) with no name member.
        args = _Call(((1, 2), {'a': 3}), two=True)
        self.assertEqual(len(args), 2)
        self.assertEqual(args[0], (1, 2))
        self.assertEqual(args[1], {'a': 3})

        other_args = _Call(((1, 2), {'a': 3}))
        self.assertEqual(args, other_args)
class SpecSignatureTest(unittest.TestCase):
    """Tests for ``create_autospec``: signature checking, attribute specs,
    descriptors, builtins, and recursive specs."""

    def _check_someclass_mock(self, mock):
        # Shared assertions: an autospecced SomeClass (class or instance)
        # exposes one/two/three with signature enforcement and nothing else.
        self.assertRaises(AttributeError, getattr, mock, 'foo')
        mock.one(1, 2)
        mock.one.assert_called_with(1, 2)
        self.assertRaises(AssertionError,
                          mock.one.assert_called_with, 3, 4)
        self.assertRaises(TypeError, mock.one, 1)

        mock.two()
        mock.two.assert_called_with()
        self.assertRaises(AssertionError,
                          mock.two.assert_called_with, 3)
        self.assertRaises(TypeError, mock.two, 1)

        mock.three()
        mock.three.assert_called_with()
        self.assertRaises(AssertionError,
                          mock.three.assert_called_with, 3)
        self.assertRaises(TypeError, mock.three, 3, 2)

        mock.three(1)
        mock.three.assert_called_with(1)

        mock.three(a=1)
        mock.three.assert_called_with(a=1)

    def test_basic(self):
        for spec in (SomeClass, SomeClass()):
            mock = create_autospec(spec)
            self._check_someclass_mock(mock)

    def test_create_autospec_return_value(self):
        def f():
            pass
        mock = create_autospec(f, return_value='foo')
        self.assertEqual(mock(), 'foo')

        class Foo(object):
            pass

        mock = create_autospec(Foo, return_value='foo')
        self.assertEqual(mock(), 'foo')

    def test_autospec_reset_mock(self):
        m = create_autospec(int)
        int(m)
        m.reset_mock()
        self.assertEqual(m.__int__.call_count, 0)

    def test_mocking_unbound_methods(self):
        class Foo(object):
            def foo(self, foo):
                pass
        p = patch.object(Foo, 'foo')
        mock_foo = p.start()
        Foo().foo(1)

        mock_foo.assert_called_with(1)
        # NOTE(review): the patcher is never stopped (no p.stop()), so it
        # stays registered for patch.stopall() for the rest of the run —
        # confirm this is intentional (Foo is local, so impact is minimal).

    def test_create_autospec_unbound_methods(self):
        # see mock issue 128
        # this is expected to fail until the issue is fixed
        return
        class Foo(object):
            def foo(self):
                pass

        klass = create_autospec(Foo)
        instance = klass()
        self.assertRaises(TypeError, instance.foo, 1)

        # Note: no type checking on the "self" parameter
        klass.foo(1)
        klass.foo.assert_called_with(1)
        self.assertRaises(TypeError, klass.foo)

    def test_create_autospec_keyword_arguments(self):
        class Foo(object):
            a = 3
        # Keyword arguments override the spec'd attribute values.
        m = create_autospec(Foo, a='3')
        self.assertEqual(m.a, '3')

    def test_create_autospec_keyword_only_arguments(self):
        def foo(a, *, b=None):
            pass

        m = create_autospec(foo)
        m(1)
        m.assert_called_with(1)
        self.assertRaises(TypeError, m, 1, 2)

        m(2, b=3)
        m.assert_called_with(2, b=3)

    def test_function_as_instance_attribute(self):
        obj = SomeClass()
        def f(a):
            pass
        obj.f = f

        mock = create_autospec(obj)
        mock.f('bing')
        mock.f.assert_called_with('bing')

    def test_spec_as_list(self):
        # because spec as a list of strings in the mock constructor means
        # something very different we treat a list instance as the type.
        mock = create_autospec([])
        mock.append('foo')
        mock.append.assert_called_with('foo')

        self.assertRaises(AttributeError, getattr, mock, 'foo')

        class Foo(object):
            foo = []

        mock = create_autospec(Foo)
        mock.foo.append(3)
        mock.foo.append.assert_called_with(3)
        self.assertRaises(AttributeError, getattr, mock.foo, 'foo')

    def test_attributes(self):
        class Sub(SomeClass):
            attr = SomeClass()
        sub_mock = create_autospec(Sub)

        for mock in (sub_mock, sub_mock.attr):
            self._check_someclass_mock(mock)

    def test_builtin_functions_types(self):
        # we could replace builtin functions / methods with a function
        # with *args / **kwargs signature. Using the builtin method type
        # as a spec seems to work fairly well though.
        class BuiltinSubclass(list):
            def bar(self, arg):
                pass
            sorted = sorted
            attr = {}

        mock = create_autospec(BuiltinSubclass)
        mock.append(3)
        mock.append.assert_called_with(3)
        self.assertRaises(AttributeError, getattr, mock.append, 'foo')

        mock.bar('foo')
        mock.bar.assert_called_with('foo')
        self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
        self.assertRaises(AttributeError, getattr, mock.bar, 'foo')

        mock.sorted([1, 2])
        mock.sorted.assert_called_with([1, 2])
        self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')

        mock.attr.pop(3)
        mock.attr.pop.assert_called_with(3)
        self.assertRaises(AttributeError, getattr, mock.attr, 'foo')

    def test_method_calls(self):
        class Sub(SomeClass):
            attr = SomeClass()

        mock = create_autospec(Sub)
        mock.one(1, 2)
        mock.two()
        mock.three(3)

        expected = [call.one(1, 2), call.two(), call.three(3)]
        self.assertEqual(mock.method_calls, expected)

        mock.attr.one(1, 2)
        mock.attr.two()
        mock.attr.three(3)

        expected.extend(
            [call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
        )
        self.assertEqual(mock.method_calls, expected)

    def test_magic_methods(self):
        class BuiltinSubclass(list):
            attr = {}

        mock = create_autospec(BuiltinSubclass)
        self.assertEqual(list(mock), [])
        # int() is not supported by list specs, even after iteration.
        self.assertRaises(TypeError, int, mock)
        self.assertRaises(TypeError, int, mock.attr)
        self.assertEqual(list(mock), [])

        self.assertIsInstance(mock['foo'], MagicMock)
        self.assertIsInstance(mock.attr['foo'], MagicMock)

    def test_spec_set(self):
        class Sub(SomeClass):
            attr = SomeClass()

        for spec in (Sub, Sub()):
            mock = create_autospec(spec, spec_set=True)
            self._check_someclass_mock(mock)

            # spec_set also forbids *setting* unknown attributes.
            self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
            self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')

    def test_descriptors(self):
        class Foo(object):
            @classmethod
            def f(cls, a, b):
                pass
            @staticmethod
            def g(a, b):
                pass

        class Bar(Foo):
            pass

        class Baz(SomeClass, Bar):
            pass

        # classmethods/staticmethods keep their signatures through
        # inheritance (including multiple inheritance).
        for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
            mock = create_autospec(spec)
            mock.f(1, 2)
            mock.f.assert_called_once_with(1, 2)

            mock.g(3, 4)
            mock.g.assert_called_once_with(3, 4)

    def test_recursive(self):
        class A(object):
            def a(self):
                pass
            foo = 'foo bar baz'
            bar = foo

        # A references itself; autospec must not recurse infinitely.
        A.B = A
        mock = create_autospec(A)

        mock()
        self.assertFalse(mock.B.called)

        mock.a()
        mock.B.a()
        self.assertEqual(mock.method_calls, [call.a(), call.B.a()])

        # Identical attribute objects on the spec still get distinct mocks.
        self.assertIs(A.foo, A.bar)
        self.assertIsNot(mock.foo, mock.bar)
        mock.foo.lower()
        self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)

    def test_spec_inheritance_for_classes(self):
        class Foo(object):
            def a(self):
                pass

            class Bar(object):
                def f(self):
                    pass

        class_mock = create_autospec(Foo)

        self.assertIsNot(class_mock, class_mock())

        for this_mock in class_mock, class_mock():
            this_mock.a()
            this_mock.a.assert_called_with()
            self.assertRaises(TypeError, this_mock.a, 'foo')
            self.assertRaises(AttributeError, getattr, this_mock, 'b')

        instance_mock = create_autospec(Foo())
        instance_mock.a()
        instance_mock.a.assert_called_with()
        self.assertRaises(TypeError, instance_mock.a, 'foo')
        self.assertRaises(AttributeError, getattr, instance_mock, 'b')

        # The return value isn't callable
        self.assertRaises(TypeError, instance_mock)

        instance_mock.Bar.f()
        instance_mock.Bar.f.assert_called_with()
        self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')

        instance_mock.Bar().f()
        instance_mock.Bar().f.assert_called_with()
        self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')

    def test_inherit(self):
        class Foo(object):
            a = 3
        Foo.Foo = Foo

        # class
        mock = create_autospec(Foo)
        instance = mock()
        self.assertRaises(AttributeError, getattr, instance, 'b')

        attr_instance = mock.Foo()
        self.assertRaises(AttributeError, getattr, attr_instance, 'b')

        # instance
        mock = create_autospec(Foo())
        self.assertRaises(AttributeError, getattr, mock, 'b')
        self.assertRaises(TypeError, mock)

        # attribute instance
        call_result = mock.Foo()
        self.assertRaises(AttributeError, getattr, call_result, 'b')

    def test_builtins(self):
        # used to fail with infinite recursion
        create_autospec(1)

        create_autospec(int)
        create_autospec('foo')
        create_autospec(str)
        create_autospec({})
        create_autospec(dict)
        create_autospec([])
        create_autospec(list)
        create_autospec(set())
        create_autospec(set)
        create_autospec(1.0)
        create_autospec(float)
        create_autospec(1j)
        create_autospec(complex)
        create_autospec(False)
        create_autospec(True)

    def test_function(self):
        def f(a, b):
            pass

        mock = create_autospec(f)
        self.assertRaises(TypeError, mock)
        mock(1, 2)
        mock.assert_called_with(1, 2)

        # Attributes on the function are specced too.
        f.f = f
        mock = create_autospec(f)
        self.assertRaises(TypeError, mock.f)
        mock.f(3, 4)
        mock.f.assert_called_with(3, 4)

    def test_skip_attributeerrors(self):
        class Raiser(object):
            def __get__(self, obj, type=None):
                if obj is None:
                    raise AttributeError('Can only be accessed via an instance')

        class RaiserClass(object):
            raiser = Raiser()

            @staticmethod
            def existing(a, b):
                return a + b

        s = create_autospec(RaiserClass)
        # NOTE(review): the lambda takes a parameter but assertRaises calls
        # it with none, so the TypeError comes from the lambda invocation
        # itself, not from s.existing(1, 2, 3) — confirm this is intended.
        self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
        s.existing(1, 2)
        self.assertRaises(AttributeError, lambda: s.nonexisting)

        # check we can fetch the raiser attribute and it has no spec
        obj = s.raiser
        obj.foo, obj.bar

    def test_signature_class(self):
        class Foo(object):
            def __init__(self, a, b=3):
                pass

        mock = create_autospec(Foo)

        self.assertRaises(TypeError, mock)
        mock(1)
        mock.assert_called_once_with(1)

        mock(4, 5)
        mock.assert_called_with(4, 5)

    def test_class_with_no_init(self):
        # this used to raise an exception
        # due to trying to get a signature from object.__init__
        class Foo(object):
            pass
        create_autospec(Foo)

    def test_signature_callable(self):
        class Callable(object):
            def __init__(self):
                pass
            def __call__(self, a):
                pass

        # Calling the class uses __init__'s signature; calling the
        # returned instance uses __call__'s.
        mock = create_autospec(Callable)
        mock()
        mock.assert_called_once_with()
        self.assertRaises(TypeError, mock, 'a')

        instance = mock()
        self.assertRaises(TypeError, instance)
        instance(a='a')
        instance.assert_called_once_with(a='a')
        instance('a')
        instance.assert_called_with('a')

        mock = create_autospec(Callable())
        mock(a='a')
        mock.assert_called_once_with(a='a')
        self.assertRaises(TypeError, mock)
        mock('a')
        mock.assert_called_with('a')

    def test_signature_noncallable(self):
        class NonCallable(object):
            def __init__(self):
                pass

        mock = create_autospec(NonCallable)
        instance = mock()
        mock.assert_called_once_with()
        self.assertRaises(TypeError, mock, 'a')
        self.assertRaises(TypeError, instance)
        self.assertRaises(TypeError, instance, 'a')

        mock = create_autospec(NonCallable())
        self.assertRaises(TypeError, mock)
        self.assertRaises(TypeError, mock, 'a')

    def test_create_autospec_none(self):
        class Foo(object):
            bar = None

        # None-valued attributes become unrestricted mocks, not None.
        mock = create_autospec(Foo)
        none = mock.bar
        self.assertNotIsInstance(none, type(None))

        none.foo()
        none.foo.assert_called_once_with()

    def test_autospec_functions_with_self_in_odd_place(self):
        class Foo(object):
            def f(a, self):
                pass

        a = create_autospec(Foo)
        a.f(self=10)
        a.f.assert_called_with(self=10)

    def test_autospec_property(self):
        class Foo(object):
            @property
            def foo(self):
                return 3

        foo = create_autospec(Foo)
        mock_property = foo.foo

        # no spec on properties
        self.assertTrue(isinstance(mock_property, MagicMock))
        mock_property(1, 2, 3)
        mock_property.abc(4, 5, 6)
        mock_property.assert_called_once_with(1, 2, 3)
        mock_property.abc.assert_called_once_with(4, 5, 6)

    def test_autospec_slots(self):
        class Foo(object):
            __slots__ = ['a']

        foo = create_autospec(Foo)
        mock_slot = foo.a

        # no spec on slots
        mock_slot(1, 2, 3)
        mock_slot.abc(4, 5, 6)
        mock_slot.assert_called_once_with(1, 2, 3)
        mock_slot.abc.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
    """Tests for ``_CallList`` (sublist containment, str) and PropertyMock."""

    def test_args_list_contains_call_list(self):
        mock = Mock()
        self.assertIsInstance(mock.call_args_list, _CallList)

        mock(1, 2)
        mock(a=3)
        mock(3, 4)
        mock(b=6)

        for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
            self.assertTrue(kall in mock.call_args_list)

        # A *list* of calls is "in" call_args_list when it appears as a
        # contiguous subsequence.
        calls = [call(a=3), call(3, 4)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(1, 2), call(a=3)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(3, 4), call(b=6)]
        self.assertTrue(calls in mock.call_args_list)
        calls = [call(3, 4)]
        self.assertTrue(calls in mock.call_args_list)

        self.assertFalse(call('fish') in mock.call_args_list)
        self.assertFalse([call('fish')] in mock.call_args_list)

    def test_call_list_str(self):
        mock = Mock()
        mock(1, 2)
        mock.foo(a=3)
        mock.foo.bar().baz('fish', cat='dog')

        expected = (
            "[call(1, 2),\n"
            " call.foo(a=3),\n"
            " call.foo.bar(),\n"
            " call.foo.bar().baz('fish', cat='dog')]"
        )
        self.assertEqual(str(mock.mock_calls), expected)

    def test_propertymock(self):
        # Patch SomeClass.one (defined earlier in this module) with a
        # PropertyMock; reads record call() and writes record call(value).
        p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
        mock = p.start()
        try:
            SomeClass.one
            mock.assert_called_once_with()

            s = SomeClass()
            s.one
            mock.assert_called_with()
            self.assertEqual(mock.mock_calls, [call(), call()])

            s.one = 3
            self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
        finally:
            p.stop()

    def test_propertymock_returnvalue(self):
        m = MagicMock()
        p = PropertyMock()
        # Properties live on the type, so attach to type(m).
        type(m).foo = p

        returned = m.foo
        p.assert_called_once_with()
        # The property's return value is a plain MagicMock, not another
        # PropertyMock.
        self.assertIsInstance(returned, MagicMock)
        self.assertNotIsInstance(returned, PropertyMock)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
ovnicraft/edx-platform
|
refs/heads/master
|
common/djangoapps/edxmako/management/commands/preprocess_assets.py
|
46
|
"""
Preprocess templatized asset files, enabling asset authors to use
Python/Django inside of Sass and CoffeeScript. This preprocessing
will happen before the invocation of the asset compiler (currently
handled by the assets paver file).
For this to work, assets need to be named with the appropriate
template extension (e.g., .mako for Mako templates). Currently Mako
is the only template engine supported.
"""
import os
import textwrap
from django.core.management.base import BaseCommand
from django.conf import settings
class Command(BaseCommand):
    """
    Basic management command to preprocess asset template files.
    """
    help = "Preprocess asset template files to ready them for compilation."

    def add_arguments(self, parser):
        # NOTE(review): ``type=unicode`` means this command targets
        # Python 2 — confirm before running under Python 3.
        parser.add_argument('files', type=unicode, nargs='+', help='files to pre-process')
        parser.add_argument('dest_dir', type=unicode, help='destination directory')

    def handle(self, *args, **options):
        """Pre-process each given file into ``dest_dir`` when a custom
        theme is configured; otherwise do nothing."""
        theme_name = getattr(settings, "THEME_NAME", None)
        use_custom_theme = settings.FEATURES.get("USE_CUSTOM_THEME", False)
        if not use_custom_theme or not theme_name:
            # No custom theme, nothing to do!
            return

        dest_dir = options['dest_dir']
        for source_file in options['files']:
            self.process_one_file(source_file, dest_dir, theme_name)

    def process_one_file(self, source_file, dest_dir, theme_name):
        """Pre-process a .scss file to replace our markers with real code."""
        with open(source_file) as fsource:
            original_content = fsource.read()
        # Splice the theme's Sass import in place of the override marker.
        content = original_content.replace(
            "//<THEME-OVERRIDE>",
            "@import '{}';".format(theme_name),
        )
        # Only write an output file if the marker was actually present.
        if content != original_content:
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            dest_file = os.path.join(dest_dir, os.path.basename(source_file))
            with open(dest_file, "w") as fout:
                # The run of \n escapes pushes the generated content well
                # below the warning header in the output file.
                fout.write(textwrap.dedent("""\
                    /*
                     * This file is dynamically generated and ignored by Git.
                     * DO NOT MAKE CHANGES HERE. Instead, go edit its source:
                     * {}
                     */
                    \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
                    """.format(source_file)))
                fout.write(content)
|
ahb0327/intellij-community
|
refs/heads/master
|
python/testData/resolve/multiFile/importPrivateNameWithStar/ImportPrivateNameWithStar.py
|
83
|
from b import *
class Public(__Private): pass
# <ref>
|
credativ/pulp
|
refs/heads/master
|
server/pulp/server/db/migrations/0015_load_content_types.py
|
15
|
"""
This migration loads the content types into the database. Part of the process includes dropping
of search indexes and their recreation.
"""
import logging
from pulp.plugins.loader.api import load_content_types
_logger = logging.getLogger(__name__)
def migrate(*args, **kwargs):
    """
    Load every content type into the database, dropping the related
    search indexes beforehand and recreating them afterwards (see this
    module's docblock).

    :param args: unused
    :type args: list
    :param kwargs: unused
    :type kwargs: dict
    """
    load_content_types(drop_indices=True)
|
jjx02230808/project0223
|
refs/heads/master
|
sklearn/ensemble/bagging.py
|
14
|
"""Bagging meta-estimator."""
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
from __future__ import division
import itertools
import numbers
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..metrics import r2_score, accuracy_score
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, check_X_y, check_array, column_or_1d
from ..utils.random import sample_without_replacement
from ..utils.validation import has_fit_parameter, check_is_fitted
from ..utils.fixes import bincount
from ..utils.metaestimators import if_delegate_has_method
from ..utils.multiclass import check_classification_targets
from .base import BaseEnsemble, _partition_estimators
__all__ = ["BaggingClassifier",
"BaggingRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,
                               max_samples, seeds, verbose):
    """Private function used to build a batch of estimators within a job.

    Returns a 3-tuple ``(estimators, estimators_samples,
    estimators_features)`` where ``estimators_samples`` holds, per
    estimator, a boolean mask of the training samples actually used and
    ``estimators_features`` the drawn feature indices.
    """
    # Retrieve settings
    n_samples, n_features = X.shape
    max_features = ensemble.max_features

    # Floats in (0, 1] are interpreted as fractions of the sample/feature
    # counts and converted to absolute integers here.
    if (not isinstance(max_samples, (numbers.Integral, np.integer)) and
            (0.0 < max_samples <= 1.0)):
        max_samples = int(max_samples * n_samples)

    if (not isinstance(max_features, (numbers.Integral, np.integer)) and
            (0.0 < max_features <= 1.0)):
        max_features = int(max_features * n_features)

    bootstrap = ensemble.bootstrap
    bootstrap_features = ensemble.bootstrap_features
    support_sample_weight = has_fit_parameter(ensemble.base_estimator_,
                                              "sample_weight")
    if not support_sample_weight and sample_weight is not None:
        raise ValueError("The base estimator doesn't support sample weight")

    # Build estimators
    estimators = []
    estimators_samples = []
    estimators_features = []

    for i in range(n_estimators):
        if verbose > 1:
            print("building estimator %d of %d" % (i + 1, n_estimators))

        # One RNG per estimator, seeded from the caller-provided seeds so
        # results are reproducible across job partitionings. The order of
        # the draws below is part of that reproducibility contract.
        random_state = check_random_state(seeds[i])
        seed = random_state.randint(MAX_INT)
        estimator = ensemble._make_estimator(append=False)

        try:  # Not all estimator accept a random_state
            estimator.set_params(random_state=seed)
        except ValueError:
            pass

        # Draw features
        if bootstrap_features:
            # With replacement: duplicates possible.
            features = random_state.randint(0, n_features, max_features)
        else:
            features = sample_without_replacement(n_features,
                                                  max_features,
                                                  random_state=random_state)

        # Draw samples, using sample weights, and then fit
        if support_sample_weight:
            if sample_weight is None:
                curr_sample_weight = np.ones((n_samples,))
            else:
                curr_sample_weight = sample_weight.copy()

            if bootstrap:
                # Bootstrap is expressed via weights: each sample's weight
                # is multiplied by how many times it was drawn.
                indices = random_state.randint(0, n_samples, max_samples)
                sample_counts = bincount(indices, minlength=n_samples)
                curr_sample_weight *= sample_counts
            else:
                # Zero out the weights of the samples NOT drawn.
                not_indices = sample_without_replacement(
                    n_samples,
                    n_samples - max_samples,
                    random_state=random_state)
                curr_sample_weight[not_indices] = 0

            estimator.fit(X[:, features], y, sample_weight=curr_sample_weight)
            samples = curr_sample_weight > 0.

        # Draw samples, using a mask, and then fit
        else:
            if bootstrap:
                indices = random_state.randint(0, n_samples, max_samples)
            else:
                indices = sample_without_replacement(n_samples,
                                                     max_samples,
                                                     random_state=random_state)
            sample_counts = bincount(indices, minlength=n_samples)

            estimator.fit((X[indices])[:, features], y[indices])
            samples = sample_counts > 0.

        estimators.append(estimator)
        estimators_samples.append(samples)
        estimators_features.append(features)

    return estimators, estimators_samples, estimators_features
def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(X[:, features])
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += \
proba_estimator[:, range(len(estimator.classes_))]
else:
# Resort to voting
predictions = estimator.predict(X[:, features])
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job.

    Accumulates per-estimator log-probabilities with ``logaddexp`` (i.e.
    sums the underlying probabilities in log space), starting from -inf
    (probability zero) for every class.
    """
    n_samples = X.shape[0]
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=np.int)

    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])

        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)
        else:
            # Estimator only saw a subset of the classes: merge its
            # columns into the corresponding columns of the full array.
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))])

            # NOTE(review): logaddexp(x, -inf) == x, so this statement
            # leaves the missing-class columns unchanged — it appears to
            # be a deliberate no-op kept for symmetry; confirm.
            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
                                                 -np.inf)

    return log_proba
def _parallel_decision_function(estimators, estimators_features, X):
"""Private function used to compute decisions within a job."""
return sum(estimator.decision_function(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
def _parallel_predict_regression(estimators, estimators_features, X):
"""Private function used to compute predictions within a job."""
return sum(estimator.predict(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
class BaseBagging(with_metaclass(ABCMeta, BaseEnsemble)):
    """Base class for Bagging meta-estimator.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(self,
                 base_estimator=None,
                 n_estimators=10,
                 max_samples=1.0,
                 max_features=1.0,
                 bootstrap=True,
                 bootstrap_features=False,
                 oob_score=False,
                 warm_start=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(BaseBagging, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators)

        # Parameters are stored verbatim (sklearn convention): validation
        # happens in _fit, not here.
        self.max_samples = max_samples
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.bootstrap_features = bootstrap_features
        self.oob_score = oob_score
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose

    def fit(self, X, y, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
        set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Returns self.
        """
        return self._fit(X, y, self.max_samples, sample_weight=sample_weight)

    def _fit(self, X, y, max_samples, max_depth=None, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
        set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers in
            regression).

        max_samples : int or float, optional (default=None)
            Argument to use instead of self.max_samples.

        max_depth : int, optional (default=None)
            Override value used when constructing base estimator. Only
            supported if the base estimator has a max_depth parameter.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)

        # Convert data
        X, y = check_X_y(X, y, ['csr', 'csc'])

        # Remap output
        n_samples, self.n_features_ = X.shape
        y = self._validate_y(y)

        # Check parameters
        self._validate_estimator()

        if max_depth is not None:
            self.base_estimator_.max_depth = max_depth

        # if max_samples is float:
        if not isinstance(max_samples, (numbers.Integral, np.integer)):
            max_samples = int(max_samples * X.shape[0])

        if not (0 < max_samples <= X.shape[0]):
            raise ValueError("max_samples must be in (0, n_samples]")

        if isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            max_features = int(self.max_features * self.n_features_)

        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")

        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")

        if self.warm_start and self.oob_score:
            raise ValueError("Out of bag estimate only available"
                             " if warm_start=False")

        # A stale OOB score from a previous fit must not survive warm starts.
        if hasattr(self, "oob_score_") and self.warm_start:
            del self.oob_score_

        if not self.warm_start or len(self.estimators_) == 0:
            # Free allocated memory, if any
            self.estimators_ = []
            self.estimators_samples_ = []
            self.estimators_features_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))

        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
            return self

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators,
                                                             self.n_jobs)

        # Advance random state to state after training
        # the first n_estimators
        if self.warm_start and len(self.estimators_) > 0:
            random_state.randint(MAX_INT, size=len(self.estimators_))

        # One seed per new estimator keeps results reproducible regardless
        # of how the work is partitioned across jobs.
        seeds = random_state.randint(MAX_INT, size=n_more_estimators)

        all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_build_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                sample_weight,
                max_samples,
                seeds[starts[i]:starts[i + 1]],
                verbose=self.verbose)
            for i in range(n_jobs))

        # Reduce: each job returns (estimators, samples, features) lists.
        self.estimators_ += list(itertools.chain.from_iterable(
            t[0] for t in all_results))
        self.estimators_samples_ += list(itertools.chain.from_iterable(
            t[1] for t in all_results))
        self.estimators_features_ += list(itertools.chain.from_iterable(
            t[2] for t in all_results))

        if self.oob_score:
            self._set_oob_score(X, y)

        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y(self, y):
        # Default implementation
        return column_or_1d(y, warn=True)
class BaggingClassifier(BaseBagging, ClassifierMixin):
    """A Bagging classifier.

    A Bagging classifier is an ensemble meta-estimator that fits base
    classifiers each on random subsets of the original dataset and then
    aggregate their individual predictions (either by voting or by averaging)
    to form a final prediction. Such a meta-estimator can typically be used as
    a way to reduce the variance of a black-box estimator (e.g., a decision
    tree), by introducing randomization into its construction procedure and
    then making an ensemble out of it.

    This algorithm encompasses several works from the literature. When random
    subsets of the dataset are drawn as random subsets of the samples, then
    this algorithm is known as Pasting [1]_. If samples are drawn with
    replacement, then the method is known as Bagging [2]_. When random subsets
    of the dataset are drawn as random subsets of the features, then the method
    is known as Random Subspaces [3]_. Finally, when base estimators are built
    on subsets of both samples and features, then the method is known as
    Random Patches [4]_.

    Read more in the :ref:`User Guide <bagging>`.

    Parameters
    ----------
    base_estimator : object or None, optional (default=None)
        The base estimator to fit on random subsets of the dataset.
        If None, then the base estimator is a decision tree.

    n_estimators : int, optional (default=10)
        The number of base estimators in the ensemble.

    max_samples : int or float, optional (default=1.0)
        The number of samples to draw from X to train each base estimator.

        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.

    max_features : int or float, optional (default=1.0)
        The number of features to draw from X to train each base estimator.

        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.

    bootstrap : boolean, optional (default=True)
        Whether samples are drawn with replacement.

    bootstrap_features : boolean, optional (default=False)
        Whether features are drawn with replacement.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    warm_start : bool, optional (default=False)
        When set to True, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit
        a whole new ensemble.

        .. versionadded:: 0.17
           *warm_start* constructor parameter.

    n_jobs : int, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the building process.

    Attributes
    ----------
    base_estimator_ : list of estimators
        The base estimator from which the ensemble is grown.

    estimators_ : list of estimators
        The collection of fitted base estimators.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator.

    estimators_features_ : list of arrays
        The subset of drawn features for each base estimator.

    classes_ : array of shape = [n_classes]
        The classes labels.

    n_classes_ : int or list
        The number of classes.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------
    .. [1] L. Breiman, "Pasting small votes for classification in large
           databases and on-line", Machine Learning, 36(1), 85-103, 1999.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
           1996.

    .. [3] T. Ho, "The random subspace method for constructing decision
           forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
           1998.

    .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
           Learning and Knowledge Discovery in Databases, 346-361, 2012.
    """

    def __init__(self,
                 base_estimator=None,
                 n_estimators=10,
                 max_samples=1.0,
                 max_features=1.0,
                 bootstrap=True,
                 bootstrap_features=False,
                 oob_score=False,
                 warm_start=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(BaggingClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(BaggingClassifier, self)._validate_estimator(
            default=DecisionTreeClassifier())

    def _set_oob_score(self, X, y):
        # Accumulate, for every sample, the (soft) votes of the estimators
        # for which that sample was out-of-bag.
        n_classes_ = self.n_classes_
        classes_ = self.classes_
        n_samples = y.shape[0]

        predictions = np.zeros((n_samples, n_classes_))

        for estimator, samples, features in zip(self.estimators_,
                                                self.estimators_samples_,
                                                self.estimators_features_):
            # mask is True for the out-of-bag samples of this estimator.
            # NOTE: use the builtin `bool` -- the `np.bool` alias is
            # deprecated/removed in recent NumPy releases.
            mask = np.ones(n_samples, dtype=bool)
            mask[samples] = False

            if hasattr(estimator, "predict_proba"):
                predictions[mask, :] += estimator.predict_proba(
                    (X[mask, :])[:, features])

            else:
                # Hard voting fallback for estimators without predict_proba.
                p = estimator.predict((X[mask, :])[:, features])
                j = 0

                for i in range(n_samples):
                    if mask[i]:
                        predictions[i, p[j]] += 1
                        j += 1

        if (predictions.sum(axis=1) == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few estimators were used "
                 "to compute any reliable oob estimates.")

        oob_decision_function = (predictions /
                                 predictions.sum(axis=1)[:, np.newaxis])
        oob_score = accuracy_score(y, classes_.take(np.argmax(predictions,
                                                              axis=1)))

        self.oob_decision_function_ = oob_decision_function
        self.oob_score_ = oob_score

    def _validate_y(self, y):
        # Encode class labels as integer indices into self.classes_.
        y = column_or_1d(y, warn=True)
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)

        return y

    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is computed as the class with
        the highest mean predicted probability. If base estimators do not
        implement a ``predict_proba`` method, then it resorts to voting.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted classes.
        """
        predicted_probability = self.predict_proba(X)
        return self.classes_.take((np.argmax(predicted_probability, axis=1)),
                                  axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the base estimators in the
        ensemble. If base estimators do not implement a ``predict_proba``
        method, then it resorts to voting and the predicted class probabilities
        of a an input sample represents the proportion of estimators predicting
        each class.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1}."
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X,
                self.n_classes_)
            for i in range(n_jobs))

        # Reduce: average the per-worker probability sums.
        proba = sum(all_proba) / self.n_estimators

        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the base
        estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        check_is_fitted(self, "classes_")
        if hasattr(self.base_estimator_, "predict_log_proba"):
            # Check data
            X = check_array(X, accept_sparse=['csr', 'csc'])

            if self.n_features_ != X.shape[1]:
                raise ValueError("Number of features of the model must "
                                 "match the input. Model n_features is {0} "
                                 "and input n_features is {1} "
                                 "".format(self.n_features_, X.shape[1]))

            # Parallel loop
            n_jobs, n_estimators, starts = _partition_estimators(
                self.n_estimators, self.n_jobs)

            all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
                delayed(_parallel_predict_log_proba)(
                    self.estimators_[starts[i]:starts[i + 1]],
                    self.estimators_features_[starts[i]:starts[i + 1]],
                    X,
                    self.n_classes_)
                for i in range(n_jobs))

            # Reduce: average probabilities in log-space via logaddexp to
            # avoid underflow, then subtract log(n) to complete the mean.
            log_proba = all_log_proba[0]

            for j in range(1, len(all_log_proba)):
                log_proba = np.logaddexp(log_proba, all_log_proba[j])

            log_proba -= np.log(self.n_estimators)

            return log_proba

        else:
            # Fall back to the (exact) log of the averaged probabilities.
            return np.log(self.predict_proba(X))

    @if_delegate_has_method(delegate='base_estimator')
    def decision_function(self, X):
        """Average of the decision functions of the base classifiers.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        score : array, shape = [n_samples, k]
            The decision function of the input samples. The columns correspond
            to the classes in sorted order, as they appear in the attribute
            ``classes_``. Regression and binary classification are special
            cases with ``k == 1``, otherwise ``k==n_classes``.
        """
        check_is_fitted(self, "classes_")

        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        if self.n_features_ != X.shape[1]:
            # BUGFIX: the format fields were {1}/{2} with only two arguments,
            # which raised IndexError instead of this ValueError.
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is {0} and "
                             "input n_features is {1} "
                             "".format(self.n_features_, X.shape[1]))

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X)
            for i in range(n_jobs))

        # Reduce
        decisions = sum(all_decisions) / self.n_estimators

        return decisions
class BaggingRegressor(BaseBagging, RegressorMixin):
    """A Bagging regressor.

    A Bagging regressor is an ensemble meta-estimator that fits base
    regressors each on random subsets of the original dataset and then
    aggregate their individual predictions (either by voting or by averaging)
    to form a final prediction. Such a meta-estimator can typically be used as
    a way to reduce the variance of a black-box estimator (e.g., a decision
    tree), by introducing randomization into its construction procedure and
    then making an ensemble out of it.

    This algorithm encompasses several works from the literature. When random
    subsets of the dataset are drawn as random subsets of the samples, then
    this algorithm is known as Pasting [1]_. If samples are drawn with
    replacement, then the method is known as Bagging [2]_. When random subsets
    of the dataset are drawn as random subsets of the features, then the method
    is known as Random Subspaces [3]_. Finally, when base estimators are built
    on subsets of both samples and features, then the method is known as
    Random Patches [4]_.

    Read more in the :ref:`User Guide <bagging>`.

    Parameters
    ----------
    base_estimator : object or None, optional (default=None)
        The base estimator to fit on random subsets of the dataset.
        If None, then the base estimator is a decision tree.

    n_estimators : int, optional (default=10)
        The number of base estimators in the ensemble.

    max_samples : int or float, optional (default=1.0)
        The number of samples to draw from X to train each base estimator.

        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.

    max_features : int or float, optional (default=1.0)
        The number of features to draw from X to train each base estimator.

        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.

    bootstrap : boolean, optional (default=True)
        Whether samples are drawn with replacement.

    bootstrap_features : boolean, optional (default=False)
        Whether features are drawn with replacement.

    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization error.

    warm_start : bool, optional (default=False)
        When set to True, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit
        a whole new ensemble.

    n_jobs : int, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the building process.

    Attributes
    ----------
    estimators_ : list of estimators
        The collection of fitted sub-estimators.

    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator.

    estimators_features_ : list of arrays
        The subset of drawn features for each base estimator.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_prediction_` might contain NaN.

    References
    ----------
    .. [1] L. Breiman, "Pasting small votes for classification in large
           databases and on-line", Machine Learning, 36(1), 85-103, 1999.

    .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
           1996.

    .. [3] T. Ho, "The random subspace method for constructing decision
           forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
           1998.

    .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
           Learning and Knowledge Discovery in Databases, 346-361, 2012.
    """

    def __init__(self,
                 base_estimator=None,
                 n_estimators=10,
                 max_samples=1.0,
                 max_features=1.0,
                 bootstrap=True,
                 bootstrap_features=False,
                 oob_score=False,
                 warm_start=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0):
        super(BaggingRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose)

    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        check_is_fitted(self, "estimators_features_")
        # Check data
        X = check_array(X, accept_sparse=['csr', 'csc'])

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
                                                             self.n_jobs)

        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[i]:starts[i + 1]],
                self.estimators_features_[starts[i]:starts[i + 1]],
                X)
            for i in range(n_jobs))

        # Reduce: average the per-worker prediction sums.
        y_hat = sum(all_y_hat) / self.n_estimators

        return y_hat

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super(BaggingRegressor, self)._validate_estimator(
            default=DecisionTreeRegressor())

    def _set_oob_score(self, X, y):
        # Average, for every sample, the predictions of the estimators for
        # which that sample was out-of-bag.
        n_samples = y.shape[0]

        predictions = np.zeros((n_samples,))
        n_predictions = np.zeros((n_samples,))

        for estimator, samples, features in zip(self.estimators_,
                                                self.estimators_samples_,
                                                self.estimators_features_):
            # mask is True for the out-of-bag samples of this estimator.
            # NOTE: use the builtin `bool` -- the `np.bool` alias is
            # deprecated/removed in recent NumPy releases.
            mask = np.ones(n_samples, dtype=bool)
            mask[samples] = False

            predictions[mask] += estimator.predict((X[mask, :])[:, features])
            n_predictions[mask] += 1

        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few estimators were used "
                 "to compute any reliable oob estimates.")
            # Avoid division by zero for samples never left out-of-bag.
            n_predictions[n_predictions == 0] = 1

        predictions /= n_predictions

        self.oob_prediction_ = predictions
        self.oob_score_ = r2_score(y, predictions)
|
DrMeers/django
|
refs/heads/master
|
tests/null_fk_ordering/models.py
|
165
|
"""
Regression tests for proper working of ForeignKey(null=True). Tests these bugs:
    * #7512: including a nullable foreign key reference in Meta ordering has
      unexpected results
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# The first two models represent a very simple null FK ordering case.
class Author(models.Model):
    # Referenced by Article via a nullable FK; Article's Meta ordering
    # traverses author__name, which is the behavior under test.
    name = models.CharField(max_length=150)
@python_2_unicode_compatible
class Article(models.Model):
    title = models.CharField(max_length=150)
    # null=True: an article may have no author, yet the ordering below
    # must still produce sensible results (regression for #7512).
    author = models.ForeignKey(Author, null=True)

    def __str__(self):
        return 'Article titled: %s' % (self.title, )

    class Meta:
        # Ordering through a nullable FK is the case being exercised.
        ordering = ['author__name', ]
# These following 4 models represent a far more complex ordering case.
class SystemInfo(models.Model):
    # Root of the Comment -> Post -> Forum -> SystemInfo ordering chain.
    system_name = models.CharField(max_length=32)
class Forum(models.Model):
    # Non-nullable link to SystemInfo; the nullable hops are further down
    # the chain (Post.forum and Comment.post).
    system_info = models.ForeignKey(SystemInfo)
    forum_name = models.CharField(max_length=32)
@python_2_unicode_compatible
class Post(models.Model):
    # Nullable FK: a post may exist without a forum.
    forum = models.ForeignKey(Forum, null=True)
    title = models.CharField(max_length=32)

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Comment(models.Model):
    # Nullable FK: a comment may exist without a post.
    post = models.ForeignKey(Post, null=True)
    comment_text = models.CharField(max_length=250)

    class Meta:
        # Orders across two nullable FK hops (post -> forum) plus a
        # non-nullable one (forum -> system_info): the complex case.
        ordering = ['post__forum__system_info__system_name', 'comment_text']

    def __str__(self):
        return self.comment_text
|
ReubenAbrams/Chrubix
|
refs/heads/master
|
src/chrubix/__init__.py
|
1
|
#!/usr/local/bin/python3
import sys
import os
import pickle
import binascii
import time
import subprocess
import getopt
import hashlib
import base64
from chrubix.utils import call_binary, read_oneliner_file, write_oneliner_file, call_binary_and_show_progress, \
wget, failed, logme
from chrubix.utils.postinst import configure_lxdm_behavior
from chrubix import distros
from chrubix.distros.archlinux import ArchlinuxDistro
from chrubix.distros.debian import WheezyDebianDistro, JessieDebianDistro, StretchDebianDistro, TailsWheezyDebianDistro
from chrubix.distros.kali import KaliDistro
from chrubix.distros.fedora import NineteenFedoraDistro
from chrubix.distros.ubuntu import VividUbuntuDistro
from chrubix.distros.suse import SuseDistro
from _sqlite3 import InternalError
def list_command_line_options():
    """Print a usage summary of chrubix's command-line options to stdout."""
    usage_text = """
chrubix [options]
    -h          get help
    -D<distro>  install <distro>
    -d<dev>     destination storage device
    -r<dev>     root/bootstrap device; you may not reformat, but you may install an OS here
    -s<dev>     spare device; you may mount, reformat, etc. this partition
    -k<dev>     where the kernel is to be written (with dd)
    -m<mountpt> where is the root fs mounted
    -E          evil maid mode :)
"""
    print( usage_text )
def generate_distro_record_from_name( name_str ):
    """Look up *name_str* in the table of supported distros and return a
    freshly constructed distro record (an instance of the matching
    Distro subclass).

    Raises AssertionError if the name is unknown or the record is
    missing a name/architecture.
    """
    distro_options = {
        'archlinux'     :ArchlinuxDistro,
        'fedora19'      :NineteenFedoraDistro,
        'debianjessie'  :JessieDebianDistro,
        'kali'          :KaliDistro,
        'ubuntuvivid'   :VividUbuntuDistro,
        'suse'          :SuseDistro,
        'debianstretch' :StretchDebianDistro,
        'debianwheezy'  :WheezyDebianDistro,
        'debiantails'   :TailsWheezyDebianDistro,
                      }
    # BUGFIX: os.system('cd /') changed directory only in a throwaway child
    # shell and had no effect on this process; chdir() does what was intended.
    os.chdir( '/' )
    print( "Creating distro record for %s" % ( name_str ) )
    assert( name_str in distro_options.keys() )
    rec = distro_options[name_str]()  # rec itself handles the naming (......self.name)
    assert( None not in ( rec.name, rec.architecture ) )
    assert( rec.branch is not None or rec.name in ( 'archlinux', 'kali', 'fedora' ) )
    return rec
def load_distro_record( mountpoint = '/' ):
    """Unpickle <mountpoint>/etc/.distro.rec and rebuild a distro record.

    A fresh record is created from the saved name+branch, then every saved
    attribute that the record class still knows about is copied onto it;
    unknown attributes are reported and skipped.
    """
    # BUGFIX: open the pickle file via a context manager so the handle is
    # closed promptly instead of leaking until garbage collection.
    with open( '%s/etc/.distro.rec' % ( mountpoint ), "rb" ) as f:
        dct_to_load = pickle.load( f )
    distro_record = generate_distro_record_from_name( dct_to_load['name'] + ( '' if dct_to_load['branch'] is None else dct_to_load['branch'] ) )
    for k in dct_to_load['dct'].keys():
        if k in distro_record.__dict__.keys():
            distro_record.__dict__[k] = dct_to_load['dct'][k]
        else:
            print( 'Warning - %s is not in the distro rec. Therefore, I shall not set its value to %s.' % ( k, dct_to_load['dct'][k] ) )
    return distro_record
def save_distro_record( distro_rec = None, mountpoint = '/' ):
    """Pickle *distro_rec* to <mountpoint>/etc/.distro.rec.

    The status list is truncated to its last five entries before saving to
    keep the record small; if an lxdm.conf exists under the mountpoint, the
    saved lxdm settings are also (re)applied.
    """
    assert( distro_rec is not None )
    original_status_lst = distro_rec.status_lst
    try:
        distro_rec.status_lst = original_status_lst[-5:]
    except ( IndexError, SyntaxError ):
        logme( 'Unable to truncate status_lst. Bummer, man...' )
    dct_to_save = {'name':distro_rec.name, 'branch':distro_rec.branch, 'dct':distro_rec.__dict__}
    # BUGFIX: use a context manager so the output file is flushed and closed
    # deterministically rather than leaking the handle.
    with open( '%s/etc/.distro.rec' % ( mountpoint ), "wb" ) as f:
        pickle.dump( dct_to_save, f )
    if os.path.exists( '%s/etc/lxdm/lxdm.conf' % ( mountpoint ) ):
        configure_lxdm_behavior( mountpoint, distro_rec.lxdm_settings )
def process_command_line( argv ):
    """Parse chrubix's command line (*argv*, sys.argv-style) and return a
    configured distro record.

    Raises getopt.GetoptError on a missing/unknown option.
    """
    do_distro = None
    do_device = None
    do_root_dev = None
    do_kernel_dev = None
    do_spare_dev = None
    # BUGFIX: do_mountpoint was previously unbound unless '-m' was given,
    # causing a NameError at 'distro.mountpoint = do_mountpoint' below.
    do_mountpoint = None
    do_evil_maid = False
    do_latest_kernel = False
    install_to_plain_p3 = False
    print( "Running chrubix from command line." )
    # BUGFIX: check the argv we were handed, not the global sys.argv.
    if len( argv ) <= 1:
        list_command_line_options()
        raise getopt.GetoptError( "In command line, please specify name of distro" )
    # NOTE(review): '-P' is accepted by the option string but has no handler,
    # so supplying it falls through to the GetoptError below -- confirm intent.
    optlist, args = getopt.getopt( argv[1:], 'hEZK:P:D:d:r:s:k:m:' )
    args = args  # hide Eclipse warning
    for ( opt, param ) in optlist:
        if opt == '-h':
            list_command_line_options()
            sys.exit( 1 )
        elif opt == '-D':
            do_distro = param
        elif opt == '-d':
            do_device = param
        elif opt == '-r':
            do_root_dev = param
        elif opt == '-s':
            do_spare_dev = param
        elif opt == '-k':
            do_kernel_dev = param
        elif opt == '-m':
            do_mountpoint = param
        elif opt == '-K':
            do_latest_kernel = True if param == 'yes' else False
        elif opt == '-E':
            do_evil_maid = True
        elif opt == '-Z':
            install_to_plain_p3 = True
        else:
            raise getopt.GetoptError( str( opt ) + " is an unrecognized command-line parameter" )
    distro = generate_distro_record_from_name( do_distro )
    distro.device = do_device
    distro.root_dev = do_root_dev
    distro.kernel_dev = do_kernel_dev
    distro.spare_dev = do_spare_dev
    distro.mountpoint = do_mountpoint
    distro.install_to_plain_p3 = install_to_plain_p3
    distro.use_latest_kernel = do_latest_kernel
    if do_evil_maid:
        distro.reboot_into_stage_two = True
        distro.kernel_rebuild_required = True
        distro.kthx = True
        distro.pheasants = True
        logme( 'Configuring for Evil Maid Protection Mode' )
    return distro
def exec_cli( argv ):
    '''
    If main.py detects that Chrubix was called from within ChromeOS, program execution goes HERE.
    This function's job is to install a GNU/Linux variant on the partitions that are already mounted.
    - Process commnad line w/ process_command_line(), returning a distro struct (for ppropriate distro)
    - distro.install() -- i.e. install Linux on MMC w/ the appropriate distro subclass.

    Returns 0 on success, or the nonzero result of distro.install().
    '''
    res = 0
    # /Users exists only on macOS; /proc/cmdline only on Linux -- so this
    # branch means "not running under Linux at all".
    if os.path.isdir( '/Users' ) or not os.path.isfile( '/proc/cmdline' ):
        failed( 'testbed() disabled' )
#        res = testbed( argv )
#        raise EnvironmentError( 'Do not call me if you are running under an OS other than Linux, please.' )
    # 'cros_secure' on the kernel command line identifies a ChromeOS boot.
    elif read_oneliner_file( '/proc/cmdline' ).find( 'cros_secure' ) < 0:
        raise EnvironmentError( 'Boot into ChromeOS if you want to run me, please.' )
    # encstateful mounted + no args => still in the outer ChromeOS shell,
    # not yet chrooted into the bootstrap.
    elif os.system( 'mount | grep /dev/mapper/encstateful &> /dev/null' ) == 0 and len( argv ) == 0:
        raise EnvironmentError( 'OK, you are in ChromeOS; now, chroot into the bootstrap and run me again, please.' )
    else:
#        os.system( 'clear' )
        distro = process_command_line( argv, )  # returns a record (instance) of the appropriate Linux distro subclass
        res = distro.install()
        # install() may return None to mean success; normalize to 0.
        if res is None:
            res = 0
    if res != 0:
        print( 'exec_cli() returning w/ res=%d' % ( res ) )
    return res
|
Si-elegans/Web-based_GUI_Tools
|
refs/heads/master
|
wiki/views/article.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import difflib
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.db import transaction
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.template.context import RequestContext
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View, RedirectView
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from wiki.views.mixins import ArticleMixin
from wiki import editors, forms, models
from wiki.conf import settings
from wiki.core.plugins import registry as plugin_registry
from wiki.core.diff import simple_merge
from wiki.decorators import get_article, json_view
from django.core.urlresolvers import reverse
from wiki.core.exceptions import NoRootURL
from wiki.core import permissions
from django.http import Http404
log = logging.getLogger(__name__)
class ArticleView(ArticleMixin, TemplateView):
    """Read-only display of a wiki article."""

    template_name="wiki/view.html"

    # get_article resolves the URL to an article and enforces read permission
    # before the view body runs.
    @method_decorator(get_article(can_read=True))
    def dispatch(self, request, article, *args, **kwargs):
        return super(ArticleView, self).dispatch(request, article, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Highlight the "view" tab in the article navigation.
        kwargs['selected_tab'] = 'view'
        return ArticleMixin.get_context_data(self, **kwargs)
class Create(FormView, ArticleMixin):
    """Create a new child article under the current URL path."""

    form_class = forms.CreateForm
    template_name="wiki/create.html"

    # Requires an authenticated user with write+create permission on the
    # parent article.
    @method_decorator(login_required)
    @method_decorator(get_article(can_write=True, can_create=True))
    def dispatch(self, request, article, *args, **kwargs):
        return super(Create, self).dispatch(request, article, *args, **kwargs)

    def get_form(self, form_class):
        """
        Returns an instance of the form to be used in this view.
        """
        kwargs = self.get_form_kwargs()
        initial = kwargs.get('initial', {})
        # Allow pre-filling the slug via ?slug=... in the URL.
        initial['slug'] = self.request.GET.get('slug', None)
        kwargs['initial'] = initial
        form = form_class(self.request, self.urlpath, **kwargs)
        form.fields['slug'].widget = forms.TextInputPrepend(
            prepend='/'+self.urlpath.path,
            attrs={
                # Make patterns force lowercase if we are case insensitive to bless the user with a
                # bit of strictness, anyways
                'pattern': '[a-z0-9_]+' if not settings.URL_CASE_SENSITIVE else '[a-zA-Z0-9_]+',
                'title': 'Lowercase letters, numbers, and underscores' if not settings.URL_CASE_SENSITIVE else 'Letters, numbers, and underscores',
            }
        )
        return form

    def form_valid(self, form):
        # Record the author and, depending on settings, the request IP for
        # the new revision's audit trail.
        user=None
        ip_address = None
        if not self.request.user.is_anonymous():
            user = self.request.user
            if settings.LOG_IPS_USERS:
                ip_address = self.request.META.get('REMOTE_ADDR', None)
        elif settings.LOG_IPS_ANONYMOUS:
            ip_address = self.request.META.get('REMOTE_ADDR', None)
        try:
            # New article inherits its permission settings from the parent.
            self.newpath = models.URLPath.create_article(
                self.urlpath,
                form.cleaned_data['slug'],
                title=form.cleaned_data['title'],
                content=form.cleaned_data['content'],
                user_message=form.cleaned_data['summary'],
                user=user,
                ip_address=ip_address,
                article_kwargs={'owner': user,
                                'group': self.article.group,
                                'group_read': self.article.group_read,
                                'group_write': self.article.group_write,
                                'other_read': self.article.other_read,
                                'other_write': self.article.other_write,
                                })
            messages.success(self.request, _("New article '%s' created.") % self.newpath.article.current_revision.title)
            transaction.commit()
        # TODO: Handle individual exceptions better and give good feedback.
        except Exception as e:
            log.exception("Exception creating article.")
            # Roll back the partial creation before reporting the error.
            transaction.rollback()
            if self.request.user.is_superuser:
                messages.error(self.request, _("There was an error creating this article: %s") % str(e))
            else:
                messages.error(self.request, _("There was an error creating this article."))
            return redirect('wiki:get', '')

        # NOTE(review): get_success_url() here returns an HttpResponse
        # (redirect(...)) rather than a URL string, so it is returned
        # directly instead of being wrapped -- confirm this is intentional.
        url = self.get_success_url()
        return url

    def get_success_url(self):
        return redirect('wiki:get', self.newpath.path)

    def get_context_data(self, **kwargs):
        c = ArticleMixin.get_context_data(self, **kwargs)
        c['parent_urlpath'] = self.urlpath
        c['parent_article'] = self.article
        c['create_form'] = kwargs.pop('form', None)
        c['editor'] = editors.getEditor()
        return c
class Delete(FormView, ArticleMixin):
form_class = forms.DeleteForm
template_name="wiki/delete.html"
@method_decorator(login_required)
@method_decorator(get_article(can_write=True, not_locked=True, can_delete=True))
def dispatch(self, request, article, *args, **kwargs):
return self.dispatch1(request, article, *args, **kwargs)
def dispatch1(self, request, article, *args, **kwargs):
"""Deleted view needs to access this method without a decorator,
therefore it is separate."""
urlpath = kwargs.get('urlpath', None)
# Where to go after deletion...
self.next = ""
self.cannot_delete_root = False
if urlpath and urlpath.parent:
self.next = reverse('wiki:get', kwargs={'path': urlpath.parent.path})
elif urlpath:
# We are a urlpath with no parent. This is the root
self.cannot_delete_root = True
else:
# We have no urlpath. Get it if a urlpath exists
for art_obj in article.articleforobject_set.filter(is_mptt=True):
if art_obj.content_object.parent:
self.next = reverse('wiki:get', kwargs={'article_id': art_obj.content_object.parent.article.id})
else:
self.cannot_delete_root = True
return super(Delete, self).dispatch(request, article, *args, **kwargs)
def get_initial(self):
return {'revision': self.article.current_revision}
def get_form(self, form_class):
form = super(Delete, self).get_form(form_class)
if self.article.can_moderate(self.request.user):
form.fields['purge'].widget = forms.forms.CheckboxInput()
return form
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs['article'] = self.article
kwargs['has_children'] = bool(self.children_slice)
return kwargs
def form_valid(self, form):
cd = form.cleaned_data
purge = cd['purge']
#If we are purging, only moderators can delete articles with children
cannot_delete_children = False
can_moderate = self.article.can_moderate(self.request.user)
if purge and self.children_slice and not can_moderate:
cannot_delete_children = True
if self.cannot_delete_root or cannot_delete_children:
messages.error(self.request, _('This article cannot be deleted because it has children or is a root article.'))
return redirect('wiki:get', article_id=self.article.id)
if can_moderate and purge:
# First, remove children
if self.urlpath:
self.urlpath.delete_subtree()
self.article.delete()
messages.success(self.request, _('This article together with all its contents are now completely gone! Thanks!'))
else:
revision = models.ArticleRevision()
revision.inherit_predecessor(self.article)
revision.set_from_request(self.request)
revision.deleted = True
self.article.add_revision(revision)
messages.success(self.request, _('The article "%s" is now marked as deleted! Thanks for keeping the site free from unwanted material!') % revision.title)
return self.get_success_url()
    def get_success_url(self):
        # NOTE(review): returns an HttpResponse (redirect), not a URL string;
        # form_valid() returns this value directly, so it works as written.
        return redirect(self.next)
def get_context_data(self, **kwargs):
cannot_delete_children = False
if self.children_slice and not self.article.can_moderate(self.request.user):
cannot_delete_children = True
kwargs['delete_form'] = kwargs.pop('form', None)
kwargs['cannot_delete_root'] = self.cannot_delete_root
kwargs['delete_children'] = self.children_slice[:20]
kwargs['delete_children_more'] = len(self.children_slice) > 20
kwargs['cannot_delete_children'] = cannot_delete_children
return super(Delete, self).get_context_data(**kwargs)
class Edit(FormView, ArticleMixin):
    """Edit an article and process sidebar plugins."""
    form_class = forms.EditForm
    template_name="wiki/edit.html"
    @method_decorator(login_required)
    @method_decorator(get_article(can_write=True, not_locked=True))
    def dispatch(self, request, article, *args, **kwargs):
        # Sidebar plugin forms are instantiated per-request in get()/post().
        self.sidebar_plugins = plugin_registry.get_sidebar()
        self.sidebar = []
        return super(Edit, self).dispatch(request, article, *args, **kwargs)
    def get_initial(self):
        # Restore any unsaved title/content previously stashed in the session
        # (post() stores these when a sidebar form is saved mid-edit).
        initial = FormView.get_initial(self)
        for field_name in ['title', 'content']:
            session_key = 'unsaved_article_%s_%d' % (field_name, self.article.id)
            if session_key in self.request.session.keys():
                content = self.request.session[session_key]
                initial[field_name] = content
                # One-shot restore: drop the stash after use.
                del self.request.session[session_key]
        return initial
    def get_form(self, form_class):
        """
        Checks from querystring data that the edit form is actually being saved,
        otherwise removes the 'data' and 'files' kwargs from form initialisation.
        """
        kwargs = self.get_form_kwargs()
        if self.request.POST.get('save', '') != '1' and self.request.POST.get('preview') != '1':
            # Not a save/preview submit (e.g. a sidebar-form POST): build the
            # edit form unbound so it neither validates nor shows errors.
            kwargs['data'] = None
            kwargs['files'] = None
            kwargs['no_clean'] = True
        return form_class(self.request, self.article.current_revision, **kwargs)
    def get_sidebar_form_classes(self):
        """Returns dictionary of form classes for the sidebar. If no form class is
        specified, puts None in dictionary. Keys in the dictionary are used
        to identify which form is being saved."""
        form_classes = {}
        for cnt, plugin in enumerate(self.sidebar_plugins):
            form_classes['form%d' % cnt] = (plugin, plugin.sidebar.get('form_class', None))
        return form_classes
    def get(self, request, *args, **kwargs):
        # Generate sidebar forms
        self.sidebar_forms = []
        for form_id, (plugin, Form) in self.get_sidebar_form_classes().items():
            if Form:
                form = Form(self.article, self.request.user)
                setattr(form, 'form_id', form_id)
            else:
                form = None
            self.sidebar.append((plugin, form))
        return super(Edit, self).get(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        # Generate sidebar forms
        self.sidebar_forms = []
        for form_id, (plugin, Form) in self.get_sidebar_form_classes().items():
            if Form:
                # '?f=formN' in the querystring identifies the sidebar form
                # that was actually submitted.
                if form_id == self.request.GET.get('f', None):
                    form = Form(self.article, self.request, data=self.request.POST, files=self.request.FILES)
                    if form.is_valid():
                        form.save()
                        usermessage = form.get_usermessage()
                        if usermessage:
                            messages.success(self.request, usermessage)
                        else:
                            messages.success(self.request, _('Your changes were saved.'))
                        # Stash any unsaved edit-form text in the session so
                        # get_initial() can restore it after the redirect.
                        title = form.cleaned_data['unsaved_article_title']
                        content = form.cleaned_data['unsaved_article_content']
                        if title != self.article.current_revision.title or content != self.article.current_revision.content:
                            request.session['unsaved_article_title_%d' % self.article.id] = title
                            request.session['unsaved_article_content_%d' % self.article.id] = content
                            messages.warning(request, _('Please note that your article text has not yet been saved!'))
                        if self.urlpath:
                            return redirect('wiki:edit', path=self.urlpath.path)
                        return redirect('wiki:edit', article_id=self.article.id)
                else:
                    form = Form(self.article, self.request)
                    setattr(form, 'form_id', form_id)
            else:
                form = None
            self.sidebar.append((plugin, form))
        return super(Edit, self).post(request, *args, **kwargs)
    def form_valid(self, form):
        """Create a new article revision when the edit form is valid
        (does not concern any sidebar forms!)."""
        revision = models.ArticleRevision()
        revision.inherit_predecessor(self.article)
        revision.title = form.cleaned_data['title']
        revision.content = form.cleaned_data['content']
        revision.user_message = form.cleaned_data['summary']
        revision.deleted = False
        revision.set_from_request(self.request)
        self.article.add_revision(revision)
        messages.success(self.request, _('A new revision of the article was succesfully added.'))
        return self.get_success_url()
    def get_success_url(self):
        """Go to the article view page when the article has been saved"""
        if self.urlpath:
            return redirect("wiki:get", path=self.urlpath.path)
        return redirect('wiki:get', article_id=self.article.id)
    def get_context_data(self, **kwargs):
        kwargs['edit_form'] = kwargs.pop('form', None)
        kwargs['editor'] = editors.getEditor()
        kwargs['selected_tab'] = 'edit'
        kwargs['sidebar'] = self.sidebar
        return super(Edit, self).get_context_data(**kwargs)
class Deleted(Delete):
    """Tell a user that an article has been deleted. If user has permissions,
    let user restore and possibly purge the deleted article and children."""
    template_name="wiki/deleted.html"
    form_class = forms.DeleteForm
    @method_decorator(login_required)
    @method_decorator(get_article(can_read=True, deleted_contents=True))
    def dispatch(self, request, article, *args, **kwargs):
        self.urlpath = kwargs.get('urlpath', None)
        self.article = article
        if self.urlpath:
            deleted_ancestor = self.urlpath.first_deleted_ancestor()
            if deleted_ancestor is None:
                # No one is deleted!
                return redirect('wiki:get', path=self.urlpath.path)
            elif deleted_ancestor != self.urlpath:
                # An ancestor was deleted, so redirect to that deleted page
                return redirect('wiki:deleted', path=deleted_ancestor.path)
        else:
            if not article.current_revision.deleted:
                return redirect('wiki:get', article_id=article.id)
        # Restore: '?restore' in the querystring undeletes the article by
        # appending a new revision with deleted=False.
        if request.GET.get('restore', False):
            can_restore = not article.current_revision.locked and article.can_delete(request.user)
            can_restore = can_restore or article.can_moderate(request.user)
            if can_restore:
                revision = models.ArticleRevision()
                revision.inherit_predecessor(self.article)
                revision.set_from_request(request)
                revision.deleted = False
                revision.automatic_log = _('Restoring article')
                self.article.add_revision(revision)
                messages.success(request, _('The article "%s" and its children are now restored.') % revision.title)
                if self.urlpath:
                    return redirect('wiki:get', path=self.urlpath.path)
                else:
                    return redirect('wiki:get', article_id=article.id)
        # BUG FIX: previously called the nonexistent method 'dispatch1',
        # which raised AttributeError instead of rendering the page.
        return super(Deleted, self).dispatch(request, article, *args, **kwargs)
    def get_initial(self):
        # Purge defaults to True here (unlike Delete) since the article is
        # already soft-deleted.
        return {'revision': self.article.current_revision,
                'purge': True}
    def get_form(self, form_class):
        # NOTE(review): super(Delete, ...) deliberately skips Delete.get_form,
        # so the moderator-only purge-widget tweak is bypassed — confirm intended.
        form = super(Delete, self).get_form(form_class)
        return form
    def get_context_data(self, **kwargs):
        kwargs['purge_form'] = kwargs.pop('form', None)
        # NOTE(review): super(Delete, ...) also skips Delete.get_context_data.
        return super(Delete, self).get_context_data(**kwargs)
class Source(ArticleMixin, TemplateView):
    """Display an article's raw source markup."""
    template_name = "wiki/source.html"
    @method_decorator(login_required)
    @method_decorator(get_article(can_read=True))
    def dispatch(self, request, article, *args, **kwargs):
        return super(Source, self).dispatch(request, article, *args, **kwargs)
    def get_context_data(self, **kwargs):
        kwargs.update(selected_tab='source')
        return ArticleMixin.get_context_data(self, **kwargs)
class History(ListView, ArticleMixin):
    """Paginated listing of an article's revisions, newest first."""
    template_name = "wiki/history.html"
    allow_empty = True
    context_object_name = 'revisions'
    paginate_by = 10
    def get_queryset(self):
        revisions = models.ArticleRevision.objects.filter(article=self.article)
        return revisions.order_by('-created')
    def get_context_data(self, **kwargs):
        # Merge context from both parents (article mixin first, then list view).
        for extra_context in (ArticleMixin.get_context_data(self, **kwargs),
                              ListView.get_context_data(self, **kwargs)):
            kwargs.update(extra_context)
        kwargs['selected_tab'] = 'history'
        return kwargs
    @method_decorator(login_required)
    @method_decorator(get_article(can_read=True))
    def dispatch(self, request, article, *args, **kwargs):
        return super(History, self).dispatch(request, article, *args, **kwargs)
class Dir(ListView, ArticleMixin):
    """Directory listing of an article's child pages, with an optional
    title/slug filter taken from the querystring."""
    template_name="wiki/dir.html"
    allow_empty = True
    context_object_name = 'directory'
    model = models.URLPath
    paginate_by = 30
    @method_decorator(login_required)
    @method_decorator(get_article(can_read=True))
    def dispatch(self, request, article, *args, **kwargs):
        self.filter_form = forms.DirFilterForm(request.GET)
        if self.filter_form.is_valid():
            self.query = self.filter_form.cleaned_data['query']
        else:
            self.query = None
        return super(Dir, self).dispatch(request, article, *args, **kwargs)
    def get_queryset(self):
        children = self.urlpath.get_children().can_read(self.request.user)
        if self.query:
            # Match either the article title or the URL slug.
            children = children.filter(Q(article__current_revision__title__contains=self.query) |
                                       Q(slug__contains=self.query))
        if not self.article.can_moderate(self.request.user):
            # Non-moderators only see active (non-deleted) children.
            children = children.active()
        children = children.select_related_common().order_by('article__current_revision__title')
        return children
    def get_context_data(self, **kwargs):
        kwargs_article = ArticleMixin.get_context_data(self, **kwargs)
        kwargs_listview = ListView.get_context_data(self, **kwargs)
        kwargs.update(kwargs_article)
        kwargs.update(kwargs_listview)
        kwargs['filter_query'] = self.query
        kwargs['filter_form'] = self.filter_form
        # Update each child's ancestor cache so the lookups don't have
        # to be repeated.
        updated_children = kwargs[self.context_object_name]
        for child in updated_children:
            child.set_cached_ancestors_from_parent(self.urlpath)
        kwargs[self.context_object_name] = updated_children
        return kwargs
class SearchView(ListView):
    """Case-insensitive search over article titles and content."""
    template_name="wiki/search.html"
    paginate_by = 25
    context_object_name = "articles"
    def dispatch(self, request, *args, **kwargs):
        # Do not allow anonymous users to search if they cannot read content
        if request.user.is_anonymous() and not settings.ANONYMOUS:
            return redirect(settings.LOGIN_URL)
        self.search_form = forms.SearchForm(request.GET)
        if self.search_form.is_valid():
            self.query = self.search_form.cleaned_data['q']
        else:
            self.query = None
        return super(SearchView, self).dispatch(request, *args, **kwargs)
    def get_queryset(self):
        if not self.query:
            return models.Article.objects.get_empty_query_set()
        articles = models.Article.objects.filter(Q(current_revision__title__icontains=self.query) |
                                                 Q(current_revision__content__icontains=self.query))
        if not permissions.can_moderate(models.URLPath.root().article, self.request.user):
            # Non-moderators only see active articles they may read.
            articles = articles.active().can_read(self.request.user)
        return articles
    def get_context_data(self, **kwargs):
        kwargs = ListView.get_context_data(self, **kwargs)
        kwargs['search_form'] = self.search_form
        kwargs['search_query'] = self.query
        return kwargs
class Plugin(View):
    """Dispatch a request to the registered article plugin whose slug matches."""
    def dispatch(self, request, path=None, slug=None, **kwargs):
        kwargs['path'] = path
        registered = plugin_registry.get_plugins().values()
        matches = [p for p in registered if getattr(p, 'slug', None) == slug]
        if matches:
            return matches[0].article_view(request, **kwargs)
        raise Http404()
class Settings(ArticleMixin, TemplateView):
    """Display and process per-article settings forms: the permissions form
    plus any forms contributed by plugins."""
    permission_form_class = forms.PermissionsForm
    template_name="wiki/settings.html"
    @method_decorator(login_required)
    @method_decorator(get_article(can_read=True))
    def dispatch(self, request, article, *args, **kwargs):
        return super(Settings, self).dispatch(request, article, *args, **kwargs)
    def get_form_classes(self,):
        """
        Return all settings forms that can be filled in
        """
        settings_forms = []
        if permissions.can_change_permissions(self.article, self.request.user):
            settings_forms.append(self.permission_form_class)
        plugin_forms = [F for F in plugin_registry.get_settings_forms()]
        plugin_forms.sort(key=lambda form: form.settings_order)
        settings_forms += plugin_forms
        for i in range(len(settings_forms)):
            # TODO: Do not set an attribute on a form class - this
            # could be mixed up with a different instance
            # Use strategy from Edit view...
            setattr(settings_forms[i], 'action', 'form%d' % i)
        return settings_forms
    def post(self, *args, **kwargs):
        # '?f=formN' identifies which settings form was submitted.
        self.forms = []
        for Form in self.get_form_classes():
            if Form.action == self.request.GET.get('f', None):
                form = Form(self.article, self.request, self.request.POST)
                if form.is_valid():
                    form.save()
                    usermessage = form.get_usermessage()
                    if usermessage:
                        messages.success(self.request, usermessage)
                    if self.urlpath:
                        return redirect('wiki:settings', path=self.urlpath.path)
                    return redirect('wiki:settings', article_id=self.article.id)
            else:
                form = Form(self.article, self.request)
            self.forms.append(form)
        # Fall through to GET rendering (e.g. when the posted form was invalid).
        return super(Settings, self).get(*args, **kwargs)
    def get(self, *args, **kwargs):
        self.forms = []
        # There is a bug where articles fetched with select_related have bad boolean field https://code.djangoproject.com/ticket/15040
        # We fetch a fresh new article for this reason
        new_article = models.Article.objects.get(id=self.article.id)
        for Form in self.get_form_classes():
            self.forms.append(Form(new_article, self.request))
        return super(Settings, self).get(*args, **kwargs)
    def get_success_url(self):
        # NOTE(review): returns an HttpResponse (redirect), not a URL string.
        if self.urlpath:
            return redirect('wiki:settings', path=self.urlpath.path)
        return redirect('wiki:settings', article_id=self.article.id)
    def get_context_data(self, **kwargs):
        kwargs['selected_tab'] = 'settings'
        kwargs['forms'] = self.forms
        return super(Settings, self).get_context_data(**kwargs)
class ChangeRevisionView(RedirectView):
    """Set an article's current revision to the one named in the URL, then
    redirect back to its history page."""
    @method_decorator(get_article(can_write=True, not_locked=True))
    def dispatch(self, request, article, *args, **kwargs):
        self.article = article
        # BUG FIX: previously popped the nonexistent key 'kwargs', so
        # self.urlpath was always False and the path-based redirect never fired.
        self.urlpath = kwargs.pop('urlpath', False)
        self.change_revision()
        return super(ChangeRevisionView, self).dispatch(request, *args, **kwargs)
    def get_redirect_url(self, **kwargs):
        """Return the history URL, path-based when a urlpath is available."""
        if self.urlpath:
            return reverse("wiki:history", kwargs={'path': self.urlpath.path})
        else:
            return reverse('wiki:history', kwargs={'article_id': self.article.id})
    def change_revision(self):
        """Point the article at the revision given by the 'revision_id' URL kwarg."""
        revision = get_object_or_404(models.ArticleRevision, article=self.article, id=self.kwargs['revision_id'])
        self.article.current_revision = revision
        self.article.save()
        messages.success(self.request, _("The article %(title)s is now set to display revision #%(revision_number)d") % {
            'title': revision.title,
            'revision_number': revision.revision_number,
        })
class Preview(ArticleMixin, TemplateView):
    """Render a preview of article content, either from POSTed edit-form data
    or from an existing revision selected via '?r=<id>'."""
    template_name="wiki/preview_inline.html"
    @method_decorator(get_article(can_read=True, deleted_contents=True))
    def dispatch(self, request, article, *args, **kwargs):
        revision_id = request.GET.get('r', None)
        self.title = None
        self.content = None
        self.preview = False
        if revision_id:
            self.revision = get_object_or_404(models.ArticleRevision, article=article, id=revision_id)
        else:
            self.revision = None
        return super(Preview, self).dispatch(request, article, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        # Validate the posted edit form in preview mode, then render via get().
        edit_form = forms.EditForm(request, self.article.current_revision, request.POST, preview=True)
        if edit_form.is_valid():
            self.title = edit_form.cleaned_data['title']
            self.content = edit_form.cleaned_data['content']
            self.preview = True
        return super(Preview, self).get(request, *args, **kwargs)
    def get(self, request, *args, **kwargs):
        # Fall back to the selected revision's fields when nothing was posted.
        if self.revision and not self.title:
            self.title = self.revision.title
        if self.revision and not self.content:
            self.content = self.revision.content
        return super(Preview, self).get( request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        kwargs['title'] = self.title
        kwargs['revision'] = self.revision
        kwargs['content'] = self.content
        kwargs['preview'] = self.preview
        return ArticleMixin.get_context_data(self, **kwargs)
@json_view
def diff(request, revision_id, other_revision_id=None):
    """Return a JSON dict diffing *revision_id* against its previous revision.

    NOTE(review): *other_revision_id* is accepted but never used to fetch a
    revision; if a truthy value is passed, 'other_revision' is unbound below
    (NameError) — confirm whether callers ever pass it.
    """
    revision = get_object_or_404(models.ArticleRevision, id=revision_id)
    if not other_revision_id:
        other_revision = revision.previous_revision
    baseText = other_revision.content if other_revision else ""
    newText = revision.content
    differ = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
    # splitlines(1) keeps line terminators so the diff reflects exact content.
    diff = differ.compare(baseText.splitlines(1), newText.splitlines(1))
    other_changes = []
    if not other_revision or other_revision.title != revision.title:
        other_changes.append((_('New title'), revision.title))
    return dict(diff=list(diff), other_changes=other_changes)
# TODO: Throw in a class-based view
@get_article(can_write=True)
def merge(request, article, revision_id, urlpath=None, template_file="wiki/preview_inline.html", preview=False):
    """Merge revision *revision_id* into the article's current revision.

    With preview=True only renders the merged content; otherwise a new
    revision recording the merge is created and saved.
    """
    revision = get_object_or_404(models.ArticleRevision, article=article, id=revision_id)
    current_text = article.current_revision.content if article.current_revision else ""
    new_text = revision.content
    content = simple_merge(current_text, new_text)
    # Save new revision
    if not preview:
        old_revision = article.current_revision
        if revision.deleted:
            c = RequestContext(request, {'error_msg': _('You cannot merge with a deleted revision'),
                                         'article': article,
                                         'urlpath': urlpath})
            return render_to_response("wiki/error.html", context_instance=c)
        new_revision = models.ArticleRevision()
        new_revision.inherit_predecessor(article)
        new_revision.deleted = False
        new_revision.locked = False
        new_revision.title=article.current_revision.title
        new_revision.content=content
        new_revision.automatic_log = (_('Merge between revision #%(r1)d and revision #%(r2)d') %
                                      {'r1': revision.revision_number,
                                       'r2': old_revision.revision_number})
        article.add_revision(new_revision, save=True)
        # Re-point plugin objects from both source revisions at the merge result.
        old_revision.simpleplugin_set.all().update(article_revision=new_revision)
        revision.simpleplugin_set.all().update(article_revision=new_revision)
        messages.success(request, _('A new revision was created: Merge between revision #%(r1)d and revision #%(r2)d') %
                         {'r1': revision.revision_number,
                          'r2': old_revision.revision_number})
        if urlpath:
            return redirect('wiki:edit', path=urlpath.path)
        else:
            return redirect('wiki:edit', article_id=article.id)
    # Preview path: render the merged content without saving anything.
    c = RequestContext(request, {'article': article,
                                 'title': article.current_revision.title,
                                 'revision': None,
                                 'merge1': revision,
                                 'merge2': article.current_revision,
                                 'merge': True,
                                 'content': content})
    return render_to_response(template_file, context_instance=c)
class CreateRootView(FormView):
    """Superuser-only view that creates the wiki's root article."""
    form_class = forms.CreateRootForm
    template_name = 'wiki/create_root.html'
    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_superuser:
            return redirect("wiki:root_missing")
        try:
            root = models.URLPath.root()
        except NoRootURL:
            # No root path yet: fall through and show the creation form.
            pass
        else:
            if root.article:
                return redirect('wiki:get', path=root.path)
            # TODO: This is too dangerous... let's say there is no root.article and we end up here,
            # then it might cascade to delete a lot of things on an existing installation.... / benjaoming
            root.delete()
        return super(CreateRootView, self).dispatch(request, *args, **kwargs)
    def form_valid(self, form):
        """Create the root URLPath/article from the validated form data."""
        models.URLPath.create_root(
            title=form.cleaned_data["title"],
            content=form.cleaned_data["content"],
            request=self.request
        )
        return redirect("wiki:root")
    def get_context_data(self, **kwargs):
        data = super(CreateRootView, self).get_context_data(**kwargs)
        data['editor'] = editors.getEditor()
        return data
class MissingRootView(TemplateView):
    # Target of the 'wiki:root_missing' redirect used by CreateRootView for
    # non-superusers when no root article exists yet.
    template_name = 'wiki/root_missing.html'
|
Northshoot/maestro
|
refs/heads/master
|
client/request_sender.py
|
1
|
#!/usr/bin/python
'''
Created on 1 may 2013
Script that send a simulation request to a CCMD server
Use this python program for the Benchmark oriented client
@author: Laurynas Riliskis
'''
import socket
from threading import Lock
import sys
CCMD_PORT = 22087
DEFAULT_CONFIGURATION_FILES_DIRECTORY = 'cfgFiles' # directory where the configuration files are when a benchmark oriented simulation is done
# The large-scale simulation client runs multiple threads; a lock was once
# used to serialize output (see the disabled acquire/release below).
def log(txt, print_lock):
    """Print *txt*; *print_lock* is accepted for API compatibility but unused
    (the locking calls were commented out)."""
    print(txt)
def abort(msg):
    """Print a fatal-error banner containing *msg* and exit the process."""
    banner = '!' * 80
    print(banner)
    print("FATAL ERROR: %s \nCan't proceed exiting!" % msg)
    print(banner)
    sys.exit()
def getFileData(file_name):
    """Read the whole of *file_name*.

    Returns (contents, None) on success, (None, error_message) on failure.
    Uses a context manager so the handle is closed even if the read itself
    raises (the original leaked the handle in that case).
    """
    try:
        with open(file_name, 'r') as file_instances:
            return file_instances.read(), None
    except Exception as e:
        error = "getFileData: cant open file %s error: %s" % (file_name, e)
        return None, error
def sendDataAndRxAck(mySocket, msg):
    """Send *msg* and block for the server's reply (its ACK).

    Returns (response, None) on success, (None, error_message) on failure.
    """
    try:
        mySocket.send(msg)
        # Wait for the ACK of the CCMD to clean the content of the socket.
        reply = mySocket.recv(1024)
    except Exception as e:
        return None, "sendDataAndRxAck: error communicating %s " % e
    return reply, None
def sendData(mySocket, msg):
    """Fire-and-forget send (no ACK expected).

    Returns (None, None) on success and (None, error_message) on failure,
    matching sendDataAndRxAck's shape. BUG FIX: the original returned nothing
    on success, so callers unpacking the result crashed on the success path.
    """
    try:
        mySocket.send(msg)
        return None, None
    except Exception as e:
        error = "sendData: error communicating %s " % e
        return None, error
# Send a simulation request to the address CCMD_HOSTNAME with the configuration files in the
# directory cfgFiles, the simulation will have the id simulation_number
# the lock is used to clean the output while using multi thread
def send_simulation_request(args):
    # args is a single 3-tuple (cfg dir, simulation id, host) so this function
    # can be mapped over by a thread/process pool.
    cfgFiles, simulation_number, CCMD_HOSTNAME = args
    # Placeholder "lock" passed to log(); real locking is disabled in log().
    print_lock = "___"
    log('simulation ' + str(simulation_number) + ' ==> simulation is starting', print_lock)
    # configuration files that will be sent to the CCMD
    config_file_for_instances = cfgFiles + '/instances.cfg'
    config_file_for_commands = cfgFiles + '/commands.cfg'
    config_file_for_performance_monitoring = cfgFiles + '/performance_monitoring.cfg'
    address_CCMD = (CCMD_HOSTNAME, CCMD_PORT)
    try:
        mySocket = socket.socket()
        mySocket.connect(address_CCMD)
        # Phase 1: send the instances configuration and await its ACK.
        strg, error = getFileData(config_file_for_instances)
        if error:
            abort(error)
        log("%d -- SENDING : %s" %(simulation_number,config_file_for_instances),print_lock)
        rx, error = sendDataAndRxAck(mySocket,strg)
        if error:
            abort(error)
        if 'instances cfg received' in rx:
            rx = mySocket.recv(1024)
        else:
            abort("Wrong response from master %s" %rx)
        # Phase 2: ACK each 'start_instance@' notification until the server
        # announces 'NOTE@Started'.
        all_started = False
        try:
            while not all_started:
                if 'NOTE@Started' in rx :
                    #log('simulation ' + str(simulation_number) + ' ==> SERVER >>' + rx, print_lock)
                    rx, error = sendDataAndRxAck(mySocket, 'ACK')
                    #log('*'*80,print_lock)
                    all_started = True
                elif "start_instance@" in rx:
                    #log("instances Started %s" %rx)
                    rx, error = sendDataAndRxAck(mySocket, "ACK")
                else:
                    log("RX :: ERROR--got %s" %strg, print_lock)
        except Exception as e:
            abort(e)
        strg=rx
        if error:
            abort(error)
        elif strg != 'Ready for CMD':
            abort("wrong response: %s" % strg)
        else:
            log(strg,print_lock)
        # Phase 3: send the commands configuration; expect 'ACK@CMD'.
        strg, error = getFileData(config_file_for_commands)
        if error:
            abort(error)
        log("Sending: " + config_file_for_commands,print_lock)
        log('*'*80,print_lock)
        rx , error = sendDataAndRxAck(mySocket, "COMMANDS@%s" %strg)
        if error:
            abort(error)
        data = rx.split('@')
        if data[0] != 'ACK' and data[1] != 'CMD':
            abort("Error in ACK, got: %s :: %s" %(data[0],data[1]))
        log('simulation ' + str(simulation_number) + ' ==> SERVER >>' + rx, print_lock)
        # Phase 4: send the monitoring configuration; expect 'ACK@MONITORING'.
        strg, error = getFileData(config_file_for_performance_monitoring)
        if error:
            abort(error)
        log("Sending: " + config_file_for_performance_monitoring,print_lock)
        rx , error = sendDataAndRxAck(mySocket, "MONITORING@%s" %strg)
        if error:
            abort(error)
        data = rx.split('@')
        if data[0] != 'ACK' and data[1] != 'MONITORING':
            abort("Error in ACK, got: %s :: %s" %(data[0],data[1]))
        log('simulation ' + str(simulation_number) + ' ==> SERVER >>' + rx, print_lock)
        # Final status message from the server before closing.
        rx = mySocket.recv(1024)
        log(rx,print_lock)
        log('simulation ' + str(simulation_number) + ' ==> simulation has been started, performance and output results will be dumped in EC2 server ' + str(address_CCMD[0]), print_lock)
        mySocket.close()
    except Exception as e:
        print "******** Line 86 *********"
        print e
    return "Simulation Ended"
if __name__ == '__main__':
    # BUG FIX: send_simulation_request takes a single 3-tuple
    # (cfg_dir, simulation_number, host). The old call passed two positional
    # arguments ('cfgFiles', Lock()) and raised TypeError immediately.
    host = sys.argv[1] if len(sys.argv) > 1 else 'localhost'
    send_simulation_request((DEFAULT_CONFIGURATION_FILES_DIRECTORY, 0, host))
|
gskachkov/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/unittestresults.py
|
155
|
# Copyright (c) 2012, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import xml.dom.minidom
_log = logging.getLogger(__name__)
class UnitTestResults(object):
    """Parser for JUnit-style XML unit-test output."""
    @classmethod
    def results_from_string(cls, string):
        """Return a list of 'classname.testname' strings, one per failed
        <testcase>, or None for empty or unparseable input.

        Fixes: the classmethod's first parameter was misleadingly named
        'self'; 'except X, e' was Python-2-only syntax — 'as e' is valid on
        both Python 2.6+ and 3.
        """
        if not string:
            return None
        try:
            dom = xml.dom.minidom.parseString(string)
            failures = []
            for testcase in dom.getElementsByTagName('testcase'):
                # A testcase with at least one <failure> child failed.
                if testcase.getElementsByTagName('failure').length != 0:
                    testname = testcase.getAttribute('name')
                    classname = testcase.getAttribute('classname')
                    failures.append("%s.%s" % (classname, testname))
            return failures
        except xml.parsers.expat.ExpatError as e:
            _log.error("XML error %s parsing unit test output" % str(e))
            return None
|
mattattack7/canvas-contrib
|
refs/heads/master
|
API_Examples/import_outcomes/python/outcomes_importer.py
|
4
|
#!/usr/bin/env python
domain = "<yourdomain>.instructure.com"
token = "<token>"
####################################################################################################
####################################################################################################
############### Don't edit anything after this point unless you know what you
############### are doing. You may know what you are doing, I don't know, but be aware that
############### everything past this point is breakable. You know the "You break it
############### you buy it" kind of thing.
####################################################################################################
####################################################################################################
import requests,json
import argparse
import sys,os
import csv
import pprint
def get_headers():
    """Authorization header dict used on every Canvas API call."""
    return {'Authorization': 'Bearer {0}'.format(token)}
# In-memory caches keyed by vendor_guid, to avoid repeated API lookups.
vendor_guid_cache = {'outcome_groups':{},'outcomes':{}}
def checkFileReturnCSVReader(file_name):
    """Return a csv.reader over *file_name*, or None when it doesn't exist."""
    if not (file_name and os.path.exists(file_name)):
        return None
    # NOTE(review): the handle stays open for the reader's lifetime.
    return csv.reader(open(file_name, 'rU'))
def getRootOutcomeGroup():
    """Fetch the account's root outcome group from the Canvas API."""
    url = "https://%s/api/v1/accounts/self/root_outcome_group" % domain
    #print 'url',url
    # NOTE(review): verify=False disables TLS certificate validation.
    return requests.get(url,headers=get_headers(),verify=False).json()
def paginated_outcomes(outcome_group_vendor_id=None):
    """Yield every outcome in the given group, following API pagination and
    caching each outcome by its vendor_guid."""
    # Get outcomes
    all_done = False
    url = 'https://{0}/api/v1/accounts/self/outcome_groups/{1}/outcomes'.format(domain,outcome_group_vendor_id)
    while not all_done:
        response = requests.get(url,headers=get_headers())
        for s in response.json():
            # Canvas wraps each entry: {'outcome': {...}}.
            outcome = s['outcome']
            vendor_guid_cache['outcomes'].setdefault(outcome['vendor_guid'],outcome)
            yield outcome
        if 'next' in response.links:
            url = response.links['next']['url']
        else:
            all_done = True
def paginated_outcome_groups():
    """Yield every outcome group in the account, following API pagination.
    Each group is also cached in vendor_guid_cache by its vendor_guid."""
    # Get outcome groups
    all_done = False
    url = 'https://%s/api/v1/accounts/self/outcome_groups' % (domain)
    while not all_done:
        response = requests.get(url, headers=get_headers())
        for s in response.json():
            vendor_guid_cache['outcome_groups'].setdefault(s['vendor_guid'], s)
            yield s
        if 'next' in response.links:
            # BUG FIX: requests stores each parsed Link target under 'url',
            # not 'href' — the old code raised KeyError on the second page.
            url = response.links['next']['url']
        else:
            all_done = True
def paginated_outcome_subgroups(parent_group_id):
    """Recursively yield every subgroup under *parent_group_id*, following
    API pagination, caching each group by its vendor_guid."""
    # Get outcome subgroups (this needs to walk)
    all_done = False
    url = 'https://%s/api/v1/accounts/self/outcome_groups/%d/subgroups' % (domain,int(parent_group_id))
    while not all_done:
        response = requests.get(url,headers=get_headers())
        if not response.json():
            # Empty page: nothing (more) under this parent.
            return
        for s in response.json():
            yield s
            vendor_guid_cache['outcome_groups'].setdefault(s['vendor_guid'], s)
            # BUG FIX: the recursive walk previously cached and yielded the
            # parent 's' again for every descendant, instead of 'sg'.
            for sg in paginated_outcome_subgroups(s['id']):
                vendor_guid_cache['outcome_groups'].setdefault(sg['vendor_guid'], sg)
                yield sg
        if 'next' in response.links:
            url = response.links['next']['url']
        else:
            all_done = True
# When True, fall back to walking the subgroup tree via the API on cache miss.
do_api_for_find = True
def findOutcomeGroup(outcome_group_vendor_id):
    """Find an outcome group by vendor guid: first in the cache, then (if
    do_api_for_find) by walking all subgroups under the root group.
    Returns None when not found."""
    root_group = getRootOutcomeGroup()
    og = vendor_guid_cache['outcome_groups'].get(outcome_group_vendor_id,None)
    if do_api_for_find:
        if not og:
            for pog in paginated_outcome_subgroups(root_group['id']):
                if pog['vendor_guid'] == outcome_group_vendor_id:
                    og = pog
                    break
    return og
def deleteOutcomeGroup(outcome_group_id):
    """DELETE the given outcome group; returns the requests Response."""
    url = 'https://%s/api/v1/accounts/self/outcome_groups/%d' % (domain,outcome_group_id)
    return requests.delete(url,headers=get_headers())
def getOrCreateOutcomeGroup(outcome):
    """Return the outcome group named by outcome['outcome_group_vendor_guid'],
    creating it under its parent (or the root group) if it does not exist.

    Returns None when a parent vendor guid was given but cannot be resolved.
    BUG FIX: the parent lookup previously probed the cache with the group's
    own guid while the API fallback searched for the parent's guid; both now
    use the parent's guid.
    """
    root_group = getRootOutcomeGroup()
    outcome_group_vendor_id = outcome['outcome_group_vendor_guid']
    parent_group_id = outcome['parent_outcome_group_vendor_guid']
    og = vendor_guid_cache['outcome_groups'].get(outcome_group_vendor_id,
                                                 findOutcomeGroup(outcome_group_vendor_id))
    if not og:
        if not parent_group_id:
            # No parent guid given: create directly under the root group.
            parent_group = root_group
        else:
            parent_group = vendor_guid_cache['outcome_groups'].get(parent_group_id,
                                                                   findOutcomeGroup(parent_group_id))
        if not parent_group:
            return None
        # No outcome group was found anywhere — create it now.
        og = createOutcomeGroup(outcome, parent_group['id'])
    return og
def createOutcomeGroup(outcome,parent_id):
    """POST a new subgroup under *parent_id* and cache the API response.
    Title, description and vendor_guid are all set from the outcome's
    outcome_group_vendor_guid field."""
    vendor_guid = name = description = outcome['outcome_group_vendor_guid']
    url = 'https://%s/api/v1/accounts/self/outcome_groups/%d/subgroups' % (domain,parent_id)
    params = {'title':name,'description':description,'vendor_guid':vendor_guid}
    vendor_guid_cache['outcome_groups'][vendor_guid] = requests.post(url,data=params,headers=get_headers()).json()
    return vendor_guid_cache['outcome_groups'][vendor_guid]
def getOrCreateOutcome(outcome_to_create):
    """Return the cached outcome for outcome_to_create['vendor_guid'],
    refreshing the cache from the group's outcomes first and creating the
    outcome via the API if it is still missing."""
    if not vendor_guid_cache['outcomes'].get(outcome_to_create['vendor_guid'],None):
        # Cache miss: pull all outcomes of the group into the cache.
        for outcome in paginated_outcomes(outcome_to_create['group_id']):
            vendor_guid_cache['outcomes'][outcome['vendor_guid']] = outcome
    if not vendor_guid_cache['outcomes'].get(outcome_to_create['vendor_guid'],None):
        vendor_guid_cache['outcomes'][outcome_to_create['vendor_guid']] = createOutcome(outcome_to_create)#group_id,title,description,vendor_guid,mastery_points,ratings)
    return vendor_guid_cache['outcomes'][outcome_to_create['vendor_guid']]
def createOutcome(outcome_to_create):
    """POST a new outcome into the group given by outcome_to_create['group_id']
    and return the parsed JSON response.

    Cleanup: removed a commented-out params dict and an unused 'headers'
    local — the request already uses get_headers().
    """
    path = "/api/v1/accounts/self/outcome_groups/%s/outcomes" % outcome_to_create['group_id']
    url = 'https://%s%s' % (domain, path)
    # The whole outcome dict (including the 'ratings' list built by the main
    # block) is form-encoded by requests.
    res = requests.post(url, headers=get_headers(), data=outcome_to_create)
    return res.json()
def updateOutcome(outcome_to_update):
    """PUT an updated outcome and evict it from the cache.
    Expects a dict with 'outcome_group' and 'outcome' sub-dicts (each
    carrying at least 'id'; the outcome also needs 'vendor_guid')."""
    print 'outcome_to_update',outcome_to_update
    path = "/api/v1/accounts/self/outcome_groups/%s/outcomes/%s" % (outcome_to_update['outcome_group']['id'],outcome_to_update['outcome']['id'])
    # NOTE(review): 'headers' below is unused; the request uses get_headers().
    headers = {'Authorization':'Bearer %s'%token,'Content-Type':'application/json'}
    url = 'https://%s%s' % (domain,path)
    #data = json.dumps(outcome_to_update['outcome'])
    res = requests.put(url,headers=get_headers(),data=outcome_to_update)
    # Drop the stale cache entry so the next lookup re-fetches it.
    del(vendor_guid_cache['outcomes'][outcome_to_update['outcome']['vendor_guid']])
    return res.json()
def isValidRow(row):
    """Return True when ``row`` carries at least the 9 columns an outcome row needs."""
    return len(row) > 8
# Prepare argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--outcomesfile',required=True,help='path to the outcomes.csv file')
if __name__ == '__main__':
args = parser.parse_args()
outcomes_file = checkFileReturnCSVReader(args.outcomesfile)
if outcomes_file :
outcomes = {}
outcome_data = {}
for outcome_row in outcomes_file:
if outcome_row[0]=="vendor_guid":
# TODO need to make sure this can be a non-canvas id
outcome_data['rating_levels'] = outcome_row[5:]
#print 'rating data',outcome_data['rating_levels']
else:
# If it's not one of these, assume this is an outcome row
fields = ['vendor_guid','outcome_group_vendor_guid','parent_outcome_group_vendor_guid','title','description','calculation_method','calculation_int','mastery_points']
outcome = dict(zip(fields,outcome_row[:8]))
points_description = ['points','description']
combo = zip(outcome_data.get('rating_levels'),outcome_row[8:])
outcome['ratings'] = map(lambda x: dict(zip(points_description,x)),combo)
pprint.pprint(outcome)
og = getOrCreateOutcomeGroup(outcome)#['outcome_group'],outcome['outcome_group'],outcome['outcome_group'])
outcome['group_id'] = og['id']
if not og:
print 'OutcomeGroup not found',outcome['outcome_group']
else:
outcome['outcome_group_vendor_id'] = og['id']
print 'outcome_to_create',outcome['vendor_guid']
print "Outcome", getOrCreateOutcome(outcome)
|
rahul67/hue
|
refs/heads/master
|
apps/oozie/src/oozie/migrations/0022_auto__chg_field_mapreduce_node_ptr__chg_field_start_node_ptr.py
|
37
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the oozie app.

    Re-creates the ``node_ptr_id`` one-to-one columns of ``Mapreduce`` and
    ``Start`` so that they become primary keys on their link to ``Node``.
    """

    def forwards(self, orm):
        """Apply: alter both columns to be unique primary keys."""
        # Changing field 'Mapreduce.node_ptr'
        db.alter_column('oozie_mapreduce', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True, primary_key=True))
        # Changing field 'Start.node_ptr'
        db.alter_column('oozie_start', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True, primary_key=True))

    def backwards(self, orm):
        """Revert: drop the primary-key attribute, keeping uniqueness."""
        # Changing field 'Mapreduce.node_ptr'
        db.alter_column('oozie_mapreduce', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True))
        # Changing field 'Start.node_ptr'
        db.alter_column('oozie_start', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True))

    # Frozen ORM snapshot used by South to build the `orm` object passed to
    # forwards()/backwards().  Auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 534880)'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 534819)'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'oozie.bundle': {
            'Meta': {'object_name': 'Bundle', '_ormbases': ['oozie.Job']},
            'coordinators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['oozie.Coordinator']", 'through': "orm['oozie.BundledCoordinator']", 'symmetrical': 'False'}),
            'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
            'kick_off_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 429841)'})
        },
        'oozie.bundledcoordinator': {
            'Meta': {'object_name': 'BundledCoordinator'},
            'bundle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Bundle']"}),
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''})
        },
        'oozie.coordinator': {
            'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
            'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 31, 16, 10, 12, 427644)'}),
            'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 427612)'}),
            'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
        },
        'oozie.datainput': {
            'Meta': {'object_name': 'DataInput'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'oozie.dataoutput': {
            'Meta': {'object_name': 'DataOutput'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'oozie.dataset': {
            'Meta': {'object_name': 'Dataset'},
            'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
            'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 428249)'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
            'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
        },
        'oozie.decision': {
            'Meta': {'object_name': 'Decision'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.decisionend': {
            'Meta': {'object_name': 'DecisionEnd'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.distcp': {
            'Meta': {'object_name': 'DistCp'},
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.email': {
            'Meta': {'object_name': 'Email'},
            'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'to': ('django.db.models.fields.TextField', [], {'default': "''"})
        },
        'oozie.end': {
            'Meta': {'object_name': 'End'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.fork': {
            'Meta': {'object_name': 'Fork'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.fs': {
            'Meta': {'object_name': 'Fs'},
            'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
            'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
            'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
            'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
        },
        'oozie.generic': {
            'Meta': {'object_name': 'Generic'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
        },
        'oozie.history': {
            'Meta': {'object_name': 'History'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
            'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'properties': ('django.db.models.fields.TextField', [], {}),
            'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'oozie.hive': {
            'Meta': {'object_name': 'Hive'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'oozie.java': {
            'Meta': {'object_name': 'Java'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'args': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.job': {
            'Meta': {'object_name': 'Job'},
            'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
            'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'oozie.join': {
            'Meta': {'object_name': 'Join'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.kill': {
            'Meta': {'object_name': 'Kill'},
            'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.link': {
            'Meta': {'object_name': 'Link'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
            'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
        },
        'oozie.mapreduce': {
            'Meta': {'object_name': 'Mapreduce'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.node': {
            'Meta': {'object_name': 'Node'},
            'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
        },
        'oozie.pig': {
            'Meta': {'object_name': 'Pig'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'oozie.shell': {
            'Meta': {'object_name': 'Shell'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.sqoop': {
            'Meta': {'object_name': 'Sqoop'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
        },
        'oozie.ssh': {
            'Meta': {'object_name': 'Ssh'},
            'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'oozie.start': {
            'Meta': {'object_name': 'Start'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.streaming': {
            'Meta': {'object_name': 'Streaming'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        'oozie.subworkflow': {
            'Meta': {'object_name': 'SubWorkflow'},
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
        },
        'oozie.workflow': {
            'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
            'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
            'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'managed': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
        }
    }

    # Tell South which app's models are fully described above.
    complete_apps = ['oozie']
|
kenonelah/etherpad-lite
|
refs/heads/master
|
src/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py
|
2698
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '<test>\'"
&
foo'
converted_apos = converted.replace("'", ''')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
    # Run the XmlToString test suite when executed directly.
    unittest.main()
|
TheTypoMaster/chromium-crosswalk
|
refs/heads/master
|
tools/perf/benchmarks/thread_times.py
|
6
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from benchmarks import silk_flags
from measurements import thread_times
import page_sets
from telemetry import benchmark
class _ThreadTimes(perf_benchmark.PerfBenchmark):
    """Shared base for the thread_times.* benchmarks.

    Runs the thread_times measurement and, by default, reports only
    per-frame metrics (subclasses may override the predicate).
    """

    @classmethod
    def AddBenchmarkCommandLineArgs(cls, parser):
        # Extra flag forwarded to the measurement via CreatePageTest below.
        parser.add_option('--report-silk-details', action='store_true',
                          help='Report details relevant to silk.')

    @classmethod
    def Name(cls):
        # Benchmark id prefix shared by every subclass below.
        return 'thread_times'

    @classmethod
    def ValueCanBeAddedPredicate(cls, value, _):
        # Default to only reporting per-frame metrics.
        return 'per_second' not in value.name

    def CreatePageTest(self, options):
        # The measurement object that actually records timeline metrics.
        return thread_times.ThreadTimes(options.report_silk_details)
@benchmark.Enabled('android')
class ThreadTimesKeySilkCases(_ThreadTimes):
    """Measures timeline metrics while performing smoothness action on key silk
    cases."""
    page_set = page_sets.KeySilkCasesPageSet

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.key_silk_cases'
@benchmark.Enabled('android', 'linux')
class ThreadTimesKeyHitTestCases(_ThreadTimes):
    """Measure timeline metrics while performing smoothness action on key hit
    testing cases."""
    page_set = page_sets.KeyHitTestCasesPageSet

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.key_hit_test_cases'
@benchmark.Enabled('android')
class ThreadTimesFastPathMobileSites(_ThreadTimes):
    """Measures timeline metrics while performing smoothness action on
    key mobile sites labeled with fast-path tag.
    http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
    page_set = page_sets.KeyMobileSitesSmoothPageSet
    # Restrict the page set to stories carrying the 'fastpath' label.
    options = {'story_label_filter' : 'fastpath'}

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.key_mobile_sites_smooth'
@benchmark.Enabled('android')
class ThreadTimesSimpleMobileSites(_ThreadTimes):
    """Measures timeline metric using smoothness action on simple mobile sites
    http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
    page_set = page_sets.SimpleMobileSitesPageSet

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.simple_mobile_sites'
@benchmark.Disabled('win')  # crbug.com/443781
class ThreadTimesCompositorCases(_ThreadTimes):
    """Measures timeline metrics while performing smoothness action on
    tough compositor cases, using software rasterization.
    http://www.chromium.org/developers/design-documents/rendering-benchmarks"""
    page_set = page_sets.ToughCompositorCasesPageSet

    def SetExtraBrowserOptions(self, options):
        # Force software rasterization for this variant.
        silk_flags.CustomizeBrowserOptionsForSoftwareRasterization(options)

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.tough_compositor_cases'
@benchmark.Enabled('android')
class ThreadTimesPolymer(_ThreadTimes):
    """Measures timeline metrics while performing smoothness action on
    Polymer cases."""
    page_set = page_sets.PolymerPageSet

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.polymer'
@benchmark.Enabled('android')
class ThreadTimesKeyIdlePowerCases(_ThreadTimes):
    """Measures timeline metrics for sites that should be idle in foreground
    and background scenarios. The metrics are per-second rather than per-frame."""
    page_set = page_sets.KeyIdlePowerCasesPageSet

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.key_idle_power_cases'

    @classmethod
    def ValueCanBeAddedPredicate(cls, value, _):
        # Only report per-second metrics (inverts the base-class default).
        return 'per_frame' not in value.name and 'mean_frame' not in value.name
class ThreadTimesToughScrollingCases(_ThreadTimes):
    """Measure timeline metrics while performing smoothness action on tough
    scrolling cases."""
    page_set = page_sets.ToughScrollingCasesPageSet

    @classmethod
    def Name(cls):
        # Dashboard id for this benchmark variant.
        return 'thread_times.tough_scrolling_cases'
|
rhololkeolke/apo-website-devin
|
refs/heads/master
|
src/werkzeug/useragents.py
|
92
|
# -*- coding: utf-8 -*-
"""
werkzeug.useragents
~~~~~~~~~~~~~~~~~~~
This module provides a helper to inspect user agent strings. This module
is far from complete but should work for most of the currently available
browsers.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
class UserAgentParser(object):
    """A simple user agent parser.  Used by the `UserAgent`."""

    # (regex source, canonical name) pairs, tested in order; first match wins.
    platforms = (
        ('iphone|ios', 'iphone'),
        (r'darwin|mac|os\s*x', 'macos'),
        ('win', 'windows'),
        (r'android', 'android'),
        (r'x11|lin(\b|ux)?', 'linux'),
        ('(sun|i86)os', 'solaris'),
        (r'nintendo\s+wii', 'wii'),
        ('irix', 'irix'),
        ('hp-?ux', 'hpux'),
        ('aix', 'aix'),
        ('sco|unix_sv', 'sco'),
        ('bsd', 'bsd'),
        ('amiga', 'amiga')
    )
    # Crawlers and browsers; more specific tokens must precede generic
    # engine tokens (e.g. 'chrome' before 'safari' before 'webkit').
    browsers = (
        ('googlebot', 'google'),
        ('msnbot', 'msn'),
        ('yahoo', 'yahoo'),
        ('ask jeeves', 'ask'),
        (r'aol|america\s+online\s+browser', 'aol'),
        ('opera', 'opera'),
        ('chrome', 'chrome'),
        ('firefox|firebird|phoenix|iceweasel', 'firefox'),
        ('galeon', 'galeon'),
        ('safari', 'safari'),
        ('webkit', 'webkit'),
        ('camino', 'camino'),
        ('konqueror', 'konqueror'),
        ('k-meleon', 'kmeleon'),
        ('netscape', 'netscape'),
        (r'msie|microsoft\s+internet\s+explorer', 'msie'),
        ('lynx', 'lynx'),
        ('links', 'links'),
        ('seamonkey|mozilla', 'seamonkey')
    )
    # Template for extracting a version number following a browser token.
    # BUG FIX: the original ended the pattern with the inline flag '(?i)';
    # global inline flags not at the start of the expression are deprecated
    # since Python 3.6 and raise re.error on 3.11+.  Case-insensitivity is
    # now supplied as re.I at compile time in __init__ instead.
    _browser_version_re = r'(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?'
    _language_re = re.compile(
        r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|'
        r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)'
    )

    def __init__(self):
        # Pre-compile every pattern once; all matching is case-insensitive.
        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
        self.browsers = [(b, re.compile(self._browser_version_re % a, re.I))
                         for a, b in self.browsers]

    def __call__(self, user_agent):
        """Parse *user_agent*.

        Returns a ``(platform, browser, version, language)`` tuple; each
        element is ``None`` when it cannot be determined from the string.
        """
        for platform, regex in self.platforms:
            match = regex.search(user_agent)
            if match is not None:
                break
        else:
            platform = None
        for browser, regex in self.browsers:
            match = regex.search(user_agent)
            if match is not None:
                version = match.group(1)
                break
        else:
            browser = version = None
        match = self._language_re.search(user_agent)
        if match is not None:
            language = match.group(1) or match.group(2)
        else:
            language = None
        return platform, browser, version, language
class UserAgent(object):
    """Represents a user agent.  Pass it a WSGI environment or a user agent
    string and you can inspect some of the details from the user agent
    string via the attributes.  The following attributes exist:

    .. attribute:: string
       the raw user agent string

    .. attribute:: platform
       the browser platform. The following platforms are currently
       recognized:
       -   `aix`
       -   `amiga`
       -   `android`
       -   `bsd`
       -   `hpux`
       -   `iphone`
       -   `irix`
       -   `linux`
       -   `macos`
       -   `sco`
       -   `solaris`
       -   `wii`
       -   `windows`

    .. attribute:: browser
       the name of the browser. The following browsers are currently
       recognized:
       -   `aol` *
       -   `ask` *
       -   `camino`
       -   `chrome`
       -   `firefox`
       -   `galeon`
       -   `google` *
       -   `kmeleon`
       -   `konqueror`
       -   `links`
       -   `lynx`
       -   `msie`
       -   `msn`
       -   `netscape`
       -   `opera`
       -   `safari`
       -   `seamonkey`
       -   `webkit`
       -   `yahoo` *
       (Browsers maked with a star (``*``) are crawlers.)

    .. attribute:: version
       the version of the browser

    .. attribute:: language
       the language of the browser
    """

    # Shared parser instance; parsing state lives in compiled regexes only,
    # so a single class-level instance is safe.
    _parser = UserAgentParser()

    def __init__(self, environ_or_string):
        if isinstance(environ_or_string, dict):
            # A WSGI environ was passed; pull out the raw header value.
            environ_or_string = environ_or_string.get('HTTP_USER_AGENT', '')
        self.string = environ_or_string
        self.platform, self.browser, self.version, self.language = \
            self._parser(environ_or_string)

    def to_header(self):
        """Return the value for a ``User-Agent`` header (the raw string)."""
        return self.string

    def __str__(self):
        return self.string

    def __nonzero__(self):
        # Truthy only when a known browser was detected.
        return bool(self.browser)

    # BUGFIX: Python 3 ignores __nonzero__, so ``bool(user_agent)`` was
    # always True there.  Alias it to the Python 3 protocol name; the py2
    # name is kept for backwards compatibility.
    __bool__ = __nonzero__

    def __repr__(self):
        return '<%s %r/%s>' % (
            self.__class__.__name__,
            self.browser,
            self.version
        )
# conceptionally this belongs in this module but because we want to lazily
# load the user agent module (which happens in wrappers.py) we have to import
# it afterwards. The class itself has the module set to this module so
# pickle, inspect and similar modules treat the object as if it was really
# implemented here.
from werkzeug.wrappers import UserAgentMixin
|
drjeep/django
|
refs/heads/master
|
tests/flatpages_tests/settings.py
|
514
|
import os

# Template configuration for the flatpages test suite: one Django template
# backend whose DIRS points at the test-local ``templates`` directory, with
# only the auth context processor enabled.
FLATPAGES_TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
    'OPTIONS': {
        'context_processors': (
            'django.contrib.auth.context_processors.auth',
        ),
    },
}]
|
xujun10110/Hammer
|
refs/heads/master
|
lib/knock/modules/dns/node.py
|
49
|
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS nodes. A node is a set of rdatasets."""
import StringIO
import dns.rdataset
import dns.rdatatype
import dns.renderer
class Node(object):
    """A DNS node.

    A node is a set of rdatasets

    @ivar rdatasets: the node's rdatasets
    @type rdatasets: list of dns.rdataset.Rdataset objects"""

    # NOTE: this module is Python 2 code (StringIO, print-chevron syntax,
    # old-style raise); do not run it under Python 3 without porting.
    __slots__ = ['rdatasets']

    def __init__(self):
        """Initialize a DNS node.
        """
        self.rdatasets = [];

    def to_text(self, name, **kw):
        """Convert a node to text format.

        Each rdataset at the node is printed.  Any keyword arguments
        to this method are passed on to the rdataset's to_text() method.
        @param name: the owner name of the rdatasets
        @type name: dns.name.Name object
        @rtype: string
        """
        s = StringIO.StringIO()
        for rds in self.rdatasets:
            # Empty rdatasets are skipped entirely.
            if len(rds) > 0:
                print >> s, rds.to_text(name, **kw)
        # Drop the final trailing newline added by the last print.
        return s.getvalue()[:-1]

    def __repr__(self):
        return '<DNS node ' + str(id(self)) + '>'

    def __eq__(self, other):
        """Two nodes are equal if they have the same rdatasets.

        @rtype: bool
        """
        #
        # This is inefficient.  Good thing we don't need to do it much.
        #
        # Order-insensitive set equality via two containment passes.
        for rd in self.rdatasets:
            if rd not in other.rdatasets:
                return False
        for rd in other.rdatasets:
            if rd not in self.rdatasets:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        # Number of rdatasets, not total rdata count.
        return len(self.rdatasets)

    def __iter__(self):
        return iter(self.rdatasets)

    def find_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
                      create=False):
        """Find an rdataset matching the specified properties in the
        current node.

        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.  Usually this value is
        dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
        dns.rdatatype.RRSIG, then the covers value will be the rdata
        type the SIG/RRSIG covers.  The library treats the SIG and RRSIG
        types as if they were a family of
        types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).  This makes RRSIGs much
        easier to work with than if RRSIGs covering different rdata
        types were aggregated into a single RRSIG rdataset.
        @type covers: int
        @param create: If True, create the rdataset if it is not found.
        @type create: bool
        @raises KeyError: An rdataset of the desired type and class does
        not exist and I{create} is not True.
        @rtype: dns.rdataset.Rdataset object
        """
        for rds in self.rdatasets:
            if rds.match(rdclass, rdtype, covers):
                return rds
        if not create:
            raise KeyError
        # Not found: create, remember and return a fresh rdataset.
        rds = dns.rdataset.Rdataset(rdclass, rdtype)
        self.rdatasets.append(rds)
        return rds

    def get_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE,
                     create=False):
        """Get an rdataset matching the specified properties in the
        current node.

        None is returned if an rdataset of the specified type and
        class does not exist and I{create} is not True.

        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.
        @type covers: int
        @param create: If True, create the rdataset if it is not found.
        @type create: bool
        @rtype: dns.rdataset.Rdataset object or None
        """
        # Non-raising wrapper around find_rdataset().
        try:
            rds = self.find_rdataset(rdclass, rdtype, covers, create)
        except KeyError:
            rds = None
        return rds

    def delete_rdataset(self, rdclass, rdtype, covers=dns.rdatatype.NONE):
        """Delete the rdataset matching the specified properties in the
        current node.

        If a matching rdataset does not exist, it is not an error.

        @param rdclass: The class of the rdataset
        @type rdclass: int
        @param rdtype: The type of the rdataset
        @type rdtype: int
        @param covers: The covered type.
        @type covers: int
        """
        rds = self.get_rdataset(rdclass, rdtype, covers)
        if not rds is None:
            self.rdatasets.remove(rds)

    def replace_rdataset(self, replacement):
        """Replace an rdataset.

        It is not an error if there is no rdataset matching I{replacement}.

        Ownership of the I{replacement} object is transferred to the node;
        in other words, this method does not store a copy of I{replacement}
        at the node, it stores I{replacement} itself.
        """
        if not isinstance(replacement, dns.rdataset.Rdataset):
            raise ValueError, 'replacement is not an rdataset'
        # Delete any existing rdataset with the same key, then adopt the
        # replacement object directly (no copy).
        self.delete_rdataset(replacement.rdclass, replacement.rdtype,
                             replacement.covers)
        self.rdatasets.append(replacement)
|
th0mmeke/toyworld
|
refs/heads/master
|
util/expand_energy_factorial.py
|
1
|
import config
import csv
import sys
import os
import string
"""Experiment, Repeat, Partition Start, Partition End, Dimensionality, Energy, Number of cycles, Length of longest cycle, Count of most common cycle
to:
Experiment, Repeat, Partition Start, Partition End, Energy, Number of cycles, Length of longest cycle, Count of most common cycle"""
if __name__ == "__main__":
    # Expand the "Energy" factorial column of a results CSV:
    # input rows:  Experiment, Repeat, Partition Start, Partition End,
    #              Dimensionality, Energy, ...
    # output rows: same minus Dimensionality, with Energy decoded from the
    #              +1/-1 factorial coding into its real value (300/100).
    # NOTE: Python 2 style ('rb' csv mode, string.join) — port before
    # running under Python 3.
    fname = sys.argv[1]
    filename = os.path.join(config.DataDir, fname)
    new_filename = filename + "-expanded"
    with open(filename, 'rb') as csvfile:
        with open(new_filename, 'wb') as outfile:
            spamreader = csv.reader(csvfile, delimiter=',')
            for row in spamreader:
                # Column 5 is the coded Energy factor: "+1" -> 300, else 100.
                if row[5] == "+1":
                    row[5] = "300"
                else:
                    row[5] = "100"
                # Drop the Dimensionality column (index 4).
                del(row[4])
                outfile.write("{}\n".format(string.join(row, ",")))
|
virtus80/python_training
|
refs/heads/master
|
generator/contact.py
|
1
|
from model.contact import Contact
import random
import string
import os.path
import jsonpickle
import getopt
import sys
# Parse command-line options: -n <count> (number of contacts to generate)
# and -f <path> (output file, relative to the project root).
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # BUGFIX: the getopt module has no ``usage()`` helper, so the original
    # ``getopt.usage()`` raised AttributeError and hid the real parse error.
    # Report the error and exit with a non-zero status instead.
    print(err)
    sys.exit(2)
n = 1
f = "data/contacts.json"
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string_for_names(prefix, maxlen):
    """Return *prefix* followed by 0..maxlen-1 random letters/spaces."""
    alphabet = string.ascii_letters + " "
    length = random.randrange(maxlen)
    tail = "".join(random.choice(alphabet) for _ in range(length))
    return prefix + tail
def random_string_for_phones(maxlen):
    """Return a random phone-like string of length 0..maxlen-1.

    Digits and dashes appear three times in the alphabet, making them
    three times as likely as the other symbols.
    """
    alphabet = 3 * string.digits + " +()" + 3 * "-"
    count = random.randrange(maxlen)
    return "".join(random.choice(alphabet) for _ in range(count))
def random_string_for_address(maxlen):
    """Return a random address-like string of length 0..maxlen-1.

    The punctuation run `` ,.`` is repeated five times so separators are
    five times as likely as any single letter or digit.
    """
    alphabet = string.ascii_letters + string.digits + 5 * " ,."
    count = random.randrange(maxlen)
    return "".join(random.choice(alphabet) for _ in range(count))
def random_string_for_email(maxlen):
    """Return a random email-local-part-like string of length 0..maxlen-1.

    Letters are tripled in the alphabet so they dominate over digits and
    the punctuation set ``-_#&+``.
    """
    alphabet = 3 * string.ascii_letters + string.digits + "-_#&+"
    count = random.randrange(maxlen)
    return "".join(random.choice(alphabet) for _ in range(count))
# Build ``n`` random Contact records, one random value per field, with an
# email assembled from a random local part and a domain picked from a
# fixed list.
testdata = [Contact(firstname=random_string_for_names("name", 15), lastname=random_string_for_names("surname", 20),
                    nickname=random_string_for_address(20), company=random_string_for_names("company", 30),
                    address=random_string_for_address(60), homephone=random_string_for_phones(14),
                    workphone=random_string_for_phones(14), mobilephone=random_string_for_phones(14),
                    secondaryphone=random_string_for_phones(14),email=random_string_for_email(30) + "@" + random.choice(["mail.ru", "gmail.com",
                    "yandex.ru", "i.ua", "ukr.net"])) for i in range(n)]
# Write the records as pretty-printed JSON next to the project root,
# under the path given by -f (default data/contacts.json).
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
|
zbqf109/goodo
|
refs/heads/master
|
openerp/addons/test_inherits/__openerp__.py
|
357
|
# -*- coding: utf-8 -*-
# OpenERP/Odoo addon manifest for the ``test_inherits`` test module:
# pure metadata evaluated by the addon loader, no executable logic.
{
    'name': 'test-inherits',
    'version': '0.1',
    'category': 'Tests',
    'description': """A module to verify the inheritance using _inherits.""",
    'author': 'Camptocamp',
    'website': 'http://www.camptocamp.com',
    'depends': ['base'],
    # Files loaded at install time: access rules first, then demo records.
    'data': [
        'ir.model.access.csv',
        'demo_data.xml',
    ],
    'installable': True,
    'auto_install': False,
}
|
tectronics/cortex-vfx
|
refs/heads/master
|
test/IECoreHoudini/ops/parameters/compoundParameters/compoundParameters-1.py
|
12
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from IECore import *
class compoundParameters( Op ) :
    """Test Op exposing several (nested) CompoundParameters so the Houdini
    bindings can be exercised against compound parameter UIs."""

    def __init__( self ) :
        # The Op's result is a dummy empty PointsPrimitive; only the
        # parameter declarations below matter for this test.
        Op.__init__( self,
            "Op with some compound parameters.",
            ObjectParameter(
                name = "result",
                description = "Dummy.",
                defaultValue = PointsPrimitive(V3fVectorData()),
                type = TypeId.PointsPrimitive
            )
        )
        self.parameters().addParameters( [
            # Simple compound: a vector and a colour.
            CompoundParameter(
                name = "compound_1",
                description = "a compound parameter",
                userData = { "UI" : { "label" : StringData( "My Compound 1" ) } },
                members = [
                    V3dParameter(
                        name = "j",
                        description = "a v3d",
                        defaultValue = V3dData( V3d( 8, 16, 32 ) ),
                        userData = { "UI" : { "label" : StringData( "A Vector" ) } },
                    ),
                    Color3fParameter(
                        name = "k",
                        description = "an m44f",
                        defaultValue = Color3f(1,0.5,0),
                        userData = { "UI" : { "label" : StringData( "A Colour" ) } },
                    ),
                ]
            ),
            # Compound containing a parameter with presets.
            CompoundParameter(
                name = "compound_2",
                description = "a compound parameter",
                userData = { "UI" : { "label" : StringData( "My Compound 2" ) } },
                members = [
                    V3dParameter(
                        name = "j",
                        description = "a v3d",
                        defaultValue = V3dData( V3d( 8, 16, 32 ) ),
                        presets = (
                            ( "one", V3d( 1 ) ),
                            ( "two", V3d( 2 ) )
                        ),
                        userData = { "UI" : { "label" : StringData( "Compound->V3d" ) } },
                    ),
                    V2fParameter(
                        name = "k",
                        description = "an v2f",
                        defaultValue = V2f(1,1)
                    ),
                ]
            ),
            # Compound nested inside another compound.
            CompoundParameter(
                name = "compound_3",
                description = "a compound parameter",
                userData ={ "UI" : { "label" : StringData( "My Compound 3" ) } },
                members = [
                    CompoundParameter(
                        name = "compound_4",
                        description = "a compound parameter",
                        userData = { "UI" : { "label" : StringData( "My Compound 4" ) } },
                        members = [
                            IntParameter(
                                name = "some_int",
                                description = "Int",
                                defaultValue = 123,
                                userData = { "UI" : { "label" : StringData( "Int" ) } },
                            ),
                        ]
                    )
                ]
            ),
            # A plain (non-compound) parameter in between.
            FloatParameter(
                name="blah",
                description="blah",
                defaultValue = 123.0
            ),
            # Compound holding a single boolean.
            CompoundParameter(
                name = "compound_5",
                description = "a compound parameter",
                userData = { "UI" : { "label" : StringData( "Another Compound Parameter" ) } },
                members = [
                    BoolParameter(
                        name = "bool_1",
                        description = "a boolean parameter",
                        defaultValue = True
                    )
                ]
            )
        ] )

    def doOperation( self, args ) :
        # Parameters are only inspected by the test; return an empty result.
        return PointsPrimitive(V3fVectorData())

registerRunTimeTyped( compoundParameters )
|
xflows/clowdflows-backend
|
refs/heads/master
|
workflows/api/tests.py
|
1
|
from django.contrib.auth.models import User
from rest_framework.reverse import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from workflows.models import Workflow, Widget, Input
TEST_USERNAME = 'testuser'
TEST_PASSWORD = '123'
# Test workflow ids
TEST_WORKFLOW_USERS_PK = 2
TEST_WORKFLOW_OTHER_USER_PRIVATE_PK = 4
TEST_WORKFLOW_OTHER_USER_PUBLIC_PK = 6
TEST_OUTPUT_PK = 9
# Test widget ids
TEST_WIDGET_USERS_PK = 6
TEST_WIDGET_OTHER_USER_PRIVATE_PK = 33
TEST_WIDGET_OTHER_USER_PUBLIC_PK = 34
# Test widget parameters
TEST_PARAMETER_USERS_PK = 10
TEST_PARAMETER_OTHER_USER_PRIVATE_PK = 98
TEST_PARAMETER_OTHER_USER_PUBLIC_PK = 99
class BaseAPITestCase(APITestCase):
    """Shared fixtures and helpers for the workflows API tests."""
    # Fixture with test users, workflows, widgets and parameters whose pks
    # are referenced by the TEST_* constants above.
    fixtures = ['test_data_api', ]

    def _login(self):
        # Authenticate the test client as the primary test user.
        self.client.login(username=TEST_USERNAME, password=TEST_PASSWORD)

    def _logout(self):
        self.client.logout()

    def _test_multiple_response_codes(self, verb, urls, codes, data=None):
        # Issue ``verb`` against each URL and assert the paired status code;
        # ``data`` (when given) is sent with every request.
        for url, code in zip(urls, codes):
            response = verb(url, data) if data else verb(url)
            self.assertEqual(response.status_code, code)
class SupportingAPITests(BaseAPITestCase):
    """Tests for the auxiliary endpoints: registration, token login/logout
    and the widget library listing."""

    def test_register(self):
        # Registering a new user should succeed and create the User row.
        url = reverse('user-create')
        response = self.client.post(url, {
            'username': 'testuser3',
            'password': '123',
            'email': 'testuser3@testdomain.com'
        })
        self.assertEqual(response.status_code, 200)
        self.assertEqual(User.objects.filter(username='testuser3').count(), 1)

    def test_login(self):
        # Valid fixture credentials should yield an auth token.
        url = reverse('token-create')
        response = self.client.post(url, {
            'username': 'testuser',
            'password': '123'
        })
        self.assertEqual(response.status_code, 200)

    def test_logout(self):
        url = reverse('token-destroy')
        self._login()
        response = self.client.post(url)  # HTTP_AUTHORIZATION="Token %s" % auth_token)
        self.assertEqual(response.status_code, 204)

    def test_widget_library(self):
        url = reverse('widget-library-list')
        # Test without authentication - this should fail
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._logout()
class WorkflowAPITests(BaseAPITestCase):
    """CRUD and execution tests for the workflow endpoints.

    Each test checks three access levels: unauthenticated (401), the
    owner's own workflow (success), and other users' private/public
    workflows (403).
    """

    def test_create(self):
        url = reverse('workflow-list')
        workflow_data = {
            'name': 'Untitled workflow',
            'is_public': False,
            'description': '',
            'widget': None,
            'template_parent': None
        }
        # Test without authentication - this should not be allowed
        response = self.client.post(url, workflow_data)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.post(url, workflow_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self._logout()

    def test_patch(self):
        url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        url_other_user_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
        url_other_user_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
        workflowData = {
            'name': 'Test workflow',
            'is_public': True,
            'description': 'Test description'
        }
        # Test without authentication - this should not be allowed
        response = self.client.patch(url, workflowData)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.patch(url, workflowData)
        updated_workflow = response.data
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(updated_workflow['name'], 'Test workflow')
        self.assertEqual(updated_workflow['is_public'], True)
        self.assertEqual(updated_workflow['description'], 'Test description')
        # Try to patch
        self._test_multiple_response_codes(
            self.client.patch,
            [url_other_user_private, url_other_user_public],
            [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN],
            data=workflowData
        )
        self._logout()

    def test_delete(self):
        url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        url_other_user_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
        url_other_user_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
        # Test without authentication - this should not be allowed
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        self._test_multiple_response_codes(
            self.client.delete,
            [url, url_other_user_private, url_other_user_public],
            [status.HTTP_204_NO_CONTENT, status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
        )
        self._logout()

    def test_reset(self):
        url = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        url_other_user_private = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
        url_other_user_public = reverse('workflow-reset', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
        # Test without authentication - this should not be allowed
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.post(url, format="json")
        data = response.json()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(data['status'], 'ok')
        # After a reset every widget must be back in its pristine state.
        workflow = Workflow.objects.get(pk=TEST_WORKFLOW_USERS_PK)
        for widget in workflow.widgets.all():
            self.assertEqual(widget.finished, False)
            self.assertEqual(widget.error, False)
            self.assertEqual(widget.running, False)
        self._test_multiple_response_codes(
            self.client.post,
            [url_other_user_private, url_other_user_public],
            [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
        )
        self._logout()

    def test_run(self):
        url = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        url_other_user_private = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
        url_other_user_public = reverse('workflow-run', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
        # Test without authentication - this should not be allowed
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.post(url, format="json")
        # data = response.json()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # self.assertEqual(data['status'], 'ok')
        # After a run every widget must have finished.
        workflow = Workflow.objects.get(pk=TEST_WORKFLOW_USERS_PK)
        for widget in workflow.widgets.all():
            self.assertEqual(widget.finished, True)
        self._test_multiple_response_codes(
            self.client.post,
            [url_other_user_private, url_other_user_public],
            [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
        )
        self._logout()

    def test_subprocess(self):
        url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        url_other_user_private = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
        url_other_user_public = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
        # Test without authentication - this should not be allowed
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.post(url, format="json")
        widget = response.json()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(widget['type'], 'subprocess')
        self._test_multiple_response_codes(
            self.client.post,
            [url_other_user_private, url_other_user_public],
            [status.HTTP_403_FORBIDDEN, status.HTTP_403_FORBIDDEN]
        )
        # Get subprocess workflow object
        subprocess_workflow = Widget.objects.get(pk=widget['id']).workflow_link
        # Test adding input
        url = reverse('workflow-subprocess-input', kwargs={'pk': subprocess_workflow.pk})
        response = self.client.post(url)
        widget = response.json()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(widget['type'], 'input')
        # Test adding output
        url = reverse('workflow-subprocess-output', kwargs={'pk': subprocess_workflow.pk})
        response = self.client.post(url)
        widget = response.json()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(widget['type'], 'output')
        self._logout()

    def test_subprocess_forloop(self):
        url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        self._login()
        # First add a subprocess
        response = self.client.post(url)
        widget = response.json()
        subprocess_workflow = Widget.objects.get(pk=widget['id']).workflow_link
        # Test adding for loop widgets
        url = reverse('workflow-subprocess-forloop', kwargs={'pk': subprocess_workflow.pk})
        response = self.client.post(url)
        data = response.json()
        self.assertNotIn('status', data)
        widget_types = {w['type'] for w in data}
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertSetEqual(widget_types, {'for_input', 'for_output'})
        self._logout()

    def test_subprocess_xvalidation(self):
        url = reverse('workflow-subprocess', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        self._login()
        # First add a subprocess
        response = self.client.post(url)
        data = response.json()
        self.assertNotIn('status', data)
        subprocess_workflow = Widget.objects.get(pk=data['id']).workflow_link
        # Test adding cross validation widgets
        url = reverse('workflow-subprocess-xvalidation', kwargs={'pk': subprocess_workflow.pk})
        response = self.client.post(url)
        widgets = response.json()
        widget_types = {w['type'] for w in widgets}
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertSetEqual(widget_types, {'cv_input', 'cv_output'})
        self._logout()
class WidgetAPITests(BaseAPITestCase):
    """CRUD, run/reset and parameter-save tests for the widget endpoints.

    Access expectations differ from workflows: other users' *private*
    widgets are invisible (404) while their *public* widgets are visible
    but read-only (403).
    """

    def test_fetch_value(self):
        # The fixture output TEST_OUTPUT_PK holds the serialized value '5'.
        url = reverse('output-value', kwargs={'pk': TEST_OUTPUT_PK})
        self._login()
        response = self.client.get(url)
        data = response.json()
        self.assertEqual(data['value'], '5')

    def test_create(self):
        url = reverse('widget-list')
        workflow_url = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_USERS_PK})
        workflow_url_private = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PRIVATE_PK})
        workflow_url_public = reverse('workflow-detail', kwargs={'pk': TEST_WORKFLOW_OTHER_USER_PUBLIC_PK})
        widget_data = {
            'workflow': workflow_url,
            'x': 50,
            'y': 50,
            'name': 'Test widget',
            'abstract_widget': 3,  # Multiply integers abstract widget
            'finished': False,
            'error': False,
            'running': False,
            'interaction_waiting': False,
            'type': 'regular',
            'progress': 0
        }
        # Test without authentication - this should not be allowed
        response = self.client.post(url, widget_data)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.post(url, widget_data)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        # Test on other user's workflows
        widget_data['workflow'] = workflow_url_private
        response = self.client.post(url, widget_data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        widget_data['workflow'] = workflow_url_public
        response = self.client.post(url, widget_data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._logout()

    def test_patch(self):
        widget_url = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_USERS_PK})
        widget_url_private = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
        widget_url_public = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
        widget_data = {
            'x': 12,
            'y': 34,
            'name': 'Test name'
        }
        # Test without authentication - this should not be allowed
        response = self.client.patch(widget_url, widget_data)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.patch(widget_url, widget_data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The patch must be persisted.
        widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
        self.assertEqual(widget.x, widget_data['x'])
        self.assertEqual(widget.y, widget_data['y'])
        self.assertEqual(widget.name, widget_data['name'])
        # Test on other user's widgets
        response = self.client.patch(widget_url_private, widget_data)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.patch(widget_url_public, widget_data)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._logout()

    def test_reset(self):
        widget_url = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_USERS_PK})
        widget_url_private = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
        widget_url_public = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
        # Test without authentication - this should not be allowed
        response = self.client.post(widget_url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.post(widget_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
        self.assertEqual(widget.finished, False)
        # Test on other user's widgets
        response = self.client.post(widget_url_private)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.post(widget_url_public)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._logout()

    def test_run(self):
        widget_url = reverse('widget-run', kwargs={'pk': TEST_WIDGET_USERS_PK})
        widget_reset_url = reverse('widget-reset', kwargs={'pk': TEST_WIDGET_USERS_PK})
        widget_url_private = reverse('widget-run', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
        widget_url_public = reverse('widget-run', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
        # Test without authentication - this should not be allowed
        response = self.client.post(widget_url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        # First reset the widget
        response = self.client.post(widget_reset_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
        self.assertEqual(widget.finished, False)
        # .. then run
        response = self.client.post(widget_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        widget = Widget.objects.get(pk=TEST_WIDGET_USERS_PK)
        self.assertEqual(widget.finished, True)
        # Test on other user's widgets
        response = self.client.post(widget_url_private)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.post(widget_url_public)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._logout()

    def test_delete(self):
        widget_url = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_USERS_PK})
        widget_url_private = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
        widget_url_public = reverse('widget-detail', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
        # Test without authentication - this should not be allowed
        response = self.client.delete(widget_url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.delete(widget_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The row must be gone afterwards.
        widget_count = Widget.objects.filter(pk=TEST_WIDGET_USERS_PK).count()
        self.assertEqual(widget_count, 0)
        # Test on other user's widgets
        response = self.client.delete(widget_url_private)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        response = self.client.delete(widget_url_public)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._logout()

    def test_save_parameters(self):
        widget_url = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_USERS_PK})
        widget_url_private = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_OTHER_USER_PRIVATE_PK})
        widget_url_public = reverse('widget-save-parameters', kwargs={'pk': TEST_WIDGET_OTHER_USER_PUBLIC_PK})
        parameters = [{
            'id': TEST_PARAMETER_USERS_PK,
            'value': '42'
        }]
        # Test without authentication - this should not be allowed
        response = self.client.patch(widget_url, parameters)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self._login()
        response = self.client.patch(widget_url, parameters)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # The new value must be persisted on the Input row.
        parameter = Input.objects.get(pk=TEST_PARAMETER_USERS_PK)
        self.assertEqual(parameter.value, '42')
        # Test on other user's widgets
        parameters[0]['id'] = TEST_PARAMETER_OTHER_USER_PRIVATE_PK
        response = self.client.patch(widget_url_private, parameters)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        parameters[0]['id'] = TEST_PARAMETER_OTHER_USER_PUBLIC_PK
        response = self.client.patch(widget_url_public, parameters)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self._logout()
|
suda/micropython
|
refs/heads/master
|
tests/bytecode/pylib-tests/pty.py
|
34
|
"""Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux.
# See: W. Richard Stevens. 1992. Advanced Programming in the
# UNIX Environment. Chapter 19.
# Author: Steen Lumholt -- with additions by Guido.
from select import select
import os
import tty
__all__ = ["openpty","fork","spawn"]
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
CHILD = 0
def openpty():
    """openpty() -> (master_fd, slave_fd)
    Open a pty master/slave pair, using os.openpty() if possible."""
    try:
        return os.openpty()
    except (AttributeError, OSError):
        # os.openpty() may be missing on this build (AttributeError) or
        # unsupported at runtime (OSError); fall back to the manual path.
        pass
    master_fd, slave_name = _open_terminal()
    slave_fd = slave_open(slave_name)
    return master_fd, slave_fd
def master_open():
    """master_open() -> (master_fd, slave_name)
    Open a pty master and return the fd, and the filename of the slave end.
    Deprecated, use openpty() instead."""
    try:
        master_fd, slave_fd = os.openpty()
    except (AttributeError, OSError):
        # os.openpty() unavailable; fall through to the legacy scan below.
        pass
    else:
        # Only the name of the slave is returned; close our fd so the
        # caller can re-open it by name via slave_open().
        slave_name = os.ttyname(slave_fd)
        os.close(slave_fd)
        return master_fd, slave_name
    return _open_terminal()
def _open_terminal():
    """Open pty master and return (master_fd, tty_name).
    SGI and generic BSD version, for when openpty() fails."""
    try:
        import sgi
    except ImportError:
        pass
    else:
        # IRIX: the sgi module allocates a pty directly.
        try:
            tty_name, master_fd = sgi._getpty(os.O_RDWR, 0o666, 0)
        except IOError as msg:
            raise os.error(msg)
        return master_fd, tty_name
    # Generic BSD: probe the fixed /dev/pty[p-zP-T][0-9a-f] namespace
    # until a master device opens successfully (an in-use or absent
    # device raises os.error and is skipped).
    for x in 'pqrstuvwxyzPQRST':
        for y in '0123456789abcdef':
            pty_name = '/dev/pty' + x + y
            try:
                fd = os.open(pty_name, os.O_RDWR)
            except os.error:
                continue
            # The matching slave uses the same suffix under /dev/tty*.
            return (fd, '/dev/tty' + x + y)
    raise os.error('out of pty devices')
def slave_open(tty_name):
    """slave_open(tty_name) -> slave_fd
    Open the pty slave and acquire the controlling terminal, returning
    opened filedescriptor.
    Deprecated, use openpty() instead."""
    result = os.open(tty_name, os.O_RDWR)
    try:
        from fcntl import ioctl, I_PUSH
    except ImportError:
        # No STREAMS support (I_PUSH) on this platform; the plain
        # open is sufficient.
        return result
    try:
        # Push the STREAMS modules providing terminal semantics
        # (ptem: pty emulation, ldterm: line discipline).
        ioctl(result, I_PUSH, "ptem")
        ioctl(result, I_PUSH, "ldterm")
    except IOError:
        # Best effort: modules may already be pushed or unsupported.
        pass
    return result
def fork():
    """fork() -> (pid, master_fd)
    Fork and make the child a session leader with a controlling terminal."""
    try:
        pid, fd = os.forkpty()
    except (AttributeError, OSError):
        # os.forkpty() missing or unsupported; emulate it manually below.
        pass
    else:
        if pid == CHILD:
            try:
                os.setsid()
            except OSError:
                # os.forkpty() already set us session leader
                pass
        return pid, fd

    master_fd, slave_fd = openpty()
    pid = os.fork()
    if pid == CHILD:
        # Establish a new session.
        os.setsid()
        os.close(master_fd)

        # Slave becomes stdin/stdout/stderr of child.
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        if (slave_fd > STDERR_FILENO):
            # Original descriptor no longer needed once duplicated.
            os.close (slave_fd)

        # Explicitly open the tty to make it become a controlling tty.
        tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
        os.close(tmp_fd)
    else:
        # Parent keeps only the master end.
        os.close(slave_fd)

    # Parent and child process.
    return pid, master_fd
def _writen(fd, data):
    """Write all of *data* to descriptor *fd*, retrying short writes."""
    remaining = data
    while remaining:
        # os.write may accept only part of the buffer; drop what was
        # written and loop on the rest.
        written = os.write(fd, remaining)
        remaining = remaining[written:]
def _read(fd):
    """Default read function: pull at most 1024 bytes from *fd*."""
    chunk = os.read(fd, 1024)
    return chunk
def _copy(master_fd, master_read=_read, stdin_read=_read):
    """Parent copy loop.
    Copies
            pty master -> standard output   (master_read)
            standard input -> pty master    (stdin_read)"""
    fds = [master_fd, STDIN_FILENO]
    # NOTE(review): this loop has no exit condition; once both fds hit
    # EOF, select() blocks on an empty list. Matches this vintage of
    # upstream pty.py -- callers rely on an (IO/OS)Error to break out.
    while True:
        rfds, wfds, xfds = select(fds, [], [])
        if master_fd in rfds:
            data = master_read(master_fd)
            if not data:  # Reached EOF.
                # Stop watching the master once it reports EOF.
                fds.remove(master_fd)
            else:
                os.write(STDOUT_FILENO, data)
        if STDIN_FILENO in rfds:
            data = stdin_read(STDIN_FILENO)
            if not data:
                # Stop forwarding stdin after EOF.
                fds.remove(STDIN_FILENO)
            else:
                _writen(master_fd, data)
def spawn(argv, master_read=_read, stdin_read=_read):
    """Create a spawned process.

    Forks a child running ``argv`` on a fresh pty, puts the local
    terminal into raw mode when possible, and copies data between the
    terminal and the pty until an I/O error ends the session.

    Args:
        argv: program path, or a sequence whose first item is the path.
        master_read: callable used to read from the pty master.
        stdin_read: callable used to read from standard input.
    """
    # Idiomatic type check (was ``type(argv) == type('')``): accept a
    # bare command string and wrap it for exec.
    if isinstance(argv, str):
        argv = (argv,)
    pid, master_fd = fork()
    if pid == CHILD:
        os.execlp(argv[0], *argv)
    try:
        mode = tty.tcgetattr(STDIN_FILENO)
        tty.setraw(STDIN_FILENO)
        restore = True
    except tty.error:    # This is the same as termios.error
        # stdin is not a terminal (e.g. redirected); nothing to restore.
        restore = False
    try:
        _copy(master_fd, master_read, stdin_read)
    except (IOError, OSError):
        if restore:
            # Put the terminal back the way we found it.
            tty.tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)

    os.close(master_fd)
|
pangtouyu/flasky
|
refs/heads/master
|
app/api_1_0/users.py
|
104
|
from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
@api.route('/users/<int:id>')
def get_user(id):
    """Return a single user serialized to JSON; 404 when absent."""
    found = User.query.get_or_404(id)
    return jsonify(found.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
    """Return one page of the given user's posts as JSON.

    Response contains the serialized posts, prev/next page links
    (or None at the boundaries) and the total post count.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: links previously targeted 'api.get_posts' (the global
        # posts listing) and omitted the required user id; they must
        # point back at this per-user endpoint.
        prev = url_for('api.get_user_posts', id=id, page=page-1, _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1, _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
    """Return one page of posts by users the given user follows.

    Response contains the serialized posts, prev/next page links
    (or None at the boundaries) and the total post count.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: links previously targeted 'api.get_posts' and omitted
        # the required user id; they must point back at this endpoint.
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
|
louistin/fullstack
|
refs/heads/master
|
Python/basic/string_20161211.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
"""Demonstrate %-formatting applied to a triple-quoted multi-line string."""

# The triple-quoted literal keeps its embedded newlines; the trailing
# ``% 'louis'`` substitutes into the ``%s`` placeholder.
test_str = '''
hehehe,
hahhah,
%s,
heihei
''' % 'louis'

# BUG FIX: the original ``print test_str`` statement is Python-2-only
# syntax; print() as a function behaves identically on Python 2 and 3
# for a single argument.
print(test_str)
|
valexandersaulys/prudential_insurance_kaggle
|
refs/heads/master
|
venv/lib/python2.7/site-packages/numpy/__init__.py
|
79
|
"""
NumPy
=====
Provides
1. An array object of arbitrary homogeneous items
2. Fast mathematical operations over arrays
3. Linear Algebra, Fourier Transforms, Random Number Generation
How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
`the NumPy homepage <http://www.scipy.org>`_.
We recommend exploring the docstrings using
`IPython <http://ipython.scipy.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
The docstring examples assume that `numpy` has been imported as `np`::
>>> import numpy as np
Code snippets are indicated by three greater-than signs::
>>> x = 42
>>> x = x + 1
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
of the help() page. Ufuncs are implemented in C, not Python, for speed.
The native Python help() does not know how to view their help, but our
np.info() function does.
To search for documents containing a keyword, do::
>>> np.lookfor('keyword')
... # doctest: +SKIP
General-purpose documents like a glossary and help on the basic concepts
of numpy are available under the ``doc`` sub-module::
>>> from numpy import doc
>>> help(doc)
... # doctest: +SKIP
Available subpackages
---------------------
doc
Topical documentation on broadcasting, indexing, etc.
lib
Basic functions used by several sub-packages.
random
Core Random Tools
linalg
Core Linear Algebra Tools
fft
Core FFT routines
polynomial
Polynomial tools
testing
Numpy testing tools
f2py
Fortran to Python Interface Generator.
distutils
Enhancements to distutils with support for
Fortran compilers support and more.
Utilities
---------
test
Run numpy unittests
show_config
Show numpy build configuration
dual
Overwrite certain functions with high-performance Scipy tools
matlib
Make everything matrices.
__version__
Numpy version string
Viewing documentation using IPython
-----------------------------------
Start IPython with the NumPy profile (``ipython -p numpy``), which will
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
paste examples into the shell. To see which functions are available in
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
down the list. To view the docstring for a function, use
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
the source code).
Copies vs. in-place operation
-----------------------------
Most of the functions in `numpy` return a copy of the array argument
(e.g., `np.sort`). In-place versions of these functions are often
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
from __future__ import division, absolute_import, print_function
import sys
class ModuleDeprecationWarning(DeprecationWarning):
    """Warning class used when deprecating an entire module.

    The nose tester turns ordinary DeprecationWarnings into test
    failures, which makes whole-module deprecation hard because the
    modules are imported by default.  This dedicated subclass is
    recognized by the nose tester and allowed through without
    failing tests.
    """
class VisibleDeprecationWarning(UserWarning):
    """Deprecation warning that is shown by default.

    Python suppresses DeprecationWarning by default; deriving from
    UserWarning instead makes the message visible, which is helpful
    when the deprecated usage is most likely a user bug.
    """
class _NoValue:
    """Sentinel used as the default of a deprecated keyword argument.

    Lets the implementation distinguish "caller supplied nothing" from
    any real value (including None) the caller might pass.
    """
# oldnumeric and numarray were removed in 1.9. In case some packages import
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
numarray = 'removed'

# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
    __NUMPY_SETUP__
except NameError:
    # The flag is only injected by numpy's own setup.py; absence means a
    # normal installed import.
    __NUMPY_SETUP__ = False

if __NUMPY_SETUP__:
    import sys as _sys
    _sys.stderr.write('Running from numpy source directory.\n')
    del _sys
else:
    try:
        from numpy.__config__ import show as show_config
    except ImportError:
        msg = """Error importing numpy: you should not try to import numpy from
        its source directory; please exit the numpy source tree, and relaunch
        your python interpreter from there."""
        raise ImportError(msg)
    from .version import git_revision as __git_revision__
    from .version import version as __version__

    from ._import_tools import PackageLoader

    def pkgload(*packages, **options):
        # Thin wrapper over PackageLoader; docstring is copied from
        # PackageLoader.__call__ just below.
        loader = PackageLoader(infunc=True)
        return loader(*packages, **options)

    from . import add_newdocs
    __all__ = ['add_newdocs',
               'ModuleDeprecationWarning',
               'VisibleDeprecationWarning']

    pkgload.__doc__ = PackageLoader.__call__.__doc__

    from .testing import Tester
    test = Tester().test
    bench = Tester().bench

    # Core subpackages; star-imports populate the numpy namespace.
    from . import core
    from .core import *
    from . import compat
    from . import lib
    from .lib import *
    from . import linalg
    from . import fft
    from . import polynomial
    from . import random
    from . import ctypeslib
    from . import ma
    from . import matrixlib as _mat
    from .matrixlib import *
    from .compat import long

    # Make these accessible from numpy name-space
    # but not imported in from numpy import *
    if sys.version_info[0] >= 3:
        from builtins import bool, int, float, complex, object, str
        unicode = str
    else:
        from __builtin__ import bool, int, float, complex, object, unicode, str

    from .core import round, abs, max, min

    __all__.extend(['__version__', 'pkgload', 'PackageLoader',
                    'show_config'])
    __all__.extend(core.__all__)
    __all__.extend(_mat.__all__)
    __all__.extend(lib.__all__)
    __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])

    # Filter annoying Cython warnings that serve no good purpose.
    import warnings
    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
|
madhusudancs/pytask
|
refs/heads/master
|
pytask/helpers/__init__.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2011 Authors of PyTask.
#
# This file is part of PyTask.
#
# PyTask is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyTask is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyTask. If not, see <http://www.gnu.org/licenses/>.
"""Package containing the helper functions that may be used through out
the site.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@fossee.in>',
]
|
Jumpscale/web
|
refs/heads/master
|
pythonlib/gdata/contacts/data.py
|
81
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for parsing and generating XML for the Contacts API."""
__author__ = 'vinces1979@gmail.com (Vince Spicer)'
import atom.core
import gdata
import gdata.data
PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
EXTERNAL_ID_ORGANIZATION = 'organization'
RELATION_MANAGER = 'manager'
CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
CONTACTS_TEMPLATE = '{%s}%%s' % CONTACTS_NAMESPACE
class BillingInformation(atom.core.XmlElement):
"""
gContact:billingInformation
Specifies billing information of the entity represented by the contact. The element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'billingInformation'
class Birthday(atom.core.XmlElement):
"""
Stores birthday date of the person represented by the contact. The element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'birthday'
when = 'when'
class ContactLink(atom.data.Link):
"""
Extends atom.data.Link to add gd:etag attribute for photo link.
"""
etag = gdata.data.GD_TEMPLATE % 'etag'
class CalendarLink(atom.core.XmlElement):
"""
Storage for URL of the contact's calendar. The element can be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'calendarLink'
rel = 'rel'
label = 'label'
primary = 'primary'
href = 'href'
class DirectoryServer(atom.core.XmlElement):
"""
A directory server associated with this contact.
May not be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'directoryServer'
class Event(atom.core.XmlElement):
"""
These elements describe events associated with a contact.
They may be repeated
"""
_qname = CONTACTS_TEMPLATE % 'event'
label = 'label'
rel = 'rel'
when = gdata.data.When
class ExternalId(atom.core.XmlElement):
"""
Describes an ID of the contact in an external system of some kind.
This element may be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'externalId'
label = 'label'
rel = 'rel'
value = 'value'
def ExternalIdFromString(xml_string):
return atom.core.parse(ExternalId, xml_string)
class Gender(atom.core.XmlElement):
"""
Specifies the gender of the person represented by the contact.
The element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'gender'
value = 'value'
class Hobby(atom.core.XmlElement):
"""
Describes an ID of the contact in an external system of some kind.
This element may be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'hobby'
class Initials(atom.core.XmlElement):
""" Specifies the initials of the person represented by the contact. The
element cannot be repeated. """
_qname = CONTACTS_TEMPLATE % 'initials'
class Jot(atom.core.XmlElement):
"""
Storage for arbitrary pieces of information about the contact. Each jot
has a type specified by the rel attribute and a text value.
The element can be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'jot'
rel = 'rel'
class Language(atom.core.XmlElement):
"""
Specifies the preferred languages of the contact.
The element can be repeated.
The language must be specified using one of two mutually exclusive methods:
using the freeform @label attribute, or using the @code attribute, whose value
must conform to the IETF BCP 47 specification.
"""
_qname = CONTACTS_TEMPLATE % 'language'
code = 'code'
label = 'label'
class MaidenName(atom.core.XmlElement):
"""
Specifies maiden name of the person represented by the contact.
The element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'maidenName'
class Mileage(atom.core.XmlElement):
"""
Specifies the mileage for the entity represented by the contact.
Can be used for example to document distance needed for reimbursement
purposes. The value is not interpreted. The element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'mileage'
class NickName(atom.core.XmlElement):
"""
Specifies the nickname of the person represented by the contact.
The element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'nickname'
class Occupation(atom.core.XmlElement):
"""
Specifies the occupation/profession of the person specified by the contact.
The element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'occupation'
class Priority(atom.core.XmlElement):
"""
Classifies importance of the contact into 3 categories:
* Low
* Normal
* High
The priority element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'priority'
class Relation(atom.core.XmlElement):
"""
This element describe another entity (usually a person) that is in a
relation of some kind with the contact.
"""
_qname = CONTACTS_TEMPLATE % 'relation'
rel = 'rel'
label = 'label'
class Sensitivity(atom.core.XmlElement):
"""
Classifies sensitivity of the contact into the following categories:
* Confidential
* Normal
* Personal
* Private
The sensitivity element cannot be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'sensitivity'
rel = 'rel'
class UserDefinedField(atom.core.XmlElement):
"""
Represents an arbitrary key-value pair attached to the contact.
"""
_qname = CONTACTS_TEMPLATE % 'userDefinedField'
key = 'key'
value = 'value'
def UserDefinedFieldFromString(xml_string):
return atom.core.parse(UserDefinedField, xml_string)
class Website(atom.core.XmlElement):
"""
Describes websites associated with the contact, including links.
May be repeated.
"""
_qname = CONTACTS_TEMPLATE % 'website'
href = 'href'
label = 'label'
primary = 'primary'
rel = 'rel'
def WebsiteFromString(xml_string):
return atom.core.parse(Website, xml_string)
class HouseName(atom.core.XmlElement):
"""
Used in places where houses or buildings have names (and
not necessarily numbers), eg. "The Pillars".
"""
_qname = CONTACTS_TEMPLATE % 'housename'
class Street(atom.core.XmlElement):
"""
Can be street, avenue, road, etc. This element also includes the house
number and room/apartment/flat/floor number.
"""
_qname = CONTACTS_TEMPLATE % 'street'
class POBox(atom.core.XmlElement):
"""
Covers actual P.O. boxes, drawers, locked bags, etc. This is usually but not
always mutually exclusive with street
"""
_qname = CONTACTS_TEMPLATE % 'pobox'
class Neighborhood(atom.core.XmlElement):
"""
This is used to disambiguate a street address when a city contains more than
one street with the same name, or to specify a small place whose mail is
routed through a larger postal town. In China it could be a county or a
minor city.
"""
_qname = CONTACTS_TEMPLATE % 'neighborhood'
class City(atom.core.XmlElement):
"""
Can be city, village, town, borough, etc. This is the postal town and not
necessarily the place of residence or place of business.
"""
_qname = CONTACTS_TEMPLATE % 'city'
class SubRegion(atom.core.XmlElement):
"""
Handles administrative districts such as U.S. or U.K. counties that are not
used for mail addressing purposes. Subregion is not intended for
delivery addresses.
"""
_qname = CONTACTS_TEMPLATE % 'subregion'
class Region(atom.core.XmlElement):
"""
A state, province, county (in Ireland), Land (in Germany),
departement (in France), etc.
"""
_qname = CONTACTS_TEMPLATE % 'region'
class PostalCode(atom.core.XmlElement):
"""
Postal code. Usually country-wide, but sometimes specific to the
city (e.g. "2" in "Dublin 2, Ireland" addresses).
"""
_qname = CONTACTS_TEMPLATE % 'postcode'
class Country(atom.core.XmlElement):
""" The name or code of the country. """
_qname = CONTACTS_TEMPLATE % 'country'
class Status(atom.core.XmlElement):
"""Person's status element."""
_qname = CONTACTS_TEMPLATE % 'status'
indexed = 'indexed'
class PersonEntry(gdata.data.BatchEntry):
    """Represents a google contact"""

    # Each class attribute maps an XML child element onto this entry;
    # list-wrapped types mark elements that may be repeated.
    link = [ContactLink]
    billing_information = BillingInformation
    birthday = Birthday
    calendar_link = [CalendarLink]
    directory_server = DirectoryServer
    event = [Event]
    external_id = [ExternalId]
    gender = Gender
    hobby = [Hobby]
    initials = Initials
    jot = [Jot]
    language = [Language]
    maiden_name = MaidenName
    mileage = Mileage
    nickname = NickName
    occupation = Occupation
    priority = Priority
    relation = [Relation]
    sensitivity = Sensitivity
    user_defined_field = [UserDefinedField]
    website = [Website]

    # Shared person fields defined in gdata.data.
    name = gdata.data.Name
    phone_number = [gdata.data.PhoneNumber]
    organization = gdata.data.Organization
    postal_address = [gdata.data.PostalAddress]
    email = [gdata.data.Email]
    im = [gdata.data.Im]
    structured_postal_address = [gdata.data.StructuredPostalAddress]
    extended_property = [gdata.data.ExtendedProperty]
    status = Status
class Deleted(atom.core.XmlElement):
"""If present, indicates that this contact has been deleted."""
_qname = gdata.GDATA_TEMPLATE % 'deleted'
class GroupMembershipInfo(atom.core.XmlElement):
"""
Identifies the group to which the contact belongs or belonged.
The group is referenced by its id.
"""
_qname = CONTACTS_TEMPLATE % 'groupMembershipInfo'
href = 'href'
deleted = 'deleted'
class ContactEntry(PersonEntry):
    """A Google Contacts flavor of an Atom Entry."""

    # Present when the contact has been deleted on the server.
    deleted = Deleted
    group_membership_info = [GroupMembershipInfo]
    organization = gdata.data.Organization

    def GetPhotoLink(self):
        """Return this entry's contact-photo link, or None if absent."""
        for a_link in self.link:
            if a_link.rel == PHOTO_LINK_REL:
                return a_link
        return None

    def GetPhotoEditLink(self):
        """Return this entry's photo-edit link, or None if absent."""
        for a_link in self.link:
            if a_link.rel == PHOTO_EDIT_LINK_REL:
                return a_link
        return None
class ContactsFeed(gdata.data.BatchFeed):
"""A collection of Contacts."""
entry = [ContactEntry]
class SystemGroup(atom.core.XmlElement):
"""The contacts systemGroup element.
When used within a contact group entry, indicates that the group in
question is one of the predefined system groups."""
_qname = CONTACTS_TEMPLATE % 'systemGroup'
id = 'id'
class GroupEntry(gdata.data.BatchEntry):
"""Represents a contact group."""
extended_property = [gdata.data.ExtendedProperty]
system_group = SystemGroup
class GroupsFeed(gdata.data.BatchFeed):
"""A Google contact groups feed flavor of an Atom Feed."""
entry = [GroupEntry]
class ProfileEntry(PersonEntry):
"""A Google Profiles flavor of an Atom Entry."""
def ProfileEntryFromString(xml_string):
    """Convert an XML string into a ProfileEntry object.

    Args:
        xml_string: string The XML describing a Profile entry.

    Returns:
        A ProfileEntry object corresponding to the given XML.
    """
    parsed_entry = atom.core.parse(ProfileEntry, xml_string)
    return parsed_entry
class ProfilesFeed(gdata.data.BatchFeed):
"""A Google Profiles feed flavor of an Atom Feed."""
_qname = atom.data.ATOM_TEMPLATE % 'feed'
entry = [ProfileEntry]
def ProfilesFeedFromString(xml_string):
    """Convert an XML string into a ProfilesFeed object.

    Args:
        xml_string: string The XML describing a Profiles feed.

    Returns:
        A ProfilesFeed object corresponding to the given XML.
    """
    parsed_feed = atom.core.parse(ProfilesFeed, xml_string)
    return parsed_feed
|
alexrudy/Zeeko
|
refs/heads/master
|
zeeko/utils/sandwich.py
|
1
|
# -*- coding: utf-8 -*-
import sys

# True when running under Python 2 (where str is already bytes).
PY2 = (sys.version_info[0] == 2)

__all__ = ['sandwich_unicode', 'unsandwich_unicode']


def sandwich_unicode(value, encoding='utf-8'):
    """Encode *value* to bytes ("unicode sandwich" boundary).

    Bytes pass through untouched; text is encoded with *encoding*.
    Always returns bytes.
    """
    if isinstance(value, bytes):
        return value
    return value.encode(encoding)


def unsandwich_unicode(value, encoding='utf-8'):
    """Decode a bytestring back to the native string type.

    On Python 3, bytes are decoded with *encoding*; on Python 2 (or for
    non-bytes input) the value is returned unchanged.
    """
    if not PY2 and isinstance(value, bytes):
        return value.decode(encoding)
    return value
|
Lektorium-LLC/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/embargo/middleware.py
|
10
|
"""Middleware for embargoing site and courses.
IMPORTANT NOTE: This code WILL NOT WORK if you have a misconfigured proxy
server. If you are configuring embargo functionality, or if you are
experiencing mysterious problems with embargoing, please check that your
reverse proxy is setting any of the well known client IP address headers (ex.,
HTTP_X_FORWARDED_FOR).
This middleware allows you to:
* Embargoing courses (access restriction by courses)
* Embargoing site (access restriction of the main site)
Embargo can restrict by states and whitelist/blacklist (IP Addresses
(ie. 10.0.0.0), Networks (ie. 10.0.0.0/24)), or the user profile country.
Usage:
1) Enable embargo by setting `settings.FEATURES['EMBARGO']` to True.
2) In Django admin, create a new `IPFilter` model to block or whitelist
an IP address from accessing the site.
3) In Django admin, create a new `RestrictedCourse` model and
configure a whitelist or blacklist of countries for that course.
"""
import logging
import re
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from ipware.ip import get_ip
from util.request import course_id_from_url
from . import api as embargo_api
from .models import IPFilter
log = logging.getLogger(__name__)
class EmbargoMiddleware(object):
    """Middleware for embargoing site and courses. """

    # Requests whose path matches any of these patterns bypass the
    # embargo checks entirely.
    ALLOW_URL_PATTERNS = [
        # Don't block the embargo message pages; otherwise we'd
        # end up in an infinite redirect loop.
        re.compile(r'^/embargo/blocked-message/'),

        # Don't block the Django admin pages.  Otherwise, we might
        # accidentally lock ourselves out of Django admin
        # during testing.
        re.compile(r'^/admin/'),

        # Do not block access to course metadata. This information is needed for
        # server-to-server calls.
        # NOTE(review): '[\d+]' is a character class matching one digit or a
        # literal '+', not '\d+' (one-or-more digits) -- confirm intent
        # before changing.
        re.compile(r'^/api/course_structure/v[\d+]/courses/{}/$'.format(settings.COURSE_ID_PATTERN)),
    ]

    def __init__(self):
        # If embargoing is turned off, make this middleware do nothing
        # (Django drops middleware that raises MiddlewareNotUsed).
        if not settings.FEATURES.get('EMBARGO'):
            raise MiddlewareNotUsed()

    def process_request(self, request):
        """Block requests based on embargo rules.

        This will perform the following checks:
        1) If the user's IP address is blacklisted, block.
        2) If the user's IP address is whitelisted, allow.
        3) If the user's country (inferred from their IP address) is blocked
           for a courseware page, block.
        4) If the user's country (retrieved from the user's profile) is
           blocked for a courseware page, block.
        5) Allow access.
        """
        # Never block certain patterns by IP address
        for pattern in self.ALLOW_URL_PATTERNS:
            if pattern.match(request.path) is not None:
                return None

        ip_address = get_ip(request)
        ip_filter = IPFilter.current()

        if ip_filter.enabled and ip_address in ip_filter.blacklist_ips:
            log.info(
                (
                    u"User %s was blocked from accessing %s "
                    u"because IP address %s is blacklisted."
                ), request.user.id, request.path, ip_address
            )

            # If the IP is blacklisted, reject.
            # This applies to any request, not just courseware URLs.
            ip_blacklist_url = reverse(
                'embargo:blocked_message',
                kwargs={
                    'access_point': 'courseware',
                    'message_key': 'embargo'
                }
            )
            return redirect(ip_blacklist_url)

        elif ip_filter.enabled and ip_address in ip_filter.whitelist_ips:
            log.info(
                (
                    u"User %s was allowed access to %s because "
                    u"IP address %s is whitelisted."
                ),
                request.user.id, request.path, ip_address
            )

            # If the IP is whitelisted, then allow access,
            # skipping later checks.
            return None

        else:
            # Otherwise, perform the country access checks.
            # This applies only to courseware URLs.
            return self.country_access_rules(request.user, ip_address, request.path)

    def country_access_rules(self, user, ip_address, url_path):
        """
        Check the country access rules for a given course.
        Applies only to courseware URLs.

        Args:
            user (User): The user making the current request.
            ip_address (str): The IP address from which the request originated.
            url_path (str): The request path.

        Returns:
            HttpResponse or None
        """
        course_id = course_id_from_url(url_path)

        if course_id:
            redirect_url = embargo_api.redirect_if_blocked(
                course_id,
                user=user,
                ip_address=ip_address,
                url=url_path,
                access_point='courseware'
            )
            if redirect_url:
                return redirect(redirect_url)
        # Implicit None: non-course URLs and unblocked requests fall through.
|
manashmndl/kivy
|
refs/heads/master
|
examples/widgets/lists/list_simple_in_kv.py
|
52
|
from kivy.uix.modalview import ModalView
from kivy.uix.listview import ListView
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
Builder.load_string("""
<ListViewModal>:
size_hint: None,None
size: 400,400
ListView:
size_hint: .8,.8
item_strings: [str(index) for index in range(100)]
""")
class ListViewModal(ModalView):
    # Content (the ListView and its 100 item strings) is supplied by
    # the kv rule registered above via Builder.load_string(); this
    # subclass only forwards construction.
    def __init__(self, **kwargs):
        super(ListViewModal, self).__init__(**kwargs)
class MainView(GridLayout):
    """Implementation of a list view declared in a kv template.
    """

    def __init__(self, **kwargs):
        # Force a single-column grid regardless of caller kwargs.
        kwargs['cols'] = 1
        super(MainView, self).__init__(**kwargs)

        listview_modal = ListViewModal()

        self.add_widget(listview_modal)
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(MainView(width=800))
|
google/starthinker
|
refs/heads/master
|
dags/dbm_dag.py
|
1
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
DV360 Report
Create a DV360 report.
- Reference field values from the <a href='https://developers.google.com/bid-manager/v1/reports'>DV360 API</a> to build a report.
- Copy and paste the JSON definition of a report, <a href='https://github.com/google/starthinker/blob/master/tests/scripts/dbm_to_bigquery.json#L9-L40' target='_blank'>sample for reference</a>.
- The report is only created, a separate script is required to move the data.
- To reset a report, delete it from DV360 reporting.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# User-tunable inputs; the generated Airflow DAG resolves the RECIPE 'field'
# placeholders below against these values at run time.
INPUTS = {
  'auth_read': 'user',  # Credentials used for reading data.
  'report': '{}',  # Report body and filters.
  'delete': False,  # If report exists, delete it before creating a new one.
}
# Single-task recipe: the 'dbm' task creates (and optionally recreates) a
# DV360 report from the JSON body supplied via INPUTS['report'].
RECIPE = {
  'tasks': [
    {
      'dbm': {
        'auth': {
          'field': {
            'name': 'auth_read',
            'kind': 'authentication',
            'order': 1,
            'default': 'user',
            'description': 'Credentials used for reading data.'
          }
        },
        'report': {
          'field': {
            'name': 'report',
            'kind': 'json',
            'order': 1,
            'default': '{}',
            'description': 'Report body and filters.'
          }
        },
        'delete': {
          'field': {
            'name': 'delete',
            'kind': 'boolean',
            'order': 2,
            'default': False,
            'description': 'If report exists, delete it before creating a new one.'
          }
        }
      }
    }
  ]
}
dag_maker = DAG_Factory('dbm', RECIPE, INPUTS)
dag = dag_maker.generate()  # module-level 'dag' is what the Airflow scheduler discovers
if __name__ == "__main__":
  dag_maker.print_commandline()
|
sergmelikyan/murano
|
refs/heads/master
|
murano/tests/functional/api/v1/test_services.py
|
2
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest import exceptions
from tempest.test import attr
from murano.tests.functional.api import base
class TestServices(base.TestCase):
    """Functional API tests for services inside an environment session.

    Client calls return ``(response, body)`` tuples; the ``[1]`` indexing
    keeps only the body. Negative tests assert the exception raised for
    missing or invalid identifiers. ``service['?']['id']`` is the MuranoPL
    object id of the created demo service.
    """
    @attr(type='smoke')
    def test_get_services_list(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        resp, services_list = self.client.get_services_list(env['id'],
                                                            sess['id'])
        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(services_list, list))
    @attr(type='negative')
    def test_get_services_list_without_env_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.client.get_services_list,
                          None,
                          sess['id'])
    @attr(type='negative')
    def test_get_services_list_after_delete_env(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.client.delete_environment(env['id'])
        self.assertRaises(exceptions.NotFound,
                          self.client.get_services_list,
                          env['id'],
                          sess['id'])
    @attr(type='negative')
    def test_get_services_list_after_delete_session(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.client.delete_session(env['id'], sess['id'])
        self.assertRaises(exceptions.NotFound,
                          self.client.get_services_list,
                          env['id'],
                          sess['id'])
    @attr(type='smoke')
    def test_create_and_delete_demo_service(self):
        # List length must grow by one after create and shrink back after delete.
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        services_list = self.client.get_services_list(env['id'], sess['id'])[1]
        resp, service = self.create_demo_service(env['id'], sess['id'])
        services_list_ = self.client.get_services_list(env['id'],
                                                       sess['id'])[1]
        self.assertEqual(resp.status, 200)
        self.assertEqual(len(services_list) + 1, len(services_list_))
        resp = self.client.delete_service(env['id'],
                                          sess['id'],
                                          service['?']['id'])[0]
        services_list_ = self.client.get_services_list(env['id'],
                                                       sess['id'])[1]
        self.assertEqual(resp.status, 200)
        self.assertEqual(len(services_list), len(services_list_))
    @attr(type='negative')
    def test_create_demo_service_without_env_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.create_demo_service,
                          None,
                          sess['id'])
    @attr(type='negative')
    def test_create_demo_service_without_sess_id(self):
        env = self.create_environment('test')
        self.client.create_session(env['id'])
        self.assertRaises(exceptions.Unauthorized,
                          self.create_demo_service,
                          env['id'],
                          "")
    @attr(type='negative')
    def test_delete_demo_service_without_env_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_service,
                          None,
                          sess['id'],
                          service['?']['id'])
    @attr(type='negative')
    def test_delete_demo_service_without_session_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.client.delete_service,
                          env['id'],
                          "",
                          service['?']['id'])
    @attr(type='negative')
    def test_double_delete_service(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.client.delete_service(env['id'], sess['id'], service['?']['id'])
        self.assertRaises(exceptions.NotFound,
                          self.client.delete_service,
                          env['id'],
                          sess['id'],
                          service['?']['id'])
    @attr(type='smoke')
    def test_get_service(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        resp, service_ = self.client.get_service(env['id'],
                                                 sess['id'],
                                                 service['?']['id'])
        self.assertEqual(resp.status, 200)
        self.assertEqual(service, service_)
    @attr(type='negative')
    def test_get_service_without_env_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.NotFound,
                          self.client.get_service,
                          None,
                          sess['id'],
                          service['?']['id'])
    @testtools.skip("https://bugs.launchpad.net/murano/+bug/1295573")
    @attr(type='negative')
    def test_get_service_without_sess_id(self):
        env = self.create_environment('test')
        sess = self.client.create_session(env['id'])[1]
        service = self.create_demo_service(env['id'], sess['id'])[1]
        self.assertRaises(exceptions.Unauthorized,
                          self.client.get_service,
                          env['id'],
                          "",
                          service['?']['id'])
|
muntasirsyed/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/MethodContext.after.py
|
71
|
class C:
    # NOTE(review): golden test data for an IDE "extract method" refactoring;
    # the Python 2 statement form `print x` is intentional here.
    def foo(self):
        self.bar()
    def bar(self):
        for x in [1, 2]:
            print x
|
sometallgit/AutoUploader
|
refs/heads/master
|
Python27/Lib/sqlite3/test/factory.py
|
8
|
#-*- coding: ISO-8859-1 -*-
# pysqlite2/test/factory.py: tests for the various factories in pysqlite
#
# Copyright (C) 2005-2007 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import sqlite3 as sqlite
from collections import Sequence
class MyConnection(sqlite.Connection):
    """Minimal Connection subclass used to exercise ``connect(factory=...)``."""
    def __init__(self, *args, **kwargs):
        sqlite.Connection.__init__(self, *args, **kwargs)
def dict_factory(cursor, row):
    """Row factory that maps each column name to its value.

    ``cursor.description`` yields 7-tuples whose first element is the
    column name; pair those names positionally with the row values.
    """
    column_names = (entry[0] for entry in cursor.description)
    return dict(zip(column_names, row))
class MyCursor(sqlite.Cursor):
    """Cursor whose fetched rows are produced by :func:`dict_factory`."""
    def __init__(self, *args, **kwargs):
        sqlite.Cursor.__init__(self, *args, **kwargs)
        self.row_factory = dict_factory  # every fetched row becomes a dict
class ConnectionFactoryTests(unittest.TestCase):
    """``sqlite3.connect(factory=...)`` must instantiate the given class."""
    def setUp(self):
        self.con = sqlite.connect(":memory:", factory=MyConnection)
    def tearDown(self):
        self.con.close()
    def CheckIsInstance(self):
        self.assertIsInstance(self.con, MyConnection)
class CursorFactoryTests(unittest.TestCase):
    """``Connection.cursor(factory=...)`` must honour and validate the factory."""
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def tearDown(self):
        self.con.close()
    def CheckIsInstance(self):
        # Default, positional and keyword factory forms all work.
        cur = self.con.cursor()
        self.assertIsInstance(cur, sqlite.Cursor)
        cur = self.con.cursor(MyCursor)
        self.assertIsInstance(cur, MyCursor)
        cur = self.con.cursor(factory=lambda con: MyCursor(con))
        self.assertIsInstance(cur, MyCursor)
    def CheckInvalidFactory(self):
        # not a callable at all
        self.assertRaises(TypeError, self.con.cursor, None)
        # invalid callable with not exact one argument
        self.assertRaises(TypeError, self.con.cursor, lambda: None)
        # invalid callable returning non-cursor
        self.assertRaises(TypeError, self.con.cursor, lambda con: None)
class RowFactoryTestsBackwardsCompat(unittest.TestCase):
    """Legacy behaviour: a cursor factory's own ``row_factory`` still applies."""
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckIsProducedByFactory(self):
        cur = self.con.cursor(factory=MyCursor)
        cur.execute("select 4+5 as foo")
        row = cur.fetchone()
        self.assertIsInstance(row, dict)  # MyCursor installs dict_factory
        cur.close()
    def tearDown(self):
        self.con.close()
class RowFactoryTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
def CheckCustomFactory(self):
self.con.row_factory = lambda cur, row: list(row)
row = self.con.execute("select 1, 2").fetchone()
self.assertIsInstance(row, list)
def CheckSqliteRowIndex(self):
self.con.row_factory = sqlite.Row
row = self.con.execute("select 1 as a, 2 as b").fetchone()
self.assertIsInstance(row, sqlite.Row)
col1, col2 = row["a"], row["b"]
self.assertEqual(col1, 1, "by name: wrong result for column 'a'")
self.assertEqual(col2, 2, "by name: wrong result for column 'a'")
col1, col2 = row["A"], row["B"]
self.assertEqual(col1, 1, "by name: wrong result for column 'A'")
self.assertEqual(col2, 2, "by name: wrong result for column 'B'")
self.assertEqual(row[0], 1, "by index: wrong result for column 0")
self.assertEqual(row[0L], 1, "by index: wrong result for column 0")
self.assertEqual(row[1], 2, "by index: wrong result for column 1")
self.assertEqual(row[1L], 2, "by index: wrong result for column 1")
self.assertEqual(row[-1], 2, "by index: wrong result for column -1")
self.assertEqual(row[-1L], 2, "by index: wrong result for column -1")
self.assertEqual(row[-2], 1, "by index: wrong result for column -2")
self.assertEqual(row[-2L], 1, "by index: wrong result for column -2")
with self.assertRaises(IndexError):
row['c']
with self.assertRaises(IndexError):
row[2]
with self.assertRaises(IndexError):
row[2L]
with self.assertRaises(IndexError):
row[-3]
with self.assertRaises(IndexError):
row[-3L]
with self.assertRaises(IndexError):
row[2**1000]
def CheckSqliteRowIter(self):
"""Checks if the row object is iterable"""
self.con.row_factory = sqlite.Row
row = self.con.execute("select 1 as a, 2 as b").fetchone()
for col in row:
pass
def CheckSqliteRowAsTuple(self):
"""Checks if the row object can be converted to a tuple"""
self.con.row_factory = sqlite.Row
row = self.con.execute("select 1 as a, 2 as b").fetchone()
t = tuple(row)
self.assertEqual(t, (row['a'], row['b']))
def CheckSqliteRowAsDict(self):
"""Checks if the row object can be correctly converted to a dictionary"""
self.con.row_factory = sqlite.Row
row = self.con.execute("select 1 as a, 2 as b").fetchone()
d = dict(row)
self.assertEqual(d["a"], row["a"])
self.assertEqual(d["b"], row["b"])
def CheckSqliteRowHashCmp(self):
"""Checks if the row object compares and hashes correctly"""
self.con.row_factory = sqlite.Row
row_1 = self.con.execute("select 1 as a, 2 as b").fetchone()
row_2 = self.con.execute("select 1 as a, 2 as b").fetchone()
row_3 = self.con.execute("select 1 as a, 3 as b").fetchone()
self.assertEqual(row_1, row_1)
self.assertEqual(row_1, row_2)
self.assertTrue(row_2 != row_3)
self.assertFalse(row_1 != row_1)
self.assertFalse(row_1 != row_2)
self.assertFalse(row_2 == row_3)
self.assertEqual(row_1, row_2)
self.assertEqual(hash(row_1), hash(row_2))
self.assertNotEqual(row_1, row_3)
self.assertNotEqual(hash(row_1), hash(row_3))
def CheckSqliteRowAsSequence(self):
""" Checks if the row object can act like a sequence """
self.con.row_factory = sqlite.Row
row = self.con.execute("select 1 as a, 2 as b").fetchone()
as_tuple = tuple(row)
self.assertEqual(list(reversed(row)), list(reversed(as_tuple)))
self.assertIsInstance(row, Sequence)
def CheckFakeCursorClass(self):
# Issue #24257: Incorrect use of PyObject_IsInstance() caused
# segmentation fault.
# Issue #27861: Also applies for cursor factory.
class FakeCursor(str):
__class__ = sqlite.Cursor
self.con.row_factory = sqlite.Row
self.assertRaises(TypeError, self.con.cursor, FakeCursor)
self.assertRaises(TypeError, sqlite.Row, FakeCursor(), ())
def tearDown(self):
self.con.close()
class TextFactoryTests(unittest.TestCase):
    """``Connection.text_factory`` selects the Python type of TEXT values.

    Python 2 module: the ``unicode`` vs ``str`` distinctions are intentional.
    """
    def setUp(self):
        self.con = sqlite.connect(":memory:")
    def CheckUnicode(self):
        # Default factory: TEXT round-trips as unicode.
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
    def CheckString(self):
        # text_factory = str: TEXT comes back as UTF-8 encoded bytes.
        self.con.text_factory = str
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria,)).fetchone()
        self.assertEqual(type(row[0]), str, "type of row[0] must be str")
        self.assertEqual(row[0], austria.encode("utf-8"), "column must equal original data in UTF-8")
    def CheckCustom(self):
        # A custom callable is applied to each TEXT value as fetched.
        self.con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
        austria = unicode("Österreich", "latin1")
        row = self.con.execute("select ?", (austria.encode("latin1"),)).fetchone()
        self.assertEqual(type(row[0]), unicode, "type of row[0] must be unicode")
        self.assertTrue(row[0].endswith(u"reich"), "column must contain original data")
    def CheckOptimizedUnicode(self):
        # OptimizedUnicode: str for pure-ASCII values, unicode otherwise.
        self.con.text_factory = sqlite.OptimizedUnicode
        austria = unicode("Österreich", "latin1")
        germany = unicode("Deutchland")
        a_row = self.con.execute("select ?", (austria,)).fetchone()
        d_row = self.con.execute("select ?", (germany,)).fetchone()
        self.assertEqual(type(a_row[0]), unicode, "type of non-ASCII row must be unicode")
        self.assertEqual(type(d_row[0]), str, "type of ASCII-only row must be str")
    def tearDown(self):
        self.con.close()
class TextFactoryTestsWithEmbeddedZeroBytes(unittest.TestCase):
    """Text factories must preserve NUL bytes embedded in TEXT values."""
    def setUp(self):
        self.con = sqlite.connect(":memory:")
        self.con.execute("create table test (value text)")
        self.con.execute("insert into test (value) values (?)", ("a\x00b",))
    def CheckString(self):
        # text_factory defaults to unicode
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), unicode)
        self.assertEqual(row[0], "a\x00b")
    def CheckCustom(self):
        # A custom factory should receive a str argument
        self.con.text_factory = lambda x: x
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), str)
        self.assertEqual(row[0], "a\x00b")
    def CheckOptimizedUnicodeAsString(self):
        # ASCII -> str argument
        self.con.text_factory = sqlite.OptimizedUnicode
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), str)
        self.assertEqual(row[0], "a\x00b")
    def CheckOptimizedUnicodeAsUnicode(self):
        # Non-ASCII -> unicode argument
        self.con.text_factory = sqlite.OptimizedUnicode
        self.con.execute("delete from test")
        self.con.execute("insert into test (value) values (?)", (u'ä\0ö',))
        row = self.con.execute("select value from test").fetchone()
        self.assertIs(type(row[0]), unicode)
        self.assertEqual(row[0], u"ä\x00ö")
    def tearDown(self):
        self.con.close()
def suite():
    """Aggregate every ``Check*`` test from this module into one suite.

    Order matches the historical hand-written suite construction.
    """
    test_cases = (
        ConnectionFactoryTests,
        CursorFactoryTests,
        RowFactoryTestsBackwardsCompat,
        RowFactoryTests,
        TextFactoryTests,
        TextFactoryTestsWithEmbeddedZeroBytes,
    )
    return unittest.TestSuite(
        unittest.makeSuite(case, "Check") for case in test_cases)
def test():
    """Run the module's full suite with a plain text runner."""
    unittest.TextTestRunner().run(suite())
if __name__ == "__main__":
    test()  # allow running this test module directly
|
rossant/qtools
|
refs/heads/master
|
qtools/__init__.py
|
1
|
from qtpy import *
from utils import *
from tasks import *
from garbagecollector import *
|
fnouama/intellij-community
|
refs/heads/master
|
python/testData/refactoring/introduceVariable/substringBeforeFormatTuple.py
|
83
|
# Golden data for an IDE "introduce variable" test; the <selection> markers
# are consumed by the test harness, not by Python.
print("<selection>Hello</selection> %s" % ("World",))
|
wreckJ/intellij-community
|
refs/heads/master
|
python/helpers/pydev/pydev_ipython/inputhookgtk3.py
|
106
|
# encoding: utf-8
"""
Enable Gtk3 to be used interactively by IPython.
Authors: Thomi Richards
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from gi.repository import Gtk, GLib # @UnresolvedImport
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
    """GLib IO callback: stop the Gtk main loop once stdin becomes readable."""
    Gtk.main_quit()
    return False  # returning False removes the GLib watch after one firing
def create_inputhook_gtk3(stdin_file):
    """Build an IPython input hook that runs the Gtk3 loop until *stdin_file* has input."""
    def inputhook_gtk3():
        # Wake (and quit the Gtk loop) as soon as the user types something.
        GLib.io_add_watch(stdin_file, GLib.IO_IN, _main_quit)
        Gtk.main()
        return 0
    return inputhook_gtk3
|
SravanthiSinha/edx-platform
|
refs/heads/master
|
common/djangoapps/cache_toolbox/model.py
|
239
|
"""
Caching model instances
-----------------------
``cache_model`` adds utility methods to a model to obtain ``ForeignKey``
instances via the cache.
Usage
~~~~~
::
from django.db import models
from django.contrib.auth.models import User
class Foo(models.Model):
        name = models.CharField(max_length=20)
cache_model(Foo)
::
>>> a = Foo.objects.create(name='a')
>>> a
<Foo: >
>>> Foo.get_cached(a.pk) # Cache miss
<Foo: >
>>> a = Foo.get_cached(a.pk) # Cache hit
>>> a.name
u'a'
Instances returned from ``get_cached`` are real model instances::
>>> a = Foo.get_cached(a.pk) # Cache hit
>>> type(a)
<class '__main__.models.A'>
>>> a.pk
1L
Invalidation
~~~~~~~~~~~~
Invalidation is performed automatically upon saving or deleting a ``Foo``
instance::
>>> a = Foo.objects.create(name='a')
>>> a.name = 'b'
>>> a.save()
>>> a = Foo.get_cached(a.pk)
>>> a.name
u'b'
>>> a.delete()
>>> a = Foo.get_cached(a.pk)
... Foo.DoesNotExist
"""
from django.db.models.signals import post_save, post_delete
from .core import get_instance, delete_instance
def cache_model(model, timeout=None):
    """Attach a ``get_cached`` classmethod to *model* and wire cache invalidation.

    Idempotent: a model that already has ``get_cached`` is left untouched.
    Saving or deleting an instance evicts it from the cache via the
    ``post_save``/``post_delete`` signals.
    """
    if hasattr(model, 'get_cached'):
        # Already patched
        return
    def clear_cache(sender, instance, *args, **kwargs):
        delete_instance(sender, instance)
    # weak=False keeps the local closure alive for the life of the process.
    post_save.connect(clear_cache, sender=model, weak=False)
    post_delete.connect(clear_cache, sender=model, weak=False)
    @classmethod
    def get(cls, pk, using=None):
        # pk may legitimately be None (e.g. an unset foreign key).
        if pk is None:
            return None
        return get_instance(cls, pk, timeout, using)
    model.get_cached = get
|
wolverineav/horizon
|
refs/heads/master
|
openstack_dashboard/test/integration_tests/pages/project/compute/access_and_security/securitygroupspage.py
|
10
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.regions import forms
from openstack_dashboard.test.integration_tests.regions import tables
from openstack_dashboard.test.integration_tests.pages.project.compute.\
access_and_security.managerulespage import ManageRulesPage
class SecurityGroupsTable(tables.TableRegion):
    """Region wrapper for the "Security Groups" table on the page."""
    name = "security_groups"
    CREATE_SECURITYGROUP_FORM_FIELDS = ("name", "description")
    @tables.bind_table_action('create')
    def create_group(self, create_button):
        """Open the creation form; returns the form region to fill in."""
        create_button.click()
        return forms.FormRegion(
            self.driver, self.conf,
            field_mappings=self.CREATE_SECURITYGROUP_FORM_FIELDS)
    @tables.bind_table_action('delete')
    def delete_group(self, delete_button):
        """Trigger deletion of marked rows; returns the confirmation form."""
        delete_button.click()
        return forms.BaseFormRegion(self.driver, self.conf, None)
    @tables.bind_row_action('manage_rules')
    def manage_rules(self, manage_rules_button, row):
        """Follow the row's "Manage Rules" action to its dedicated page."""
        manage_rules_button.click()
        return ManageRulesPage(self.driver, self.conf)
class SecuritygroupsPage(basepage.BaseNavigationPage):
    """Page object for the security groups tab of "Access & Security"."""
    SECURITYGROUPS_TABLE_NAME_COLUMN = 'name'
    def __init__(self, driver, conf):
        super(SecuritygroupsPage, self).__init__(driver, conf)
        self._page_title = "Access & Security"
    def _get_row_with_securitygroup_name(self, name):
        """Locate the table row whose Name column equals *name*."""
        return self.securitygroups_table.get_row(
            self.SECURITYGROUPS_TABLE_NAME_COLUMN, name)
    @property
    def securitygroups_table(self):
        # A new region instance is built on every access.
        return SecurityGroupsTable(self.driver, self.conf)
    def create_securitygroup(self, name, description=None):
        """Fill and submit the "Create Security Group" form."""
        create_securitygroups_form = self.securitygroups_table.create_group()
        create_securitygroups_form.name.text = name
        if description is not None:
            create_securitygroups_form.description.text = description
        create_securitygroups_form.submit()
    def delete_securitygroup(self, name):
        """Mark the named group's row and confirm its deletion."""
        row = self._get_row_with_securitygroup_name(name)
        row.mark()
        modal_confirmation_form = self.securitygroups_table.delete_group()
        modal_confirmation_form.submit()
    def is_securitygroup_present(self, name):
        """True when a row with the given name exists in the table."""
        return bool(self._get_row_with_securitygroup_name(name))
    def go_to_manage_rules(self, name):
        """Open the Manage Rules page for the named security group."""
        row = self._get_row_with_securitygroup_name(name)
        return self.securitygroups_table.manage_rules(row)
|
bcostm/mbed-os
|
refs/heads/master
|
tools/test/examples/examples.py
|
31
|
""" import and build a bunch of example programs """
from argparse import ArgumentParser
import os
from os.path import dirname, abspath, basename
import os.path
import sys
import subprocess
import json
ROOT = abspath(dirname(dirname(dirname(dirname(__file__)))))  # repo root, four levels above tools/test/examples
sys.path.insert(0, ROOT)  # make the in-tree ``tools`` package importable when run as a script
from tools.utils import argparse_force_uppercase_type
from tools.utils import argparse_many
from tools.build_api import get_mbed_official_release
import examples_lib as lib
from examples_lib import SUPPORTED_TOOLCHAINS, SUPPORTED_IDES
def main():
    """Parse arguments and dispatch to the chosen sub-command.

    Returns the sub-command's result (its failure count, or 0), suitable
    for use as the process exit status.
    """
    official_targets = get_mbed_official_release("5")
    official_target_names = [x[0] for x in official_targets]
    parser = ArgumentParser()
    parser.add_argument("-c", dest="config", default="examples.json")
    parser.add_argument("-e", "--example",
                        help=("filter the examples used in the script"),
                        type=argparse_many(lambda x: x),
                        default=[])
    subparsers = parser.add_subparsers()
    import_cmd = subparsers.add_parser("import")
    import_cmd.set_defaults(fn=do_import)
    clone_cmd = subparsers.add_parser("clone")
    clone_cmd.set_defaults(fn=do_clone)
    deploy_cmd = subparsers.add_parser("deploy")
    deploy_cmd.set_defaults(fn=do_deploy)
    version_cmd = subparsers.add_parser("tag")
    version_cmd.add_argument("tag")
    version_cmd.set_defaults(fn=do_versionning)
    # NOTE(review): removed stray trailing commas after several statements
    # below; they turned the statements into discarded one-element tuples.
    compile_cmd = subparsers.add_parser("compile")
    compile_cmd.set_defaults(fn=do_compile)
    compile_cmd.add_argument(
        "toolchains", nargs="*", default=SUPPORTED_TOOLCHAINS,
        type=argparse_force_uppercase_type(SUPPORTED_TOOLCHAINS,
                                           "toolchain"))
    compile_cmd.add_argument("-m", "--mcu",
                             help=("build for the given MCU (%s)" %
                                   ', '.join(official_target_names)),
                             metavar="MCU",
                             type=argparse_many(
                                 argparse_force_uppercase_type(
                                     official_target_names, "MCU")),
                             default=official_target_names)
    export_cmd = subparsers.add_parser("export")
    export_cmd.set_defaults(fn=do_export)
    export_cmd.add_argument(
        "ide", nargs="*", default=SUPPORTED_IDES,
        type=argparse_force_uppercase_type(SUPPORTED_IDES,
                                           "ide"))
    export_cmd.add_argument("-m", "--mcu",
                            help=("build for the given MCU (%s)" %
                                  ', '.join(official_target_names)),
                            metavar="MCU",
                            type=argparse_many(
                                argparse_force_uppercase_type(
                                    official_target_names, "MCU")),
                            default=official_target_names)
    args = parser.parse_args()
    # Close the config file deterministically (the original leaked the handle).
    with open(os.path.join(os.path.dirname(__file__), args.config)) as config_file:
        config = json.load(config_file)
    # Flatten the repo list across all examples, then apply the -e filter.
    all_examples = []
    for example in config['examples']:
        all_examples = all_examples + [basename(x['repo']) for x in lib.get_repo_list(example)]
    examples = [x for x in all_examples if x in args.example] if args.example else all_examples
    return args.fn(args, config, examples)
def do_export(args, config, examples):
    """Export each example for the requested IDEs and summarise the results.

    Returns the number of failed exports so callers can use it as an exit
    status. (Removed a dead ``results = {}`` that was immediately overwritten.)
    """
    results = lib.export_repos(config, args.ide, args.mcu, examples)
    lib.print_summary(results, export=True)
    failures = lib.get_num_failures(results, export=True)
    print("Number of failures = %d" % failures)
    return failures
def do_import(_, config, examples):
    """Do the import step of this process; returns 0 (success)."""
    lib.source_repos(config, examples)
    return 0
def do_clone(_, config, examples):
    """Do the clone step of this process; returns 0 (success)."""
    lib.clone_repos(config, examples)
    return 0
def do_deploy(_, config, examples):
    """Do the deploy step of this process; returns 0 (success)."""
    lib.deploy_repos(config, examples)
    return 0
def do_compile(args, config, examples):
    """Compile each example with the requested toolchains and MCUs.

    Returns the number of failed builds so callers can use it as an exit
    status. (Removed a dead ``results = {}`` that was immediately overwritten.)
    """
    results = lib.compile_repos(config, args.toolchains, args.mcu, examples)
    lib.print_summary(results)
    failures = lib.get_num_failures(results)
    print("Number of failures = %d" % failures)
    return failures
def do_versionning(args, config, examples):
    """Update each example's mbed-os to the version given by ``args.tag``."""
    # NOTE(review): the name is misspelled ("versionning") but is wired into
    # main() via set_defaults, so renaming would be a breaking change here.
    lib.update_mbedos_version(config, args.tag, examples)
    return 0
if __name__ == "__main__":
    sys.exit(main())  # exit status = failure count (0 on success)
|
livioribeiro/project-euler
|
refs/heads/master
|
python/p010.py
|
1
|
UP_TO = 2000000  # Project Euler #10: sum all primes strictly below this bound
import math
def is_prime(num):
    """Return True when *num* is a prime number.

    Fixes the original edge cases: the old ``if num <= 2: return True``
    wrongly reported 0, 1 and all negative numbers as prime. Callers in
    this script only pass odd numbers >= 3, so their results are unchanged.
    """
    if num < 2:
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    # Trial-divide by odd numbers up to (and including) sqrt(num).
    for i in range(3, math.ceil(math.sqrt(num)) + 1, 2):
        if num % i == 0:
            return False
    return True
if __name__ == '__main__':
    # 2 is the only even prime; sum it with the odd candidates below UP_TO.
    total = 2 + sum((i for i in range(3, UP_TO, 2) if is_prime(i)))
    print(total)
|
StrellaGroup/erpnext
|
refs/heads/develop
|
erpnext/selling/doctype/sales_order_item/sales_order_item.py
|
33
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.controllers.print_settings import print_settings_for_item_table
class SalesOrderItem(Document):
    """Controller for the "Sales Order Item" DocType."""
    def __setup__(self):
        # Apply the shared print settings for item tables when rendering.
        print_settings_for_item_table(self)
def on_doctype_update():
    """Frappe schema hook: index (item_code, warehouse) for faster lookups."""
    frappe.db.add_index("Sales Order Item", ["item_code", "warehouse"])
|
luipir/QTraffic
|
refs/heads/master
|
ui/qtraffic_select_layer_ui.py
|
1
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/qtraffic_select_layer.ui'
#
# Created: Tue Sep 8 18:01:38 2015
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Compatibility shims emitted by pyuic4 for different PyQt4 API levels.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 (or Python 3): QString is gone, plain strings are already unicode.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt without UnicodeUTF8: fall back to the 3-argument translate.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_selectLayer_dlg(object):
    """Generated UI (pyuic4) for the roads-layer selection dialog.

    NOTE(review): fixed the user-facing typo "vectotr" -> "vector". The same
    fix must be applied to the source .ui file, otherwise the next pyuic4
    run will reintroduce the typo.
    """
    def setupUi(self, selectLayer_dlg):
        selectLayer_dlg.setObjectName(_fromUtf8("selectLayer_dlg"))
        selectLayer_dlg.resize(400, 101)
        self.verticalLayout = QtGui.QVBoxLayout(selectLayer_dlg)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(selectLayer_dlg)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        self.selectLayer_CBox = QgsMapLayerComboBox(selectLayer_dlg)
        self.selectLayer_CBox.setObjectName(_fromUtf8("selectLayer_CBox"))
        self.verticalLayout.addWidget(self.selectLayer_CBox)
        self.buttonBox = QtGui.QDialogButtonBox(selectLayer_dlg)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(selectLayer_dlg)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), selectLayer_dlg.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), selectLayer_dlg.reject)
        QtCore.QMetaObject.connectSlotsByName(selectLayer_dlg)
    def retranslateUi(self, selectLayer_dlg):
        selectLayer_dlg.setWindowTitle(_translate("selectLayer_dlg", "Dialog", None))
        self.label.setText(_translate("selectLayer_dlg", "Select Roads vector layer", None))
from qgis.gui import QgsMapLayerComboBox
|
chylli/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/qt.py
|
113
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""QtWebKit implementation of the Port interface."""
import glob
import logging
import re
import sys
import os
import platform
from webkitpy.common.memoized import memoized
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.port.base import Port
from webkitpy.port.xvfbdriver import XvfbDriver
_log = logging.getLogger(__name__)  # module-level logger, webkitpy convention
class QtPort(Port):
ALL_VERSIONS = ['linux', 'win', 'mac']
port_name = "qt"
    def _wk2_port_name(self):
        """Port name used for WebKit2 (WebKitTestRunner) baselines."""
        return "qt-wk2"
    def _port_flag_for_scripts(self):
        """Command-line flag that selects this port in the WebKit scripts."""
        return "--qt"
    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        """Append the host OS (e.g. ``qt-linux``) unless a specific name was given."""
        if port_name and port_name != cls.port_name:
            return port_name
        return port_name + '-' + host.platform.os_name
    # sys_platform exists only for unit testing.
    def __init__(self, host, port_name, **kwargs):
        super(QtPort, self).__init__(host, port_name, **kwargs)
        # Everything after the "qt-" prefix is the target OS (linux/win/mac).
        self._operating_system = port_name.replace('qt-', '')
        # FIXME: Why is this being set at all?
        self._version = self.operating_system()
    def _generate_all_test_configurations(self):
        """Every (version, x86, build_type) combination this port supports."""
        configurations = []
        for version in self.ALL_VERSIONS:
            for build_type in self.ALL_BUILD_TYPES:
                configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type))
        return configurations
def _build_driver(self):
# The Qt port builds DRT as part of the main build step
return True
def supports_per_test_timeout(self):
return True
def _path_to_driver(self):
return self._build_path('bin/%s' % self.driver_name())
def _path_to_image_diff(self):
return self._build_path('bin/ImageDiff')
def _path_to_webcore_library(self):
if self.operating_system() == 'mac':
return self._build_path('lib/QtWebKitWidgets.framework/QtWebKitWidgets')
else:
return self._build_path('lib/libQt5WebKitWidgets.so')
def _modules_to_search_for_symbols(self):
# We search in every library to be reliable in the case of building with CONFIG+=force_static_libs_as_shared.
if self.operating_system() == 'mac':
frameworks = glob.glob(os.path.join(self._build_path('lib'), '*.framework'))
return [os.path.join(framework, os.path.splitext(os.path.basename(framework))[0]) for framework in frameworks]
else:
suffix = 'dll' if self.operating_system() == 'win' else 'so'
return glob.glob(os.path.join(self._build_path('lib'), 'lib*.' + suffix))
@memoized
def qt_version(self):
version = ''
try:
for line in self._executive.run_command(['qmake', '-v']).split('\n'):
match = re.search('Qt\sversion\s(?P<version>\d\.\d)', line)
if match:
version = match.group('version')
break
except OSError:
version = '5.0'
return version
def _search_paths(self):
# qt-mac-wk2
# /
# qt-wk1 qt-wk2
# \/
# qt-5.x
# \
# (qt-linux|qt-mac|qt-win)
# |
# qt
search_paths = []
if self.get_option('webkit_test_runner'):
if self.operating_system() == 'mac':
search_paths.append('qt-mac-wk2')
search_paths.append('qt-wk2')
else:
search_paths.append('qt-wk1')
search_paths.append('qt-' + self.qt_version())
search_paths.append(self.port_name + '-' + self.operating_system())
search_paths.append(self.port_name)
return search_paths
def default_baseline_search_path(self):
return map(self._webkit_baseline_path, self._search_paths())
def _port_specific_expectations_files(self):
paths = self._search_paths()
if self.get_option('webkit_test_runner'):
paths.append('wk2')
# expectations_files() uses the directories listed in _search_paths reversed.
# e.g. qt -> qt-linux -> qt-5.x -> qt-wk1
return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in paths]))
def setup_environ_for_server(self, server_name=None):
clean_env = super(QtPort, self).setup_environ_for_server(server_name)
clean_env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins')
self._copy_value_from_environ_if_set(clean_env, 'QT_DRT_WEBVIEW_MODE')
self._copy_value_from_environ_if_set(clean_env, 'DYLD_IMAGE_SUFFIX')
self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_LOG')
self._copy_value_from_environ_if_set(clean_env, 'DISABLE_NI_WARNING')
self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_PAUSE_UI_PROCESS')
self._copy_value_from_environ_if_set(clean_env, 'QT_QPA_PLATFORM_PLUGIN_PATH')
self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_DISABLE_UIPROCESS_DUMPPIXELS')
return clean_env
# FIXME: We should find a way to share this implmentation with Gtk,
# or teach run-launcher how to call run-safari and move this down to Port.
def show_results_html_file(self, results_filename):
run_launcher_args = []
if self.get_option('webkit_test_runner'):
run_launcher_args.append('-2')
run_launcher_args.append("file://%s" % results_filename)
self._run_script("run-launcher", run_launcher_args)
def operating_system(self):
return self._operating_system
def check_sys_deps(self, needs_http):
result = super(QtPort, self).check_sys_deps(needs_http)
if not 'WEBKIT_TESTFONTS' in os.environ:
_log.error('\nThe WEBKIT_TESTFONTS environment variable is not defined or not set properly.')
_log.error('You must set it before running the tests.')
_log.error('Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts')
return False
return result
# Qt port is not ready for parallel testing, see https://bugs.webkit.org/show_bug.cgi?id=77730 for details.
def default_child_processes(self):
return 1
|
gofortargets/CNN_brandsafety
|
refs/heads/master
|
knx/text/tokenizer/default_tokenizer.py
|
1
|
from BS.knx.text.tokenizer import treebank_tokenizer
def tokenize(text):
    """Split *text* into tokens by delegating to the shared Treebank tokenizer."""
    tokens = treebank_tokenizer.tokenize(text)
    return tokens
|
RonnyPfannschmidt/django-classy-settings
|
refs/heads/master
|
example/settings/__init__.py
|
5
|
import os
import cbs
# Select the settings class from the deployment mode, e.g.
# DJANGO_MODE=Local -> settings.local.LocalSettings (defaults to Local).
mode = os.environ.get('DJANGO_MODE', 'Local')
cbs.apply('settings.{}.{}Settings'.format(mode.lower(), mode.title()), globals())
|
jlegendary/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/varzesh3.py
|
120
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class Varzesh3IE(InfoExtractor):
    """Extractor for video.varzesh3.com (Iranian sports video site)."""
    _VALID_URL = r'https?://(?:www\.)?video\.varzesh3\.com/(?:[^/]+/)+(?P<id>[^/]+)/?'
    _TEST = {
        'url': 'http://video.varzesh3.com/germany/bundesliga/5-%D9%88%D8%A7%DA%A9%D9%86%D8%B4-%D8%A8%D8%B1%D8%AA%D8%B1-%D8%AF%D8%B1%D9%88%D8%A7%D8%B2%D9%87%E2%80%8C%D8%A8%D8%A7%D9%86%D8%A7%D9%86%D8%9B%D9%87%D9%81%D8%AA%D9%87-26-%D8%A8%D9%88%D9%86%D8%AF%D8%B3/',
        'md5': '2a933874cb7dce4366075281eb49e855',
        'info_dict': {
            'id': '76337',
            'ext': 'mp4',
            'title': '۵ واکنش برتر دروازه‌بانان؛هفته ۲۶ بوندسلیگا',
            'description': 'فصل ۲۰۱۵-۲۰۱۴',
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds the media file directly in a <source> tag.
        video_url = self._search_regex(
            r'<source[^>]+src="([^"]+)"', webpage, 'video url')
        title = self._og_search_title(webpage)
        description = self._html_search_regex(
            r'(?s)<div class="matn">(.+?)</div>',
            webpage, 'description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)
        # Prefer the numeric id from the canonical/shortlink URL; fall back
        # to the slug when it is absent.  NOTE(review): display_id is passed
        # as the regex *name* argument here -- presumably intentional reuse.
        video_id = self._search_regex(
            r"<link[^>]+rel='(?:canonical|shortlink)'[^>]+href='/\?p=([^']+)'",
            webpage, display_id, default=display_id)
        return {
            'url': video_url,
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
|
iut-ibk/DynaMind-UrbanSim
|
refs/heads/master
|
3rdparty/opus/src/urbansim_parcel/models/subarea_distribute_unplaced_jobs_model.py
|
2
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import where
from urbansim_parcel.models.subarea_scaling_jobs_model import SubareaScalingJobsModel
class SubareaDistributeUnplacedJobsModel(SubareaScalingJobsModel):
    """Randomly place (within sectors) any jobs that have no location yet."""
    model_name = "Subarea Distribute Unplaced Jobs Model"

    def run(self, location_set, agent_set, **kwargs):
        """Select every agent whose location id is unset (<= 0) and hand
        that index array to the scaling-jobs model for placement.

        'location_set', 'agent_set' are of type Dataset.
        """
        location_id_name = location_set.get_id_name()[0]
        unplaced = where(agent_set.get_attribute(location_id_name) <= 0)[0]
        return SubareaScalingJobsModel.run(self, location_set, agent_set, unplaced, **kwargs)
|
windyuuy/opera
|
refs/heads/master
|
chromium/src/third_party/trace-viewer/third_party/pywebsocket/src/mod_pywebsocket/memorizingfile.py
|
680
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
    """MemorizingFile wraps a file and memorizes lines read by readline.

    Note that data read by other methods are not memorized. This behavior
    is good enough for memorizing lines SimpleHTTPServer reads before
    the control reaches WebSocketRequestHandler.
    """

    def __init__(self, file_, max_memorized_lines=sys.maxint):
        """Construct an instance.

        Args:
            file_: the file object to wrap.
            max_memorized_lines: the maximum number of lines to memorize.
                Only the first max_memorized_lines are memorized.
                Default: sys.maxint.
        """
        self._file = file_
        self._memorized_lines = []
        self._max_memorized_lines = max_memorized_lines
        # When a size-limited readline leaves a partial line unconsumed,
        # the remainder is buffered here and served by the next call.
        self._buffered = False
        self._buffered_line = None

    def __getattribute__(self, name):
        # Proxy everything except our own attributes/methods to the wrapped
        # file, so this object can be used wherever a file is expected.
        # NOTE: any attribute added to this class must also be added to this
        # whitelist, or it will be looked up on the wrapped file instead.
        if name in ('_file', '_memorized_lines', '_max_memorized_lines',
                    '_buffered', '_buffered_line', 'readline',
                    'get_memorized_lines'):
            return object.__getattribute__(self, name)
        return self._file.__getattribute__(name)

    def readline(self, size=-1):
        """Override file.readline and memorize the line read.

        Note that even if size is specified and smaller than actual size,
        the whole line will be read out from underlying file object by
        subsequent readline calls.
        """
        if self._buffered:
            line = self._buffered_line
            self._buffered = False
        else:
            line = self._file.readline()
            # Only freshly read lines count toward the memorization cap.
            if line and len(self._memorized_lines) < self._max_memorized_lines:
                self._memorized_lines.append(line)
        if size >= 0 and size < len(line):
            # Caller asked for fewer bytes than the line holds: return the
            # prefix and keep the tail for the next readline call.
            self._buffered = True
            self._buffered_line = line[size:]
            return line[:size]
        return line

    def get_memorized_lines(self):
        """Get lines memorized so far."""
        return self._memorized_lines
# vi:sts=4 sw=4 et
|
jrydberg/edgy
|
refs/heads/master
|
src/edgy/xml/xmlbuilder.py
|
1
|
"""ElementBuilder.py - construct ElementTrees with friendly syntax
(C) 2005 Oren Tirosh. Released under the terms of the MIT License.
http://www.opensource.org/licenses/mit-license.php
* Extended Element factory
Backward-compatible with the standard ElementTree Element factory with
the following extensions:
Sub-elements may be supplied as arguments:
Element('tag', {'a': '5'}, Element('othertag'))
Attribute dictionary is optional:
Element('tag', Element('othertag'))
Element text may be supplied as an argument:
Element('tag', 'some text')
Element text and sub-elements:
Element('tag', 'some text', Element('othertag'))
Element text, sub-elements and sub-element tails:
Element('tag', 'some text', Element('othertag'), 'tail')
* Namespaces
A namespace is a factory for QNames.
ns.tag == ns+'tag' == QName('http://namespace/uri', 'tag')
where:
ns = Namespace('http://namespace/uri')
Namespace(None) or LocalNamespace generates LocalName objects instead
of QNames but is otherwise similar.
A second optional argument to Namespace is prefix which will be used
when generating XML instead of automatically-generated numeric namespace
prefixes unless it collides with another defined prefix or uri.
* Callable names
QName and LocalName objects are callable, taking the same arguments as
the Element factory, except the tag argument which is implicitly set to
the QName/LocalName itself.
ns.tag(a='5') == Element(QName('http://namespace/uri', 'tag', a='5')
"""
#from elementtree import ElementTree
from edgy.xml.element import (Element as basefactory, iselement,
QName, _namespace_map)
from edgy.xml.utils import splitTag
__all__ = 'Element', 'Namespace', 'LocalNamespace'
def Element(tag, attrib={}, *children, **extra):
""" Element(tag (,attrdict)? (,subelement|string)* ) -> Element """
if isinstance(attrib, dict):
attrib = attrib.copy()
else:
children = (attrib,) + children
attrib = {}
attrib.update(extra)
element = basefactory(tag, attrib)
prevelem = None
for arg in children:
if iselement(arg):
element.append(arg)
prevelem = arg
else:
if isinstance(arg, basestring):
if prevelem is None:
element.text = (element.text or '') + arg
else:
prevelem.tail = (prevelem.tail or '') + arg
else:
try:
it = iter(arg)
except:
raise TypeError, "argument type to Element"
for item in it:
if not iselement(item):
raise TypeError, "invalid argument type to Element"
element.append(item)
return element
class _QName(unicode, QName):
    """QName that is also a unicode string; calling it creates an Element
    with the name as its tag."""

    def __new__(cls, *args):
        # Build an ordinary QName first, then use its text form as the
        # unicode value of this instance; ``text`` points back at itself
        # so the QName interface keeps working.
        tmp = QName(*args)
        new = unicode.__new__(cls, tmp.text)
        new.text = new
        return new

    # Use Python's binding of first argument as self: ns.tag(...) is
    # equivalent to Element(ns.tag, ...).
    __call__ = Element
class LocalName(unicode):
    """Unqualified (namespace-less) tag name; calling it creates an
    Element with the name as its tag."""
    # Use Python's binding of first argument as self: name(...) is
    # equivalent to Element(name, ...).
    __call__ = Element
class Namespace:
    """ Namespace(uri [, prefix hint]) -> Namespace object

    Factory for qualified names: ``ns.tag``, ``ns['tag']`` and
    ``ns + 'tag'`` all yield a _QName in this namespace (or a LocalName
    when the uri is None).
    """

    def __init__(self, uri=None, prefix=None, location=None):
        # NOTE(review): ``self.location`` is only assigned when a usable
        # prefix is registered below; on every early-return path the
        # attribute is never set at all -- confirm callers tolerate that.
        self.uri = uri or None
        if prefix is None:
            return
        map = _namespace_map
        if uri in map or prefix in map.values():
            # prefix or URI already used
            return
        if prefix.startswith("ns") and prefix[2:].isdigit():
            # names in this form may collide with autogenerated prefixes
            return
        map[uri] = prefix
        if location is None:
            location = uri
        self.location = location

    def __contains__(self, qn):
        # A qualified name belongs to this namespace iff its URI part matches.
        nsuri, tag = splitTag(str(qn))
        return (nsuri == self.uri)

    def __add__(self, tag):
        # ns + 'tag' builds a qualified name (local name for the None namespace).
        if self.uri is None:
            return LocalName(tag)
        else:
            return _QName(self.uri, tag)

    def __getattr__(self, tag):
        # Attribute access is an alias for ns + tag.  Underscore-prefixed
        # names are excluded so internal attribute lookups fail normally.
        if tag[0] == '_':
            raise AttributeError(tag)
        qname = self+tag
        self.__dict__[tag] = qname  # cache for faster access next time
        return qname

    def __getitem__(self, tag):
        return self + tag
LocalNamespace = Namespace(None)
|
pkleimert/hrpt
|
refs/heads/master
|
apps/pollster/models_2012_11_21.py
|
2
|
# -*- coding: utf-8 -*-
import warnings
from django.db import models, connection, transaction, IntegrityError, DatabaseError
from django.contrib.auth.models import User
from django.forms import ModelForm
from django.core.validators import RegexValidator
from cms.models import CMSPlugin
from xml.etree import ElementTree
from math import pi,cos,sin,log,exp,atan
from . import dynamicmodels, json
from .db.utils import get_db_type, convert_query_paramstyle
import os, re, shutil, warnings, datetime, csv
from django.conf import settings
DEG_TO_RAD = pi/180
RAD_TO_DEG = 180/pi
# Map rendering support: prefer the "mapnik2" binding, fall back to
# "mapnik", and continue without map rendering if neither loads.
try:
    import mapnik2 as mapnik
except:  # NOTE(review): bare except also hides non-import failures -- consider ImportError
    try:
        import mapnik
    except ImportError:
        warnings.warn("No working version for library 'mapnik' found. Continuing without mapnik")
# Workflow states for a survey: only PUBLISHED surveys have a backing
# results table (see Survey.publish/unpublish below).
SURVEY_STATUS_CHOICES = (
    ('DRAFT', 'Draft'),
    ('PUBLISHED', 'Published'),
    ('UNPUBLISHED', 'Unpublished')
)
# Translations have no UNPUBLISHED state.
SURVEY_TRANSLATION_STATUS_CHOICES = (
    ('DRAFT', 'Draft'),
    ('PUBLISHED', 'Published')
)
CHART_STATUS_CHOICES = (
    ('DRAFT', 'Draft'),
    ('PUBLISHED', 'Published'),
)
# Question kinds understood by Question.as_fields().
QUESTION_TYPE_CHOICES = (
    ('builtin', 'Builtin'),
    ('text', 'Open Answer'),
    ('single-choice', 'Single Choice'),
    ('multiple-choice', 'Multiple Choice'),
    ('matrix-select', 'Matrix Select'),
    ('matrix-entry', 'Matrix Entry'),
)
# Row-level filters available to charts.
CHART_SQLFILTER_CHOICES = (
    ('NONE', 'None'),
    ('USER', 'Current User'),
    ('PERSON', 'Current Person'),
)
# Shortnames/data names double as SQL identifiers, hence the strict regexes.
IDENTIFIER_REGEX = r'^[a-zA-Z][a-zA-Z0-9_]*$'
IDENTIFIER_OPTION_REGEX = r'^[a-zA-Z0-9_]*$'
SURVEY_EXTRA_SQL = {
'postgresql': {
'weekly': [
"""DROP VIEW IF EXISTS pollster_health_status""",
"""CREATE VIEW pollster_health_status AS
SELECT id as pollster_results_weekly_id,
case true
when "Q1_0"
then 'NO-SYMPTOMS'
when "Q5" = 0
and ("Q1_1" or "Q1_11" or "Q1_8" or "Q1_9")
and ("Q1_5" or "Q1_6" or "Q1_7")
then 'ILI'
when "Q5" = 1
and ("Q1_4" or "Q1_5" or "Q1_6" or "Q1_7")
then 'COMMON-COLD'
when "Q1_15" or "Q1_16" or "Q1_17" and "Q1_18"
then 'GASTROINTESTINAL'
else 'NON-INFLUENZA'
end as status
FROM pollster_results_weekly"""
]
},
'sqlite': {
'weekly': [
"""DROP VIEW IF EXISTS pollster_health_status""",
"""CREATE VIEW pollster_health_status AS
SELECT id as pollster_results_weekly_id,
case 1
when Q1_0
then 'NO-SYMPTOMS'
when Q5 == 0
and (Q1_1 or Q1_11 or Q1_8 or Q1_9)
and (Q1_5 or Q1_6 or Q1_7)
then 'ILI'
when Q5 == 1
and (Q1_4 or Q1_5 or Q1_6 or Q1_7)
then 'COMMON-COLD'
when Q1_15 or Q1_16 or Q1_17 and Q1_18
then 'GASTROINTESTINAL'
else 'NON-INFLUENZA'
end as status
FROM pollster_results_weekly"""
]
}
}
def _get_or_default(queryset, default=None):
r = queryset[0:1]
if r:
return r[0]
return default
class Survey(models.Model):
    """A questionnaire whose answers are stored in a dynamically generated
    results table (one table per published survey shortname)."""
    parent = models.ForeignKey('self', db_index=True, blank=True, null=True)
    title = models.CharField(max_length=255, blank=True, default='')
    # shortname is used as part of the results table name; must match
    # IDENTIFIER_REGEX before publishing (see check()).
    shortname = models.SlugField(max_length=255, default='')
    version = models.SlugField(max_length=255, blank=True, default='')
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    status = models.CharField(max_length=255, default='DRAFT', choices=SURVEY_STATUS_CHOICES)
    # Per-instance state injected by callers; not persisted.
    form = None
    translation_survey = None
    # Columns every results table gets in addition to the question columns.
    _standard_result_fields =[
        ('user', models.IntegerField(null=True, blank=True, verbose_name="User")),
        ('global_id', models.CharField(max_length=36, null=True, blank=True, verbose_name="Person")),
        ('channel', models.CharField(max_length=36, null=True, blank=True, verbose_name="Channel"))
    ]

    @staticmethod
    def get_by_shortname(shortname):
        # Only PUBLISHED surveys are reachable by shortname.
        return Survey.objects.all().get(shortname=shortname, status="PUBLISHED")

    @property
    def translated_title(self):
        if self.translation and self.translation.title:
            return self.translation.title
        return self.title

    @property
    def is_draft(self):
        return self.status == 'DRAFT'

    @property
    def is_published(self):
        return self.status == 'PUBLISHED'

    @property
    def is_unpublished(self):
        return self.status == 'UNPUBLISHED'

    @property
    def is_editable(self):
        return self.is_draft or self.is_unpublished

    @property
    def questions(self):
        # Yield questions with the current form/translation pushed into them.
        for question in self.question_set.all():
            question.set_form(self.form)
            question.set_translation_survey(self.translation_survey)
            yield question

    @property
    def translation(self):
        return self.translation_survey

    @models.permalink
    def get_absolute_url(self):
        return ('pollster_survey_edit', [str(self.id)])

    def __unicode__(self):
        return "Survey #%d %s" % (self.id, self.title)

    def get_table_name(self):
        """Name of the dynamic results table backing this survey."""
        if self.is_published and not self.shortname:
            raise RuntimeError('cannot generate tables for surveys with no shortname')
        return 'results_'+str(self.shortname)

    def get_last_participation_data(self, user_id, global_id):
        """Return the most recent result row (as a dict) for the given
        user/person, or None when they never participated."""
        model = self.as_model()
        participation = model.objects\
            .filter(user=user_id)\
            .filter(global_id = global_id)\
            .order_by('-timestamp')\
            .values()
        return _get_or_default(participation)

    def as_model(self):
        """Build the dynamic Django model for this survey's results table."""
        fields = []
        fields.extend(Survey._standard_result_fields)
        for question in self.questions:
            fields += question.as_fields()
        model = dynamicmodels.create(self.get_table_name(), fields=dict(fields), app_label='pollster')
        return model

    def as_form(self):
        """Build a ModelForm class for the dynamic model, enforcing that
        mandatory multiple-choice questions have at least one option set."""
        model = self.as_model()
        questions = list(self.questions)
        def clean(self):
            for question in questions:
                if question.is_multiple_choice and question.is_mandatory:
                    valid = any([self.cleaned_data.get(d, False) for d in question.data_names])
                    if not valid:
                        self._errors[question.data_name] = self.error_class('At least one option should be selected')
            return self.cleaned_data
        form = dynamicmodels.to_form(model, {'clean': clean})
        for question in questions:
            if question.is_mandatory and question.data_name in form.base_fields:
                form.base_fields[question.data_name].required = True
        return form

    def set_form(self, form):
        self.form = form

    def set_translation_survey(self, translation_survey):
        self.translation_survey = translation_survey

    def check(self):
        """Return a list of validation error strings (empty when valid)."""
        errors = []
        if not self.shortname:
            errors.append('Missing survey shortname')
        elif not re.match(IDENTIFIER_REGEX, self.shortname):
            errors.append('Invalid survey shortname "%s"' % (self.shortname,))
        for question in self.questions:
            errors.extend(question.check())
        return errors

    def publish(self):
        """Publish the survey: unpublish same-shortname rivals, back up any
        existing results table and create a fresh one.

        Returns None on success, or the list of validation errors.
        """
        if self.is_published:
            return None
        errors = self.check()
        if errors:
            return errors
        # Unpublish other surveys with the same shortname.
        for o in Survey.objects.filter(shortname=self.shortname, status='PUBLISHED'):
            o.unpublish()
        self.status = 'PUBLISHED'
        model = self.as_model()
        table = model._meta.db_table
        if table in connection.introspection.table_names():
            now = datetime.datetime.now()
            # NOTE(review): '%s' (lowercase) is a platform-dependent strftime
            # code (epoch seconds on glibc); '%S' may have been intended.
            backup = table+'_vx_'+format(now, '%Y%m%d%H%M%s')
            connection.cursor().execute('ALTER TABLE '+table+' RENAME TO '+backup)
        dynamicmodels.install(model)
        # Install any per-shortname auxiliary SQL (views etc.) for this backend.
        db = get_db_type(connection)
        for extra_sql in SURVEY_EXTRA_SQL[db].get(self.shortname, []):
            connection.cursor().execute(extra_sql)
        self.save()
        return None

    def unpublish(self):
        """Retire the survey, renaming its results table to a versioned backup."""
        if not self.is_published:
            return
        table = self.as_model()._meta.db_table
        if table in connection.introspection.table_names():
            now = datetime.datetime.now()
            version = self.version or 0
            # NOTE(review): same '%s' vs '%S' concern as in publish().
            backup = table+'_v'+str(version)+'_'+format(now, '%Y%m%d%H%M%s')
            connection.cursor().execute('ALTER TABLE '+table+' RENAME TO '+backup)
        self.status = 'UNPUBLISHED'
        self.save()

    def write_csv(self, writer):
        """Dump all results rows to *writer* (csv.writer), header first;
        unicode values are UTF-8 encoded for the csv module."""
        model = self.as_model()
        fields = model._meta.fields
        writer.writerow([field.verbose_name or field.name for field in fields])
        for result in model.objects.all():
            row = []
            for field in fields:
                val = getattr(result, field.name)
                if callable(val):
                    val = val()
                if type(val) is unicode:
                    val = val.encode('utf-8')
                row.append(val)
            writer.writerow(row)
class RuleType(models.Model):
    """Registry of rule implementations; ``js_class`` names the JavaScript
    class that implements the rule client-side."""
    title = models.CharField(max_length=255, blank=True, default='')
    js_class = models.CharField(max_length=255, unique=True)

    def __unicode__(self):
        return "RuleType #%d %s" % (self.id, self.title)
class QuestionDataType(models.Model):
    """Maps a question data type to its Django field (``db_type`` holds a
    Python expression), CSS class and client-side JavaScript class."""
    title = models.CharField(max_length=255, blank=True, default='')
    db_type = models.CharField(max_length=255)
    css_class = models.CharField(max_length=255)
    js_class = models.CharField(max_length=255, unique=True)

    def __unicode__(self):
        return "QuestionDataType #%d %s" % (self.id, self.title)

    def as_field_type(self, verbose_name=None, regex=None):
        """Instantiate the Django model field described by ``db_type``,
        optionally attaching a verbose name and a regex validator."""
        # Imported so the eval'd expression can reference these modules.
        import django.db.models
        import db.models
        # SECURITY NOTE(review): eval() of a database-stored string; this is
        # only safe while rows of this table are admin-controlled.
        field = eval(self.db_type)
        field.verbose_name = verbose_name
        if regex:
            field.validators.append(RegexValidator(regex=regex))
        return field

    @staticmethod
    def default_type():
        return QuestionDataType.objects.filter(title = 'Text')[0]

    @staticmethod
    def default_timestamp_type():
        return QuestionDataType.objects.filter(title = 'Timestamp')[0]

    @property
    def is_internal(self):
        # Timestamp columns are maintained by the system, not user input.
        return self.title == 'Timestamp'
class VirtualOptionType(models.Model):
    """Kind of computed (virtual) option, tied to the data type whose
    values it derives from; ``js_class`` is the client-side implementation."""
    title = models.CharField(max_length=255, blank=True, default='')
    question_data_type = models.ForeignKey(QuestionDataType)
    js_class = models.CharField(max_length=255, unique=True)

    def __unicode__(self):
        return "VirtualOptionType #%d %s for %s" % (self.id, self.title, self.question_data_type.title)
class Question(models.Model):
    """A single survey question; depending on ``type`` it expands to one or
    more columns of the survey's dynamic results table (see as_fields())."""
    survey = models.ForeignKey(Survey, db_index=True)
    starts_hidden = models.BooleanField(default=False)
    is_mandatory = models.BooleanField(default=False)
    ordinal = models.IntegerField()
    title = models.CharField(max_length=255, blank=True, default='')
    description = models.TextField(blank=True, default='')
    type = models.CharField(max_length=255, choices=QUESTION_TYPE_CHOICES)
    data_type = models.ForeignKey(QuestionDataType)
    open_option_data_type = models.ForeignKey(QuestionDataType, related_name="questions_with_open_option", null=True, blank=True)
    # Base column name in the results table; validated against IDENTIFIER_REGEX.
    data_name = models.CharField(max_length=255)
    visual = models.CharField(max_length=255, blank=True, default='')
    tags = models.CharField(max_length=255, blank=True, default='')
    regex = models.CharField(max_length=1023, blank=True, default='')
    error_message = models.TextField(blank=True, default='')
    # Per-instance state injected by callers; not persisted.
    form = None
    translation_survey = None
    translation_question = None

    @property
    def translated_title(self):
        if self.translation and self.translation.title:
            return self.translation.title
        return self.title

    @property
    def translated_description(self):
        if self.translation and self.translation.description:
            return self.translation.description
        return self.description

    @property
    def translated_error_message(self):
        if self.translation and self.translation.error_message:
            return self.translation.error_message
        return self.error_message

    @property
    def errors(self):
        """Form errors for this question's data names, keyed by data name."""
        if not self.form:
            return {}
        errors = [(data_name, self.form.errors[data_name]) for data_name in self.data_names if data_name in self.form.errors]
        # Multiple-choice validation errors are stored under the base name.
        if self.is_multiple_choice and self.data_name in self.form.errors:
            errors.append((self.data_name, self.form.errors[self.data_name]))
        return dict(errors)

    @property
    def rows(self):
        for row in self.row_set.all():
            row.set_translation_survey(self.translation_survey)
            yield row

    @property
    def columns(self):
        for column in self.column_set.all():
            column.set_translation_survey(self.translation_survey)
            yield column

    @property
    def rows_columns(self):
        # (row, columns-bound-to-that-row) pairs for matrix questions.
        for row in self.rows:
            yield (row, self._columns_for_row(row))

    def _columns_for_row(self, row):
        for column in self.columns:
            column.set_row(row)
            yield column

    @property
    def data_names(self):
        # All results-table column names this question contributes.
        return [data_name for data_name, data_type in self.as_fields()]

    @property
    def options(self):
        for option in self.option_set.all():
            option.set_form(self.form)
            option.set_translation_survey(self.translation_survey)
            yield option

    @property
    def translation(self):
        return self.translation_question

    @property
    def css_classes(self):
        c = ['question', 'question-'+self.type, self.data_type.css_class]
        if self.starts_hidden:
            c.append('starts-hidden')
        if self.is_mandatory:
            c.append('mandatory')
        if self.errors:
            c.append('error')
        return c

    @property
    def form_value(self):
        if not self.form:
            return ''
        return self.form.data.get(self.data_name, '')

    @property
    def is_builtin(self):
        return self.type == 'builtin'

    @property
    def is_text(self):
        return self.type == 'text'

    @property
    def is_single_choice(self):
        return self.type == 'single-choice'

    @property
    def is_multiple_choice(self):
        return self.type == 'multiple-choice'

    @property
    def is_matrix_select(self):
        return self.type == 'matrix-select'

    @property
    def is_matrix_entry(self):
        return self.type == 'matrix-entry'

    @property
    def is_visual_dropdown(self):
        return self.visual == 'dropdown'

    def __unicode__(self):
        return "Question #%d %s" % (self.id, self.title)

    class Meta:
        ordering = ['survey', 'ordinal']

    def data_name_for_row_column(self, row, column):
        """Column name for one cell of a matrix question."""
        return '%s_multi_row%d_col%d' % (self.data_name, row.ordinal, column.ordinal)

    def as_fields(self):
        """Expand this question into (column name, Django field) pairs for
        the dynamic results model, according to the question type."""
        fields = []
        if self.type == 'builtin':
            fields = [ (self.data_name, self.data_type.as_field_type(verbose_name=self.title)) ]
        elif self.type == 'text':
            fields = [ (self.data_name, self.data_type.as_field_type(verbose_name=self.title, regex=self.regex)) ]
        elif self.type == 'single-choice':
            # One column for the choice plus one per "open" option's free text.
            open_option_data_type = self.open_option_data_type or self.data_type
            fields = [ (self.data_name, self.data_type.as_field_type(verbose_name=self.title)) ]
            for open_option in [o for o in self.option_set.all() if o.is_open]:
                title_open = "%s: %s Open Answer" % (self.title, open_option.value)
                fields.append( (open_option.open_option_data_name, open_option_data_type.as_field_type(verbose_name=title_open)) )
        elif self.type == 'multiple-choice':
            # One boolean column per option (+ free-text column for open options).
            fields = []
            for option in self.option_set.all():
                title = "%s: %s" % (self.title, option.value)
                fields.append( (option.data_name, models.BooleanField(verbose_name=title)) )
                if option.is_open:
                    title_open = "%s: %s Open Answer" % (self.title, option.value)
                    fields.append( (option.open_option_data_name, option.open_option_data_type.as_field_type(verbose_name=title_open)) )
        elif self.type in ('matrix-select', 'matrix-entry'):
            # One column per (row, column) cell.
            fields = []
            for row, columns in self.rows_columns:
                for column in columns:
                    r = row.title or ("row %d" % row.ordinal)
                    c = column.title or ("column %d" % column.ordinal)
                    title = "%s (%s, %s)" % (self.title, r, c)
                    fields.append( (column.data_name, self.data_type.as_field_type(verbose_name=title)) )
        else:
            raise NotImplementedError(self.type)
        return fields

    def set_form(self, form):
        self.form = form

    def set_translation_survey(self, translation_survey):
        self.translation_survey = translation_survey
        if translation_survey:
            r = translation_survey.translationquestion_set.all().filter(question=self)
            default = TranslationQuestion(translation = translation_survey, question=self)
            self.translation_question = _get_or_default(r, default)

    def check(self):
        """Return a list of validation error strings (empty when valid)."""
        errors = []
        if not self.data_name:
            errors.append('Missing data name for question "%s"' % (self.title, ))
        elif not re.match(IDENTIFIER_REGEX, self.data_name):
            errors.append('Invalid data name "%s" for question "%s"' % (self.data_name, self.title))
        values = {}
        for option in self.options:
            errors.extend(option.check())
            values[option.value] = values.get(option.value, 0) + 1
        if self.type == 'multiple-choice':
            # Duplicate option values would collide on the same column name.
            dups = [val for val, count in values.items() if count > 1]
            for dup in dups:
                errors.append('Duplicated value %s in question %s' % (dup, self.title))
        return errors
class QuestionRow(models.Model):
    """One row of a matrix-type question."""
    question = models.ForeignKey(Question, related_name="row_set", db_index=True)
    ordinal = models.IntegerField()
    title = models.CharField(max_length=255, blank=True, default='')
    # Per-instance state injected by callers; not persisted.
    translation_survey = None
    translation_row = None

    class Meta:
        ordering = ['question', 'ordinal']

    def __unicode__(self):
        return "QuestionRow #%d %s" % (self.id, self.title)

    @property
    def translated_title(self):
        if self.translation and self.translation.title:
            return self.translation.title
        return self.title

    @property
    def translation(self):
        return self.translation_row

    def set_translation_survey(self, translation_survey):
        # Bind (or default-construct) the translation record for this row.
        self.translation_survey = translation_survey
        if translation_survey:
            r = translation_survey.translationquestionrow_set.all().filter(row=self)
            default = TranslationQuestionRow(translation = translation_survey, row=self)
            self.translation_row = _get_or_default(r, default)
class QuestionColumn(models.Model):
    """One column of a matrix-type question; when bound to a row (via
    set_row) it identifies a single cell of the matrix."""
    question = models.ForeignKey(Question, related_name="column_set", db_index=True)
    ordinal = models.IntegerField()
    title = models.CharField(max_length=255, blank=True, default='')
    # Per-instance state injected by callers; not persisted.
    translation_survey = None
    translation_column = None
    row = None

    class Meta:
        ordering = ['question', 'ordinal']

    def __unicode__(self):
        return "QuestionColumn #%d %s" % (self.id, self.title)

    @property
    def translated_title(self):
        if self.translation and self.translation.title:
            return self.translation.title
        return self.title

    @property
    def translation(self):
        return self.translation_column

    def set_translation_survey(self, translation_survey):
        # Bind (or default-construct) the translation record for this column.
        self.translation_survey = translation_survey
        if translation_survey:
            r = translation_survey.translationquestioncolumn_set.all().filter(column=self)
            default = TranslationQuestionColumn(translation = translation_survey, column=self)
            self.translation_column = _get_or_default(r, default)

    def set_row(self, row):
        self.row = row

    @property
    def options(self):
        # Options applicable to the current (row, column) cell: skip options
        # pinned to a different row or column, bind the rest to this cell.
        for option in self.question.options:
            if option.row and option.row != self.row:
                continue
            if option.column and option.column != self:
                continue
            option.set_row_column(self.row, self)
            option.set_translation_survey(self.translation_survey)
            # TODO: We need a form to reset the selects to user's values.
            # option.set_form(self.form)
            yield option

    @property
    def data_name(self):
        if not self.row:
            raise NotImplementedError('use Question.rows_columns() to get the right data_name here')
        return self.question.data_name_for_row_column(self.row, self)
class Option(models.Model):
    """One selectable option of a Question.

    Options may be bound to a specific matrix row and/or column.  Virtual
    options (is_virtual=True) are values derived from other answers via a
    range (virtual_inf/virtual_sup) or a regex (virtual_regex) rather than
    direct user input.
    """
    question = models.ForeignKey(Question, db_index=True)
    # Source option this one was cloned from, when a survey is cloned.
    clone = models.ForeignKey('self', db_index=True, blank=True, null=True)
    row = models.ForeignKey(QuestionRow, blank=True, null=True)
    column = models.ForeignKey(QuestionColumn, blank=True, null=True)
    is_virtual = models.BooleanField(default=False)
    # An "open" option carries an extra free-text input (see
    # open_option_data_name / open_option_data_type below).
    is_open = models.BooleanField(default=False)
    starts_hidden = models.BooleanField(default=False)
    ordinal = models.IntegerField()
    text = models.CharField(max_length=4095, blank=True, default='')
    group = models.CharField(max_length=255, blank=True, default='')
    value = models.CharField(max_length=255, default='')
    description = models.TextField(blank=True, default='')
    virtual_type = models.ForeignKey(VirtualOptionType, blank=True, null=True)
    # Bounds / regex used to compute the value of a virtual option.
    virtual_inf = models.CharField(max_length=255, blank=True, default='')
    virtual_sup = models.CharField(max_length=255, blank=True, default='')
    virtual_regex = models.CharField(max_length=255, blank=True, default='')

    # Per-request rendering state, injected by the set_* methods below;
    # never persisted to the database.
    form = None
    translation_survey = None
    translation_option = None
    current_row_column = (None, None)

    @property
    def translated_text(self):
        # Fall back to the untranslated text when no translation is set.
        if self.translation and self.translation.text:
            return self.translation.text
        return self.text

    @property
    def translated_description(self):
        # Fall back to the untranslated description when no translation is set.
        if self.translation and self.translation.description:
            return self.translation.description
        return self.description

    @property
    def data_name(self):
        """Name of this option's field in the survey results data."""
        if self.question.type in ('text', 'single-choice'):
            return self.question.data_name
        elif self.question.type == 'multiple-choice':
            # Multiple-choice options each get their own field.
            return self.question.data_name+'_'+self.value
        elif self.question.type in ('matrix-select', 'matrix-entry'):
            # Inside a matrix the data name depends on the current cell.
            row = self.row or self.current_row_column[0]
            column = self.column or self.current_row_column[1]
            return self.question.data_name_for_row_column(row, column)
        else:
            raise NotImplementedError(self.question.type)

    @property
    def translation(self):
        return self.translation_option

    @property
    def open_option_data_name(self):
        # Data name of the free-text companion field of an open option.
        return self.question.data_name+'_'+self.value+'_open'

    @property
    def open_option_data_type(self):
        # The open field's type defaults to the question's data type.
        return self.question.open_option_data_type or self.question.data_type

    def __unicode__(self):
        return 'Option #%d %s' % (self.id, self.value)

    class Meta:
        ordering = ['question', 'ordinal']

    @property
    def form_value(self):
        """Raw submitted value for this option, or '' when no form is bound."""
        if not self.form:
            return ''
        return self.form.data.get(self.data_name, '')

    @property
    def open_option_data_form_value(self):
        """Submitted free-text value of an open option, or ''."""
        if not self.form:
            return ''
        return self.form.data.get(self.open_option_data_name, '')

    @property
    def form_is_checked(self):
        """True when the bound form data selects this option."""
        if self.question.type in ('text', 'single-choice'):
            return self.form_value == self.value
        elif self.question.type == 'multiple-choice':
            # Checkbox inputs submit one key per checked option.
            return bool(self.form_value)
        elif self.question.type in ('matrix-select', 'matrix-entry'):
            return self.form_value == self.value
        else:
            raise NotImplementedError(self.question.type)

    def set_form(self, form):
        self.form = form

    def set_translation_survey(self, translation_survey):
        # Resolve this option's translation, defaulting to an unsaved stub.
        self.translation_survey = translation_survey
        if translation_survey:
            r = translation_survey.translationoption_set.all().filter(option=self)
            default = TranslationOption(translation = translation_survey, option=self)
            self.translation_option = _get_or_default(r, default)

    def set_row_column(self, row, column):
        self.current_row_column = (row, column)

    def check(self):
        """Return a list of human-readable consistency errors (empty if OK)."""
        errors = []
        if self.is_virtual:
            # A derived value needs at least one derivation parameter.
            if not self.virtual_inf and not self.virtual_sup and not self.virtual_regex:
                errors.append('Missing parameters for derived value in question "%s"' % (self.question.title, ))
        else:
            if not self.text:
                errors.append('Empty text for option in question "%s"' % (self.question.title, ))
            if not self.value:
                errors.append('Missing value for option "%s" in question "%s"' % (self.text, self.question.title))
            elif self.question.type == 'multiple-choice' and not re.match(IDENTIFIER_OPTION_REGEX, self.value):
                # Multiple-choice values become field-name suffixes, so they
                # must be valid identifiers.
                errors.append('Invalid value "%s" for option "%s" in question "%s"' % (self.value, self.text, self.question.title))
        return errors
class Rule(models.Model):
    """Behavior rule linking options of one question to another question.

    The concrete behavior is implemented client-side in the JavaScript class
    named by js_class().
    """
    rule_type = models.ForeignKey(RuleType)
    is_sufficient = models.BooleanField(default=True)
    subject_question = models.ForeignKey(Question, related_name='subject_of_rules', db_index=True)
    # NOTE(review): limit_choices_to is given the ForeignKey *field object*
    # itself, not a value; presumably meant to restrict admin choices to
    # options of the selected question -- verify this filters as intended.
    subject_options = models.ManyToManyField(Option, related_name='subject_of_rules', limit_choices_to = {'question': subject_question})
    object_question = models.ForeignKey(Question, related_name='object_of_rules', blank=True, null=True)
    # NOTE(review): same field-object concern as subject_options above.
    object_options = models.ManyToManyField(Option, related_name='object_of_rules', limit_choices_to = {'question': object_question})

    def js_class(self):
        # Name of the JavaScript class implementing this rule type.
        return self.rule_type.js_class

    def __unicode__(self):
        return 'Rule #%d' % (self.id)
# I18n models
class TranslationSurvey(models.Model):
    """A translation of a Survey into one target language."""
    survey = models.ForeignKey(Survey, db_index=True)
    # Language code (up to 3 chars); presumably an ISO code -- TODO confirm.
    language = models.CharField(max_length=3, db_index=True)
    title = models.CharField(max_length=255, blank=True, default='')
    status = models.CharField(max_length=255, default='DRAFT', choices=SURVEY_TRANSLATION_STATUS_CHOICES)

    class Meta:
        verbose_name = 'Translation'
        ordering = ['survey', 'language']
        # One translation per (survey, language) pair.
        unique_together = ('survey', 'language')

    @models.permalink
    def get_absolute_url(self):
        return ('pollster_survey_translation_edit', [str(self.survey.id), self.language])

    def __unicode__(self):
        return "TranslationSurvey(%s) for %s" % (self.language, self.survey)

    def as_form(self, data=None):
        """Build a ModelForm bound to this instance (prefix 'survey')."""
        class TranslationSurveyForm(ModelForm):
            class Meta:
                model = TranslationSurvey
                fields = ['title', 'status']
        return TranslationSurveyForm(data, instance=self, prefix="survey")
class TranslationQuestion(models.Model):
    """Translated title/description/error message for one Question."""
    translation = models.ForeignKey(TranslationSurvey, db_index=True)
    question = models.ForeignKey(Question, db_index=True)
    title = models.CharField(max_length=255, blank=True, default='')
    description = models.TextField(blank=True, default='')
    error_message = models.TextField(blank=True, default='')

    class Meta:
        ordering = ['translation', 'question']
        unique_together = ('translation', 'question')

    def __unicode__(self):
        return "TranslationQuestion(%s) for %s" % (self.translation.language, self.question)

    def as_form(self, data=None):
        """Build a ModelForm bound to this instance, prefixed by question id."""
        class TranslationQuestionForm(ModelForm):
            class Meta:
                model = TranslationQuestion
                fields = ['title', 'description', 'error_message']
        return TranslationQuestionForm(data, instance=self, prefix="question_%s"%(self.id,))
class TranslationQuestionRow(models.Model):
    """Translated title for one matrix-question row."""
    translation = models.ForeignKey(TranslationSurvey, db_index=True)
    row = models.ForeignKey(QuestionRow, db_index=True)
    title = models.CharField(max_length=255, blank=True, default='')

    class Meta:
        ordering = ['translation', 'row']
        unique_together = ('translation', 'row')

    def __unicode__(self):
        return "TranslationQuestionRow(%s) for %s" % (self.translation.language, self.row)

    def as_form(self, data=None):
        """Build a ModelForm bound to this instance, prefixed by row id."""
        class TranslationRowForm(ModelForm):
            class Meta:
                model = TranslationQuestionRow
                fields = ['title']
        return TranslationRowForm(data, instance=self, prefix="row_%s"%(self.id,))
class TranslationQuestionColumn(models.Model):
    """Translated title for one matrix-question column."""
    translation = models.ForeignKey(TranslationSurvey, db_index=True)
    column = models.ForeignKey(QuestionColumn, db_index=True)
    title = models.CharField(max_length=255, blank=True, default='')

    class Meta:
        ordering = ['translation', 'column']
        unique_together = ('translation', 'column')

    def __unicode__(self):
        return "TranslationQuestionColumn(%s) for %s" % (self.translation.language, self.column)

    def as_form(self, data=None):
        """Build a ModelForm bound to this instance, prefixed by column id."""
        class TranslationColumnForm(ModelForm):
            class Meta:
                model = TranslationQuestionColumn
                fields = ['title']
        return TranslationColumnForm(data, instance=self, prefix="column_%s"%(self.id,))
class TranslationOption(models.Model):
    """Translated text/description for one Option."""
    translation = models.ForeignKey(TranslationSurvey, db_index=True)
    option = models.ForeignKey(Option, db_index=True)
    text = models.CharField(max_length=4095, blank=True, default='')
    description = models.TextField(blank=True, default='')

    class Meta:
        ordering = ['translation', 'option']
        unique_together = ('translation', 'option')

    def __unicode__(self):
        return "TranslationOption(%s) for %s" % (self.translation.language, self.option)

    def as_form(self, data=None):
        """Build a ModelForm bound to this instance, prefixed by option id."""
        class TranslationOptionForm(ModelForm):
            class Meta:
                model = TranslationOption
                fields = ['text', 'description']
        return TranslationOptionForm(data, instance=self, prefix="option_%s"%(self.id,))
class ChartType(models.Model):
    """Available chart renderer types, identified by shortname.

    The shortname 'google-charts' is special-cased by Chart.to_json; other
    types are rendered as map tiles.
    """
    shortname = models.SlugField(max_length=255, unique=True)
    description = models.CharField(max_length=255)

    def __unicode__(self):
        return self.description or self.shortname
class Chart(models.Model):
survey = models.ForeignKey(Survey, db_index=True)
type = models.ForeignKey(ChartType, db_index=True)
shortname = models.SlugField(max_length=255)
chartwrapper = models.TextField(blank=True, default='')
sqlsource = models.TextField(blank=True, default='', verbose_name="SQL Source Query")
sqlfilter = models.CharField(max_length=255, default='NONE', choices=CHART_SQLFILTER_CHOICES, verbose_name="Results Filter")
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=255, default='DRAFT', choices=CHART_STATUS_CHOICES)
geotable = models.CharField(max_length=255, default='pollster_zip_codes', choices=settings.GEOMETRY_TABLES)
class Meta:
ordering = ['survey', 'shortname']
unique_together = ('survey', 'shortname')
def __unicode__(self):
return "Chart %s for %s" % (self.shortname, self.survey)
@models.permalink
def get_absolute_url(self):
return ('pollster_survey_chart_edit', [str(self.survey.id), self.shortname])
@property
def is_draft(self):
return self.status == 'DRAFT'
@property
def is_published(self):
return self.status == 'PUBLISHED'
@property
def has_data(self):
if not self.sqlsource:
return False
else:
return True
def to_json(self, user_id, global_id):
data = {}
if self.type.shortname == "google-charts":
data[ "chartType"] = "Table"
if self.chartwrapper:
data = json.loads(self.chartwrapper)
descriptions, cells = self.load_data(user_id, global_id)
cols = [{"id": desc[0], "label": desc[0], "type": "number"} for desc in descriptions]
rows = [{"c": [{"v": v} for v in c]} for c in cells]
data["dataTable"] = { "cols": cols, "rows": rows }
else:
if self.chartwrapper:
data["bounds"] = json.loads(self.chartwrapper)
try:
shortname = settings.POLLSTER_USER_PROFILE_SURVEY
survey = Survey.objects.get(shortname=shortname, status='PUBLISHED')
lpd = survey.get_last_participation_data(user_id, global_id)
if lpd and hasattr(settings, 'POLLSTER_USER_ZIP_CODE_DATA_NAME'):
zip_code = lpd.get(settings.POLLSTER_USER_ZIP_CODE_DATA_NAME)
if zip_code is not None:
zip_code = str(zip_code).upper()
country = None
if hasattr(settings, 'POLLSTER_USER_COUNTRY_DATA_NAME'):
country = lpd.get(settings.POLLSTER_USER_COUNTRY_DATA_NAME)
if country is not None:
country = str(country).upper()
data["center"] = self.load_zip_coords(zip_code, country)
except:
pass
return json.dumps(data)
def get_map_click(self, lat, lng):
result = {}
skip_cols = ("ogc_fid", "color", "geometry")
description, data = self.load_info(lat, lng)
if data and len(data) > 0:
for i in range(len(data[0])):
if description[i][0] not in skip_cols:
result[description[i][0]] = str(data[0][i])
return json.dumps(result)
def get_map_tile(self, user_id, global_id, z, x, y):
filename = self.get_map_tile_filename(z, x, y)
if self.sqlfilter == "USER" and user_id:
filename = filename + "_user_" + str(user_id)
elif self.sqlfilter == "PERSON" and global_id:
filename = filename + "_gid_" + global_id
if not os.path.exists(filename):
self.generate_map_tile(self.generate_mapnik_map(user_id, global_id), filename, z, x, y)
return open(filename).read()
def generate_map_tile(self, m, filename, z, x, y):
# Code taken from OSM generate_tiles.py
proj = GoogleProjection()
mprj = mapnik.Projection(m.srs)
p0 = (x * 256, (y + 1) * 256)
p1 = ((x + 1) * 256, y * 256)
l0 = proj.fromPixelToLL(p0, z);
l1 = proj.fromPixelToLL(p1, z);
c0 = mprj.forward(mapnik.Coord(l0[0], l0[1]))
c1 = mprj.forward(mapnik.Coord(l1[0], l1[1]))
if hasattr(mapnik,'mapnik_version') and mapnik.mapnik_version() >= 800:
bbox = mapnik.Box2d(c0.x, c0.y, c1.x, c1.y)
else:
bbox = mapnik.Envelope(c0.x, c0.y, c1.x, c1.y)
m.resize(256, 256)
m.zoom_to_box(bbox)
im = mapnik.Image(256, 256)
mapnik.render(m, im)
# See https://github.com/mapnik/mapnik/wiki/OutputFormats for output
# formats and special parameters. The default here is 32 bit PNG with 8
# bit per component and alpha channel.
im.save(str(filename), "png32")
def generate_mapnik_map(self, user_id, global_id):
m = mapnik.Map(256, 256)
style = self.generate_mapnik_style(user_id, global_id)
m.background = mapnik.Color("transparent")
m.append_style("ZIP_CODES STYLE", style)
m.srs = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over"
layer = mapnik.Layer('ZIP_CODES')
layer.datasource = self.create_mapnik_datasource(user_id, global_id)
layer.styles.append("ZIP_CODES STYLE")
m.layers.append(layer)
return m
def generate_mapnik_style(self, user_id, global_id):
style = mapnik.Style()
for color in self.load_colors(user_id, global_id):
# If the color can't be parsed, use red.
try:
c = mapnik.Color(str(color))
except:
c = mapnik.Color('#ff0000')
line = mapnik.LineSymbolizer(c, 1.5)
line.stroke.opacity = 0.7
poly = mapnik.PolygonSymbolizer(c)
poly.fill_opacity = 0.5
rule = mapnik.Rule()
rule.filter = mapnik.Filter(str("[color] = '%s'" % (color,)))
rule.symbols.extend([poly,line])
style.rules.append(rule)
return style
def create_mapnik_datasource(self, user_id, global_id):
# First create the SQL query that is a join between pollster_zip_codes and
# the chart query as created by the user; then create an appropriate datasource.
if global_id and re.findall('[^0-9A-Za-z-]', global_id):
raise Exception("invalid global_id "+global_id)
table = """SELECT * FROM %s""" % (self.get_view_name(),)
if self.sqlfilter == 'USER' :
table += """ WHERE "user" = %d""" % (user_id,)
elif self.sqlfilter == 'PERSON':
table += """ WHERE "user" = %d AND global_id = '%s'""" % (user_id, global_id)
table = "(" + table + ") AS ZIP_CODES"
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.sqlite3":
name = settings.DATABASES["default"]["NAME"]
return mapnik.SQLite(file=filename, wkb_format="spatialite",
geometry_field="geometry", estimate_extent=False, table=table)
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql_psycopg2":
name = settings.DATABASES["default"]["NAME"]
host = settings.DATABASES["default"]["HOST"]
port = settings.DATABASES["default"]["PORT"]
username = settings.DATABASES["default"]["USER"]
password = settings.DATABASES["default"]["PASSWORD"]
return mapnik.PostGIS(host=host, port=port, user=username, password=password, dbname=name,
geometry_field="geometry", estimate_extent=False, table=table)
def get_map_tile_base(self):
return "%s/_pollster_tile_cache/survey_%s/%s" % (settings.POLLSTER_CACHE_PATH, self.survey.id, self.shortname)
def get_map_tile_filename(self, z, x, y):
filename = "%s/%s/%s_%s" % (self.get_map_tile_base(), z, x, y)
pathname = os.path.dirname(filename)
if not os.path.exists(pathname):
try:
os.makedirs(pathname)
except OSError:
# Another thread created the directory in the meantime: just go on.
pass
return filename
def clear_map_tile_cache(self):
try:
shutil.rmtree(self.get_map_tile_base())
except:
pass
def get_table_name(self):
return 'pollster_charts_'+str(self.survey.shortname)+'_'+str(self.shortname)
def get_view_name(self):
return self.get_table_name() + "_view"
def update_table(self):
table_query = self.sqlsource
geo_table = self.geotable
if table_query:
table = self.get_table_name()
view = self.get_view_name()
if re.search(r'\bzip_code_country\b', table_query):
view_query = """SELECT A.*, B.id AS OGC_FID, B.geometry
FROM %s B, (SELECT * FROM %s) A
WHERE upper(A.zip_code_key) = upper(B.zip_code_key)
AND upper(A.zip_code_country) = upper(B.country)""" % (geo_table, table,)
else:
view_query = """SELECT A.*, B.id AS OGC_FID, B.geometry
FROM %s B, (SELECT * FROM %s) A
WHERE upper(A.zip_code_key) = upper(B.zip_code_key)""" % (geo_table, table,)
cursor = connection.cursor()
#try:
cursor.execute("DROP VIEW IF EXISTS %s" % (view,))
cursor.execute("DROP TABLE IF EXISTS %s" % (table,))
cursor.execute("CREATE TABLE %s AS %s" % (table, table_query))
if self.type.shortname != 'google-charts':
cursor.execute("CREATE VIEW %s AS %s" % (view, view_query))
transaction.commit_unless_managed()
self.clear_map_tile_cache()
return True
#except IntegrityError:
# return False
#except DatabaseError:
# return False
return False
def update_data(self):
table_query = self.sqlsource
if table_query:
table = self.get_table_name()
cursor = connection.cursor()
try:
cursor.execute("DELETE FROM %s" % (table,))
cursor.execute("INSERT INTO %s %s" % (table, table_query))
transaction.commit_unless_managed()
self.clear_map_tile_cache()
return True
except IntegrityError:
return False
except DatabaseError:
return False
return False
def load_data(self, user_id, global_id):
table = self.get_table_name()
query = "SELECT * FROM %s" % (table,)
if self.sqlfilter == 'USER' :
query += """ WHERE "user" = %(user_id)s"""
elif self.sqlfilter == 'PERSON':
query += """ WHERE "user" = %(user_id)s AND global_id = %(global_id)s"""
params = { 'user_id': user_id, 'global_id': global_id }
query = convert_query_paramstyle(connection, query, params)
try:
cursor = connection.cursor()
cursor.execute(query, params)
return (cursor.description, cursor.fetchall())
except DatabaseError, e:
return ((('Error',),), ((str(e),),))
def load_colors(self, user_id, global_id):
table = self.get_table_name()
query = """SELECT DISTINCT color FROM %s""" % (table,)
if self.sqlfilter == 'USER' :
query += """ WHERE "user" = %(user_id)s"""
elif self.sqlfilter == 'PERSON':
query += """ WHERE "user" = %(user_id)s AND global_id = %(global_id)s"""
params = { 'user_id': user_id, 'global_id': global_id }
query = convert_query_paramstyle(connection, query, params)
try:
cursor = connection.cursor()
cursor.execute(query, params)
return [x[0] for x in cursor.fetchall()]
except DatabaseError, e:
# If the SQL query is wrong we just return 'red'. We don't try to pop
# up a warning because this probably is an async Javascript call: the
# query error should be shown by the map editor.
return ['#ff0000']
def load_info(self, lat, lng):
view = self.get_view_name()
query = "SELECT * FROM %s WHERE ST_Contains(geometry, 'SRID=4326;POINT(%%s %%s)')" % (view,)
try:
cursor = connection.cursor()
cursor.execute(query, (lng, lat))
return (cursor.description, cursor.fetchall())
except DatabaseError, e:
return (None, [])
def load_zip_coords(self, zip_code_key, zip_code_country=None):
geo_table = self.geotable
if zip_code_country:
query = """SELECT ST_Y(ST_Centroid(geometry)) AS lat, ST_X(ST_Centroid(geometry)) AS lng
FROM """ + geo_table + """ WHERE zip_code_key = %s AND country = %s"""
args = (zip_code_key, zip_code_country)
else:
query = """SELECT ST_Y(ST_Centroid(geometry)) AS lat, ST_X(ST_Centroid(geometry)) AS lng
FROM """ + geo_table + """ WHERE zip_code_key = %s"""
args = (zip_code_key,)
try:
cursor = connection.cursor()
cursor.execute(query, args)
data = cursor.fetchall()
if len(data) > 0:
return {"lat": data[0][0], "lng": data[0][1]}
else:
return {}
except DatabaseError, e:
return {}
class GoogleProjection:
    """Converter between lat/lng and Google/OSM tile pixel coordinates.

    Precomputes, for every zoom level, the scale factors and the pixel
    coordinates of the map center (the world is 256 * 2**zoom pixels wide).
    """
    def __init__(self, levels=25):
        self.Bc = []  # pixels per degree of longitude, per zoom level
        self.Cc = []  # pixels per radian (mercator), per zoom level
        self.zc = []  # map center in pixels, per zoom level
        self.Ac = []  # total map size in pixels, per zoom level
        size = 256
        for _zoom in range(0, levels):
            half = size / 2
            self.Bc.append(size / 360.0)
            self.Cc.append(size / (2 * pi))
            self.zc.append((half, half))
            self.Ac.append(size)
            size *= 2

    def fromLLtoPixel(self, ll, zoom):
        """Map an (lng, lat) pair to global pixel coordinates at *zoom*."""
        center = self.zc[zoom]
        px = round(center[0] + ll[0] * self.Bc[zoom])
        # Clamp the latitude term to avoid the mercator singularity at the poles.
        clamped = min(max(sin(DEG_TO_RAD * ll[1]), -0.9999), 0.9999)
        py = round(center[1] + 0.5 * log((1 + clamped) / (1 - clamped)) * -self.Cc[zoom])
        return (px, py)

    def fromPixelToLL(self, px, zoom):
        """Map global pixel coordinates at *zoom* back to an (lng, lat) pair."""
        center = self.zc[zoom]
        lng = (px[0] - center[0]) / self.Bc[zoom]
        lat = RAD_TO_DEG * (2 * atan(exp((px[1] - center[1]) / -self.Cc[zoom])) - 0.5 * pi)
        return (lng, lat)
class SurveyChartPlugin(CMSPlugin):
    """CMS plugin instance (CMSPlugin subclass) referencing the Chart to embed."""
    chart = models.ForeignKey(Chart)
|
pschella/scipy
|
refs/heads/master
|
scipy/stats/_constants.py
|
40
|
"""
Statistics-related constants.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
# The smallest representable positive number such that 1.0 + _EPS != 1.0.
_EPS = np.finfo(float).eps
# The largest [in magnitude] usable floating value.
_XMAX = np.finfo(float).max
# The log of the largest usable floating value; useful for knowing
# when exp(something) will overflow
_LOGXMAX = np.log(_XMAX)
# The smallest [in magnitude] usable floating value.
_XMIN = np.finfo(float).tiny
# -special.psi(1)
_EULER = 0.577215664901532860606512090082402431042
# special.zeta(3, 1) Apery's constant
_ZETA3 = 1.202056903159594285399738161511449990765
|
pombredanne/cliques
|
refs/heads/master
|
invite_only/adapter.py
|
1
|
from allauth.account.adapter import DefaultAccountAdapter
from invite_only.models import InviteCode
import logging
logger = logging.getLogger(__name__)
class InviteOnlyAccountAdapter(DefaultAccountAdapter):
    """allauth account adapter that gates signup behind an invite code."""

    def is_open_for_signup(self, request):
        """Allow the signup page (GET) only with a valid ?invite_code=...

        Non-GET requests are always allowed so the signup form itself can be
        submitted once the page was reached.
        """
        if request.method == 'GET':
            # BUGFIX: typo "oepn" -> "open" in the log message.
            logger.info("is open for signup {} {}".format(
                request.GET, request.POST))
            invite_code = self._get_invite(request)
            # None means missing/invalid code: refuse to show the signup page.
            return invite_code is not None
        return True

    def stash_verified_email(self, request, email):
        request.session['account_verified_email'] = email

    def is_email_verified(self, request, email):
        # Invited users are trusted: skip allauth's email verification step.
        return True

    def save_user(self, request, user, form, commit=True):
        # BUGFIX: honor the caller's commit flag (previously hard-coded to
        # True) and propagate the superclass return value.
        return super(InviteOnlyAccountAdapter, self).save_user(
            request, user, form, commit=commit)

    def _get_invite(self, request):
        """Return the InviteCode matching ?invite_code=..., or None."""
        logger.info('get {}, post: {}'.format(request.GET, request.POST))
        invite_code = request.GET.get('invite_code')
        logger.debug('Got invite_code {}'.format(invite_code))
        try:
            return InviteCode.objects.get(code=invite_code)
        except (InviteCode.MultipleObjectsReturned, InviteCode.DoesNotExist):
            logger.exception('Invalid invite code {}'.format(invite_code))
            return None
|
allanino/nupic
|
refs/heads/master
|
tests/integration/nupic/engine/vector_file_sensor_test.py
|
34
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file tests VectorFileSensor exhaustively using the sessions interface.
Need to add tests for parameters:
loading and appending CSV files
test for recentFile
"""
import os
import pkg_resources
import unittest2 as unittest
from nupic.engine import Array, Dimensions, Network
# Saved-network and test-data fixtures bundled with this test package, one
# data file per supported input format (txt, csv, lf4, bf4, idx).
g_filename = pkg_resources.resource_filename(__name__, "data/vectorfile.nta")
g_dataFile = pkg_resources.resource_filename(__name__,
                                             "data/vectortestdata.txt")
g_dataFile2 = pkg_resources.resource_filename(__name__,
                                              "data/vectortestdata2.txt")
g_dataFileCSV = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.csv")
g_dataFileCSV2 = pkg_resources.resource_filename(__name__,
                                                 "data/vectortestdata2.csv")
g_dataFileCSV3 = pkg_resources.resource_filename(__name__,
                                                 "data/vectortestdata3.csv")
g_dataFileLF4 = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.lf4")
g_dataFileBF4 = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.bf4")
g_dataFileIDX = pkg_resources.resource_filename(__name__,
                                                "data/vectortestdata.idx")
class VectorFileSensorTest(unittest.TestCase):
"""Class for testing the VectorFileSensor plugin by loading a known network
with a single VectorFileSensor node and a known data file."""
  def setUp(self):
    """Bind fixture paths and reset per-test bookkeeping."""
    self.filename = g_filename          # saved network file
    self.nodeName = "TestSensor"
    self.sensorName = "VectorFileSensor"
    # One data file per supported input format.
    self.dataFile = g_dataFile
    self.dataFile2 = g_dataFile2
    self.dataFile3a = g_dataFileCSV
    self.dataFile3b = g_dataFileCSV2
    self.dataFile3c = g_dataFileCSV3
    self.dataFile4 = g_dataFileLF4
    self.dataFile5 = g_dataFileBF4
    self.dataFile6 = g_dataFileIDX
    # NOTE(review): numTests/testFailures look like leftovers of a
    # pre-unittest harness; only testsPassed is updated in the visible code.
    self.numTests = 333
    self.testsPassed = 0
    self.testFailures = []
    self.sensor = None                  # set by _testNetLoad
  def testAll(self):
    """Run all the tests in our suite, catching any exceptions that might be
    thrown.
    """
    print 'VectorFileSensorTest parameters:'
    print 'PYTHONPATH: %s' % os.environ.get('PYTHONPATH', 'NOT SET')
    print 'filename: %s' % self.filename

    # Order matters: the no-file test must run before a network is loaded,
    # and _testNetLoad initializes self.sensor for everything after it.
    self._testRunWithoutFile()
    self._testNetLoad()
    self._testFakeLoadFile()
    self._testRepeatCount()
    self._testUnknownCommand()

    # Test maxOutput and activeOutputCount
    self._testOutputCounts(0)
    self._testLoadFile(self.dataFile, '0', '0')
    self._testOutputCounts(5)

    # Test a sequence of loads, runs, appends, etc. over each file format.
    self._testLoadFile(self.dataFile, '0', '0')
    self._testRun()
    self._testLoadFile(self.dataFile2, '', '0')
    self._testRun()
    self._testLoadFile(self.dataFile2, '2', '0')
    self._testRun()
    self._testLoadFile(self.dataFile3a, '3', '0')
    self._testRun()
    self._testLoadFile(self.dataFile4, '4', '0')
    self._testRun()
    self._testLoadFile(self.dataFile5, '5', '0')
    self._testRun()
    self._testLoadFile(self.dataFile6, '6', '0')
    self._testRun()
    self._testPosition()
    self._testAppendFile(self.dataFile2, '2', '1', 10)
    self._testAppendFile(self.dataFile, '0', '1', 15)
    self._testRun()
    self._testScaling(self.dataFile3b, '3')

    # Test optional categoryOut and resetOut
    self.sensor.setParameter('hasCategoryOut', 1)
    self.sensor.setParameter('hasResetOut', 1)
    self._testLoadFile(self.dataFile3c, '3', '0')
    self._testOptionalOutputs()
    self.sensor.setParameter('hasCategoryOut', 0)
    self.sensor.setParameter('hasResetOut', 0)
  def _testNetLoad(self):
    """Test loading a network with this sensor in it."""
    n = Network()
    # activeOutputCount: 11 matches the width of the test data vectors.
    r = n.addRegion(self.nodeName, self.sensorName, '{ activeOutputCount: 11}')
    r.dimensions = Dimensions([1])
    n.save(self.filename)

    # Reload from disk to also exercise (de)serialization.
    n = Network(self.filename)
    n.initialize()
    self.testsPassed += 1

    # Check that vectorCount parameter is zero
    r = n.regions[self.nodeName]
    res = r.getParameter('vectorCount')
    self.assertEqual(
        res, 0, "getting vectorCount:\n Expected '0', got back '%d'\n" % res)

    # Remember the region for the rest of the suite.
    self.sensor = r
def _testFakeLoadFile(self):
"""Test reading in a fake file."""
# Loading a fake file should throw an exception
with self.assertRaises(RuntimeError):
self.sensor.executeCommand(['loadFile', 'ExistenceIsAnIllusion.txt', '0'])
def _testRunWithoutFile(self):
"""Test running the network without a file loaded. This should be run
before any file has been loaded in!"""
with self.assertRaises(AttributeError):
self.sensor.compute()
  def _testRepeatCount(self):
    """Test setting and getting repeat count using parameters."""
    # Check default repeat count (freshly loaded network => 1).
    n = Network(self.filename)
    sensor = n.regions[self.nodeName]
    res = sensor.executeCommand(['dump'])
    expected = self.sensorName + \
        ' isLabeled = 0 repeatCount = 1 vectorCount = 0 iterations = 0\n'
    self.assertEqual(
        res, expected,
        "repeat count test:\n expected '%s'\n got '%s'\n" %
        (expected, res))

    # Set to 42, check it and return it back to 1
    sensor.setParameter('repeatCount', 42)
    res = sensor.getParameter('repeatCount')
    self.assertEqual(
        res, 42, "set repeatCount to 42:\n got back '%d'\n" % res)
    # The new value must also be reflected in the textual dump.
    res = sensor.executeCommand(['dump'])
    expected = (self.sensorName +
                ' isLabeled = 0 repeatCount = 42 vectorCount = 0 '
                'iterations = 0\n')
    self.assertEqual(
        res, expected,
        "set to 42 test:\n expected '%s'\n got '%s'\n" %
        (expected, res))
    sensor.setParameter('repeatCount', 1)
  def _testLoadFile(self, dataFile, fileFormat= '', iterations=''):
    """Test reading our sample vector file. The sample file
    has 5 vectors of the correct length, plus one with incorrect length.
    The sensor should ignore the last line."""
    # Now load a real file; fileFormat is passed through only when given.
    if fileFormat != '':
      res = self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
    else:
      res = self.sensor.executeCommand(['loadFile', dataFile])
    self.assertTrue(res == '' or
                    res.startswith('VectorFileSensor read in file'),
                    'loading a real file: %s' % str(res))

    # Check recent file
    res = self.sensor.getParameter('recentFile')
    self.assertEqual(res, dataFile, 'recent file, got: %s' % (res))

    # Check summary of file contents (vectorCount = 5: bad line ignored).
    res = self.sensor.executeCommand(['dump'])
    expected = (self.sensorName +
                ' isLabeled = 0 repeatCount = 1 vectorCount = 5 iterations = ' +
                iterations + '\n')
    self.assertEqual(res, expected,
                     'file summary:\n expected "%s"\n got "%s"\n' %
                     (expected, res))
  def _testAppendFile(self, dataFile, fileFormat= '', iterations='',
                      numVecs=''):
    """Test appending our sample vector file. The sample file
    has 5 vectors of the correct length, plus one with incorrect length.
    The sensor should ignore the last line."""
    # Now append to the already-loaded data.
    if fileFormat != '':
      res = self.sensor.executeCommand(['appendFile', dataFile, fileFormat])
    else:
      res = self.sensor.executeCommand(['appendFile', dataFile])
    self.assertTrue(res == '' or
                    res.startswith('VectorFileSensor read in file'),
                    'loading a real file: %s' % str(res))

    # Check recent file
    res = self.sensor.getParameter('recentFile')
    self.assertEqual(res, dataFile, 'recent file, got: %s' % res)

    # Check summary of file contents: the caller supplies the expected
    # cumulative vector count after the append.
    res = self.sensor.executeCommand(['dump'])
    expected = self.sensorName + ' isLabeled = 0 repeatCount = 1' + \
        ' vectorCount = '+str(numVecs)+' iterations = ' + iterations + '\n'
    self.assertEqual(res, expected,
                     'file summary:\n expected "%s"\n got "%s"\n' %
                     (expected, res))

    # Check vectorCount parameter
    res = self.sensor.getParameter('vectorCount')
    self.assertEqual(res, numVecs,
                     'getting position:\n Expected ' + str(numVecs) +
                     ', got back "%s"\n' % res)
  def _testRun(self):
    """This is the basic workhorse test routine. It runs the net several times
    to ensure the sensor is outputting the correct values. The routine tests
    looping, tests each vector, and tests repeat count. """
    # Set repeat count to 3
    self.sensor.setParameter('repeatCount', 3)
    self.sensor.setParameter('position', 0)

    # Run the sensor several times to ensure it is outputting the correct
    # values. Test vector i holds the value i+1 at index i and zeros elsewhere,
    # so both the element check and the sum check below pin the exact output.
    for _epoch in [1, 2]:             # test looping
      for vec in [0, 1, 2, 3, 4]:     # test each vector
        for _rc in [1, 2, 3]:         # test repeatCount
          # Run and get outputs
          self.sensor.compute()
          outputs = self.sensor.getOutputData('dataOut')

          # Check outputs
          #sum = reduce(lambda x,y:int(x)+int(y),outputs)
          self.assertEqual(outputs[vec], vec+1, 'output = %s' % str(outputs))
          self.assertEqual(sum(outputs), vec+1, 'output = %s' % str(outputs))

    # Set repeat count back to 1
    self.sensor.setParameter('repeatCount', 1)
  def _testOutputCounts(self, vectorCount):
    """Test maxOutputVectorCount with different repeat counts."""
    # Test maxOutput with different repeat counts.
    res = self.sensor.getParameter('maxOutputVectorCount')
    self.assertEqual(res, vectorCount,
                     "getting maxOutputVectorCount:\n Expected '" +
                     str(vectorCount) + "', got back '%d'\n" % (res))

    # maxOutputVectorCount must scale with repeatCount.
    self.sensor.setParameter('repeatCount', 3)
    res = self.sensor.getParameter('maxOutputVectorCount')
    self.assertEqual(res, 3 * vectorCount,
                     'getting maxOutputVectorCount:\n Expected ' +
                     str(3*vectorCount)+', got back "%d"\n' % res)
    self.sensor.setParameter('repeatCount', 1)

    # Test activeOutputCount (fixed at 11 by _testNetLoad).
    res = self.sensor.getParameter('activeOutputCount')
    self.assertEqual(
        res, 11,
        'getting activeOutputCount :\n Expected 11, got back "%d"\n' % res)
  def _testPosition(self):
    """Test setting and getting position parameter. Run compute once to verify
    it went to the right position."""
    self.sensor.setParameter('position', 2)
    self.sensor.compute()
    outputs = self.sensor.getOutputData('dataOut')

    # Vector 2 holds value 3 at index 2 and zeros elsewhere.
    self.assertEqual(outputs[2], 3, 'output = %s' % str(outputs))
    self.assertEqual(sum(outputs), 3, 'output = %s' % str(outputs))

    # Now it should have incremented the position
    res = self.sensor.getParameter('position')
    self.assertEqual(res, 3,
                     'getting position:\n Expected "3", got back "%d"\n' %
                     res)
def _testScaling(self, dataFile, fileFormat=''):
  """Specific tests for setScaleVector, setOffsetVector, and scalingMode.

  Verifies that:
    * scalingMode defaults to 'none' after a netLoad, with unit scale and
      zero offset;
    * 'standardForm' derives the expected scale/offset vectors from the
      loaded data file;
    * custom scale/offset vectors can be set (flipping scalingMode to
      'custom') and survive a subsequent loadFile;
    * setting scalingMode back to 'none' resets scale/offset to 1/0.

  Note: the failure messages now interpolate the value actually being
  compared; previously they printed a stale ``res`` variable, which made
  failures misleading.
  """
  # Retrieve scalingMode after a netLoad. Should be 'none'
  mode = self.sensor.getParameter('scalingMode')
  self.assertEqual(mode, 'none',
                   'Getting scalingMode:\n Expected "none", got back "%s"\n' %
                   mode)
  # Retrieve scaling and offset after netLoad - should be 1 and zero
  # respectively.
  a = Array('Real32', 11)
  self.sensor.getParameterArray('scaleVector', a)
  self.assertEqual(str(a), '[ 1 1 1 1 1 1 1 1 1 1 1 ]',
                   'Error getting ones scaleVector:\n Got back "%s"\n' %
                   str(a))
  self.sensor.getParameterArray('offsetVector', a)
  self.assertEqual(str(a), '[ 0 0 0 0 0 0 0 0 0 0 0 ]',
                   'Error getting zero offsetVector:\n Got back "%s"\n' %
                   str(a))
  # load data file, set scaling and offset to standardForm and check
  self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
  self.sensor.setParameter('scalingMode', 'standardForm')
  self.sensor.getParameterArray('scaleVector', a)
  s = ('[ 2.23607 1.11803 0.745356 0.559017 0.447214 2.23607 1.11803 '
       '0.745356 0.559017 0.447214 2.23607 ]')
  self.assertEqual(
      str(a), s,
      'Error getting standardForm scaleVector:\n Got back "%s"\n' % str(a))
  o = '[ -0.2 -0.4 -0.6 -0.8 -1 -0.2 -0.4 -0.6 -0.8 -1 -0.2 ]'
  self.sensor.getParameterArray('offsetVector', a)
  self.assertEqual(
      str(a), o,
      'Error getting standardForm offsetVector:\n Got back "%s"\n' % str(a))
  # set to custom value and check
  scaleVector = Array('Real32', 11)
  for i, x in enumerate((1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1)):
    scaleVector[i] = x
  self.sensor.setParameterArray('scaleVector', scaleVector)
  self.sensor.getParameterArray('scaleVector', a)
  self.assertEqual(str(a), str(scaleVector),
                   'Error getting modified scaleVector:\n Got back "%s"\n' %
                   str(a))
  offsetVector = Array('Real32', 11)
  for i, x in enumerate((1, 2, 3, 4, 1, 1, 1, 1, 1, 2, 1)):
    offsetVector[i] = x
  self.sensor.setParameterArray('offsetVector', offsetVector)
  self.sensor.getParameterArray('offsetVector', a)
  self.assertEqual(str(a), str(offsetVector),
                   'Error getting modified offsetVector:\n Got back "%s"\n' %
                   str(a))
  # Setting explicit vectors must flip scalingMode to 'custom'.
  mode = self.sensor.getParameter('scalingMode')
  self.assertEqual(
      mode, 'custom',
      'Getting scalingMode:\n Expected "custom", got back "%s"\n' % mode)
  # At this point we test loading a data file using loadFile. The scaling
  # params should still be active and applied to the new vectors.
  self.sensor.executeCommand(['loadFile', dataFile, fileFormat])
  self.sensor.getParameterArray('offsetVector', a)
  self.assertEqual(
      str(a), str(offsetVector),
      'Error getting modified offsetVector after loadFile:\n Got back '
      '"%s"\n' % str(a))
  self.sensor.getParameterArray('scaleVector', a)
  self.assertEqual(str(a), str(scaleVector),
                   'Error getting modified scaleVector after loadFile:\n '
                   'Got back "%s"\n' % str(a))
  # Set scaling mode back to none and retrieve scaling and offset - should
  # be 1 and zero respectively.
  self.sensor.setParameter('scalingMode', 'none')
  self.sensor.getParameterArray('scaleVector', a)
  noScaling = Array('Real32', 11)
  for i in range(11):
    noScaling[i] = 1
  self.assertEqual(str(a), str(noScaling),
                   'Error getting ones scaleVector:\n Got back "%s"\n' %
                   str(a))
  noOffset = Array('Real32', 11)
  for i in range(11):
    noOffset[i] = 0
  self.sensor.getParameterArray('offsetVector', a)
  self.assertEqual(str(a), str(noOffset),
                   'Error getting zero offsetVector:\n Got back "%s"\n' %
                   str(a))
def _testUnknownCommand(self):
  """Test that exception is thrown when unknown execute command sent."""
  # An unrecognized command name must be rejected with a RuntimeError.
  self.assertRaises(RuntimeError,
                    self.sensor.executeCommand,
                    ['nonExistentCommand'])
def _testOptionalOutputs(self):
  """This is the basic workhorse test routine. It runs the net several times
  to ensure the sensor is outputting the correct values. The routine tests
  looping, tests each vector, and tests repeat count.

  In addition to 'dataOut', this variant also collects the optional
  'categoryOut' and 'resetOut' outputs on every compute and checks the
  full recorded sequences at the end.
  """
  # Set repeat count to 3
  self.sensor.setParameter('repeatCount', 3)
  self.sensor.setParameter('position', 0)
  # Run the sensor several times to ensure it is outputting the correct
  # values.
  categories = []
  resetOuts = []
  for _epoch in [1, 2]:  # test looping
    for vec in [0, 1, 2, 3, 4]:  # test each vector
      for _rc in [1, 2, 3]:  # test repeatCount
        # Run and get outputs
        self.sensor.compute()
        outputs = self.sensor.getOutputData('dataOut')
        # Record the scalar category and reset flag emitted with this vector.
        a = self.sensor.getOutputData('categoryOut')
        categories.append(a[0])
        a = self.sensor.getOutputData('resetOut')
        resetOuts.append(a[0])
        # Check outputs
        self.assertEqual(outputs[vec], vec+1, 'output = %s' % str(outputs))
        self.assertEqual(sum(outputs), vec+1, 'output = %s' % str(outputs))
  # Expected sequences for the test data file: two identical epochs of
  # 15 computes each (5 vectors x repeatCount 3).
  self.assertEqual(categories, 2 * ([6] * 12 + [8] * 3))
  self.assertEqual(resetOuts,
                   2 * [1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
  # Set repeat count back to 1
  self.sensor.setParameter('repeatCount', 1)
# Allow this test module to be executed directly.
if __name__ == '__main__':
  unittest.main()
|
alejandro-mc/trees
|
refs/heads/master
|
nodesVsSPRSeq.py
|
1
|
#randSPRwalk.py
#writes random SPR walks to files
#calls GTP on each SPR random walk file to get
#the ditances between each tree and the first tree of the sequence
#the results are written to csv files with lines delimited by \t
from tree_utils import *
import os
import sys
def randSPRwalk(size,steps,runs,seed):
    """Generate random SPR walks and record GTP tree distances per walk.

    For each of ``runs`` walks: build a random binary tree with ``size``
    leaves, apply ``steps`` random SPR moves (writing every intermediate
    tree in Newick form to a temp file), run GTP on the file, pipe GTP's
    CSV output through ./toLines.py, and append each resulting line —
    prefixed with the tree's node count — to a single results file named
    from the parameters.

    Relies on ``gtp.jar`` and ``toLines.py`` being in the current working
    directory (invoked via os.system below). ``random`` is not imported
    here directly — assumes the ``from tree_utils import *`` at the top of
    the file brings it into scope; TODO confirm.

    Args:
        size: number of leaves in each starting tree.
        steps: number of random SPR moves per walk.
        runs: number of independent walks.
        seed: seed for the random number generator (reproducibility).
    """
    #set the seed
    random.seed(seed)
    # Results file name encodes all four parameters.
    out_file_name = "SPR_" + str(size) + "_" + str(steps) + "_" +\
                    str(runs) + "_" + str(seed)
    #create a file for each spr sequence
    for k in range(runs):
        #define starting tree
        rand_tree = genRandBinTree(list(range(size)))
        total_nodes = countNodes(rand_tree)
        #write current sequence to file
        with open('tmpsprseq' + str(k),'w') as treefile:
            treefile.write(toNewickTree(rand_tree) + "\n")
            current_tree = rand_tree
            for i in range(steps):
                # randSPR returns a tuple; element 0 is the new tree.
                current_tree = randSPR(current_tree,total_nodes)[0]
                treefile.write(toNewickTree(current_tree) + "\n")
        #assumes GTP file is in current working directory
        outfile = "tempseq.csv"
        infile = "tmpsprseq" + str(k)
        os.system("java -jar gtp.jar -r 1 -o " + outfile + " " + infile)
        #append output to final sequence file
        os.system("cat tempseq.csv | ./toLines.py > tempnodesseqfile")
        #prepend number of nodes to every line in the file
        #and append the lines to out results file
        with open('tempnodesseqfile','r') as seqfile:
            with open(out_file_name,'a') as nodesSeqs:
                for line in seqfile:
                    nodesSeqs.write(str(total_nodes) + "," + line)
    # Cleanup of temp files is done via the shell; note tmpsprseq* removes
    # the per-walk sequence files for all runs at once.
    os.system("rm tempseq.csv")
    os.system("rm tempnodesseqfile")
    os.system("rm tmpsprseq*")
if __name__ == '__main__':
    # Require all four positional arguments (plus the script name itself).
    if len(sys.argv) < 5:
        print("Too few arguments!!")
        print("Usage: <no. leaves> <no. SPR steps> <no. runs> <seed>")
        sys.exit(-1)
    # Unpack the raw string arguments, then convert at the call site.
    leaves, spr_steps, num_runs, rng_seed = sys.argv[1:5]
    randSPRwalk(int(leaves), int(spr_steps), int(num_runs), int(rng_seed))
|
iDTLabssl/account-invoicing
|
refs/heads/8.0
|
stock_picking_invoicing/__init__.py
|
33
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-15 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import stock
|
pmclanahan/pytest-progressive
|
refs/heads/master
|
noseprogressive/bar.py
|
4
|
from __future__ import with_statement
from itertools import cycle
from signal import signal, SIGWINCH
__all__ = ['ProgressBar', 'NullProgressBar']
class ProgressBar(object):
    """Terminal progress bar drawn on the last screen line.

    Renders a fixed-width colored graph plus the identifier of the test
    currently running, and can temporarily "dodge" (erase itself) so other
    output can be written without leaving bar fragments on screen.
    """

    # Counts nested dodging() contexts; >0 means the bar is currently erased.
    _is_dodging = 0  # Like a semaphore

    def __init__(self, max_value, term, filled_color=8, empty_color=7):
        """``max_value`` is the highest value I will attain. Must be >0.

        Args:
            term: terminal object exposing stream, number_of_colors,
                on_color, reverse, bold, clear_eol, location, height and
                width (blessings-style — TODO confirm the exact type).
            filled_color/empty_color: color indices for the filled/empty
                portions of the graph, used only if the terminal supports
                enough colors.
        """
        self.stream = term.stream
        self.max = max_value
        self._term = term
        self.last = ''  # The contents of the previous progress line printed
        self._measure_terminal()
        # Prepare formatting, dependent on whether we have terminal colors:
        if term.number_of_colors > max(filled_color, empty_color):
            self._fill_cap = term.on_color(filled_color)
            self._empty_cap = term.on_color(empty_color)
            self._empty_char = ' '
        else:
            # No usable colors: fall back to reverse-video fill and a
            # printable character for the empty portion.
            self._fill_cap = term.reverse
            self._empty_cap = lambda s: s
            self._empty_char = '_'
        # Re-measure the terminal whenever the window is resized.
        signal(SIGWINCH, self._handle_winch)

    def _measure_terminal(self):
        # Fall back to a conventional 24x80 when the terminal size is unknown.
        self.lines, self.cols = (self._term.height or 24,
                                 self._term.width or 80)

    def _handle_winch(self, *args):
        #self.erase()  # Doesn't seem to help.
        self._measure_terminal()
        # TODO: Reprint the bar but at the new width.

    def update(self, test_path, number):
        """Draw an updated progress bar.

        At the moment, the graph takes a fixed width, and the test identifier
        takes the rest of the row, truncated from the left to fit.
        test_path -- the selector of the test being run
        number -- how many tests have been run so far, including this one
        """
        # TODO: Play nicely with absurdly narrow terminals. (OS X's won't even
        # go small enough to hurt us.)
        # Figure out graph:
        GRAPH_WIDTH = 14
        # min() is in case we somehow get the total test count wrong. It's tricky.
        num_filled = int(round(min(1.0, float(number) / self.max) * GRAPH_WIDTH))
        graph = ''.join([self._fill_cap(' ' * num_filled),
                         self._empty_cap(self._empty_char * (GRAPH_WIDTH - num_filled))])
        # Figure out the test identifier portion:
        cols_for_path = self.cols - GRAPH_WIDTH - 2  # 2 spaces between path & graph
        if len(test_path) > cols_for_path:
            # Truncate from the left: the end of the path is the useful part.
            test_path = test_path[len(test_path) - cols_for_path:]
        else:
            test_path += ' ' * (cols_for_path - len(test_path))
        # Put them together, and let simmer:
        self.last = self._term.bold(test_path) + ' ' + graph
        with self._at_last_line():
            self.stream.write(self.last)
        self.stream.flush()

    def erase(self):
        """White out the progress bar."""
        with self._at_last_line():
            self.stream.write(self._term.clear_eol)
        self.stream.flush()

    def _at_last_line(self):
        """Return a context manager that positions the cursor at the last line, lets you write things, and then returns it to its previous position."""
        return self._term.location(0, self.lines)

    def dodging(bar):
        # NOTE: the receiver is deliberately named ``bar`` rather than
        # ``self`` so the nested ShyProgressBar methods below can close over
        # it without shadowing their own ``self``.
        """Return a context manager which erases the bar, lets you output things, and then redraws the bar.

        It's reentrant.
        """
        class ShyProgressBar(object):
            """Context manager that implements a progress bar that gets out of the way"""

            def __enter__(self):
                """Erase the progress bar so bits of disembodied progress bar don't get scrolled up the terminal."""
                # My terminal has no status line, so we make one manually.
                bar._is_dodging += 1  # Increment before calling erase(), which
                                      # calls dodging() again.
                if bar._is_dodging <= 1:  # It *was* 0.
                    bar.erase()

            def __exit__(self, type, value, tb):
                """Redraw the last saved state of the progress bar."""
                if bar._is_dodging == 1:  # Can't decrement yet; write() could
                                          # read it.
                    # This is really necessary only because we monkeypatch
                    # stderr; the next test is about to start and will redraw
                    # the bar.
                    with bar._at_last_line():
                        bar.stream.write(bar.last)
                    bar.stream.flush()
                bar._is_dodging -= 1

        return ShyProgressBar()
class Null(object):
    """Inert stand-in that absorbs any attribute access or call."""

    def __getattr__(self, *args, **kwargs):
        """Return a boring callable for any attribute accessed."""
        def _noop(*_args, **_kwargs):
            return None
        return _noop

    # Beginning in Python 2.7, __enter__ and __exit__ aren't looked up through
    # __getattr__ or __getattribute__:
    # http://docs.python.org/reference/datamodel#specialnames
    __enter__ = __exit__ = __getattr__
class NullProgressBar(Null):
    """``ProgressBar`` workalike that does nothing

    Comes in handy when you want to have an option to hide the progress bar.
    """

    def dodging(self):
        # Hand back a fresh Null so callers can still use the result as a
        # context manager (Python looks up __enter__/__exit__ on the type).
        return Null()
|
shingonoide/odoo
|
refs/heads/deverp_8.0
|
addons/payment_ogone/tests/test_ogone.py
|
430
|
# -*- coding: utf-8 -*-
from lxml import objectify
import time
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment.tests.common import PaymentAcquirerCommon
from openerp.addons.payment_ogone.controllers.main import OgoneController
from openerp.tools import mute_logger
class OgonePayment(PaymentAcquirerCommon):
    """Integration tests for the Ogone payment acquirer.

    Covers form rendering with SHA signing (test_10), form-feedback
    validation including tampered signatures and error statuses (test_20),
    and server-to-server alias creation/execution (test_30). All tests
    assert the acquirer is in the 'test' environment before doing anything.
    """

    def setUp(self):
        super(OgonePayment, self).setUp()
        cr, uid = self.cr, self.uid
        self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        # get the ogone account (XML id payment_acquirer_ogone)
        model, self.ogone_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_ogone', 'payment_acquirer_ogone')

    def test_10_ogone_form_render(self):
        """Render the Ogone payment button and verify every form value,
        including the precomputed SHASIGN, with and without a transaction."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
        self.assertEqual(ogone.environment, 'test', 'test without test environment')

        # ----------------------------------------
        # Test: button direct rendering + shasign
        # ----------------------------------------

        # Expected POST values; SHASIGN is the precomputed signature for
        # exactly this parameter set.
        form_values = {
            'PSPID': 'dummy',
            'ORDERID': 'test_ref0',
            'AMOUNT': '1',
            'CURRENCY': 'EUR',
            'LANGUAGE': 'en_US',
            'CN': 'Norbert Buyer',
            'EMAIL': 'norbert.buyer@example.com',
            'OWNERZIP': '1000',
            'OWNERADDRESS': 'Huge Street 2/543',
            'OWNERCTY': 'Belgium',
            'OWNERTOWN': 'Sin City',
            'OWNERTELNO': '0032 12 34 56 78',
            'SHASIGN': '815f67b8ff70d234ffcf437c13a9fa7f807044cc',
            'ACCEPTURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._accept_url),
            'DECLINEURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._decline_url),
            'EXCEPTIONURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._exception_url),
            'CANCELURL': '%s' % urlparse.urljoin(self.base_url, OgoneController._cancel_url),
        }

        # render the button
        res = self.payment_acquirer.render(
            cr, uid, self.ogone_id,
            'test_ref0', 0.01, self.currency_euro_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)

        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'ogone: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )

        # ----------------------------------------
        # Test2: button using tx + validation
        # ----------------------------------------

        # create a new draft tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 0.01,
                'acquirer_id': self.ogone_id,
                'currency_id': self.currency_euro_id,
                'reference': 'test_ref0',
                'partner_id': self.buyer_id,
            }, context=context
        )
        # render the button: the tx reference must take precedence over the
        # reference passed in the call ('should_be_erased').
        res = self.payment_acquirer.render(
            cr, uid, self.ogone_id,
            'should_be_erased', 0.01, self.currency_euro,
            tx_id=tx_id,
            partner_id=None,
            partner_values=self.buyer_values,
            context=context)

        # check form result
        tree = objectify.fromstring(res)
        self.assertEqual(tree.get('action'), 'https://secure.ogone.com/ncol/test/orderstandard.asp', 'ogone: wrong form POST url')
        for form_input in tree.input:
            if form_input.get('name') in ['submit']:
                continue
            self.assertEqual(
                form_input.get('value'),
                form_values[form_input.get('name')],
                'ogone: wrong value for form input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')])
            )

    @mute_logger('openerp.addons.payment_ogone.models.ogone', 'ValidationError')
    def test_20_ogone_form_management(self):
        """Feed simulated Ogone POST data back to the transaction and verify
        state transitions: unknown tx, success, bad SHASIGN, error status."""
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
        self.assertEqual(ogone.environment, 'test', 'test without test environment')

        # typical data posted by ogone after client has successfully paid
        ogone_post_data = {
            'orderID': u'test_ref_2',
            'STATUS': u'9',
            'CARDNO': u'XXXXXXXXXXXX0002',
            'PAYID': u'25381582',
            'CN': u'Norbert Buyer',
            'NCERROR': u'0',
            'TRXDATE': u'11/15/13',
            'IP': u'85.201.233.72',
            'BRAND': u'VISA',
            'ACCEPTANCE': u'test123',
            'currency': u'EUR',
            'amount': u'1.95',
            'SHASIGN': u'7B7B0ED9CBC4A85543A9073374589033A62A05A5',
            'ED': u'0315',
            'PM': u'CreditCard'
        }

        # should raise error about unknown tx
        with self.assertRaises(ValidationError):
            self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)

        # create tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 1.95,
                'acquirer_id': self.ogone_id,
                'currency_id': self.currency_euro_id,
                'reference': 'test_ref_2',
                'partner_name': 'Norbert Buyer',
                'partner_country_id': self.country_france_id,
            }, context=context
        )
        # validate it
        self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'done', 'ogone: validation did not put tx into done state')
        self.assertEqual(tx.ogone_payid, ogone_post_data.get('PAYID'), 'ogone: validation did not update tx payid')

        # reset tx so the same post data can be replayed below
        tx.write({'state': 'draft', 'date_validate': False, 'ogone_payid': False})

        # now ogone post is ok: try to modify the SHASIGN
        ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
        with self.assertRaises(ValidationError):
            self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)

        # simulate an error (STATUS 2 = declined; SHASIGN check is presumably
        # bypassed for error statuses — confirm against the ogone model)
        ogone_post_data['STATUS'] = 2
        ogone_post_data['SHASIGN'] = 'a4c16bae286317b82edb49188d3399249a784691'
        self.payment_transaction.ogone_form_feedback(cr, uid, ogone_post_data, context=context)
        # check state
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertEqual(tx.state, 'error', 'ogone: erroneous validation did not put tx into error state')

    def test_30_ogone_s2s(self):
        """Create a server-to-server alias for a card and execute a payment
        with it against Ogone's test environment."""
        # Unique reference per run so repeated executions don't collide.
        test_ref = 'test_ref_%.15f' % time.time()
        cr, uid, context = self.cr, self.uid, {}
        # be sure not to do stupid thing
        ogone = self.payment_acquirer.browse(self.cr, self.uid, self.ogone_id, None)
        self.assertEqual(ogone.environment, 'test', 'test without test environment')

        # create a new draft tx
        tx_id = self.payment_transaction.create(
            cr, uid, {
                'amount': 0.01,
                'acquirer_id': self.ogone_id,
                'currency_id': self.currency_euro_id,
                'reference': test_ref,
                'partner_id': self.buyer_id,
                'type': 'server2server',
            }, context=context
        )
        # create an alias (test card number from Ogone's sandbox)
        res = self.payment_transaction.ogone_s2s_create_alias(
            cr, uid, tx_id, {
                'expiry_date_mm': '01',
                'expiry_date_yy': '2015',
                'holder_name': 'Norbert Poilu',
                'number': '4000000000000002',
                'brand': 'VISA',
            }, context=context)

        # check an alias is set, containing at least OPENERP
        tx = self.payment_transaction.browse(cr, uid, tx_id, context=context)
        self.assertIn('OPENERP', tx.partner_reference, 'ogone: wrong partner reference after creating an alias')

        res = self.payment_transaction.ogone_s2s_execute(cr, uid, tx_id, {}, context=context)
        # print res
        # Example of the dict returned by ogone_s2s_execute:
        # {
        # 'orderID': u'reference',
        # 'STATUS': u'9',
        # 'CARDNO': u'XXXXXXXXXXXX0002',
        # 'PAYID': u'24998692',
        # 'CN': u'Norbert Poilu',
        # 'NCERROR': u'0',
        # 'TRXDATE': u'11/05/13',
        # 'IP': u'85.201.233.72',
        # 'BRAND': u'VISA',
        # 'ACCEPTANCE': u'test123',
        # 'currency': u'EUR',
        # 'amount': u'1.95',
        # 'SHASIGN': u'EFDC56879EF7DE72CCF4B397076B5C9A844CB0FA',
        # 'ED': u'0314',
        # 'PM': u'CreditCard'
        # }
|
js0701/chromium-crosswalk
|
refs/heads/master
|
testing/chromoting/chromoting_test_utilities.py
|
7
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility script to run tests on the Chromoting bot."""
import hashlib
import os
from os.path import expanduser
import re
import shutil
import socket
import subprocess
import psutil
PROD_DIR_ID = '#PROD_DIR#'
CRD_ID = 'chrome-remote-desktop'  # Used in a few file/folder names
HOST_READY_INDICATOR = 'Host ready to receive connections.'
BROWSER_TEST_ID = 'browser_tests'
# Hash of this machine's hostname; the me2me host-config file name embeds it
# (see InitialiseTestMachineForLinux below).
HOST_HASH_VALUE = hashlib.md5(socket.gethostname()).hexdigest()
NATIVE_MESSAGING_DIR = 'NativeMessagingHosts'
# On a Swarming bot where these tests are executed, a temp folder is created
# under which the files specified in an .isolate are copied. This temp folder
# has a random name, which we'll store here for use later.
# Note that the test-execution always starts from the testing/chromoting folder
# under the temp folder.
ISOLATE_CHROMOTING_HOST_PATH = 'remoting/host/linux/linux_me2me_host.py'
ISOLATE_TEMP_FOLDER = os.path.abspath(os.path.join(os.getcwd(), '../..'))
CHROMOTING_HOST_PATH = os.path.join(ISOLATE_TEMP_FOLDER,
                                    ISOLATE_CHROMOTING_HOST_PATH)
class HostOperationFailedException(Exception):
  """Raised when a me2me host operation (e.g. restart) does not complete."""
  pass
def RunCommandInSubProcess(command):
  """Creates a subprocess with command-line that is passed in.

  Args:
    command: The text of command to be executed.
  Returns:
    results: stdout contents of executing the command.
  """
  # shell=True so |command| can be a full shell string; stderr is folded
  # into stdout so failure output is captured in |results| as well.
  cmd_line = [command]
  try:
    print 'Going to run:\n%s' % command
    results = subprocess.check_output(cmd_line, stderr=subprocess.STDOUT,
                                      shell=True)
  except subprocess.CalledProcessError, e:
    # A non-zero exit is not fatal here: return whatever output the command
    # produced and let the caller decide what to do with it.
    results = e.output
  finally:
    print results
  return results
def TestMachineCleanup(user_profile_dir, host_logs=None):
  """Cleans up test machine so as not to impact other tests.

  Args:
    user_profile_dir: the user-profile folder used by Chromoting tests.
    host_logs: List of me2me host logs; these will be deleted.
  """
  # Stop the host service.
  RunCommandInSubProcess(CHROMOTING_HOST_PATH + ' --stop')
  # Cleanup any host logs.
  if host_logs:
    for host_log in host_logs:
      RunCommandInSubProcess('rm %s' % host_log)
  # Remove the user-profile dir
  if os.path.exists(user_profile_dir):
    shutil.rmtree(user_profile_dir)
def InitialiseTestMachineForLinux(cfg_file):
  """Sets up a Linux machine for connect-to-host chromoting tests.

  Copy over me2me host-config to expected locations.
  By default, the Linux me2me host expects the host-config file to be under
  $HOME/.config/chrome-remote-desktop
  Its name is expected to have a hash that is specific to a machine.

  Args:
    cfg_file: location of test account's host-config file.

  Raises:
    Exception: if host did not start properly.
  """
  # First get home directory on current machine.
  home_dir = expanduser('~')
  default_config_file_location = os.path.join(home_dir, '.config', CRD_ID)
  # Start from a clean config directory every time.
  if os.path.exists(default_config_file_location):
    shutil.rmtree(default_config_file_location)
  os.makedirs(default_config_file_location)

  # Copy over test host-config to expected location, with expected file-name.
  # The file-name should contain a hash-value that is machine-specific.
  default_config_file_name = 'host#%s.json' % HOST_HASH_VALUE
  config_file_src = os.path.join(os.getcwd(), cfg_file)
  shutil.copyfile(
      config_file_src,
      os.path.join(default_config_file_location, default_config_file_name))

  # Make sure chromoting host is running.
  RestartMe2MeHost()
def RestartMe2MeHost():
  """Stops and starts the Me2Me host on the test machine.

  Launches the me2me start-host command, and parses the stdout of the execution
  to obtain the host log-file name.

  Returns:
    log_file: Host log file.

  Raises:
    HostOperationFailedException: If host-log does not contain string
        indicating host is ready.
  """
  # To start the host, we want to be in the temp-folder for this test execution.
  # Store the current folder to return back to it later.
  previous_directory = os.getcwd()
  os.chdir(ISOLATE_TEMP_FOLDER)

  # Stop chromoting host.
  RunCommandInSubProcess(CHROMOTING_HOST_PATH + ' --stop')
  # Start chromoting host.
  print 'Starting chromoting host from %s' % CHROMOTING_HOST_PATH
  results = RunCommandInSubProcess(CHROMOTING_HOST_PATH + ' --start')

  os.chdir(previous_directory)

  # Get log file from results of above command printed to stdout. Example:
  # Log file: /tmp/tmp0c3EcP/chrome_remote_desktop_20150929_101525_B0o89t
  # NOTE(review): results.index() raises ValueError if the marker is absent —
  # assumed the start command always prints it; confirm.
  start_of_host_log = results.index('Log file: ') + len('Log file: ')
  log_file = results[start_of_host_log:].rstrip()

  # Confirm that the start process completed, and we got:
  # "Host ready to receive connections." in the log.
  if HOST_READY_INDICATOR not in results:
    # Host start failed. Print out host-log. Don't run any tests.
    with open(log_file, 'r') as f:
      print f.read()
    raise HostOperationFailedException('Host restart failed.')

  return log_file
def CleanupUserProfileDir(args):
  """Reset the Chrome user-profile directory from the parsed args.

  Despite the name, this simply delegates to SetupUserProfileDir, which
  deletes and recreates the profile directory with fresh manifests.
  """
  SetupUserProfileDir(args.me2me_manifest_file, args.it2me_manifest_file,
                      args.user_profile_dir)
def SetupUserProfileDir(me2me_manifest_file, it2me_manifest_file,
                        user_profile_dir):
  """Sets up the Google Chrome user profile directory.

  Delete the previous user profile directory if exists and create a new one.
  This invalidates any state changes by the previous test so each test can start
  with the same environment.

  When a user launches the remoting web-app, the native messaging host process
  is started. For this to work, this function places the me2me and it2me native
  messaging host manifest files in a specific folder under the user-profile dir.

  Args:
    me2me_manifest_file: location of me2me native messaging host manifest file.
    it2me_manifest_file: location of it2me native messaging host manifest file.
    user_profile_dir: Chrome user-profile-directory.
  """
  native_messaging_folder = os.path.join(user_profile_dir, NATIVE_MESSAGING_DIR)

  if os.path.exists(user_profile_dir):
    shutil.rmtree(user_profile_dir)
  # makedirs creates the profile dir and the nested NativeMessagingHosts
  # folder in one call.
  os.makedirs(native_messaging_folder)

  manifest_files = [me2me_manifest_file, it2me_manifest_file]
  for manifest_file in manifest_files:
    manifest_file_src = os.path.join(os.getcwd(), manifest_file)
    manifest_file_dest = (
        os.path.join(native_messaging_folder, os.path.basename(manifest_file)))
    shutil.copyfile(manifest_file_src, manifest_file_dest)
def PrintRunningProcesses():
  """Print the names of all running processes, sorted by process name."""
  # NOTE(review): psutil.get_process_list() is a long-deprecated psutil API;
  # newer psutil exposes process_iter() instead — confirm the psutil version
  # pinned on the bots.
  processes = psutil.get_process_list()
  processes = sorted(processes, key=lambda process: process.name)
  print 'List of running processes:\n'
  for process in processes:
    print process.name
def PrintHostLogContents(host_log_files=None):
  """Print the contents of the given host log files, sorted by file name.

  Args:
    host_log_files: list of host-log file paths; no-op when empty/None.
  """
  if host_log_files:
    host_log_contents = ''
    for log_file in sorted(host_log_files):
      with open(log_file, 'r') as log:
        host_log_contents += '\nHOST LOG %s\n CONTENTS:\n%s' % (
            log_file, log.read())
    print host_log_contents
def TestCaseSetup(args):
  """Prepare a test case: reset the profile dir and restart the me2me host.

  Returns:
    The log file produced by the restarted host (see RestartMe2MeHost).
  """
  # Reset the user profile directory to start each test with a clean slate.
  CleanupUserProfileDir(args)

  # Stop+start me2me host process.
  return RestartMe2MeHost()
def GetJidListFromTestResults(results):
  """Parse the output of a test execution to obtain the JID used by the test.

  Args:
    results: stdio contents of test execution.

  Returns:
    jids_used: List of unique JIDs used by test; empty list if not found.
  """
  # Reg-ex defining the JID information in the string being parsed.
  jid_pattern = re.compile(
      '(Connecting to )(.*.gserviceaccount.com/chromoting.*)(. Local.*)')

  jids_used = []
  for line in results.split('\n'):
    match = jid_pattern.search(line)
    if match is None:
      continue
    # Group 2 is the JID itself; record it once, preserving first-seen order.
    jid = match.group(2)
    if jid not in jids_used:
      jids_used.append(jid)
  return jids_used
def GetJidFromHostLog(host_log_file):
  """Parse the me2me host log to obtain the JID that the host registered.

  Args:
    host_log_file: path to host-log file that should be parsed for a JID.

  Returns:
    host_jid: host-JID if found in host-log, else None
  """
  with open(host_log_file, 'r') as log_file:
    for line in log_file:
      # The host JID is recorded on the line announcing 'Signaling
      # connected.'; it is the text after the final ':' (left-stripped,
      # trailing newline preserved — matches the original behavior).
      if 'Signaling connected. ' not in line:
        continue
      return line.split(':')[-1].lstrip()
  return None
|
jendap/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/cli/cli_config_test.py
|
68
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cli_config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import shutil
import tempfile
from tensorflow.python.debug.cli import cli_config
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CLIConfigTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mkdtemp()
self._tmp_config_path = os.path.join(self._tmp_dir, ".tfdbg_config")
self.assertFalse(gfile.Exists(self._tmp_config_path))
super(CLIConfigTest, self).setUp()
def tearDown(self):
shutil.rmtree(self._tmp_dir)
super(CLIConfigTest, self).tearDown()
def testConstructCLIConfigWithoutFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
self.assertEqual(True, config.get("mouse_mode"))
with self.assertRaises(KeyError):
config.get("property_that_should_not_exist")
self.assertTrue(gfile.Exists(self._tmp_config_path))
def testCLIConfigForwardCompatibilityTest(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with open(self._tmp_config_path, "rt") as f:
config_json = json.load(f)
# Remove a field to simulate forward compatibility test.
del config_json["graph_recursion_depth"]
with open(self._tmp_config_path, "wt") as f:
json.dump(config_json, f)
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(20, config.get("graph_recursion_depth"))
def testModifyConfigValue(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
self.assertEqual(9, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCasting(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", "18")
config.set("mouse_mode", "false")
self.assertEqual(18, config.get("graph_recursion_depth"))
self.assertEqual(False, config.get("mouse_mode"))
def testModifyConfigValueWithTypeCastingFailure(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
with self.assertRaises(ValueError):
config.set("mouse_mode", "maybe")
def testLoadFromModifiedConfigFile(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
config.set("graph_recursion_depth", 9)
config.set("mouse_mode", False)
config2 = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
self.assertEqual(9, config2.get("graph_recursion_depth"))
self.assertEqual(False, config2.get("mouse_mode"))
def testSummarizeFromConfig(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize()
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
def testSummarizeFromConfigWithHighlight(self):
config = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
output = config.summarize(highlight="mouse_mode")
self.assertEqual(
["Command-line configuration:",
"",
" graph_recursion_depth: %d" % config.get("graph_recursion_depth"),
" mouse_mode: %s" % config.get("mouse_mode")], output.lines)
self.assertEqual((2, 12, ["underline", "bold"]),
output.font_attr_segs[3][0])
self.assertEqual((14, 18, "bold"), output.font_attr_segs[3][1])
def testSetCallback(self):
    """A registered callback fires when its property is modified."""
    cfg = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
    observed = {"graph_recursion_depth": -1}

    def record(config):
        # Capture the value seen at callback time.
        observed["graph_recursion_depth"] = config.get("graph_recursion_depth")

    cfg.set_callback("graph_recursion_depth", record)
    cfg.set("graph_recursion_depth", cfg.get("graph_recursion_depth") - 1)
    self.assertEqual(observed["graph_recursion_depth"],
                     cfg.get("graph_recursion_depth"))
def testSetCallbackInvalidPropertyName(self):
    """Registering a callback on an unknown property raises KeyError."""
    cfg = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
    with self.assertRaises(KeyError):
        cfg.set_callback("nonexistent_property_name", print)
def testSetCallbackNotCallable(self):
    """Registering a non-callable as a callback raises TypeError."""
    cfg = cli_config.CLIConfig(config_file_path=self._tmp_config_path)
    with self.assertRaises(TypeError):
        cfg.set_callback("graph_recursion_depth", 1)
if __name__ == "__main__":
    # Run all googletest test cases defined in this module.
    googletest.main()
|
jamespcole/home-assistant
|
refs/heads/master
|
homeassistant/components/homematic/switch.py
|
1
|
"""Support for HomeMatic switches."""
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import STATE_UNKNOWN
from . import ATTR_DISCOVER_DEVICES, HMDevice
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematic']
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the HomeMatic switch platform."""
    if discovery_info is None:
        return

    # One HMSwitch entity per discovered HomeMatic device config.
    add_entities(
        [HMSwitch(conf) for conf in discovery_info[ATTR_DISCOVER_DEVICES]])
class HMSwitch(HMDevice, SwitchDevice):
    """Representation of a HomeMatic switch."""

    @property
    def is_on(self):
        """Return True if switch is on."""
        try:
            return self._hm_get_state() > 0
        except TypeError:
            # Comparison fails when the state is not yet known (non-numeric),
            # so report the switch as off.
            return False

    @property
    def today_energy_kwh(self):
        """Return the current power usage in kWh."""
        if "ENERGY_COUNTER" in self._data:
            try:
                # Counter is reported in Wh; convert to kWh.
                return self._data["ENERGY_COUNTER"] / 1000
            except ZeroDivisionError:
                # NOTE(review): the divisor is the constant 1000, so
                # ZeroDivisionError can never actually occur here; a missing
                # counter value would raise TypeError instead — confirm the
                # intended guard.
                return 0

        # Device has no power meter.
        return None

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self._hmdevice.on(self._channel)

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self._hmdevice.off(self._channel)

    def _init_data_struct(self):
        """Generate the data dictionary (self._data) from metadata."""
        self._state = "STATE"
        self._data.update({self._state: STATE_UNKNOWN})

        # Need sensor values for SwitchPowermeter
        for node in self._hmdevice.SENSORNODE:
            self._data.update({node: STATE_UNKNOWN})
|
erwilan/ansible
|
refs/heads/devel
|
lib/ansible/plugins/terminal/asa.py
|
57
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin for Cisco ASA CLI sessions.

    Supplies the prompt/error regexes used to delimit command output and the
    privilege escalation (enable mode) hooks.
    """

    # Prompts that mark the end of a block of stdout, e.g. 'hostname>' or
    # 'hostname(config)#'.
    terminal_stdout_re = [
        re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
    ]

    # Patterns that indicate the device rejected or failed a command.
    terminal_stderr_re = [
        re.compile(r"% ?Error"),
        re.compile(r"^% \w+", re.M),
        re.compile(r"% ?Bad secret"),
        re.compile(r"invalid input", re.I),
        re.compile(r"(?:incomplete|ambiguous) command", re.I),
        re.compile(r"connection timed out", re.I),
        re.compile(r"[^\r\n]+ not found", re.I),
        re.compile(r"'[^']' +returned error code: ?\d+"),
    ]

    def authorize(self, passwd=None):
        """Elevate the session to privileged (enable) mode.

        :param passwd: optional enable password, sent when prompted.
        :raises AnsibleConnectionFailure: if escalation fails.
        """
        # Already privileged if the prompt ends with '#'.
        if self._get_prompt().endswith('#'):
            return

        cmd = {'command': 'enable'}
        if passwd:
            cmd['prompt'] = r"[\r\n]?password: $"
            cmd['answer'] = passwd

        try:
            self._exec_cli_command(json.dumps(cmd))
            # Disable output paging so long command output is not truncated.
            self._exec_cli_command('terminal pager 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')

    def on_deauthorize(self):
        """Drop out of privileged/config mode before the session is closed."""
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return

        if '(config' in prompt:
            # Leave configuration mode first, then drop privileges.
            self._exec_cli_command('end')
            self._exec_cli_command('disable')

        elif prompt.endswith('#'):
            self._exec_cli_command('disable')
|
fevxie/odoo
|
refs/heads/8.0
|
addons/l10n_fr_hr_payroll/report/fiche_paye.py
|
303
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.report import report_sxw
class fiche_paye_parser(report_sxw.rml_parse):
    """RML parser providing helpers for the French payslip report
    (fiche de paye)."""

    def __init__(self, cr, uid, name, context):
        super(fiche_paye_parser, self).__init__(cr, uid, name, context)
        # Helpers exposed to the report template.
        self.localcontext.update({
            'lang': "fr_FR",
            'get_payslip_lines': self.get_payslip_lines,
            'get_total_by_rule_category': self.get_total_by_rule_category,
            'get_employer_line': self.get_employer_line,
        })

    def get_payslip_lines(self, objs):
        """Return browse records of the top-level lines shown on the slip.

        Keeps only lines flagged ``appears_on_payslip`` whose salary rule has
        no parent rule (child rules are fetched via get_employer_line).
        """
        payslip_line = self.pool.get('hr.payslip.line')
        # Truthiness test instead of the old '== True' comparison.
        ids = [item.id for item in objs
               if item.appears_on_payslip
               and not item.salary_rule_id.parent_rule_id]
        return payslip_line.browse(self.cr, self.uid, ids) if ids else []

    def get_total_by_rule_category(self, obj, code):
        """Sum the totals of this slip's lines in rule category ``code``.

        Returns 0 when the category code does not exist or has no lines.
        """
        payslip_line = self.pool.get('hr.payslip.line')
        rule_cate_obj = self.pool.get('hr.salary.rule.category')

        cate_ids = rule_cate_obj.search(self.cr, self.uid, [('code', '=', code)])
        category_total = 0
        if cate_ids:
            line_ids = payslip_line.search(
                self.cr, self.uid,
                [('slip_id', '=', obj.id), ('category_id.id', '=', cate_ids[0])])
            for line in payslip_line.browse(self.cr, self.uid, line_ids):
                category_total += line.total
        return category_total

    def get_employer_line(self, obj, parent_line):
        """Return the first child line of ``parent_line`` on slip ``obj``,
        or False when there is none (employer contribution lines)."""
        payslip_line = self.pool.get('hr.payslip.line')
        line_ids = payslip_line.search(
            self.cr, self.uid,
            [('slip_id', '=', obj.id),
             ('salary_rule_id.parent_rule_id.id', '=',
              parent_line.salary_rule_id.id)])
        return payslip_line.browse(self.cr, self.uid, line_ids[0]) if line_ids \
            else False
class wrapped_report_fiche_paye(osv.AbstractModel):
    """Abstract report model wrapping the legacy RML parser above so the
    payslip renders through the QWeb report engine."""
    _name = 'report.l10n_fr_hr_payroll.report_l10nfrfichepaye'
    _inherit = 'report.abstract_report'
    _template = 'l10n_fr_hr_payroll.report_l10nfrfichepaye'
    _wrapped_report_class = fiche_paye_parser
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
scpeters/catkin
|
refs/heads/indigo-devel
|
test/utils.py
|
11
|
from __future__ import print_function
from sys import version_info
import os
import shutil
import subprocess
import unittest
import tempfile
# import platform
# ubuntudist = platform.dist()[2]
# Install subpath for Python modules inside an install prefix, e.g.
# 'lib/python2.7/dist-packages' (derived from the running interpreter).
PYTHON_INSTALL_PATH = os.path.join('lib',
                                   'python%u.%u' % (version_info[0],
                                                    version_info[1]),
                                   'dist-packages')

# Directory layout: this test dir, the catkin checkout above it, and a
# scratch area under it (created on import if missing).
TESTS_DIR = os.path.dirname(__file__)
CATKIN_DIR = os.path.dirname(TESTS_DIR)
TEMP_DIR = os.path.join(TESTS_DIR, 'tmp')
if not os.path.isdir(TEMP_DIR):
    os.makedirs(TEMP_DIR)
# network_tests_path = os.path.join(TESTS_DIR, 'network_tests')
MOCK_DIR = os.path.join(TESTS_DIR, 'mock_resources')
# MAKE_CMD = ['make', 'VERBOSE=1', '-j8']
MAKE_CMD = ['make', '-j8']
def rosinstall(pth, specfile):
    """Invoke rosinstall in ``pth`` with the given spec file.

    Skipped entirely when ``pth`` already contains a .rosinstall file,
    to save testing time.
    """
    assert os.path.exists(specfile), specfile
    already_initialized = os.path.exists(os.path.join(pth, '.rosinstall'))
    if already_initialized:
        return
    succeed(["rosinstall", "-j8", "--catkin", "-n",
             pth, specfile, '--continue-on-error'], cwd=TESTS_DIR)
def run(args, **kwargs):
    """
    Call to Popen, returns (errcode, stdout, stderr)
    """
    print("run:", args)
    cwd = kwargs.get('cwd', None)
    proc = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
    print("P==", proc.__dict__)
    stdout, stderr = proc.communicate()
    return (proc.returncode, stdout, stderr)
def create_catkin_workspace(pth):
    """
    prepares path to be a catkin workspace, by copying catkin and
    creating a CMakeLists.txt
    """
    if not os.path.isdir(pth):
        os.makedirs(pth)
    # Always start from a clean copy of catkin inside the workspace.
    catkin_dir = os.path.join(pth, 'catkin')
    if os.path.isdir(catkin_dir):
        shutil.rmtree(catkin_dir)

    # copy current catkin sources into workspace
    # avoid copying tmp, as that may contain all of ros core
    def notest(folder, contents):
        # shutil.copytree 'ignore' callback: skip the 'tmp' entry of any
        # folder whose name ends in 'test' (i.e. this tests directory).
        if folder.endswith('test'):
            return ['tmp']
        return []
    shutil.copytree(CATKIN_DIR, catkin_dir, symlinks=True, ignore=notest)
    assert (os.path.exists(pth + "/catkin/cmake/toplevel.cmake")), \
        pth + "/catkin/cmake/toplevel.cmake"

    # workaround for current rosinstall creating flawed CMakelists
    # Replace any generated CMakeLists.txt with a symlink to catkin's
    # toplevel.cmake.
    workspace_cmake = os.path.join(pth, "CMakeLists.txt")
    if os.path.isfile(workspace_cmake):
        os.remove(workspace_cmake)
    succeed(["/bin/ln", "-s", "catkin/cmake/toplevel.cmake",
             "CMakeLists.txt"],
            cwd=pth)
def succeed(cmd, **kwargs):
    """Run ``cmd`` via run(), assert it exits 0, and return its stdout."""
    print(">>>", cmd, kwargs)
    returncode, stdout, _ = run(cmd, **kwargs)
    print("<<<", stdout)
    assert returncode == 0, "cmd failed with result %s:\n %s " % (returncode, str(cmd))
    return stdout
def fail(cmd, **kwargs):
    """
    Run ``cmd`` expecting it to return non-zero; returns captured stdout.

    Raises AssertionError if the command unexpectedly succeeds.
    """
    print(">>>", cmd, kwargs)
    # Fix: the old call also passed withexitstatus=True, an argument that
    # run() silently ignored (it only reads 'cwd' from kwargs) — dropped.
    (r, out, err) = run(cmd, **kwargs)
    print("<<<", out)
    assert 0 != r, """cmd succeeded, though should fail: %s
result=%u\n output=\n%s""" % (cmd, r, out)
    return out
class AbstractCatkinWorkspaceTest(unittest.TestCase):
    """
    Parent class for any test case that creates a workspace and calls
    cmake, make, and make install. Creates a suitable folder structure
    either in /tmp or in a root folder specified on init, that is a
    build folder, and a src folder with latest catkin from source.
    """

    def __init__(self, testCaseName, rootdir=None):
        # rootdir: optional pre-existing root folder; when None a temporary
        # folder is created per test (and deleted in tearDown).
        super(AbstractCatkinWorkspaceTest, self).__init__(testCaseName)
        self.rootdir = rootdir

    def setUp(self):
        """Create the root/build/src/install layout and populate src."""
        # directories to delete in teardown
        self.directories = {}
        if self.rootdir is None:
            self.rootdir = tempfile.mkdtemp()
            # Only temp dirs we created ourselves are cleaned up.
            self.directories['root'] = self.rootdir

        self.builddir = os.path.join(self.rootdir, "build")
        self.develspace = os.path.join(self.builddir, 'devel')
        self.workspacedir = os.path.join(self.rootdir, "src")
        self.installdir = os.path.join(self.rootdir, "install")
        if not os.path.exists(self.builddir):
            os.makedirs(self.builddir)
        self.setupWorkspaceContents()

    def setupWorkspaceContents(self):
        # Overridable hook: fill the src folder with a catkin workspace.
        create_catkin_workspace(self.workspacedir)

    # comment this to investigate results, cleanup tmp folders
    # manually
    def tearDown(self):
        """Remove every directory recorded in self.directories."""
        for d in self.directories:
            shutil.rmtree(self.directories[d])
        self.directories = {}

    def cmake(self,
              cwd=None,
              srcdir=None,
              installdir=None,
              prefix_path=None,
              expect=succeed,
              **kwargs):
        """
        invokes cmake
        :param cwd: changes build dir
        :param srcdir: changes sourcedir
        :param installdir: changes installdir
        :param prefix_path: where to cmake against (where to find)
        :param expect: one of functions: succeed, fail
        :param kwargs: (cwd, srcdir, expect) or stuff that will be
        added to the cmake command
        """
        args = []
        if cwd is None:
            cwd = self.builddir
        if srcdir is None:
            srcdir = self.workspacedir
        this_builddir = cwd
        this_srcdir = srcdir
        print("v~_", this_builddir, this_srcdir)
        # Default flag set makes dpkg-buildpackage do unsigned source builds.
        if 'CATKIN_DPKG_BUILDPACKAGE_FLAGS' not in kwargs:
            kwargs['CATKIN_DPKG_BUILDPACKAGE_FLAGS'] = '-d;-S;-us;-uc'
        # Every remaining kwarg becomes a -DKEY=VALUE cmake definition.
        for k, v in kwargs.items():
            print("~v^v~", k, v)
            args += ["-D%s=%s" % (k, v)]
        if not 'CMAKE_INSTALL_PREFIX' in kwargs:
            if installdir is None:
                installdir = self.installdir
            args += ["-DCMAKE_INSTALL_PREFIX=%s" % (installdir)]
        if not 'CMAKE_PREFIX_PATH' in kwargs:
            if prefix_path is None:
                prefix_path = self.installdir
            args += ["-DCMAKE_PREFIX_PATH=%s" % (prefix_path)]
        if not os.path.isdir(this_builddir):
            os.makedirs(this_builddir)
        cmd = ["cmake", this_srcdir] + args
        o = expect(cmd, cwd=this_builddir)
        # On success cmake must have generated a cache and a Makefile.
        if (expect == succeed):
            self.assertTrue(os.path.isfile(this_builddir + "/CMakeCache.txt"))
            self.assertTrue(os.path.isfile(this_builddir + "/Makefile"))
        return o
def assert_exists(prefix, *args):
    """
    Convenience function calling exists for all files in args with
    prefix
    """
    for name in args:
        full_path = os.path.join(prefix, name)
        print("Checking for", full_path)
        assert os.path.exists(full_path), "%s doesn't exist" % full_path
|
brain-tec/account-invoicing
|
refs/heads/8.0
|
account_invoice_supplierinfo_update_on_validate/tests/test_account_invoice_supplierinfo_update.py
|
6
|
# -*- coding: utf-8 -*-
# © 2016 Chafique DELLI @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp.tests.common import TransactionCase
class Tests(TransactionCase):
    """Tests for the supplierinfo-update-on-validate wizard."""

    def setUp(self):
        super(Tests, self).setUp()
        self.wizard_obj = self.env['wizard.update.invoice.supplierinfo']
        self.supplierinfo_obj = self.env['product.supplierinfo']
        self.partnerinfo_obj = self.env['pricelist.partnerinfo']
        # Demo supplier invoice shipped with this module.
        self.invoice = self.env.ref(
            'account_invoice_supplierinfo_update_on_validate.'
            'account_invoice_6')

    def test_validate_without_update_pricelist_supplierinfo_product_template(
            self):
        # supplier invoice with pricelist supplierinfo to update and
        # product supplierinfo is on product_template
        # check_supplierinfo() returns an action whose context pre-fills the
        # wizard defaults.
        vals_wizard = self.invoice.check_supplierinfo().get('context', {})
        line_ids = vals_wizard.get('default_line_ids', {})
        invoice_id = vals_wizard.get('default_invoice_id', {})
        self.assertEquals(len(line_ids), 1)
        # line_ids entries are (0, 0, values) one2many create tuples.
        self.assertEquals(line_ids[0][2]['current_price'], False)
        self.assertEquals(line_ids[0][2]['new_price'], 400.0)
        wizard = self.wizard_obj.create({
            'line_ids': line_ids,
            'invoice_id': invoice_id,
        })
        # validate invoice without update supplierinfo
        wizard.with_context(
            active_id=wizard.invoice_id.id).set_supplierinfo_ok()
        self.assertEquals(self.invoice.state, 'open')
        self.assertEquals(self.invoice.supplierinfo_ok, True)
        # No supplierinfo record may have been created for this
        # supplier/product template.
        supplierinfos = self.supplierinfo_obj.search([
            ('name', '=', self.invoice.supplier_partner_id.id),
            (
                'product_tmpl_id', '=',
                self.invoice.invoice_line[0].product_id.product_tmpl_id.id),
        ])
        self.assertEquals(len(supplierinfos), 0)
|
ClearCorp/server-tools
|
refs/heads/9.0
|
base_view_inheritance_extension/__init__.py
|
12
|
# -*- coding: utf-8 -*-
# © 2016 Therp BV <http://therp.nl>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from . import models
|
mkoistinen/django-cms
|
refs/heads/develop
|
cms/utils/setup.py
|
7
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from cms.utils.compat.dj import is_installed as app_is_installed
def validate_dependencies():
    """
    Check for installed apps, their versions and configuration options
    """
    # Guard-clause form: bail out early when the dependency is present.
    if app_is_installed('treebeard'):
        return
    raise ImproperlyConfigured('django CMS requires django-treebeard. Please install it and add "treebeard" to INSTALLED_APPS.')
def validate_settings():
    """
    Check project settings file for required options

    Raises ImproperlyConfigured when TEMPLATES lacks a DjangoTemplates
    backend, or when that backend lacks the request context processor.
    """
    try:
        django_backend = [x for x in settings.TEMPLATES
                          if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0]
    except IndexError:
        # Fix: the previous message wrongly complained about a missing
        # context processor, hiding the real problem (no DjangoTemplates
        # backend configured at all).
        raise ImproperlyConfigured(
            "django CMS requires a 'django.template.backends.django.DjangoTemplates' "
            "backend in the TEMPLATES setting.")
    context_processors = django_backend.get('OPTIONS', {}).get('context_processors', [])
    if ('django.core.context_processors.request' not in context_processors and
            'django.template.context_processors.request' not in context_processors):
        raise ImproperlyConfigured("django CMS requires django.template.context_processors.request in "
                                   "'django.template.backends.django.DjangoTemplates' context processors.")
def setup():
    """
    Gather all checks and validations
    """
    # NOTE(review): import is function-local, presumably to avoid a circular
    # import at module load time — confirm before moving it to the top.
    from cms.plugin_pool import plugin_pool
    validate_dependencies()
    validate_settings()
    plugin_pool.validate_templates()
|
e-schumann/proxy_doodle
|
refs/heads/master
|
external/boost/tools/build/src/build/project.py
|
8
|
# Status: ported.
# Base revision: 64488
# Copyright 2002, 2003 Dave Abrahams
# Copyright 2002, 2005, 2006 Rene Rivera
# Copyright 2002, 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Implements project representation and loading. Each project is represented
# by:
# - a module where all the Jamfile content live.
# - an instance of 'project-attributes' class.
# (given a module name, can be obtained using the 'attributes' rule)
# - an instance of 'project-target' class (from targets.jam)
# (given a module name, can be obtained using the 'target' rule)
#
# Typically, projects are created as result of loading a Jamfile, which is done
# by rules 'load' and 'initialize', below. First, module for Jamfile is loaded
# and new project-attributes instance is created. Some rules necessary for
# project are added to the module (see 'project-rules' module) at the bottom of
# this file. Default project attributes are set (inheriting attributes of
# parent project, if it exists). After that the Jamfile is read. It can declare
# its own attributes using the 'project' rule which will be combined with any
# already set attributes.
#
# The 'project' rule can also declare a project id which will be associated
# with the project module.
#
# There can also be 'standalone' projects. They are created by calling
# 'initialize' on an arbitrary module and not specifying their location. After
# the call, the module can call the 'project' rule, declare main targets and
# behave as a regular project except that, since it is not associated with any
# location, it should only declare prebuilt targets.
#
# The list of all loaded Jamfiles is stored in the .project-locations variable.
# It is possible to obtain a module name for a location using the 'module-name'
# rule. Standalone projects are not recorded and can only be references using
# their project id.
import b2.util.path
import b2.build.targets
from b2.build import property_set, property
from b2.build.errors import ExceptionWithUserContext
from b2.manager import get_manager
import bjam
import b2
import re
import sys
import pkgutil
import os
import string
import imp
import traceback
import b2.util.option as option
from b2.util import (
record_jam_to_value_mapping, qualify_jam_action, is_iterable_typed, bjam_signature,
is_iterable)
class ProjectRegistry:
def __init__(self, manager, global_build_dir):
    """Create the project registry.

    manager -- the global b2 Manager instance.
    global_build_dir -- value of --build-dir, or None.
    """
    self.manager = manager
    self.global_build_dir = global_build_dir
    self.project_rules_ = ProjectRules(self)

    # The target corresponding to the project being loaded now
    self.current_project = None

    # The set of names of loaded project modules
    self.jamfile_modules = {}

    # Mapping from location to module name
    self.location2module = {}

    # Mapping from project id to project module
    self.id2module = {}

    # Map from Jamfile directory to parent Jamfile/Jamroot
    # location.
    self.dir2parent_jamfile = {}

    # Map from directory to the name of Jamfile in
    # that directory (or None).
    self.dir2jamfile = {}

    # Map from project module to attributes object.
    self.module2attributes = {}

    # Map from project module to target for the project
    self.module2target = {}

    # Map from names to Python modules, for modules loaded
    # via 'using' and 'import' rules in Jamfiles.
    self.loaded_tool_modules_ = {}
    self.loaded_tool_module_path_ = {}

    # Map from project target to the list of
    # (id,location) pairs corresponding to all 'use-project'
    # invocations.
    # TODO: should not have a global map, keep this
    # in ProjectTarget.
    self.used_projects = {}

    self.saved_current_project = []

    # Filename patterns recognized as project roots (overridable via the
    # JAMROOT environment variable).
    self.JAMROOT = self.manager.getenv("JAMROOT");

    # Note the use of character groups, as opposed to listing
    # 'Jamroot' and 'jamroot'. With the latter, we'd get duplicate
    # matches on windows and would have to eliminate duplicates.
    if not self.JAMROOT:
        self.JAMROOT = ["project-root.jam", "[Jj]amroot", "[Jj]amroot.jam"]

    # Default patterns to search for the Jamfiles to use for build
    # declarations.
    self.JAMFILE = self.manager.getenv("JAMFILE")

    if not self.JAMFILE:
        self.JAMFILE = ["[Bb]uild.jam", "[Jj]amfile.v2", "[Jj]amfile",
                        "[Jj]amfile.jam"]

    # Cache for Python modules imported on behalf of Jamfiles.
    self.__python_module_cache = {}
def load (self, jamfile_location):
    """Loads jamfile at the given location. After loading, project global
    file and jamfile needed by the loaded one will be loaded recursively.
    If the jamfile at that location is loaded already, does nothing.
    Returns the project module for the Jamfile."""
    assert isinstance(jamfile_location, basestring)

    # Normalize the location to a path relative to the current directory.
    absolute = os.path.join(os.getcwd(), jamfile_location)
    absolute = os.path.normpath(absolute)
    jamfile_location = b2.util.path.relpath(os.getcwd(), absolute)

    mname = self.module_name(jamfile_location)
    # If Jamfile is already loaded, do not try again.
    if not mname in self.jamfile_modules:

        if "--debug-loading" in self.manager.argv():
            print "Loading Jamfile at '%s'" % jamfile_location
        self.load_jamfile(jamfile_location, mname)

        # We want to make sure that child project are loaded only
        # after parent projects. In particular, because parent projects
        # define attributes which are inherited by children, and we do not
        # want children to be loaded before parents has defined everything.
        #
        # While "build-project" and "use-project" can potentially refer
        # to child projects from parent projects, we do not immediately
        # load child projects when seing those attributes. Instead,
        # we record the minimal information that will be used only later.
        self.load_used_projects(mname)

    return mname
def load_used_projects(self, module_name):
    """Resolve (via 'use') every project that 'module_name' recorded
    through 'use-project' invocations."""
    assert isinstance(module_name, basestring)
    # local used = [ modules.peek $(module-name) : .used-projects ] ;
    base_location = self.attribute(module_name, "location")
    for project_id, relative_path in self.used_projects[module_name]:
        self.use(project_id, os.path.join(base_location, relative_path))
def load_parent(self, location):
    """Loads parent of Jamfile at 'location'.
    Issues an error if nothing is found."""
    assert isinstance(location, basestring)
    # Search upward through parent directories for a Jamroot or Jamfile.
    found = b2.util.path.glob_in_parents(
        location, self.JAMROOT + self.JAMFILE)

    if not found:
        print "error: Could not find parent for project at '%s'" % location
        print "error: Did not find Jamfile.jam or Jamroot.jam in any parent directory."
        sys.exit(1)

    return self.load(os.path.dirname(found[0]))
def find(self, name, current_location):
    """Given 'name' which can be project-id or plain directory name,
    return project module corresponding to that id or directory.
    Returns nothing of project is not found."""
    assert isinstance(name, basestring)
    assert isinstance(current_location, basestring)

    project_module = None

    # Try interpreting name as project id.
    if name[0] == '/':
        project_module = self.id2module.get(name)

    if not project_module:
        # Fall back to treating 'name' as a directory relative to
        # 'current_location'.
        location = os.path.join(current_location, name)
        # If no project is registered for the given location, try to
        # load it. First see if we have Jamfile. If not we might have project
        # root, willing to act as Jamfile. In that case, project-root
        # must be placed in the directory referred by id.
        project_module = self.module_name(location)
        if not project_module in self.jamfile_modules:
            if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE):
                project_module = self.load(location)
            else:
                project_module = None

    return project_module
def module_name(self, jamfile_location):
    """Return the module name for 'jamfile_location', creating and caching
    a default name ("Jamfile<abs-path>") on first use."""
    assert isinstance(jamfile_location, basestring)
    cached = self.location2module.get(jamfile_location)
    if cached:
        return cached
    # Root the path so that names are unambiguous: without this we could
    # not tell whether '../../exe/program1' and '.' denote the same
    # project. Note: the cache entry is keyed by the resolved path, as in
    # the original implementation.
    resolved = os.path.realpath(
        os.path.join(os.getcwd(), jamfile_location))
    module = "Jamfile<%s>" % resolved
    self.location2module[resolved] = module
    return module
def find_jamfile (self, dir, parent_root=0, no_errors=0):
    """Find the Jamfile at the given location. This returns the
    exact names of all the Jamfiles in the given directory. The optional
    parent-root argument causes this to search not the given directory
    but the ones above it up to the directory given in it."""
    assert isinstance(dir, basestring)
    assert isinstance(parent_root, (int, bool))
    assert isinstance(no_errors, (int, bool))

    # Glob for all the possible Jamfiles according to the match pattern.
    #
    jamfile_glob = None
    if parent_root:
        # Search parent directories; results are memoized per directory.
        parent = self.dir2parent_jamfile.get(dir)
        if not parent:
            parent = b2.util.path.glob_in_parents(dir,
                                                  self.JAMFILE)
            self.dir2parent_jamfile[dir] = parent
        jamfile_glob = parent
    else:
        # Search only 'dir' itself; also memoized.
        jamfile = self.dir2jamfile.get(dir)
        if not jamfile:
            jamfile = b2.util.path.glob([dir], self.JAMFILE)
            self.dir2jamfile[dir] = jamfile
        jamfile_glob = jamfile

    if len(jamfile_glob) > 1:
        # Multiple Jamfiles found in the same place. Warn about this.
        # And ensure we use only one of them.
        # As a temporary convenience measure, if there's Jamfile.v2 amount
        # found files, suppress the warning and use it.
        #
        pattern = "(.*[Jj]amfile\\.v2)|(.*[Bb]uild\\.jam)"
        v2_jamfiles = [x for x in jamfile_glob if re.match(pattern, x)]
        if len(v2_jamfiles) == 1:
            jamfile_glob = v2_jamfiles
        else:
            print """warning: Found multiple Jamfiles at '%s'!""" % (dir)
            for j in jamfile_glob:
                print "    -", j
            print "Loading the first one"

    # Could not find it, error.
    if not no_errors and not jamfile_glob:
        self.manager.errors()(
            """Unable to load Jamfile.
Could not find a Jamfile in directory '%s'
Attempted to find it with pattern '%s'.
Please consult the documentation at 'http://boost.org/boost-build2'."""
            % (dir, string.join(self.JAMFILE)))

    if jamfile_glob:
        return jamfile_glob[0]
def load_jamfile(self, dir, jamfile_module):
    """Load a Jamfile at the given directory. Returns nothing.
    Will attempt to load the file as indicated by the JAMFILE patterns.
    Effect of calling this rule twice with the same 'dir' is underfined."""
    assert isinstance(dir, basestring)
    assert isinstance(jamfile_module, basestring)

    # See if the Jamfile is where it should be.
    is_jamroot = False
    jamfile_to_load = b2.util.path.glob([dir], self.JAMROOT)
    if jamfile_to_load:
        if len(jamfile_to_load) > 1:
            get_manager().errors()(
                "Multiple Jamfiles found at '{}'\n"
                "Filenames are: {}"
                .format(dir, ' '.join(os.path.basename(j) for j in jamfile_to_load))
            )
        is_jamroot = True
        jamfile_to_load = jamfile_to_load[0]
    else:
        # No Jamroot here; fall back to a plain Jamfile lookup.
        jamfile_to_load = self.find_jamfile(dir)

    dir = os.path.dirname(jamfile_to_load)
    if not dir:
        dir = "."

    self.used_projects[jamfile_module] = []

    # Now load the Jamfile in it's own context.
    # The call to 'initialize' may load parent Jamfile, which might have
    # 'use-project' statement that causes a second attempt to load the
    # same project we're loading now. Checking inside .jamfile-modules
    # prevents that second attempt from messing up.
    if not jamfile_module in self.jamfile_modules:
        previous_project = self.current_project
        # Initialize the jamfile module before loading.
        self.initialize(jamfile_module, dir, os.path.basename(jamfile_to_load))
        if not jamfile_module in self.jamfile_modules:
            saved_project = self.current_project
            self.jamfile_modules[jamfile_module] = True
            bjam.call("load", jamfile_module, jamfile_to_load)

            if is_jamroot:
                # A Jamroot may coexist with a regular Jamfile in the same
                # directory; load that one too when present.
                jamfile = self.find_jamfile(dir, no_errors=True)
                if jamfile:
                    bjam.call("load", jamfile_module, jamfile)

            # Now do some checks
            if self.current_project != saved_project:
                from textwrap import dedent
                self.manager.errors()(dedent(
                    """
                    The value of the .current-project variable has magically changed
                    after loading a Jamfile. This means some of the targets might be
                    defined a the wrong project.
                    after loading %s
                    expected value %s
                    actual value %s
                    """
                    % (jamfile_module, saved_project, self.current_project)
                ))

            self.end_load(previous_project)

            if self.global_build_dir:
                id = self.attributeDefault(jamfile_module, "id", None)
                project_root = self.attribute(jamfile_module, "project-root")
                location = self.attribute(jamfile_module, "location")

                if location and project_root == dir:
                    # This is Jamroot
                    if not id:
                        # FIXME: go via errors module, so that contexts are
                        # shown?
                        print "warning: the --build-dir option was specified"
                        print "warning: but Jamroot at '%s'" % dir
                        print "warning: specified no project id"
                        print "warning: the --build-dir option will be ignored"
def end_load(self, previous_project=None):
    """Finish loading the current project and restore 'previous_project'
    (or None) as the project being loaded."""
    if not self.current_project:
        self.manager.errors()(
            'Ending project loading requested when there was no project currently '
            'being loaded.'
        )

    if not previous_project and self.saved_current_project:
        self.manager.errors()(
            'Ending project loading requested with no "previous project" when there '
            'other projects still being loaded recursively.'
        )
    self.current_project = previous_project
def load_standalone(self, jamfile_module, file):
    """Loads 'file' as standalone project that has no location
    associated with it. This is mostly useful for user-config.jam,
    which should be able to define targets, but although it has
    some location in filesystem, we do not want any build to
    happen in user's HOME, for example.

    The caller is required to never call this method twice on
    the same file.
    """
    assert isinstance(jamfile_module, basestring)
    assert isinstance(file, basestring)
    self.used_projects[jamfile_module] = []
    # Load the Jamfile content, then resolve any 'use-project'
    # references it recorded.
    bjam.call("load", jamfile_module, file)
    self.load_used_projects(jamfile_module)
def is_jamroot(self, basename):
    """Return 1 when 'basename' matches any Jamroot filename pattern,
    otherwise 0."""
    assert isinstance(basename, basestring)
    for pattern in self.JAMROOT:
        if re.match(pattern, basename):
            return 1
    return 0
def initialize(self, module_name, location=None, basename=None, standalone_path=''):
    """Initialize the module for a project.

    module-name is the name of the project module.
    location is the location (directory) of the project to initialize.
    If not specified, standalone project will be initialized
    standalone_path is the path to the source-location.
    this should only be called from the python side.
    """
    assert isinstance(module_name, basestring)
    assert isinstance(location, basestring) or location is None
    assert isinstance(basename, basestring) or basename is None
    jamroot = False
    parent_module = None

    # Configuration modules form a fixed inheritance chain:
    # test-config <- site-config <- user-config <- project-config.
    if module_name == "test-config":
        # No parent
        pass
    elif module_name == "site-config":
        parent_module = "test-config"
    elif module_name == "user-config":
        parent_module = "site-config"
    elif module_name == "project-config":
        parent_module = "user-config"
    elif location and not self.is_jamroot(basename):
        # We search for parent/project-root only if jamfile was specified
        # --- i.e
        # if the project is not standalone.
        parent_module = self.load_parent(location)
    elif location:
        # It's either jamroot, or standalone project.
        # If it's jamroot, inherit from user-config.
        # If project-config module exist, inherit from it.
        parent_module = 'user-config'
        if 'project-config' in self.module2attributes:
            parent_module = 'project-config'
        jamroot = True

    # TODO: need to consider if standalone projects can do anything but defining
    # prebuilt targets. If so, we need to give more sensible "location", so that
    # source paths are correct.
    if not location:
        location = ""

    # the call to load_parent() above can end up loading this module again
    # make sure we don't reinitialize the module's attributes
    if module_name not in self.module2attributes:
        if "--debug-loading" in self.manager.argv():
            print "Initializing project '%s'" % module_name
        attributes = ProjectAttributes(self.manager, location, module_name)
        self.module2attributes[module_name] = attributes

        python_standalone = False
        if location:
            attributes.set("source-location", [location], exact=1)
        elif not module_name in ["test-config", "site-config", "user-config", "project-config"]:
            # This is a standalone project with known location. Set source location
            # so that it can declare targets. This is intended so that you can put
            # a .jam file in your sources and use it via 'using'. Standard modules
            # (in 'tools' subdir) may not assume source dir is set.
            source_location = standalone_path
            if not source_location:
                source_location = self.loaded_tool_module_path_.get(module_name)
            if not source_location:
                self.manager.errors()('Standalone module path not found for "{}"'
                                      .format(module_name))
            attributes.set("source-location", [source_location], exact=1)
            python_standalone = True

        # Fresh (empty) property sets for the new project's attributes.
        attributes.set("requirements", property_set.empty(), exact=True)
        attributes.set("usage-requirements", property_set.empty(), exact=True)
        attributes.set("default-build", property_set.empty(), exact=True)
        attributes.set("projects-to-build", [], exact=True)
        attributes.set("project-root", None, exact=True)
        attributes.set("build-dir", None, exact=True)

        self.project_rules_.init_project(module_name, python_standalone)

        if parent_module:
            self.inherit_attributes(module_name, parent_module)
            attributes.set("parent-module", parent_module, exact=1)

        if jamroot:
            attributes.set("project-root", location, exact=1)

    parent = None
    if parent_module:
        parent = self.target(parent_module)

    if module_name not in self.module2target:
        target = b2.build.targets.ProjectTarget(self.manager,
            module_name, module_name, parent,
            self.attribute(module_name, "requirements"),
            # FIXME: why we need to pass this? It's not
            # passed in jam code.
            self.attribute(module_name, "default-build"))
        self.module2target[module_name] = target

    self.current_project = self.target(module_name)
def inherit_attributes(self, project_module, parent_module):
    """Propagate the inheritable attributes of 'parent_module' (and of
    the project root) into 'project_module'."""
    assert isinstance(project_module, basestring)
    assert isinstance(parent_module, basestring)
    child_attrs = self.module2attributes[project_module]
    parent_attrs = self.module2attributes[parent_module]
    # FIXME: the parent module might be the locationless user-config; the
    # jam implementation also derived a 'parent' path attribute here:
    #   $(attributes).set parent : [ path.parent
    #       [ path.make [ modules.binding $(parent-module) ] ] ] ;
    for key in ("project-root", "default-build", "requirements",
                "usage-requirements"):
        child_attrs.set(key, parent_attrs.get(key), exact=True)
    parent_build_dir = parent_attrs.get("build-dir")
    if parent_build_dir:
        # The child's build dir mirrors its position relative to the
        # parent.  Both locations are made absolute first, since a
        # relative path from ".." to "." cannot be computed directly.
        child_abs = os.path.join(os.getcwd(), child_attrs.get("location"))
        parent_abs = os.path.join(os.getcwd(), parent_attrs.get("location"))
        child_attrs.set(
            "build-dir",
            os.path.join(parent_build_dir,
                         os.path.relpath(child_abs, parent_abs)),
            exact=True)
def register_id(self, id, module):
    """Record that project id 'id' refers to project module 'module'."""
    assert isinstance(id, basestring)
    assert isinstance(module, basestring)
    self.id2module[id] = module
def current(self):
    """Return the project being loaded right now; report an error if
    no project module is currently being loaded."""
    if self.current_project:
        return self.current_project
    get_manager().errors()(
        'Reference to the project currently being loaded requested '
        'when there was no project module being loaded.'
    )
def set_current(self, c):
    """Unconditionally make 'c' the current project."""
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(c, ProjectTarget)
    self.current_project = c
def push_current(self, project):
    """Temporarily switch the current project to 'project'; undo the
    switch with a matching pop_current() call."""
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
    self.saved_current_project.append(self.current_project)
    self.current_project = project
def pop_current(self):
    """Restore the project that was current before the matching
    push_current() call (None when the save stack is empty)."""
    stack = self.saved_current_project
    self.current_project = stack.pop() if stack else None
def attributes(self, project):
    """Return the ProjectAttributes instance for jamfile module
    'project'."""
    assert isinstance(project, basestring)
    return self.module2attributes[project]
def attribute(self, project, attribute):
    """Return the value of 'attribute' in jamfile module 'project'.

    Raises BaseException (historical behaviour, preserved for existing
    callers) when the project or the attribute does not exist.
    """
    assert isinstance(project, basestring)
    assert isinstance(attribute, basestring)
    try:
        return self.module2attributes[project].get(attribute)
    except KeyError:
        # Narrowed from a bare 'except:' -- only a missing project or
        # attribute (both surface as KeyError) should be translated;
        # genuine programming errors must propagate unchanged.
        raise BaseException("No attribute '%s' for project %s"
                            % (attribute, project))
def attributeDefault(self, project, attribute, default):
    """Return the value of 'attribute' in jamfile module 'project',
    falling back to 'default' when the attribute is not set."""
    assert isinstance(project, basestring)
    assert isinstance(attribute, basestring)
    assert isinstance(default, basestring) or default is None
    return self.module2attributes[project].getDefault(attribute, default)
def target(self, project_module):
    """Return (creating it on first use) the project target that
    corresponds to 'project_module'."""
    assert isinstance(project_module, basestring)
    if project_module not in self.module2target:
        requirements = self.attribute(project_module, "requirements")
        self.module2target[project_module] = \
            b2.build.targets.ProjectTarget(project_module, project_module,
                                           requirements)
    return self.module2target[project_module]
def use(self, id, location):
    """Load the project at 'location' and make it reachable under the
    project id 'id'."""
    assert isinstance(id, basestring)
    assert isinstance(location, basestring)
    # Loading may change the current project; remember it so it can be
    # restored afterwards.
    previous = self.current_project
    project_module = self.load(location)
    declared_id = self.attributeDefault(project_module, "id", "")
    if declared_id != id or not declared_id:
        # The loaded project does not declare this id itself, so the
        # mapping is registered explicitly -- unless the id already
        # points at some other project.
        existing = self.id2module.get(id)
        if existing is not None and existing != project_module:
            self.manager.errors()(
                """Attempt to redeclare already existing project id '%s' at location '%s'""" % (id, location))
        self.id2module[id] = project_module
    self.current_project = previous
def add_rule(self, name, callable_):
    """Expose rule 'name', backed by 'callable_', to every Jamfile
    loaded from now on."""
    assert isinstance(name, basestring)
    assert callable(callable_)
    self.project_rules_.add_rule(name, callable_)
def project_rules(self):
    # Accessor for the ProjectRules instance shared by all projects.
    return self.project_rules_
def glob_internal(self, project, wildcards, excludes, rule_name):
    """Shared implementation of the Jamfile 'glob' and 'glob-tree' rules.

    Expands 'wildcards' (minus 'excludes') relative to the source
    location of 'project', using the b2.util.path function named by
    'rule_name', and returns the matches in the form target
    declarations expect (see below).
    """
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
    assert is_iterable_typed(wildcards, basestring)
    assert is_iterable_typed(excludes, basestring) or excludes is None
    assert isinstance(rule_name, basestring)
    location = project.get("source-location")[0]
    result = []
    # Dispatch to b2.util.path.glob or b2.util.path.glob_tree by name.
    callable = b2.util.path.__dict__[rule_name]
    paths = callable([location], wildcards, excludes)
    has_dir = 0
    for w in wildcards:
        if os.path.dirname(w):
            has_dir = 1
            break
    if has_dir or rule_name != "glob":
        result = []
        # The paths we've found are relative to the current directory,
        # but the names specified in sources lists are assumed to be
        # relative to the source directory of the corresponding
        # project. Either translate them or make them absolute.
        for p in paths:
            rel = os.path.relpath(p, location)
            # If the path is below source location, use relative path.
            if not ".." in rel:
                result.append(rel)
            else:
                # Otherwise, use full path just to avoid any ambiguities.
                result.append(os.path.abspath(p))
    else:
        # There were no directories in the wildcards, so the files are
        # all in the source directory of the project. Just drop the
        # directory, instead of making paths absolute.
        result = [os.path.basename(p) for p in paths]
    return result
def __build_python_module_cache(self):
    """Index every python module under the b2 package by its base name.

    Walks the b2 package tree once and records, for each module found,
    a mapping such as 'toolset' -> 'b2.build.toolset' in
    self.__python_module_cache, allowing O(1) lookups later.

    pkgutil.walk_packages() finds any python package provided the
    directory contains an __init__.py, which also makes libraries
    installed under the contrib directory automatically available.
    Note that walk_packages() imports subpackages to read their
    __path__, so package initialization code runs if the package was
    not already imported.
    """
    index = {}
    for _importer, full_name, _ispkg in pkgutil.walk_packages(
            b2.__path__, prefix='b2.'):
        base = full_name.rsplit('.', 1)[-1]
        # Jam code only ever says "import toolset ;", so two modules
        # with the same base name (e.g. b2.build.toolset and
        # b2.contrib.toolset) would be indistinguishable to the user.
        if base in index:
            self.manager.errors()('duplicate module name "{0}" '
                                  'found in boost-build path'.format(base))
        index[base] = full_name
    self.__python_module_cache = index
def load_module(self, name, extra_path=None):
    """Load a Python module that should be useable from Jamfiles.

    There are generally two types of modules Jamfiles might want to use:
    - Core Boost.Build modules, imported using plain names such as
      'toolset'; these live inside the b2 package.
    - Python modules in the same directory as a Jamfile.  We don't want
      to even temporarily add the Jamfile's directory to sys.path,
      since that might create naming conflicts between standard Python
      modules and those.

    :param name: base module name, possibly dashed ('-' maps to '_').
    :param extra_path: optional extra directories to search first.
    :return: the loaded module object (errors out when not found).
    """
    assert isinstance(name, basestring)
    assert is_iterable_typed(extra_path, basestring) or extra_path is None
    # See if we loaded a module of this name already.
    existing = self.loaded_tool_modules_.get(name)
    if existing:
        return existing
    # Check the extra path as well as any paths outside of the b2
    # package, and import the module from there if it exists.
    b2_path = os.path.normpath(b2.__path__[0])
    # Normalize the paths in BOOST_BUILD_PATH; this allows using
    # startswith() to determine whether a path is a subdirectory of
    # the b2 root path.
    paths = [os.path.normpath(p) for p in self.manager.boost_build_path()]
    # Remove all paths that start with b2's root path.
    paths = [p for p in paths if not p.startswith(b2_path)]
    # Add any extra paths.  Guarded against None: the signature (and
    # the assert above) allow extra_path to be omitted, and
    # list.extend(None) would raise TypeError.
    if extra_path:
        paths.extend(extra_path)
    try:
        # find_module is used so that the pyc's can be used.
        # An ImportError is raised if not found.
        f, location, description = imp.find_module(name, paths)
    except ImportError:
        # If the module is not found outside the b2 package, this
        # error will be handled later (via the module cache below).
        pass
    else:
        # We've found the module; now try loading it.  The module
        # itself may raise ImportError, which is why it is loaded in
        # this else clause: the proper error message is then shown to
        # the end user.
        # TODO: does this module name really need to be mangled like this?
        mname = name + "__for_jamfile"
        self.loaded_tool_module_path_[mname] = location
        module = imp.load_module(mname, f, location, description)
        self.loaded_tool_modules_[name] = module
        return module
    # The cache is created lazily here because building it may import
    # packages that end up calling get_manager(), which might fail.
    if not self.__python_module_cache:
        self.__build_python_module_cache()
    underscore_name = name.replace('-', '_')
    # Check whether the module is within the b2 package and is
    # already loaded.
    mname = self.__python_module_cache.get(underscore_name)
    if mname in sys.modules:
        return sys.modules[mname]
    # Otherwise, if the module name is within the cache, the module
    # exists within the BOOST_BUILD_PATH: load it.
    elif mname:
        # In some cases, self.loaded_tool_module_path_ needs to have
        # the path to the file during the import (project.initialize()
        # for example), so the path needs to be set *before* importing
        # the module.
        path = os.path.join(b2.__path__[0], *mname.split('.')[1:])
        self.loaded_tool_module_path_[mname] = path
        # mname is guaranteed to be importable since it was found
        # within the cache.
        __import__(mname)
        module = sys.modules[mname]
        self.loaded_tool_modules_[name] = module
        return module
    self.manager.errors()("Cannot find module '%s'" % name)
# FIXME:
# Defines a Boost.Build extension project. Such extensions usually
# contain library targets and features that can be used by many people.
# Even though extensions are really projects, they can be initialize as
# a module would be with the "using" (project.project-rules.using)
# mechanism.
#rule extension ( id : options * : * )
#{
# # The caller is a standalone module for the extension.
# local mod = [ CALLER_MODULE ] ;
#
# # We need to do the rest within the extension module.
# module $(mod)
# {
# import path ;
#
# # Find the root project.
# local root-project = [ project.current ] ;
# root-project = [ $(root-project).project-module ] ;
# while
# [ project.attribute $(root-project) parent-module ] &&
# [ project.attribute $(root-project) parent-module ] != user-config
# {
# root-project = [ project.attribute $(root-project) parent-module ] ;
# }
#
# # Create the project data, and bring in the project rules
# # into the module.
# project.initialize $(__name__) :
# [ path.join [ project.attribute $(root-project) location ] ext $(1:L) ] ;
#
# # Create the project itself, i.e. the attributes.
# # All extensions are created in the "/ext" project space.
# project /ext/$(1) : $(2) : $(3) : $(4) : $(5) : $(6) : $(7) : $(8) : $(9) ;
# local attributes = [ project.attributes $(__name__) ] ;
#
# # Inherit from the root project of whomever is defining us.
# project.inherit-attributes $(__name__) : $(root-project) ;
# $(attributes).set parent-module : $(root-project) : exact ;
# }
#}
class ProjectAttributes:
"""Class keeping all the attributes of a project.
The standard attributes are 'id', "location", "project-root", "parent"
"requirements", "default-build", "source-location" and "projects-to-build".
"""
def __init__(self, manager, location, project_module):
self.manager = manager
self.location = location
self.project_module = project_module
self.attributes = {}
self.usage_requirements = None
def set(self, attribute, specification, exact=False):
"""Set the named attribute from the specification given by the user.
The value actually set may be different."""
assert isinstance(attribute, basestring)
assert isinstance(exact, (int, bool))
if __debug__ and not exact:
if attribute == 'requirements':
assert (isinstance(specification, property_set.PropertySet)
or all(isinstance(s, basestring) for s in specification))
elif attribute in (
'usage-requirements', 'default-build', 'source-location', 'build-dir', 'id'):
assert is_iterable_typed(specification, basestring)
elif __debug__:
assert (
isinstance(specification, (property_set.PropertySet, type(None), basestring))
or all(isinstance(s, basestring) for s in specification)
)
if exact:
self.__dict__[attribute] = specification
elif attribute == "requirements":
self.requirements = property_set.refine_from_user_input(
self.requirements, specification,
self.project_module, self.location)
elif attribute == "usage-requirements":
unconditional = []
for p in specification:
split = property.split_conditional(p)
if split:
unconditional.append(split[1])
else:
unconditional.append(p)
non_free = property.remove("free", unconditional)
if non_free:
get_manager().errors()("usage-requirements %s have non-free properties %s" \
% (specification, non_free))
t = property.translate_paths(
property.create_from_strings(specification, allow_condition=True),
self.location)
existing = self.__dict__.get("usage-requirements")
if existing:
new = property_set.create(existing.all() + t)
else:
new = property_set.create(t)
self.__dict__["usage-requirements"] = new
elif attribute == "default-build":
self.__dict__["default-build"] = property_set.create(specification)
elif attribute == "source-location":
source_location = []
for path in specification:
source_location.append(os.path.join(self.location, path))
self.__dict__["source-location"] = source_location
elif attribute == "build-dir":
self.__dict__["build-dir"] = os.path.join(self.location, specification[0])
elif attribute == "id":
id = specification[0]
if id[0] != '/':
id = "/" + id
self.manager.projects().register_id(id, self.project_module)
self.__dict__["id"] = id
elif not attribute in ["default-build", "location",
"source-location", "parent",
"projects-to-build", "project-root"]:
self.manager.errors()(
"""Invalid project attribute '%s' specified
for project at '%s'""" % (attribute, self.location))
else:
self.__dict__[attribute] = specification
def get(self, attribute):
assert isinstance(attribute, basestring)
return self.__dict__[attribute]
def getDefault(self, attribute, default):
assert isinstance(attribute, basestring)
return self.__dict__.get(attribute, default)
def dump(self):
"""Prints the project attributes."""
id = self.get("id")
if not id:
id = "(none)"
else:
id = id[0]
parent = self.get("parent")
if not parent:
parent = "(none)"
else:
parent = parent[0]
print "'%s'" % id
print "Parent project:%s", parent
print "Requirements:%s", self.get("requirements")
print "Default build:%s", string.join(self.get("debuild-build"))
print "Source location:%s", string.join(self.get("source-location"))
print "Projects to build:%s", string.join(self.get("projects-to-build").sort());
class ProjectRules:
    """Class keeping all rules that are made available to Jamfile."""

    def __init__(self, registry):
        # :param registry: the owning ProjectRegistry.
        self.registry = registry
        self.manager_ = registry.manager
        # Rules registered dynamically via add_rule(), keyed by name.
        self.rules = {}
        # Methods of this class that are exported to Jamfiles; the
        # excluded names are internal machinery, not user rules.
        self.local_names = [x for x in self.__class__.__dict__
                            if x not in ["__init__", "init_project", "add_rule",
                                         "error_reporting_wrapper", "add_rule_for_type", "reverse"]]
        self.all_names_ = [x for x in self.local_names]

    def _import_rule(self, bjam_module, name, callable_):
        # Export 'callable_' into bjam module 'bjam_module' under
        # 'name', wrapped so exceptions are reported with context.
        assert isinstance(bjam_module, basestring)
        assert isinstance(name, basestring)
        assert callable(callable_)
        if hasattr(callable_, "bjam_signature"):
            bjam.import_rule(bjam_module, name, self.make_wrapper(callable_), callable_.bjam_signature)
        else:
            bjam.import_rule(bjam_module, name, self.make_wrapper(callable_))

    def add_rule_for_type(self, type):
        # Register a main-target rule (e.g. 'exe' for type EXE): the
        # rule name is the lowercased type name with '_' -> '-'.
        assert isinstance(type, basestring)
        rule_name = type.lower().replace("_", "-")

        @bjam_signature([['name'], ['sources', '*'], ['requirements', '*'],
                         ['default_build', '*'], ['usage_requirements', '*']])
        def xpto (name, sources=[], requirements=[], default_build=[], usage_requirements=[]):
            # Creates a typed target in the currently loading project.
            return self.manager_.targets().create_typed_target(
                type, self.registry.current(), name, sources,
                requirements, default_build, usage_requirements)

        self.add_rule(rule_name, xpto)

    def add_rule(self, name, callable_):
        # Record the rule and export it to bjam.
        assert isinstance(name, basestring)
        assert callable(callable_)
        self.rules[name] = callable_
        self.all_names_.append(name)

        # Add new rule at global bjam scope. This might not be ideal,
        # added because if a jamroot does 'import foo' where foo calls
        # add_rule, we need to import new rule to jamroot scope, and
        # I'm lazy to do this now.
        self._import_rule("", name, callable_)

    def all_names(self):
        # All rule names available to Jamfiles (methods + dynamic rules).
        return self.all_names_

    def call_and_report_errors(self, callable_, *args, **kw):
        # Invoke 'callable_', reporting exceptions with Jamfile context
        # instead of propagating them; returns the callable's result,
        # or None when an exception was reported.
        assert callable(callable_)
        result = None
        try:
            self.manager_.errors().push_jamfile_context()
            result = callable_(*args, **kw)
        except ExceptionWithUserContext, e:
            e.report()
        except Exception, e:
            try:
                self.manager_.errors().handle_stray_exception (e)
            except ExceptionWithUserContext, e:
                e.report()
        finally:
            self.manager_.errors().pop_jamfile_context()

        return result

    def make_wrapper(self, callable_):
        """Given a free-standing function 'callable', return a new
        callable that will call 'callable' and report all exceptions,
        using 'call_and_report_errors'."""
        assert callable(callable_)
        def wrapper(*args, **kw):
            return self.call_and_report_errors(callable_, *args, **kw)
        return wrapper

    def init_project(self, project_module, python_standalone=False):
        # Export every project rule into 'project_module' so Jamfile
        # (or standalone python module) code can call them unqualified.
        assert isinstance(project_module, basestring)
        assert isinstance(python_standalone, bool)
        if python_standalone:
            # Standalone python module: attach rules as attributes of
            # the already-imported module object.
            m = sys.modules[project_module]

            for n in self.local_names:
                if n != "import_":
                    setattr(m, n, getattr(self, n))

            for n in self.rules:
                setattr(m, n, self.rules[n])

            return

        for n in self.local_names:
            # Using 'getattr' here gives us a bound method,
            # while using self.__dict__[r] would give unbound one.
            v = getattr(self, n)
            if callable(v):
                # 'import_' is exported under jam's reserved name
                # 'import'; other names get '_' -> '-'.
                if n == "import_":
                    n = "import"
                else:
                    n = string.replace(n, "_", "-")

                self._import_rule(project_module, n, v)

        for n in self.rules:
            self._import_rule(project_module, n, self.rules[n])

    def project(self, *args):
        # Implements the Jamfile 'project' rule: the optional first
        # argument group is the project id, remaining groups are
        # attribute-name-plus-values lists.
        assert is_iterable(args) and all(is_iterable(arg) for arg in args)
        jamfile_module = self.registry.current().project_module()
        attributes = self.registry.attributes(jamfile_module)
        id = None
        if args and args[0]:
            id = args[0][0]
            args = args[1:]

        if id:
            attributes.set('id', [id])

        explicit_build_dir = None
        for a in args:
            if a:
                attributes.set(a[0], a[1:], exact=0)
                if a[0] == "build-dir":
                    explicit_build_dir = a[1]

        # If '--build-dir' is specified, change the build dir for the project.
        if self.registry.global_build_dir:
            location = attributes.get("location")
            # Project with empty location is 'standalone' project, like
            # user-config, or qt. It has no build dir.
            # If we try to set build dir for user-config, we'll then
            # try to inherit it, with either weird, or wrong consequences.
            if location and location == attributes.get("project-root"):
                # Re-read the project id, since it might have been changed in
                # the project's attributes.
                id = attributes.get('id')

                # This is Jamroot.
                if id:
                    if explicit_build_dir and os.path.isabs(explicit_build_dir):
                        self.registry.manager.errors()(
                            """Absolute directory specified via 'build-dir' project attribute
Don't know how to combine that with the --build-dir option.""")

                    rid = id
                    if rid[0] == '/':
                        rid = rid[1:]

                    p = os.path.join(self.registry.global_build_dir, rid)
                    if explicit_build_dir:
                        p = os.path.join(p, explicit_build_dir)
                    attributes.set("build-dir", p, exact=1)
                elif explicit_build_dir:
                    self.registry.manager.errors()(
                        """When --build-dir is specified, the 'build-dir'
attribute is allowed only for top-level 'project' invocations""")

    def constant(self, name, value):
        """Declare and set a project global constant.
        Project global constants are normal variables but should
        not be changed. They are applied to every child Jamfile."""
        assert is_iterable_typed(name, basestring)
        assert is_iterable_typed(value, basestring)
        self.registry.current().add_constant(name[0], value)

    def path_constant(self, name, value):
        """Declare and set a project global constant, whose value is a path. The
        path is adjusted to be relative to the invocation directory. The given
        value path is taken to be either absolute, or relative to this project
        root."""
        assert is_iterable_typed(name, basestring)
        assert is_iterable_typed(value, basestring)
        if len(value) > 1:
            self.registry.manager.errors()("path constant should have one element")
        self.registry.current().add_constant(name[0], value, path=1)

    def use_project(self, id, where):
        # See comment in 'load' for explanation why we record the
        # parameters as opposed to loading the project now.
        assert is_iterable_typed(id, basestring)
        assert is_iterable_typed(where, basestring)
        m = self.registry.current().project_module()
        self.registry.used_projects[m].append((id[0], where[0]))

    def build_project(self, dir):
        # Append 'dir' to the list of subprojects built by default.
        assert is_iterable_typed(dir, basestring)
        jamfile_module = self.registry.current().project_module()
        attributes = self.registry.attributes(jamfile_module)
        now = attributes.get("projects-to-build")
        attributes.set("projects-to-build", now + dir, exact=True)

    def explicit(self, target_names):
        # Mark targets as built only when explicitly requested.
        assert is_iterable_typed(target_names, basestring)
        self.registry.current().mark_targets_as_explicit(target_names)

    def always(self, target_names):
        # Mark targets as always out of date (rebuilt every time).
        assert is_iterable_typed(target_names, basestring)
        self.registry.current().mark_targets_as_always(target_names)

    def glob(self, wildcards, excludes=None):
        # Jamfile 'glob' rule: expand wildcards in the project's
        # source directory.
        assert is_iterable_typed(wildcards, basestring)
        assert is_iterable_typed(excludes, basestring)or excludes is None
        return self.registry.glob_internal(self.registry.current(),
                                           wildcards, excludes, "glob")

    def glob_tree(self, wildcards, excludes=None):
        # Jamfile 'glob-tree' rule: like glob, but recursive.  The
        # patterns themselves may not contain directory components.
        assert is_iterable_typed(wildcards, basestring)
        assert is_iterable_typed(excludes, basestring) or excludes is None
        bad = 0
        for p in wildcards:
            if os.path.dirname(p):
                bad = 1

        if excludes:
            for p in excludes:
                if os.path.dirname(p):
                    bad = 1

        if bad:
            self.registry.manager.errors()(
                "The patterns to 'glob-tree' may not include directory")
        return self.registry.glob_internal(self.registry.current(),
                                           wildcards, excludes, "glob_tree")

    def using(self, toolset, *args):
        # The module referred by 'using' can be placed in
        # the same directory as Jamfile, and the user
        # will expect the module to be found even though
        # the directory is not in BOOST_BUILD_PATH.
        # So temporary change the search path.
        assert is_iterable_typed(toolset, basestring)
        current = self.registry.current()
        location = current.get('location')

        m = self.registry.load_module(toolset[0], [location])
        if "init" not in m.__dict__:
            self.registry.manager.errors()(
                "Tool module '%s' does not define the 'init' method" % toolset[0])
        m.init(*args)

        # The above might have clobbered .current-project. Restore the correct
        # value.
        self.registry.set_current(current)

    def import_(self, name, names_to_import=None, local_names=None):
        # Jamfile 'import' rule: load a python module and export its
        # callables (with '_' -> '-') as qualified rules; optionally
        # also export selected names under local aliases.
        assert is_iterable_typed(name, basestring)
        assert is_iterable_typed(names_to_import, basestring) or names_to_import is None
        assert is_iterable_typed(local_names, basestring)or local_names is None
        name = name[0]
        py_name = name
        # 'os' would shadow the standard python module; the b2 shim is
        # called os_j instead.
        if py_name == "os":
            py_name = "os_j"
        jamfile_module = self.registry.current().project_module()
        attributes = self.registry.attributes(jamfile_module)
        location = attributes.get("location")

        saved = self.registry.current()

        m = self.registry.load_module(py_name, [location])

        for f in m.__dict__:
            v = m.__dict__[f]
            f = f.replace("_", "-")
            if callable(v):
                qn = name + "." + f
                self._import_rule(jamfile_module, qn, v)
                record_jam_to_value_mapping(qualify_jam_action(qn, jamfile_module), v)

        if names_to_import:
            if not local_names:
                local_names = names_to_import

            if len(names_to_import) != len(local_names):
                self.registry.manager.errors()(
                    """The number of names to import and local names do not match.""")

            for n, l in zip(names_to_import, local_names):
                self._import_rule(jamfile_module, l, m.__dict__[n])

        self.registry.set_current(saved)

    def conditional(self, condition, requirements):
        """Calculates conditional requirements for multiple requirements
        at once. This is a shorthand to reduce duplication and to
        keep an inline declarative syntax. For example:

            lib x : x.cpp : [ conditional <toolset>gcc <variant>debug :
                <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ;
        """
        assert is_iterable_typed(condition, basestring)
        assert is_iterable_typed(requirements, basestring)
        c = string.join(condition, ",")
        if c.find(":") != -1:
            # The condition already carries a ':' separator; append
            # the requirement directly.
            return [c + r for r in requirements]
        else:
            return [c + ":" + r for r in requirements]

    def option(self, name, value):
        # Set a global build option; allowed only in configuration
        # modules.  NOTE(review): the error message omits
        # project-config although the check permits it -- confirm
        # which is intended.
        assert is_iterable(name) and isinstance(name[0], basestring)
        assert is_iterable(value) and isinstance(value[0], basestring)
        name = name[0]
        if not name in ["site-config", "user-config", "project-config"]:
            get_manager().errors()("The 'option' rule may be used only in site-config or user-config")
        option.set(name, value[0])
|
openfun/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/fields.py
|
144
|
import time
import logging
import re
from xblock.fields import JSONField
import datetime
import dateutil.parser
from pytz import UTC
log = logging.getLogger(__name__)
class Date(JSONField):
    '''
    Date fields know how to parse and produce json (iso) compatible formats. Converts to tz aware datetimes.
    '''
    # Seed dates used to detect which components dateutil defaulted:
    # parsing the same string against two seeds that differ in month and
    # day reveals whether those components came from the input.
    # (See note in _parse_date_wo_default_month_day about not defaulting
    # month/day.)  CURRENT_YEAR is captured once at import time.
    CURRENT_YEAR = datetime.datetime.now(UTC).year
    PREVENT_DEFAULT_DAY_MON_SEED1 = datetime.datetime(CURRENT_YEAR, 1, 1, tzinfo=UTC)
    PREVENT_DEFAULT_DAY_MON_SEED2 = datetime.datetime(CURRENT_YEAR, 2, 2, tzinfo=UTC)

    MUTABLE = False

    def _parse_date_wo_default_month_day(self, field):
        """
        Parse the field as an iso string but prevent dateutils from defaulting the day or month while
        allowing it to default the other fields.

        Returns a tz-aware datetime, or None (with a warning logged) when
        the input omits the month or the day.
        """
        # It's not trivial to replace dateutil b/c parsing timezones as Z, +03:30, -400 is hard in python
        # however, we don't want dateutil to default the month or day (but some tests at least expect
        # us to default year); so, we'll see if dateutil uses the defaults for these the hard way
        result = dateutil.parser.parse(field, default=self.PREVENT_DEFAULT_DAY_MON_SEED1)
        result_other = dateutil.parser.parse(field, default=self.PREVENT_DEFAULT_DAY_MON_SEED2)
        if result != result_other:
            # The two parses disagree, so month and/or day came from the
            # seed rather than from the input string.
            log.warning("Field {0} is missing month or day".format(self.name))
            return None
        if result.tzinfo is None:
            # Assume UTC when the input carried no timezone.
            result = result.replace(tzinfo=UTC)
        return result

    def from_json(self, field):
        """
        Parse an optional metadata key containing a time: if present, complain
        if it doesn't parse.
        Return None if not present or invalid.
        """
        if field is None:
            return field
        elif field == "":
            # Fixed: was 'field is ""', an identity comparison that only
            # worked because CPython interns the empty-string literal.
            return None
        elif isinstance(field, basestring):
            return self._parse_date_wo_default_month_day(field)
        elif isinstance(field, (int, long, float)):
            # Numeric timestamps are milliseconds since the epoch.
            # NOTE(review): under Python 2, int / 1000 truncates to whole
            # seconds -- confirm sub-second precision is not expected.
            return datetime.datetime.fromtimestamp(field / 1000, UTC)
        elif isinstance(field, time.struct_time):
            return datetime.datetime.fromtimestamp(time.mktime(field), UTC)
        elif isinstance(field, datetime.datetime):
            return field
        else:
            msg = "Field {0} has bad value '{1}'".format(
                self.name, field)
            raise TypeError(msg)

    def to_json(self, value):
        """
        Convert a time struct or datetime to an ISO-8601 string
        (None passes through; anything else raises TypeError).
        """
        if value is None:
            return None
        if isinstance(value, time.struct_time):
            # struct_times are always utc
            return time.strftime('%Y-%m-%dT%H:%M:%SZ', value)
        elif isinstance(value, datetime.datetime):
            if value.tzinfo is None or value.utcoffset().total_seconds() == 0:
                # isoformat adds +00:00 rather than Z
                return value.strftime('%Y-%m-%dT%H:%M:%SZ')
            else:
                return value.isoformat()
        else:
            raise TypeError("Cannot convert {!r} to json".format(value))

    # Explicit sets go through the same conversion as deserialization.
    enforce_type = from_json
# Matches strings such as "1 day 2 hours 3 minutes 4 seconds" (every
# component optional) into named groups consumed by Timedelta.from_json.
TIMEDELTA_REGEX = re.compile(r'^((?P<days>\d+?) day(?:s?))?(\s)?((?P<hours>\d+?) hour(?:s?))?(\s)?((?P<minutes>\d+?) minute(?:s)?)?(\s)?((?P<seconds>\d+?) second(?:s)?)?$')
class Timedelta(JSONField):
    """Field storing a datetime.timedelta, serialized as a string such
    as "1 days 30 seconds" (see TIMEDELTA_REGEX for the parse grammar).
    """
    # Timedeltas are immutable, see http://docs.python.org/2/library/datetime.html#available-types
    MUTABLE = False

    def from_json(self, time_str):
        """
        time_str: A string with the following components:
            <D> day[s] (optional)
            <H> hour[s] (optional)
            <M> minute[s] (optional)
            <S> second[s] (optional)

        Returns a datetime.timedelta parsed from the string
        (None for None input; implicitly None for unparseable input).
        """
        if time_str is None:
            return None
        if isinstance(time_str, datetime.timedelta):
            # Already deserialized.
            return time_str
        parts = TIMEDELTA_REGEX.match(time_str)
        if not parts:
            return
        parts = parts.groupdict()
        time_params = {}
        for (name, param) in parts.iteritems():
            if param:
                time_params[name] = int(param)
        return datetime.timedelta(**time_params)

    def to_json(self, value):
        # Serialize as e.g. "1 days 30 seconds" (None stays None).
        # NOTE(review): datetime.timedelta exposes only 'days',
        # 'seconds' and 'microseconds' attributes, so 'hours' and
        # 'minutes' always hit the getattr default of 0 and never
        # appear in the output; from_json still round-trips the result.
        # Confirm whether emitting hour/minute units was intended.
        if value is None:
            return None
        values = []
        for attr in ('days', 'hours', 'minutes', 'seconds'):
            cur_value = getattr(value, attr, 0)
            if cur_value > 0:
                values.append("%d %s" % (cur_value, attr))
        return ' '.join(values)

    def enforce_type(self, value):
        """
        Ensure that when set explicitly the Field is set to a timedelta
        """
        if isinstance(value, datetime.timedelta) or value is None:
            return value

        return self.from_json(value)
class RelativeTime(JSONField):
"""
Field for start_time and end_time video module properties.
It was decided, that python representation of start_time and end_time
should be python datetime.timedelta object, to be consistent with
common time representation.
At the same time, serialized representation should be "HH:MM:SS"
This format is convenient to use in XML (and it is used now),
and also it is used in frond-end studio editor of video module as format
for start and end time fields.
In database we previously had float type for start_time and end_time fields,
so we are checking it also.
Python object of RelativeTime is datetime.timedelta.
JSONed representation of RelativeTime is "HH:MM:SS"
"""
# Timedeltas are immutable, see http://docs.python.org/2/library/datetime.html#available-types
MUTABLE = False
@classmethod
def isotime_to_timedelta(cls, value):
"""
Validate that value in "HH:MM:SS" format and convert to timedelta.
Validate that user, that edits XML, sets proper format, and
that max value that can be used by user is "23:59:59".
"""
try:
obj_time = time.strptime(value, '%H:%M:%S')
except ValueError as e:
raise ValueError(
"Incorrect RelativeTime value {!r} was set in XML or serialized. "
"Original parse message is {}".format(value, e.message)
)
return datetime.timedelta(
hours=obj_time.tm_hour,
minutes=obj_time.tm_min,
seconds=obj_time.tm_sec
)
def from_json(self, value):
"""
Convert value is in 'HH:MM:SS' format to datetime.timedelta.
If not value, returns 0.
If value is float (backward compatibility issue), convert to timedelta.
"""
if not value:
return datetime.timedelta(seconds=0)
if isinstance(value, datetime.timedelta):
return value
# We've seen serialized versions of float in this field
if isinstance(value, float):
return datetime.timedelta(seconds=value)
if isinstance(value, basestring):
return self.isotime_to_timedelta(value)
msg = "RelativeTime Field {0} has bad value '{1!r}'".format(self.name, value)
raise TypeError(msg)
def to_json(self, value):
"""
Convert datetime.timedelta to "HH:MM:SS" format.
If not value, return "00:00:00"
Backward compatibility: check if value is float, and convert it. No exceptions here.
If value is not float, but is exceed 23:59:59, raise exception.
"""
if not value:
return "00:00:00"
if isinstance(value, float): # backward compatibility
value = min(value, 86400)
return self.timedelta_to_string(datetime.timedelta(seconds=value))
if isinstance(value, datetime.timedelta):
if value.total_seconds() > 86400: # sanity check
raise ValueError(
"RelativeTime max value is 23:59:59=86400.0 seconds, "
"but {} seconds is passed".format(value.total_seconds())
)
return self.timedelta_to_string(value)
raise TypeError("RelativeTime: cannot convert {!r} to json".format(value))
def timedelta_to_string(self, value):
    """
    Render a timedelta as zero-padded "HH:MM:SS".

    ``str(timedelta)`` yields "[H]H:MM:SS"; the front-end (and the ISO
    time format) require a two-digit hour, so a leading zero is prepended
    for single-digit hours (detected by the 7-character length).
    """
    text = str(value)
    return '0' + text if len(text) == 7 else text
def enforce_type(self, value):
    """
    Coerce an explicitly assigned value to a timedelta.

    ``None`` and timedeltas pass through untouched; anything else goes
    through the same conversion as deserialization.
    """
    if value is None or isinstance(value, datetime.timedelta):
        return value
    return self.from_json(value)
|
aberle/recipeasy
|
refs/heads/master
|
lib/werkzeug/debug/__init__.py
|
310
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
    """Helper class so that we can reuse the frame console code for the
    standalone console.

    Mimics the parts of a traceback frame the debugger touches (``console``
    and ``id``) so the generic command endpoint can evaluate code in the
    standalone shell exactly like in a real frame.
    """

    def __init__(self, namespace):
        # Interactive console evaluating expressions inside ``namespace``.
        self.console = Console(namespace)
        # Id 0: DebuggedApplication.display_console stores this object
        # under self.frames[0].
        self.id = 0
class DebuggedApplication(object):
    """Enables debugging support for a given application::

        from werkzeug.debug import DebuggedApplication
        from myapp import app
        app = DebuggedApplication(app, evalex=True)

    The `evalex` keyword argument allows evaluating expressions in a
    traceback's frame context.

    .. versionadded:: 0.9
       The `lodgeit_url` parameter was deprecated.

    :param app: the WSGI application to run debugged.
    :param evalex: enable exception evaluation feature (interactive
                   debugging).  This requires a non-forking server.
    :param request_key: The key that points to the request object in the
                        environment.  This parameter is ignored in current
                        versions.
    :param console_path: the URL for a general purpose console.
    :param console_init_func: the function that is executed before starting
                              the general purpose console.  The return value
                              is used as initial namespace.
    :param show_hidden_frames: by default hidden traceback frames are skipped.
                               You can show them by setting this parameter
                               to `True`.
    """

    # this class is public
    __module__ = 'werkzeug'

    def __init__(self, app, evalex=False, request_key='werkzeug.request',
                 console_path='/console', console_init_func=None,
                 show_hidden_frames=False, lodgeit_url=None):
        if lodgeit_url is not None:
            from warnings import warn
            warn(DeprecationWarning('Werkzeug now pastes into gists.'))
        if not console_init_func:
            # Default namespace factory: a fresh empty dict per console.
            console_init_func = dict
        self.app = app
        self.evalex = evalex
        # Frames and tracebacks are kept per-id so follow-up debugger
        # requests from the browser can refer back to them.
        self.frames = {}
        self.tracebacks = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Per-process random token; debugger commands must echo it back
        # before evaluation/source viewing is allowed.
        self.secret = gen_salt(20)

    def debug_application(self, environ, start_response):
        """Run the application and conserve the traceback frames."""
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            for item in app_iter:
                yield item
            if hasattr(app_iter, 'close'):
                app_iter.close()
        except Exception:
            if hasattr(app_iter, 'close'):
                app_iter.close()
            # Capture the traceback and remember all its frames so the
            # interactive debugger can later evaluate code inside them.
            traceback = get_current_traceback(skip=1, show_hidden_frames=
                                              self.show_hidden_frames,
                                              ignore_system_exceptions=True)
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback
            try:
                start_response('500 INTERNAL SERVER ERROR', [
                    ('Content-Type', 'text/html; charset=utf-8'),
                    # Disable Chrome's XSS protection, the debug
                    # output can cause false-positives.
                    ('X-XSS-Protection', '0'),
                ])
            except Exception:
                # if we end up here there has been output but an error
                # occurred.  in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ['wsgi.errors'].write(
                    'Debugging middleware caught exception in streamed '
                    'response at a point where response headers were already '
                    'sent.\n')
            else:
                # Headers were not sent yet: render the full debugger page.
                yield traceback.render_full(evalex=self.evalex,
                                            secret=self.secret) \
                    .encode('utf-8', 'replace')
            traceback.log(environ['wsgi.errors'])

    def execute_command(self, request, command, frame):
        """Execute a command in a console."""
        return Response(frame.console.eval(command), mimetype='text/html')

    def display_console(self, request):
        """Display a standalone shell."""
        if 0 not in self.frames:
            # Frame id 0 is reserved for the standalone console.
            self.frames[0] = _ConsoleFrame(self.console_init_func())
        return Response(render_console_html(secret=self.secret),
                        mimetype='text/html')

    def paste_traceback(self, request, traceback):
        """Paste the traceback and return a JSON response."""
        rv = traceback.paste()
        return Response(json.dumps(rv), mimetype='application/json')

    def get_source(self, request, frame):
        """Render the source viewer."""
        return Response(frame.render_source(), mimetype='text/html')

    def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        # basename() strips any path components, preventing directory
        # traversal outside the shared folder.
        filename = join(dirname(__file__), 'shared', basename(filename))
        if isfile(filename):
            mimetype = mimetypes.guess_type(filename)[0] \
                or 'application/octet-stream'
            f = open(filename, 'rb')
            try:
                return Response(f.read(), mimetype=mimetype)
            finally:
                f.close()
        return Response('Not Found', status=404)

    def __call__(self, environ, start_response):
        """Dispatch the requests."""
        # important: don't ever access a function here that reads the incoming
        # form data!  Otherwise the application won't have access to that data
        # any more!
        request = Request(environ)
        # Default: run the wrapped app under the debugging middleware.
        response = self.debug_application
        if request.args.get('__debugger__') == 'yes':
            cmd = request.args.get('cmd')
            arg = request.args.get('f')
            secret = request.args.get('s')
            traceback = self.tracebacks.get(request.args.get('tb', type=int))
            frame = self.frames.get(request.args.get('frm', type=int))
            if cmd == 'resource' and arg:
                response = self.get_resource(request, arg)
            elif cmd == 'paste' and traceback is not None and \
                 secret == self.secret:
                response = self.paste_traceback(request, traceback)
            elif cmd == 'source' and frame and self.secret == secret:
                response = self.get_source(request, frame)
            elif self.evalex and cmd is not None and frame is not None and \
                 self.secret == secret:
                response = self.execute_command(request, cmd, frame)
        elif self.evalex and self.console_path is not None and \
             request.path == self.console_path:
            response = self.display_console(request)
        return response(environ, start_response)
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
refs/heads/master
|
StringLace/StringLace.py
|
1
|
"""
Name : String Lace
Created By : Agus Makmun (Summon Agus)
Blog : bloggersmart.net - python.web.id
License : GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
Documentation : https://github.com/agusmakmun/Some-Examples-of-Simple-Python-Script/
Powered : Python-2.7
"""
import string
SL = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', 'CDEFGHIJKLMNOPQRSTUVWXYZABcdefghijklmnopqrstuvwxyzab')
#print string.translate("g fmmnc wms bgblr rpylqjyrc gr zw fylb", SL)
print string.translate("lyky qywy ybyjyf YESQ KYIKSL", SL)
print string.translate("nama saya adalah AGUS MAKMUN", SL)
|
asfaltboy/GitSavvy
|
refs/heads/master
|
core/commands/cherry_pick.py
|
1
|
import sublime
from sublime_plugin import TextCommand
from .log import GsLogByBranchCommand
from ...common import util
class GsCherryPickCommand(GsLogByBranchCommand):
    """
    Show a branch's log (restricted to commits the current branch lacks --
    presumably what ``cherry=True`` requests; confirm against the base
    command) and cherry-pick the commit the user selects.
    """

    def log(self, **kwargs):
        # Limit the log to cherry candidates from the selected branch.
        return super().log(cherry=True, start_end=("", self._selected_branch), **kwargs)

    def do_action(self, commit_hash):
        self.git("cherry-pick", commit_hash)
        message = "Commit %s cherry-picked successfully." % commit_hash
        sublime.status_message(message)
        util.view.refresh_gitsavvy(self.window.active_view())
|
SeanHayes/python-social-auth
|
refs/heads/master
|
social/apps/pyramid_app/utils.py
|
76
|
import warnings
from functools import wraps
from pyramid.threadlocal import get_current_registry
from pyramid.httpexceptions import HTTPNotFound, HTTPForbidden
from social.utils import setting_name, module_member
from social.strategies.utils import get_strategy
from social.backends.utils import get_backend, user_backends_data
# Fallback dotted paths used when the Pyramid settings registry does not
# define an explicit value for the given social-auth setting name.
DEFAULTS = {
    'STORAGE': 'social.apps.pyramid_app.models.PyramidStorage',
    'STRATEGY': 'social.strategies.pyramid_strategy.PyramidStrategy'
}
def get_helper(name):
    """Look up a social-auth setting from the Pyramid registry, falling
    back to DEFAULTS when it is not configured."""
    registry_settings = get_current_registry().settings
    fallback = DEFAULTS.get(name, None)
    return registry_settings.get(setting_name(name), fallback)
def load_strategy(request):
    """Instantiate the configured strategy class bound to ``request``."""
    strategy_path = get_helper('STRATEGY')
    storage_path = get_helper('STORAGE')
    return get_strategy(strategy_path, storage_path, request)
def load_backend(strategy, name, redirect_uri):
    """Instantiate the auth backend ``name`` from the configured list."""
    backend_cls = get_backend(get_helper('AUTHENTICATION_BACKENDS'), name)
    return backend_cls(strategy=strategy, redirect_uri=redirect_uri)
def psa(redirect_uri=None):
    """
    View decorator that resolves the ``{backend}`` matchdict entry and
    attaches ready-to-use ``request.strategy`` / ``request.backend``
    objects before invoking the wrapped view.

    A ``redirect_uri`` not starting with '/' is treated as a route name
    and resolved via ``request.route_url``.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(request, *args, **kwargs):
            backend = request.matchdict.get('backend')
            if not backend:
                return HTTPNotFound('Missing backend')
            if redirect_uri and not redirect_uri.startswith('/'):
                uri = request.route_url(redirect_uri, backend=backend)
            else:
                uri = redirect_uri
            # Strategy must exist first: the backend is built on top of it.
            request.strategy = load_strategy(request)
            request.backend = load_backend(request.strategy, backend, uri)
            return func(request, *args, **kwargs)
        return wrapper
    return decorator
def login_required(func):
    """View decorator raising HTTPForbidden unless the configured
    LOGGEDIN_FUNCTION predicate accepts the request."""
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        predicate = module_member(
            request.backend.setting('LOGGEDIN_FUNCTION')
        )
        if predicate(request):
            return func(request, *args, **kwargs)
        raise HTTPForbidden('Not authorized user')
    return wrapper
def backends(request, user):
    """Load Social Auth current user data to context under the key 'backends'.
    Will return the output of social.backends.utils.user_backends_data."""
    storage = module_member(get_helper('STORAGE'))
    data = user_backends_data(
        user, get_helper('AUTHENTICATION_BACKENDS'), storage
    )
    return {'backends': data}
def strategy(*args, **kwargs):
    """Deprecated alias for :func:`psa`, kept for backward compatibility."""
    # BUGFIX: emit a proper DeprecationWarning (filterable as such) and point
    # the warning at the caller instead of this shim.
    warnings.warn('@strategy decorator is deprecated, use @psa instead',
                  DeprecationWarning, stacklevel=2)
    return psa(*args, **kwargs)
|
andersle/virtualsimlab
|
refs/heads/master
|
test.py
|
1
|
# -*- coding: utf-8 -*-
"""This is just an example for the virtual simulation lab."""
from __future__ import print_function

# Squares of 0..9, followed by two extra demo values (10*10 and 2**3),
# printed one per line.
values = [n * n for n in range(10)]
values += [10 * 10, 2 ** 3]
for entry in values:
    print(entry)
|
aricchen/openHR
|
refs/heads/master
|
openerp/addons/account/report/account_aged_partner_balance.py
|
27
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
    """
    Parser for the "Aged Partner Balance" RML report.

    Buckets each partner's open (unreconciled) receivable/payable balance
    into the five date periods supplied by the wizard form, plus a
    "direction" column for amounts maturing before/after the analysed
    window.  ``self.total_account`` accumulates the per-column totals:
    indexes 0-4 are the period buckets, 5 the grand total and 6 the
    direction column.
    """

    def __init__(self, cr, uid, name, context):
        super(aged_trial_report, self).__init__(cr, uid, name, context=context)
        # Running per-column totals; filled lazily by _get_lines*().
        self.total_account = []
        # Callables exposed to the RML template.
        self.localcontext.update({
            'time': time,
            'get_lines_with_out_partner': self._get_lines_with_out_partner,
            'get_lines': self._get_lines,
            'get_total': self._get_total,
            'get_direction': self._get_direction,
            'get_for_period': self._get_for_period,
            'get_company': self._get_company,
            'get_currency': self._get_currency,
            'get_partners':self._get_partners,
            'get_account': self._get_account,
            'get_fiscalyear': self._get_fiscalyear,
            'get_target_move': self._get_target_move,
        })

    def set_context(self, objects, data, ids, report_type=None):
        """Extract the wizard options from ``data['form']`` before rendering."""
        obj_move = self.pool.get('account.move.line')
        ctx = data['form'].get('used_context', {})
        # The aged balance always spans all fiscal years.
        ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
        self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx)
        self.direction_selection = data['form'].get('direction_selection', 'past')
        self.target_move = data['form'].get('target_move', 'all')
        self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
        # Which account types to analyse, from the wizard's result selection.
        if (data['form']['result_selection'] == 'customer' ):
            self.ACCOUNT_TYPE = ['receivable']
        elif (data['form']['result_selection'] == 'supplier'):
            self.ACCOUNT_TYPE = ['payable']
        else:
            self.ACCOUNT_TYPE = ['payable','receivable']
        return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)

    def _get_lines(self, form):
        """
        Return one dict per partner with keys '0'..'4' (period buckets),
        'direction', 'total' and 'name', accumulating column totals into
        ``self.total_account`` along the way.
        """
        res = []
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        # Partners having open items at the reference date.
        # NOTE(review): the bare ``reconcile_id`` here is unqualified, unlike
        # the ``l.reconcile_id`` used in the other queries -- confirm it
        # resolves to account_move_line.reconcile_id.
        self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
                    res_partner.name AS name \
                FROM res_partner,account_move_line AS l, account_account, account_move am\
                WHERE (l.account_id=account_account.id) \
                    AND (l.move_id=am.id) \
                    AND (am.state IN %s)\
                    AND (account_account.type IN %s)\
                    AND account_account.active\
                    AND ((reconcile_id IS NULL)\
                    OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                    AND (l.partner_id=res_partner.id)\
                    AND (l.date <= %s)\
                    AND ' + self.query + ' \
                ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
        partners = self.cr.dictfetchall()
        ## reset the column totals to 0
        for i in range(7):
            self.total_account.append(0)
        #
        # Build a string like (1,2,3) for easy use in SQL query
        partner_ids = [x['id'] for x in partners]
        if not partner_ids:
            return []
        # This dictionary will store the debit-credit for all partners, using partner_id as key.
        totals = {}
        self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
                    AND (am.state IN %s)\
                    AND (account_account.type IN %s)\
                    AND (l.partner_id IN %s)\
                    AND ((l.reconcile_id IS NULL)\
                    OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                    AND ' + self.query + '\
                    AND account_account.active\
                    AND (l.date <= %s)\
                    GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
        t = self.cr.fetchall()
        for i in t:
            totals[i[0]] = i[1]
        # This dictionary will store the future or past of all partners
        future_past = {}
        if self.direction_selection == 'future':
            # Amounts maturing before the analysed window.
            self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
                        FROM account_move_line AS l, account_account, account_move am \
                        WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
                        AND (am.state IN %s)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity, l.date) < %s)\
                        AND (l.partner_id IN %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                        AND '+ self.query + '\
                        AND account_account.active\
                    AND (l.date <= %s)\
                        GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
            t = self.cr.fetchall()
            for i in t:
                future_past[i[0]] = i[1]
        elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
            # Amounts maturing after the analysed window.
            self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity,l.date) > %s)\
                        AND (l.partner_id IN %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                        AND '+ self.query + '\
                        AND account_account.active\
                    AND (l.date <= %s)\
                        GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
            t = self.cr.fetchall()
            for i in t:
                future_past[i[0]] = i[1]
        # Use one query per period and store results in history (a list variable)
        # Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
        history = []
        for i in range(5):
            args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
            # Maturity-date condition depends on whether the period is
            # open-ended at the start, the end, or bounded on both sides.
            dates_query = '(COALESCE(l.date_maturity,l.date)'
            if form[str(i)]['start'] and form[str(i)]['stop']:
                dates_query += ' BETWEEN %s AND %s)'
                args_list += (form[str(i)]['start'], form[str(i)]['stop'])
            elif form[str(i)]['start']:
                dates_query += ' > %s)'
                args_list += (form[str(i)]['start'],)
            else:
                dates_query += ' < %s)'
                args_list += (form[str(i)]['stop'],)
            args_list += (self.date_from,)
            self.cr.execute('''SELECT l.partner_id, SUM(l.debit-l.credit)
                    FROM account_move_line AS l, account_account, account_move am
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)
                        AND (am.state IN %s)
                        AND (account_account.type IN %s)
                        AND (l.partner_id IN %s)
                        AND ((l.reconcile_id IS NULL)
                          OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))
                        AND ''' + self.query + '''
                        AND account_account.active
                        AND ''' + dates_query + '''
                    AND (l.date <= %s)
                    GROUP BY l.partner_id''', args_list)
            t = self.cr.fetchall()
            d = {}
            # NOTE: the inner loop reuses ``i``; harmless because the outer
            # range() loop rebinds it, but worth knowing when extending.
            for i in t:
                d[i[0]] = i[1]
            history.append(d)
        for partner in partners:
            values = {}
            ## If choice selection is in the future
            if self.direction_selection == 'future':
                # Query here is replaced by one query which gets the all the partners their 'before' value
                before = False
                if future_past.has_key(partner['id']):
                    before = [ future_past[partner['id']] ]
                self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
                values['direction'] = before and before[0] or 0.0
            elif self.direction_selection == 'past': # Changed this so people could in the future create new direction_selections
                # Query here is replaced by one query which gets the all the partners their 'after' value
                after = False
                if future_past.has_key(partner['id']): # Making sure this partner actually was found by the query
                    after = [ future_past[partner['id']] ]
                self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
                values['direction'] = after and after[0] or 0.0
            for i in range(5):
                during = False
                if history[i].has_key(partner['id']):
                    during = [ history[i][partner['id']] ]
                # add this partner's bucket to the running column total
                self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
                values[str(i)] = during and during[0] or 0.0
            total = False
            if totals.has_key( partner['id'] ):
                total = [ totals[partner['id']] ]
            values['total'] = total and total[0] or 0.0
            ## Add for total
            # NOTE: relies on ``i`` leaking from the range(5) loop above,
            # so (i+1) == 5 == the grand-total column.
            self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
            values['name'] = partner['name']
            res.append(values)
        total = 0.0
        totals = {}
        # Recompute a per-column summary over the produced rows.
        # NOTE(review): ``total``/``totals`` are local and not returned --
        # presumably dead code kept for compatibility; confirm before removal.
        for r in res:
            total += float(r['total'] or 0.0)
            for i in range(5)+['direction']:
                totals.setdefault(str(i), 0.0)
                totals[str(i)] += float(r[str(i)] or 0.0)
        return res

    def _get_lines_with_out_partner(self, form):
        """
        Same bucketing as _get_lines but for move lines that have no
        partner; produces at most one row named 'Unknown Partner'.
        """
        res = []
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        ## reset the column totals to 0
        for i in range(7):
            self.total_account.append(0)
        totals = {}
        self.cr.execute('SELECT SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
                    AND (am.state IN %s)\
                    AND (l.partner_id IS NULL)\
                    AND (account_account.type IN %s)\
                    AND ((l.reconcile_id IS NULL) \
                    OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                    AND ' + self.query + '\
                    AND (l.date <= %s)\
                    AND account_account.active ',(tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
        t = self.cr.fetchall()
        for i in t:
            totals['Unknown Partner'] = i[0]
        future_past = {}
        if self.direction_selection == 'future':
            self.cr.execute('SELECT SUM(l.debit-l.credit) \
                        FROM account_move_line AS l, account_account, account_move am\
                        WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (l.partner_id IS NULL)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity, l.date) < %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                        AND '+ self.query + '\
                        AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
            t = self.cr.fetchall()
            for i in t:
                future_past['Unknown Partner'] = i[0]
        elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
            self.cr.execute('SELECT SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (l.partner_id IS NULL)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity,l.date) > %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                        AND '+ self.query + '\
                        AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
            t = self.cr.fetchall()
            for i in t:
                future_past['Unknown Partner'] = i[0]
        history = []
        for i in range(5):
            args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from,)
            dates_query = '(COALESCE(l.date_maturity,l.date)'
            if form[str(i)]['start'] and form[str(i)]['stop']:
                dates_query += ' BETWEEN %s AND %s)'
                args_list += (form[str(i)]['start'], form[str(i)]['stop'])
            elif form[str(i)]['start']:
                dates_query += ' > %s)'
                args_list += (form[str(i)]['start'],)
            else:
                dates_query += ' < %s)'
                args_list += (form[str(i)]['stop'],)
            args_list += (self.date_from,)
            self.cr.execute('SELECT SUM(l.debit-l.credit)\
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (account_account.type IN %s)\
                        AND (l.partner_id IS NULL)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s )))\
                        AND '+ self.query + '\
                        AND account_account.active\
                        AND ' + dates_query + '\
                    AND (l.date <= %s)\
                    GROUP BY l.partner_id', args_list)
            t = self.cr.fetchall()
            d = {}
            for i in t:
                d['Unknown Partner'] = i[0]
            history.append(d)
        values = {}
        if self.direction_selection == 'future':
            before = False
            if future_past.has_key('Unknown Partner'):
                before = [ future_past['Unknown Partner'] ]
            self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
            values['direction'] = before and before[0] or 0.0
        elif self.direction_selection == 'past':
            after = False
            if future_past.has_key('Unknown Partner'):
                after = [ future_past['Unknown Partner'] ]
            self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
            values['direction'] = after and after[0] or 0.0
        for i in range(5):
            during = False
            if history[i].has_key('Unknown Partner'):
                during = [ history[i]['Unknown Partner'] ]
            self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
            values[str(i)] = during and during[0] or 0.0
        total = False
        if totals.has_key( 'Unknown Partner' ):
            total = [ totals['Unknown Partner'] ]
        values['total'] = total and total[0] or 0.0
        ## Add for total
        # NOTE: (i+1) relies on ``i`` leaking from the range(5) loop above.
        self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
        values['name'] = 'Unknown Partner'
        # Only emit the row when there is actually an unassigned balance.
        if values['total']:
            res.append(values)
        total = 0.0
        totals = {}
        # NOTE(review): summary recomputation as in _get_lines; result unused.
        for r in res:
            total += float(r['total'] or 0.0)
            for i in range(5)+['direction']:
                totals.setdefault(str(i), 0.0)
                totals[str(i)] += float(r[str(i)] or 0.0)
        return res

    def _get_total(self,pos):
        """Template helper: column total at index ``pos`` (0.0 if unset)."""
        period = self.total_account[int(pos)]
        return period or 0.0

    def _get_direction(self,pos):
        """Template helper: direction-column total (index ``pos``)."""
        period = self.total_account[int(pos)]
        return period or 0.0

    def _get_for_period(self,pos):
        """Template helper: period-bucket total (index ``pos``)."""
        period = self.total_account[int(pos)]
        return period or 0.0

    def _get_partners(self,data):
        """Human-readable label for the analysed account types."""
        # TODO: deprecated, to remove in trunk
        if data['form']['result_selection'] == 'customer':
            return self._translate('Receivable Accounts')
        elif data['form']['result_selection'] == 'supplier':
            return self._translate('Payable Accounts')
        elif data['form']['result_selection'] == 'customer_supplier':
            return self._translate('Receivable and Payable Accounts')
        return ''
# Register the parser under the service name referenced by the RML template.
report_sxw.report_sxw('report.account.aged_trial_balance', 'res.partner',
        'addons/account/report/account_aged_partner_balance.rml',parser=aged_trial_report, header="internal landscape")

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
yiyocx/django-rest-framework
|
refs/heads/master
|
tests/test_serializer.py
|
64
|
# coding: utf-8
from __future__ import unicode_literals
import pickle
import pytest
from rest_framework import serializers
from rest_framework.compat import unicode_repr
from .utils import MockObject
# Tests for core functionality.
# -----------------------------
class TestSerializer:
    """Core validation behaviour of a plain two-field Serializer."""

    def setup(self):
        class ExampleSerializer(serializers.Serializer):
            char = serializers.CharField()
            integer = serializers.IntegerField()
        self.Serializer = ExampleSerializer

    def test_valid_serializer(self):
        ser = self.Serializer(data={'char': 'abc', 'integer': 123})
        assert ser.is_valid()
        assert ser.validated_data == {'char': 'abc', 'integer': 123}
        assert ser.errors == {}

    def test_invalid_serializer(self):
        ser = self.Serializer(data={'char': 'abc'})
        assert not ser.is_valid()
        assert ser.validated_data == {}
        assert ser.errors == {'integer': ['This field is required.']}

    def test_partial_validation(self):
        # partial=True lets required fields be absent.
        ser = self.Serializer(data={'char': 'abc'}, partial=True)
        assert ser.is_valid()
        assert ser.validated_data == {'char': 'abc'}
        assert ser.errors == {}

    def test_empty_serializer(self):
        ser = self.Serializer()
        assert ser.data == {'char': '', 'integer': None}

    def test_missing_attribute_during_serialization(self):
        class MissingAttributes:
            pass
        ser = self.Serializer(MissingAttributes())
        with pytest.raises(AttributeError):
            ser.data
class TestValidateMethod:
    """Serializer-level .validate() may raise field or non-field errors."""

    def test_non_field_error_validate_method(self):
        class ExampleSerializer(serializers.Serializer):
            char = serializers.CharField()
            integer = serializers.IntegerField()

            def validate(self, attrs):
                raise serializers.ValidationError('Non field error')

        ser = ExampleSerializer(data={'char': 'abc', 'integer': 123})
        assert not ser.is_valid()
        assert ser.errors == {'non_field_errors': ['Non field error']}

    def test_field_error_validate_method(self):
        class ExampleSerializer(serializers.Serializer):
            char = serializers.CharField()
            integer = serializers.IntegerField()

            def validate(self, attrs):
                # A dict maps the error onto a specific field.
                raise serializers.ValidationError({'char': 'Field error'})

        ser = ExampleSerializer(data={'char': 'abc', 'integer': 123})
        assert not ser.is_valid()
        assert ser.errors == {'char': ['Field error']}
class TestBaseSerializer:
    """BaseSerializer subclass with hand-written to/from representation."""

    def setup(self):
        class ExampleSerializer(serializers.BaseSerializer):
            def to_representation(self, obj):
                return {
                    'id': obj['id'],
                    'email': obj['name'] + '@' + obj['domain']
                }

            def to_internal_value(self, data):
                name, domain = str(data['email']).split('@')
                return {
                    'id': int(data['id']),
                    'name': name,
                    'domain': domain,
                }
        self.Serializer = ExampleSerializer

    def test_serialize_instance(self):
        record = {'id': 1, 'name': 'tom', 'domain': 'example.com'}
        ser = self.Serializer(record)
        assert ser.data == {'id': 1, 'email': 'tom@example.com'}

    def test_serialize_list(self):
        records = [
            {'id': 1, 'name': 'tom', 'domain': 'example.com'},
            {'id': 2, 'name': 'ann', 'domain': 'example.com'},
        ]
        ser = self.Serializer(records, many=True)
        assert ser.data == [
            {'id': 1, 'email': 'tom@example.com'},
            {'id': 2, 'email': 'ann@example.com'}
        ]

    def test_validate_data(self):
        ser = self.Serializer(data={'id': 1, 'email': 'tom@example.com'})
        assert ser.is_valid()
        assert ser.validated_data == {
            'id': 1,
            'name': 'tom',
            'domain': 'example.com'
        }

    def test_validate_list(self):
        payload = [
            {'id': 1, 'email': 'tom@example.com'},
            {'id': 2, 'email': 'ann@example.com'},
        ]
        ser = self.Serializer(data=payload, many=True)
        assert ser.is_valid()
        assert ser.validated_data == [
            {'id': 1, 'name': 'tom', 'domain': 'example.com'},
            {'id': 2, 'name': 'ann', 'domain': 'example.com'}
        ]
class TestStarredSource:
    """
    Tests for `source='*'` argument, which is used for nested representations.
    For example:
        nested_field = NestedField(source='*')
    """
    data = {
        'nested1': {'a': 1, 'b': 2},
        'nested2': {'c': 3, 'd': 4}
    }

    def setup(self):
        class NestedSerializer1(serializers.Serializer):
            a = serializers.IntegerField()
            b = serializers.IntegerField()

        class NestedSerializer2(serializers.Serializer):
            c = serializers.IntegerField()
            d = serializers.IntegerField()

        class TestSerializer(serializers.Serializer):
            nested1 = NestedSerializer1(source='*')
            nested2 = NestedSerializer2(source='*')

        self.Serializer = TestSerializer

    def test_nested_validate(self):
        """
        A nested representation is validated into a flat internal object.
        """
        ser = self.Serializer(data=self.data)
        assert ser.is_valid()
        assert ser.validated_data == {
            'a': 1,
            'b': 2,
            'c': 3,
            'd': 4
        }

    def test_nested_serialize(self):
        """
        An object can be serialized into a nested representation.
        """
        flat = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        ser = self.Serializer(flat)
        assert ser.data == self.data
class TestIncorrectlyConfigured:
    """A field name with no matching attribute yields a helpful error."""

    def test_incorrect_field_name(self):
        class ExampleSerializer(serializers.Serializer):
            incorrect_name = serializers.IntegerField()

        class ExampleObject:
            def __init__(self):
                self.correct_name = 123

        ser = ExampleSerializer(ExampleObject())
        with pytest.raises(AttributeError) as exc_info:
            ser.data
        message = str(exc_info.value)
        assert message.startswith(
            "Got AttributeError when attempting to get a value for field `incorrect_name` on serializer `ExampleSerializer`.\n"
            "The serializer field might be named incorrectly and not match any attribute or key on the `ExampleObject` instance.\n"
            "Original exception text was:"
        )
class TestUnicodeRepr:
    """repr() of a serializer wrapping non-ASCII data must not raise."""

    def test_unicode_repr(self):
        class ExampleSerializer(serializers.Serializer):
            example = serializers.CharField()

        class ExampleObject:
            def __init__(self):
                self.example = '한국'

            def __repr__(self):
                return unicode_repr(self.example)

        ser = ExampleSerializer(ExampleObject())
        repr(ser)  # Should not error.
class TestNotRequiredOutput:
    """How missing values interact with required=False and default=..."""

    def test_not_required_output_for_dict(self):
        """
        'required=False' should allow a dictionary key to be missing in output.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(required=False)
            included = serializers.CharField()

        ser = ExampleSerializer(data={'included': 'abc'})
        ser.is_valid()
        assert ser.data == {'included': 'abc'}

    def test_not_required_output_for_object(self):
        """
        'required=False' should allow an object attribute to be missing in output.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(required=False)
            included = serializers.CharField()

            def create(self, validated_data):
                return MockObject(**validated_data)

        ser = ExampleSerializer(data={'included': 'abc'})
        ser.is_valid()
        ser.save()
        assert ser.data == {'included': 'abc'}

    def test_default_required_output_for_dict(self):
        """
        'default="something"' should require dictionary key.
        We need to handle this as the field will have an implicit
        'required=False', but it should still have a value.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(default='abc')
            included = serializers.CharField()

        ser = ExampleSerializer({'included': 'abc'})
        with pytest.raises(KeyError):
            ser.data

    def test_default_required_output_for_object(self):
        """
        'default="something"' should require object attribute.
        We need to handle this as the field will have an implicit
        'required=False', but it should still have a value.
        """
        class ExampleSerializer(serializers.Serializer):
            omitted = serializers.CharField(default='abc')
            included = serializers.CharField()

        ser = ExampleSerializer(MockObject(included='abc'))
        with pytest.raises(AttributeError):
            ser.data
class TestCacheSerializerData:
    def test_cache_serializer_data(self):
        """
        Caching serializer data with pickle will drop the serializer info,
        but does preserve the data itself.
        """
        class ExampleSerializer(serializers.Serializer):
            field1 = serializers.CharField()
            field2 = serializers.CharField()

        source = {'field1': 'a', 'field2': 'b'}
        serializer = ExampleSerializer(source)
        # Round-trip the rendered data through pickle.
        restored = pickle.loads(pickle.dumps(serializer.data))
        assert restored == source
|
windskyer/mvpn
|
refs/heads/master
|
mvpn/api/openstack/radius/versions.py
|
1
|
from mvpn.api.openstack import wsgi
from mvpn.api.openstack.mvpn.views import versions as views_versions
# Documentation links (PDF developer guide and WADL description) advertised
# for each supported API version.
LINKS = {
    'v2.0': {
        'pdf': 'http://docs.flftuu.com/'
               'api/openstack-compute/2/os-compute-devguide-2.pdf',
        'wadl': 'http://docs.flftuu.com/'
                'api/openstack-compute/2/wadl/os-compute-2.wadl'
    },
    'v3.0': {
        'pdf': 'http://docs.flftuu.com/'
               'api/openstack-compute/3/os-compute-devguide-3.pdf',
        'wadl': 'http://docs.flftuu.com/'
                'api/openstack-compute/3/wadl/os-compute-3.wadl'
    },
}
# Version descriptors served by the version controllers below: each entry
# carries its status, last-updated timestamp, documentation links and the
# media types the version accepts.  Note v3.0 is EXPERIMENTAL and only
# advertises a JSON media type.
VERSIONS = {
    "v2.0": {
        "id": "v2.0",
        "status": "CURRENT",
        "updated": "2015-10-21T11:33:21Z",
        "links": [
            {
                "rel": "describedby",
                "type": "application/pdf",
                "href": LINKS['v2.0']['pdf'],
            },
            {
                "rel": "describedby",
                "type": "application/vnd.sun.wadl+xml",
                "href": LINKS['v2.0']['wadl'],
            },
        ],
        "media-types": [
            {
                "base": "application/xml",
                "type": "application/vnd.openstack.compute+xml;version=2",
            },
            {
                "base": "application/json",
                "type": "application/vnd.openstack.compute+json;version=2",
            }
        ],
    },
    "v3.0": {
        "id": "v3.0",
        "status": "EXPERIMENTAL",
        "updated": "2016-02-23T11:33:21Z",
        "links": [
            {
                "rel": "describedby",
                "type": "application/pdf",
                "href": LINKS['v3.0']['pdf'],
            },
            {
                "rel": "describedby",
                "type": "application/vnd.sun.wadl+xml",
                "href": LINKS['v3.0']['wadl'],
            },
        ],
        "media-types": [
            {
                "base": "application/json",
                "type": "application/vnd.openstack.compute+json;version=3",
            }
        ],
    }
}
class VersionV2(object):
    """Controller exposing the v2.0 API version description."""

    def show(self, req):
        """Build and return the v2.0 version document for this request."""
        view_builder = views_versions.get_view_builder(req)
        return view_builder.build_version(VERSIONS['v2.0'])
def create_resource():
    """Wrap the v2 version controller in a WSGI resource."""
    controller = VersionV2()
    return wsgi.Resource(controller)
|
jdfekete/progressivis
|
refs/heads/master
|
widgets/progressivis_nb_widgets/nbwidgets/__init__.py
|
1
|
from ._version import version_info, __version__
from .scatterplot import *
from .module_graph import *
from .control_panel import *
from .psboard import *
from .sensitive_html import *
from .json_html import *
from .data_table import *
from .sparkline_progressbar import *
from .plotting_progressbar import *
from .utils import *
def _jupyter_nbextension_paths():
"""Called by Jupyter Notebook Server to detect if it is a valid nbextension and
to install the widget
Returns
=======
section: The section of the Jupyter Notebook Server to change.
Must be 'notebook' for widget extensions
src: Source directory name to copy files from.
Webpack outputs generated files into this directory and Jupyter
Notebook copies from this directory during widget installation
dest: Destination directory name to install widget files to.
Jupyter Notebook copies from `src` directory into
<jupyter path>/nbextensions/<dest> directory during widget
installation
require: Path to importable AMD Javascript module inside the
<jupyter path>/nbextensions/<dest> directory
"""
return [{
'section': 'notebook',
'src': 'static',
'dest': 'progressivis-nb-widgets',
'require': 'progressivis-nb-widgets/extension'
}]
|
nrwahl2/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_host_networks.py
|
25
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Metadata consumed by Ansible tooling: this module is community-supported
# and still in 'preview' status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_host_networks
short_description: Module to manage host networks in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage host networks in oVirt/RHV."
options:
name:
description:
- "Name of the host to manage networks for."
required: true
state:
description:
- "Should the host be present/absent."
choices: ['present', 'absent']
default: present
bond:
description:
- "Dictionary describing network bond:"
- "C(name) - Bond name."
- "C(mode) - Bonding mode."
- "C(interfaces) - List of interfaces to create a bond."
interface:
description:
- "Name of the network interface where logical network should be attached."
networks:
description:
- "List of dictionary describing networks to be attached to interface or bond:"
- "C(name) - Name of the logical network to be assigned to bond or interface."
- "C(boot_protocol) - Boot protocol one of the I(none), I(static) or I(dhcp)."
- "C(address) - IP address in case of I(static) boot protocol is used."
- "C(prefix) - Routing prefix in case of I(static) boot protocol is used."
- "C(gateway) - Gateway in case of I(static) boot protocol is used."
- "C(version) - IP version. Either v4 or v6. Default is v4."
labels:
description:
- "List of names of the network label to be assigned to bond or interface."
check:
description:
- "If I(true) verify connectivity between host and engine."
- "Network configuration changes will be rolled back if connectivity between
engine and the host is lost after changing network configuration."
save:
description:
- "If I(true) network configuration will be persistent, by default they are temporary."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create bond on eth0 and eth1 interface, and put 'myvlan' network on top of it:
- name: Bonds
ovirt_host_networks:
name: myhost
bond:
name: bond0
mode: 2
interfaces:
- eth1
- eth2
networks:
- name: myvlan
boot_protocol: static
address: 1.2.3.4
prefix: 24
gateway: 1.2.3.4
version: v4
# Remove bond0 bond from host interfaces:
- ovirt_host_networks:
state: absent
name: myhost
bond:
name: bond0
# Assign myvlan1 and myvlan2 vlans to host eth0 interface:
- ovirt_host_networks:
name: myhost
interface: eth0
networks:
- name: myvlan1
- name: myvlan2
# Remove myvlan2 vlan from host eth0 interface:
- ovirt_host_networks:
state: absent
name: myhost
interface: eth0
networks:
- name: myvlan2
# Remove all networks/vlans from host eth0 interface:
- ovirt_host_networks:
state: absent
name: myhost
interface: eth0
'''
RETURN = '''
id:
description: ID of the host NIC which is managed
returned: On success if host NIC is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
host_nic:
description: "Dictionary of all the host NIC attributes. Host NIC attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_nic."
returned: On success if host NIC is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_dict_of_struct,
get_entity,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
)
class HostNetworksModule(BaseModule):
    """Host-network helper built on the generic oVirt BaseModule.

    Provides change detection for bond / network-attachment configuration
    and a post-action hook that persists the network configuration.
    """

    def build_entity(self):
        # The setup_networks action targets the host itself; no attributes
        # are carried on the entity payload.
        return otypes.Host()

    def update_address(self, attachments_service, attachment, network):
        """Sync one attachment's IP assignment with the requested `network` dict.

        Mutates `attachment` in place and pushes the update via the SDK
        (unless running in check mode).  Only the first assignment matching
        the requested IP version (default 'v4') is considered -- note the
        `break` at the end of the matching branch.
        """
        # Check if there is any change in address assignments and
        # update it if needed:
        for ip in attachment.ip_address_assignments:
            if str(ip.ip.version) == network.get('version', 'v4'):
                changed = False
                if not equal(network.get('boot_protocol'), str(ip.assignment_method)):
                    ip.assignment_method = otypes.BootProtocol(network.get('boot_protocol'))
                    changed = True
                if not equal(network.get('address'), ip.ip.address):
                    ip.ip.address = network.get('address')
                    changed = True
                if not equal(network.get('gateway'), ip.ip.gateway):
                    ip.ip.gateway = network.get('gateway')
                    changed = True
                # NOTE(review): the current netmask is compared against the
                # 'prefix' parameter as an int -- assumes ip.ip.netmask holds
                # a prefix length here; confirm against the SDK.
                if not equal(network.get('prefix'), int(ip.ip.netmask) if ip.ip.netmask else None):
                    ip.ip.netmask = str(network.get('prefix'))
                    changed = True
                if changed:
                    if not self._module.check_mode:
                        attachments_service.service(attachment.id).update(attachment)
                    self.changed = True
                break

    def has_update(self, nic_service):
        """Return True when the NIC's bond/attachment config differs from params.

        Side effect: existing attachments that merely need an address change
        are updated in place via `update_address` while scanning.
        """
        update = False
        bond = self._module.params['bond']
        networks = self._module.params['networks']
        nic = get_entity(nic_service)
        if nic is None:
            return update
        # Check if bond configuration should be updated:
        if bond:
            update = not (
                equal(str(bond.get('mode')), nic.bonding.options[0].value) and
                equal(
                    sorted(bond.get('interfaces')) if bond.get('interfaces') else None,
                    sorted(get_link_name(self._connection, s) for s in nic.bonding.slaves)
                )
            )
        if not networks:
            return update
        # Check if networks attachments configuration should be updated:
        attachments_service = nic_service.network_attachments_service()
        network_names = [network.get('name') for network in networks]
        attachments = {}
        for attachment in attachments_service.list():
            name = get_link_name(self._connection, attachment.network)
            if name in network_names:
                attachments[name] = attachment
        for network in networks:
            attachment = attachments.get(network.get('name'))
            # If the attachment doesn't exist, we need to create it:
            if attachment is None:
                return True
            self.update_address(attachments_service, attachment, network)
        return update

    def _action_save_configuration(self, entity):
        # Persist the (otherwise temporary) network configuration when the
        # 'save' parameter is set; honours check mode.
        if self._module.params['save']:
            if not self._module.check_mode:
                self._service.service(entity.id).commit_net_config()
            self.changed = True
def main():
    """Module entry point: reconcile host network state with the parameters.

    Builds the argument spec, connects to the oVirt/RHV engine, and either
    applies (state=present) or removes (state=absent) bond / label / network
    attachment configuration on the target host via `setup_networks`.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, aliases=['host'], required=True),
        bond=dict(default=None, type='dict'),
        interface=dict(default=None),
        networks=dict(default=None, type='list'),
        labels=dict(default=None, type='list'),
        check=dict(default=None, type='bool'),
        save=dict(default=None, type='bool'),
    )
    module = AnsibleModule(argument_spec=argument_spec)
    check_sdk(module)

    # Initialise before the try block so the finally clause cannot raise a
    # NameError (shadowing the real failure) when authentication or the
    # connection setup itself fails.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        hosts_service = connection.system_service().hosts_service()
        host_networks_module = HostNetworksModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )
        host = host_networks_module.search_entity()
        if host is None:
            raise Exception("Host '%s' was not found." % module.params['name'])

        bond = module.params['bond']
        interface = module.params['interface']
        networks = module.params['networks']
        labels = module.params['labels']
        # The NIC we operate on is the bond (if one is requested), otherwise
        # the plain interface.
        nic_name = bond.get('name') if bond else module.params['interface']
        nics_service = hosts_service.host_service(host.id).nics_service()
        nic = search_by_name(nics_service, nic_name)

        state = module.params['state']
        if (
            state == 'present' and
            (nic is None or host_networks_module.has_update(nics_service.service(nic.id)))
        ):
            host_networks_module.action(
                entity=host,
                action='setup_networks',
                post_action=host_networks_module._action_save_configuration,
                check_connectivity=module.params['check'],
                modified_bonds=[
                    otypes.HostNic(
                        name=bond.get('name'),
                        bonding=otypes.Bonding(
                            options=[
                                otypes.Option(
                                    name="mode",
                                    value=str(bond.get('mode')),
                                )
                            ],
                            slaves=[
                                otypes.HostNic(name=i) for i in bond.get('interfaces', [])
                            ],
                        ),
                    ),
                ] if bond else None,
                modified_labels=[
                    otypes.NetworkLabel(
                        name=str(name),
                        host_nic=otypes.HostNic(
                            name=bond.get('name') if bond else interface
                        ),
                    ) for name in labels
                ] if labels else None,
                modified_network_attachments=[
                    otypes.NetworkAttachment(
                        network=otypes.Network(
                            name=network['name']
                        ) if network['name'] else None,
                        host_nic=otypes.HostNic(
                            name=bond.get('name') if bond else interface
                        ),
                        ip_address_assignments=[
                            otypes.IpAddressAssignment(
                                assignment_method=otypes.BootProtocol(
                                    network.get('boot_protocol', 'none')
                                ),
                                ip=otypes.Ip(
                                    address=network.get('address'),
                                    gateway=network.get('gateway'),
                                    # NOTE(review): DOCUMENTATION describes a
                                    # 'prefix' key for static addressing, but
                                    # the value read here is 'netmask' --
                                    # confirm which key callers supply.
                                    netmask=network.get('netmask'),
                                    version=otypes.IpVersion(
                                        network.get('version')
                                    ) if network.get('version') else None,
                                ),
                            ),
                        ],
                    ) for network in networks
                ] if networks else None,
            )
        elif state == 'absent' and nic:
            attachments_service = nics_service.nic_service(nic.id).network_attachments_service()
            attachments = attachments_service.list()
            if networks:
                # Restrict removal to the attachments whose logical network
                # was explicitly named.
                network_names = [network['name'] for network in networks]
                attachments = [
                    attachment for attachment in attachments
                    if get_link_name(connection, attachment.network) in network_names
                ]
            if labels or bond or attachments:
                host_networks_module.action(
                    entity=host,
                    action='setup_networks',
                    post_action=host_networks_module._action_save_configuration,
                    check_connectivity=module.params['check'],
                    removed_bonds=[
                        otypes.HostNic(
                            name=bond.get('name'),
                        ),
                    ] if bond else None,
                    removed_labels=[
                        otypes.NetworkLabel(
                            name=str(name),
                        ) for name in labels
                    ] if labels else None,
                    removed_network_attachments=list(attachments),
                )

        # Re-fetch the NIC so the reported id/struct reflect the new state.
        nic = search_by_name(nics_service, nic_name)
        module.exit_json(**{
            'changed': host_networks_module.changed,
            'id': nic.id if nic else None,
            'host_nic': get_dict_of_struct(nic),
        })
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out if a connection was actually established; otherwise
        # the original exception would be replaced by a NameError here.
        if connection is not None:
            connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
|
mgoulish/qpid-dispatch
|
refs/heads/master
|
python/qpid_dispatch_internal/management/qdrouter.py
|
3
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
Qpid Dispatch Router management schema and config file parsing.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import json
from pkgutil import get_data
from . import schema
from qpid_dispatch_internal.compat import JSON_LOAD_KWARGS
class QdSchema(schema.Schema):
    """
    Qpid Dispatch Router management schema.
    """

    # Names of the abstract entity types used to classify entity types as
    # configuration vs. operational.
    CONFIGURATION_ENTITY = u"configurationEntity"
    OPERATIONAL_ENTITY = u"operationalEntity"

    def __init__(self):
        """Load schema."""
        # The schema JSON ships as package data of qpid_dispatch.management.
        qd_schema = get_data('qpid_dispatch.management', 'qdrouter.json')
        try:
            super(QdSchema, self).__init__(**json.loads(qd_schema, **JSON_LOAD_KWARGS))
        except Exception as e:
            raise ValueError("Invalid schema qdrouter.json: %s" % e)
        self.configuration_entity = self.entity_type(self.CONFIGURATION_ENTITY)
        self.operational_entity = self.entity_type(self.OPERATIONAL_ENTITY)

    def validate_add(self, attributes, entities):
        """
        Check that listeners and connectors can only have role=inter-router if the router has
        mode=interior.

        Raises schema.ValidationError when a non-interior router coexists
        with an inter-router listener/connector among `entities` plus the
        entity being added (`attributes`).
        """
        entities = list(entities)  # Iterate twice
        super(QdSchema, self).validate_add(attributes, entities)
        # Include the entity being added in the cross-entity check.
        entities.append(attributes)
        inter_router = not_interior = None
        for e in entities:
            short_type = self.short_name(e['type'])
            if short_type == "router" and e['mode'] != "interior":
                not_interior = e['mode']
            if short_type in ["listener", "connector"] and e['role'] == "inter-router":
                inter_router = e
        if not_interior and inter_router:
            raise schema.ValidationError(
                "role='inter-router' only allowed with router mode='interior' for %s." % inter_router)

    def is_configuration(self, entity_type):
        # Truthy when entity_type derives from configurationEntity.
        return entity_type and self.configuration_entity in entity_type.all_bases

    def is_operational(self, entity_type):
        # Truthy when entity_type derives from operationalEntity.
        return entity_type and self.operational_entity in entity_type.all_bases
|
bjlittle/iris
|
refs/heads/pre-commit-ci-update-config
|
lib/iris/tests/test_file_load.py
|
5
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test the file loading mechanism.
"""
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import iris
@tests.skip_data
class TestFileLoad(tests.IrisTest):
    """Exercise iris file loading: wildcards, bad paths and empty files."""

    def _test_file(self, src_path, reference_filename):
        """
        Checks the result of loading the given file spec, or creates the
        reference file if it doesn't exist.
        """
        loaded = iris.load_raw(tests.get_data_path(src_path))
        self.assertCML(loaded, ["file_load", reference_filename])

    def test_no_file(self):
        # A spec matching no files must raise IOError, whether the bad spec
        # appears alone, first, or last in the list.
        real_file = ["PP", "globClim1", "theta.pp"]
        missing_file = ["PP", "globClim1", "no_such_file*"]
        with self.assertRaises(IOError):
            iris.load(tests.get_data_path(missing_file))
        with self.assertRaises(IOError):
            iris.load(
                [
                    tests.get_data_path(missing_file),
                    tests.get_data_path(real_file),
                ]
            )
        with self.assertRaises(IOError):
            iris.load(
                [
                    tests.get_data_path(real_file),
                    tests.get_data_path(missing_file),
                ]
            )

    def test_single_file(self):
        self._test_file(["PP", "globClim1", "theta.pp"], "theta_levels.cml")

    def test_star_wildcard(self):
        self._test_file(["PP", "globClim1", "*_wind.pp"], "wind_levels.cml")

    def test_query_wildcard(self):
        self._test_file(["PP", "globClim1", "?_wind.pp"], "wind_levels.cml")

    def test_charset_wildcard(self):
        self._test_file(
            ["PP", "globClim1", "[rstu]_wind.pp"], "u_wind_levels.cml"
        )

    def test_negative_charset_wildcard(self):
        self._test_file(
            ["PP", "globClim1", "[!rstu]_wind.pp"], "v_wind_levels.cml"
        )

    def test_empty_file(self):
        # A zero-length PP file cannot be translated into cubes.
        with self.temp_filename(suffix=".pp") as temp_filename:
            with open(temp_filename, "a"):
                with self.assertRaises(iris.exceptions.TranslationError):
                    iris.load(temp_filename)


if __name__ == "__main__":
    tests.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.