code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from .flag_button import * | vinoth3v/In | In/flag/form/__init__.py | Python | apache-2.0 | 26 |
#!/usr/bin/env python
# coding: utf-8
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A one-shot testing webserver.
When invoked, this server will write a short integer to stdout, indicating on
which port the server is listening. It will then read one integer from stdin,
indicating the response code to be sent in response to a request. It also reads
16 characters from stdin, which, after having "\r\n" appended, will form the
response body in a successful response (one with code 200). The server will
process one HTTP request, deliver the prearranged response to the client, and
write the entire request to stdout. It will then terminate.
This server is written in Python since it provides a simple HTTP stack, and
because parsing chunked encoding is safer and easier in a memory-safe language.
This could easily have been written in C++ instead.
"""
import BaseHTTPServer
import struct
import sys
import zlib
class BufferedReadFile(object):
  """File-like wrapper that records everything read from the wrapped file.

  All data returned by read() and readline() is also appended to the
  |buffer| attribute, so the raw bytes consumed so far can be inspected
  later (used to echo the raw HTTP request back to the test program).
  """

  def __init__(self, real_file):
    self.file = real_file
    self.buffer = ''

  def _record(self, data):
    # Append to the capture buffer and hand the data back unchanged.
    self.buffer += data
    return data

  def read(self, size=-1):
    return self._record(self.file.read(size))

  def readline(self, size=-1):
    return self._record(self.file.readline(size))

  def flush(self):
    self.file.flush()

  def close(self):
    self.file.close()
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Handles a single POST request and replies with a prearranged response.

  The response code and body are stored on the class by Main() before the
  request arrives; the complete raw request is accumulated in raw_request.
  """

  # Everything to be written to stdout is collected into this string. It can't
  # be written to stdout until after the HTTP transaction is complete, because
  # stdout is a pipe being read by a test program that's also the HTTP client.
  # The test program expects to complete the entire HTTP transaction before it
  # even starts reading this script's stdout. If the stdout pipe buffer fills
  # up during an HTTP transaction, deadlock would result.
  raw_request = ''

  # Response parameters, overwritten by Main() from values read on stdin.
  response_code = 500
  response_body = ''

  def handle_one_request(self):
    # Wrap the rfile in the buffering file object so that the raw header block
    # can be written to stdout after it is parsed.
    self.rfile = BufferedReadFile(self.rfile)
    BaseHTTPServer.BaseHTTPRequestHandler.handle_one_request(self)

  def do_POST(self):
    """Reads the request body, then sends the prearranged response."""
    # The headers have been consumed by the base class at this point; move
    # them out of the capture buffer into raw_request.
    RequestHandler.raw_request = self.rfile.buffer
    self.rfile.buffer = ''
    if self.headers.get('Transfer-Encoding', '').lower() == 'chunked':
      # RFC 7230 forbids Content-Length together with chunked encoding.
      if 'Content-Length' in self.headers:
        raise AssertionError
      body = self.handle_chunked_encoding()
    else:
      length = int(self.headers.get('Content-Length', -1))
      body = self.rfile.read(length)
    if self.headers.get('Content-Encoding', '').lower() == 'gzip':
      # 15 is the value of |wbits|, which should be at the maximum possible
      # value to ensure that any gzip stream can be decoded. The offset of 16
      # specifies that the stream to decompress will be formatted with a gzip
      # wrapper.
      body = zlib.decompress(body, 16 + 15)
    # Append the (decoded) body so the test program sees the full request.
    RequestHandler.raw_request += body
    self.send_response(self.response_code)
    self.end_headers()
    if self.response_code == 200:
      self.wfile.write(self.response_body)
      self.wfile.write('\r\n')

  def handle_chunked_encoding(self):
    """This parses a "Transfer-Encoding: Chunked" body in accordance with
    RFC 7230 §4.1. This returns the result as a string.
    """
    body = ''
    chunk_size = self.read_chunk_size()
    while chunk_size > 0:
      # Read the body.
      data = self.rfile.read(chunk_size)
      chunk_size -= len(data)
      body += data
      # Finished reading this chunk.
      if chunk_size == 0:
        # Read through any trailer fields.
        trailer_line = self.rfile.readline()
        while trailer_line.strip() != '':
          trailer_line = self.rfile.readline()
        # Read the chunk size.
        chunk_size = self.read_chunk_size()
    return body

  def read_chunk_size(self):
    """Reads and parses one chunk-size line; returns -1 on a malformed line."""
    # Read the whole line, including the \r\n.
    chunk_size_and_ext_line = self.rfile.readline()
    # Look for a chunk extension.
    chunk_size_end = chunk_size_and_ext_line.find(';')
    if chunk_size_end == -1:
      # No chunk extensions; just encounter the end of line.
      chunk_size_end = chunk_size_and_ext_line.find('\r')
    if chunk_size_end == -1:
      self.send_response(400)  # Bad request.
      return -1
    # Chunk sizes are transmitted in hexadecimal.
    return int(chunk_size_and_ext_line[:chunk_size_end], base=16)

  def log_request(self, code='-', size='-'):
    # The default implementation logs these to sys.stderr, which is just noise.
    pass
def Main():
  """Runs the one-shot server.

  1. Writes the chosen port to stdout as a native-endian unsigned short.
  2. Reads the desired response code (unsigned short) and a 16-byte response
     body from stdin.
  3. Serves exactly one HTTP request, then echoes the raw request to stdout.
  """
  if sys.platform == 'win32':
    import os, msvcrt
    # Binary mode keeps the packed port value and the echoed request from
    # being mangled by \n -> \r\n translation on Windows.
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
  # Start the server. Port 0 asks the OS to pick a free ephemeral port.
  server = BaseHTTPServer.HTTPServer(('127.0.0.1', 0), RequestHandler)
  # Write the port as an unsigned short to the parent process.
  sys.stdout.write(struct.pack('=H', server.server_address[1]))
  sys.stdout.flush()
  # Read the desired test response code as an unsigned short and the desired
  # response body as a 16-byte string from the parent process.
  RequestHandler.response_code, RequestHandler.response_body = \
      struct.unpack('=H16s', sys.stdin.read(struct.calcsize('=H16s')))
  # Handle the request.
  server.handle_request()
  # Share the entire request with the test program, which will validate it.
  sys.stdout.write(RequestHandler.raw_request)
  sys.stdout.flush()
# Allow running this module directly as a script.
if __name__ == '__main__':
  Main()
| atom/crashpad | util/net/http_transport_test_server.py | Python | apache-2.0 | 6,017 |
# Define a custom User class to work with django-social-auth
from django.db import models
from django.contrib.auth.models import User
class Task(models.Model):
    """A to-do item belonging to one user."""
    name = models.CharField(max_length=200)
    # NOTE(review): pre-Django-2.0 ForeignKey style — on_delete is implicitly
    # CASCADE here; must be made explicit before upgrading Django.
    owner = models.ForeignKey(User)
    finished = models.BooleanField(default=False)
    shared = models.BooleanField(default=False)
class Viewer(models.Model):
    """Links a user to a task, presumably granting view access."""
    name = models.ForeignKey(User)
    # NOTE(review): field is named 'tasks' (plural) but ForeignKey links a
    # single Task — a ManyToManyField may have been intended; confirm.
    tasks = models.ForeignKey(Task)
class Friends(models.Model):
    """A directed friendship record between two users."""
    # Set once on insert; not editable afterwards.
    created = models.DateTimeField(auto_now_add=True, editable=False)
    creator = models.ForeignKey(User, related_name="friendship_creator_set")
    friend = models.ForeignKey(User, related_name="friend_set")
class CustomUserManager(models.Manager):
    """Manager exposing the create_user() API expected by django-social-auth."""
    def create_user(self, username, email):
        # NOTE(review): the ``email`` argument is accepted but ignored —
        # CustomUser (below) defines no email field. Confirm this is intended.
        return self.model._default_manager.create(username=username)
class CustomUser(models.Model):
    """Minimal custom user model for use with django-social-auth."""
    username = models.CharField(max_length=128)
    last_login = models.DateTimeField(blank=True, null=True)
    objects = CustomUserManager()
    def is_authenticated(self):
        # Mirrors django.contrib.auth's User API: instances of this model
        # always count as authenticated (anonymous users are represented
        # by a different class).
        return True
| kurdd/Oauth | app/models.py | Python | apache-2.0 | 1,109 |
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
import json
from datetime import datetime
from bottle import route, mako_template as template, redirect, request, response, get, post
from bottle import static_file, view #为了不经过controller直接返回诸如html,css等静态文件引入
from model.documents import *
from setting import *
DATE_FORMAT = '%Y-%m-%d %H:%M:%S' # 入库格式化时间
@route('/to_add_item')
def to_add_item():
    """Render the empty "add restaurant" form page."""
    view = 'views/system/item/add'
    return template(view, site_opt=site_opt)
@route('/add_item', method = 'POST')
def add_item():
    """Create a Restaurant from POSTed form fields and redirect to the list.

    Form fields: name, address, telno, lat, lon. ``name`` and ``address``
    arrive as UTF-8 byte strings and are decoded before saving.

    Cleanup: removed a local ``DATE_FORMAT`` that shadowed the module-level
    constant and the derived ``innerName`` variable — both were dead code
    (``innerName`` was never used).
    """
    # request.params merges GET and POST parameters.
    name = request.params.get('name')
    address = request.params.get('address')
    telno = request.params.get('telno')
    lat = request.params.get('lat')
    lon = request.params.get('lon')
    item = Restaurant(name=unicode(name, 'utf8'), address=unicode(address, 'utf8'), telno=telno, lat = lat, lon = lon)
    item.save()
    redirect('list_item')
@route('/list_item')
def list_item():
    """Show a page of restaurants; optional ``start``/``size`` paging params."""
    offset = int(request.params.get('start') or '0')
    limit = int(request.params.get('size') or '1000')
    page = Restaurant.objects[offset:offset + limit]
    return template('views/system/item/list', data={'items': page},
                    site_opt=site_opt)
@route('/del_item')
def del_item():
    """Delete a restaurant by id, manually cascading to its menus."""
    rest_id = request.params.get('id')
    Restaurant.objects(id=rest_id).delete()
    # Menus reference the restaurant; remove them too (manual cascade).
    Menu.objects(restaurant=rest_id).delete()
    redirect('/list_item')
@route('/modify_item', method = 'POST')
def modify_item():
id = request.params.get('id')
name = request.params.get('name')
address = request.params.get('address')
telno = request.params.get('telno')
lat = request.params.get('lat')
lon = request.params.get('lon')
print 'modify item=====%s, %s, %s, %s' % (id, name, address, telno)
Restaurant.objects(id=id).update(set__name = unicode(name, 'utf8'), set__address = address, set__telno = unicode(telno, 'utf-8'), set__lat = lat, set__lon = lon)
redirect('/list_item')
@route('/to_modify_item')
def to_modify_item():
    """Render the edit form pre-filled with one restaurant's data."""
    item_id = request.params.get('id')
    # Keep the original [0] indexing: it raises on a missing id rather than
    # silently rendering an empty form.
    record = Restaurant.objects(id=item_id)[0]
    return template('views/system/item/edit', data={'item': record},
                    site_opt=site_opt)
# coding=latin-1
from flask import request, g
from flask import abort, flash
from functools import wraps
def checa_permissao(permissao):
    """Decorator factory: run the wrapped view only if the logged-in user
    (flask ``g.user``) holds ``permissao``; otherwise flash a notice and
    abort the request with HTTP 401."""
    def decorator(f):
        @wraps(f)
        def inner(*args, **kwargs):
            autorizado = g.user and g.user.checa_permissao(permissao)
            if not autorizado:
                flash(u'Atenção você não possui a permissão: %s. Se isto não estiver correto, entre em contato solicitando esta permissão.' % permissao.upper(), u'notice')
                abort(401)  # raises, so the view below never runs
            return f(*args, **kwargs)
        return inner
    return decorator
| dedeco/cnddh-denuncias | cnddh/decorators.py | Python | apache-2.0 | 593 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-23 19:37
''' Миграция '''
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make several CharFields optional (blank=True) on Account and
    Transaction.

    Only the ``blank`` flag is being added; max_length and verbose_name
    are restated unchanged, as AlterField requires the full definition.
    """
    dependencies = [
        ('lawe', '0006_remove_account_unit'),
    ]
    operations = [
        migrations.AlterField(
            model_name='account',
            name='group',
            field=models.CharField(blank=True, max_length=200, verbose_name='Основная группа'),
        ),
        migrations.AlterField(
            model_name='account',
            name='name',
            field=models.CharField(blank=True, max_length=200, verbose_name='Название'),
        ),
        migrations.AlterField(
            model_name='account',
            name='subgroup',
            field=models.CharField(blank=True, max_length=200, verbose_name='Подгруппа'),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='description',
            field=models.CharField(blank=True, max_length=200, verbose_name='Описание'),
        ),
    ]
| DronMDF/laweb | lawe/migrations/0007_auto_20170423_1937.py | Python | apache-2.0 | 1,029 |
#-
# Copyright (c) 2014 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_cp2_x_cincoffset_sealed(BaseBERITestCase):
    """Checks the behaviour of CIncOffset on a sealed capability: the offset
    must be unchanged, an exception must be raised, and CapCause must be set.

    Register values (a0, a2, a3) are produced by the matching assembly test.
    """

    @attr('capabilities')
    def test_cp2_x_cincoffset_sealed_1(self):
        '''Test that CIncOffset on a sealed capability does not change the offset'''
        self.assertRegisterEqual(self.MIPS.a0, 0, "CIncOffset changed the offset of a sealed capability")

    @attr('capabilities')
    def test_cp2_x_cincoffset_sealed_2(self):
        '''Test that CIncOffset on a sealed capability raised an exception'''
        self.assertRegisterEqual(self.MIPS.a2, 1, "CIncOffset on a sealed capability did not raise an exception")

    @attr('capabilities')
    def test_cp2_x_cincoffset_sealed_3(self):
        '''Test that CIncOffset on a sealed capability sets CapCause'''
        # 0x0301: presumably exception code 0x03 (sealed) in the upper byte and
        # register number 0x01 in the lower byte — confirm against the CHERI spec.
        self.assertRegisterEqual(self.MIPS.a3, 0x0301, "CIncOffset on a sealed capability did not set CapCause correctly")
| 8l/beri | cheritest/trunk/tests/cp2/test_cp2_x_cincoffset_sealed.py | Python | apache-2.0 | 2,058 |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.kubernetes.examples.taxi_pipeline_kubernetes."""
import os
import tensorflow as tf
from tfx.orchestration.experimental.kubernetes.examples import taxi_pipeline_kubernetes
class TaxiPipelineKubernetesTest(tf.test.TestCase):
  """Smoke test: the Kubernetes taxi pipeline definition builds correctly."""

  def setUp(self):
    super().setUp()
    # Keep outputs under TEST_UNDECLARED_OUTPUTS_DIR when the test runner
    # sets it (e.g. Bazel); otherwise fall back to a per-test temp dir.
    self._test_dir = os.path.join(
        os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
        self._testMethodName)

  def testTaxiPipelineCheckDagConstruction(self):
    # Constructing the pipeline object must succeed and yield the expected
    # number of components; the pipeline is not actually executed.
    logical_pipeline = taxi_pipeline_kubernetes.create_pipeline(
        pipeline_name='Test',
        pipeline_root=self._test_dir,
        data_root=self._test_dir,
        module_file=self._test_dir,
        serving_model_dir=self._test_dir,
        beam_pipeline_args=[])
    self.assertEqual(9, len(logical_pipeline.components))
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
| tensorflow/tfx | tfx/orchestration/experimental/kubernetes/examples/taxi_pipeline_kubernetes_test.py | Python | apache-2.0 | 1,478 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
import token
from tokenize import generate_tokens, untokenize
from StringIO import StringIO
from robotide.lib.robot.api import logger
from robotide.lib.robot.errors import (ContinueForLoop, DataError, ExecutionFailed,
ExecutionFailures, ExecutionPassed, ExitForLoop,
PassExecution, ReturnFromKeyword)
from robotide.lib.robot.running import Keyword, RUN_KW_REGISTER
from robotide.lib.robot.running.context import EXECUTION_CONTEXTS
from robotide.lib.robot.running.usererrorhandler import UserErrorHandler
from robotide.lib.robot.utils import (asserts, DotDict, escape, format_assign_message,
get_error_message, get_time, is_falsy, is_integer,
is_string, is_truthy, is_unicode, JYTHON, Matcher,
normalize, NormalizedDict, parse_time, prepr,
RERAISED_EXCEPTIONS, plural_or_not as s,
secs_to_timestr, seq2str, split_from_equals,
timestr_to_secs, type_name, unic)
from robotide.lib.robot.variables import (is_list_var, is_var, DictVariableTableValue,
VariableTableValue, VariableSplitter,
variable_not_found)
from robotide.lib.robot.version import get_version
if JYTHON:
from java.lang import String, Number
# TODO: The name of this decorator should be changed. It is used for avoiding
# arguments to be resolved by many other keywords than run keyword variants.
# Should also consider:
# - Exposing this functionality to external libraries. Would require doc
# enhancements and clean way to expose variables to make resolving them
# based on needs easier.
# - Removing the functionality that run keyword variants can be overridded
# by custom keywords without a warning.
def run_keyword_variant(resolve):
    """Decorator factory registering a BuiltIn method as a run-keyword
    variant with ``resolve`` arguments to be resolved normally (the rest
    are left for the keyword itself to handle)."""
    def register(method):
        RUN_KW_REGISTER.register_run_keyword('BuiltIn', method.__name__, resolve)
        return method
    return register
class _BuiltInBase(object):
    """Shared base for the BuiltIn keyword group classes.

    Provides access to the active execution context, its namespace and
    variables, plus small helpers used by several keyword groups.
    """

    @property
    def _context(self):
        # No current context means no test execution is in progress.
        if EXECUTION_CONTEXTS.current is None:
            raise RobotNotRunningError('Cannot access execution context')
        return EXECUTION_CONTEXTS.current

    @property
    def _namespace(self):
        return self._context.namespace

    def _get_namespace(self, top=False):
        # ``top`` selects the outermost context instead of the active one.
        ctx = EXECUTION_CONTEXTS.top if top else EXECUTION_CONTEXTS.current
        return ctx.namespace

    @property
    def _variables(self):
        return self._namespace.variables

    def _matches(self, string, pattern):
        # Must use this instead of fnmatch when string may contain newlines.
        matcher = Matcher(pattern, caseless=False, spaceless=False)
        return matcher.match(string)

    def _is_true(self, condition):
        # String conditions are evaluated as expressions with the os and sys
        # modules available; self.evaluate is a keyword defined in a sibling
        # class mixed into BuiltIn (not visible in this chunk).
        if is_string(condition):
            condition = self.evaluate(condition, modules='os,sys')
        return bool(condition)

    def _log_types(self, *args):
        msg = ["Argument types are:"] + [self._get_type(a) for a in args]
        self.log('\n'.join(msg), 'DEBUG')

    def _get_type(self, arg):
        # In IronPython type(u'x') is str. We want to report unicode anyway.
        if is_unicode(arg):
            return "<type 'unicode'>"
        return str(type(arg))
class _Converter(_BuiltInBase):
    """BuiltIn keywords for type conversion (integer, number, string,
    boolean, bytes, binary/octal/hex strings) and for creating lists and
    dictionaries. Public method docstrings are the user-facing keyword
    documentation shown by libdoc."""

    def convert_to_integer(self, item, base=None):
        """Converts the given item to an integer number.

        If the given item is a string, it is by default expected to be an
        integer in base 10. There are two ways to convert from other bases:

        - Give base explicitly to the keyword as ``base`` argument.
        - Prefix the given string with the base so that ``0b`` means binary
          (base 2), ``0o`` means octal (base 8), and ``0x`` means hex (base 16).
          The prefix is considered only when ``base`` argument is not given and
          may itself be prefixed with a plus or minus sign.

        The syntax is case-insensitive and possible spaces are ignored.

        Examples:
        | ${result} = | Convert To Integer | 100    |    | # Result is 100   |
        | ${result} = | Convert To Integer | FF AA  | 16 | # Result is 65450 |
        | ${result} = | Convert To Integer | 100    | 8  | # Result is 64    |
        | ${result} = | Convert To Integer | -100   | 2  | # Result is -4    |
        | ${result} = | Convert To Integer | 0b100  |    | # Result is 4     |
        | ${result} = | Convert To Integer | -0x100 |    | # Result is -256  |

        See also `Convert To Number`, `Convert To Binary`, `Convert To Octal`,
        `Convert To Hex`, and `Convert To Bytes`.
        """
        self._log_types(item)
        return self._convert_to_integer(item, base)

    def _convert_to_integer(self, orig, base=None):
        # Internal variant without argument-type logging; wraps any failure
        # (including a bad ``base``) into a single RuntimeError.
        try:
            item = self._handle_java_numbers(orig)
            item, base = self._get_base(item, base)
            if base:
                return int(item, self._convert_to_integer(base))
            return int(item)
        except:
            raise RuntimeError("'%s' cannot be converted to an integer: %s"
                               % (orig, get_error_message()))

    def _handle_java_numbers(self, item):
        # On Jython, unwrap java.lang.String/Number into Python equivalents;
        # a no-op everywhere else.
        if not JYTHON:
            return item
        if isinstance(item, String):
            return unic(item)
        if isinstance(item, Number):
            return item.doubleValue()
        return item

    def _get_base(self, item, base):
        # Splits an optional 0b/0o/0x prefix out of a string into a base,
        # honoring a leading sign. An explicit ``base`` argument wins over
        # any prefix. Returns (item, base).
        if not is_string(item):
            return item, base
        item = normalize(item)
        if item.startswith(('-', '+')):
            sign = item[0]
            item = item[1:]
        else:
            sign = ''
        bases = {'0b': 2, '0o': 8, '0x': 16}
        if base or not item.startswith(tuple(bases)):
            return sign+item, base
        return sign+item[2:], bases[item[:2]]

    def convert_to_binary(self, item, base=None, prefix=None, length=None):
        """Converts the given item to a binary string.

        The ``item``, with an optional ``base``, is first converted to an
        integer using `Convert To Integer` internally. After that it
        is converted to a binary number (base 2) represented as a
        string such as ``1011``.

        The returned value can contain an optional ``prefix`` and can be
        required to be of minimum ``length`` (excluding the prefix and a
        possible minus sign). If the value is initially shorter than
        the required length, it is padded with zeros.

        Examples:
        | ${result} = | Convert To Binary | 10 |         |           | # Result is 1010   |
        | ${result} = | Convert To Binary | F  | base=16 | prefix=0b | # Result is 0b1111 |
        | ${result} = | Convert To Binary | -2 | prefix=B | length=4 | # Result is -B0010 |

        See also `Convert To Integer`, `Convert To Octal` and `Convert To Hex`.
        """
        return self._convert_to_bin_oct_hex(bin, item, base, prefix, length)

    def convert_to_octal(self, item, base=None, prefix=None, length=None):
        """Converts the given item to an octal string.

        The ``item``, with an optional ``base``, is first converted to an
        integer using `Convert To Integer` internally. After that it
        is converted to an octal number (base 8) represented as a
        string such as ``775``.

        The returned value can contain an optional ``prefix`` and can be
        required to be of minimum ``length`` (excluding the prefix and a
        possible minus sign). If the value is initially shorter than
        the required length, it is padded with zeros.

        Examples:
        | ${result} = | Convert To Octal | 10 |         |            | # Result is 12      |
        | ${result} = | Convert To Octal | -F | base=16 | prefix=0   | # Result is -017    |
        | ${result} = | Convert To Octal | 16 | prefix=oct | length=4 | # Result is oct0020 |

        See also `Convert To Integer`, `Convert To Binary` and `Convert To Hex`.
        """
        return self._convert_to_bin_oct_hex(oct, item, base, prefix, length)

    def convert_to_hex(self, item, base=None, prefix=None, length=None,
                       lowercase=False):
        """Converts the given item to a hexadecimal string.

        The ``item``, with an optional ``base``, is first converted to an
        integer using `Convert To Integer` internally. After that it
        is converted to a hexadecimal number (base 16) represented as
        a string such as ``FF0A``.

        The returned value can contain an optional ``prefix`` and can be
        required to be of minimum ``length`` (excluding the prefix and a
        possible minus sign). If the value is initially shorter than
        the required length, it is padded with zeros.

        By default the value is returned as an upper case string, but the
        ``lowercase`` argument a true value (see `Boolean arguments`) turns
        the value (but not the given prefix) to lower case.

        Examples:
        | ${result} = | Convert To Hex | 255 |           |              | # Result is FF    |
        | ${result} = | Convert To Hex | -10 | prefix=0x | length=2     | # Result is -0x0A |
        | ${result} = | Convert To Hex | 255 | prefix=X | lowercase=yes | # Result is Xff   |

        See also `Convert To Integer`, `Convert To Binary` and `Convert To Octal`.
        """
        return self._convert_to_bin_oct_hex(hex, item, base, prefix, length,
                                            lowercase)

    def _convert_to_bin_oct_hex(self, method, item, base, prefix, length,
                                lowercase=False):
        # Shared implementation for the three keywords above. ``method`` is
        # the builtin bin/oct/hex; its output is normalized (py2 long 'L'
        # suffix stripped, builtin prefix removed) before padding/prefixing.
        self._log_types(item)
        ret = method(self._convert_to_integer(item, base)).upper().rstrip('L')
        prefix = prefix or ''
        if ret[0] == '-':
            # Keep the sign in front of the user-supplied prefix.
            prefix = '-' + prefix
            ret = ret[1:]
        if len(ret) > 1:  # oct(0) -> '0' (i.e. has no prefix)
            prefix_length = {bin: 2, oct: 1, hex: 2}[method]
            ret = ret[prefix_length:]
        if length:
            ret = ret.rjust(self._convert_to_integer(length), '0')
        if is_truthy(lowercase):
            ret = ret.lower()
        return prefix + ret

    def convert_to_number(self, item, precision=None):
        """Converts the given item to a floating point number.

        If the optional ``precision`` is positive or zero, the returned number
        is rounded to that number of decimal digits. Negative precision means
        that the number is rounded to the closest multiple of 10 to the power
        of the absolute precision.

        Examples:
        | ${result} = | Convert To Number | 42.512 |    | # Result is 42.512 |
        | ${result} = | Convert To Number | 42.512 | 1  | # Result is 42.5   |
        | ${result} = | Convert To Number | 42.512 | 0  | # Result is 43.0   |
        | ${result} = | Convert To Number | 42.512 | -1 | # Result is 40.0   |

        Notice that machines generally cannot store floating point numbers
        accurately. This may cause surprises with these numbers in general
        and also when they are rounded. For more information see, for example,
        these resources:

        - http://docs.python.org/2/tutorial/floatingpoint.html
        - http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition

        If you need an integer number, use `Convert To Integer` instead.
        """
        self._log_types(item)
        return self._convert_to_number(item, precision)

    def _convert_to_number(self, item, precision=None):
        number = self._convert_to_number_without_precision(item)
        if precision:
            number = round(number, self._convert_to_integer(precision))
        return number

    def _convert_to_number_without_precision(self, item):
        try:
            if JYTHON:
                item = self._handle_java_numbers(item)
            return float(item)
        except:
            # float() failed; retry via integer conversion so that e.g.
            # prefixed values like '0x10' are still accepted.
            error = get_error_message()
            try:
                return float(self._convert_to_integer(item))
            except RuntimeError:
                raise RuntimeError("'%s' cannot be converted to a floating "
                                   "point number: %s" % (item, error))

    def convert_to_string(self, item):
        """Converts the given item to a Unicode string.

        Uses ``__unicode__`` or ``__str__`` method with Python objects and
        ``toString`` with Java objects.

        Use `Encode String To Bytes` and `Decode Bytes To String` keywords
        in ``String`` library if you need to convert between Unicode and byte
        strings using different encodings. Use `Convert To Bytes` if you just
        want to create byte strings.
        """
        self._log_types(item)
        return self._convert_to_string(item)

    def _convert_to_string(self, item):
        return unic(item)

    def convert_to_boolean(self, item):
        """Converts the given item to Boolean true or false.

        Handles strings ``True`` and ``False`` (case-insensitive) as expected,
        otherwise returns item's
        [http://docs.python.org/2/library/stdtypes.html#truth|truth value]
        using Python's ``bool()`` method.
        """
        self._log_types(item)
        if is_string(item):
            if item.upper() == 'TRUE':
                return True
            if item.upper() == 'FALSE':
                return False
        return bool(item)

    def convert_to_bytes(self, input, input_type='text'):
        u"""Converts the given ``input`` to bytes according to the ``input_type``.

        Valid input types are listed below:

        - ``text:`` Converts text to bytes character by character. All
          characters with ordinal below 256 can be used and are converted to
          bytes with same values. Many characters are easiest to represent
          using escapes like ``\\x00`` or ``\\xff``.
        - ``int:`` Converts integers separated by spaces to bytes. Similarly as
          with `Convert To Integer`, it is possible to use binary, octal, or
          hex values by prefixing the values with ``0b``, ``0o``, or ``0x``,
          respectively.
        - ``hex:`` Converts hexadecimal values to bytes. Single byte is always
          two characters long (e.g. ``01`` or ``FF``). Spaces are ignored and
          can be used freely as a visual separator.
        - ``bin:`` Converts binary values to bytes. Single byte is always eight
          characters long (e.g. ``00001010``). Spaces are ignored and can be
          used freely as a visual separator.

        In addition to giving the input as a string, it is possible to use
        lists or other iterables containing individual characters or numbers.
        In that case numbers do not need to be padded to certain length and
        they cannot contain extra spaces.

        Examples (last column shows returned bytes):
        | ${bytes} = | Convert To Bytes | hyv\xe4    |     | # hyv\\xe4        |
        | ${bytes} = | Convert To Bytes | \\xff\\x07 |     | # \\xff\\x07      |
        | ${bytes} = | Convert To Bytes | 82 70      | int | # RF              |
        | ${bytes} = | Convert To Bytes | 0b10 0x10  | int | # \\x02\\x10      |
        | ${bytes} = | Convert To Bytes | ff 00 07   | hex | # \\xff\\x00\\x07 |
        | ${bytes} = | Convert To Bytes | 5246212121 | hex | # RF!!!           |
        | ${bytes} = | Convert To Bytes | 0000 1000  | bin | # \\x08           |
        | ${input} = | Create List      | 1          | 2   | 12                |
        | ${bytes} = | Convert To Bytes | ${input}   | int | # \\x01\\x02\\x0c |
        | ${bytes} = | Convert To Bytes | ${input}   | hex | # \\x01\\x02\\x12 |

        Use `Encode String To Bytes` in ``String`` library if you need to
        convert text to bytes using a certain encoding.

        New in Robot Framework 2.8.2.
        """
        try:
            try:
                # Dispatch to _get_ordinals_from_text/int/hex/bin by name.
                ordinals = getattr(self, '_get_ordinals_from_%s' % input_type)
            except AttributeError:
                raise RuntimeError("Invalid input type '%s'." % input_type)
            return ''.join(chr(o) for o in ordinals(input))
        except:
            raise RuntimeError("Creating bytes failed: %s" % get_error_message())

    def _get_ordinals_from_text(self, input):
        # Yields the ordinal of each character, validating the 0-255 range.
        for char in input:
            yield self._test_ordinal(ord(char), char, 'Character')

    def _test_ordinal(self, ordinal, original, type):
        # Validates that ``ordinal`` fits in one byte; ``type`` names the
        # input kind for the error message.
        if 0 <= ordinal <= 255:
            return ordinal
        raise RuntimeError("%s '%s' cannot be represented as a byte."
                           % (type, original))

    def _get_ordinals_from_int(self, input):
        # Accepts a space-separated string, a single integer, or an iterable
        # of integers.
        if is_string(input):
            input = input.split()
        elif is_integer(input):
            input = [input]
        for integer in input:
            ordinal = self._convert_to_integer(integer)
            yield self._test_ordinal(ordinal, integer, 'Integer')

    def _get_ordinals_from_hex(self, input):
        # Two hex characters per byte.
        for token in self._input_to_tokens(input, length=2):
            ordinal = self._convert_to_integer(token, base=16)
            yield self._test_ordinal(ordinal, token, 'Hex value')

    def _get_ordinals_from_bin(self, input):
        # Eight binary characters per byte.
        for token in self._input_to_tokens(input, length=8):
            ordinal = self._convert_to_integer(token, base=2)
            yield self._test_ordinal(ordinal, token, 'Binary value')

    def _input_to_tokens(self, input, length):
        # Splits a string into fixed-width tokens after removing whitespace;
        # non-string iterables are passed through as-is.
        if not is_string(input):
            return input
        input = ''.join(input.split())
        if len(input) % length != 0:
            raise RuntimeError('Expected input to be multiple of %d.' % length)
        return (input[i:i+length] for i in xrange(0, len(input), length))

    def create_list(self, *items):
        """Returns a list containing given items.

        The returned list can be assigned both to ``${scalar}`` and ``@{list}``
        variables.

        Examples:
        | @{list} =   | Create List | a    | b    | c    |
        | ${scalar} = | Create List | a    | b    | c    |
        | ${ints} =   | Create List | ${1} | ${2} | ${3} |
        """
        return list(items)

    @run_keyword_variant(resolve=0)
    def create_dictionary(self, *items):
        """Creates and returns a dictionary based on given items.

        Items are given using ``key=value`` syntax same way as ``&{dictionary}``
        variables are created in the Variable table. Both keys and values
        can contain variables, and possible equal sign in key can be escaped
        with a backslash like ``escaped\\=key=value``. It is also possible to
        get items from existing dictionaries by simply using them like
        ``&{dict}``.

        If same key is used multiple times, the last value has precedence.
        The returned dictionary is ordered, and values with strings as keys
        can also be accessed using convenient dot-access syntax like
        ``${dict.key}``.

        Examples:
        | &{dict} = | Create Dictionary | key=value | foo=bar |
        | Should Be True | ${dict} == {'key': 'value', 'foo': 'bar'} |
        | &{dict} = | Create Dictionary | ${1}=${2} | &{dict} | foo=new |
        | Should Be True | ${dict} == {1: 2, 'key': 'value', 'foo': 'new'} |
        | Should Be Equal | ${dict.key} | value |

        This keyword was changed in Robot Framework 2.9 in many ways:
        - Moved from ``Collections`` library to ``BuiltIn``.
        - Support also non-string keys in ``key=value`` syntax.
        - Deprecated old syntax to give keys and values separately.
        - Returned dictionary is ordered and dot-accessible.
        """
        separate, combined = self._split_dict_items(items)
        if separate:
            # Old deprecated calling convention: alternating keys and values.
            self.log("Giving keys and values separately to 'Create Dictionary' "
                     "keyword is deprecated. Use 'key=value' syntax instead.",
                     level='WARN')
            separate = self._format_separate_dict_items(separate)
        combined = DictVariableTableValue(combined).resolve(self._variables)
        result = DotDict(separate)
        result.update(combined)
        return result

    def _split_dict_items(self, items):
        # Leading items that are neither 'key=value' pairs nor &{dict}
        # variables belong to the deprecated separate-keys-and-values syntax;
        # everything from the first pair/dict onwards is handled together.
        separate = []
        for item in items:
            name, value = split_from_equals(item)
            if value is not None or VariableSplitter(item).is_dict_variable():
                break
            separate.append(item)
        return separate, items[len(separate):]

    def _format_separate_dict_items(self, separate):
        # Resolves variables and pairs up [key, value, key, value, ...].
        separate = self._variables.replace_list(separate)
        if len(separate) % 2 != 0:
            raise DataError('Expected even number of keys and values, got %d.'
                            % len(separate))
        return [separate[i:i+2] for i in range(0, len(separate), 2)]
class _Verify(_BuiltInBase):
    """Keywords for failing tests and verifying values, strings, patterns,
    containment and lengths."""
    def _set_and_remove_tags(self, tags):
        # Tags starting with '-' are removed, all others are added. Both
        # operations go through the public `Remove Tags`/`Set Tags` keywords
        # so the semantics (and logging) match those keywords exactly.
        set_tags = [tag for tag in tags if not tag.startswith('-')]
        remove_tags = [tag[1:] for tag in tags if tag.startswith('-')]
        if remove_tags:
            self.remove_tags(*remove_tags)
        if set_tags:
            self.set_tags(*set_tags)
    def fail(self, msg=None, *tags):
        """Fails the test with the given message and optionally alters its tags.
        The error message is specified using the ``msg`` argument.
        It is possible to use HTML in the given error message, similarly
        as with any other keyword accepting an error message, by prefixing
        the error with ``*HTML*``.
        It is possible to modify tags of the current test case by passing tags
        after the message. Tags starting with a hyphen (e.g. ``-regression``)
        are removed and others added. Tags are modified using `Set Tags` and
        `Remove Tags` internally, and the semantics setting and removing them
        are the same as with these keywords.
        Examples:
        | Fail | Test not ready | | | # Fails with the given message. |
        | Fail | *HTML*<b>Test not ready</b> | | | # Fails using HTML in the message. |
        | Fail | Test not ready | not-ready | | # Fails and adds 'not-ready' tag. |
        | Fail | OS not supported | -regression | | # Removes tag 'regression'. |
        | Fail | My message | tag | -t* | # Removes all tags starting with 't' except the newly added 'tag'. |
        See `Fatal Error` if you need to stop the whole test execution.
        Support for modifying tags was added in Robot Framework 2.7.4 and
        HTML message support in 2.8.
        """
        self._set_and_remove_tags(tags)
        # Conditional expression selects which instance to raise; the
        # no-argument AssertionError keeps the framework's default message.
        raise AssertionError(msg) if msg else AssertionError()
    def fatal_error(self, msg=None):
        """Stops the whole test execution.
        The test or suite where this keyword is used fails with the provided
        message, and subsequent tests fail with a canned message.
        Possible teardowns will nevertheless be executed.
        See `Fail` if you only want to stop one test case unconditionally.
        """
        error = AssertionError(msg) if msg else AssertionError()
        # This marker attribute tells Robot Framework to stop the whole run.
        error.ROBOT_EXIT_ON_FAILURE = True
        raise error
    def should_not_be_true(self, condition, msg=None):
        """Fails if the given condition is true.
        See `Should Be True` for details about how ``condition`` is evaluated
        and how ``msg`` can be used to override the default error message.
        """
        if not msg:
            msg = "'%s' should not be true." % condition
        asserts.fail_if(self._is_true(condition), msg)
    def should_be_true(self, condition, msg=None):
        """Fails if the given condition is not true.
        If ``condition`` is a string (e.g. ``${rc} < 10``), it is evaluated as
        a Python expression as explained in `Evaluating expressions` and the
        keyword status is decided based on the result. If a non-string item is
        given, the status is got directly from its
        [http://docs.python.org/2/library/stdtypes.html#truth|truth value].
        The default error message (``<condition> should be true``) is not very
        informative, but it can be overridden with the ``msg`` argument.
        Examples:
        | Should Be True | ${rc} < 10 |
        | Should Be True | '${status}' == 'PASS' | # Strings must be quoted |
        | Should Be True | ${number} | # Passes if ${number} is not zero |
        | Should Be True | ${list} | # Passes if ${list} is not empty |
        Variables used like ``${variable}``, as in the examples above, are
        replaced in the expression before evaluation. Variables are also
        available in the evaluation namespace and can be accessed using special
        syntax ``$variable``. This is a new feature in Robot Framework 2.9
        and it is explained more thoroughly in `Evaluating expressions`.
        Examples:
        | Should Be True | $rc < 10 |
        | Should Be True | $status == 'PASS' | # Expected string must be quoted |
        Starting from Robot Framework 2.8, `Should Be True` automatically
        imports Python's [http://docs.python.org/2/library/os.html|os] and
        [http://docs.python.org/2/library/sys.html|sys] modules that contain
        several useful attributes:
        | Should Be True | os.linesep == '\\n' | # Unixy |
        | Should Be True | os.linesep == '\\r\\n' | # Windows |
        | Should Be True | sys.platform == 'darwin' | # OS X |
        | Should Be True | sys.platform.startswith('java') | # Jython |
        """
        if not msg:
            msg = "'%s' should be true." % condition
        asserts.fail_unless(self._is_true(condition), msg)
    def should_be_equal(self, first, second, msg=None, values=True):
        """Fails if the given objects are unequal.
        Optional ``msg`` and ``values`` arguments specify how to construct
        the error message if this keyword fails:
        - If ``msg`` is not given, the error message is ``<first> != <second>``.
        - If ``msg`` is given and ``values`` gets a true value, the error
          message is ``<msg>: <first> != <second>``.
        - If ``msg`` is given and ``values`` gets a false value, the error
          message is simply ``<msg>``.
        ``values`` is true by default, but can be turned to false by using,
        for example, string ``false`` or ``no values``. See `Boolean arguments`
        section for more details.
        """
        self._log_types(first, second)
        self._should_be_equal(first, second, msg, values)
    def _should_be_equal(self, first, second, msg, values):
        asserts.fail_unless_equal(first, second, msg,
                                  self._include_values(values))
    def _include_values(self, values):
        # ``values`` must be truthy and not the special string 'NO VALUES'
        # (case-insensitively) for the compared values to appear in the
        # failure message.
        return is_truthy(values) and str(values).upper() != 'NO VALUES'
    def should_not_be_equal(self, first, second, msg=None, values=True):
        """Fails if the given objects are equal.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        self._log_types(first, second)
        self._should_not_be_equal(first, second, msg, values)
    def _should_not_be_equal(self, first, second, msg, values):
        asserts.fail_if_equal(first, second, msg, self._include_values(values))
    def should_not_be_equal_as_integers(self, first, second, msg=None,
                                        values=True, base=None):
        """Fails if objects are equal after converting them to integers.
        See `Convert To Integer` for information how to convert integers from
        other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        See `Should Be Equal As Integers` for some usage examples.
        """
        self._log_types(first, second)
        self._should_not_be_equal(self._convert_to_integer(first, base),
                                  self._convert_to_integer(second, base),
                                  msg, values)
    def should_be_equal_as_integers(self, first, second, msg=None, values=True,
                                    base=None):
        """Fails if objects are unequal after converting them to integers.
        See `Convert To Integer` for information how to convert integers from
        other bases than 10 using ``base`` argument or ``0b/0o/0x`` prefixes.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        Examples:
        | Should Be Equal As Integers | 42 | ${42} | Error message |
        | Should Be Equal As Integers | ABCD | abcd | base=16 |
        | Should Be Equal As Integers | 0b1011 | 11 |
        """
        self._log_types(first, second)
        self._should_be_equal(self._convert_to_integer(first, base),
                              self._convert_to_integer(second, base),
                              msg, values)
    def should_not_be_equal_as_numbers(self, first, second, msg=None,
                                       values=True, precision=6):
        """Fails if objects are equal after converting them to real numbers.
        The conversion is done with `Convert To Number` keyword using the
        given ``precision``.
        See `Should Be Equal As Numbers` for examples on how to use
        ``precision`` and why it does not always work as expected. See also
        `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        self._log_types(first, second)
        first = self._convert_to_number(first, precision)
        second = self._convert_to_number(second, precision)
        self._should_not_be_equal(first, second, msg, values)
    def should_be_equal_as_numbers(self, first, second, msg=None, values=True,
                                   precision=6):
        """Fails if objects are unequal after converting them to real numbers.
        The conversion is done with `Convert To Number` keyword using the
        given ``precision``.
        Examples:
        | Should Be Equal As Numbers | ${x} | 1.1 | | # Passes if ${x} is 1.1 |
        | Should Be Equal As Numbers | 1.123 | 1.1 | precision=1 | # Passes |
        | Should Be Equal As Numbers | 1.123 | 1.4 | precision=0 | # Passes |
        | Should Be Equal As Numbers | 112.3 | 75 | precision=-2 | # Passes |
        As discussed in the documentation of `Convert To Number`, machines
        generally cannot store floating point numbers accurately. Because of
        this limitation, comparing floats for equality is problematic and
        a correct approach to use depends on the context. This keyword uses
        a very naive approach of rounding the numbers before comparing them,
        which is both prone to rounding errors and does not work very well if
        numbers are really big or small. For more information about comparing
        floats, and ideas on how to implement your own context specific
        comparison algorithm, see
        http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/.
        See `Should Not Be Equal As Numbers` for a negative version of this
        keyword and `Should Be Equal` for an explanation on how to override
        the default error message with ``msg`` and ``values``.
        """
        self._log_types(first, second)
        first = self._convert_to_number(first, precision)
        second = self._convert_to_number(second, precision)
        self._should_be_equal(first, second, msg, values)
    def should_not_be_equal_as_strings(self, first, second, msg=None, values=True):
        """Fails if objects are equal after converting them to strings.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        self._log_types(first, second)
        # NOTE: ``for i in first, second`` iterates a bare two-item tuple;
        # this parenthesis-free form inside a comprehension is Python 2 only.
        first, second = [self._convert_to_string(i) for i in first, second]
        self._should_not_be_equal(first, second, msg, values)
    def should_be_equal_as_strings(self, first, second, msg=None, values=True):
        """Fails if objects are unequal after converting them to strings.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        self._log_types(first, second)
        # NOTE: same Python 2 only tuple-in-comprehension form as above.
        first, second = [self._convert_to_string(i) for i in first, second]
        self._should_be_equal(first, second, msg, values)
    def should_not_start_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` starts with the string ``str2``.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values, 'starts with')
        asserts.fail_if(str1.startswith(str2), msg)
    def should_start_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` does not start with the string ``str2``.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values, 'does not start with')
        asserts.fail_unless(str1.startswith(str2), msg)
    def should_not_end_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` ends with the string ``str2``.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values, 'ends with')
        asserts.fail_if(str1.endswith(str2), msg)
    def should_end_with(self, str1, str2, msg=None, values=True):
        """Fails if the string ``str1`` does not end with the string ``str2``.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(str1, str2, msg, values, 'does not end with')
        asserts.fail_unless(str1.endswith(str2), msg)
    def should_not_contain(self, item1, item2, msg=None, values=True):
        """Fails if ``item1`` contains ``item2`` one or more times.
        Works with strings, lists, and anything that supports Python's ``in``
        operator. See `Should Be Equal` for an explanation on how to override
        the default error message with ``msg`` and ``values``.
        Examples:
        | Should Not Contain | ${output} | FAILED |
        | Should Not Contain | ${some_list} | value |
        """
        msg = self._get_string_msg(item1, item2, msg, values, 'contains')
        asserts.fail_if(item2 in item1, msg)
    def should_contain(self, item1, item2, msg=None, values=True):
        """Fails if ``item1`` does not contain ``item2`` one or more times.
        Works with strings, lists, and anything that supports Python's ``in``
        operator. See `Should Be Equal` for an explanation on how to override
        the default error message with ``msg`` and ``values``.
        Examples:
        | Should Contain | ${output} | PASS |
        | Should Contain | ${some_list} | value |
        """
        msg = self._get_string_msg(item1, item2, msg, values, 'does not contain')
        asserts.fail_unless(item2 in item1, msg)
    def should_contain_x_times(self, item1, item2, count, msg=None):
        """Fails if ``item1`` does not contain ``item2`` ``count`` times.
        Works with strings, lists and all objects that `Get Count` works
        with. The default error message can be overridden with ``msg`` and
        the actual count is always logged.
        Examples:
        | Should Contain X Times | ${output} | hello | 2 |
        | Should Contain X Times | ${some list} | value | 3 |
        """
        count = self._convert_to_integer(count)
        x = self.get_count(item1, item2)
        if not msg:
            # ``s()`` adds a plural 's' when the count is not one.
            msg = "'%s' contains '%s' %d time%s, not %d time%s." \
                    % (unic(item1), unic(item2), x, s(x), count, s(count))
        self.should_be_equal_as_integers(x, count, msg, values=False)
    def get_count(self, item1, item2):
        """Returns and logs how many times ``item2`` is found from ``item1``.
        This keyword works with Python strings and lists and all objects
        that either have ``count`` method or can be converted to Python lists.
        Example:
        | ${count} = | Get Count | ${some item} | interesting value |
        | Should Be True | 5 < ${count} < 10 |
        """
        if not hasattr(item1, 'count'):
            try:
                item1 = list(item1)
            except:
                # Bare except: any conversion failure is reported uniformly
                # using the framework's last-error message helper.
                raise RuntimeError("Converting '%s' to list failed: %s"
                                   % (item1, get_error_message()))
        count = item1.count(item2)
        self.log('Item found from the first item %d time%s' % (count, s(count)))
        return count
    def should_not_match(self, string, pattern, msg=None, values=True):
        """Fails if the given ``string`` matches the given ``pattern``.
        Pattern matching is similar as matching files in a shell, and it is
        always case-sensitive. In the pattern ``*`` matches to anything and
        ``?`` matches to any single character.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(string, pattern, msg, values, 'matches')
        asserts.fail_if(self._matches(string, pattern), msg)
    def should_match(self, string, pattern, msg=None, values=True):
        """Fails unless the given ``string`` matches the given ``pattern``.
        Pattern matching is similar as matching files in a shell, and it is
        always case-sensitive. In the pattern, ``*`` matches to anything and
        ``?`` matches to any single character.
        See `Should Be Equal` for an explanation on how to override the default
        error message with ``msg`` and ``values``.
        """
        msg = self._get_string_msg(string, pattern, msg, values,
                                   'does not match')
        asserts.fail_unless(self._matches(string, pattern), msg)
    def should_match_regexp(self, string, pattern, msg=None, values=True):
        """Fails if ``string`` does not match ``pattern`` as a regular expression.
        Regular expression check is implemented using the Python
        [http://docs.python.org/2/library/re.html|re module]. Python's regular
        expression syntax is derived from Perl, and it is thus also very
        similar to the syntax used, for example, in Java, Ruby and .NET.
        Things to note about the regexp syntax in Robot Framework test data:
        1) Backslash is an escape character in the test data, and possible
        backslashes in the pattern must thus be escaped with another backslash
        (e.g. ``\\\\d\\\\w+``).
        2) Strings that may contain special characters, but should be handled
        as literal strings, can be escaped with the `Regexp Escape` keyword.
        3) The given pattern does not need to match the whole string. For
        example, the pattern ``ello`` matches the string ``Hello world!``. If
        a full match is needed, the ``^`` and ``$`` characters can be used to
        denote the beginning and end of the string, respectively. For example,
        ``^ello$`` only matches the exact string ``ello``.
        4) Possible flags altering how the expression is parsed (e.g.
        ``re.IGNORECASE``, ``re.MULTILINE``) can be set by prefixing the
        pattern with the ``(?iLmsux)`` group like ``(?im)pattern``. The
        available flags are ``i`` (case-insensitive), ``m`` (multiline mode),
        ``s`` (dotall mode), ``x`` (verbose), ``u`` (Unicode dependent) and
        ``L`` (locale dependent).
        If this keyword passes, it returns the portion of the string that
        matched the pattern. Additionally, the possible captured groups are
        returned.
        See the `Should Be Equal` keyword for an explanation on how to override
        the default error message with the ``msg`` and ``values`` arguments.
        Examples:
        | Should Match Regexp | ${output} | \\\\d{6} | # Output contains six numbers |
        | Should Match Regexp | ${output} | ^\\\\d{6}$ | # Six numbers and nothing more |
        | ${ret} = | Should Match Regexp | Foo: 42 | (?i)foo: \\\\d+ |
        | ${match} | ${group1} | ${group2} = |
        | ... | Should Match Regexp | Bar: 43 | (Foo|Bar): (\\\\d+) |
        =>
        | ${ret} = 'Foo: 42'
        | ${match} = 'Bar: 43'
        | ${group1} = 'Bar'
        | ${group2} = '43'
        """
        msg = self._get_string_msg(string, pattern, msg, values, 'does not match')
        res = re.search(pattern, string)
        asserts.fail_if_none(res, msg, values=False)
        match = res.group(0)
        groups = res.groups()
        # With capturing groups the whole match and all groups are returned
        # as a list; otherwise only the matched substring.
        if groups:
            return [match] + list(groups)
        return match
    def should_not_match_regexp(self, string, pattern, msg=None, values=True):
        """Fails if ``string`` matches ``pattern`` as a regular expression.
        See `Should Match Regexp` for more information about arguments.
        """
        msg = self._get_string_msg(string, pattern, msg, values, 'matches')
        asserts.fail_unless_none(re.search(pattern, string), msg, values=False)
    def get_length(self, item):
        """Returns and logs the length of the given item as an integer.
        The item can be anything that has a length, for example, a string,
        a list, or a mapping. The keyword first tries to get the length with
        the Python function ``len``, which calls the item's ``__len__`` method
        internally. If that fails, the keyword tries to call the item's
        possible ``length`` and ``size`` methods directly. The final attempt is
        trying to get the value of the item's ``length`` attribute. If all
        these attempts are unsuccessful, the keyword fails.
        Examples:
        | ${length} = | Get Length | Hello, world! | |
        | Should Be Equal As Integers | ${length} | 13 |
        | @{list} = | Create List | Hello, | world! |
        | ${length} = | Get Length | ${list} | |
        | Should Be Equal As Integers | ${length} | 2 |
        See also `Length Should Be`, `Should Be Empty` and `Should Not Be
        Empty`.
        """
        length = self._get_length(item)
        self.log('Length is %d' % length)
        return length
    def _get_length(self, item):
        # Fallback chain: len(item) -> item.length() -> item.size() ->
        # item.length attribute. RERAISED_EXCEPTIONS (e.g. framework
        # timeouts) are always propagated immediately at every level.
        try:
            return len(item)
        except RERAISED_EXCEPTIONS:
            raise
        except:
            try:
                return item.length()
            except RERAISED_EXCEPTIONS:
                raise
            except:
                try:
                    return item.size()
                except RERAISED_EXCEPTIONS:
                    raise
                except:
                    try:
                        return item.length
                    except RERAISED_EXCEPTIONS:
                        raise
                    except:
                        raise RuntimeError("Could not get length of '%s'." % item)
    def length_should_be(self, item, length, msg=None):
        """Verifies that the length of the given item is correct.
        The length of the item is got using the `Get Length` keyword. The
        default error message can be overridden with the ``msg`` argument.
        """
        length = self._convert_to_integer(length)
        actual = self.get_length(item)
        if actual != length:
            raise AssertionError(msg or "Length of '%s' should be %d but is %d."
                                        % (item, length, actual))
    def should_be_empty(self, item, msg=None):
        """Verifies that the given item is empty.
        The length of the item is got using the `Get Length` keyword. The
        default error message can be overridden with the ``msg`` argument.
        """
        if self.get_length(item) > 0:
            raise AssertionError(msg or "'%s' should be empty." % item)
    def should_not_be_empty(self, item, msg=None):
        """Verifies that the given item is not empty.
        The length of the item is got using the `Get Length` keyword. The
        default error message can be overridden with the ``msg`` argument.
        """
        if self.get_length(item) == 0:
            raise AssertionError(msg or "'%s' should not be empty." % item)
    def _get_string_msg(self, str1, str2, msg, values, delim):
        # ``delim`` is the verb phrase joining the two values in the default
        # message, e.g. 'does not contain' or 'starts with'.
        default = "'%s' %s '%s'" % (unic(str1), delim, unic(str2))
        if not msg:
            msg = default
        elif self._include_values(values):
            msg = '%s: %s' % (msg, default)
        return msg
class _Variables(_BuiltInBase):
    """Keywords for getting, setting and inspecting Robot Framework
    variables in different scopes."""
    def get_variables(self, no_decoration=False):
        """Returns a dictionary containing all variables in the current scope.
        Variables are returned as a special dictionary that allows accessing
        variables in space, case, and underscore insensitive manner similarly
        as accessing variables in the test data. This dictionary supports all
        same operations as normal Python dictionaries and, for example,
        Collections library can be used to access or modify it. Modifying the
        returned dictionary has no effect on the variables available in the
        current scope.
        By default variables are returned with ``${}``, ``@{}`` or ``&{}``
        decoration based on variable types. Giving a true value (see `Boolean
        arguments`) to the optional argument ``no_decoration`` will return
        the variables without the decoration. This option is new in Robot
        Framework 2.9.
        Example:
        | ${example_variable} = | Set Variable | example value |
        | ${variables} = | Get Variables | |
        | Dictionary Should Contain Key | ${variables} | \\${example_variable} |
        | Dictionary Should Contain Key | ${variables} | \\${ExampleVariable} |
        | Set To Dictionary | ${variables} | \\${name} | value |
        | Variable Should Not Exist | \\${name} | | |
        | ${no decoration} = | Get Variables | no_decoration=Yes |
        | Dictionary Should Contain Key | ${no decoration} | example_variable |
        Note: Prior to Robot Framework 2.7.4 variables were returned as
        a custom object that did not support all dictionary methods.
        """
        return self._variables.as_dict(decoration=is_falsy(no_decoration))
    @run_keyword_variant(resolve=0)
    def get_variable_value(self, name, default=None):
        """Returns variable value or ``default`` if the variable does not exist.
        The name of the variable can be given either as a normal variable name
        (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
        that the former has some limitations explained in `Set Suite Variable`.
        Examples:
        | ${x} = | Get Variable Value | ${a} | default |
        | ${y} = | Get Variable Value | ${a} | ${b} |
        | ${z} = | Get Variable Value | ${z} | |
        =>
        | ${x} gets value of ${a} if ${a} exists and string 'default' otherwise
        | ${y} gets value of ${a} if ${a} exists and value of ${b} otherwise
        | ${z} is set to Python None if it does not exist previously
        See `Set Variable If` for another keyword to set variables dynamically.
        """
        try:
            return self._variables[self._get_var_name(name)]
        except DataError:
            # Variable not found: resolve possible variables in the default.
            return self._variables.replace_scalar(default)
    def log_variables(self, level='INFO'):
        """Logs all variables in the current scope with given log level."""
        variables = self.get_variables()
        # Sort case-insensitively by the plain name, i.e. without the
        # surrounding ${}/@{}/&{} decoration (s[2:-1]).
        for name in sorted(variables, key=lambda s: s[2:-1].lower()):
            msg = format_assign_message(name, variables[name], cut_long=False)
            self.log(msg, level)
    @run_keyword_variant(resolve=0)
    def variable_should_exist(self, name, msg=None):
        """Fails unless the given variable exists within the current scope.
        The name of the variable can be given either as a normal variable name
        (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
        that the former has some limitations explained in `Set Suite Variable`.
        The default error message can be overridden with the ``msg`` argument.
        See also `Variable Should Not Exist` and `Keyword Should Exist`.
        """
        name = self._get_var_name(name)
        msg = self._variables.replace_string(msg) if msg \
            else "Variable %s does not exist." % name
        try:
            # Lookup only; the value itself is not needed.
            self._variables[name]
        except DataError:
            raise AssertionError(msg)
    @run_keyword_variant(resolve=0)
    def variable_should_not_exist(self, name, msg=None):
        """Fails if the given variable exists within the current scope.
        The name of the variable can be given either as a normal variable name
        (e.g. ``${NAME}``) or in escaped format (e.g. ``\\${NAME}``). Notice
        that the former has some limitations explained in `Set Suite Variable`.
        The default error message can be overridden with the ``msg`` argument.
        See also `Variable Should Exist` and `Keyword Should Exist`.
        """
        name = self._get_var_name(name)
        msg = self._variables.replace_string(msg) if msg \
            else "Variable %s exists." % name
        try:
            self._variables[name]
        except DataError:
            pass
        else:
            raise AssertionError(msg)
    def replace_variables(self, text):
        """Replaces variables in the given text with their current values.
        If the text contains undefined variables, this keyword fails.
        If the given ``text`` contains only a single variable, its value is
        returned as-is and it can be any object. Otherwise this keyword
        always returns a string.
        Example:
        The file ``template.txt`` contains ``Hello ${NAME}!`` and variable
        ``${NAME}`` has the value ``Robot``.
        | ${template} = | Get File | ${CURDIR}/template.txt |
        | ${message} = | Replace Variables | ${template} |
        | Should Be Equal | ${message} | Hello Robot! |
        """
        return self._variables.replace_scalar(text)
    def set_variable(self, *values):
        """Returns the given values which can then be assigned to a variables.
        This keyword is mainly used for setting scalar variables.
        Additionally it can be used for converting a scalar variable
        containing a list to a list variable or to multiple scalar variables.
        It is recommended to use `Create List` when creating new lists.
        Examples:
        | ${hi} = | Set Variable | Hello, world! |
        | ${hi2} = | Set Variable | I said: ${hi} |
        | ${var1} | ${var2} = | Set Variable | Hello | world |
        | @{list} = | Set Variable | ${list with some items} |
        | ${item1} | ${item2} = | Set Variable | ${list with 2 items} |
        Variables created with this keyword are available only in the
        scope where they are created. See `Set Global Variable`,
        `Set Test Variable` and `Set Suite Variable` for information on how to
        set variables so that they are available also in a larger scope.
        """
        if len(values) == 0:
            return ''
        elif len(values) == 1:
            return values[0]
        else:
            return list(values)
    @run_keyword_variant(resolve=0)
    def set_test_variable(self, name, *values):
        """Makes a variable available everywhere within the scope of the current test.
        Variables set with this keyword are available everywhere within the
        scope of the currently executed test case. For example, if you set a
        variable in a user keyword, it is available both in the test case level
        and also in all other user keywords used in the current test. Other
        test cases will not see variables set with this keyword.
        See `Set Suite Variable` for more information and examples.
        """
        name = self._get_var_name(name)
        value = self._get_var_value(name, values)
        self._variables.set_test(name, value)
        self._log_set_variable(name, value)
    @run_keyword_variant(resolve=0)
    def set_suite_variable(self, name, *values):
        """Makes a variable available everywhere within the scope of the current suite.
        Variables set with this keyword are available everywhere within the
        scope of the currently executed test suite. Setting variables with this
        keyword thus has the same effect as creating them using the Variable
        table in the test data file or importing them from variable files.
        Possible child test suites do not see variables set with this keyword
        by default. Starting from Robot Framework 2.9, that can be controlled
        by using ``children=<option>`` as the last argument. If the specified
        ``<option>`` is a non-empty string or any other value considered true
        in Python, the variable is set also to the child suites. Parent and
        sibling suites will never see variables set with this keyword.
        The name of the variable can be given either as a normal variable name
        (e.g. ``${NAME}``) or in escaped format as ``\\${NAME}`` or ``$NAME``.
        Variable value can be given using the same syntax as when variables
        are created in the Variable table.
        If a variable already exists within the new scope, its value will be
        overwritten. Otherwise a new variable is created. If a variable already
        exists within the current scope, the value can be left empty and the
        variable within the new scope gets the value within the current scope.
        Examples:
        | Set Suite Variable | ${SCALAR} | Hello, world! |
        | Set Suite Variable | ${SCALAR} | Hello, world! | children=true |
        | Set Suite Variable | @{LIST} | First item | Second item |
        | Set Suite Variable | &{DICT} | key=value | foo=bar |
        | ${ID} = | Get ID |
        | Set Suite Variable | ${ID} |
        To override an existing value with an empty value, use built-in
        variables ``${EMPTY}``, ``@{EMPTY}`` or ``&{EMPTY}``:
        | Set Suite Variable | ${SCALAR} | ${EMPTY} |
        | Set Suite Variable | @{LIST} | @{EMPTY} | # New in RF 2.7.4 |
        | Set Suite Variable | &{DICT} | &{EMPTY} | # New in RF 2.9 |
        *NOTE:* If the variable has value which itself is a variable (escaped
        or not), you must always use the escaped format to set the variable:
        Example:
        | ${NAME} = | Set Variable | \\${var} |
        | Set Suite Variable | ${NAME} | value | # Sets variable ${var} |
        | Set Suite Variable | \\${NAME} | value | # Sets variable ${NAME} |
        This limitation applies also to `Set Test Variable`, `Set Global
        Variable`, `Variable Should Exist`, `Variable Should Not Exist` and
        `Get Variable Value` keywords.
        """
        name = self._get_var_name(name)
        if (values and is_string(values[-1]) and
                values[-1].startswith('children=')):
            # 9 == len('children='); the option value may itself contain
            # variables, so it is resolved before the truthiness check.
            children = self._variables.replace_scalar(values[-1][9:])
            children = is_truthy(children)
            values = values[:-1]
        else:
            children = False
        value = self._get_var_value(name, values)
        self._variables.set_suite(name, value, children=children)
        self._log_set_variable(name, value)
    @run_keyword_variant(resolve=0)
    def set_global_variable(self, name, *values):
        """Makes a variable available globally in all tests and suites.
        Variables set with this keyword are globally available in all test
        cases and suites executed after setting them. Setting variables with
        this keyword thus has the same effect as creating from the command line
        using the options ``--variable`` or ``--variablefile``. Because this
        keyword can change variables everywhere, it should be used with care.
        See `Set Suite Variable` for more information and examples.
        """
        name = self._get_var_name(name)
        value = self._get_var_value(name, values)
        self._variables.set_global(name, value)
        self._log_set_variable(name, value)
    # Helpers
    def _get_var_name(self, orig):
        # Normalizes a variable name given in any supported format
        # (``${NAME}``, ``\${NAME}`` or ``$NAME``) into ``${NAME}`` form.
        name = self._resolve_possible_variable(orig)
        try:
            return self._unescape_variable_if_needed(name)
        except ValueError:
            raise RuntimeError("Invalid variable syntax '%s'." % orig)
    def _resolve_possible_variable(self, name):
        try:
            resolved = self._variables.replace_string(name)
            return self._unescape_variable_if_needed(resolved)
        except (KeyError, ValueError, DataError):
            # Could not resolve/unescape: use the name as given.
            return name
    def _unescape_variable_if_needed(self, name):
        # Strip a single leading backslash from an escaped name.
        if name.startswith('\\'):
            name = name[1:]
        if len(name) < 2:
            raise ValueError
        # Accept the curly-brace-free form, e.g. '$NAME' -> '${NAME}'.
        if name[0] in '$@&' and name[1] != '{':
            name = '%s{%s}' % (name[0], name[1:])
        if is_var(name):
            return name
        # Support for possible internal variables (issue 397)
        name = '%s{%s}' % (name[0], self.replace_variables(name[2:-1]))
        if is_var(name):
            return name
        raise ValueError
    def _get_var_value(self, name, values):
        if not values:
            # No value given: reuse the variable's value in the current scope.
            return self._variables[name]
        # TODO: In RF 2.10/3.0 the if branch below can be removed and
        # VariableTableValue used with all variables. See issue #1919.
        if name[0] == '$':
            if len(values) != 1 or VariableSplitter(values[0]).is_list_variable():
                raise DataError("Setting list value to scalar variable '%s' "
                                "is not supported anymore. Create list "
                                "variable '@%s' instead." % (name, name[1:]))
            return self._variables.replace_scalar(values[0])
        return VariableTableValue(values, name).resolve(self._variables)
    def _log_set_variable(self, name, value):
        self.log(format_assign_message(name, value))
class _RunKeyword(_BuiltInBase):
# If you use any of these run keyword variants from another library, you
# should register those keywords with 'register_run_keyword' method. See
# the documentation of that method at the end of this file. There are also
# other run keyword variant keywords in BuiltIn which can also be seen
# at the end of this file.
def run_keyword(self, name, *args):
"""Executes the given keyword with the given arguments.
Because the name of the keyword to execute is given as an argument, it
can be a variable and thus set dynamically, e.g. from a return value of
another keyword or from the command line.
"""
if not is_string(name):
raise RuntimeError('Keyword name must be a string.')
kw = Keyword(name, args=args)
return kw.run(self._context)
    def run_keywords(self, *keywords):
        """Executes all the given keywords in a sequence.
        This keyword is mainly useful in setups and teardowns when they need
        to take care of multiple actions and creating a new higher level user
        keyword would be an overkill.
        By default all arguments are expected to be keywords to be executed.
        Examples:
        | Run Keywords | Initialize database | Start servers | Clear logs |
        | Run Keywords | ${KW 1} | ${KW 2} |
        | Run Keywords | @{KEYWORDS} |
        Starting from Robot Framework 2.7.6, keywords can also be run with
        arguments using upper case ``AND`` as a separator between keywords.
        The keywords are executed so that the first argument is the first
        keyword and proceeding arguments until the first ``AND`` are arguments
        to it. First argument after the first ``AND`` is the second keyword and
        proceeding arguments until the next ``AND`` are its arguments. And so on.
        Examples:
        | Run Keywords | Initialize database | db1 | AND | Start servers | server1 | server2 |
        | Run Keywords | Initialize database | ${DB NAME} | AND | Start servers | @{SERVERS} | AND | Clear logs |
        | Run Keywords | ${KW} | AND | @{KW WITH ARGS} |
        Notice that the ``AND`` control argument must be used explicitly and
        cannot itself come from a variable. If you need to use literal ``AND``
        string as argument, you can either use variables or escape it with
        a backslash like ``\\AND``.
        """
        # Split into (name, args) pairs and run them, continuing on
        # continuable failures so every keyword gets a chance to run.
        self._run_keywords(self._split_run_keywords(list(keywords)))
    def _run_keywords(self, iterable):
        # Runs (name, args) pairs from ``iterable``, collecting continuable
        # failures so that all keywords are attempted before errors are
        # reported together.
        errors = []
        for kw, args in iterable:
            try:
                self.run_keyword(kw, *args)
            except ExecutionPassed as err:
                # Pass Execution and friends must still carry earlier failures.
                err.set_earlier_failures(errors)
                raise err
            except ExecutionFailed as err:
                errors.extend(err.get_errors())
                # Stop on non-continuable failures (syntax errors, timeouts,
                # fatal errors, or normal failures outside teardowns).
                if not err.can_continue(self._context.in_teardown):
                    break
        if errors:
            raise ExecutionFailures(errors)
def _split_run_keywords(self, keywords):
if 'AND' not in keywords:
for name in self._variables.replace_list(keywords):
yield name, ()
else:
for name, args in self._split_run_keywords_from_and(keywords):
yield name, args
def _split_run_keywords_from_and(self, keywords):
while 'AND' in keywords:
index = keywords.index('AND')
yield self._resolve_run_keywords_name_and_args(keywords[:index])
keywords = keywords[index+1:]
yield self._resolve_run_keywords_name_and_args(keywords)
def _resolve_run_keywords_name_and_args(self, kw_call):
kw_call = self._variables.replace_list(kw_call, replace_until=1)
if not kw_call:
raise DataError('Incorrect use of AND')
return kw_call[0], kw_call[1:]
    def run_keyword_if(self, condition, name, *args):
        """Runs the given keyword with the given arguments, if ``condition`` is true.
        The given ``condition`` is evaluated in Python as explained in
        `Evaluating expressions`, and ``name`` and ``*args`` have same
        semantics as with `Run Keyword`.
        Example, a simple if/else construct:
        | ${status} | ${value} = | `Run Keyword And Ignore Error` | `My Keyword` |
        | `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg |
        | `Run Keyword Unless` | '${status}' == 'PASS' | `Another Action` |
        In this example, only either `Some Action` or `Another Action` is
        executed, based on the status of `My Keyword`. Instead of `Run Keyword
        And Ignore Error` you can also use `Run Keyword And Return Status`.
        Variables used like ``${variable}``, as in the examples above, are
        replaced in the expression before evaluation. Variables are also
        available in the evaluation namespace and can be accessed using special
        syntax ``$variable``. This is a new feature in Robot Framework 2.9
        and it is explained more thoroughly in `Evaluating expressions`.
        Example:
        | `Run Keyword If` | $result is None or $result == 'FAIL' | `Keyword` |
        Starting from Robot version 2.7.4, this keyword supports also optional
        ELSE and ELSE IF branches. Both of these are defined in ``*args`` and
        must use exactly format ``ELSE`` or ``ELSE IF``, respectively. ELSE
        branches must contain first the name of the keyword to execute and then
        its possible arguments. ELSE IF branches must first contain a condition,
        like the first argument to this keyword, and then the keyword to execute
        and its possible arguments. It is possible to have ELSE branch after
        ELSE IF and to have multiple ELSE IF branches.
        Given previous example, if/else construct can also be created like this:
        | ${status} | ${value} = | `Run Keyword And Ignore Error` | My Keyword |
        | `Run Keyword If` | '${status}' == 'PASS' | `Some Action` | arg | ELSE | `Another Action` |
        The return value is the one of the keyword that was executed or None if
        no keyword was executed (i.e. if ``condition`` was false). Hence, it is
        recommended to use ELSE and/or ELSE IF branches to conditionally assign
        return values from keyword to variables (to conditionally assign fixed
        values to variables, see `Set Variable If`). This is illustrated by the
        example below:
        | ${var1} = | `Run Keyword If` | ${rc} == 0 | `Some keyword returning a value` |
        | ... | ELSE IF | 0 < ${rc} < 42 | `Another keyword` |
        | ... | ELSE IF | ${rc} < 0 | `Another keyword with args` | ${rc} | arg2 |
        | ... | ELSE | `Final keyword to handle abnormal cases` | ${rc} |
        | ${var2} = | `Run Keyword If` | ${condition} | `Some keyword` |
        In this example, ${var2} will be set to None if ${condition} is false.
        Notice that ``ELSE`` and ``ELSE IF`` control words must be used
        explicitly and thus cannot come from variables. If you need to use
        literal ``ELSE`` and ``ELSE IF`` strings as arguments, you can escape
        them with a backslash like ``\\ELSE`` and ``\\ELSE IF``.
        Starting from Robot Framework 2.8, Python's
        [http://docs.python.org/2/library/os.html|os] and
        [http://docs.python.org/2/library/sys.html|sys] modules are
        automatically imported when evaluating the ``condition``.
        Attributes they contain can thus be used in the condition:
        | `Run Keyword If` | os.sep == '/' | `Unix Keyword` |
        | ... | ELSE IF | sys.platform.startswith('java') | `Jython Keyword` |
        | ... | ELSE | `Windows Keyword` |
        """
        # ``branch`` is a callable that runs the possible ELSE/ELSE IF branch
        # (or returns None when no branch was given).
        args, branch = self._split_elif_or_else_branch(args)
        if self._is_true(condition):
            return self.run_keyword(name, *args)
        return branch()
def _split_elif_or_else_branch(self, args):
if 'ELSE IF' in args:
args, branch = self._split_branch(args, 'ELSE IF', 2,
'condition and keyword')
return args, lambda: self.run_keyword_if(*branch)
if 'ELSE' in args:
args, branch = self._split_branch(args, 'ELSE', 1, 'keyword')
return args, lambda: self.run_keyword(*branch)
return args, lambda: None
def _split_branch(self, args, control_word, required, required_error):
index = list(args).index(control_word)
branch = self._variables.replace_list(args[index+1:], required)
if len(branch) < required:
raise DataError('%s requires %s.' % (control_word, required_error))
return args[:index], branch
def run_keyword_unless(self, condition, name, *args):
"""Runs the given keyword with the given arguments, if ``condition`` is false.
See `Run Keyword If` for more information and an example.
"""
if not self._is_true(condition):
return self.run_keyword(name, *args)
    def run_keyword_and_ignore_error(self, name, *args):
        """Runs the given keyword with the given arguments and ignores possible error.
        This keyword returns two values, so that the first is either string
        ``PASS`` or ``FAIL``, depending on the status of the executed keyword.
        The second value is either the return value of the keyword or the
        received error message. See `Run Keyword And Return Status` if you are
        only interested in the execution status.
        The keyword name and arguments work as in `Run Keyword`. See
        `Run Keyword If` for a usage example.
        Errors caused by invalid syntax, timeouts, or fatal exceptions are not
        caught by this keyword. Otherwise this keyword itself never fails.
        Since Robot Framework 2.9, variable errors are caught by this keyword.
        """
        try:
            return 'PASS', self.run_keyword(name, *args)
        except ExecutionFailed as err:
            # Syntax errors, timeouts and fatal errors must not be swallowed.
            if err.dont_continue:
                raise
            return 'FAIL', unicode(err)
def run_keyword_and_return_status(self, name, *args):
"""Runs the given keyword with given arguments and returns the status as a Boolean value.
This keyword returns Boolean ``True`` if the keyword that is executed
succeeds and ``False`` if it fails. This is useful, for example, in
combination with `Run Keyword If`. If you are interested in the error
message or return value, use `Run Keyword And Ignore Error` instead.
The keyword name and arguments work as in `Run Keyword`.
Example:
| ${passed} = | `Run Keyword And Return Status` | Keyword | args |
| `Run Keyword If` | ${passed} | Another keyword |
Errors caused by invalid syntax, timeouts, or fatal exceptions are not
caught by this keyword. Otherwise this keyword itself never fails.
New in Robot Framework 2.7.6.
"""
status, _ = self.run_keyword_and_ignore_error(name, *args)
return status == 'PASS'
    def run_keyword_and_continue_on_failure(self, name, *args):
        """Runs the keyword and continues execution even if a failure occurs.
        The keyword name and arguments work as with `Run Keyword`.
        Example:
        | Run Keyword And Continue On Failure | Fail | This is a stupid example |
        | Log | This keyword is executed |
        The execution is not continued if the failure is caused by invalid syntax,
        timeout, or fatal exception.
        Since Robot Framework 2.9, variable errors are caught by this keyword.
        """
        try:
            return self.run_keyword(name, *args)
        except ExecutionFailed as err:
            # Mark the failure continuable unless it is a syntax error,
            # timeout or fatal error that must stop the execution.
            if not err.dont_continue:
                err.continue_on_failure = True
            raise err
    def run_keyword_and_expect_error(self, expected_error, name, *args):
        """Runs the keyword and checks that the expected error occurred.
        The expected error must be given in the same format as in
        Robot Framework reports. It can be a pattern containing
        characters ``?``, which matches to any single character and
        ``*``, which matches to any number of any characters. ``name`` and
        ``*args`` have same semantics as with `Run Keyword`.
        If the expected error occurs, the error message is returned and it can
        be further processed/tested, if needed. If there is no error, or the
        error does not match the expected error, this keyword fails.
        Examples:
        | Run Keyword And Expect Error | My error | Some Keyword | arg1 | arg2 |
        | ${msg} = | Run Keyword And Expect Error | * | My KW |
        | Should Start With | ${msg} | Once upon a time in |
        Errors caused by invalid syntax, timeouts, or fatal exceptions are not
        caught by this keyword.
        Since Robot Framework 2.9, variable errors are caught by this keyword.
        """
        try:
            self.run_keyword(name, *args)
        except ExecutionFailed as err:
            # Syntax errors, timeouts and fatal errors are not caught.
            if err.dont_continue:
                raise
        else:
            # The keyword passed although an error was expected.
            raise AssertionError("Expected error '%s' did not occur."
                                 % expected_error)
        if not self._matches(unicode(err), expected_error):
            raise AssertionError("Expected error '%s' but got '%s'."
                                 % (expected_error, err))
        return unicode(err)
    def repeat_keyword(self, times, name, *args):
        """Executes the specified keyword multiple times.
        ``name`` and ``args`` define the keyword that is executed
        similarly as with `Run Keyword`, and ``times`` specifies how many
        times the keyword should be executed. ``times`` can be given as an
        integer or as a string that can be converted to an integer. If it is
        a string, it can have postfix ``times`` or ``x`` (case and space
        insensitive) to make the expression more explicit.
        If ``times`` is zero or negative, the keyword is not executed at
        all. This keyword fails immediately if any of the execution
        rounds fails.
        Examples:
        | Repeat Keyword | 5 times | Go to Previous Page |
        | Repeat Keyword | ${var} | Some Keyword | arg1 | arg2 |
        """
        # Parse possible 'times'/'x' postfix, then run the rounds reusing
        # the same machinery as `Run Keywords`.
        times = self._get_times_to_repeat(times)
        self._run_keywords(self._yield_repeated_keywords(times, name, args))
def _get_times_to_repeat(self, times, require_postfix=False):
times = normalize(str(times))
if times.endswith('times'):
times = times[:-5]
elif times.endswith('x'):
times = times[:-1]
elif require_postfix:
raise ValueError
return self._convert_to_integer(times)
def _yield_repeated_keywords(self, times, name, args):
if times <= 0:
self.log("Keyword '%s' repeated zero times." % name)
for i in xrange(times):
self.log("Repeating keyword, round %d/%d." % (i+1, times))
yield name, args
    def wait_until_keyword_succeeds(self, retry, retry_interval, name, *args):
        """Runs the specified keyword and retries if it fails.
        ``name`` and ``args`` define the keyword that is executed similarly
        as with `Run Keyword`. How long to retry running the keyword is
        defined using ``retry`` argument either as timeout or count.
        ``retry_interval`` is the time to wait before trying to run the
        keyword again after the previous run has failed.
        If ``retry`` is given as timeout, it must be in Robot Framework's
        time format (e.g. ``1 minute``, ``2 min 3 s``, ``4.5``) that is
        explained in an appendix of Robot Framework User Guide. If it is
        given as count, it must have ``times`` or ``x`` postfix (e.g.
        ``5 times``, ``10 x``). ``retry_interval`` must always be given in
        Robot Framework's time format.
        If the keyword does not succeed regardless of retries, this keyword
        fails. If the executed keyword passes, its return value is returned.
        Examples:
        | Wait Until Keyword Succeeds | 2 min | 5 sec | My keyword | argument |
        | ${result} = | Wait Until Keyword Succeeds | 3x | 200ms | My keyword |
        All normal failures are caught by this keyword. Errors caused by
        invalid syntax, test or keyword timeouts, or fatal exceptions (caused
        e.g. by `Fatal Error`) are not caught.
        Running the same keyword multiple times inside this keyword can create
        lots of output and considerably increase the size of the generated
        output files. Starting from Robot Framework 2.7, it is possible to
        remove unnecessary keywords from the outputs using
        ``--RemoveKeywords WUKS`` command line option.
        Support for specifying ``retry`` as a number of times to retry is
        a new feature in Robot Framework 2.9.
        Since Robot Framework 2.9, variable errors are caught by this keyword.
        """
        # Exactly one of ``count``/``maxtime`` limits the retrying; the other
        # keeps the sentinel value -1 and never triggers.
        maxtime = count = -1
        try:
            count = self._get_times_to_repeat(retry, require_postfix=True)
        except ValueError:
            # No 'times'/'x' postfix: interpret ``retry`` as a timeout.
            timeout = timestr_to_secs(retry)
            maxtime = time.time() + timeout
            message = 'for %s' % secs_to_timestr(timeout)
        else:
            if count <= 0:
                raise ValueError('Retry count %d is not positive.' % count)
            message = '%d time%s' % (count, s(count))
        retry_interval = timestr_to_secs(retry_interval)
        while True:
            try:
                return self.run_keyword(name, *args)
            except ExecutionFailed as err:
                # Syntax errors, timeouts and fatal errors end retrying.
                if err.dont_continue:
                    raise
                count -= 1
                if time.time() > maxtime > 0 or count == 0:
                    raise AssertionError("Keyword '%s' failed after retrying "
                                         "%s. The last error was: %s"
                                         % (name, message, err))
                self._sleep_in_parts(retry_interval)
    def set_variable_if(self, condition, *values):
        """Sets variable based on the given condition.
        The basic usage is giving a condition and two values. The
        given condition is first evaluated the same way as with the
        `Should Be True` keyword. If the condition is true, then the
        first value is returned, and otherwise the second value is
        returned. The second value can also be omitted, in which case
        it has a default value None. This usage is illustrated in the
        examples below, where ``${rc}`` is assumed to be zero.
        | ${var1} = | Set Variable If | ${rc} == 0 | zero | nonzero |
        | ${var2} = | Set Variable If | ${rc} > 0 | value1 | value2 |
        | ${var3} = | Set Variable If | ${rc} > 0 | whatever | |
        =>
        | ${var1} = 'zero'
        | ${var2} = 'value2'
        | ${var3} = None
        It is also possible to have 'else if' support by replacing the
        second value with another condition, and having two new values
        after it. If the first condition is not true, the second is
        evaluated and one of the values after it is returned based on
        its truth value. This can be continued by adding more
        conditions without a limit.
        | ${var} = | Set Variable If | ${rc} == 0 | zero |
        | ... | ${rc} > 0 | greater than zero | less then zero |
        | |
        | ${var} = | Set Variable If |
        | ... | ${rc} == 0 | zero |
        | ... | ${rc} == 1 | one |
        | ... | ${rc} == 2 | two |
        | ... | ${rc} > 2 | greater than two |
        | ... | ${rc} < 0 | less than zero |
        Use `Get Variable Value` if you need to set variables
        dynamically based on whether a variable exist or not.
        """
        values = self._verify_values_for_set_variable_if(list(values))
        if self._is_true(condition):
            return self._variables.replace_scalar(values[0])
        # Condition was false: drop the first value and handle the possible
        # 'else if' conditions by re-invoking this same keyword.
        values = self._verify_values_for_set_variable_if(values[1:], True)
        if len(values) == 1:
            return self._variables.replace_scalar(values[0])
        return self.run_keyword('BuiltIn.Set Variable If', *values[0:])
def _verify_values_for_set_variable_if(self, values, default=False):
if not values:
if default:
return [None]
raise RuntimeError('At least one value is required')
if is_list_var(values[0]):
values[:1] = [escape(item) for item in self._variables[values[0]]]
return self._verify_values_for_set_variable_if(values)
return values
    def run_keyword_if_test_failed(self, name, *args):
        """Runs the given keyword with the given arguments, if the test failed.
        This keyword can only be used in a test teardown. Trying to use it
        anywhere else results in an error.
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        Prior to Robot Framework 2.9 failures in test teardown itself were
        not detected by this keyword.
        """
        test = self._get_test_in_teardown('Run Keyword If Test Failed')
        # Failures in the teardown itself also count as the test failing.
        if not test.passed or self._context.failure_in_test_teardown:
            return self.run_keyword(name, *args)
    def run_keyword_if_test_passed(self, name, *args):
        """Runs the given keyword with the given arguments, if the test passed.
        This keyword can only be used in a test teardown. Trying to use it
        anywhere else results in an error.
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        Prior to Robot Framework 2.9 failures in test teardown itself were
        not detected by this keyword.
        """
        test = self._get_test_in_teardown('Run Keyword If Test Passed')
        # A failure in the teardown itself means the test is not passing.
        if test.passed and not self._context.failure_in_test_teardown:
            return self.run_keyword(name, *args)
    def run_keyword_if_timeout_occurred(self, name, *args):
        """Runs the given keyword if either a test or a keyword timeout has occurred.
        This keyword can only be used in a test teardown. Trying to use it
        anywhere else results in an error.
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        # Called only to validate that we are in a test teardown.
        self._get_test_in_teardown('Run Keyword If Timeout Occurred')
        if self._context.timeout_occurred:
            return self.run_keyword(name, *args)
def _get_test_in_teardown(self, kwname):
ctx = self._context
if ctx.test and ctx.in_test_teardown:
return ctx.test
raise RuntimeError("Keyword '%s' can only be used in test teardown."
% kwname)
    def run_keyword_if_all_critical_tests_passed(self, name, *args):
        """Runs the given keyword with the given arguments, if all critical tests passed.
        This keyword can only be used in suite teardown. Trying to use it in
        any other place will result in an error.
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        suite = self._get_suite_in_teardown('Run Keyword If '
                                            'All Critical Tests Passed')
        # Zero failed critical tests means all of them passed.
        if suite.statistics.critical.failed == 0:
            return self.run_keyword(name, *args)
    def run_keyword_if_any_critical_tests_failed(self, name, *args):
        """Runs the given keyword with the given arguments, if any critical tests failed.
        This keyword can only be used in a suite teardown. Trying to use it
        anywhere else results in an error.
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        suite = self._get_suite_in_teardown('Run Keyword If '
                                            'Any Critical Tests Failed')
        # At least one failed critical test triggers the keyword.
        if suite.statistics.critical.failed > 0:
            return self.run_keyword(name, *args)
    def run_keyword_if_all_tests_passed(self, name, *args):
        """Runs the given keyword with the given arguments, if all tests passed.
        This keyword can only be used in a suite teardown. Trying to use it
        anywhere else results in an error.
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        suite = self._get_suite_in_teardown('Run Keyword If All Tests Passed')
        # Zero failed tests overall means the whole suite passed.
        if suite.statistics.all.failed == 0:
            return self.run_keyword(name, *args)
    def run_keyword_if_any_tests_failed(self, name, *args):
        """Runs the given keyword with the given arguments, if one or more tests failed.
        This keyword can only be used in a suite teardown. Trying to use it
        anywhere else results in an error.
        Otherwise, this keyword works exactly like `Run Keyword`, see its
        documentation for more details.
        """
        suite = self._get_suite_in_teardown('Run Keyword If Any Tests Failed')
        # Any failed test in the suite triggers the keyword.
        if suite.statistics.all.failed > 0:
            return self.run_keyword(name, *args)
def _get_suite_in_teardown(self, kwname):
if not self._context.in_suite_teardown:
raise RuntimeError("Keyword '%s' can only be used in suite teardown."
% kwname)
return self._context.suite
class _Control(_BuiltInBase):
    def continue_for_loop(self):
        """Skips the current for loop iteration and continues from the next.
        Skips the remaining keywords in the current for loop iteration and
        continues from the next one. Can be used directly in a for loop or
        in a keyword that the loop uses.
        Example:
        | :FOR | ${var} | IN | @{VALUES} |
        | | Run Keyword If | '${var}' == 'CONTINUE' | Continue For Loop |
        | | Do Something | ${var} |
        See `Continue For Loop If` to conditionally continue a for loop without
        using `Run Keyword If` or other wrapper keywords.
        New in Robot Framework 2.8.
        """
        self.log("Continuing for loop from the next iteration.")
        # The executor catches this signal exception and continues the loop.
        raise ContinueForLoop()
    def continue_for_loop_if(self, condition):
        """Skips the current for loop iteration if the ``condition`` is true.
        A wrapper for `Continue For Loop` to continue a for loop based on
        the given condition. The condition is evaluated using the same
        semantics as with `Should Be True` keyword.
        Example:
        | :FOR | ${var} | IN | @{VALUES} |
        | | Continue For Loop If | '${var}' == 'CONTINUE' |
        | | Do Something | ${var} |
        New in Robot Framework 2.8.
        """
        # Delegate to `Continue For Loop`, which raises the signal exception.
        if self._is_true(condition):
            self.continue_for_loop()
    def exit_for_loop(self):
        """Stops executing the enclosing for loop.
        Exits the enclosing for loop and continues execution after it.
        Can be used directly in a for loop or in a keyword that the loop uses.
        Example:
        | :FOR | ${var} | IN | @{VALUES} |
        | | Run Keyword If | '${var}' == 'EXIT' | Exit For Loop |
        | | Do Something | ${var} |
        See `Exit For Loop If` to conditionally exit a for loop without
        using `Run Keyword If` or other wrapper keywords.
        """
        self.log("Exiting for loop altogether.")
        # The executor catches this signal exception and ends the loop.
        raise ExitForLoop()
    def exit_for_loop_if(self, condition):
        """Stops executing the enclosing for loop if the ``condition`` is true.
        A wrapper for `Exit For Loop` to exit a for loop based on
        the given condition. The condition is evaluated using the same
        semantics as with `Should Be True` keyword.
        Example:
        | :FOR | ${var} | IN | @{VALUES} |
        | | Exit For Loop If | '${var}' == 'EXIT' |
        | | Do Something | ${var} |
        New in Robot Framework 2.8.
        """
        # Delegate to `Exit For Loop`, which raises the signal exception.
        if self._is_true(condition):
            self.exit_for_loop()
    @run_keyword_variant(resolve=0)
    def return_from_keyword(self, *return_values):
        """Returns from the enclosing user keyword.
        This keyword can be used to return from a user keyword with PASS status
        without executing it fully. It is also possible to return values
        similarly as with the ``[Return]`` setting. For more detailed information
        about working with the return values, see the User Guide.
        This keyword is typically wrapped to some other keyword, such as
        `Run Keyword If` or `Run Keyword If Test Passed`, to return based
        on a condition:
        | Run Keyword If | ${rc} < 0 | Return From Keyword |
        | Run Keyword If Test Passed | Return From Keyword |
        It is possible to use this keyword to return from a keyword also inside
        a for loop. That, as well as returning values, is demonstrated by the
        `Find Index` keyword in the following somewhat advanced example.
        Notice that it is often a good idea to move this kind of complicated
        logic into a test library.
        | ***** Variables *****
        | @{LIST} = foo baz
        |
        | ***** Test Cases *****
        | Example
        | ${index} = Find Index baz @{LIST}
        | Should Be Equal ${index} ${1}
        | ${index} = Find Index non existing @{LIST}
        | Should Be Equal ${index} ${-1}
        |
        | ***** Keywords *****
        | Find Index
        | [Arguments] ${element} @{items}
        | ${index} = Set Variable ${0}
        | :FOR ${item} IN @{items}
        | \\ Run Keyword If '${item}' == '${element}' Return From Keyword ${index}
        | \\ ${index} = Set Variable ${index + 1}
        | Return From Keyword ${-1} # Also [Return] would work here.
        The most common use case, returning based on an expression, can be
        accomplished directly with `Return From Keyword If`. Both of these
        keywords are new in Robot Framework 2.8.
        See also `Run Keyword And Return` and `Run Keyword And Return If`.
        """
        self.log('Returning from the enclosing user keyword.')
        # The executor catches this signal exception and returns the values.
        raise ReturnFromKeyword(return_values)
    @run_keyword_variant(resolve=1)
    def return_from_keyword_if(self, condition, *return_values):
        """Returns from the enclosing user keyword if ``condition`` is true.
        A wrapper for `Return From Keyword` to return based on the given
        condition. The condition is evaluated using the same semantics as
        with `Should Be True` keyword.
        Given the same example as in `Return From Keyword`, we can rewrite the
        `Find Index` keyword as follows:
        | ***** Keywords *****
        | Find Index
        | [Arguments] ${element} @{items}
        | ${index} = Set Variable ${0}
        | :FOR ${item} IN @{items}
        | \\ Return From Keyword If '${item}' == '${element}' ${index}
        | \\ ${index} = Set Variable ${index + 1}
        | Return From Keyword ${-1} # Also [Return] would work here.
        See also `Run Keyword And Return` and `Run Keyword And Return If`.
        New in Robot Framework 2.8.
        """
        # Delegate to `Return From Keyword`, which raises the signal exception.
        if self._is_true(condition):
            self.return_from_keyword(*return_values)
    @run_keyword_variant(resolve=1)
    def run_keyword_and_return(self, name, *args):
        """Runs the specified keyword and returns from the enclosing user keyword.
        The keyword to execute is defined with ``name`` and ``*args`` exactly
        like with `Run Keyword`. After running the keyword, returns from the
        enclosing user keyword and passes possible return value from the
        executed keyword further. Returning from a keyword has exactly same
        semantics as with `Return From Keyword`.
        Example:
        | `Run Keyword And Return` | `My Keyword` | arg1 | arg2 |
        | # Above is equivalent to: |
        | ${result} = | `My Keyword` | arg1 | arg2 |
        | `Return From Keyword` | ${result} | | |
        Use `Run Keyword And Return If` if you want to run keyword and return
        based on a condition.
        New in Robot Framework 2.8.2.
        """
        ret = self.run_keyword(name, *args)
        # Escape the value so it is passed through without re-resolving
        # possible variable syntax in it.
        self.return_from_keyword(escape(ret))
    @run_keyword_variant(resolve=2)
    def run_keyword_and_return_if(self, condition, name, *args):
        """Runs the specified keyword and returns from the enclosing user keyword.
        A wrapper for `Run Keyword And Return` to run and return based on
        the given ``condition``. The condition is evaluated using the same
        semantics as with `Should Be True` keyword.
        Example:
        | `Run Keyword And Return If` | ${rc} > 0 | `My Keyword` | arg1 | arg2 |
        | # Above is equivalent to: |
        | `Run Keyword If` | ${rc} > 0 | `Run Keyword And Return` | `My Keyword` | arg1 | arg2 |
        Use `Return From Keyword If` if you want to return a certain value
        based on a condition.
        New in Robot Framework 2.8.2.
        """
        # Delegate to `Run Keyword And Return` only when the condition holds.
        if self._is_true(condition):
            self.run_keyword_and_return(name, *args)
    def pass_execution(self, message, *tags):
        """Skips rest of the current test, setup, or teardown with PASS status.
        This keyword can be used anywhere in the test data, but the place where
        used affects the behavior:
        - When used in any setup or teardown (suite, test or keyword), passes
        that setup or teardown. Possible keyword teardowns of the started
        keywords are executed. Does not affect execution or statuses
        otherwise.
        - When used in a test outside setup or teardown, passes that particular
        test case. Possible test and keyword teardowns are executed.
        Possible continuable failures before this keyword is used, as well as
        failures in executed teardowns, will fail the execution.
        It is mandatory to give a message explaining why execution was passed.
        By default the message is considered plain text, but starting it with
        ``*HTML*`` allows using HTML formatting.
        It is also possible to modify test tags passing tags after the message
        similarly as with `Fail` keyword. Tags starting with a hyphen
        (e.g. ``-regression``) are removed and others added. Tags are modified
        using `Set Tags` and `Remove Tags` internally, and the semantics
        setting and removing them are the same as with these keywords.
        Examples:
        | Pass Execution | All features available in this version tested. |
        | Pass Execution | Deprecated test. | deprecated | -regression |
        This keyword is typically wrapped to some other keyword, such as
        `Run Keyword If`, to pass based on a condition. The most common case
        can be handled also with `Pass Execution If`:
        | Run Keyword If | ${rc} < 0 | Pass Execution | Negative values are cool. |
        | Pass Execution If | ${rc} < 0 | Negative values are cool. |
        Passing execution in the middle of a test, setup or teardown should be
        used with care. In the worst case it leads to tests that skip all the
        parts that could actually uncover problems in the tested application.
        In cases where execution cannot continue due to external factors,
        it is often safer to fail the test case and make it non-critical.
        New in Robot Framework 2.8.
        """
        message = message.strip()
        if not message:
            raise RuntimeError('Message cannot be empty.')
        # Tags must be modified before raising, as the raise ends execution
        # of the current test/setup/teardown.
        self._set_and_remove_tags(tags)
        log_message, level = self._get_logged_test_message_and_level(message)
        self.log('Execution passed with message:\n%s' % log_message, level)
        raise PassExecution(message)
    @run_keyword_variant(resolve=1)
    def pass_execution_if(self, condition, message, *tags):
        """Conditionally skips rest of the current test, setup, or teardown with PASS status.
        A wrapper for `Pass Execution` to skip rest of the current test,
        setup or teardown based the given ``condition``. The condition is
        evaluated similarly as with `Should Be True` keyword, and ``message``
        and ``*tags`` have same semantics as with `Pass Execution`.
        Example:
        | :FOR | ${var} | IN | @{VALUES} |
        | | Pass Execution If | '${var}' == 'EXPECTED' | Correct value was found |
        | | Do Something | ${var} |
        New in Robot Framework 2.8.
        """
        if self._is_true(condition):
            # Message and tags are resolved only when the condition holds
            # (only the condition is resolved automatically; see decorator).
            message = self._variables.replace_string(message)
            tags = [self._variables.replace_string(tag) for tag in tags]
            self.pass_execution(message, *tags)
class _Misc(_BuiltInBase):
    def no_operation(self):
        """Does absolutely nothing."""
        # Intentionally empty: usable e.g. as a placeholder keyword.
    def sleep(self, time_, reason=None):
        """Pauses the test executed for the given time.
        ``time`` may be either a number or a time string. Time strings are in
        a format such as ``1 day 2 hours 3 minutes 4 seconds 5milliseconds`` or
        ``1d 2h 3m 4s 5ms``, and they are fully explained in an appendix of
        Robot Framework User Guide. Optional ``reason`` can be used to explain why
        sleeping is necessary. Both the time slept and the reason are logged.
        Examples:
        | Sleep | 42 |
        | Sleep | 1.5 |
        | Sleep | 2 minutes 10 seconds |
        | Sleep | 10s | Wait for a reply |
        """
        seconds = timestr_to_secs(time_)
        # Python hangs with negative values
        if seconds < 0:
            seconds = 0
        self._sleep_in_parts(seconds)
        self.log('Slept %s' % secs_to_timestr(seconds))
        if reason:
            self.log(reason)
def _sleep_in_parts(self, seconds):
# time.sleep can't be stopped in windows
# to ensure that we can signal stop (with timeout)
# split sleeping to small pieces
endtime = time.time() + float(seconds)
while True:
remaining = endtime - time.time()
if remaining <= 0:
break
time.sleep(min(remaining, 0.5))
def catenate(self, *items):
"""Catenates the given items together and returns the resulted string.
By default, items are catenated with spaces, but if the first item
contains the string ``SEPARATOR=<sep>``, the separator ``<sep>`` is
used instead. Items are converted into strings when necessary.
Examples:
| ${str1} = | Catenate | Hello | world | |
| ${str2} = | Catenate | SEPARATOR=--- | Hello | world |
| ${str3} = | Catenate | SEPARATOR= | Hello | world |
=>
| ${str1} = 'Hello world'
| ${str2} = 'Hello---world'
| ${str3} = 'Helloworld'
"""
if not items:
return ''
items = [unic(item) for item in items]
if items[0].startswith('SEPARATOR='):
sep = items[0][len('SEPARATOR='):]
items = items[1:]
else:
sep = ' '
return sep.join(items)
def log(self, message, level='INFO', html=False, console=False, repr=False):
u"""Logs the given message with the given level.
Valid levels are TRACE, DEBUG, INFO (default), HTML, WARN, and ERROR.
Messages below the current active log level are ignored. See
`Set Log Level` keyword and ``--loglevel`` command line option
for more details about setting the level.
Messages logged with the WARN or ERROR levels will be automatically
visible also in the console and in the Test Execution Errors section
in the log file.
Logging can be configured using optional ``html``, ``console`` and
``repr`` arguments. They are off by default, but can be enabled
by giving them a true value. See `Boolean arguments` section for more
information about true and false values.
If the ``html`` argument is given a true value, the message will be
considered HTML and special characters such as ``<`` in it are not
escaped. For example, logging ``<img src="image.png">`` creates an
image when ``html`` is true, but otherwise the message is that exact
string. An alternative to using the ``html`` argument is using the HTML
pseudo log level. It logs the message as HTML using the INFO level.
If the ``console`` argument is true, the message will be written to
the console where test execution was started from in addition to
the log file. This keyword always uses the standard output stream
and adds a newline after the written message. Use `Log To Console`
instead if either of these is undesirable,
If the ``repr`` argument is true, the given item will be passed through
a custom version of Python's ``pprint.pformat()`` function before
logging it. This is useful, for example, when working with strings or
bytes containing invisible characters, or when working with nested data
structures. The custom version differs from the standard one so that it
omits the ``u`` prefix from Unicode strings and adds ``b`` prefix to
byte strings.
Examples:
| Log | Hello, world! | | | # Normal INFO message. |
| Log | Warning, world! | WARN | | # Warning. |
| Log | <b>Hello</b>, world! | html=yes | | # INFO message as HTML. |
| Log | <b>Hello</b>, world! | HTML | | # Same as above. |
| Log | <b>Hello</b>, world! | DEBUG | html=true | # DEBUG as HTML. |
| Log | Hello, console! | console=yes | | # Log also to the console. |
| Log | Hyv\xe4 \\x00 | repr=yes | | # Log ``'Hyv\\xe4 \\x00'``. |
See `Log Many` if you want to log multiple messages in one go, and
`Log To Console` if you only want to write to the console.
Arguments ``html``, ``console``, and ``repr`` are new in Robot Framework
2.8.2.
Pprint support when ``repr`` is used is new in Robot Framework 2.8.6,
and it was changed to drop the ``u`` prefix and add the ``b`` prefix
in Robot Framework 2.9.
"""
if is_truthy(repr):
message = prepr(message, width=80)
logger.write(message, level, is_truthy(html))
if is_truthy(console):
logger.console(message)
@run_keyword_variant(resolve=0)
def log_many(self, *messages):
"""Logs the given messages as separate entries using the INFO level.
Supports also logging list and dictionary variable items individually.
Examples:
| Log Many | Hello | ${var} |
| Log Many | @{list} | &{dict} |
See `Log` and `Log To Console` keywords if you want to use alternative
log levels, use HTML, or log to the console.
"""
for msg in self._yield_logged_messages(messages):
self.log(msg)
def _yield_logged_messages(self, messages):
for msg in messages:
var = VariableSplitter(msg)
value = self._variables.replace_scalar(msg)
if var.is_list_variable():
for item in value:
yield item
elif var.is_dict_variable():
for name, value in value.items():
yield '%s=%s' % (name, value)
else:
yield value
def log_to_console(self, message, stream='STDOUT', no_newline=False):
"""Logs the given message to the console.
By default uses the standard output stream. Using the standard error
stream is possibly by giving the ``stream`` argument value ``STDERR``
(case-insensitive).
By default appends a newline to the logged message. This can be
disabled by giving the ``no_newline`` argument a true value (see
`Boolean arguments`).
Examples:
| Log To Console | Hello, console! | |
| Log To Console | Hello, stderr! | STDERR |
| Log To Console | Message starts here and is | no_newline=true |
| Log To Console | continued without newline. | |
This keyword does not log the message to the normal log file. Use
`Log` keyword, possibly with argument ``console``, if that is desired.
New in Robot Framework 2.8.2.
"""
logger.console(message, newline=is_falsy(no_newline), stream=stream)
@run_keyword_variant(resolve=0)
def comment(self, *messages):
"""Displays the given messages in the log file as keyword arguments.
This keyword does nothing with the arguments it receives, but as they
are visible in the log, this keyword can be used to display simple
messages. Given arguments are ignored so thoroughly that they can even
contain non-existing variables. If you are interested about variable
values, you can use the `Log` or `Log Many` keywords.
"""
pass
    def set_log_level(self, level):
        """Sets the log threshold to the specified level and returns the old level.

        Messages below the level will not be logged. The default logging
        level is INFO, but it can be overridden with the command line
        option ``--loglevel``.

        The available levels: TRACE, DEBUG, INFO (default), WARN, ERROR and
        NONE (no logging).
        """
        try:
            old = self._context.output.set_log_level(level)
        except DataError as err:
            # An invalid level name is reported as a normal keyword failure.
            raise RuntimeError(unicode(err))
        # Keep the ${LOG_LEVEL} built-in variable in sync with the output.
        self._namespace.variables.set_global('${LOG_LEVEL}', level.upper())
        self.log('Log level changed from %s to %s' % (old, level.upper()))
        return old
def reload_library(self, name_or_instance):
"""Rechecks what keywords the specified library provides.
Can be called explicitly in the test data or by a library itself
when keywords it provides have changed.
The library can be specified by its name or as the active instance of
the library. The latter is especially useful if the library itself
calls this keyword as a method.
New in Robot Framework 2.9.
"""
library = self._namespace.reload_library(name_or_instance)
self.log('Reloaded library %s with %s keywords.' % (library.name,
len(library)))
@run_keyword_variant(resolve=0)
def import_library(self, name, *args):
"""Imports a library with the given name and optional arguments.
This functionality allows dynamic importing of libraries while tests
are running. That may be necessary, if the library itself is dynamic
and not yet available when test data is processed. In a normal case,
libraries should be imported using the Library setting in the Setting
table.
This keyword supports importing libraries both using library
names and physical paths. When paths are used, they must be
given in absolute format. Forward slashes can be used as path
separators in all operating systems.
It is possible to pass arguments to the imported library and also
named argument syntax works if the library supports it. ``WITH NAME``
syntax can be used to give a custom name to the imported library.
Examples:
| Import Library | MyLibrary |
| Import Library | ${CURDIR}/../Library.py | arg1 | named=arg2 |
| Import Library | ${LIBRARIES}/Lib.java | arg | WITH NAME | JavaLib |
"""
try:
self._namespace.import_library(name, list(args))
except DataError as err:
raise RuntimeError(unicode(err))
@run_keyword_variant(resolve=0)
def import_variables(self, path, *args):
"""Imports a variable file with the given path and optional arguments.
Variables imported with this keyword are set into the test suite scope
similarly when importing them in the Setting table using the Variables
setting. These variables override possible existing variables with
the same names. This functionality can thus be used to import new
variables, for example, for each test in a test suite.
The given path must be absolute. Forward slashes can be used as path
separator regardless the operating system.
Examples:
| Import Variables | ${CURDIR}/variables.py | | |
| Import Variables | ${CURDIR}/../vars/env.py | arg1 | arg2 |
"""
try:
self._namespace.import_variables(path, list(args), overwrite=True)
except DataError as err:
raise RuntimeError(unicode(err))
@run_keyword_variant(resolve=0)
def import_resource(self, path):
"""Imports a resource file with the given path.
Resources imported with this keyword are set into the test suite scope
similarly when importing them in the Setting table using the Resource
setting.
The given path must be absolute. Forward slashes can be used as path
separator regardless the operating system.
Examples:
| Import Resource | ${CURDIR}/resource.txt |
| Import Resource | ${CURDIR}/../resources/resource.html |
"""
try:
self._namespace.import_resource(path)
except DataError as err:
raise RuntimeError(unicode(err))
    def set_library_search_order(self, *search_order):
        """Sets the resolution order to use when a name matches multiple keywords.

        The library search order is used to resolve conflicts when a
        keyword name in the test data matches multiple keywords. The first
        library (or resource, see below) containing the keyword is selected
        and that keyword implementation used. If the keyword is not found
        from any library (or resource), test executing fails the same way
        as when the search order is not set.

        When this keyword is used, there is no need to use the long
        ``LibraryName.Keyword Name`` notation.  For example, instead of
        having

        | MyLibrary.Keyword | arg |
        | MyLibrary.Another Keyword |
        | MyLibrary.Keyword | xxx |

        you can have

        | Set Library Search Order | MyLibrary |
        | Keyword | arg |
        | Another Keyword |
        | Keyword | xxx |

        This keyword can be used also to set the order of keywords in
        different resource files. In this case resource names must be given
        without paths or extensions like:

        | Set Library Search Order | resource | another_resource |

        *NOTE:*
        - The search order is valid only in the suite where this keyword is used.
        - Keywords in resources always have higher priority than
          keywords in libraries regardless the search order.
        - The old order is returned and can be used to reset the search order later.
        - Library and resource names in the search order are both case and space
          insensitive.
        """
        # The namespace returns the previous order so callers can restore it.
        return self._namespace.set_search_order(search_order)
    def keyword_should_exist(self, name, msg=None):
        """Fails unless the given keyword exists in the current scope.

        Fails also if there are more than one keywords with the same name.
        Works both with the short name (e.g. ``Log``) and the full name
        (e.g. ``BuiltIn.Log``).

        The default error message can be overridden with the ``msg``
        argument.

        See also `Variable Should Exist`.
        """
        try:
            handler = self._namespace.get_handler(name)
            # NOTE(review): a UserErrorHandler appears to represent a user
            # keyword with a creation-time error; running it is expected to
            # raise that stored error so it gets caught below — confirm
            # against the handler implementation.
            if isinstance(handler, UserErrorHandler):
                handler.run()
        except DataError as err:
            raise AssertionError(msg or unicode(err))
    def get_time(self, format='timestamp', time_='NOW'):
        """Returns the given time in the requested format.

        *NOTE:* DateTime library added in Robot Framework 2.8.5 contains
        much more flexible keywords for getting the current date and time
        and for date and time handling in general.

        How time is returned is determined based on the given ``format``
        string as follows. Note that all checks are case-insensitive.

        1) If ``format`` contains the word ``epoch``, the time is returned
           in seconds after the UNIX epoch (1970-01-01 00:00:00 UTC).
           The return value is always an integer.

        2) If ``format`` contains any of the words ``year``, ``month``,
           ``day``, ``hour``, ``min``, or ``sec``, only the selected parts
           are returned. The order of the returned parts is always the one
           in the previous sentence and the order of words in ``format``
           is not significant. The parts are returned as zero-padded
           strings (e.g. May -> ``05``).

        3) Otherwise (and by default) the time is returned as a
           timestamp string in the format ``2006-02-24 15:08:31``.

        By default this keyword returns the current local time, but
        that can be altered using ``time`` argument as explained below.
        Note that all checks involving strings are case-insensitive.

        1) If ``time`` is a number, or a string that can be converted to
           a number, it is interpreted as seconds since the UNIX epoch.
           This documentation was originally written about 1177654467
           seconds after the epoch.

        2) If ``time`` is a timestamp, that time will be used. Valid
           timestamp formats are ``YYYY-MM-DD hh:mm:ss`` and
           ``YYYYMMDD hhmmss``.

        3) If ``time`` is equal to ``NOW`` (default), the current local
           time is used. This time is got using Python's ``time.time()``
           function.

        4) If ``time`` is equal to ``UTC``, the current time in
           [http://en.wikipedia.org/wiki/Coordinated_Universal_Time|UTC]
           is used. This time is got using ``time.time() + time.altzone``
           in Python.

        5) If ``time`` is in the format like ``NOW - 1 day`` or ``UTC + 1
           hour 30 min``, the current local/UTC time plus/minus the time
           specified with the time string is used. The time string format
           is described in an appendix of Robot Framework User Guide.

        Examples (expecting the current local time is 2006-03-29 15:06:21):
        | ${time} = | Get Time |         |         |        |
        | ${secs} = | Get Time | epoch   |         |        |
        | ${year} = | Get Time | return year |     |        |
        | ${yyyy}   | ${mm}    | ${dd} = | Get Time | year,month,day |
        | @{time} = | Get Time | year month day hour min sec | | |
        | ${y}      | ${s} =   | Get Time | seconds and year | |
        =>
        | ${time} = '2006-03-29 15:06:21'
        | ${secs} = 1143637581
        | ${year} = '2006'
        | ${yyyy} = '2006', ${mm} = '03', ${dd} = '29'
        | @{time} = ['2006', '03', '29', '15', '06', '21']
        | ${y} = '2006'
        | ${s} = '21'

        Examples (expecting the current local time is 2006-03-29 15:06:21
        and UTC time is 2006-03-29 12:06:21):
        | ${time} = | Get Time | | 1177654467 | # Time given as epoch seconds |
        | ${secs} = | Get Time | sec | 2007-04-27 09:14:27 | # Time given as a timestamp |
        | ${year} = | Get Time | year | NOW | # The local time of execution |
        | @{time} = | Get Time | hour min sec | NOW + 1h 2min 3s | # 1h 2min 3s added to the local time |
        | @{utc} =  | Get Time | hour min sec | UTC | # The UTC time of execution |
        | ${hour} = | Get Time | hour | UTC - 1 hour | # 1h subtracted from the UTC time |
        =>
        | ${time} = '2007-04-27 09:14:27'
        | ${secs} = 27
        | ${year} = '2006'
        | @{time} = ['16', '08', '24']
        | @{utc} = ['12', '06', '21']
        | ${hour} = '11'

        Support for UTC time was added in Robot Framework 2.7.5 but it did
        not work correctly until 2.7.7.
        """
        # Parsing of the ``time`` value and formatting of the result are
        # delegated to the module-level time helpers.
        return get_time(format, parse_time(time_))
    def evaluate(self, expression, modules=None, namespace=None):
        """Evaluates the given expression in Python and returns the results.

        ``expression`` is evaluated in Python as explained in `Evaluating
        expressions`.

        ``modules`` argument can be used to specify a comma separated
        list of Python modules to be imported and added to the evaluation
        namespace.

        ``namespace`` argument can be used to pass a custom evaluation
        namespace as a dictionary. Possible ``modules`` are added to this
        namespace. This is a new feature in Robot Framework 2.8.4.

        Variables used like ``${variable}`` are replaced in the expression
        before evaluation. Variables are also available in the evaluation
        namespace and can be accessed using special syntax ``$variable``.
        This is a new feature in Robot Framework 2.9 and it is explained
        more thoroughly in `Evaluating expressions`.

        Examples (expecting ``${result}`` is 3.14):
        | ${status} = | Evaluate | 0 < ${result} < 10 | # Would also work with string '3.14' |
        | ${status} = | Evaluate | 0 < $result < 10   | # Using variable itself, not string representation |
        | ${random} = | Evaluate | random.randint(0, sys.maxint) | modules=random, sys |
        | ${ns} =     | Create Dictionary | x=${4}    | y=${2}              |
        | ${result} = | Evaluate | x*10 + y           | namespace=${ns}     |
        =>
        | ${status} = True
        | ${random} = <random integer>
        | ${result} = 42
        """
        # '$var' tokens are rewritten to mangled names resolvable from the
        # decorated variable mapping used as eval()'s local namespace.
        variables = self._variables.as_dict(decoration=False)
        expression = self._handle_variables_in_expression(expression, variables)
        namespace = self._create_evaluation_namespace(namespace, modules)
        variables = self._decorate_variables_for_evaluation(variables)
        try:
            if not is_string(expression):
                raise TypeError("Expression must be string, got %s."
                                % type_name(expression))
            if not expression:
                raise ValueError("Expression cannot be empty.")
            return eval(expression, namespace, variables)
        except:
            # NOTE(review): the bare 'except' is deliberate here — any
            # failure, including the type/value checks above, is converted
            # into a RuntimeError carrying the original error message.
            raise RuntimeError("Evaluating expression '%s' failed: %s"
                               % (expression, get_error_message()))
    def _handle_variables_in_expression(self, expression, variables):
        # Rewrites '$name' occurrences into the mangled 'RF_VAR_name' form
        # matching the keys produced by _decorate_variables_for_evaluation,
        # so that eval() can resolve them. The tokenizer reports a lone '$'
        # as an ERRORTOKEN, which is how the special syntax is detected.
        tokens = []
        variable_started = seen_variable = False
        generated = generate_tokens(StringIO(expression).readline)
        for toknum, tokval, _, _, _ in generated:
            if variable_started:
                if toknum == token.NAME:
                    if tokval not in variables:
                        variable_not_found('$%s' % tokval, variables,
                                           deco_braces=False)
                    tokval = 'RF_VAR_' + tokval
                    seen_variable = True
                else:
                    # The '$' was not followed by a name: keep it as-is.
                    tokens.append((token.ERRORTOKEN, '$'))
                variable_started = False
            if toknum == token.ERRORTOKEN and tokval == '$':
                variable_started = True
            else:
                tokens.append((toknum, tokval))
        if seen_variable:
            # Untokenizing can introduce surrounding whitespace; strip it.
            return untokenize(tokens).strip()
        # No '$variables' were used, so the expression is returned untouched.
        return expression
def _create_evaluation_namespace(self, namespace, modules):
namespace = dict(namespace or {})
modules = modules.replace(' ', '').split(',') if modules else []
namespace.update((m, __import__(m)) for m in modules if m)
return namespace
def _decorate_variables_for_evaluation(self, variables):
decorated = [('RF_VAR_' + name, value)
for name, value in variables.items()]
return NormalizedDict(decorated, ignore='_')
def call_method(self, object, method_name, *args, **kwargs):
"""Calls the named method of the given object with the provided arguments.
The possible return value from the method is returned and can be
assigned to a variable. Keyword fails both if the object does not have
a method with the given name or if executing the method raises an
exception.
Support for ``**kwargs`` is new in Robot Framework 2.9. Since that
possible equal signs in other arguments must be escaped with a
backslash like ``\\=``.
Examples:
| Call Method | ${hashtable} | put | myname | myvalue |
| ${isempty} = | Call Method | ${hashtable} | isEmpty | |
| Should Not Be True | ${isempty} | | | |
| ${value} = | Call Method | ${hashtable} | get | myname |
| Should Be Equal | ${value} | myvalue | | |
| Call Method | ${object} | kwargs | name=value | foo=bar |
| Call Method | ${object} | positional | escaped\\=equals |
"""
try:
method = getattr(object, method_name)
except AttributeError:
raise RuntimeError("Object '%s' does not have method '%s'."
% (object, method_name))
try:
return method(*args, **kwargs)
except:
raise RuntimeError("Calling method '%s' failed: %s"
% (method_name, get_error_message()))
def regexp_escape(self, *patterns):
"""Returns each argument string escaped for use as a regular expression.
This keyword can be used to escape strings to be used with
`Should Match Regexp` and `Should Not Match Regexp` keywords.
Escaping is done with Python's ``re.escape()`` function.
Examples:
| ${escaped} = | Regexp Escape | ${original} |
| @{strings} = | Regexp Escape | @{strings} |
"""
if len(patterns) == 0:
return ''
if len(patterns) == 1:
return re.escape(patterns[0])
return [re.escape(p) for p in patterns]
    def set_test_message(self, message, append=False):
        """Sets message for the current test case.

        If the optional ``append`` argument is given a true value (see
        `Boolean arguments`), the given ``message`` is added after the
        possible earlier message by joining the messages with a space.

        In test teardown this keyword can alter the possible failure
        message, but otherwise failures override messages set by this
        keyword. Notice that in teardown the initial message is available
        as a built-in variable ``${TEST MESSAGE}``.

        It is possible to use HTML format in the message by starting the
        message with ``*HTML*``.

        Examples:
        | Set Test Message | My message           |                          |
        | Set Test Message | is continued.        | append=yes               |
        | Should Be Equal  | ${TEST MESSAGE}      | My message is continued. |
        | Set Test Message | `*`HTML`*` <b>Hello!</b> |                      |

        This keyword can not be used in suite setup or suite teardown.

        Support for ``append`` was added in Robot Framework 2.7.7 and
        support for HTML format in 2.8.
        """
        test = self._namespace.test
        # Outside a test (suite setup/teardown) there is nothing to set.
        if not test:
            raise RuntimeError("'Set Test Message' keyword cannot be used in "
                               "suite setup or teardown.")
        test.message = self._get_possibly_appended_value(test.message, message,
                                                         append)
        # A '*HTML*' prefix switches logging to the HTML pseudo level.
        message, level = self._get_logged_test_message_and_level(test.message)
        self.log('Set test message to:\n%s' % message, level)
def _get_possibly_appended_value(self, initial, new, append):
if not is_unicode(new):
new = unic(new)
if is_truthy(append) and initial:
return '%s %s' % (initial, new)
return new
def _get_logged_test_message_and_level(self, message):
if message.startswith('*HTML*'):
return message[6:].lstrip(), 'HTML'
return message, 'INFO'
def set_test_documentation(self, doc, append=False):
"""Sets documentation for the current test case.
By default the possible existing documentation is overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
The current test documentation is available as a built-in variable
``${TEST DOCUMENTATION}``. This keyword can not be used in suite
setup or suite teardown.
New in Robot Framework 2.7. Support for ``append`` was added in 2.7.7.
"""
test = self._namespace.test
if not test:
raise RuntimeError("'Set Test Documentation' keyword cannot be "
"used in suite setup or teardown.")
test.doc = self._get_possibly_appended_value(test.doc, doc, append)
self._variables.set_test('${TEST_DOCUMENTATION}', test.doc)
self.log('Set test documentation to:\n%s' % test.doc)
def set_suite_documentation(self, doc, append=False, top=False):
"""Sets documentation for the current test suite.
By default the possible existing documentation is overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
This keyword sets the documentation of the current suite by default.
If the optional ``top`` argument is given a true value (see `Boolean
arguments`), the documentation of the top level suite is altered
instead.
The documentation of the current suite is available as a built-in
variable ``${SUITE DOCUMENTATION}``.
New in Robot Framework 2.7. Support for ``append`` and ``top`` were
added in 2.7.7.
"""
top = is_truthy(top)
suite = self._get_namespace(top).suite
suite.doc = self._get_possibly_appended_value(suite.doc, doc, append)
self._variables.set_suite('${SUITE_DOCUMENTATION}', suite.doc, top)
self.log('Set suite documentation to:\n%s' % suite.doc)
def set_suite_metadata(self, name, value, append=False, top=False):
"""Sets metadata for the current test suite.
By default possible existing metadata values are overwritten, but
this can be changed using the optional ``append`` argument similarly
as with `Set Test Message` keyword.
This keyword sets the metadata of the current suite by default.
If the optional ``top`` argument is given a true value (see `Boolean
arguments`), the metadata of the top level suite is altered instead.
The metadata of the current suite is available as a built-in variable
``${SUITE METADATA}`` in a Python dictionary. Notice that modifying this
variable directly has no effect on the actual metadata the suite has.
New in Robot Framework 2.7.4. Support for ``append`` and ``top`` were
added in 2.7.7.
"""
top = is_truthy(top)
if not is_unicode(name):
name = unic(name)
metadata = self._get_namespace(top).suite.metadata
original = metadata.get(name, '')
metadata[name] = self._get_possibly_appended_value(original, value, append)
self._variables.set_suite('${SUITE_METADATA}', metadata.copy(), top)
self.log("Set suite metadata '%s' to value '%s'." % (name, metadata[name]))
def set_tags(self, *tags):
"""Adds given ``tags`` for the current test or all tests in a suite.
When this keyword is used inside a test case, that test gets
the specified tags and other tests are not affected.
If this keyword is used in a suite setup, all test cases in
that suite, recursively, gets the given tags. It is a failure
to use this keyword in a suite teardown.
The current tags are available as a built-in variable ``@{TEST TAGS}``.
See `Remove Tags` if you want to remove certain tags and `Fail` if
you want to fail the test case after setting and/or removing tags.
"""
ctx = self._context
if ctx.test:
ctx.test.tags.add(tags)
ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags))
elif not ctx.in_suite_teardown:
ctx.suite.set_tags(tags, persist=True)
else:
raise RuntimeError("'Set Tags' cannot be used in suite teardown.")
self.log('Set tag%s %s.' % (s(tags), seq2str(tags)))
def remove_tags(self, *tags):
"""Removes given ``tags`` from the current test or all tests in a suite.
Tags can be given exactly or using a pattern where ``*`` matches
anything and ``?`` matches one character.
This keyword can affect either one test case or all test cases in a
test suite similarly as `Set Tags` keyword.
The current tags are available as a built-in variable ``@{TEST TAGS}``.
Example:
| Remove Tags | mytag | something-* | ?ython |
See `Set Tags` if you want to add certain tags and `Fail` if you want
to fail the test case after setting and/or removing tags.
"""
ctx = self._context
if ctx.test:
ctx.test.tags.remove(tags)
ctx.variables.set_test('@{TEST_TAGS}', list(ctx.test.tags))
elif not ctx.in_suite_teardown:
ctx.suite.set_tags(remove=tags, persist=True)
else:
raise RuntimeError("'Remove Tags' cannot be used in suite teardown.")
self.log('Removed tag%s %s.' % (s(tags), seq2str(tags)))
def get_library_instance(self, name):
"""Returns the currently active instance of the specified test library.
This keyword makes it easy for test libraries to interact with
other test libraries that have state. This is illustrated by
the Python example below:
| from robotide.lib.robot.libraries.BuiltIn import BuiltIn
|
| def title_should_start_with(expected):
| seleniumlib = BuiltIn().get_library_instance('SeleniumLibrary')
| title = seleniumlib.get_title()
| if not title.startswith(expected):
| raise AssertionError("Title '%s' did not start with '%s'"
| % (title, expected))
It is also possible to use this keyword in the test data and
pass the returned library instance to another keyword. If a
library is imported with a custom name, the ``name`` used to get
the instance must be that name and not the original library name.
"""
try:
return self._namespace.get_library_instance(name)
except DataError as err:
raise RuntimeError(unicode(err))
class BuiltIn(_Verify, _Converter, _Variables, _RunKeyword, _Control, _Misc):
    """An always available standard library with often needed keywords.

    ``BuiltIn`` is Robot Framework's standard library that provides a set
    of generic keywords needed often. It is imported automatically and
    thus always available. The provided keywords can be used, for example,
    for verifications (e.g. `Should Be Equal`, `Should Contain`),
    conversions (e.g. `Convert To Integer`) and for various other purposes
    (e.g. `Log`, `Sleep`, `Run Keyword If`, `Set Global Variable`).

    == Table of contents ==

    - `HTML error messages`
    - `Evaluating expressions`
    - `Boolean arguments`
    - `Shortcuts`
    - `Keywords`

    = HTML error messages =

    Many of the keywords accept an optional error message to use if the
    keyword fails. Starting from Robot Framework 2.8, it is possible to use
    HTML in these messages by prefixing them with ``*HTML*``. See `Fail`
    keyword for a usage example. Notice that using HTML in messages is not
    limited to BuiltIn library but works with any error message.

    = Evaluating expressions =

    Many keywords, such as `Evaluate`, `Run Keyword If` and `Should Be
    True`, accept an expression that is evaluated in Python. These
    expressions are evaluated using Python's
    [https://docs.python.org/2/library/functions.html#eval|eval] function so
    that all Python built-ins like ``len()`` and ``int()`` are available.
    `Evaluate` allows configuring the execution namespace with custom
    modules, and other keywords have
    [https://docs.python.org/2/library/os.html|os] and
    [https://docs.python.org/2/library/sys.html|sys] modules available
    automatically.

    Examples:
    | `Run Keyword If` | os.sep == '/' | Log | Not on Windows |
    | ${random int} = | `Evaluate` | random.randint(0, 5) | modules=random |

    When a variable is used in the expression using the normal
    ``${variable}`` syntax, its value is replaced before the expression is
    evaluated. This means that the value used in the expression will be the
    string representation of the variable value, not the variable value
    itself. This is not a problem with numbers and other objects that have
    a string representation that can be evaluated directly, but with other
    objects the behavior depends on the string representation. Most
    importantly, strings must always be quoted, and if they can contain
    newlines, they must be triple quoted.

    Examples:
    | `Should Be True` | ${rc} < 10 | Return code greater than 10 |
    | `Run Keyword If` | '${status}' == 'PASS' | Log | Passed |
    | `Run Keyword If` | 'FAIL' in '''${output}''' | Log | Output contains FAIL |

    Starting from Robot Framework 2.9, variables themselves are
    automatically available in the evaluation namespace. They can be
    accessed using special variable syntax without the curly braces like
    ``$variable``. These variables should never be quoted, and in fact they
    are not even replaced inside strings.

    Examples:
    | `Should Be True` | $rc < 10 | Return code greater than 10 |
    | `Run Keyword If` | $status == 'PASS' | `Log` | Passed |
    | `Run Keyword If` | 'FAIL' in $output | `Log` | Output contains FAIL |
    | `Should Be True` | len($result) > 1 and $result[1] == 'OK' |

    Notice that instead of creating complicated expressions, it is often
    better to move the logic into a test library.

    = Boolean arguments =

    Some keywords accept arguments that are handled as Boolean values true
    or false. If such an argument is given as a string, it is considered
    false if it is either empty or case-insensitively equal to ``false`` or
    ``no``. Keywords verifying something that allow dropping actual and
    expected values from the possible error message also consider string
    ``no values`` as false. Other strings are considered true regardless
    their value, and other argument types are tested using same
    [http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
    as in Python].

    True examples:
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=True | # Strings are generally true. |
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=yes | # Same as the above. |
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=${TRUE} | # Python ``True`` is true. |
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=${42} | # Numbers other than 0 are true. |

    False examples:
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=False | # String ``false`` is false. |
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=no | # Also string ``no`` is false. |
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=${EMPTY} | # Empty string is false. |
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=${FALSE} | # Python ``False`` is false. |
    | `Should Be Equal` | ${x} | ${y} | Custom error | values=no values | # ``no values`` works with ``values`` argument |

    Note that prior to Robot Framework 2.9 some keywords considered all
    non-empty strings, including ``false`` and ``no``, to be true.
    """
    # One shared instance for the whole test execution.
    ROBOT_LIBRARY_SCOPE = 'GLOBAL'
    ROBOT_LIBRARY_VERSION = get_version()
class RobotNotRunningError(AttributeError):
    """Raised when an operation requires Robot Framework to be running.

    Derives from AttributeError to remain backwards compatible with
    RF < 2.8.5. The base class may later be changed to plain Exception,
    so new code should catch this exception type explicitly.
    """
    pass
def register_run_keyword(library, keyword, args_to_process=None):
    """Registers 'run keyword' so that its arguments can be handled correctly.

    1) Why is this method needed

    Keywords running other keywords internally (normally using `Run Keyword`
    or some variants of it in BuiltIn) must have the arguments meant to the
    internally executed keyword handled specially to prevent processing them
    twice. This is done ONLY for keywords registered using this method.

    If the register keyword has same name as any keyword from Robot Framework
    standard libraries, it can be used without getting warnings. Normally
    there is a warning in such cases unless the keyword is used in long
    format (e.g. MyLib.Keyword).

    Keywords executed by registered run keywords can be tested in dry-run mode
    if they have 'name' argument which takes the name of the executed keyword.

    2) How to use this method

    `library` is the name of the library where the registered keyword is
    implemented.

    `keyword` can be either a function or method implementing the
    keyword, or name of the implemented keyword as a string.

    `args_to_process` is needed when `keyword` is given as a string, and it
    defines how many of the arguments to the registered keyword must be
    processed normally. When `keyword` is a method or function, this
    information is got directly from it so that varargs (those specified with
    syntax '*args') are not processed but others are.

    3) Examples

    from robotide.lib.robot.libraries.BuiltIn import BuiltIn, register_run_keyword

    def my_run_keyword(name, *args):
        # do something
        return BuiltIn().run_keyword(name, *args)

    # Either one of these works
    register_run_keyword(__name__, my_run_keyword)
    register_run_keyword(__name__, 'My Run Keyword', 1)

    -------------

    from robotide.lib.robot.libraries.BuiltIn import BuiltIn, register_run_keyword

    class MyLibrary:
        def my_run_keyword_if(self, expression, name, *args):
            # do something
            return BuiltIn().run_keyword_if(expression, name, *args)

    # Either one of these works
    register_run_keyword('MyLibrary', MyLibrary.my_run_keyword_if)
    register_run_keyword('MyLibrary', 'my_run_keyword_if', 2)
    """
    # Delegate to the module-global run-keyword register; the special
    # argument handling described above is applied when the registered
    # keyword is later executed.
    RUN_KW_REGISTER.register_run_keyword(library, keyword, args_to_process)
# Register every public method of _RunKeyword as a "run keyword" so its
# arguments are not processed twice (see register_run_keyword above).
# A plain for-loop replaces the original list comprehension: only the side
# effect matters and the comprehension's result list was discarded.
for a in dir(_RunKeyword):
    if a[0] != '_':
        register_run_keyword('BuiltIn', getattr(_RunKeyword, a))
| fingeronthebutton/RIDE | src/robotide/lib/robot/libraries/BuiltIn.py | Python | apache-2.0 | 140,502 |
from include import IncludeManager
from django.apps import apps
from django.db import models
from django.utils import timezone
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from osf.utils.fields import NonNaiveDateTimeField
from website.util import api_v2_url
class NodeLog(ObjectIDMixin, BaseModel):
    """Audit-log entry recording a single action performed on a node.

    Each instance pairs an ``action`` string constant (defined below) with
    action-specific context in ``params``, plus references to the affected
    node and the acting user. Templates for rendering the actions are
    stored in log_templates.mako.
    """

    # Aliases that map short API filter names onto ORM lookup paths.
    FIELD_ALIASES = {
        # TODO: Find a better way
        'node': 'node__guids___id',
        'user': 'user__guids___id',
        'original_node': 'original_node__guids___id'
    }
    objects = IncludeManager()

    # Display format for log dates (always rendered as UTC).
    DATE_FORMAT = '%m/%d/%Y %H:%M UTC'

    # Log action constants -- NOTE: templates stored in log_templates.mako
    CREATED_FROM = 'created_from'
    PROJECT_CREATED = 'project_created'
    PROJECT_REGISTERED = 'project_registered'
    PROJECT_DELETED = 'project_deleted'
    NODE_CREATED = 'node_created'
    NODE_FORKED = 'node_forked'
    NODE_REMOVED = 'node_removed'
    POINTER_CREATED = NODE_LINK_CREATED = 'pointer_created'
    POINTER_FORKED = NODE_LINK_FORKED = 'pointer_forked'
    POINTER_REMOVED = NODE_LINK_REMOVED = 'pointer_removed'
    WIKI_UPDATED = 'wiki_updated'
    WIKI_DELETED = 'wiki_deleted'
    WIKI_RENAMED = 'wiki_renamed'
    MADE_WIKI_PUBLIC = 'made_wiki_public'
    MADE_WIKI_PRIVATE = 'made_wiki_private'
    CONTRIB_ADDED = 'contributor_added'
    CONTRIB_REMOVED = 'contributor_removed'
    CONTRIB_REORDERED = 'contributors_reordered'
    CHECKED_IN = 'checked_in'
    CHECKED_OUT = 'checked_out'
    PERMISSIONS_UPDATED = 'permissions_updated'
    MADE_PRIVATE = 'made_private'
    MADE_PUBLIC = 'made_public'
    TAG_ADDED = 'tag_added'
    TAG_REMOVED = 'tag_removed'
    FILE_TAG_ADDED = 'file_tag_added'
    FILE_TAG_REMOVED = 'file_tag_removed'
    EDITED_TITLE = 'edit_title'
    EDITED_DESCRIPTION = 'edit_description'
    CHANGED_LICENSE = 'license_changed'
    UPDATED_FIELDS = 'updated_fields'
    FILE_MOVED = 'addon_file_moved'
    FILE_COPIED = 'addon_file_copied'
    FILE_RENAMED = 'addon_file_renamed'
    FOLDER_CREATED = 'folder_created'
    FILE_ADDED = 'file_added'
    FILE_UPDATED = 'file_updated'
    FILE_REMOVED = 'file_removed'
    FILE_RESTORED = 'file_restored'
    ADDON_ADDED = 'addon_added'
    ADDON_REMOVED = 'addon_removed'
    COMMENT_ADDED = 'comment_added'
    COMMENT_REMOVED = 'comment_removed'
    COMMENT_UPDATED = 'comment_updated'
    COMMENT_RESTORED = 'comment_restored'
    CITATION_ADDED = 'citation_added'
    CITATION_EDITED = 'citation_edited'
    CITATION_REMOVED = 'citation_removed'
    MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
    MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
    EXTERNAL_IDS_ADDED = 'external_ids_added'
    EMBARGO_APPROVED = 'embargo_approved'
    EMBARGO_CANCELLED = 'embargo_cancelled'
    EMBARGO_COMPLETED = 'embargo_completed'
    EMBARGO_INITIATED = 'embargo_initiated'
    EMBARGO_TERMINATED = 'embargo_terminated'
    RETRACTION_APPROVED = 'retraction_approved'
    RETRACTION_CANCELLED = 'retraction_cancelled'
    RETRACTION_INITIATED = 'retraction_initiated'
    REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
    REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
    REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
    PREREG_REGISTRATION_INITIATED = 'prereg_registration_initiated'
    AFFILIATED_INSTITUTION_ADDED = 'affiliated_institution_added'
    AFFILIATED_INSTITUTION_REMOVED = 'affiliated_institution_removed'
    PREPRINT_INITIATED = 'preprint_initiated'
    PREPRINT_FILE_UPDATED = 'preprint_file_updated'
    PREPRINT_LICENSE_UPDATED = 'preprint_license_updated'
    SUBJECTS_UPDATED = 'subjects_updated'
    VIEW_ONLY_LINK_ADDED = 'view_only_link_added'
    VIEW_ONLY_LINK_REMOVED = 'view_only_link_removed'

    # The canonical set of core actions, extended at import time with any
    # actions contributed by installed addon app configs.
    # NOTE(review): CHANGED_LICENSE, SUBJECTS_UPDATED, FILE_RENAMED and
    # WIKI-related MADE_WIKI_* are declared above but CHANGED_LICENSE,
    # SUBJECTS_UPDATED and FILE_RENAMED are absent from this list --
    # confirm whether that is intentional.
    actions = ([CHECKED_IN, CHECKED_OUT, FILE_TAG_REMOVED, FILE_TAG_ADDED, CREATED_FROM, PROJECT_CREATED,
                PROJECT_REGISTERED, PROJECT_DELETED, NODE_CREATED, NODE_FORKED, NODE_REMOVED,
                NODE_LINK_CREATED, NODE_LINK_FORKED, NODE_LINK_REMOVED, WIKI_UPDATED,
                WIKI_DELETED, WIKI_RENAMED, MADE_WIKI_PUBLIC,
                MADE_WIKI_PRIVATE, CONTRIB_ADDED, CONTRIB_REMOVED, CONTRIB_REORDERED,
                PERMISSIONS_UPDATED, MADE_PRIVATE, MADE_PUBLIC, TAG_ADDED, TAG_REMOVED, EDITED_TITLE,
                EDITED_DESCRIPTION, UPDATED_FIELDS, FILE_MOVED, FILE_COPIED,
                FOLDER_CREATED, FILE_ADDED, FILE_UPDATED, FILE_REMOVED, FILE_RESTORED, ADDON_ADDED,
                ADDON_REMOVED, COMMENT_ADDED, COMMENT_REMOVED, COMMENT_UPDATED, COMMENT_RESTORED,
                MADE_CONTRIBUTOR_VISIBLE,
                MADE_CONTRIBUTOR_INVISIBLE, EXTERNAL_IDS_ADDED, EMBARGO_APPROVED, EMBARGO_TERMINATED,
                EMBARGO_CANCELLED, EMBARGO_COMPLETED, EMBARGO_INITIATED, RETRACTION_APPROVED,
                RETRACTION_CANCELLED, RETRACTION_INITIATED, REGISTRATION_APPROVAL_CANCELLED,
                REGISTRATION_APPROVAL_INITIATED, REGISTRATION_APPROVAL_APPROVED,
                PREREG_REGISTRATION_INITIATED,
                CITATION_ADDED, CITATION_EDITED, CITATION_REMOVED,
                AFFILIATED_INSTITUTION_ADDED, AFFILIATED_INSTITUTION_REMOVED, PREPRINT_INITIATED,
                PREPRINT_FILE_UPDATED, PREPRINT_LICENSE_UPDATED, VIEW_ONLY_LINK_ADDED, VIEW_ONLY_LINK_REMOVED] + list(sum([
                    config.actions for config in apps.get_app_configs() if config.name.startswith('addons.')
                ], tuple())))
    action_choices = [(action, action.upper()) for action in actions]
    date = NonNaiveDateTimeField(db_index=True, null=True, blank=True, default=timezone.now)
    # TODO build action choices on the fly with the addon stuff
    action = models.CharField(max_length=255, db_index=True)  # , choices=action_choices)
    params = DateTimeAwareJSONField(default=dict)
    should_hide = models.BooleanField(default=False)
    user = models.ForeignKey('OSFUser', related_name='logs', db_index=True,
                             null=True, blank=True, on_delete=models.CASCADE)
    # For logs created by unregistered (email-only) contributors.
    foreign_user = models.CharField(max_length=255, null=True, blank=True)
    node = models.ForeignKey('AbstractNode', related_name='logs',
                             db_index=True, null=True, blank=True, on_delete=models.CASCADE)
    original_node = models.ForeignKey('AbstractNode', db_index=True,
                                      null=True, blank=True, on_delete=models.CASCADE)

    def __unicode__(self):
        # Bug fix: the original format string contained a doubled comma
        # ("user={self.user!r},,"); a single separator is emitted now.
        return ('({self.action!r}, user={self.user!r}, node={self.node!r}, params={self.params!r}) '
                'with id {self.id!r}').format(self=self)

    class Meta:
        # Newest entries first; `get_latest_by` supports QuerySet.latest().
        ordering = ['-date']
        get_latest_by = 'date'

    @property
    def absolute_api_v2_url(self):
        """Absolute URL of this log in the v2 API."""
        path = '/logs/{}/'.format(self._id)
        return api_v2_url(path)

    def get_absolute_url(self):
        return self.absolute_api_v2_url

    @property
    def absolute_url(self):
        return self.absolute_api_v2_url

    def _natural_key(self):
        # GUID-backed identifier, stable across databases.
        return self._id
| binoculars/osf.io | osf/models/nodelog.py | Python | apache-2.0 | 7,092 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Transformer w/Keras benchmark and accuracy tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import flags
import tensorflow as tf
from official.benchmark import benchmark_wrappers
from official.benchmark import owner_utils
from official.benchmark.perfzero_benchmark import PerfZeroBenchmark
from official.nlp.transformer import misc
from official.nlp.transformer import transformer_main as transformer_main
from official.utils.flags import core as flags_core
TRANSFORMER_EN2DE_DATA_DIR_NAME = 'wmt32k-en2de-official'
EN2DE_2014_BLEU_DATA_DIR_NAME = 'newstest2014'
FLAGS = flags.FLAGS
TMP_DIR = os.getenv('TMPDIR')
class TransformerBenchmark(PerfZeroBenchmark):
  """Methods common to executing transformer w/keras tests.

  Code under test for the Transformer Keras models report the same data and
  require the same FLAG setup.
  """

  def __init__(self, output_dir=None, default_flags=None, root_data_dir=None,
               flag_methods=None, tpu=None):
    # Dataset layout is fixed relative to root_data_dir: training data and
    # vocab live under TRANSFORMER_EN2DE_DATA_DIR_NAME, and the newstest2014
    # source/reference pair for BLEU under EN2DE_2014_BLEU_DATA_DIR_NAME.
    root_data_dir = root_data_dir if root_data_dir else ''
    self.train_data_dir = os.path.join(root_data_dir,
                                       TRANSFORMER_EN2DE_DATA_DIR_NAME)
    self.vocab_file = os.path.join(root_data_dir,
                                   TRANSFORMER_EN2DE_DATA_DIR_NAME,
                                   'vocab.ende.32768')
    self.bleu_source = os.path.join(root_data_dir,
                                    EN2DE_2014_BLEU_DATA_DIR_NAME,
                                    'newstest2014.en')
    self.bleu_ref = os.path.join(root_data_dir,
                                 EN2DE_2014_BLEU_DATA_DIR_NAME,
                                 'newstest2014.de')
    if default_flags is None:
      default_flags = {}
    # All benchmarks in this family train on the same dataset and vocab,
    # so they are seeded into the default flags here.
    default_flags['data_dir'] = self.train_data_dir
    default_flags['vocab_file'] = self.vocab_file
    super(TransformerBenchmark, self).__init__(
        output_dir=output_dir,
        default_flags=default_flags,
        flag_methods=flag_methods,
        tpu=tpu)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                bleu_max=None,
                                bleu_min=None,
                                log_steps=None,
                                total_batch_size=None,
                                warmup=1):
    """Report benchmark results by writing to local protobuf file.

    Args:
      bleu_max: highest passing level for bleu score.
      bleu_min: lowest passing level for bleu score.
      log_steps: How often the log was created for stats['step_timestamp_log'].
      total_batch_size: Global batch-size.
      warmup: number of entries in stats['step_timestamp_log'] to ignore.
    """
    start_time_sec = time.time()
    task = transformer_main.TransformerTask(FLAGS)
    stats = task.train()
    wall_time_sec = time.time() - start_time_sec

    metrics = []
    if 'bleu_uncased' in stats:
      if 'bleu_uncased_history' in stats:
        # History entries are (iteration, score); report the best score,
        # the iteration it occurred at, and the final score.
        bleu_uncased_best = max(stats['bleu_uncased_history'],
                                key=lambda x: x[1])
        metrics.append({'name': 'bleu_uncased',
                        'value': bleu_uncased_best[1],
                        'min_value': bleu_min,
                        'max_value': bleu_max})
        metrics.append({'name': 'bleu_best_score_iteration',
                        'value': bleu_uncased_best[0]})
        metrics.append({'name': 'bleu_uncased_last',
                        'value': stats['bleu_uncased']})
      else:
        metrics.append({'name': 'bleu_uncased',
                        'value': stats['bleu_uncased'],
                        'min_value': bleu_min,
                        'max_value': bleu_max})

    if (warmup and 'step_timestamp_log' in stats and
        len(stats['step_timestamp_log']) > warmup + 1):
      # first entry in the time_log is start of step 1. The rest of the
      # entries are the end of each step recorded
      time_log = stats['step_timestamp_log']
      elapsed = time_log[-1].timestamp - time_log[warmup].timestamp
      num_examples = (
          total_batch_size * log_steps * (len(time_log) - warmup - 1))
      examples_per_sec = num_examples / elapsed
      metrics.append({'name': 'exp_per_second',
                      'value': examples_per_sec})

    if 'avg_exp_per_second' in stats:
      metrics.append({'name': 'avg_exp_per_second',
                      'value': stats['avg_exp_per_second']})

    if 'step_timestamp_log' in stats:
      # Startup time: from benchmark start to the first recorded step.
      time_log = stats['step_timestamp_log']
      metrics.append({'name': 'startup_time',
                      'value': time_log[0].timestamp - start_time_sec})

    flags_str = flags_core.get_nondefault_flags_as_str()
    self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics,
                          extras={'flags': flags_str})
class TransformerBaseKerasAccuracy(TransformerBenchmark):
  """Benchmark accuracy tests for Transformer Base model w/ Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """Benchmark accuracy tests for Transformer Base model w/ Keras.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """
    flag_methods = [misc.define_transformer_flags]
    super(TransformerBaseKerasAccuracy, self).__init__(
        output_dir=output_dir, root_data_dir=root_data_dir,
        flag_methods=flag_methods)

  def benchmark_1_gpu(self):
    """Benchmark 1 gpu.

    The paper uses 8 GPUs and a much larger effective batch size, this will
    not converge to the 27.3 BLEU (uncased) SOTA.
    """
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'base'
    FLAGS.batch_size = 2048
    FLAGS.train_steps = 1000
    FLAGS.steps_between_evals = 500
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    # These bleu scores are based on test runs after at this limited
    # number of steps and batch size after verifying SOTA at 8xV100s.
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=25.3,
                                   bleu_max=26)

  def benchmark_1_gpu_static_batch(self):
    """Benchmark 1 gpu with static_batch.

    The paper uses 8 GPUs and a much larger effective batch size, this will
    not converge to the 27.3 BLEU (uncased) SOTA.
    """
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'base'
    FLAGS.batch_size = 4096
    FLAGS.train_steps = 100000
    FLAGS.steps_between_evals = 5000
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_static_batch')
    # These bleu scores are based on test runs after at this limited
    # number of steps and batch size after verifying SOTA at 8xV100s.
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=25.3,
                                   bleu_max=26)

  def benchmark_8_gpu(self):
    """Benchmark 8 gpu.

    Should converge to 27.3 BLEU (uncased). This has not been confirmed yet.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'base'
    FLAGS.batch_size = 4096*8
    FLAGS.train_steps = 100000
    FLAGS.steps_between_evals = 20000
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=27,
                                   bleu_max=28)

  def benchmark_8_gpu_static_batch(self):
    """Benchmark 8 gpu with static batch.

    Should converge to 27.3 BLEU (uncased). This has not been confirmed yet.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'base'
    FLAGS.batch_size = 4096*8
    FLAGS.train_steps = 100000
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.steps_between_evals = 5000
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=27,
                                   bleu_max=28)
class TransformerBigKerasAccuracy(TransformerBenchmark):
  """Benchmark accuracy tests for Transformer Big model w/ Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """Benchmark accuracy tests for Transformer Big model w/ Keras.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """
    flag_methods = [misc.define_transformer_flags]
    super(TransformerBigKerasAccuracy, self).__init__(
        output_dir=output_dir, root_data_dir=root_data_dir,
        flag_methods=flag_methods)

  def benchmark_8_gpu(self):
    """Benchmark 8 gpu.

    Over 6 runs with eval every 20K steps the average highest value was 28.195
    (bleu uncased). 28.424 was the highest and 27.96 the lowest. The values are
    the highest value seen during a run and occurred at a median of iteration 9.
    Iterations are not epochs, an iteration is a number of steps between evals.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'big'
    FLAGS.batch_size = 3072*8
    FLAGS.train_steps = 20000 * 12
    FLAGS.steps_between_evals = 20000
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=27.9,
                                   bleu_max=29.2)

  def benchmark_8_gpu_static_batch(self):
    """Benchmark 8 gpu with static batch.

    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'big'
    FLAGS.batch_size = 3072*8
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.train_steps = 20000 * 12
    FLAGS.steps_between_evals = 20000
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=28,
                                   bleu_max=29.2)

  def benchmark_8_gpu_fp16(self):
    """Benchmark 8 gpu with dynamic batch and fp16.

    Over 6 runs with eval every 20K steps the average highest value was 28.247
    (bleu uncased). 28.424 was the highest and 28.09 the lowest. The values are
    the highest value seen during a run and occurred at a median of iteration
    11. While this could be interpreted as worse than FP32, if looking at the
    first iteration at which 28 is passed FP16 performs equal and possibly
    better. Although not part of the initial test runs, the highest value
    recorded with the arguments below was 28.9 at iteration 12. Iterations are
    not epochs, an iteration is a number of steps between evals.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'big'
    FLAGS.batch_size = 3072*8
    FLAGS.train_steps = 20000 * 12
    FLAGS.steps_between_evals = 20000
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=28,
                                   bleu_max=29.2)

  def benchmark_8_gpu_fp16_amp(self):
    """Benchmark 8 gpu with dynamic batch and fp16 with automatic mixed precision.

    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    # Use the graph-rewrite (AMP) implementation of fp16 rather than the
    # Keras mixed-precision policy.
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'big'
    FLAGS.batch_size = 3072*8
    FLAGS.train_steps = 20000 * 12
    FLAGS.steps_between_evals = 20000
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_amp')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=28,
                                   bleu_max=29)

  def benchmark_8_gpu_static_batch_fp16(self):
    """Benchmark 8 gpu with static batch and fp16.

    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'big'
    FLAGS.batch_size = 3072*8
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.train_steps = 400000
    FLAGS.steps_between_evals = 20000
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch_fp16')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=28,
                                   bleu_max=29.2)

  def benchmark_xla_8_gpu_static_batch_fp16(self):
    """Benchmark 8 gpu with static batch, XLA, and FP16.

    Should converge to 28.4 BLEU (uncased). This has not been verified yet.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_xla = True
    FLAGS.data_dir = self.train_data_dir
    FLAGS.vocab_file = self.vocab_file
    # Sets values directly to avoid validation check.
    FLAGS['bleu_source'].value = self.bleu_source
    FLAGS['bleu_ref'].value = self.bleu_ref
    FLAGS.param_set = 'big'
    FLAGS.batch_size = 3072*8
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.train_steps = 400000
    FLAGS.steps_between_evals = 20000
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_static_batch_fp16')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps,
                                   bleu_min=28,
                                   bleu_max=29.2)
class TransformerKerasBenchmark(TransformerBenchmark):
  """Benchmarks for Transformer (Base and Big) using Keras."""

  def __init__(self, output_dir=None, default_flags=None,
               root_data_dir=None, batch_per_gpu=4096, tpu=None):
    """Initialize.

    Args:
      output_dir: Based directory for saving artifacts, e.g. checkpoints.
      default_flags: default flags to use for all tests.
      root_data_dir: root directory for data, e.g. training.
      batch_per_gpu: batch size to use per gpu.
      tpu: Target TPU to use.
    """
    flag_methods = [misc.define_transformer_flags]
    # Per-GPU batch size; multi-GPU benchmarks below scale it by num_gpus.
    self.batch_per_gpu = batch_per_gpu
    super(TransformerKerasBenchmark, self).__init__(
        output_dir=output_dir,
        default_flags=default_flags,
        root_data_dir=root_data_dir,
        flag_methods=flag_methods,
        tpu=tpu)

  def benchmark_1_gpu_no_dist_strat(self):
    """Benchmark 1 gpu without distribution strategy."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.distribution_strategy = 'off'
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_1_gpu_no_dist_strat_static_batch(self):
    """Benchmark 1 gpu without distribution strategy with static batch."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.distribution_strategy = 'off'
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_ds_sb')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_1_gpu(self):
    """Benchmark 1 gpu."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_1_gpu_fp16(self):
    """Benchmark 1 gpu FP16."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_1_gpu(self):
    """Benchmark 1 gpu w/xla."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.enable_xla = True
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_1_gpu_fp16(self):
    """Benchmark 1 gpu w/xla and FP16."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.enable_xla = True
    FLAGS.dtype = 'fp16'
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_1_gpu_static_batch(self):
    """Benchmark 1 gpu with static batch."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_static_batch')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_1_gpu_static_batch(self):
    """Benchmark 1 gpu with static batch w/xla."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_static_batch')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.enable_xla = True
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_1_gpu_static_batch_fp16(self):
    """Benchmark 1 gpu with static batch FP16."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_static_batch_fp16')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.dtype = 'fp16'
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_1_gpu_static_batch_fp16(self):
    """Benchmark 1 gpu with static batch w/xla and FP16."""
    self._setup()
    FLAGS.num_gpus = 1
    FLAGS.batch_size = self.batch_per_gpu
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_1_gpu_static_batch_fp16')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    FLAGS.enable_xla = True
    FLAGS.dtype = 'fp16'
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_8_gpu(self):
    """Benchmark 8 gpu."""
    self._setup()
    FLAGS.num_gpus = 8
    # Global batch size scales linearly with the number of GPUs.
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_fp16(self):
    """Benchmark 8 gpu FP16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_8_gpu(self):
    """Benchmark 8 gpu w/xla."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.enable_xla = True
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_8_gpu_fp16(self):
    """Benchmark 8 gpu w/xla and FP16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.enable_xla = True
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_static_batch(self):
    """Benchmark 8 gpu with static batch."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_static_batch')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_static_batch_fp16(self):
    """Benchmark 8 gpu with static batch FP16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_static_batch_fp16')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_8_gpu_static_batch(self):
    """Benchmark 8 gpu with static batch w/xla."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.enable_xla = True
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_static_batch')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)

  def benchmark_xla_8_gpu_static_batch_fp16(self):
    """Benchmark 8 gpu with static batch w/xla and FP16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.enable_xla = True
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = self.batch_per_gpu * 8
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_static_batch_fp16')
    FLAGS.static_batch = True
    FLAGS.max_length = 64
    self._run_and_report_benchmark(total_batch_size=FLAGS.batch_size,
                                   log_steps=FLAGS.log_steps)
class TransformerBaseKerasBenchmarkReal(TransformerKerasBenchmark):
  """Benchmarks of the Transformer base model against real data."""

  def __init__(self, output_dir=TMP_DIR, root_data_dir=TMP_DIR, **kwargs):
    # Short-running defaults: 50 training steps, logging every 10.
    base_defaults = {
        'param_set': 'base',
        'train_steps': 50,
        'log_steps': 10,
    }
    super(TransformerBaseKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        default_flags=base_defaults,
        root_data_dir=root_data_dir,
        batch_per_gpu=4096)
class TransformerBigKerasBenchmarkReal(TransformerKerasBenchmark):
  """Transformer based version real data benchmark tests."""

  def __init__(self, output_dir=TMP_DIR, root_data_dir=TMP_DIR,
               tpu=None, **kwargs):
    # Short-running defaults: 50 training steps, logging every 10.
    def_flags = {}
    def_flags['param_set'] = 'big'
    def_flags['train_steps'] = 50
    def_flags['log_steps'] = 10
    super(TransformerBigKerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags,
        root_data_dir=root_data_dir, batch_per_gpu=3072,
        tpu=tpu)

  def benchmark_2x2_tpu(self):
    """Port of former snaggletooth transformer_big model on 2x2."""
    self._setup()
    FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu')
    FLAGS.train_steps = 300
    FLAGS.log_steps = 150
    FLAGS.steps_between_evals = 150
    FLAGS.distribution_strategy = 'tpu'
    FLAGS.static_batch = True
    FLAGS.use_ctl = True
    FLAGS.batch_size = 6144
    FLAGS.max_length = 64
    FLAGS.decode_batch_size = 32
    FLAGS.decode_max_length = 97
    FLAGS.padded_decode = True
    FLAGS.enable_checkpointing = False
    self._run_and_report_benchmark(
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_4x4_tpu(self):
    """Port of former GCP transformer_big model on 4x4."""
    self._setup()
    FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu')
    FLAGS.train_steps = 300
    FLAGS.log_steps = 150
    FLAGS.steps_between_evals = 150
    FLAGS.distribution_strategy = 'tpu'
    FLAGS.static_batch = True
    FLAGS.use_ctl = True
    FLAGS.batch_size = 24576
    FLAGS.max_length = 64
    FLAGS.decode_batch_size = 32
    FLAGS.decode_max_length = 97
    FLAGS.padded_decode = True
    FLAGS.enable_checkpointing = False
    self._run_and_report_benchmark(
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  @owner_utils.Owner('tf-graph-compiler')
  def benchmark_4x4_tpu_mlir(self):
    """Run transformer_big model on 4x4 with the MLIR Bridge enabled."""
    self._setup()
    # NOTE(review): reuses the 'benchmark_4x4_tpu' model_dir name rather
    # than a '_mlir'-suffixed one -- confirm whether that is intentional.
    FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu')
    FLAGS.train_steps = 300
    FLAGS.log_steps = 150
    FLAGS.steps_between_evals = 150
    FLAGS.distribution_strategy = 'tpu'
    FLAGS.static_batch = True
    FLAGS.use_ctl = True
    FLAGS.batch_size = 24576
    FLAGS.max_length = 64
    FLAGS.decode_batch_size = 32
    FLAGS.decode_max_length = 97
    FLAGS.padded_decode = True
    FLAGS.enable_checkpointing = False
    # Identical setup to benchmark_4x4_tpu, but with the MLIR TPU bridge on.
    tf.config.experimental.enable_mlir_bridge()
    self._run_and_report_benchmark(
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)
if __name__ == '__main__':
  # tf.test.main() discovers and runs the benchmark classes defined above
  # when this module is executed as a script.
  tf.test.main()
| tombstone/models | official/benchmark/transformer_benchmark.py | Python | apache-2.0 | 29,410 |
##
# Copyright (c) 2009-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twistedcaldav.ical import Component as iComponent
from twistedcaldav.vcard import Component as vComponent
# Public API of this module: the two abstract filter base classes.
__all__ = [
    "CalendarFilter",
    "AddressFilter",
]
class CalendarFilter(object):
    """
    Abstract base class for objects that filter or merge iCalendar data.
    Concrete subclasses implement L{filter} and L{merge}.
    """

    def __init__(self):
        pass

    def filter(self, ical):
        """
        Filter the supplied iCalendar object using the request information.

        @param ical: iCalendar object
        @type ical: L{Component}
        @return: L{Component} for the filtered calendar data
        """
        raise NotImplementedError

    def merge(self, icalnew, icalold):
        """
        Merge the old iCalendar object into the new iCalendar data using the
        request information.

        @param icalnew: new iCalendar object to merge data into
        @type icalnew: L{Component}
        @param icalold: old iCalendar data to merge data from
        @type icalold: L{Component}
        """
        raise NotImplementedError

    def validCalendar(self, ical):
        """
        Coerce the argument to a parsed VCALENDAR component, raising
        C{ValueError} for anything that is not valid calendar data.
        """
        parsed = ical
        # A raw string gets parsed into a Component first.
        if isinstance(parsed, str):
            try:
                parsed = iComponent.fromString(parsed)
            except ValueError:
                raise ValueError("Not a calendar: %r" % (parsed,))

        if parsed is None or parsed.name() != "VCALENDAR":
            raise ValueError("Not a calendar: %r" % (parsed,))

        return parsed
class AddressFilter(object):
    """
    Abstract base class for objects that filter or merge vCard data.
    Concrete subclasses implement L{filter} and L{merge}.
    """

    def __init__(self):
        pass

    def filter(self, vcard):
        """
        Filter the supplied vCard object using the request information.

        @param vcard: iCalendar object
        @type vcard: L{Component}
        @return: L{Component} for the filtered vcard data
        """
        raise NotImplementedError

    def merge(self, vcardnew, vcardold):
        """
        Merge the old vcard object into the new vcard data using the request
        information.

        @param vcardnew: new vcard object to merge data into
        @type vcardnew: L{Component}
        @param vcardold: old vcard data to merge data from
        @type vcardold: L{Component}
        """
        raise NotImplementedError

    def validAddress(self, vcard):
        """
        Coerce the argument to a parsed VCARD component, raising
        C{ValueError} for anything that is not valid vCard data.
        """
        parsed = vcard
        # A raw string gets parsed into a Component first.
        if isinstance(parsed, str):
            try:
                parsed = vComponent.fromString(parsed)
            except ValueError:
                raise ValueError("Not a vcard: %r" % (parsed,))

        if parsed is None or parsed.name() != "VCARD":
            raise ValueError("Not a vcard: %r" % (parsed,))

        return parsed
| macosforge/ccs-calendarserver | twistedcaldav/datafilters/filter.py | Python | apache-2.0 | 3,318 |
#!/usr/bin/env python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Executable sample for creating a Azure AD Context Feed.
Creating other feeds requires changing this sample code.
"""
import argparse
import json
from typing import Any, Mapping
from google.auth.transport import requests
from common import chronicle_auth
from common import regions
# Default (US) API endpoint; regionalized in __main__ via regions.url().
CHRONICLE_API_BASE_URL = "https://backstory.googleapis.com"
def create_azure_ad_context_feed(http_session: requests.AuthorizedSession,
                                 tokenendpoint: str, clientid: str,
                                 clientsecret: str, retrievedevices: bool,
                                 retrievegroups: bool) -> Mapping[str, Any]:
  """Creates a new Azure AD Context feed.

  Args:
    http_session: Authorized session for HTTP requests.
    tokenendpoint: A string which represents endpoint to connect to.
    clientid: A string which represents Id of the credential to use.
    clientsecret: A string which represents secret of the credential to use.
    retrievedevices: A boolean to indicate whether to retrieve devices or not.
    retrievegroups: A boolean to indicate whether to retrieve groups or not.

  Returns:
    New Azure AD Feed.

  Raises:
    requests.exceptions.HTTPError: HTTP request resulted in an error
      (response.status_code >= 400).
  """
  url = f"{CHRONICLE_API_BASE_URL}/v1/feeds/"

  # Build the request payload piecewise for readability.
  authentication = {
      "tokenEndpoint": tokenendpoint,
      "clientId": clientid,
      "clientSecret": clientsecret
  }
  details = {
      "feedSourceType": "API",
      "logType": "AZURE_AD_CONTEXT",
      "azureAdContextSettings": {
          "authentication": authentication,
          "retrieveDevices": retrievedevices,
          "retrieveGroups": retrievegroups
      }
  }

  response = http_session.request("POST", url, json={"details": details})
  # On success the server echoes the feed back with a generated "name"
  # (feeds/<uuid>) and "feedState": "PENDING_ENABLEMENT".
  if response.status_code >= 400:
    print(response.text)
  response.raise_for_status()
  return response.json()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
chronicle_auth.add_argument_credentials_file(parser)
regions.add_argument_region(parser)
parser.add_argument(
"-te",
"--tokenendpoint",
type=str,
required=True,
help="token endpoint")
parser.add_argument(
"-ci",
"--clientid",
type=str,
required=True,
help="client id")
parser.add_argument(
"-cs",
"--clientsecret",
type=str,
required=True,
help="client secret")
parser.add_argument(
"-rd",
"--retrievedevices",
type=bool,
required=True,
help="retrieve devices")
parser.add_argument(
"-rg",
"--retrievegroups",
type=str,
required=True,
help="retrieve groups")
args = parser.parse_args()
CHRONICLE_API_BASE_URL = regions.url(CHRONICLE_API_BASE_URL, args.region)
session = chronicle_auth.initialize_http_session(args.credentials_file)
new_feed = create_azure_ad_context_feed(session, args.tokenendpoint,
args.clientid, args.clientsecret,
args.retrievedevices,
args.retrievegroups)
print(json.dumps(new_feed, indent=2))
| chronicle/api-samples-python | feeds/create_azure_ad_context_feed.py | Python | apache-2.0 | 4,356 |
# Copyright (c) 2015, Matt Layman

from ConfigParser import ConfigParser, NoOptionError, NoSectionError
import os
import sys

import requests

# Base URL of the Transifex v2 API.
API_URL = 'https://www.transifex.com/api/2'

# Languages with maintained translations; each is expected to have a
# matching tap/locale/<language>/LC_MESSAGES directory.
LANGUAGES = [
    'es',
    'fr',
    'it',
    'nl',
]
def fetch_po_for(language, username, password):
    """Download the tappy .po translation file for *language* from Transifex.

    NOTE(review): this function reads the module-global ``here`` that is only
    assigned in the ``__main__`` block below; importing and calling it from
    another module would raise NameError. Consider passing ``here`` in.
    """
    print 'Downloading po file for {0} ...'.format(language)
    po_api = '/project/tappy/resource/tappypot/translation/{0}/'.format(
        language)
    po_url = API_URL + po_api
    # 'file' asks the API for the raw po file instead of a JSON payload.
    params = {'file': '1'}
    r = requests.get(po_url, auth=(username, password), params=params)
    if r.status_code == 200:
        # Force UTF-8 so r.text decodes the po content correctly.
        r.encoding = 'utf-8'
        output_file = os.path.join(
            here, 'tap', 'locale', language, 'LC_MESSAGES', 'tappy.po')
        with open(output_file, 'wb') as out:
            out.write(r.text.encode('utf-8'))
    else:
        print('Something went wrong fetching the {0} po file.'.format(
            language))
def get_auth_from_conf(here):
    """Read the Transifex username/password pair from .transifex.ini.

    Exits the process with an explanatory message when the file is missing
    or lacks the [auth] section/options.
    """
    conf_path = os.path.join(here, '.transifex.ini')
    parser = ConfigParser()

    try:
        with open(conf_path, 'r') as handle:
            parser.readfp(handle)
    except IOError as ex:
        sys.exit('Failed to load authentication configuration file.\n'
                 '{0}'.format(ex))

    try:
        credentials = (parser.get('auth', 'username'),
                       parser.get('auth', 'password'))
    except (NoOptionError, NoSectionError) as ex:
        sys.exit('Oops. Incomplete configuration file: {0}'.format(ex))

    return credentials
if __name__ == '__main__':
    here = os.path.abspath(os.path.dirname(__file__))
    # NOTE: ``here`` is also read as a module global by fetch_po_for().
    username, password = get_auth_from_conf(here)

    for language in LANGUAGES:
        fetch_po_for(language, username, password)
| blakev/tappy | transifex.py | Python | bsd-2-clause | 1,750 |
#!/usr/bin/env python
'''
@author Luke C
@date Mon Mar 25 09:57:59 EDT 2013
@file ion/util/stored_values.py
'''
from pyon.core.exception import NotFound
import gevent
class StoredValueManager(object):
    """Check-and-set style access to lookup documents in the container's
    object store."""

    def __init__(self, container):
        # Keep a handle on the container's object store for all operations.
        self.store = container.object_store

    def stored_value_cas(self, doc_key, document_updates):
        '''
        Performs a check and set for a lookup_table in the object store for the given key
        '''
        try:
            doc = self.store.read_doc(doc_key)
        except NotFound:
            # No existing document: create one from the updates wholesale.
            doc_id, rev = self.store.create_doc(document_updates, object_id=doc_key)
            return doc_id, rev
        except KeyError as e:
            # NOTE(review): a KeyError whose message contains 'http' is
            # apparently how a missing document surfaces from an HTTP-backed
            # store; a KeyError without it falls through with ``doc`` unbound
            # and would raise NameError below -- confirm against the
            # object_store implementation.
            if 'http' in e.message:
                doc_id, rev = self.store.create_doc(document_updates, object_id=doc_key)
                return doc_id, rev
        # Document was read successfully: apply the updates key by key and
        # write the merged document back.
        for k,v in document_updates.iteritems():
            doc[k] = v
        doc_id, rev = self.store.update_doc(doc)
        return doc_id, rev

    def read_value(self, doc_key):
        # Fetch a single stored document by key (raises NotFound if absent).
        doc = self.store.read_doc(doc_key)
        return doc

    def read_value_mult(self, doc_keys, strict=False):
        # Fetch several documents at once; ``strict`` controls whether
        # missing keys are an error (delegated to the store).
        doc_list = self.store.read_doc_mult(doc_keys, strict=strict)
        return doc_list

    def delete_stored_value(self, doc_key):
        # Remove the document for the given key.
        self.store.delete_doc(doc_key)
| ooici/coi-services | ion/util/stored_values.py | Python | bsd-2-clause | 1,331 |
import os
import subprocess
import sys
import threading

# Control FIFOs used by the repeatexec binary; an OSError here normally
# means the FIFO already exists from a previous run, which is fine.
try:
    os.mkfifo("/tmp/shutdown",0666)
except OSError:
    pass
try:
    os.mkfifo("/tmp/abort",0666)
except OSError:
    pass

# Build the Go binary; the -X linker flags bake the pipe paths and the
# strace-based runner configuration into the executable at link time.
subprocess.check_call(["go","build", "-ldflags", "-X main.FALLBACK_SHUTDOWN_PIPE /tmp/shutdown -X main.FALLBACK_ABORT_PIPE /tmp/abort -X main.RUNNER0 strace -X main.RUNNER1 strace+ -X main.RUNNER_ADDITIONAL_FLAG -f -X main.RUNER_CONFIG_PREFIX trace= -X main.RUNNER_CONFIG_FLAG -e -X main.RUNNER_PATH /usr/bin/"])

# Standard-stream FIFOs shared with the spawned commands.
try:
    os.mkfifo("/tmp/stdin",0666)
except OSError:
    pass
try:
    os.mkfifo("/tmp/stdout",0666)
except OSError:
    pass
try:
    os.mkfifo("/tmp/stderr",0666)
except OSError:
    pass
os.chmod("/tmp/stdin",0660)
os.chmod("/tmp/stdout",0660)
os.chmod("/tmp/stderr",0660)

def echo_stderr():
    # Forward the child's stderr FIFO to our own stderr one line at a time,
    # reopening the FIFO after each writer closes its end.
    while True:
        with open('/tmp/stderr','r') as stderr:
            sys.stderr.write(stderr.readline())

# Daemon thread so the mirror does not keep the process alive at exit.
thread = threading.Thread(target=echo_stderr)
thread.setDaemon(True)
thread.start()

# Run the freshly built binary under sudo and drive it with each non-empty
# line of example_commands.json, echoing its output and response codes.
repeatexec = subprocess.Popen(["sudo","./repeatexec"],stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=sys.stderr)
with open("example_commands.json") as example:
    for line in example.readlines():
        if len(line.strip()):
            print "EXECUTING ",line
            repeatexec.stdin.write(line)
            repeatexec.stdin.flush()
            # Opening/closing the stdin FIFO signals EOF to the child.
            with open('/tmp/stdin','w') as stdin:
                pass
            with open('/tmp/stdout','r') as stdout:
                print stdout.read()
            # One status byte per command; empty read means the child exited.
            exitcode = repeatexec.stdout.read(1)
            print "RESPONSE ", ord(exitcode) if len(exitcode) else 'END OF TEST: SUCCESS'
""" config file for eth tracker feel your keys and secrets here """
""" twilio api """
account_sid = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
auth_token = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
my_num = "+xxxxxxxxxxx",
twilio_num = "+xxxxxxxx",
""" CEX.IO API is limited to 600 requests per 10 minutes """
username = "xxxxxxxxx"
api_key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
api_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
""" finance """
investment = 50 # in euro
fees = 2.96
ticker = 'ETH/EUR'
price_alert = 400
percent_alert = 10 # 10%
alert = True
""" update interval for api calls """
update_int = 300 # every 5 minutes
| binary-signal/Ethereum-Tracker | config.py | Python | bsd-2-clause | 614 |
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless the environment
    # already specifies a settings module.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gettingstarted.settings")

    from django.core.management import execute_from_command_line

    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
import settings

# Copy every UPPERCASE name from the project's settings module into this
# module so tests start from the real settings as a baseline.
globals().update(i for i in settings.__dict__.items() if i[0].isupper())

# Require the mezzanine.accounts app. We use settings.INSTALLED_APPS here so
# the syntax test doesn't complain about an undefined name.
if "mezzanine.accounts" not in settings.INSTALLED_APPS:
    INSTALLED_APPS = list(settings.INSTALLED_APPS) + ["mezzanine.accounts"]

# Use the MD5 password hasher by default for quicker test runs.
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)

DEBUG = True

# Make these unique, and don't share it with anybody.
# NOTE(review): the "%(...)s" placeholders are presumably interpolated by the
# project-template machinery when this file is generated -- do not fill in
# by hand here.
SECRET_KEY = "%(SECRET_KEY)s"
NEVERCACHE_KEY = "%(NEVERCACHE_KEY)s"

DATABASES = {
    "default": {
        # Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.sqlite3",
        # DB name or path to database file if using sqlite3.
        "NAME": "dev.db",
        # Not used with sqlite3.
        "USER": "",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "",
    }
}

###################
# DEPLOY SETTINGS #
###################

# Domains for public site
# ALLOWED_HOSTS = [""]

# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.

# FABRIC = {
#     "DEPLOY_TOOL": "rsync",  # Deploy with "git", "hg", or "rsync"
#     "SSH_USER": "",  # VPS SSH username
#     "HOSTS": [""],  # The IP address of your VPS
#     "DOMAINS": ALLOWED_HOSTS,  # Edit domains in ALLOWED_HOSTS
#     "REQUIREMENTS_PATH": "requirements.txt",  # Project's pip requirements
#     "LOCALE": "en_US.UTF-8",  # Should end with ".UTF-8"
#     "DB_PASS": "",  # Live database password
#     "ADMIN_PASS": "",  # Live admin user password
#     "SECRET_KEY": SECRET_KEY,
#     "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
from os.path import abspath
import sys
import logging
# Make the bundled editorconfig-core-py checkout importable before pulling
# names from it.
editorconfig_path = abspath('editorconfig-core-py/')
if editorconfig_path not in sys.path:
    sys.path.append(editorconfig_path)

from editorconfig import get_properties, EditorConfigError
class EditorConfigPluginMixin(object):
    """Gedit plugin mixin that applies EditorConfig properties (indent
    style/size, line endings, trailing-whitespace trimming) to documents.

    NOTE(review): relies on a ``get_document_properties(document)`` method
    that is not defined in this mixin -- presumably supplied by the concrete
    plugin class; confirm before reusing this mixin elsewhere.
    """

    def activate_plugin(self, window):
        # Re-apply settings whenever the active tab changes state so newly
        # shown documents pick up their EditorConfig properties.
        handler_id = window.connect('active_tab_state_changed',
                                    self.set_config)
        window.editorconfig_handler = handler_id

    def deactivate_plugin(self, window):
        # Disconnect the window-level handler plus any per-document
        # whitespace-trimming handlers installed earlier.
        window.disconnect(window.editorconfig_handler)
        window.editorconfig_handler = None
        for document in window.get_documents():
            if getattr(document, 'editorconfig_whitespace_handler', None):
                document.disconnect(document.editorconfig_whitespace_handler)
                document.editorconfig_whitespace_handler = None

    def set_config(self, window):
        """Get EditorConfig properties for file and change settings"""
        tab = window.get_active_tab()
        document = tab.get_document()
        view = tab.get_view()
        props = self.get_document_properties(document)
        # Normalize raw string properties before applying them.
        self.process_properties(props)
        self.set_indentation(view,
                             props.get('indent_style'),
                             props.get('indent_size'),
                             props.get('tab_width'))
        self.set_end_of_line(document, props.get('end_of_line'))
        self.set_trim_trailing_whitespace(document,
                                          props.get('trim_trailing_whitespace'))

    def get_properties_from_filename(self, filename):
        """Retrieve dict of EditorConfig properties for the given filename"""
        try:
            return get_properties(filename)
        except EditorConfigError:
            # A malformed .editorconfig must not break the editor; log and
            # fall back to no properties.
            logging.error("Error reading EditorConfig file", exc_info=True)
            return {}

    def process_properties(self, properties):
        """Process property values and remove invalid properties"""
        # Convert tab_width to a number
        if 'tab_width' in properties:
            try:
                properties['tab_width'] = int(properties['tab_width'])
            except ValueError:
                del properties['tab_width']

        # Convert indent_size to a number or set equal to tab_width
        if 'indent_size' in properties:
            if (properties['indent_size'] == "tab" and
                    'tab_width' in properties):
                properties['indent_size'] = properties['tab_width']
            else:
                try:
                    properties['indent_size'] = int(properties['indent_size'])
                except ValueError:
                    del properties['indent_size']

        # Normalize to a real boolean; anything other than the literal
        # string 'true' (including a missing key) disables trimming.
        if properties.get('trim_trailing_whitespace') == 'true':
            properties['trim_trailing_whitespace'] = True
        else:
            properties['trim_trailing_whitespace'] = False

    def set_end_of_line(self, document, end_of_line):
        """Set line ending style based on given end_of_line property"""
        # The integers map onto gedit's 'newline-type' GObject property.
        if end_of_line == "lf":
            document.set_property('newline-type', 0)
        elif end_of_line == "cr":
            document.set_property('newline-type', 1)
        elif end_of_line == "crlf":
            document.set_property('newline-type', 2)

    def set_indentation(self, view, indent_style, indent_size, tab_width):
        """Set indentation style for given view based on given properties"""
        if indent_style == 'space':
            view.set_insert_spaces_instead_of_tabs(True)
            if indent_size:
                view.set_tab_width(indent_size)
        elif indent_style == 'tab':
            view.set_insert_spaces_instead_of_tabs(False)
            if tab_width:
                view.set_tab_width(tab_width)

    def set_trim_trailing_whitespace(self, document, trim_trailing_whitespace):
        """Create/delete file save handler for trimming trailing whitespace"""

        def trim_whitespace_on_save(document, *args):
            # Wrap in a single user action so the trim is one undo step.
            document.begin_user_action()
            self.trim_trailing_whitespace(document)
            document.end_user_action()

        handler_id = getattr(document, 'editorconfig_whitespace_handler', None)
        if trim_trailing_whitespace and not handler_id:
            # The trimmer does not exist, so install it:
            handler_id = document.connect('save', trim_whitespace_on_save)
            document.editorconfig_whitespace_handler = handler_id
        elif not trim_trailing_whitespace and handler_id:
            # The trimmer exists, so remove it:
            document.disconnect(document.editorconfig_whitespace_handler)
            document.editorconfig_whitespace_handler = None

    def trim_trailing_whitespace(self, document):
        """Trim trailing whitespace from each line of document"""
        for line in range(document.get_end_iter().get_line() + 1):
            end_of_line = document.get_iter_at_line(line)
            end_of_line.forward_to_line_end()
            # Walk backwards over trailing spaces/tabs to find where the
            # whitespace run starts.
            whitespace_start = end_of_line.copy()
            while whitespace_start.backward_char():
                if not whitespace_start.get_char() in ' \t':
                    whitespace_start.forward_char()
                    break
            if not whitespace_start.equal(end_of_line):
                document.delete(whitespace_start, end_of_line)
| dublebuble/editorconfig-gedit | editorconfig_plugin/shared.py | Python | bsd-2-clause | 5,356 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models, transaction, DatabaseError
from django.template.defaultfilters import slugify
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now as tznow
from pybb.compat import get_user_model_path, get_username_field, get_atomic_func
from pybb import defaults
from pybb.profiles import PybbProfile
from pybb.util import unescape, FilePathGenerator, _get_markup_formatter
from annoying.fields import AutoOneToOneField
# South (legacy schema migrations) cannot introspect these custom fields;
# register empty rules so migrations still work when South is installed.
# Silently skipped when South is absent.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^annoying\.fields\.JSONField"])
    add_introspection_rules([], ["^annoying\.fields\.AutoOneToOneField"])
except ImportError:
    pass
@python_2_unicode_compatible
class Category(models.Model):
    """Top-level grouping of forums."""

    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False,
                                 help_text=_('If checked, this category will be visible only for staff'))
    slug = models.SlugField(_("Slug"), max_length=100, unique=True)

    class Meta(object):
        ordering = ['position']
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')

    def __str__(self):
        return self.name

    def forum_count(self):
        # Number of forums directly attached to this category.
        return self.forums.all().count()

    def get_absolute_url(self):
        # PYBB_NICE_URL toggles slug-based vs pk-based routing.
        if defaults.PYBB_NICE_URL:
            return reverse('pybb:category', kwargs={'slug': self.slug, })
        return reverse('pybb:category', kwargs={'pk': self.id})

    @property
    def topics(self):
        # All topics in every forum of this category.
        return Topic.objects.filter(forum__category=self).select_related()

    @property
    def posts(self):
        # All posts in every topic of this category.
        return Post.objects.filter(topic__forum__category=self).select_related()
@python_2_unicode_compatible
class Forum(models.Model):
    """A forum inside a category; may be nested via ``parent``."""

    category = models.ForeignKey(Category, related_name='forums', verbose_name=_('Category'))
    parent = models.ForeignKey('self', related_name='child_forums', verbose_name=_('Parent forum'),
                               blank=True, null=True)
    name = models.CharField(_('Name'), max_length=80)
    position = models.IntegerField(_('Position'), blank=True, default=0)
    description = models.TextField(_('Description'), blank=True)
    moderators = models.ManyToManyField(get_user_model_path(), blank=True, null=True, verbose_name=_('Moderators'))
    # Denormalized activity data, maintained by update_counters().
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    topic_count = models.IntegerField(_('Topic count'), blank=True, default=0)
    hidden = models.BooleanField(_('Hidden'), blank=False, null=False, default=False)
    readed_by = models.ManyToManyField(get_user_model_path(), through='ForumReadTracker', related_name='readed_forums')
    headline = models.TextField(_('Headline'), blank=True, null=True)
    slug = models.SlugField(verbose_name=_("Slug"), max_length=100)

    class Meta(object):
        ordering = ['position']
        verbose_name = _('Forum')
        verbose_name_plural = _('Forums')
        unique_together = ('category', 'slug')

    def __str__(self):
        return self.name

    def update_counters(self):
        # Recompute the denormalized topic/post counts and last-activity
        # timestamp from the actual rows, then persist them.
        self.topic_count = Topic.objects.filter(forum=self).count()
        if self.topic_count:
            posts = Post.objects.filter(topic__forum_id=self.id)
            self.post_count = posts.count()
            if self.post_count:
                try:
                    last_post = posts.order_by('-created', '-id')[0]
                    self.updated = last_post.updated or last_post.created
                except IndexError:
                    pass
        else:
            self.post_count = 0
        self.save()

    def get_absolute_url(self):
        # PYBB_NICE_URL toggles slug-based vs pk-based routing.
        if defaults.PYBB_NICE_URL:
            return reverse('pybb:forum', kwargs={'slug': self.slug, 'category_slug': self.category.slug})
        return reverse('pybb:forum', kwargs={'pk': self.id})

    @property
    def posts(self):
        return Post.objects.filter(topic__forum=self).select_related()

    @cached_property
    def last_post(self):
        # Most recent post in this forum, or None when the forum is empty.
        try:
            return self.posts.order_by('-created', '-id')[0]
        except IndexError:
            return None

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        # Category first, then ancestor forums from the top down.
        parents = [self.category]
        parent = self.parent
        while parent is not None:
            parents.insert(1, parent)
            parent = parent.parent
        return parents
@python_2_unicode_compatible
class Topic(models.Model):
    """A discussion thread inside a forum, optionally carrying a poll."""

    # Poll type constants for ``poll_type``.
    POLL_TYPE_NONE = 0
    POLL_TYPE_SINGLE = 1
    POLL_TYPE_MULTIPLE = 2

    POLL_TYPE_CHOICES = (
        (POLL_TYPE_NONE, _('None')),
        (POLL_TYPE_SINGLE, _('Single answer')),
        (POLL_TYPE_MULTIPLE, _('Multiple answers')),
    )

    forum = models.ForeignKey(Forum, related_name='topics', verbose_name=_('Forum'))
    name = models.CharField(_('Subject'), max_length=255)
    created = models.DateTimeField(_('Created'), null=True)
    updated = models.DateTimeField(_('Updated'), null=True)
    user = models.ForeignKey(get_user_model_path(), verbose_name=_('User'))
    views = models.IntegerField(_('Views count'), blank=True, default=0)
    sticky = models.BooleanField(_('Sticky'), blank=True, default=False)
    closed = models.BooleanField(_('Closed'), blank=True, default=False)
    subscribers = models.ManyToManyField(get_user_model_path(), related_name='subscriptions',
                                         verbose_name=_('Subscribers'), blank=True)
    # Denormalized, maintained by update_counters().
    post_count = models.IntegerField(_('Post count'), blank=True, default=0)
    readed_by = models.ManyToManyField(get_user_model_path(), through='TopicReadTracker', related_name='readed_topics')
    on_moderation = models.BooleanField(_('On moderation'), default=False)
    poll_type = models.IntegerField(_('Poll type'), choices=POLL_TYPE_CHOICES, default=POLL_TYPE_NONE)
    poll_question = models.TextField(_('Poll question'), blank=True, null=True)
    slug = models.SlugField(verbose_name=_("Slug"), max_length=100)

    class Meta(object):
        ordering = ['-created']
        verbose_name = _('Topic')
        verbose_name_plural = _('Topics')
        unique_together = ('forum', 'slug')

    def __str__(self):
        return self.name

    @cached_property
    def head(self):
        # First (oldest) post of the topic, or None when empty.
        try:
            return self.posts.all().order_by('created', 'id')[0]
        except IndexError:
            return None

    @cached_property
    def last_post(self):
        # Newest post of the topic, or None when empty.
        try:
            return self.posts.order_by('-created', '-id').select_related('user')[0]
        except IndexError:
            return None

    def get_absolute_url(self):
        # PYBB_NICE_URL toggles slug-based vs pk-based routing.
        if defaults.PYBB_NICE_URL:
            return reverse('pybb:topic', kwargs={'slug': self.slug, 'forum_slug': self.forum.slug, 'category_slug': self.forum.category.slug})
        return reverse('pybb:topic', kwargs={'pk': self.id})

    def save(self, *args, **kwargs):
        # New topics get their creation/update stamps set here.
        if self.id is None:
            self.created = self.updated = tznow()

        # Detect moves between forums so both forums' counters stay correct.
        forum_changed = False
        old_topic = None
        if self.id is not None:
            old_topic = Topic.objects.get(id=self.id)
            if self.forum != old_topic.forum:
                forum_changed = True

        super(Topic, self).save(*args, **kwargs)

        if forum_changed:
            old_topic.forum.update_counters()
            self.forum.update_counters()

    def delete(self, using=None):
        super(Topic, self).delete(using)
        # Keep the parent forum's denormalized counters in sync.
        self.forum.update_counters()

    def update_counters(self):
        self.post_count = self.posts.count()
        # force cache overwrite to get the real latest updated post
        if hasattr(self, 'last_post'):
            del self.last_post
        if self.last_post:
            self.updated = self.last_post.updated or self.last_post.created
        self.save()

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        parents = self.forum.get_parents()
        parents.append(self.forum)
        return parents

    def poll_votes(self):
        # Total number of votes cast across all answers of this topic's
        # poll, or None when the topic has no poll.
        if self.poll_type != self.POLL_TYPE_NONE:
            return PollAnswerUser.objects.filter(poll_answer__topic=self).count()
        else:
            return None
class RenderableItem(models.Model):
    """
    Base class for models that has markup, body, body_text and body_html fields.
    """
    class Meta(object):
        abstract = True

    body = models.TextField(_('Message'))
    body_html = models.TextField(_('HTML version'))
    body_text = models.TextField(_('Text version'))

    def render(self):
        # Run the configured markup formatter over the raw body, then derive
        # the plain-text version from the resulting HTML.
        self.body_html = _get_markup_formatter()(self.body)
        # Remove tags which was generated with the markup processor
        text = strip_tags(self.body_html)
        # Unescape entities which was generated with the markup processor
        self.body_text = unescape(text)
@python_2_unicode_compatible
class Post(RenderableItem):
    """A single message inside a topic."""

    topic = models.ForeignKey(Topic, related_name='posts', verbose_name=_('Topic'))
    user = models.ForeignKey(get_user_model_path(), related_name='posts', verbose_name=_('User'))
    created = models.DateTimeField(_('Created'), blank=True, db_index=True)
    updated = models.DateTimeField(_('Updated'), blank=True, null=True)
    user_ip = models.IPAddressField(_('User IP'), blank=True, default='0.0.0.0')
    on_moderation = models.BooleanField(_('On moderation'), default=False)

    class Meta(object):
        ordering = ['created']
        verbose_name = _('Post')
        verbose_name_plural = _('Posts')

    def summary(self):
        # First 50 characters of the body, with an ellipsis when truncated.
        limit = 50
        tail = len(self.body) > limit and '...' or ''
        return self.body[:limit] + tail

    def __str__(self):
        return self.summary()

    def save(self, *args, **kwargs):
        created_at = tznow()
        if self.created is None:
            self.created = created_at
        # Regenerate body_html/body_text before persisting.
        self.render()

        # Detect moves between topics so both topics' counters stay correct.
        new = self.pk is None
        topic_changed = False
        old_post = None
        if not new:
            old_post = Post.objects.get(pk=self.pk)
            if old_post.topic != self.topic:
                topic_changed = True

        super(Post, self).save(*args, **kwargs)

        # If post is topic head and moderated, moderate topic too
        if self.topic.head == self and not self.on_moderation and self.topic.on_moderation:
            self.topic.on_moderation = False

        self.topic.update_counters()
        self.topic.forum.update_counters()

        if topic_changed:
            old_post.topic.update_counters()
            old_post.topic.forum.update_counters()

    def get_absolute_url(self):
        return reverse('pybb:post', kwargs={'pk': self.id})

    def delete(self, *args, **kwargs):
        self_id = self.id
        head_post_id = self.topic.posts.order_by('created', 'id')[0].id

        if self_id == head_post_id:
            # Deleting the head post removes the whole topic.
            self.topic.delete()
        else:
            super(Post, self).delete(*args, **kwargs)
            self.topic.update_counters()
            self.topic.forum.update_counters()

    def get_parents(self):
        """
        Used in templates for breadcrumb building
        """
        return self.topic.forum.category, self.topic.forum, self.topic,
class Profile(PybbProfile):
    """
    Profile class that can be used if you doesn't have
    your site profile.
    """
    # Auto-created on first access thanks to AutoOneToOneField.
    user = AutoOneToOneField(get_user_model_path(), related_name='pybb_profile', verbose_name=_('User'))

    class Meta(object):
        verbose_name = _('Profile')
        verbose_name_plural = _('Profiles')

    def get_absolute_url(self):
        # Resolve the user detail URL using the configured username field.
        return reverse('pybb:user', kwargs={'username': getattr(self.user, get_username_field())})

    def get_display_name(self):
        return self.user.get_username()
class Attachment(models.Model):
    """A file attached to a forum post."""

    class Meta(object):
        verbose_name = _('Attachment')
        verbose_name_plural = _('Attachments')

    post = models.ForeignKey(Post, verbose_name=_('Post'), related_name='attachments')
    size = models.IntegerField(_('Size'))
    file = models.FileField(_('File'),
                            upload_to=FilePathGenerator(to=defaults.PYBB_ATTACHMENT_UPLOAD_TO))

    def save(self, *args, **kwargs):
        # Keep the denormalized byte count in sync with the stored file.
        self.size = self.file.size
        super(Attachment, self).save(*args, **kwargs)

    def size_display(self):
        """Human-readable size: bytes, kilobytes or megabytes."""
        byte_count = self.size
        one_kb = 1024
        one_mb = 1024 * 1024
        if byte_count < one_kb:
            return '%db' % byte_count
        if byte_count < one_mb:
            return '%dKb' % int(byte_count / one_kb)
        return '%.2fMb' % (byte_count / float(one_mb))
class TopicReadTrackerManager(models.Manager):
    def get_or_create_tracker(self, user, topic):
        """
        Correctly create tracker in mysql db on default REPEATABLE READ transaction mode

        It's known problem when standrard get_or_create method return can raise exception
        with correct data in mysql database.

        See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
        """
        is_new = True
        # Savepoint lets us roll back only the failed INSERT instead of the
        # whole surrounding transaction.
        sid = transaction.savepoint(using=self.db)
        try:
            with get_atomic_func()():
                obj = TopicReadTracker.objects.create(user=user, topic=topic)
            transaction.savepoint_commit(sid)
        except DatabaseError:
            # Unique constraint hit: a concurrent transaction created the
            # tracker first, so fetch the existing row instead.
            transaction.savepoint_rollback(sid)
            obj = TopicReadTracker.objects.get(user=user, topic=topic)
            is_new = False
        return obj, is_new
class TopicReadTracker(models.Model):
    """
    Save per user topic read tracking
    """
    user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
    topic = models.ForeignKey(Topic, blank=True, null=True)
    # Refreshed on every save, marking when the topic was last read.
    time_stamp = models.DateTimeField(auto_now=True)

    objects = TopicReadTrackerManager()

    class Meta(object):
        verbose_name = _('Topic read tracker')
        verbose_name_plural = _('Topic read trackers')
        # At most one tracker row per (user, topic) pair.
        unique_together = ('user', 'topic')
class ForumReadTrackerManager(models.Manager):
    def get_or_create_tracker(self, user, forum):
        """
        Correctly create tracker in mysql db on default REPEATABLE READ transaction mode

        It's known problem when standrard get_or_create method return can raise exception
        with correct data in mysql database.

        See http://stackoverflow.com/questions/2235318/how-do-i-deal-with-this-race-condition-in-django/2235624
        """
        is_new = True
        # Savepoint lets us roll back only the failed INSERT instead of the
        # whole surrounding transaction.
        sid = transaction.savepoint(using=self.db)
        try:
            with get_atomic_func()():
                obj = ForumReadTracker.objects.create(user=user, forum=forum)
            transaction.savepoint_commit(sid)
        except DatabaseError:
            # Unique constraint hit: a concurrent transaction created the
            # tracker first, so fetch the existing row instead.
            transaction.savepoint_rollback(sid)
            is_new = False
            obj = ForumReadTracker.objects.get(user=user, forum=forum)
        return obj, is_new
class ForumReadTracker(models.Model):
    """
    Save per user forum read tracking
    """
    user = models.ForeignKey(get_user_model_path(), blank=False, null=False)
    forum = models.ForeignKey(Forum, blank=True, null=True)
    # auto_now=True bumps the timestamp on every save, marking the forum as
    # read "now" for this user.
    time_stamp = models.DateTimeField(auto_now=True)
    objects = ForumReadTrackerManager()
    class Meta(object):
        verbose_name = _('Forum read tracker')
        verbose_name_plural = _('Forum read trackers')
        # One tracker row per (user, forum) pair.
        unique_together = ('user', 'forum')
@python_2_unicode_compatible
class PollAnswer(models.Model):
    """A single selectable answer belonging to a topic's poll."""
    topic = models.ForeignKey(Topic, related_name='poll_answers', verbose_name=_('Topic'))
    text = models.CharField(max_length=255, verbose_name=_('Text'))
    class Meta:
        verbose_name = _('Poll answer')
        verbose_name_plural = _('Polls answers')
    def __str__(self):
        return self.text
    def votes(self):
        # Number of users who picked this answer.
        return self.users.count()
    def votes_percent(self):
        """Return this answer's share of all votes in the topic, in percent."""
        total = self.topic.poll_votes()
        if total <= 0:
            return 0
        return 100.0 * self.votes() / total
@python_2_unicode_compatible
class PollAnswerUser(models.Model):
    """Records that a given user voted for a given poll answer."""
    poll_answer = models.ForeignKey(PollAnswer, related_name='users', verbose_name=_('Poll answer'))
    user = models.ForeignKey(get_user_model_path(), related_name='poll_answers', verbose_name=_('User'))
    timestamp = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name = _('Poll answer user')
        verbose_name_plural = _('Polls answers users')
        # Each user may vote for a given answer at most once.
        unique_together = (('poll_answer', 'user', ), )
    def __str__(self):
        return '{0} - {1}'.format(self.poll_answer.topic, self.user)
def create_or_check_slug(instance, model, **extra_filters):
    """
    Return a unique slug for *instance*.

    If the instance has no slug yet, one is derived from its name. A numeric
    suffix (``-1``, ``-2``, ...) is appended until the slug does not collide
    with any other object of *model* matching the filters.

    :param instance: target instance (must expose ``slug``, ``name``, ``pk``)
    :param model: needed as instance._meta.model is only available since
        django 1.6
    :param extra_filters: filters needed for Forum and Topic for their
        unique_together field
    """
    if not instance.slug:
        instance.slug = slugify(instance.name)
    base_slug = instance.slug
    filters = {'slug__startswith': base_slug, }
    if extra_filters:
        filters.update(extra_filters)
    objs = model.objects.filter(**filters).exclude(pk=instance.pk)
    # Use a set for O(1) membership tests instead of scanning a list on
    # every iteration of the while loop below.
    used_slugs = set(obj.slug for obj in objs)
    slug = base_slug
    count = 0
    while slug in used_slugs:
        count += 1
        slug = '%s-%d' % (base_slug, count)
    return slug
| springmerchant/pybbm | pybb/models.py | Python | bsd-2-clause | 17,800 |
# -*- coding: utf-8 -*-
"""
consolor
Copyright (c) 2013-2014, Friedrich Paetzke (f.paetzke@gmail.com)
All rights reserved.
"""
from __future__ import print_function
from consolor import BgColor, Color, get_line
try:
from unittest.mock import call, patch
except ImportError:
from mock import call, patch
def mockable_print(*values, **options):
    """Thin indirection over print() so tests can patch this module-level name."""
    print(*values, **options)
def test_print_bold():
    """Bold text is wrapped in SGR code 1 plus a trailing reset."""
    assert get_line('123 bold', bold=True) == '\x1b[1m123 bold\x1b[0m'
def test_print_underline():
    """Underlined text is wrapped in SGR code 4 plus a trailing reset."""
    assert get_line('123 underline', underline=True) == '\x1b[4m123 underline\x1b[0m'
def test_get_bgcolor():
    """A background colour prefixes the text with its escape sequence."""
    assert get_line('123 green bg', bgcolor=BgColor.Green) == '\x1b[42;1m123 green bg\x1b[0m'
def test_get_color():
    """A foreground colour prefixes the text with its escape sequence."""
    assert get_line('123 light green', color=Color.LightGreen) == '\x1b[1;32m123 light green\x1b[0m'
def test_update_line():
    """update_line prefixes erase-line + carriage return for every value."""
    for n in range(100, -1, -1):
        assert get_line('123%d' % n, update_line=True) == '\x1b[2K\r123%d\x1b[0m' % n
@patch('tests.test_consolor.mockable_print')
def test_print_color(mocked_print):
    """A foreground colour flows through print() and is cleared by Reset."""
    mockable_print(Color.Red, 'Red')
    mockable_print('Red two')
    mockable_print(Color.Reset, end='')
    mockable_print('Not Red')
    expected = [call('\x1b[0;31m', 'Red'),
                call('Red two'),
                call('\x1b[0m', end=''),
                call('Not Red')]
    mocked_print.assert_has_calls(expected)
@patch('tests.test_consolor.mockable_print')
def test_print_concat_color(mocked_print):
    """Switching directly between two foreground colours needs no Reset."""
    mockable_print(Color.Red, 'Red')
    mockable_print('Red two')
    mockable_print(Color.Blue, 'Blue')
    mockable_print(Color.Reset, end='')
    mockable_print('Not Blue')
    expected = [call('\x1b[0;31m', 'Red'),
                call('Red two'),
                call('\x1b[0;34m', 'Blue'),
                call('\x1b[0m', end=''),
                call('Not Blue')]
    mocked_print.assert_has_calls(expected)
@patch('tests.test_consolor.mockable_print')
def test_print_bgcolor(mocked_print):
    """A background colour flows through print() and is cleared by Reset."""
    mockable_print(BgColor.Red, 'Red')
    mockable_print('Red two', BgColor.Reset)
    mockable_print('None')
    expected = [call('\x1b[41;1m', 'Red'),
                call('Red two', '\x1b[0m'),
                call('None')]
    mocked_print.assert_has_calls(expected)
@patch('tests.test_consolor.mockable_print')
def test_print_concat_bgcolor(mocked_print):
    """Switching directly between two background colours needs no Reset."""
    mockable_print(BgColor.Red, 'Red')
    mockable_print('Red two')
    mockable_print(BgColor.Cyan, 'None')
    mockable_print(BgColor.Reset)
    expected = [call('\x1b[41;1m', 'Red'),
                call('Red two'),
                call('\x1b[46;1m', 'None'),
                call('\x1b[0m')]
    mocked_print.assert_has_calls(expected)
def test_color_and_bgcolor():
    """Foreground and background escapes combine with foreground first."""
    assert get_line('1', bgcolor=BgColor.Green, color=Color.Red) == '\x1b[0;31m\x1b[42;1m1\x1b[0m'
| paetzke/consolor | tests/test_consolor.py | Python | bsd-2-clause | 3,238 |
#!/usr/bin/env python
from setuptools import setup
# Prefer unittest2's TestLoader when it is installed (the backport brings
# Python 2.7 features to older interpreters); otherwise fall back to the
# stdlib unittest loader. The string is consumed by setup(test_loader=...).
try:
    import unittest2  # noqa
except ImportError:
    test_loader = 'unittest:TestLoader'
else:
    test_loader = 'unittest2:TestLoader'
# Packaging metadata for mockldap (a mock of the python-ldap API for tests).
setup(
    name='mockldap',
    version='0.1.8',
    description=u"A simple mock implementation of python-ldap.",
    # The README file doubles as the PyPI long description.
    long_description=open('README').read(),
    url='http://bitbucket.org/psagers/mockldap/',
    author='Peter Sagerson',
    author_email='psagers.pypi@ignorare.net',
    license='BSD',
    packages=['mockldap'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Programming Language :: Python',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    keywords=['mock', 'ldap'],
    install_requires=[
        'python-ldap',
        # NOTE(review): exact pin — confirm newer funcparserlib releases are
        # actually incompatible before relaxing this.
        'funcparserlib==0.3.6',
        'mock',
    ],
    extras_require={
        'passlib': ['passlib>=1.6.1'],
    },
    setup_requires=[
        'setuptools>=0.6c11',
    ],
    # test_loader is chosen above depending on unittest2 availability.
    test_loader=test_loader,
    test_suite='mockldap.tests',
)
| coreos/mockldap | setup.py | Python | bsd-2-clause | 1,346 |
from __future__ import print_function, division
import hashlib
from copy import deepcopy
import h5py
import numpy as np
from astropy import log as logger
from ..util.meshgrid import meshgrid_nd
from ..util.functions import FreezableClass, is_numpy_array, monotonically_increasing, link_or_copy, as_str
from .grid_helpers import single_grid_dims
class CartesianGrid(FreezableClass):
'''
A cartesian grid.
The grid can be initialized by passing the x, y, and z coordinates of cell walls::
>>> grid = CartesianGrid(x_wall, y_wall, z_wall)
where ``x_wall``, ``y_wall``, and ``z_wall`` are 1-d sequences of wall
positions. The number of cells in the resulting grid will be one less
in each dimension that the length of these arrays.
:class:`~hyperion.grid.CartesianGrid` objects may contain multiple
quantities (e.g. density, specific energy). To access these, you can
specify the name of the quantity as an item::
>>> grid['density']
which is no longer a :class:`~hyperion.grid.CartesianGrid` object, but
a :class:`~hyperion.grid.CartesianGridView` object. When setting
this for the first time, this can be set either to another
:class:`~hyperion.grid.CartesianGridView` object, an external h5py
link, or an empty list. For example, the following should work:
>>> grid['density_new'] = grid['density']
:class:`~hyperion.grid.CartesianGridView` objects allow the
specific dust population to be selected as an index:
>>> grid['density'][0]
Which is also a :class:`~hyperion.grid.CartesianGridView` object. The
data can then be accessed with the ``array`` attribute::
>>> grid['density'][0].array
which is a 3-d array of the requested quantity.
'''
    def __init__(self, *args):
        """Create a grid, optionally from wall arrays or another CartesianGrid."""
        # Geometry attributes are populated by set_walls(); start empty.
        self.shape = None
        self.x_wall = None
        self.y_wall = None
        self.z_wall = None
        self.x = None
        self.y = None
        self.z = None
        self.gx = None
        self.gy = None
        self.gz = None
        self.volumes = None
        self.areas = None
        self.widths = None
        self.quantities = {}
        # Freeze the instance so new attributes cannot be added accidentally.
        self._freeze()
        if len(args) > 0:
            if isinstance(args[0], CartesianGrid):
                # Copy-construct: reuse the other grid's wall positions.
                self.set_walls(args[0].x_wall, args[0].y_wall, args[0].z_wall)
            else:
                self.set_walls(*args)
    def set_walls(self, x_wall, y_wall, z_wall):
        """
        Define the grid geometry from 1-d, monotonically increasing arrays
        of wall positions along x, y and z, then pre-compute cell centers,
        volumes, wall areas, and cell widths.
        """
        # Accept plain Python sequences by promoting them to Numpy arrays
        if type(x_wall) in [list, tuple]:
            x_wall = np.array(x_wall)
        if type(y_wall) in [list, tuple]:
            y_wall = np.array(y_wall)
        if type(z_wall) in [list, tuple]:
            z_wall = np.array(z_wall)
        # Validate: each wall array must be a 1-d, monotonically increasing array
        if not is_numpy_array(x_wall) or x_wall.ndim != 1:
            raise ValueError("x_wall should be a 1-D sequence")
        if not is_numpy_array(y_wall) or y_wall.ndim != 1:
            raise ValueError("y_wall should be a 1-D sequence")
        if not is_numpy_array(z_wall) or z_wall.ndim != 1:
            raise ValueError("z_wall should be a 1-D sequence")
        if not monotonically_increasing(x_wall):
            raise ValueError("x_wall should be monotonically increasing")
        if not monotonically_increasing(y_wall):
            raise ValueError("y_wall should be monotonically increasing")
        if not monotonically_increasing(z_wall):
            raise ValueError("z_wall should be monotonically increasing")
        # Find grid shape (note the (z, y, x) axis ordering)
        self.shape = (len(z_wall) - 1, len(y_wall) - 1, len(x_wall) - 1)
        # Store wall positions
        self.x_wall = x_wall
        self.y_wall = y_wall
        self.z_wall = z_wall
        # Compute cell centers (midpoints of adjacent walls)
        self.x = (x_wall[:-1] + x_wall[1:]) / 2.
        self.y = (y_wall[:-1] + y_wall[1:]) / 2.
        self.z = (z_wall[:-1] + z_wall[1:]) / 2.
        # Generate 3D versions of r, t, p
        #(each array is 3D and defined in every cell)
        self.gx, self.gy, self.gz = meshgrid_nd(self.x, self.y, self.z)
        # Generate 3D versions of the inner and outer wall positions respectively
        gx_wall_min, gy_wall_min, gz_wall_min = meshgrid_nd(x_wall[:-1], y_wall[:-1], z_wall[:-1])
        gx_wall_max, gy_wall_max, gz_wall_max = meshgrid_nd(x_wall[1:], y_wall[1:], z_wall[1:])
        # USEFUL QUANTITIES: per-cell extents along each axis
        dx = gx_wall_max - gx_wall_min
        dy = gy_wall_max - gy_wall_min
        dz = gz_wall_max - gz_wall_min
        # CELL VOLUMES
        self.volumes = dx * dy * dz
        # WALL AREAS (two identical entries per axis: lower and upper wall)
        self.areas = np.zeros((6,) + self.shape)
        # X walls:
        self.areas[0, :, :, :] = dy * dz
        self.areas[1, :, :, :] = dy * dz
        # Y walls:
        self.areas[2, :, :, :] = dx * dz
        self.areas[3, :, :, :] = dx * dz
        # Z walls:
        self.areas[4, :, :, :] = dx * dy
        self.areas[5, :, :, :] = dx * dy
        # CELL WIDTHS
        self.widths = np.zeros((3,) + self.shape)
        # X direction:
        self.widths[0, :, :, :] = dx
        # Y direction:
        self.widths[1, :, :, :] = dy
        # Z direction:
        self.widths[2, :, :, :] = dz
    def __getattr__(self, attribute):
        # ``n_dust`` is a virtual attribute derived from the quantity arrays:
        # the common list length of all multi-population quantities, or None
        # if every quantity is a single 3-d array (or there are none).
        if attribute == 'n_dust':
            n_dust = None
            for quantity in self.quantities:
                n_dust_q, shape_q = single_grid_dims(self.quantities[quantity])
                if n_dust is None:
                    n_dust = n_dust_q
                elif n_dust_q is not None:
                    if n_dust != n_dust_q:
                        raise ValueError("Not all dust lists in the grid have the same size")
            return n_dust
        else:
            return FreezableClass.__getattribute__(self, attribute)
    def _check_array_dimensions(self, array=None):
        '''
        Check that a grid's array dimensions agree with this grid's metadata

        Parameters
        ----------
        array : np.ndarray or list of np.ndarray, optional
            The array for which to test the dimensions. If this is not
            specified, this method performs a self-consistency check of array
            dimensions and meta-data.
        '''
        n_pop_ref = None
        # A view wraps a single named quantity - unwrap it to the raw data
        if isinstance(array, CartesianGridView):
            array = array.quantities[array.viewed_quantity]
        for quantity in self.quantities:
            if array is None:
                n_pop, shape = single_grid_dims(self.quantities[quantity])
            else:
                n_pop, shape = single_grid_dims(array)
            if shape != self.shape:
                raise ValueError("Quantity arrays do not have the right "
                                 "dimensions: %s instead of %s"
                                 % (shape, self.shape))
            # n_pop is None for single 3-d arrays; otherwise all quantities
            # must agree on the number of dust populations
            if n_pop is not None:
                if n_pop_ref is None:
                    n_pop_ref = n_pop
                elif n_pop != n_pop_ref:
                    raise ValueError("Not all dust lists in the grid have the same size")
    def read(self, group, quantities='all'):
        '''
        Read the geometry and physical quantities from a cartesian grid

        Parameters
        ----------
        group : h5py.Group
            The HDF5 group to read the grid from. This group should contain
            groups named 'Geometry' and 'Quantities'.
        quantities : 'all' or list
            Which physical quantities to read in. Use 'all' to read in all
            quantities or a list of strings to read only specific quantities.
        '''
        # Read in geometry first so quantity shapes can be checked against it
        self.read_geometry(group['Geometry'])
        # Read in physical quantities
        self.read_quantities(group['Quantities'], quantities=quantities)
        # Self-consistently check geometry and physical quantities
        self._check_array_dimensions()
    def read_geometry(self, group):
        '''
        Read in geometry information from a cartesian grid

        Parameters
        ----------
        group : h5py.Group
            The HDF5 group to read the grid geometry from.

        Raises ValueError if the group does not describe a cartesian grid.
        '''
        # as_str: presumably normalises bytes vs str attribute values across
        # h5py/Python versions — see util.functions
        if as_str(group.attrs['grid_type']) != 'car':
            raise ValueError("Grid is not cartesian")
        self.set_walls(group['walls_1']['x'],
                       group['walls_2']['y'],
                       group['walls_3']['z'])
        # Check that advertised hash matches real hash
        if as_str(group.attrs['geometry']) != self.get_geometry_id():
            raise Exception("Calculated geometry hash does not match hash in file")
    def read_quantities(self, group, quantities='all'):
        '''
        Read in physical quantities from a cartesian grid

        Parameters
        ----------
        group : h5py.Group
            The HDF5 group to read the grid quantities from
        quantities : 'all' or list
            Which physical quantities to read in. Use 'all' to read in all
            quantities or a list of strings to read only specific quantities.
            If None, no quantities are read.
        '''
        # Read in physical quantities
        if quantities is not None:
            for quantity in group:
                if quantities == 'all' or quantity in quantities:
                    array = np.array(group[quantity])
                    if array.ndim == 4: # if array is 4D, it is a list of 3D arrays
                        self.quantities[quantity] = [array[i] for i in range(array.shape[0])]
                    else:
                        self.quantities[quantity] = array
        # Self-consistently check geometry and physical quantities
        self._check_array_dimensions()
    def write(self, group, quantities='all', copy=True, absolute_paths=False, compression=True, wall_dtype=float, physics_dtype=float):
        '''
        Write out the cartesian grid

        Parameters
        ----------
        group : h5py.Group
            The HDF5 group to write the grid to
        quantities : 'all' or list
            Which physical quantities to write out. Use 'all' to write out all
            quantities or a list of strings to write only specific quantities.
        copy : bool
            Whether to copy external links, or leave them as links.
        absolute_paths : bool
            If copy is False, then this indicates whether to use absolute or
            relative paths for links.
        compression : bool
            Whether to compress the arrays in the HDF5 file
        wall_dtype : type
            The datatype to use to write the wall positions
        physics_dtype : type
            The datatype to use to write the physical quantities
        '''
        # Create HDF5 groups if needed
        if 'Geometry' not in group:
            g_geometry = group.create_group('Geometry')
        else:
            g_geometry = group['Geometry']
        if 'Quantities' not in group:
            g_quantities = group.create_group('Quantities')
        else:
            g_quantities = group['Quantities']
        # Write out geometry
        # np.string_(...) stores the attributes as fixed-width bytes strings
        g_geometry.attrs['grid_type'] = np.string_('car'.encode('utf-8'))
        g_geometry.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
        # zip(wall) packs each wall array into a one-field structured dataset
        dset = g_geometry.create_dataset("walls_1", data=np.array(list(zip(self.x_wall)), dtype=[('x', wall_dtype)]), compression=compression)
        dset.attrs['Unit'] = np.string_('cm'.encode('utf-8'))
        dset = g_geometry.create_dataset("walls_2", data=np.array(list(zip(self.y_wall)), dtype=[('y', wall_dtype)]), compression=compression)
        dset.attrs['Unit'] = np.string_('cm'.encode('utf-8'))
        dset = g_geometry.create_dataset("walls_3", data=np.array(list(zip(self.z_wall)), dtype=[('z', wall_dtype)]), compression=compression)
        dset.attrs['Unit'] = np.string_('cm'.encode('utf-8'))
        # Self-consistently check geometry and physical quantities
        self._check_array_dimensions()
        # Write out physical quantities
        for quantity in self.quantities:
            if quantities == 'all' or quantity in quantities:
                if isinstance(self.quantities[quantity], h5py.ExternalLink):
                    # Preserve (or copy) the link instead of re-writing data
                    link_or_copy(g_quantities, quantity, self.quantities[quantity], copy, absolute_paths=absolute_paths)
                else:
                    dset = g_quantities.create_dataset(quantity, data=self.quantities[quantity],
                                                       compression=compression,
                                                       dtype=physics_dtype)
                    dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
    def write_single_array(self, group, name, array, copy=True, absolute_paths=False, compression=True, physics_dtype=float):
        '''
        Write out a single quantity, checking for consistency with geometry

        Parameters
        ----------
        group : h5py.Group
            The HDF5 group to write the grid to
        name : str
            The name of the array in the group
        array : np.ndarray
            The array to write out
        copy : bool
            Whether to copy external links, or leave them as links.
        absolute_paths : bool
            If copy is False, then this indicates whether to use absolute or
            relative paths for links.
        compression : bool
            Whether to compress the arrays in the HDF5 file
        physics_dtype : type
            The datatype to use to write the physical quantities
        '''
        # Check consistency of array dimensions with grid
        self._check_array_dimensions(array)
        if isinstance(array, h5py.ExternalLink):
            # Preserve (or copy) the external link rather than the data itself
            link_or_copy(group, name, array, copy, absolute_paths=absolute_paths)
        else:
            dset = group.create_dataset(name, data=array,
                                        compression=compression,
                                        dtype=physics_dtype)
            # Tag the dataset with the geometry hash so readers can verify it
            dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def get_geometry_id(self):
geo_hash = hashlib.md5()
geo_hash.update(self.x_wall.tostring())
geo_hash.update(self.y_wall.tostring())
geo_hash.update(self.z_wall.tostring())
return geo_hash.hexdigest()
    def __getitem__(self, item):
        # grid['quantity'] returns a single-quantity view sharing this geometry
        return CartesianGridView(self, item)
def __setitem__(self, item, value):
if isinstance(value, CartesianGridView):
if self.x_wall is None and self.y_wall is None and self.z_wall is None:
logger.warning("No geometry in target grid - copying from original grid")
self.set_walls(value.x_wall, value.y_wall, value.z_wall)
self.quantities[item] = deepcopy(value.quantities[value.viewed_quantity])
elif isinstance(value, h5py.ExternalLink):
self.quantities[item] = value
elif value == []:
self.quantities[item] = []
else:
raise ValueError('value should be an empty list, and ExternalLink, or a CartesianGridView instance')
def __contains__(self, item):
return self.quantities.__contains__(item)
def reset_quantities(self):
self.quantities = {}
def add_derived_quantity(self, name, function):
if name in self.quantities:
raise KeyError(name + ' already exists')
function(self.quantities)
    def to_yt(self, dust_id=0):
        '''
        Convert cartesian grid to a yt object (requires yt)

        This can only be used for regular cartesian grids

        Parameters
        ----------
        dust_id : int, optional
            The ID of the dust population to extract. If not set, this
            defaults to 0 (the first dust population).
        '''
        # Check that cartesian grid is regular: only a tiny relative scatter
        # in the cell spacing is tolerated along each axis
        dxs = np.diff(self.x_wall)
        if np.std(dxs) / np.mean(dxs) > 1.e-8:
            raise ValueError("Grid is significantly non-regular in x direction")
        dys = np.diff(self.y_wall)
        if np.std(dys) / np.mean(dys) > 1.e-8:
            raise ValueError("Grid is significantly non-regular in y direction")
        dzs = np.diff(self.z_wall)
        if np.std(dzs) / np.mean(dzs) > 1.e-8:
            raise ValueError("Grid is significantly non-regular in z direction")
        # Convert to yt object
        # Imported lazily so yt is only required when this method is used
        from .yt_wrappers import cartesian_grid_to_yt_stream
        return cartesian_grid_to_yt_stream(self, self.x_wall[0], self.x_wall[-1],
                                           self.y_wall[0], self.y_wall[-1],
                                           self.z_wall[0], self.z_wall[-1],
                                           dust_id=dust_id)
class CartesianGridView(CartesianGrid):
    """
    A view onto a single named quantity of a :class:`CartesianGrid`.

    The view shares the parent grid's geometry and exposes exactly one
    quantity, which can be appended to, added to, or indexed by dust
    population.
    """
    def __init__(self, grid, quantity):
        self.viewed_quantity = quantity
        CartesianGrid.__init__(self)
        self.set_walls(grid.x_wall, grid.y_wall, grid.z_wall)
        # Note: the quantity data is shared (not copied) with the source grid
        self.quantities = {quantity: grid.quantities[quantity]}
    def append(self, grid):
        '''
        Used to append quantities from another grid

        Parameters
        ----------
        grid : 3D Numpy array or CartesianGridView instance
            The grid to copy the quantity from
        '''
        if isinstance(grid, CartesianGridView):
            # Guard against appending a quantity onto itself
            if self.quantities[self.viewed_quantity] is grid.quantities[grid.viewed_quantity]:
                raise Exception("Calling append recursively")
            if type(grid.quantities[grid.viewed_quantity]) is list:
                raise Exception("Can only append a single grid")
            self._check_array_dimensions(grid.quantities[grid.viewed_quantity])
            # deepcopy so later mutation of the source does not alter this view
            self.quantities[self.viewed_quantity].append(deepcopy(grid.quantities[grid.viewed_quantity]))
        elif isinstance(grid, np.ndarray):
            self._check_array_dimensions(grid)
            self.quantities[self.viewed_quantity].append(deepcopy(grid))
        else:
            raise ValueError("grid should be a Numpy array or a CartesianGridView instance")
    def add(self, grid):
        '''
        Used to add quantities from another grid

        Parameters
        ----------
        grid : 3D Numpy array or CartesianGridView instance
            The grid to copy the quantity from
        '''
        # In-place addition only makes sense on a single 3-d array
        if type(self.quantities[self.viewed_quantity]) is list:
            raise Exception("need to first specify the item to add to")
        if isinstance(grid, CartesianGridView):
            if type(grid.quantities[grid.viewed_quantity]) is list:
                raise Exception("need to first specify the item to add")
            self._check_array_dimensions(grid.quantities[grid.viewed_quantity])
            self.quantities[self.viewed_quantity] += grid.quantities[grid.viewed_quantity]
        elif isinstance(grid, np.ndarray):
            self._check_array_dimensions(grid)
            self.quantities[self.viewed_quantity] += grid
        else:
            raise ValueError("grid should be a Numpy array or a CartesianGridView instance")
    def __getitem__(self, item):
        if type(item) is int:
            # Integer index selects a single dust population from the list
            grid = CartesianGridView(self, self.viewed_quantity)
            grid.quantities = {grid.viewed_quantity: grid.quantities[grid.viewed_quantity][item]}
            return grid
        else:
            return CartesianGrid.__getitem__(self, item)
    def __getattr__(self, attribute):
        if attribute == 'array':
            # Raw data (3-d array, or list of arrays) of the viewed quantity
            return self.quantities[self.viewed_quantity]
        else:
            return CartesianGrid.__getattr__(self, attribute)
| hyperion-rt/hyperion | hyperion/grid/cartesian_grid.py | Python | bsd-2-clause | 19,544 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
import streamkinect2.version as meta
# Utility function to read the README file.
# Utility function to read the README file.
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory.

    Uses a context manager so the file handle is closed deterministically
    instead of leaking until garbage collection.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Packaging metadata; project name/version/author come from
# streamkinect2.version (imported above as ``meta``).
setup(
    name = meta.__project__,
    version = meta.__version__,
    author = meta.__author__,
    author_email = meta.__author_email__,
    description = "A simple network streamer for kinect2 data.",
    license = "BSD",
    keywords = "kinect kinect2 zeroconf bonjour",
    # NOTE(review): "stramkinect2" looks like a typo for "streamkinect2" —
    # confirm the repository URL before release.
    url = "https://github.com/rjw57/stramkinect2",
    packages=find_packages(exclude='test'),
    long_description=read('README.md'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: BSD License",
    ],
    install_requires=[
        'blinker',
        'enum34',
        'lz4',
        'numpy',
        'pillow',
        'pyzmq',
        'tornado',
        'zeroconf',
    ],
    setup_requires=[
        'nose',
    ],
    tests_require=[
        'coverage'
    ],
    extras_require={
        'docs': [ 'sphinx', 'docutils', ],
    },
)
| rjw57/streamkinect2 | setup.py | Python | bsd-2-clause | 1,148 |
"""latex.py
Character translation utilities for LaTeX-formatted text.
Usage:
- unicode(string,'latex')
- ustring.decode('latex')
are both available just by letting "import latex" find this file.
- unicode(string,'latex+latin1')
- ustring.decode('latex+latin1')
where latin1 can be replaced by any other known encoding, also
become available by calling latex.register().
We also make public a dictionary latex_equivalents,
mapping ord(unicode char) to LaTeX code.
D. Eppstein, October 2003.
"""
from __future__ import generators
import codecs
import re
from backports import Set
def register():
    """Enable encodings of the form 'latex+x' where x describes another encoding.
    Unicode characters are translated to or from x when possible, otherwise
    expanded to latex.
    """
    # Installs _registry as a codec search function (see the codecs module).
    codecs.register(_registry)
def getregentry():
    """Encodings module API: return the codec entry for plain 'latex'."""
    return _registry('latex')
def _registry(encoding):
    """Codec search function: build a codec for 'latex' or 'latex+<base>'.

    Returns None for any other encoding name, as the codecs protocol
    requires. (Python 2 code: relies on unicode/str distinction.)
    """
    if encoding == 'latex':
        encoding = None
    elif encoding.startswith('latex+'):
        # Remember the base encoding to try before falling back to LaTeX
        encoding = encoding[6:]
    else:
        return None
    class Codec(codecs.Codec):
        def encode(self,input,errors='strict'):
            """Convert unicode string to latex."""
            output = []
            for c in input:
                if encoding:
                    try:
                        # Prefer the base encoding when the char maps cleanly
                        output.append(c.encode(encoding))
                        continue
                    except:
                        pass
                if ord(c) in latex_equivalents:
                    output.append(latex_equivalents[ord(c)])
                else:
                    # Last resort: emit a literal TeX \char escape
                    output += ['{\\char', str(ord(c)), '}']
            return ''.join(output), len(input)
        def decode(self,input,errors='strict'):
            """Convert latex source string to unicode."""
            if encoding:
                input = unicode(input,encoding,errors)
            # Note: we may get buffer objects here.
            # It is not permissible to call join on buffer objects
            # but we can make them joinable by calling unicode.
            # This should always be safe since we are supposed
            # to be producing unicode output anyway.
            x = map(unicode,_unlatex(input))
            return u''.join(x), len(input)
    class StreamWriter(Codec,codecs.StreamWriter):
        pass
    class StreamReader(Codec,codecs.StreamReader):
        pass
    # Codec tuple in the (encoder, decoder, reader, writer) protocol order
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
def _tokenize(tex):
    """Convert latex source into sequence of single-token substrings.

    Tokens are: runs of ordinary text, digit runs, dash runs, single
    special characters, and control sequences (with trailing whitespace
    consumed). (Python 2 generator: uses iterator .next().)
    """
    start = 0
    try:
        # skip quickly across boring stuff
        pos = _stoppers.finditer(tex).next().span()[0]
    except StopIteration:
        # No special characters at all: the whole input is one token
        yield tex
        return
    while 1:
        if pos > start:
            yield tex[start:pos]
            if tex[start] == '\\' and not (tex[pos-1].isdigit() and tex[start+1].isalpha()):
                while pos < len(tex) and tex[pos].isspace(): # skip blanks after csname
                    pos += 1
                while pos < len(tex) and tex[pos] in _ignore:
                    pos += 1 # flush control characters
        if pos >= len(tex):
            return
        start = pos
        if tex[pos:pos+2] in {'$$':None, '/~':None}: # protect ~ in urls
            pos += 2
        elif tex[pos].isdigit():
            # group consecutive digits into one token
            while pos < len(tex) and tex[pos].isdigit():
                pos += 1
        elif tex[pos] == '-':
            # group dashes so -- and --- survive as single tokens
            while pos < len(tex) and tex[pos] == '-':
                pos += 1
        elif tex[pos] != '\\' or pos == len(tex) - 1:
            pos += 1
        elif not tex[pos+1].isalpha():
            # single-character control sequence, e.g. \' or \"
            pos += 2
        else:
            # multi-letter control sequence name
            pos += 1
            while pos < len(tex) and tex[pos].isalpha():
                pos += 1
            # \char and \accent take a numeric argument: keep it in the token
            if tex[start:pos] == '\\char' or tex[start:pos] == '\\accent':
                while pos < len(tex) and tex[pos].isdigit():
                    pos += 1
class _unlatex:
    """Convert tokenized tex into sequence of unicode strings. Helper for decode()."""
    def __iter__(self):
        """Turn self into an iterator. It already is one, nothing to do."""
        return self
    def __init__(self,tex):
        """Create a new token converter from a string."""
        self.tex = tuple(_tokenize(tex)) # turn tokens into indexable list
        self.pos = 0 # index of first unprocessed token
        self.lastoutput = 'x' # lastoutput must always be nonempty string
    def __getitem__(self,n):
        """Return token at offset n from current pos."""
        p = self.pos + n
        t = self.tex
        # yields None past the end of the token list
        return p < len(t) and t[p] or None
    def next(self):
        """Find and return another piece of converted output."""
        if self.pos >= len(self.tex):
            raise StopIteration
        nextoutput = self.chunk()
        if self.lastoutput[0] == '\\' and self.lastoutput[-1].isalpha() and nextoutput[0].isalpha():
            nextoutput = ' ' + nextoutput # add extra space to terminate csname
        self.lastoutput = nextoutput
        return nextoutput
    def chunk(self):
        """Grab another set of input tokens and convert them to an output string."""
        for delta,c in self.candidates(0):
            if c in _l2u:
                # direct hit in the latex->unicode table
                self.pos += delta
                return unichr(_l2u[c])
            elif len(c) == 2 and c[1] == 'i' and (c[0],'\\i') in _l2u:
                self.pos += delta # correct failure to undot i
                return unichr(_l2u[(c[0],'\\i')])
            elif len(c) == 1 and c[0].startswith('\\char') and c[0][5:].isdigit():
                # literal \charNNN escape: decode the code point directly
                self.pos += delta
                return unichr(int(c[0][5:]))
        # nothing matches, just pass through token as-is
        self.pos += 1
        return self[-1]
    def candidates(self,offset):
        """Generate pairs delta,c where c is a token or tuple of tokens from tex
        (after deleting extraneous brackets starting at pos) and delta
        is the length of the tokens prior to bracket deletion.
        """
        t = self[offset]
        if t in _blacklist:
            return
        elif t == '{':
            # strip a matching brace pair and recurse on the inside
            for delta,c in self.candidates(offset+1):
                if self[offset+delta+1] == '}':
                    yield delta+2,c
        elif t == '\\mbox':
            # \mbox{...} is transparent for translation purposes
            for delta,c in self.candidates(offset+1):
                yield delta+1,c
        elif t == '$' and self[offset+2] == '$':
            # inline math with a single token inside: ($, token, $)
            yield 3, (t,self[offset+1],t)
        else:
            q = self[offset+1]
            if q == '{' and self[offset+3] == '}':
                # token applied to a single braced argument
                yield 4, (t,self[offset+2])
            elif q:
                yield 2, (t,q)
            yield 1, t
latex_equivalents = {
0x0009: ' ',
0x000a: '\n',
0x0023: '{\#}',
0x0026: '{\&}',
0x00a0: '{~}',
0x00a1: '{!`}',
0x00a2: '{\\not{c}}',
0x00a3: '{\\pounds}',
0x00a7: '{\\S}',
0x00a8: '{\\"{}}',
0x00a9: '{\\copyright}',
0x00af: '{\\={}}',
0x00ac: '{\\neg}',
0x00ad: '{\\-}',
0x00b0: '{\\mbox{$^\\circ$}}',
0x00b1: '{\\mbox{$\\pm$}}',
0x00b2: '{\\mbox{$^2$}}',
0x00b3: '{\\mbox{$^3$}}',
0x00b4: "{\\'{}}",
0x00b5: '{\\mbox{$\\mu$}}',
0x00b6: '{\\P}',
0x00b7: '{\\mbox{$\\cdot$}}',
0x00b8: '{\\c{}}',
0x00b9: '{\\mbox{$^1$}}',
0x00bf: '{?`}',
0x00c0: '{\\`A}',
0x00c1: "{\\'A}",
0x00c2: '{\\^A}',
0x00c3: '{\\~A}',
0x00c4: '{\\"A}',
0x00c5: '{\\AA}',
0x00c6: '{\\AE}',
0x00c7: '{\\c{C}}',
0x00c8: '{\\`E}',
0x00c9: "{\\'E}",
0x00ca: '{\\^E}',
0x00cb: '{\\"E}',
0x00cc: '{\\`I}',
0x00cd: "{\\'I}",
0x00ce: '{\\^I}',
0x00cf: '{\\"I}',
0x00d1: '{\\~N}',
0x00d2: '{\\`O}',
0x00d3: "{\\'O}",
0x00d4: '{\\^O}',
0x00d5: '{\\~O}',
0x00d6: '{\\"O}',
0x00d7: '{\\mbox{$\\times$}}',
0x00d8: '{\\O}',
0x00d9: '{\\`U}',
0x00da: "{\\'U}",
0x00db: '{\\^U}',
0x00dc: '{\\"U}',
0x00dd: "{\\'Y}",
0x00df: '{\\ss}',
0x00e0: '{\\`a}',
0x00e1: "{\\'a}",
0x00e2: '{\\^a}',
0x00e3: '{\\~a}',
0x00e4: '{\\"a}',
0x00e5: '{\\aa}',
0x00e6: '{\\ae}',
0x00e7: '{\\c{c}}',
0x00e8: '{\\`e}',
0x00e9: "{\\'e}",
0x00ea: '{\\^e}',
0x00eb: '{\\"e}',
0x00ec: '{\\`\\i}',
0x00ed: "{\\'\\i}",
0x00ee: '{\\^\\i}',
0x00ef: '{\\"\\i}',
0x00f1: '{\\~n}',
0x00f2: '{\\`o}',
0x00f3: "{\\'o}",
0x00f4: '{\\^o}',
0x00f5: '{\\~o}',
0x00f6: '{\\"o}',
0x00f7: '{\\mbox{$\\div$}}',
0x00f8: '{\\o}',
0x00f9: '{\\`u}',
0x00fa: "{\\'u}",
0x00fb: '{\\^u}',
0x00fc: '{\\"u}',
0x00fd: "{\\'y}",
0x00ff: '{\\"y}',
0x0100: '{\\=A}',
0x0101: '{\\=a}',
0x0102: '{\\u{A}}',
0x0103: '{\\u{a}}',
0x0104: '{\\c{A}}',
0x0105: '{\\c{a}}',
0x0106: "{\\'C}",
0x0107: "{\\'c}",
0x0108: "{\\^C}",
0x0109: "{\\^c}",
0x010a: "{\\.C}",
0x010b: "{\\.c}",
0x010c: "{\\v{C}}",
0x010d: "{\\v{c}}",
0x010e: "{\\v{D}}",
0x010f: "{\\v{d}}",
0x0112: '{\\=E}',
0x0113: '{\\=e}',
0x0114: '{\\u{E}}',
0x0115: '{\\u{e}}',
0x0116: '{\\.E}',
0x0117: '{\\.e}',
0x0118: '{\\c{E}}',
0x0119: '{\\c{e}}',
0x011a: "{\\v{E}}",
0x011b: "{\\v{e}}",
0x011c: '{\\^G}',
0x011d: '{\\^g}',
0x011e: '{\\u{G}}',
0x011f: '{\\u{g}}',
0x0120: '{\\.G}',
0x0121: '{\\.g}',
0x0122: '{\\c{G}}',
0x0123: '{\\c{g}}',
0x0124: '{\\^H}',
0x0125: '{\\^h}',
0x0128: '{\\~I}',
0x0129: '{\\~\\i}',
0x012a: '{\\=I}',
0x012b: '{\\=\\i}',
0x012c: '{\\u{I}}',
0x012d: '{\\u\\i}',
0x012e: '{\\c{I}}',
0x012f: '{\\c{i}}',
0x0130: '{\\.I}',
0x0131: '{\\i}',
0x0132: '{IJ}',
0x0133: '{ij}',
0x0134: '{\\^J}',
0x0135: '{\\^\\j}',
0x0136: '{\\c{K}}',
0x0137: '{\\c{k}}',
0x0139: "{\\'L}",
0x013a: "{\\'l}",
0x013b: "{\\c{L}}",
0x013c: "{\\c{l}}",
0x013d: "{\\v{L}}",
0x013e: "{\\v{l}}",
0x0141: '{\\L}',
0x0142: '{\\l}',
0x0143: "{\\'N}",
0x0144: "{\\'n}",
0x0145: "{\\c{N}}",
0x0146: "{\\c{n}}",
0x0147: "{\\v{N}}",
0x0148: "{\\v{n}}",
0x014c: '{\\=O}',
0x014d: '{\\=o}',
0x014e: '{\\u{O}}',
0x014f: '{\\u{o}}',
0x0150: '{\\H{O}}',
0x0151: '{\\H{o}}',
0x0152: '{\\OE}',
0x0153: '{\\oe}',
0x0154: "{\\'R}",
0x0155: "{\\'r}",
0x0156: "{\\c{R}}",
0x0157: "{\\c{r}}",
0x0158: "{\\v{R}}",
0x0159: "{\\v{r}}",
0x015a: "{\\'S}",
0x015b: "{\\'s}",
0x015c: "{\\^S}",
0x015d: "{\\^s}",
0x015e: "{\\c{S}}",
0x015f: "{\\c{s}}",
0x0160: "{\\v{S}}",
0x0161: "{\\v{s}}",
0x0162: "{\\c{T}}",
0x0163: "{\\c{t}}",
0x0164: "{\\v{T}}",
0x0165: "{\\v{t}}",
0x0168: "{\\~U}",
0x0169: "{\\~u}",
0x016a: "{\\=U}",
0x016b: "{\\=u}",
0x016c: "{\\u{U}}",
0x016d: "{\\u{u}}",
0x016e: "{\\r{U}}",
0x016f: "{\\r{u}}",
0x0170: "{\\H{U}}",
0x0171: "{\\H{u}}",
0x0172: "{\\c{U}}",
0x0173: "{\\c{u}}",
0x0174: "{\\^W}",
0x0175: "{\\^w}",
0x0176: "{\\^Y}",
0x0177: "{\\^y}",
0x0178: '{\\"Y}',
0x0179: "{\\'Z}",
0x017a: "{\\'Z}",
0x017b: "{\\.Z}",
0x017c: "{\\.Z}",
0x017d: "{\\v{Z}}",
0x017e: "{\\v{z}}",
0x01c4: "{D\\v{Z}}",
0x01c5: "{D\\v{z}}",
0x01c6: "{d\\v{z}}",
0x01c7: "{LJ}",
0x01c8: "{Lj}",
0x01c9: "{lj}",
0x01ca: "{NJ}",
0x01cb: "{Nj}",
0x01cc: "{nj}",
0x01cd: "{\\v{A}}",
0x01ce: "{\\v{a}}",
0x01cf: "{\\v{I}}",
0x01d0: "{\\v\\i}",
0x01d1: "{\\v{O}}",
0x01d2: "{\\v{o}}",
0x01d3: "{\\v{U}}",
0x01d4: "{\\v{u}}",
0x01e6: "{\\v{G}}",
0x01e7: "{\\v{g}}",
0x01e8: "{\\v{K}}",
0x01e9: "{\\v{k}}",
0x01ea: "{\\c{O}}",
0x01eb: "{\\c{o}}",
0x01f0: "{\\v\\j}",
0x01f1: "{DZ}",
0x01f2: "{Dz}",
0x01f3: "{dz}",
0x01f4: "{\\'G}",
0x01f5: "{\\'g}",
0x01fc: "{\\'\\AE}",
0x01fd: "{\\'\\ae}",
0x01fe: "{\\'\\O}",
0x01ff: "{\\'\\o}",
0x02c6: '{\\^{}}',
0x02dc: '{\\~{}}',
0x02d8: '{\\u{}}',
0x02d9: '{\\.{}}',
0x02da: "{\\r{}}",
0x02dd: '{\\H{}}',
0x02db: '{\\c{}}',
0x02c7: '{\\v{}}',
0x03c0: '{\\mbox{$\\pi$}}',
# consider adding more Greek here
0xfb01: '{fi}',
0xfb02: '{fl}',
0x2013: '{--}',
0x2014: '{---}',
0x2018: "{`}",
0x2019: "{'}",
0x201c: "{``}",
0x201d: "{''}",
0x2020: "{\\dag}",
0x2021: "{\\ddag}",
0x2122: "{\\mbox{$^\\mbox{TM}$}}",
0x2022: "{\\mbox{$\\bullet$}}",
0x2026: "{\\ldots}",
0x2202: "{\\mbox{$\\partial$}}",
0x220f: "{\\mbox{$\\prod$}}",
0x2211: "{\\mbox{$\\sum$}}",
0x221a: "{\\mbox{$\\surd$}}",
0x221e: "{\\mbox{$\\infty$}}",
0x222b: "{\\mbox{$\\int$}}",
0x2248: "{\\mbox{$\\approx$}}",
0x2260: "{\\mbox{$\\neq$}}",
0x2264: "{\\mbox{$\\leq$}}",
0x2265: "{\\mbox{$\\geq$}}",
}
for _i in range(0x0020):
if _i not in latex_equivalents:
latex_equivalents[_i] = ''
for _i in range(0x0020,0x007f):
if _i not in latex_equivalents:
latex_equivalents[_i] = chr(_i)
# Characters that should be ignored and not output in tokenization
_ignore = Set([chr(i) for i in range(32)+[127]]) - Set('\t\n\r')
# Regexp of chars not in blacklist, for quick start of tokenize
_stoppers = re.compile('[\x00-\x1f!$\\-?\\{~\\\\`\']')
_blacklist = Set(' \n\r')
_blacklist.add(None) # shortcut candidate generation at end of data
# Construction of inverse translation table
_l2u = {
'\ ':ord(' ') # unexpanding space makes no sense in non-TeX contexts
}
for _tex in latex_equivalents:
if _tex <= 0x0020 or (_tex <= 0x007f and len(latex_equivalents[_tex]) <= 1):
continue # boring entry
_toks = tuple(_tokenize(latex_equivalents[_tex]))
if _toks[0] == '{' and _toks[-1] == '}':
_toks = _toks[1:-1]
if _toks[0].isalpha():
continue # don't turn ligatures into single chars
if len(_toks) == 1 and (_toks[0] == "'" or _toks[0] == "`"):
continue # don't turn ascii quotes into curly quotes
if _toks[0] == '\\mbox' and _toks[1] == '{' and _toks[-1] == '}':
_toks = _toks[2:-1]
if len(_toks) == 4 and _toks[1] == '{' and _toks[3] == '}':
_toks = (_toks[0],_toks[2])
if len(_toks) == 1:
_toks = _toks[0]
_l2u[_toks] = _tex
# Shortcut candidate generation for certain useless candidates:
# a character is in _blacklist if it can not be at the start
# of any translation in _l2u. We use this to quickly skip through
# such characters before getting to more difficult-translate parts.
# _blacklist is defined several lines up from here because it must
# be defined in order to call _tokenize, however it is safe to
# delay filling it out until now.
for i in range(0x0020,0x007f):
_blacklist.add(chr(i))
_blacklist.remove('{')
_blacklist.remove('$')
for candidate in _l2u:
if isinstance(candidate,tuple):
if not candidate or not candidate[0]:
continue
firstchar = candidate[0][0]
else:
firstchar = candidate[0]
_blacklist.discard(firstchar)
| jterrace/sphinxtr | extensions/natbib/latex_codec.py | Python | bsd-2-clause | 15,250 |
from show_latent import LatentView | mzwiessele/GPyNotebook | GPyNotebook/latent/__init__.py | Python | bsd-2-clause | 34 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
    '''
    Creates recommended article objects from article_list
    and _prepends_ to existing recommended articles.

    (Note: the misspelling "recomended" is kept -- the name is the
    public interface of this migration module.)
    '''
    # Snapshot the current recommendations before wiping them.
    previous = [
        rec.recommended_article.specific
        for rec in main_article.recommended_articles.all()]
    ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
    # Hyperlinked articles come first ...
    for linked in article_list:
        ArticlePageRecommendedSections(
            page=main_article,
            recommended_article=linked).save()
    # ... followed by the previous recommendations, skipping duplicates.
    for prev in previous:
        if prev in article_list:
            continue
        ArticlePageRecommendedSections(
            page=main_article,
            recommended_article=prev).save()
def convert_articles(apps, schema_editor):
    '''
    Derived from https://github.com/wagtail/wagtail/issues/2110

    For every ArticlePage: remove 'page' blocks from the body stream,
    convert the articles they point to into recommended-article links,
    and rebuild the body without those blocks.
    '''
    articles = ArticlePage.objects.all().exact_type(ArticlePage)
    for article in articles:
        stream_data = []
        linked_articles = []
        for block in article.body.stream_data:
            if block['type'] == 'page':
                # Links to since-deleted articles are silently dropped.
                if ArticlePage.objects.filter(id=block['value']):
                    linked_articles.append(ArticlePage.objects.get(
                        id=block['value']))
            else:
                # add block to new stream_data
                stream_data.append(block)
        if linked_articles:
            create_recomended_articles(article, linked_articles)
        stream_block = article.body.stream_block
        # is_lazy=True defers parsing of the raw stream data until accessed.
        article.body = StreamValue(stream_block, stream_data, is_lazy=True)
        article.save()
        # NOTE(review): assumes every article's parent is a section page with
        # these two flags -- TODO confirm for all content trees.
        section = article.get_parent().specific
        section.enable_recommended_section = True
        section.enable_next_section = True
        section.save()
class Migration(migrations.Migration):
    """Data migration: turn body 'page' stream blocks into recommended articles."""

    dependencies = [
        ('iogt', '0002_create_importers_group'),
    ]

    # No reverse operation is provided; this migration is one-way.
    operations = [
        migrations.RunPython(convert_articles),
    ]
| praekelt/molo-iogt | iogt/migrations/0003_convert_recomended_articles.py | Python | bsd-2-clause | 2,316 |
import base64
import cPickle as pickle
import datetime
from email import message_from_string
from email.utils import getaddresses
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.db import models
from django.db.models import Q, F
from django.utils import simplejson as json
from django.utils.encoding import smart_str
from kiki.message import KikiMessage
from kiki.validators import validate_local_part, validate_not_command
class ListUserMetadata(models.Model):
    """Through-model linking a User to a MailingList with a membership status."""
    # Membership status values (stored in the ``status`` field).
    UNCONFIRMED = 0
    SUBSCRIBER = 1
    MODERATOR = 2
    BLACKLISTED = 3
    STATUS_CHOICES = (
        (UNCONFIRMED, u'Unconfirmed'),
        (SUBSCRIBER, u'Subscriber'),
        (MODERATOR, u'Moderator'),
        (BLACKLISTED, u'Blacklisted'),
    )
    user = models.ForeignKey(User)
    mailing_list = models.ForeignKey('MailingList')
    # Indexed: recipient/permission queries filter on status.
    status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, default=UNCONFIRMED, db_index=True)

    def __unicode__(self):
        return u"%s - %s - %s" % (self.user, self.mailing_list, self.get_status_display())

    class Meta:
        # One membership record per (user, list) pair.
        unique_together = ('user', 'mailing_list')
class MailingListManager(models.Manager):
    """Manager adding site- and address-based lookups for :class:`MailingList`."""

    def for_site(self, site):
        """Return the mailing lists attached to *site*.

        BUGFIX(review): the model's site relation is named ``domain``, not
        ``site``; filtering on ``site=site`` raised FieldError.
        """
        return self.filter(domain=site)

    def for_addresses(self, addresses):
        """
        Takes an iterable of email addresses and returns a queryset of
        mailing lists attached to the current site with matching local parts.

        Addresses without an '@' are ignored (previously they raised
        IndexError), as are addresses on other domains.
        """
        site = Site.objects.get_current()
        local_parts = []
        for addr in addresses:
            local_part, sep, domain = addr.rpartition('@')
            if sep and domain == site.domain:
                local_parts.append(local_part)
        if not local_parts:
            return self.none()
        return self.filter(domain=site, local_part__in=local_parts)
class MailingList(models.Model):
    """
    This model contains all options for a mailing list, as well as some helpful
    methods for accessing subscribers, moderators, etc.
    """
    objects = MailingListManager()

    # Values for ``who_can_post``.
    MODERATORS = "mod"
    SUBSCRIBERS = "sub"
    ANYONE = "all"
    PERMISSION_CHOICES = (
        (MODERATORS, 'Moderators',),
        (SUBSCRIBERS, 'Subscribers',),
        (ANYONE, 'Anyone',),
    )
    name = models.CharField(max_length=50)
    subject_prefix = models.CharField(max_length=10, blank=True)
    local_part = models.CharField(max_length=64, validators=[validate_local_part, validate_not_command])
    domain = models.ForeignKey(Site)
    description = models.TextField(blank=True)
    who_can_post = models.CharField(max_length=3, choices=PERMISSION_CHOICES, default=SUBSCRIBERS)
    self_subscribe_enabled = models.BooleanField(verbose_name='self-subscribe enabled', default=True)
    moderation_enabled = models.BooleanField(help_text="If enabled, messages that would be rejected will be marked ``Requires Moderation`` and an email will be sent to the list's moderators.", default=False)
    # If is_anonymous becomes an option, the precooker will need to handle some anonymizing.
    #is_anonymous = models.BooleanField()
    users = models.ManyToManyField(
        User,
        related_name = 'mailinglists',
        blank = True,
        null = True,
        through = ListUserMetadata
    )
    messages = models.ManyToManyField(
        'Message',
        related_name = 'mailinglists',
        blank = True,
        null = True,
        through = 'ListMessage'
    )

    @property
    def address(self):
        """The full posting address of the list, e.g. ``list@example.com``."""
        return "%s@%s" % (self.local_part, self.domain.domain)

    def _list_id_header(self):
        # Value for the RFC 2919 List-Id header.
        # Does this need to be a byte string?
        return smart_str(u"%s <%s.%s>" % (self.name, self.local_part, self.domain.domain))

    def __unicode__(self):
        return self.name

    def clean(self):
        validate_email(self.address)
        # As per RFC 2919, the list_id_header has a max length of 255 octets.
        if len(self._list_id_header()) > 254:
            # Allow 4 extra spaces: the delimiters, the space, and the period.
            raise ValidationError("The list name, local part, and site domain name can be at most 250 characters long together.")

    def get_recipients(self):
        """Returns a queryset of :class:`User`\ s that should receive this message."""
        qs = User.objects.filter(is_active=True)
        qs = qs.filter(listusermetadata__mailing_list=self, listusermetadata__status__in=[ListUserMetadata.SUBSCRIBER, ListUserMetadata.MODERATOR])
        return qs.distinct()

    def _is_email_with_status(self, email, status):
        """True if *email* (a string or User) has *status* on this list."""
        if isinstance(email, basestring):
            kwargs = {'user__email__iexact': email}
        elif isinstance(email, User):
            kwargs = {'user': email}
        else:
            return False
        try:
            self.listusermetadata_set.get(status=status, **kwargs)
        except ListUserMetadata.DoesNotExist:
            return False
        return True

    def is_subscriber(self, email):
        # BUGFIX(review): was ListUserMetadata.SUBCRIBER (typo), which raised
        # AttributeError whenever subscriber permissions were checked.
        return self._is_email_with_status(email, ListUserMetadata.SUBSCRIBER)

    def is_moderator(self, email):
        return self._is_email_with_status(email, ListUserMetadata.MODERATOR)

    def can_post(self, email):
        """Whether *email* may post, per ``who_can_post`` (moderators always may)."""
        if self.who_can_post == MailingList.ANYONE:
            return True
        if self.who_can_post == MailingList.SUBSCRIBERS and self.is_subscriber(email):
            return True
        if self.is_moderator(email):
            return True
        return False
class ProcessedMessageModel(models.Model):
    """
    Encapsulates the logic required for storing and fetching pickled EmailMessage objects. This should eventually be replaced with a custom model field.
    """
    processed_message = models.TextField(help_text="The processed form of the message at the current stage (pickled).", blank=True)

    # Store the message as a base64-encoded pickle dump a la django-mailer.
    def set_processed(self, msg):
        self.processed_message = base64.encodestring(pickle.dumps(msg, pickle.HIGHEST_PROTOCOL))
        # Cache the live object so get_processed() skips the re-unpickle.
        self._processed = msg

    def get_processed(self):
        # NOTE(review): unpickling executes arbitrary code; safe only while
        # database content is fully trusted.
        if not hasattr(self, '_processed'):
            self._processed = pickle.loads(base64.decodestring(self.processed_message))
        return self._processed

    class Meta:
        abstract = True
class Message(ProcessedMessageModel):
    """
    Represents an email received by Kiki. Stores the original received message as well as a pickled version of the processed message.
    """
    # Processing states for a received message.
    UNPROCESSED = 'u'
    PROCESSED = 'p'
    FAILED = 'f'
    STATUS_CHOICES = (
        (UNPROCESSED, 'Unprocessed'),
        (PROCESSED, 'Processed'),
        (FAILED, 'Failed'),
    )
    # Presumably the email's Message-ID header; unique so a message is stored once.
    message_id = models.CharField(max_length=255, unique=True)
    #: The message_id of the email this is in reply to.
    # in_reply_to = models.CharField(max_length=255, db_index=True, blank=True)
    from_email = models.EmailField()
    # Timestamp at which the message was received.
    received = models.DateTimeField()
    status = models.CharField(max_length=1, choices=STATUS_CHOICES, db_index=True, default=UNPROCESSED)
    original_message = models.TextField(help_text="The original raw text of the message.")
class ListMessage(ProcessedMessageModel):
    """
    Represents the relationship between a :class:`Message` and a :class:`MailingList`. This is what is processed to handle the sending of a message to a list rather than the original message.
    """
    # Per-list delivery pipeline states.
    ACCEPTED = 1
    REQUIRES_MODERATION = 2
    PREPPED = 3
    SENT = 4
    FAILED = 5
    STATUS_CHOICES = (
        (ACCEPTED, 'Accepted'),
        (REQUIRES_MODERATION, 'Requires Moderation'),
        (PREPPED, 'Prepped'),
        (SENT, 'Sent'),
        (FAILED, 'Failed'),
    )
    message = models.ForeignKey(Message)
    mailing_list = models.ForeignKey(MailingList)
    status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, db_index=True)

    class Meta:
        # One delivery record per (message, list) pair.
        unique_together = ('message', 'mailing_list',)
class ListCommand(models.Model):
    """Tracks the processing state of a command addressed to a mailing list."""
    #: The ListCommand has not been processed.
    UNPROCESSED = 1
    #: The ListCommand has been rejected (e.g. for permissioning reasons.)
    REJECTED = 2
    #: Ths ListCommand has been processed completely.
    PROCESSED = 3
    #: An error occurred while processing the ListCommand.
    FAILED = 4
    STATUS_CHOICES = (
        (UNPROCESSED, 'Unprocessed'),
        (REJECTED, 'Rejected'),
        (PROCESSED, 'Processed'),
        (FAILED, 'Failed'),
    )
    # The message the command was parsed from, and the list it targets.
    message = models.ForeignKey(Message)
    mailing_list = models.ForeignKey(MailingList)
    status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES, db_index=True, default=UNPROCESSED)
command = models.CharField(max_length=20) | melinath/django-kiki | kiki/models.py | Python | bsd-2-clause | 8,088 |
import pygame
# EXPORT
KeyCodes = {
"BACKSPACE": pygame.K_BACKSPACE,
"TAB": pygame.K_TAB,
"CLEAR": pygame.K_CLEAR,
"RETURN": pygame.K_RETURN,
"PAUSE": pygame.K_PAUSE,
"ESCAPE": pygame.K_ESCAPE,
"SPACE": pygame.K_SPACE,
"EXCLAIM": pygame.K_EXCLAIM,
"QUOTEDBL": pygame.K_QUOTEDBL,
"HASH": pygame.K_HASH,
"DOLLAR": pygame.K_DOLLAR,
"AMPERSAND": pygame.K_AMPERSAND,
"QUOTE": pygame.K_QUOTE,
"LEFTPAREN": pygame.K_LEFTPAREN,
"RIGHTPAREN": pygame.K_RIGHTPAREN,
"ASTERISK": pygame.K_ASTERISK,
"PLUS": pygame.K_PLUS,
"COMMA": pygame.K_COMMA,
"MINUS": pygame.K_MINUS,
"PERIOD": pygame.K_PERIOD,
"SLASH": pygame.K_SLASH,
"0": pygame.K_0,
"1": pygame.K_1,
"2": pygame.K_2,
"3": pygame.K_3,
"4": pygame.K_4,
"5": pygame.K_5,
"6": pygame.K_6,
"7": pygame.K_7,
"8": pygame.K_8,
"9": pygame.K_9,
"COLON": pygame.K_COLON,
"SEMICOLON": pygame.K_SEMICOLON,
"LESS": pygame.K_LESS,
"EQUALS": pygame.K_EQUALS,
"GREATER": pygame.K_GREATER,
"QUESTION": pygame.K_QUESTION,
"AT": pygame.K_AT,
"LEFTBRACKET": pygame.K_LEFTBRACKET,
"BACKSLASH": pygame.K_BACKSLASH,
"RIGHTBRACKET": pygame.K_RIGHTBRACKET,
"CARET": pygame.K_CARET,
"UNDERSCORE": pygame.K_UNDERSCORE,
"BACKQUOTE": pygame.K_BACKQUOTE,
"a": pygame.K_a,
"b": pygame.K_b,
"c": pygame.K_c,
"d": pygame.K_d,
"e": pygame.K_e,
"f": pygame.K_f,
"g": pygame.K_g,
"h": pygame.K_h,
"i": pygame.K_i,
"j": pygame.K_j,
"k": pygame.K_k,
"l": pygame.K_l,
"m": pygame.K_m,
"n": pygame.K_n,
"o": pygame.K_o,
"p": pygame.K_p,
"q": pygame.K_q,
"r": pygame.K_r,
"s": pygame.K_s,
"t": pygame.K_t,
"u": pygame.K_u,
"v": pygame.K_v,
"w": pygame.K_w,
"x": pygame.K_x,
"y": pygame.K_y,
"z": pygame.K_z,
"DELETE": pygame.K_DELETE,
"KP0": pygame.K_KP0,
"KP1": pygame.K_KP1,
"KP2": pygame.K_KP2,
"KP3": pygame.K_KP3,
"KP4": pygame.K_KP4,
"KP5": pygame.K_KP5,
"KP6": pygame.K_KP6,
"KP7": pygame.K_KP7,
"KP8": pygame.K_KP8,
"KP9": pygame.K_KP9,
"KP_PERIOD": pygame.K_KP_PERIOD,
"KP_DIVIDE": pygame.K_KP_DIVIDE,
"KP_MULTIPLY": pygame.K_KP_MULTIPLY,
"KP_MINUS": pygame.K_KP_MINUS,
"KP_PLUS": pygame.K_KP_PLUS,
"KP_ENTER": pygame.K_KP_ENTER,
"KP_EQUALS": pygame.K_KP_EQUALS,
"UP": pygame.K_UP,
"DOWN": pygame.K_DOWN,
"RIGHT": pygame.K_RIGHT,
"LEFT": pygame.K_LEFT,
"INSERT": pygame.K_INSERT,
"HOME": pygame.K_HOME,
"END": pygame.K_END,
"PAGEUP": pygame.K_PAGEUP,
"PAGEDOWN": pygame.K_PAGEDOWN,
"F1": pygame.K_F1,
"F2": pygame.K_F2,
"F3": pygame.K_F3,
"F4": pygame.K_F4,
"F5": pygame.K_F5,
"F6": pygame.K_F6,
"F7": pygame.K_F7,
"F8": pygame.K_F8,
"F9": pygame.K_F9,
"F10": pygame.K_F10,
"F11": pygame.K_F11,
"F12": pygame.K_F12,
"F13": pygame.K_F13,
"F14": pygame.K_F14,
"F15": pygame.K_F15,
"NUMLOCK": pygame.K_NUMLOCK,
"CAPSLOCK": pygame.K_CAPSLOCK,
"SCROLLOCK": pygame.K_SCROLLOCK,
"RSHIFT": pygame.K_RSHIFT,
"LSHIFT": pygame.K_LSHIFT,
"RCTRL": pygame.K_RCTRL,
"LCTRL": pygame.K_LCTRL,
"RALT": pygame.K_RALT,
"LALT": pygame.K_LALT,
"RMETA": pygame.K_RMETA,
"LMETA": pygame.K_LMETA,
"LSUPER": pygame.K_LSUPER,
"RSUPER": pygame.K_RSUPER,
"MODE": pygame.K_MODE,
"HELP": pygame.K_HELP,
"PRINT": pygame.K_PRINT,
"SYSREQ": pygame.K_SYSREQ,
"BREAK": pygame.K_BREAK,
"MENU": pygame.K_MENU,
"POWER": pygame.K_POWER,
"EURO": pygame.K_EURO,
}
| amirgeva/py2d | engine/keycodes.py | Python | bsd-2-clause | 3,668 |
import sys
# widgets
class Button:
    """
    Represents button

    Keyword arguments:
    text -- button text
          | str
    onclick -- function invoked after pressing the button
          | function: Button -> void

    Attributes:
    wide -- makes the button wide
    """
    def __new__(cls, text=None, onclick=None):
        # Facade: instantiate the concrete widget registered under
        # 'aui.widgets'; this class only documents the interface.
        return object.__new__(sys.modules['aui.widgets'].Button)
    def __init__(self, text, onclick=None):
        # NOTE(review): assigning self makes ``button.wide`` evaluate to the
        # button itself (a truthy marker) -- TODO confirm intended semantics.
        self.wide = self
    def destroy(self):
        """Destroys the button"""
        pass
class Checkbox:
    """
    Represents checkbox in UI

    Keyword arguments:
    text -- checkbox text
          | str
    selected -- whether the checkbox is selected on init
          | boolean
    onchange -- function invoked after toggling the checkbox
          | function: Checkbox -> void
    """
    def __new__(cls, text=None, selected=False, onchange=None, *args):
        # Facade: instantiate the concrete widget registered under 'aui.widgets'.
        return object.__new__(sys.modules['aui.widgets'].Checkbox)
    def __init__(self, text, selected=False, onchange=None):
        pass
    def destroy(self):
        """Destroys the checkbox"""
        pass
class Input:
    """
    Represents input field in UI

    Keyword arguments:
    value -- default value
          | str (default: "")
    onenter -- function called after the return key is pressed
          | function: Input -> void

    Attributes:
    wide -- makes the input wide
    """
    def __new__(cls, value="", onenter=None, *args):
        # Facade: instantiate the concrete widget registered under 'aui.widgets'.
        return object.__new__(sys.modules['aui.widgets'].Input)
    def __init__(self, value="", onenter=None):
        # NOTE(review): assigning self makes ``input.wide`` evaluate to the
        # input itself (a truthy marker) -- TODO confirm intended semantics.
        self.wide = self
    def destroy(self):
        """Destroys the input field"""
        pass
class Label:
    """
    Represents label in UI

    Keyword arguments:
    text -- label text
          | str
    """
    def __new__(cls, text=None, *args):
        # Facade: instantiate the concrete widget registered under 'aui.widgets'.
        return object.__new__(sys.modules['aui.widgets'].Label)
    def __init__(self, text):
        pass
    def destroy(self):
        """Destroys the label"""
        pass
class Text:
    """
    Represents multiline input field in UI

    Keyword arguments:
    text -- widget text
          | str (default: "")
    """
    def __new__(cls, text=None, *args):
        # Facade: instantiate the concrete widget registered under 'aui.widgets'.
        return object.__new__(sys.modules['aui.widgets'].Text)
    def __init__(self, text=""):
        pass
    def destroy(self):
        """Destroys the text field"""
        pass
# containers
class Vertical:
    """
    Represents vertical container in UI

    Arguments:
    *children -- children elements of the container
    """
    def __new__(cls, *args):
        # Facade: instantiate the concrete widget registered under 'aui.widgets'.
        return object.__new__(sys.modules['aui.widgets'].Vertical)
    def append(self, child):
        """
        Appends widget to the vertical container

        Keyword arguments:
        child -- the widget to be placed into the container
        """
        pass
    def create(self, parent, align=None):
        """
        Creates vertical container and assigns it to its parent

        Keyword arguments:
        parent -- parent of the element to be put into
        align -- alignment of the element in container tk.constants.(TOP/RIGHT/BOTTOM/LEFT)
        """
        pass
    def destroy(self):
        """Destroys the vertical container"""
        pass
class Horizontal:
    """
    Represents horizontal container in UI

    Arguments:
    *children -- children elements of the container
    """
    def __new__(cls, *args):
        # Facade: instantiate the concrete widget registered under 'aui.widgets'.
        return object.__new__(sys.modules['aui.widgets'].Horizontal)
    def append(self, child):
        """
        Appends widget to the horizontal container

        Keyword arguments:
        child -- the widget to be placed into the container
        """
        pass
    def create(self, parent, align=None):
        """
        Creates horizontal container and assigns it to its parent

        Keyword arguments:
        parent -- parent of the element to be put into
        align -- alignment of the element in container tk.constants.(TOP/RIGHT/BOTTOM/LEFT)
        """
        pass
    def destroy(self):
        """Destroys the horizontal container"""
        pass
| klausweiss/python-aui | aui/widgets.py | Python | bsd-2-clause | 4,207 |
# ENVISIoN
#
# Copyright (c) 2019 Jesper Ericsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##############################################################################################
def format_error(error):
    """Return ``[exception_class_name, message_text]`` for *error*."""
    name = type(error).__name__
    message = str(error)
    return [name, message]
class EnvisionError(Exception):
    """Base class for all ENVISIoN-specific errors."""
    pass
class HandlerNotFoundError(EnvisionError):
    ''' Error used for non-critical unhandled requests.
    The user is not notified when this is raised.'''
    pass
class HandlerAlreadyExistError(EnvisionError):
    ''' Raised when a new visualisation tries to initialize
    with an already-existing handler id.

    (Name kept as-is -- "HandlerAlreadyExistsError" would be
    grammatical, but renaming would break callers.)'''
    pass
class InvalidRequestError(EnvisionError):
    ''' Error used for invalid requests, such as invalid parameters.
    Should generate an alert so the user knows something did not work.
    '''
    pass
class ProcessorNotFoundError(EnvisionError):
    """Raised when a referenced processor cannot be found."""
    pass
class BadHDF5Error(EnvisionError):
    # Presumably raised for malformed/unexpected HDF5 input -- TODO confirm.
    pass
class ProcessorNetworkError(EnvisionError):
    """Raised for errors in the processor network."""
    pass
# TODO: Custom parse errors
| rartino/ENVISIoN | envisionpy/utils/exceptions.py | Python | bsd-2-clause | 2,304 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# --------------------------------------------------------
# R*CNN
# Written by Georgia Gkioxari, 2015.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
"""The data layer used during training to train a R*CNN network.
AttributesDataLayer implements a Caffe Python layer.
"""
import caffe
from fast_rcnn.config import cfg
from attr_data_layer.minibatch import get_minibatch
import numpy as np
import yaml
from multiprocessing import Process, Queue
# import pdb
class AttributesDataLayer(caffe.Layer):
    """R*CNN data layer used during training for attributes."""
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # Reshuffle once the remaining entries cannot fill a whole batch.
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def _get_next_minibatch(self):
        """Return the blobs to be used for the next minibatch.

        If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a
        separate process and made available through self._blob_queue.
        """
        if cfg.TRAIN.USE_PREFETCH:
            # Blocks until the BlobFetcher process has produced a batch.
            return self._blob_queue.get()
        else:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            return get_minibatch(minibatch_db, self._num_classes)
    def set_roidb(self, roidb):
        """Set the roidb to be used by this layer during training."""
        self._roidb = roidb
        self._shuffle_roidb_inds()
        if cfg.TRAIN.USE_PREFETCH:
            # Bounded queue: prefetcher blocks once 10 batches are waiting.
            self._blob_queue = Queue(10)
            self._prefetch_process = BlobFetcher(self._blob_queue,
                                                 self._roidb,
                                                 self._num_classes)
            self._prefetch_process.start()
            # Terminate the child process when the parent exists
            def cleanup():
                print 'Terminating BlobFetcher'
                self._prefetch_process.terminate()
                self._prefetch_process.join()
            import atexit
            atexit.register(cleanup)
    def setup(self, bottom, top):
        """Setup the RoIDataLayer."""
        # parse the layer parameter string, which must be valid YAML
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; prototxt param strings are assumed trusted here.
        layer_params = yaml.load(self.param_str_)
        self._num_classes = layer_params['num_classes']
        # Maps blob names to their index in the ``top`` vector.
        self._name_to_top_map = {
            'data': 0,
            'rois': 1,
            'labels': 2}
        # data blob: holds a batch of N images, each with 3 channels
        # The height and width (100 x 100) are dummy values
        top[0].reshape(1, 3, 100, 100)
        # rois blob: holds R regions of interest, each is a 5-tuple
        # (n, x1, y1, x2, y2) specifying an image batch index n and a
        # rectangle (x1, y1, x2, y2)
        top[1].reshape(1, 5)
        # labels blob: holds labels for each attribute
        top[2].reshape(1, self._num_classes)
    def forward(self, bottom, top):
        """Get blobs and copy them into this layer's top blob vector."""
        blobs = self._get_next_minibatch()
        for blob_name, blob in blobs.iteritems():
            top_ind = self._name_to_top_map[blob_name]
            # Reshape net's input blobs
            top[top_ind].reshape(*(blob.shape))
            # Copy data into net's input blobs
            top[top_ind].data[...] = blob.astype(np.float32, copy=False)
    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass
    def reshape(self, bottom, top):
        """Reshaping happens during the call to forward."""
        pass
class BlobFetcher(Process):
    """Experimental class for prefetching blobs in a separate process."""
    def __init__(self, queue, roidb, num_classes):
        super(BlobFetcher, self).__init__()
        # Queue shared with the parent AttributesDataLayer.
        self._queue = queue
        self._roidb = roidb
        self._num_classes = num_classes
        self._perm = None
        self._cur = 0
        self._shuffle_roidb_inds()
        # fix the random seed for reproducibility
        # NOTE(review): seeded *after* the first shuffle above, so that
        # initial permutation is not reproducible -- TODO confirm intent.
        np.random.seed(cfg.RNG_SEED)
    def _shuffle_roidb_inds(self):
        """Randomly permute the training roidb."""
        # TODO(rbg): remove duplicated code
        self._perm = np.random.permutation(np.arange(len(self._roidb)))
        self._cur = 0
    def _get_next_minibatch_inds(self):
        """Return the roidb indices for the next minibatch."""
        # TODO(rbg): remove duplicated code
        if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
            self._shuffle_roidb_inds()
        db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
        self._cur += cfg.TRAIN.IMS_PER_BATCH
        return db_inds
    def run(self):
        # Runs in the child process: produce batches forever; the bounded
        # queue provides back-pressure, and the parent terminates us at exit.
        print 'BlobFetcher started'
        while True:
            db_inds = self._get_next_minibatch_inds()
            minibatch_db = [self._roidb[i] for i in db_inds]
            blobs = get_minibatch(minibatch_db, self._num_classes)
            self._queue.put(blobs)
| gkioxari/RstarCNN | lib/attr_data_layer/layer.py | Python | bsd-2-clause | 5,647 |
from .. import Availability, Class, Constant, Define, Method, Parameter, Type
gx_class = Class('VM',
doc="""
In-memory vector data methods
The :class:`VM` class will store vector (array) data in a memory buffer which
can be accessed using the :class:`VM` methods.
The main use for the :class:`VM` class is to store data in a single physical
memory location. This memory can then be accessed by a user DLL using
the :func:`GetPtrVM_GEO` function defined in gx_extern.h.
:class:`VM` memory can be any size, but a :class:`VM` is intended for handling relatively
small sets of data compared to a :class:`VV`, which can work efficiently with
very large volumes of data. The acceptable maximum :class:`VM` size depends on
the operating system and the performance requirements of an application.
The best performance is achieved when all :class:`VM` memory can be stored
comfortably within the the available system RAM. If all :class:`VM` memory
will not fit in the system RAM, the operating system virtual memory
manager will be used to swap memory to the operations systems virtual
memory paging file. Note that the operating system virtual memory
manager is much slower than the manager used by Geosoft when working with
very large arrays in a :class:`VV`.
See :class:`VV` for methods to move data between a :class:`VM` and a :class:`VV`.
""")
gx_methods = {
'Miscellaneous': [
Method('Create_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Create a :class:`VM`.",
notes="The :class:`VM` elements are initialized to dummies.",
return_type="VM",
return_doc=":class:`VM` Object",
parameters = [
Parameter('type', type=Type.INT32_T,
doc=":def:`GEO_VAR`"),
Parameter('elements', type=Type.INT32_T,
doc=":class:`VM` length (less than 16777215)")
]),
Method('CreateExt_VM', module='geoengine.core', version='6.4.2',
availability=Availability.PUBLIC,
doc="Create a :class:`VM`, using one of the :def:`GS_TYPES` special data types.",
notes="The :class:`VM` elements are initialized to dummies.",
return_type="VM",
return_doc=":class:`VM` Object",
parameters = [
Parameter('type', type=Type.INT32_T,
doc=":def:`GS_TYPES`"),
Parameter('elements', type=Type.INT32_T,
doc=":class:`VM` length (less than 16777215)")
]),
Method('Destroy_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Destroy a :class:`VM`.",
return_type=Type.VOID,
parameters = [
Parameter('vm', type="VM",
doc=":class:`VM` to destroy.")
]),
Method('iGetInt_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Get an integer element from a :class:`VM`.",
return_type=Type.INT32_T,
return_doc="""
Element wanted, or :const:`iDUMMY`
if the value is dummy or outside of the range of data.
""",
parameters = [
Parameter('vm', type="VM"),
Parameter('element', type=Type.INT32_T,
doc="Element wanted")
]),
Method('IGetString_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Get a string element from a :class:`VM`.",
notes="""
Returns element wanted, or blank string
if the value is dummy or outside of the range of data.
Type conversions are performed if necessary. Dummy values
are converted to "*" string.
""",
return_type=Type.VOID,
parameters = [
Parameter('vm', type="VM"),
Parameter('element', type=Type.INT32_T,
doc="Element wanted"),
Parameter('str_val', type=Type.STRING, is_ref=True, size_of_param='str_size',
doc="String in which to place element"),
Parameter('str_size', type=Type.INT32_T, default_length='STR_VERY_LONG',
doc="Maximum length of the string")
]),
Method('iLength_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Returns current :class:`VM` length.",
return_type=Type.INT32_T,
return_doc="# of elements in the :class:`VM`.",
parameters = [
Parameter('vm', type="VM")
]),
Method('ReSize_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Re-set the size of a :class:`VM`.",
notes="If increasing the :class:`VM` size, new elements are set to dummies.",
return_type=Type.VOID,
parameters = [
Parameter('vm', type="VM",
doc=":class:`VM` to resize"),
Parameter('newsize', type=Type.INT32_T,
doc="New size (number of elements)")
]),
Method('rGetReal_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Get a real element from a :class:`VM`.",
return_type=Type.DOUBLE,
return_doc="""
Element wanted, or :const:`rDUMMY`
if the value is dummy or outside of the range of data.
""",
parameters = [
Parameter('vm', type="VM"),
Parameter('element', type=Type.INT32_T,
doc="Element wanted")
]),
Method('SetInt_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Set an integer element in a :class:`VM`.",
notes="""
Element being set cannot be < 0.
If the element is > current :class:`VM` length, the :class:`VM` length is
increased. Reallocating :class:`VM` lengths can lead to fragmented
memory and should be avoided if possible.
""",
return_type=Type.VOID,
parameters = [
Parameter('vm', type="VM"),
Parameter('element', type=Type.INT32_T,
doc="Element to set"),
Parameter('value', type=Type.INT32_T,
doc="Value to set")
]),
Method('SetReal_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Set a real element in a :class:`VM`.",
notes="""
Element being set cannot be < 0.
If the element is > current :class:`VM` length, the :class:`VM` length is
increased. Reallocating :class:`VM` lengths can lead to fragmented
memory and should be avoided if possible.
""",
return_type=Type.VOID,
parameters = [
Parameter('vm', type="VM"),
Parameter('element', type=Type.INT32_T,
doc="Element to set"),
Parameter('value', type=Type.DOUBLE,
doc="Value to set")
]),
Method('SetString_VM', module='geoengine.core', version='5.0.0',
availability=Availability.PUBLIC,
doc="Set a string element in a :class:`VM`.",
notes="""
Element being set cannot be < 0.
If the element is > current :class:`VM` length, the :class:`VM` length is
increased. Reallocating :class:`VM` lengths can lead to fragmented
memory and should be avoided if possible.
""",
return_type=Type.VOID,
parameters = [
Parameter('vm', type="VM"),
Parameter('element', type=Type.INT32_T,
doc="Element to set"),
Parameter('value', type=Type.STRING,
doc="String to set")
])
]
}
| GeosoftInc/gxapi | spec/core/VM.py | Python | bsd-2-clause | 9,250 |
"""The tower of Hanoi."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def tower_of_hanoi(height, from_pole, to_pole, with_pole, counter):
    """Move *height* disks from from_pole to to_pole via with_pole.

    Prints one "X -> Y" line per move and increments counter[0] for each.

    Time complexity: T(1) = 1, T(n) = 2T(n - 1) + 1 => O(2^n).
    Space complexity: O(1).
    """
    # Base case: a single disk moves directly.
    if height == 1:
        counter[0] += 1
        print('{0} -> {1}'.format(from_pole, to_pole))
        return
    # Park the upper tower on the spare pole, move the largest disk,
    # then bring the parked tower on top of it.
    tower_of_hanoi(height - 1, from_pole, with_pole, to_pole, counter)
    counter[0] += 1
    print('{0} -> {1}'.format(from_pole, to_pole))
    tower_of_hanoi(height - 1, with_pole, to_pole, from_pole, counter)
def main():
    """Demo: solve towers of height 1, 2 and 5, reporting move counts.

    (Deduplicated: the original repeated the same four-line stanza three
    times; output is identical.)
    """
    from_pole = 'A'
    to_pole = 'B'
    with_pole = 'C'
    # A tower of height h takes 2**h - 1 moves.
    for height in (1, 2, 5):
        counter = [0]
        print('height: {}'.format(height))
        tower_of_hanoi(height, from_pole, to_pole, with_pole, counter)
        print('counter: {}'.format(counter[0]))
if __name__ == '__main__':
main()
| bowen0701/algorithms_data_structures | alg_tower_of_hanoi.py | Python | bsd-2-clause | 1,320 |
from django.conf.urls.defaults import patterns, include, url
from singlecontrol.views import index, socketio
# Legacy Django URLconf (pre-1.6 ``patterns()`` style).
urlpatterns = patterns('',
    url(r'^$', view=index, name='index'),
    # Matches any path starting with "socket.io" (no trailing anchor).
    url(r'^socket\.io', view=socketio, name='socketio'),
)

# Serve static files through the staticfiles app (development helper).
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
| victorpoluceno/python_kinect_socketio | urls.py | Python | bsd-2-clause | 349 |
## @file
# generate capsule
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
from .GenFdsGlobalVariable import GenFdsGlobalVariable, FindExtendTool
from CommonDataClass.FdfClass import CapsuleClassObject
import Common.LongFilePathOs as os
from io import BytesIO
from Common.Misc import SaveFileOnChange, PackRegistryFormatGuid
import uuid
from struct import pack
from Common import EdkLogger
from Common.BuildToolError import GENFDS_ERROR
from Common.DataType import TAB_LINE_BREAK
WIN_CERT_REVISION = 0x0200
WIN_CERT_TYPE_EFI_GUID = 0x0EF1
EFI_CERT_TYPE_PKCS7_GUID = uuid.UUID('{4aafd29d-68df-49ee-8aa9-347d375665a7}')
EFI_CERT_TYPE_RSA2048_SHA256_GUID = uuid.UUID('{a7717414-c616-4977-9420-844712a735bf}')
## create inf file describes what goes into capsule and call GenFv to generate capsule
#
#
## create inf file describes what goes into capsule and call GenFv to generate capsule
#
#
class Capsule (CapsuleClassObject):
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        CapsuleClassObject.__init__(self)
        # For GenFv
        self.BlockSize = None
        # For GenFv
        self.BlockNum = None
        self.CapsuleName = None

    ## Generate FMP capsule
    #
    # Builds an EFI_CAPSULE_HEADER + EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER
    # followed by the embedded drivers and (optionally signed) FMP payloads,
    # then writes the assembled blob to <FvDir>/<UiCapsuleName>.Cap.
    #
    #   @retval string      Generated Capsule file path
    #
    def GenFmpCapsule(self):
        #
        # Generate capsule header
        # typedef struct {
        #     EFI_GUID          CapsuleGuid;
        #     UINT32            HeaderSize;
        #     UINT32            Flags;
        #     UINT32            CapsuleImageSize;
        # } EFI_CAPSULE_HEADER;
        #
        Header = BytesIO()
        #
        # Use FMP capsule GUID: 6DCBD5ED-E82D-4C44-BDA1-7194199AD92A
        #
        Header.write(PackRegistryFormatGuid('6DCBD5ED-E82D-4C44-BDA1-7194199AD92A'))
        HdrSize = 0
        if 'CAPSULE_HEADER_SIZE' in self.TokensDict:
            Header.write(pack('=I', int(self.TokensDict['CAPSULE_HEADER_SIZE'], 16)))
            HdrSize = int(self.TokensDict['CAPSULE_HEADER_SIZE'], 16)
        else:
            # Default header size: 0x20 bytes (allows 4 bytes of padding
            # beyond the 28-byte fixed structure).
            Header.write(pack('=I', 0x20))
            HdrSize = 0x20
        # Accumulate EFI capsule flag bits from the comma-separated FDF token.
        Flags = 0
        if 'CAPSULE_FLAGS' in self.TokensDict:
            for flag in self.TokensDict['CAPSULE_FLAGS'].split(','):
                flag = flag.strip()
                if flag == 'PopulateSystemTable':
                    # PopulateSystemTable implies PersistAcrossReset.
                    Flags |= 0x00010000 | 0x00020000
                elif flag == 'PersistAcrossReset':
                    Flags |= 0x00010000
                elif flag == 'InitiateReset':
                    Flags |= 0x00040000
        Header.write(pack('=I', Flags))
        #
        # typedef struct {
        #     UINT32 Version;
        #     UINT16 EmbeddedDriverCount;
        #     UINT16 PayloadItemCount;
        #     // UINT64 ItemOffsetList[];
        # } EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER;
        #
        FwMgrHdr = BytesIO()
        if 'CAPSULE_HEADER_INIT_VERSION' in self.TokensDict:
            FwMgrHdr.write(pack('=I', int(self.TokensDict['CAPSULE_HEADER_INIT_VERSION'], 16)))
        else:
            FwMgrHdr.write(pack('=I', 0x00000001))
        FwMgrHdr.write(pack('=HH', len(self.CapsuleDataList), len(self.FmpPayloadList)))
        # Fixed part (4+2+2) plus one UINT64 offset per driver and payload.
        FwMgrHdrSize = 4+2+2+8*(len(self.CapsuleDataList)+len(self.FmpPayloadList))

        #
        # typedef struct _WIN_CERTIFICATE {
        #   UINT32 dwLength;
        #   UINT16 wRevision;
        #   UINT16 wCertificateType;
        # //UINT8 bCertificate[ANYSIZE_ARRAY];
        # } WIN_CERTIFICATE;
        #
        # typedef struct _WIN_CERTIFICATE_UEFI_GUID {
        #   WIN_CERTIFICATE Hdr;
        #   EFI_GUID        CertType;
        # //UINT8 CertData[ANYSIZE_ARRAY];
        # } WIN_CERTIFICATE_UEFI_GUID;
        #
        # typedef struct {
        #   UINT64                    MonotonicCount;
        #   WIN_CERTIFICATE_UEFI_GUID AuthInfo;
        # } EFI_FIRMWARE_IMAGE_AUTHENTICATION;
        #
        # typedef struct _EFI_CERT_BLOCK_RSA_2048_SHA256 {
        #   EFI_GUID HashType;
        #   UINT8 PublicKey[256];
        #   UINT8 Signature[256];
        # } EFI_CERT_BLOCK_RSA_2048_SHA256;
        #

        # PreSize tracks each item's offset (relative to the FW-management
        # header) as drivers and payloads are appended to Content.
        PreSize = FwMgrHdrSize
        Content = BytesIO()
        # Embedded drivers first: write each one's offset into the header's
        # ItemOffsetList and its bytes into the content stream.
        for driver in self.CapsuleDataList:
            FileName = driver.GenCapsuleSubItem()
            FwMgrHdr.write(pack('=Q', PreSize))
            PreSize += os.path.getsize(FileName)
            File = open(FileName, 'rb')
            Content.write(File.read())
            File.close()
        for fmp in self.FmpPayloadList:
            if fmp.Existed:
                # Payload already generated on a previous pass; just record
                # its offset and reuse the cached buffer.
                FwMgrHdr.write(pack('=Q', PreSize))
                PreSize += len(fmp.Buffer)
                Content.write(fmp.Buffer)
                continue
            if fmp.ImageFile:
                for Obj in fmp.ImageFile:
                    fmp.ImageFile = Obj.GenCapsuleSubItem()
            if fmp.VendorCodeFile:
                for Obj in fmp.VendorCodeFile:
                    fmp.VendorCodeFile = Obj.GenCapsuleSubItem()
            if fmp.Certificate_Guid:
                # Payload must be signed: locate the external signing tool
                # registered for this certificate GUID and run it.
                ExternalTool, ExternalOption = FindExtendTool([], GenFdsGlobalVariable.ArchList, fmp.Certificate_Guid)
                CmdOption = ''
                CapInputFile = fmp.ImageFile
                if not os.path.isabs(fmp.ImageFile):
                    CapInputFile = os.path.join(GenFdsGlobalVariable.WorkSpaceDir, fmp.ImageFile)
                CapOutputTmp = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName) + '.tmp'
                if ExternalTool is None:
                    EdkLogger.error("GenFds", GENFDS_ERROR, "No tool found with GUID %s" % fmp.Certificate_Guid)
                else:
                    CmdOption += ExternalTool
                if ExternalOption:
                    CmdOption = CmdOption + ' ' + ExternalOption
                CmdOption += ' -e ' + ' --monotonic-count ' + str(fmp.MonotonicCount) + ' -o ' + CapOutputTmp + ' ' + CapInputFile
                CmdList = CmdOption.split()
                GenFdsGlobalVariable.CallExternalTool(CmdList, "Failed to generate FMP auth capsule")
                # Authentication blob length differs by certificate type:
                # PKCS7 size is whatever the tool added; RSA2048-SHA256 is a
                # fixed-layout structure.
                if uuid.UUID(fmp.Certificate_Guid) == EFI_CERT_TYPE_PKCS7_GUID:
                    dwLength = 4 + 2 + 2 + 16 + os.path.getsize(CapOutputTmp) - os.path.getsize(CapInputFile)
                else:
                    dwLength = 4 + 2 + 2 + 16 + 16 + 256 + 256
                fmp.ImageFile = CapOutputTmp
                AuthData = [fmp.MonotonicCount, dwLength, WIN_CERT_REVISION, WIN_CERT_TYPE_EFI_GUID, fmp.Certificate_Guid]
                fmp.Buffer = fmp.GenCapsuleSubItem(AuthData)
            else:
                fmp.Buffer = fmp.GenCapsuleSubItem()
            FwMgrHdr.write(pack('=Q', PreSize))
            PreSize += len(fmp.Buffer)
            Content.write(fmp.Buffer)
        BodySize = len(FwMgrHdr.getvalue()) + len(Content.getvalue())
        Header.write(pack('=I', HdrSize + BodySize))
        #
        # The real capsule header structure is 28 bytes
        #
        # NOTE(review): this writes a *str* into a BytesIO; on Python 3 it
        # raises TypeError -- presumably should be b'\x00'*(HdrSize-28).
        # Confirm against the interpreter this build targets (py2 vs py3).
        Header.write('\x00'*(HdrSize-28))
        Header.write(FwMgrHdr.getvalue())
        Header.write(Content.getvalue())
        #
        # Generate FMP capsule file
        #
        CapOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName) + '.Cap'
        SaveFileOnChange(CapOutputFile, Header.getvalue(), True)
        return CapOutputFile

    ## Generate capsule
    #
    # Dispatches to GenFmpCapsule when the FDF declares the FMP capsule
    # GUID; otherwise writes a GenFv .inf description and invokes GenFv.
    # Results are memoized in GenFdsGlobalVariable.ImageBinDict.
    #
    #   @param  self        The object pointer
    #   @retval string      Generated Capsule file path
    #
    def GenCapsule(self):
        if self.UiCapsuleName.upper() + 'cap' in GenFdsGlobalVariable.ImageBinDict:
            return GenFdsGlobalVariable.ImageBinDict[self.UiCapsuleName.upper() + 'cap']

        GenFdsGlobalVariable.InfLogger( "\nGenerate %s Capsule" %self.UiCapsuleName)
        if ('CAPSULE_GUID' in self.TokensDict and
            uuid.UUID(self.TokensDict['CAPSULE_GUID']) == uuid.UUID('6DCBD5ED-E82D-4C44-BDA1-7194199AD92A')):
            return self.GenFmpCapsule()

        # NOTE(review): CapInfFile is a BytesIO (see GenCapInf) but receives
        # str via writelines -- another py3 str/bytes hazard; verify.
        CapInfFile = self.GenCapInf()
        CapInfFile.writelines("[files]" + TAB_LINE_BREAK)
        CapFileList = []
        for CapsuleDataObj in self.CapsuleDataList:
            # CapsuleName is threaded through the data object only for the
            # duration of its sub-item generation.
            CapsuleDataObj.CapsuleName = self.CapsuleName
            FileName = CapsuleDataObj.GenCapsuleSubItem()
            CapsuleDataObj.CapsuleName = None
            CapFileList.append(FileName)
            CapInfFile.writelines("EFI_FILE_NAME = " + \
                                   FileName      + \
                                   TAB_LINE_BREAK)
        SaveFileOnChange(self.CapInfFileName, CapInfFile.getvalue(), False)
        CapInfFile.close()
        #
        # Call GenFv tool to generate capsule
        #
        CapOutputFile = os.path.join(GenFdsGlobalVariable.FvDir, self.UiCapsuleName)
        CapOutputFile = CapOutputFile + '.Cap'
        GenFdsGlobalVariable.GenerateFirmwareVolume(
                                CapOutputFile,
                                [self.CapInfFileName],
                                Capsule=True,
                                FfsList=CapFileList
                                )

        GenFdsGlobalVariable.VerboseLogger( "\nGenerate %s Capsule Successfully" %self.UiCapsuleName)
        GenFdsGlobalVariable.SharpCounter = 0
        GenFdsGlobalVariable.ImageBinDict[self.UiCapsuleName.upper() + 'cap'] = CapOutputFile
        return CapOutputFile

    ## Generate inf file for capsule
    #
    # Writes the [options] section (EFI_* tokens from the FDF) into an
    # in-memory stream; GenCapsule appends the [files] section and saves it.
    #
    #   @param  self        The object pointer
    #   @retval file        inf file object
    #
    def GenCapInf(self):
        self.CapInfFileName = os.path.join(GenFdsGlobalVariable.FvDir,
                                   self.UiCapsuleName +  "_Cap" + '.inf')
        CapInfFile = BytesIO() #open (self.CapInfFileName , 'w+')
        CapInfFile.writelines("[options]" + TAB_LINE_BREAK)
        for Item in self.TokensDict:
            CapInfFile.writelines("EFI_"                    + \
                                  Item                      + \
                                  ' = '                     + \
                                  self.TokensDict[Item]     + \
                                  TAB_LINE_BREAK)
        return CapInfFile
| MattDevo/edk2 | BaseTools/Source/Python/GenFds/Capsule.py | Python | bsd-2-clause | 10,886 |
# Inviwo Python script
import inviwo
# Load the transfer function saved under <data>/transferfunction.itf into the
# SimpleRaycaster processor's "transferFunction" property.
inviwo.loadTransferFunction("SimpleRaycaster.transferFunction",inviwo.getDataPath() + "transferfunction.itf")
import sys
import time
from mpi4py.futures import MPICommExecutor
# Viewing window in the complex plane: x in [-2, 2], y in [-1.5, 1.5].
x0 = -2.0
x1 = +2.0
y0 = -1.5
y1 = +1.5

# Output image size in pixels (width x height).
w = 1600
h = 1200

# Distance between adjacent pixel samples along each axis.
dx = (x1 - x0) / w
dy = (y1 - y0) / h
def julia(x, y):
    """Escape-time count (255 down to 1) for point (x, y), c = 0.65i.

    Iterates z <- z**2 + 0.65i starting from x + yi; stops when |z| >= 3
    or after 254 iterations.  Larger return values mean faster escape.
    """
    z = complex(x, y)
    remaining = 255
    while remaining > 1 and abs(z) < 3:
        z = z * z + complex(0, 0.65)
        remaining -= 1
    return remaining
def julia_line(k):
    """Render scanline *k* as a width-w bytearray of escape counts."""
    y = y1 - k * dy
    return bytearray(julia(x0 + j * dx, y) for j in range(w))
def plot(image):
    """Display *image* with matplotlib, if it is installed.

    Best-effort: silently returns when matplotlib is missing, and ignores
    backend drawing errors (e.g. no display available).
    """
    import warnings
    warnings.simplefilter('ignore', UserWarning)
    try:
        from matplotlib import pyplot as plt
    except ImportError:
        return
    plt.figure()
    plt.imshow(image, aspect='equal', cmap='cubehelix')
    plt.axis('off')
    try:
        plt.draw()
        plt.pause(2)
    except Exception:
        # BUGFIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only ignore ordinary errors.
        pass
def test_julia():
    """Render the Julia image across MPI ranks and report the timing.

    Worker ranks get executor=None and simply return; the master maps
    julia_line over all scanlines and optionally plots the result.
    """
    with MPICommExecutor() as executor:
        if executor is None:
            return  # worker process
        start = time.time()
        image = list(executor.map(julia_line, range(h), chunksize=10))
        elapsed = time.time() - start
        print("%s Set %dx%d in %.2f seconds." % ('Julia', w, h, elapsed))
        if len(sys.argv) > 1 and sys.argv[1] == '-plot':
            plot(image)
# Entry point: every MPI rank calls test_julia(); worker ranks park inside
# MPICommExecutor until the master hands out scanlines.
if __name__ == '__main__':
    test_julia()
| mpi4py/mpi4py | demo/futures/run_julia.py | Python | bsd-2-clause | 1,252 |
"""
The GUI to QCRI.
"""
# pylint: disable=I0011, no-member, missing-docstring
import threading
import logging
from sys import version_info
import pythoncom
from qcri.application import importer
from qcri.application import qualitycenter
# pylint: disable=I0011, import-error
if version_info.major == 2:
import Tkinter as tk
import tkMessageBox as messagebox
import tkFileDialog as filedialog
import ttk
import Queue as queue
elif version_info.major == 3:
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
from tkinter import ttk
import queue
LOG = logging.getLogger(__name__)
def work_in_background(tk_, func, callback=None):
    """
    Run *func* on a worker thread while a BusyWindow blocks the UI.

    Polls a queue from the Tk event loop every 100 ms; once the worker
    signals completion the busy window is destroyed and *callback* (if
    given) is invoked on the UI thread.
    """
    busy = BusyWindow()
    finished = queue.Queue()

    def _worker():
        func()
        finished.put(True)

    def _poll():
        try:
            finished.get_nowait()
        except queue.Empty:
            tk_.after(100, _poll)
        else:
            busy.destroy()
            if callback is not None:
                callback()

    threading.Thread(target=_worker).start()
    tk_.after(100, _poll)
def center(widget, width, height):
    """
    Size *widget* to width x height and place it mid-screen.
    """
    screen_w = widget.winfo_screenwidth()
    screen_h = widget.winfo_screenheight()
    left = int(screen_w / 2 - width / 2)
    top = int(screen_h / 2 - height / 2)
    widget.geometry('%dx%d+%d+%d' % (width, height, left, top))
# todo: add <rightclick> <selectall>
# todo: add <rightclick> <selectall>
class QcriGui(tk.Tk):
    """
    The main window: a local test-results pane and a Quality Center pane.
    """

    def __init__(self, cfg):
        tk.Tk.__init__(self)
        self.cfg = cfg  # ConfigParser
        self.qcc = None  # the Quality Center connection
        self.valid_parsers = {}
        self._cached_tests = {}  # for the treeview
        # BUGFIX: was ``self._results`` which nothing read; the rest of the
        # class uses ``self.results`` (set in _on_parser_changed, read in
        # _on_upload_btn_clicked), so give that attribute the safe default.
        self.results = {}  # parsed test results
        self.dir_dict = {}  # treeview row id -> QC folder path
        self.bug_dict = {}
        self.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.title('QC Results Importer')
        center(self, 1200, 700)
        # tkinter widgets
        self.menubar = None
        self.remote_path = None
        self.choose_parser = None
        self.choose_results_button = None
        self.qcdir_tree = None
        self.upload_button = None
        self.choose_results_entry = None
        self.runresults_tree = None
        self.runresultsview = None
        self.header_frame = None
        self.qc_connected_frm = None
        self.qc_disconnected_frm = None
        self.link_bug = None
        self.qc_domain = tk.StringVar()
        self.attach_report = tk.IntVar()
        self.qc_project = tk.StringVar()
        self.runresultsvar = tk.StringVar()
        self.qc_conn_status = tk.BooleanVar()
        # build the gui
        self._make()

    def on_closing(self):
        """
        Called when the window is closed: drop the QC session, then exit.
        """
        self.disconnect_qc()
        self.destroy()

    def disconnect_qc(self):
        """
        Release the QC connection and flag the UI as disconnected.
        """
        qualitycenter.disconnect(self.qcc)
        self.qc_conn_status.set(False)

    def _make(self):
        """Assemble the two resizable panes inside the main frame."""
        main_frm = tk.Frame(self)
        full_pane = tk.PanedWindow(
            main_frm, orient=tk.HORIZONTAL, sashpad=4, sashrelief=tk.RAISED)
        local_pane = self._create_local_pane(full_pane)
        remote_pane = self._create_remote_pane(full_pane)
        full_pane.add(local_pane)
        full_pane.add(remote_pane)
        full_pane.paneconfigure(local_pane, sticky='nsew', minsize=400)
        full_pane.paneconfigure(remote_pane, sticky='nsew', minsize=400)
        full_pane.grid(row=1, column=0, sticky='nsew', padx=10, pady=10)
        main_frm.grid(row=0, column=0, sticky='nsew', padx=5, pady=5)
        main_frm.rowconfigure(1, weight=1)
        main_frm.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

    def _create_local_pane(self, full_pane):
        """Build the left pane: results file chooser, parser combo, view."""
        local_pane = tk.LabelFrame(full_pane, text='Test Results')
        self.choose_results_button = tk.Button(
            local_pane,
            text='Results',
            width=15,
            command=self._load_run_results)
        self.choose_results_button.grid(
            row=0, column=0, sticky='ew', padx=10, pady=5)
        self.choose_results_entry = tk.Entry(
            local_pane, state='disabled', textvariable=self.runresultsvar)
        self.choose_results_entry.grid(
            row=0, column=1, sticky='nsew', padx=10, pady=5)
        self.choose_parser = ttk.Combobox(
            local_pane, show='', state='disabled')
        self.choose_parser.bind(
            '<<ComboboxSelected>>', self._on_parser_changed)
        self.choose_parser.grid(
            row=1, column=0, columnspan=2, sticky='nsew', padx=10, pady=7)
        self.runresultsview = TestResultsView(
            local_pane, on_selected=self._on_test_result_selected)
        self.runresultsview.grid(
            row=2, column=0, columnspan=2, sticky='nsew', padx=10, pady=5)
        self.runresultsview.rowconfigure(0, weight=1)
        self.runresultsview.columnconfigure(0, weight=1)
        local_pane.rowconfigure(2, weight=1)
        local_pane.columnconfigure(1, weight=1)
        local_pane.config(padx=10)
        return local_pane

    def _on_qc_conn_status_changed(self, *_):
        """Swap the header frame and enable/disable upload on (dis)connect."""
        if self.qc_conn_status.get():
            self.qc_connected_frm.tkraise()
            self.upload_button.config(state=tk.NORMAL)
        else:
            self.qc_disconnected_frm.tkraise()
            self.upload_button.config(state=tk.DISABLED)
            for row in self.qcdir_tree.get_children():
                self.qcdir_tree.delete(row)
        # we didn't change selection, but fire off the events
        self._on_test_result_selected(None)

    def _create_remote_pane(self, parent):
        """Build the right pane: connect header, import controls, QC tree."""
        remote_pane = tk.LabelFrame(parent, text='Quality Center')
        self.header_frame = tk.Frame(remote_pane)

        # QC Disconnected Frame
        self.qc_disconnected_frm = tk.Frame(self.header_frame)
        if self.cfg.getboolean('main', 'history'):
            hist = importer.load_history()
        else:
            hist = None
        qc_connect_button = tk.Button(
            self.qc_disconnected_frm,
            text='Connect',
            command=lambda: LoginWindow(self.login_callback, hist),
            width=15)
        qc_connect_button.grid(row=0, column=0, sticky='ew', pady=5)
        self.qc_disconnected_frm.grid(row=0, column=0, sticky='nsew')

        # QC Connected Frame
        self.qc_connected_frm = tk.Frame(self.header_frame)
        qc_disconnect_button = tk.Button(
            self.qc_connected_frm, text='Disconnect',
            command=self.disconnect_qc, width=15)
        qc_disconnect_button.grid(
            row=0, column=0, sticky='ew', padx=(0, 10), pady=5)
        domain_label = tk.Label(
            self.qc_connected_frm, text='Domain:', font=('sans-serif 10 bold'))
        domain_label.grid(row=0, column=1)
        domain_val_lbl = tk.Label(
            self.qc_connected_frm, textvariable=self.qc_domain)
        domain_val_lbl.grid(row=0, column=2, sticky='w', padx=10)
        project_label = tk.Label(
            # BUGFIX: font family was misspelled 'sans-seif'.
            self.qc_connected_frm, text='Project:', font=('sans-serif 10 bold'))
        project_label.grid(row=0, column=3)
        project_val_lbl = tk.Label(
            self.qc_connected_frm, textvariable=self.qc_project)
        project_val_lbl.grid(row=0, column=4, sticky='w', padx=10)
        self.qc_connected_frm.columnconfigure(4, weight=1)
        self.qc_connected_frm.grid(row=0, column=0, sticky='nsew')

        # raise the disconnected frame first
        self.qc_disconnected_frm.tkraise()
        self.qc_conn_status.trace('w', self._on_qc_conn_status_changed)
        self.header_frame.grid(row=0, column=0, sticky='nsew', padx=10)

        # Upload Controls
        upload_frm = tk.Frame(remote_pane)
        self.attach_report.set(1)
        attach_report_chkbox = tk.Checkbutton(
            upload_frm, text='Attach Report', variable=self.attach_report)
        attach_report_chkbox.grid(row=0, column=2, sticky='e')
        self.link_bug = tk.Button(
            upload_frm,
            text='Link Bugs',
            width=15,
            command=self._on_link_bugs_clicked,
            state=tk.DISABLED)
        self.link_bug.grid(row=0, column=0, sticky='w')
        self.upload_button = tk.Button(
            upload_frm,
            text='Import',
            command=self._on_upload_btn_clicked,
            state=tk.DISABLED)
        self.upload_button.grid(row=0, column=1, sticky='ew', padx=10)
        upload_frm.columnconfigure(1, weight=1)
        upload_frm.grid(row=1, column=0, sticky='nsew', padx=10, pady=5)

        # QC Directory
        qcdir_tree_frame = tk.Frame(remote_pane)
        self.qcdir_tree = ttk.Treeview(qcdir_tree_frame, selectmode='browse')
        self.qcdir_tree.heading('#0', text='Test Lab', anchor='center')
        self.qcdir_tree.bind('<Button-3>', self._on_right_click_qc_tree)
        self.qcdir_tree.bind('<<TreeviewOpen>>', self._on_branch_opened)
        self.qcdir_tree.grid(row=0, column=0, sticky='nsew')
        ysb = ttk.Scrollbar(
            qcdir_tree_frame, orient='vertical', command=self.qcdir_tree.yview)
        ysb.grid(row=0, column=1, sticky='ns')
        self.qcdir_tree.configure(yscroll=ysb.set)
        qcdir_tree_frame.columnconfigure(0, weight=1)
        qcdir_tree_frame.rowconfigure(0, weight=1)
        qcdir_tree_frame.grid(row=2, column=0, sticky='nsew', padx=10, pady=5)
        remote_pane.columnconfigure(0, weight=1)
        remote_pane.rowconfigure(2, weight=1)
        return remote_pane

    def _on_right_click_qc_tree(self, event):
        """Context menu on the QC tree (Refresh), only while connected."""
        if not self.qc_conn_status.get():
            return
        menu = tk.Menu(self, tearoff=0)
        menu.add_command(label='Refresh', command=self.refresh_qc_directories)
        menu.post(event.x_root, event.y_root)

    def _load_run_results(self):
        """Ask for a results file and populate the parser combobox."""
        filename = filedialog.askopenfilename()
        if not filename:
            return
        self.runresultsvar.set(filename)
        valid_parsers = importer.get_parsers(filename, self.cfg)
        if not valid_parsers:
            messagebox.showerror(
                'Unknown Format', 'Unable to parse this file. '
                'View log for details.')
            self.choose_parser['values'] = ['']
            self.choose_parser.current(0)
            self.choose_parser.event_generate('<<ComboboxSelected>>')
            return
        self.valid_parsers = {p.__name__: p for p in valid_parsers}
        self.choose_parser['values'] = list(self.valid_parsers.keys())
        if len(valid_parsers) > 1:
            self.choose_parser.config(state='enabled')
        self.choose_parser.current(0)
        self.choose_parser.event_generate('<<ComboboxSelected>>')

    def _on_parser_changed(self, dummy_event=None):
        """Re-parse the chosen file with the selected parser and show it."""
        filepath = self.runresultsvar.get()
        if not filepath:
            self.runresultsview.clear()
            self.runresultsview.refresh()
            return
        parser_name = self.choose_parser.get()
        if not parser_name:
            self.runresultsview.clear()
            self.runresultsview.refresh()
            return
        parser = self.valid_parsers[parser_name]
        try:
            self.results = importer.parse_results(parser, filepath, self.cfg)
        except importer.ParserError as ex:
            messagebox.showerror(
                'Parser Error', 'An error occurred while parsing. '
                'View log for details.')
            LOG.exception(ex)
            # BUGFIX: on a parse failure self.results is unset or stale, so
            # bail out instead of populating the view below.  (Also removed
            # an unused ``results = []`` local.)
            return
        self.runresultsview.populate(self.results['tests'])

    def _on_test_result_selected(self, dummy_event=None):
        """Enable 'Link Bugs' only for failed tests while connected."""
        has_failed_test = self.runresultsview.get_selection(failed=True)
        connected_to_qc = self.qc_conn_status.get()
        if has_failed_test and connected_to_qc:
            self.link_bug.config(state=tk.NORMAL)
        else:
            self.link_bug.config(state=tk.DISABLED, fg='black')

    def refresh_qc_directories(self):
        """
        Refresh the QC directory tree in background.
        """
        def _():
            for child in self.qcdir_tree.get_children():
                self.qcdir_tree.delete(child)
            root_ = self.qcc.TestSetTreeManager.Root
            subdirs = qualitycenter.get_subdirectories(root_)
            self.dir_dict.clear()
            for node in subdirs:
                idx = self.qcdir_tree.insert('', 'end', text=node.Name)
                self.dir_dict[idx] = node.Path
                subsubdirs = qualitycenter.get_subdirectories(node)
                if subsubdirs:
                    # placeholder child so the branch shows an expander
                    self.qcdir_tree.insert(idx, 'end', text='Fetching...')
        work_in_background(self, _)

    def _on_branch_opened(self, dummy_event):
        """Lazily fetch a branch's subfolders the first time it is opened."""
        selection = self.qcdir_tree.selection()
        if not selection:
            return
        selected_idx = selection[0]
        children = self.qcdir_tree.get_children(selected_idx)
        if not children:
            return
        child = self.qcdir_tree.item(children[0])
        if child['text'] == 'Fetching...':
            def refresh(parent_idx):
                fldr = self.dir_dict[parent_idx]
                node = qualitycenter.get_qc_folder(self.qcc, fldr, create=False)
                subdirs = qualitycenter.get_subdirectories(node)
                for child in self.qcdir_tree.get_children(parent_idx):
                    self.qcdir_tree.delete(child)
                for node in subdirs:
                    idx = self.qcdir_tree.insert(parent_idx, 'end', text=node.Name)
                    self.dir_dict[idx] = node.Path
                    subsubdirs = qualitycenter.get_subdirectories(node)
                    if subsubdirs:
                        self.qcdir_tree.insert(idx, 'end', text='Fetching...')
            work_in_background(self, lambda: refresh(selected_idx))

    def select_run_result(self):
        """Placeholder; selection is handled via the treeview bindings."""
        pass

    def _on_link_bugs_clicked(self):
        """Open the bug-linking window for the selected failed tests."""
        failed_tests = self.runresultsview.get_selection(failed=True)
        if len(failed_tests) == 0:
            messagebox.showerror('Error', 'No failed tests in selection.')
            return
        BugWindow(self.qcc, failed_tests, self.runresultsview.refresh)

    def _on_upload_btn_clicked(self):
        """Validate selections and import the chosen tests into QC."""
        selected_rows = self.runresultsview.get_selection()
        if len(selected_rows) == 0:
            messagebox.showerror('Error', 'No tests selected.')
            return
        selected_qc_dir = self.qcdir_tree.selection()
        if len(selected_qc_dir) != 1:
            messagebox.showerror('Error', 'Destination not selected.')
            return
        qcdir = self.dir_dict[selected_qc_dir[0]]
        if not qcdir:
            messagebox.showerror('Error', 'path is blank')
            return
        assert qcdir.startswith('Root\\'), qcdir
        # remove "Root\"
        qcdir = qcdir[5:]
        results = self.results.copy()
        results['tests'] = [self.runresultsview.tests[row]
                            for row in selected_rows]
        result = messagebox.askyesno(
            'Confirm',
            ('Are you sure you want to upload to the following '
             'location?\n\n{}'.format(qcdir)))
        if not result:
            return
        work_in_background(
            self,
            lambda: importer.import_results(
                self.qcc,
                qcdir,
                results,
                self.attach_report.get()),
            lambda: messagebox.showinfo('Success', 'Import complete.'))

    def login_callback(self, logincfg):
        """
        Called by LoginWindow; returns True when the connection succeeds.
        """
        use_history = self.cfg.getboolean('main', 'history')
        if use_history:
            hist = importer.load_history()
            importer.update_history(hist, logincfg)
        try:
            qcc = qualitycenter.connect(**logincfg)
        except pythoncom.com_error as ex:
            messagebox.showerror('Unable to Connect',
                                 'Error Details:\n\n{}'.format(ex))
            return False
        self.qcc = qcc
        self.qc_domain.set(logincfg['domain'])
        self.qc_project.set(logincfg['project'])
        self.qc_conn_status.set(True)
        self.refresh_qc_directories()
        return True
class LoginWindow(tk.Toplevel):
    """
    The login window: collects URL/credentials/domain/project and hands
    them to *callback*; closes itself when the callback reports success.
    """

    def __init__(self, callback=None, history=None):
        tk.Toplevel.__init__(self)
        self.callback = callback
        self.history = history or {}  # field name -> list of past values
        self.title('QC Log In')
        self.url = None
        self.username = None
        self.password = None
        self.domain = None
        self.project = None
        center(self, 300, 300)
        self._make()

    def _make_combo(self, frame, text):
        """Create a labelled combobox pre-filled from login history."""
        tk.Label(frame, text='{}:'.format(text)).pack(side=tk.TOP)
        cbo = ttk.Combobox(frame, width=16, show='')
        cbo.pack(side=tk.TOP, padx=10, fill=tk.BOTH)
        cbo.bind('<Return>', self.check_password)
        cbo['values'] = self.history.get(text.lower(), [])
        if cbo['values']:
            # default to the most recently used value
            cbo.set(cbo['values'][-1])
        return cbo

    def _make(self):
        """Build the form and focus the first field missing a value."""
        rootfrm = tk.Frame(self, padx=10, pady=10)
        rootfrm.pack(fill=tk.BOTH, expand=True)
        self.url = self._make_combo(rootfrm, 'URL')
        self.username = self._make_combo(rootfrm, 'Username')
        tk.Label(rootfrm, text='Password:').pack(side=tk.TOP)
        self.password = tk.Entry(rootfrm, width=16, show='*')
        self.password.pack(side=tk.TOP, padx=10, fill=tk.BOTH)
        self.domain = self._make_combo(rootfrm, 'Domain')
        self.project = self._make_combo(rootfrm, 'Project')
        loginbtn = tk.Button(
            rootfrm, text="Login", width=10, pady=8,
            command=self.check_password)
        loginbtn.pack(side=tk.BOTTOM)
        self.password.bind('<Return>', self.check_password)
        loginbtn.bind('<Return>', self.check_password)
        # Checks run bottom-up so the *earliest* empty field ends up focused.
        focus = self.password
        if not self.project.get():
            focus = self.project
        if not self.domain.get():
            focus = self.domain
        if not self.username.get():
            focus = self.username
        if not self.url.get():
            focus = self.url
        focus.focus()
        self.grab_set()

    def check_password(self, dummy_event=None):
        """
        Verify their QC password.
        """
        logincfg = {
            'url': self.url.get(),
            'domain': self.domain.get(),
            'project': self.project.get(),
            'username': self.username.get(),
            'password': self.password.get()
        }
        # BUGFIX: was ``any(logincfg.items())`` -- .items() yields non-empty
        # (key, value) tuples which are always truthy, so the guard never
        # fired.  Check the values so a completely empty form is ignored.
        if not any(logincfg.values()):
            return
        if self.callback(logincfg):
            self.destroy()
            self.grab_release()
class BugWindow(tk.Toplevel):
    """
    Modal window for linking failed test results to QC defects.

    Left pane lists the failed tests; right pane lists open QC bugs.
    'Link Bug' attaches the selected bug id to the selected test and
    invokes *callback* so the parent view refreshes.
    """

    def __init__(self, qcc, test_results, callback):
        tk.Toplevel.__init__(self)
        center(self, 900, 600)
        self.qcc = qcc
        self.callback = callback
        self._test_cache = {}  # treeview row id -> test dict
        self._bug_cache = {}   # treeview row id -> bug id
        self._make()
        self.populate_tests(test_results)
        self.refresh_qc_bugs()
        self.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.grab_set()

    def on_closing(self):
        """Tear down the window and release the modal grab."""
        self.destroy()
        self.grab_release()

    def _make(self):
        """Build the two-pane layout and the Link Bug button."""
        main_frm = tk.PanedWindow(
            self,
            borderwidth=1,
            orient=tk.HORIZONTAL,
            sashpad=4,
            sashrelief=tk.RAISED)

        left_frm = tk.Frame(main_frm)
        test_tree_frm = tk.Frame(left_frm)
        self.test_tree = ttk.Treeview(
            test_tree_frm, selectmode='browse')
        self.test_tree['show'] = 'headings'
        self.test_tree['columns'] = ('subject', 'tests', 'step', 'bug')
        self.test_tree.heading('subject', text='Subject')
        self.test_tree.heading('tests', text='Test')
        self.test_tree.heading('step', text='Failed Step')
        self.test_tree.heading('bug', text='Bug')
        self.test_tree.column('subject', width=60)
        self.test_tree.column('tests', width=150)
        self.test_tree.column('step', width=40)
        self.test_tree.column('bug', width=10)
        ysb = ttk.Scrollbar(
            test_tree_frm, orient='vertical', command=self.test_tree.yview)
        self.test_tree.grid(row=0, column=0, sticky='nsew')
        ysb.grid(row=0, column=1, sticky='ns')
        self.test_tree.configure(yscroll=ysb.set)
        test_tree_frm.columnconfigure(0, weight=1)
        test_tree_frm.rowconfigure(0, weight=1)
        test_tree_frm.grid(row=0, column=0, sticky='nsew', padx=10, pady=10)
        left_frm.rowconfigure(0, weight=1)
        left_frm.columnconfigure(0, weight=1)
        main_frm.add(left_frm)

        right_frm = tk.Frame(main_frm)
        bug_tree_frame = tk.Frame(right_frm)
        self.bug_tree = ttk.Treeview(bug_tree_frame, selectmode='browse')
        self.bug_tree['show'] = 'headings'
        self.bug_tree['columns'] = (
            'bug', 'summary', 'status', 'detected_on')
        self.bug_tree.heading('bug', text='Bug', anchor='center')
        self.bug_tree.heading('summary', text='Summary', anchor='center')
        self.bug_tree.heading('status', text='Status', anchor='center')
        self.bug_tree.heading(
            'detected_on', text='Detection Date', anchor='center')
        self.bug_tree.column('bug', width=10)
        self.bug_tree.column('summary', width=50)
        self.bug_tree.column('status', width=10)
        self.bug_tree.column('detected_on', width=20)
        self.bug_tree.grid(row=0, column=0, sticky='nsew')
        ysb = ttk.Scrollbar(
            bug_tree_frame, orient='vertical', command=self.bug_tree.yview)
        ysb.grid(row=0, column=1, sticky='ns')
        self.bug_tree.configure(yscroll=ysb.set)
        bug_tree_frame.columnconfigure(0, weight=1)
        bug_tree_frame.rowconfigure(0, weight=1)
        bug_tree_frame.grid(row=0, column=0, sticky='nsew', padx=10, pady=10)
        right_frm.columnconfigure(0, weight=1)
        right_frm.rowconfigure(0, weight=1)
        main_frm.add(right_frm)

        main_frm.paneconfigure(left_frm, minsize=400)
        main_frm.paneconfigure(right_frm, minsize=400)
        main_frm.grid(row=0, column=0, sticky='nsew')
        self.link_bug_button = tk.Button(
            self, text='Link Bug', command=self.link_bug)
        self.link_bug_button.grid(
            row=1, column=0, sticky='ew', padx=10, pady=10)
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

    def populate_tests(self, tests):
        """Fill the left tree with one row per failed test."""
        self._test_cache.clear()
        for test in tests:
            failed_step = None
            for step in test['steps']:
                if step['status'] == 'Failed':
                    failed_step = step
                    break
            if not failed_step:
                LOG.error('failed step not found: %s', test)
                # BUGFIX: was ``return`` -- one malformed record aborted the
                # entire listing; skip just this test instead.
                continue
            idx = self.test_tree.insert('', 'end', values=(
                test['subject'],
                test['name'],
                failed_step['name'],
                test.get('bug', '-')))
            self._test_cache[idx] = test

    def refresh_qc_bugs(self):
        """Reload the right tree from the QC defect list."""
        for child in self.bug_tree.get_children():
            self.bug_tree.delete(child)
        bugs = qualitycenter.get_bugs(self.qcc)
        self._bug_cache.clear()
        for bug in bugs:
            idx = self.bug_tree.insert('', 'end', values=(
                bug['id'],
                bug['summary'],
                bug['status'],
                bug['detection_date']))
            self._bug_cache[idx] = bug['id']

    def link_bug(self):
        """Attach the selected bug id to the selected failed test."""
        sel = self.bug_tree.selection()
        if len(sel) != 1:
            return
        bug_rowidx = sel[0]
        bug = self._bug_cache[bug_rowidx]
        sel = self.test_tree.selection()
        if len(sel) != 1:
            return
        test_row = self.test_tree.item(sel[0])
        row_values = test_row['values']
        self.test_tree.item(sel[0], values=(
            row_values[0], row_values[1], row_values[2], bug))
        failed_test = self._test_cache[sel[0]]
        failed_test['bug'] = bug
        self.callback()
class BusyWindow(tk.Toplevel):
    """
    Undecorated modal popup shown while reading or writing Quality Center.
    """

    def __init__(self):
        tk.Toplevel.__init__(self)
        center(self, 100, 50)
        container = tk.Frame(self, padx=10, pady=10)
        busy_label = tk.Label(container, text='Busy')
        busy_label.pack(fill=tk.BOTH, expand=True)
        container.pack(fill=tk.BOTH, expand=True)
        self.config(borderwidth=2, relief=tk.RIDGE)
        self.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.grab_set()
        # strip window-manager decorations (no title bar / close button)
        self.overrideredirect(1)

    def on_closing(self):
        """Destroy the popup and release the modal grab."""
        self.destroy()
        self.grab_release()
class TestResultsView(tk.Frame):
    """
    A frame containing a summary of the parsed test results.

    Wraps a ttk.Treeview and caches the test dict behind each row so
    selections can be mapped back to result data.
    """

    def __init__(self, master, on_selected=None, **kwargs):
        tk.Frame.__init__(self, master, **kwargs)
        self._cache = {}  # treeview row id -> test dict
        self.tree = ttk.Treeview(self)
        self.tree['show'] = 'headings'
        self.tree['columns'] = ('subject', 'tests', 'status', 'bug')
        self.tree.heading('subject', text='Subject')
        self.tree.heading('tests', text='Test')
        self.tree.heading('status', text='Status')
        self.tree.heading('bug', text='Bug')
        self.tree.column('subject', width=60)
        self.tree.column('tests', width=150)
        self.tree.column('status', width=40)
        self.tree.column('bug', width=10)
        self.tree.bind('<<TreeviewSelect>>', on_selected)
        ysb = ttk.Scrollbar(self, orient='vertical', command=self.tree.yview)
        self.tree.grid(row=0, column=0, sticky='nsew')
        ysb.grid(row=0, column=1, sticky='ns')
        self.tree.configure(yscroll=ysb.set)

    @property
    def tests(self):
        """Mapping of treeview row id to its test-result dict."""
        return self._cache

    def clear(self):
        """Forget the cached tests (the tree itself is redrawn by populate)."""
        self._cache.clear()

    def get_selection(self, failed=False):
        """Return selected row ids; with failed=True, the failed test dicts."""
        selection = self.tree.selection()
        if not failed:
            return selection
        failed_tests = []
        for idx in selection:
            row = self.tree.item(idx)
            status = row['values'][2]
            if status == 'Failed':
                failed_tests.append(self._cache[idx])
        return failed_tests

    def refresh(self):
        """Redraw the tree from the cached tests."""
        # list(...) makes a copy: populate() clears self._cache while it runs.
        # (Replaces an identity list comprehension.)
        self.populate(list(self._cache.values()))

    def populate(self, tests):
        """Rebuild the tree rows and the cache from *tests*."""
        # clear the tree
        for idx in self.tree.get_children():
            self.tree.delete(idx)
        self._cache.clear()
        for test in tests:
            bug = test.get('bug', '')
            if not bug:
                # failed tests show a '-' placeholder until a bug is linked
                bug = '-' if test['status'] == 'Failed' else ''
            idx = self.tree.insert('', 'end', values=(
                test['subject'],
                test['name'],
                test['status'],
                bug))
            self._cache[idx] = test
| douville/qcri | qcri/application/gui.py | Python | bsd-2-clause | 27,582 |
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
# EDAM protocol version implemented by this generated client (1.20).
EDAM_VERSION_MAJOR = 1
EDAM_VERSION_MINOR = 20
| vinodc/evernote | src/evernote/edam/userstore/constants.py | Python | bsd-2-clause | 199 |
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas import (
Index,
NaT,
Timedelta,
TimedeltaIndex,
Timestamp,
notna,
timedelta_range,
to_timedelta,
)
import pandas._testing as tm
class TestGetItem:
    """Indexing (__getitem__ / get_loc key validation) on TimedeltaIndex."""

    def test_ellipsis(self):
        # GH#21282: idx[...] returns an equal but distinct index object.
        idx = timedelta_range("1 day", "31 day", freq="D", name="idx")

        result = idx[...]
        assert result.equals(idx)
        assert result is not idx

    def test_getitem_slice_keeps_name(self):
        # GH#4226: slicing preserves the index name.
        tdi = timedelta_range("1d", "5d", freq="H", name="timebucket")
        assert tdi[1:].name == tdi.name

    def test_getitem(self):
        # CLEANUP: removed a leftover single-element ``for idx in [idx1]:``
        # loop; the assertions run directly on one index.
        idx = timedelta_range("1 day", "31 day", freq="D", name="idx")

        result = idx[0]
        assert result == Timedelta("1 day")

        result = idx[0:5]
        expected = timedelta_range("1 day", "5 day", freq="D", name="idx")
        tm.assert_index_equal(result, expected)
        assert result.freq == expected.freq

        result = idx[0:10:2]
        expected = timedelta_range("1 day", "9 day", freq="2D", name="idx")
        tm.assert_index_equal(result, expected)
        assert result.freq == expected.freq

        result = idx[-20:-5:3]
        expected = timedelta_range("12 day", "24 day", freq="3D", name="idx")
        tm.assert_index_equal(result, expected)
        assert result.freq == expected.freq

        result = idx[4::-1]
        expected = TimedeltaIndex(
            ["5 day", "4 day", "3 day", "2 day", "1 day"], freq="-1D", name="idx"
        )
        tm.assert_index_equal(result, expected)
        assert result.freq == expected.freq

    @pytest.mark.parametrize(
        "key",
        [
            Timestamp("1970-01-01"),
            Timestamp("1970-01-02"),
            datetime(1970, 1, 1),
            Timestamp("1970-01-03").to_datetime64(),
            # non-matching NA values
            np.datetime64("NaT"),
        ],
    )
    def test_timestamp_invalid_key(self, key):
        # GH#20464: datetime-like keys must raise KeyError, not match.
        tdi = timedelta_range(0, periods=10)
        with pytest.raises(KeyError, match=re.escape(repr(key))):
            tdi.get_loc(key)
class TestGetLoc:
    """Tests for label-based position lookup (``get_loc``) on TimedeltaIndex."""
    @pytest.mark.filterwarnings("ignore:Passing method:FutureWarning")
    def test_get_loc(self):
        idx = to_timedelta(["0 days", "1 days", "2 days"])
        # Exact labels resolve under every fill method, regardless of the
        # key's representation (Timedelta, datetime.timedelta, or string).
        for method in [None, "pad", "backfill", "nearest"]:
            assert idx.get_loc(idx[1], method) == 1
            assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
            assert idx.get_loc(str(idx[1]), method) == 1
        # Zero tolerance, in several equivalent forms, still matches exactly.
        assert idx.get_loc(idx[1], "pad", tolerance=Timedelta(0)) == 1
        assert idx.get_loc(idx[1], "pad", tolerance=np.timedelta64(0, "s")) == 1
        assert idx.get_loc(idx[1], "pad", tolerance=timedelta(0)) == 1
        with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
            idx.get_loc(idx[1], method="nearest", tolerance="foo")
        # A list-like tolerance must match the (scalar) key's size.
        with pytest.raises(ValueError, match="tolerance size must match"):
            idx.get_loc(
                idx[1],
                method="nearest",
                tolerance=[
                    Timedelta(0).to_timedelta64(),
                    Timedelta(0).to_timedelta64(),
                ],
            )
        # An in-between key resolves per the requested fill method.
        for method, loc in [("pad", 1), ("backfill", 2), ("nearest", 1)]:
            assert idx.get_loc("1 day 1 hour", method) == loc
        # GH 16909
        assert idx.get_loc(idx[1].to_timedelta64()) == 1
        # GH 16896
        assert idx.get_loc("0 days") == 0
    def test_get_loc_nat(self):
        # All NA representations (NaT, None, float nan) resolve to the NaT slot.
        tidx = TimedeltaIndex(["1 days 01:00:00", "NaT", "2 days 01:00:00"])
        assert tidx.get_loc(NaT) == 1
        assert tidx.get_loc(None) == 1
        assert tidx.get_loc(float("nan")) == 1
        assert tidx.get_loc(np.nan) == 1
class TestGetIndexer:
    """Tests for TimedeltaIndex.get_indexer under the various fill methods."""
    def test_get_indexer(self):
        idx = to_timedelta(["0 days", "1 days", "2 days"])
        # Indexing an index against itself is the identity mapping.
        tm.assert_numpy_array_equal(
            idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
        )
        target = to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"])
        # Each fill method maps the in-between targets differently; -1 marks
        # "no position available" (nothing to pad from before the index).
        cases = [
            ("pad", [-1, 0, 1]),
            ("backfill", [0, 1, 2]),
            ("nearest", [0, 1, 1]),
        ]
        for method, positions in cases:
            tm.assert_numpy_array_equal(
                idx.get_indexer(target, method),
                np.array(positions, dtype=np.intp),
            )
        # With a tolerance, targets farther than 1 hour from any label get -1.
        res = idx.get_indexer(target, "nearest", tolerance=Timedelta("1 hour"))
        tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
class TestWhere:
def test_where_doesnt_retain_freq(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = [True, True, False]
expected = TimedeltaIndex([tdi[0], tdi[1], tdi[0]], freq=None, name="idx")
result = tdi.where(cond, tdi[::-1])
tm.assert_index_equal(result, expected)
def test_where_invalid_dtypes(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
tail = tdi[2:].tolist()
i2 = Index([NaT, NaT] + tail)
mask = notna(i2)
expected = Index([NaT.value, NaT.value] + tail, dtype=object, name="idx")
assert isinstance(expected[0], int)
result = tdi.where(mask, i2.asi8)
tm.assert_index_equal(result, expected)
ts = i2 + Timestamp.now()
expected = Index([ts[0], ts[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
per = (i2 + Timestamp.now()).to_period("D")
expected = Index([per[0], per[1]] + tail, dtype=object, name="idx")
result = tdi.where(mask, per)
tm.assert_index_equal(result, expected)
ts = Timestamp.now()
expected = Index([ts, ts] + tail, dtype=object, name="idx")
result = tdi.where(mask, ts)
tm.assert_index_equal(result, expected)
def test_where_mismatched_nat(self):
tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
cond = np.array([True, False, False])
dtnat = np.datetime64("NaT", "ns")
expected = Index([tdi[0], dtnat, dtnat], dtype=object, name="idx")
assert expected[2] is dtnat
result = tdi.where(cond, dtnat)
tm.assert_index_equal(result, expected)
class TestTake:
    """Tests for TimedeltaIndex.take: freq inference, kwargs, and fill values."""
    def test_take(self):
        # GH 10295
        idx1 = timedelta_range("1 day", "31 day", freq="D", name="idx")
        for idx in [idx1]:
            result = idx.take([0])
            assert result == Timedelta("1 day")
            result = idx.take([-1])
            assert result == Timedelta("31 day")
            # Evenly spaced positions preserve (or scale) the frequency.
            result = idx.take([0, 1, 2])
            expected = timedelta_range("1 day", "3 day", freq="D", name="idx")
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            result = idx.take([0, 2, 4])
            expected = timedelta_range("1 day", "5 day", freq="2D", name="idx")
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            # Descending, evenly spaced positions produce a negative freq.
            result = idx.take([7, 4, 1])
            expected = timedelta_range("8 day", "2 day", freq="-3D", name="idx")
            tm.assert_index_equal(result, expected)
            assert result.freq == expected.freq
            # Irregular positions lose the frequency entirely.
            result = idx.take([3, 2, 5])
            expected = TimedeltaIndex(["4 day", "3 day", "6 day"], name="idx")
            tm.assert_index_equal(result, expected)
            assert result.freq is None
            result = idx.take([-3, 2, 5])
            expected = TimedeltaIndex(["29 day", "3 day", "6 day"], name="idx")
            tm.assert_index_equal(result, expected)
            assert result.freq is None
    def test_take_invalid_kwargs(self):
        # take() mirrors np.take's errors for unsupported keyword arguments.
        idx = timedelta_range("1 day", "31 day", freq="D", name="idx")
        indices = [1, 6, 5, 9, 10, 13, 15, 3]
        msg = r"take\(\) got an unexpected keyword argument 'foo'"
        with pytest.raises(TypeError, match=msg):
            idx.take(indices, foo=2)
        msg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            idx.take(indices, out=indices)
        msg = "the 'mode' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            idx.take(indices, mode="clip")
    def test_take_equiv_getitem(self):
        # take() and fancy __getitem__ must produce identical results.
        tds = ["1day 02:00:00", "1 day 04:00:00", "1 day 10:00:00"]
        idx = timedelta_range(start="1d", end="2d", freq="H", name="idx")
        expected = TimedeltaIndex(tds, freq=None, name="idx")
        taken1 = idx.take([2, 4, 10])
        taken2 = idx[[2, 4, 10]]
        for taken in [taken1, taken2]:
            tm.assert_index_equal(taken, expected)
            assert isinstance(taken, TimedeltaIndex)
            assert taken.freq is None
            assert taken.name == expected.name
    def test_take_fill_value(self):
        # GH 12631
        idx = TimedeltaIndex(["1 days", "2 days", "3 days"], name="xxx")
        result = idx.take(np.array([1, 0, -1]))
        expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
        tm.assert_index_equal(result, expected)
        # fill_value
        # With allow_fill (the default) and a fill_value, -1 means "missing".
        result = idx.take(np.array([1, 0, -1]), fill_value=True)
        expected = TimedeltaIndex(["2 days", "1 days", "NaT"], name="xxx")
        tm.assert_index_equal(result, expected)
        # allow_fill=False
        # Here -1 reverts to meaning "last element".
        result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
        expected = TimedeltaIndex(["2 days", "1 days", "3 days"], name="xxx")
        tm.assert_index_equal(result, expected)
        msg = (
            "When allow_fill=True and fill_value is not None, "
            "all indices must be >= -1"
        )
        with pytest.raises(ValueError, match=msg):
            idx.take(np.array([1, 0, -2]), fill_value=True)
        with pytest.raises(ValueError, match=msg):
            idx.take(np.array([1, 0, -5]), fill_value=True)
        msg = "index -5 is out of bounds for (axis 0 with )?size 3"
        with pytest.raises(IndexError, match=msg):
            idx.take(np.array([1, -5]))
class TestMaybeCastSliceBound:
    """Slicing a TimedeltaIndex with an unparsable string must raise TypeError."""
    @pytest.fixture(params=["increasing", "decreasing", None])
    def monotonic(self, request):
        # Parametrizes the index ordering: sorted, reverse-sorted, or shuffled.
        return request.param
    @pytest.fixture
    def tdi(self, monotonic):
        tdi = timedelta_range("1 Day", periods=10)
        if monotonic == "decreasing":
            tdi = tdi[::-1]
        elif monotonic is None:
            # Shuffle to get a non-monotonic ordering.
            taker = np.arange(10, dtype=np.intp)
            np.random.shuffle(taker)
            tdi = tdi.take(taker)
        return tdi
    def test_maybe_cast_slice_bound_invalid_str(self, tdi):
        # test the low-level _maybe_cast_slice_bound and that we get the
        # expected exception+message all the way up the stack
        msg = (
            "cannot do slice indexing on TimedeltaIndex with these "
            r"indexers \[foo\] of type str"
        )
        with pytest.raises(TypeError, match=msg):
            tdi._maybe_cast_slice_bound("foo", side="left")
        with pytest.raises(TypeError, match=msg):
            tdi.get_slice_bound("foo", side="left")
        with pytest.raises(TypeError, match=msg):
            tdi.slice_locs("foo", None, None)
    def test_slice_invalid_str_with_timedeltaindex(
        self, tdi, frame_or_series, indexer_sl
    ):
        # Same failure surfaced through .loc / [] slicing on Series/DataFrame
        # (frame_or_series and indexer_sl are pandas conftest fixtures).
        obj = frame_or_series(range(10), index=tdi)
        msg = (
            "cannot do slice indexing on TimedeltaIndex with these "
            r"indexers \[foo\] of type str"
        )
        with pytest.raises(TypeError, match=msg):
            indexer_sl(obj)["foo":]
        with pytest.raises(TypeError, match=msg):
            indexer_sl(obj)["foo":-1]
        with pytest.raises(TypeError, match=msg):
            indexer_sl(obj)[:"foo"]
        with pytest.raises(TypeError, match=msg):
            indexer_sl(obj)[tdi[0] : "foo"]
| jorisvandenbossche/pandas | pandas/tests/indexes/timedeltas/test_indexing.py | Python | bsd-3-clause | 12,228 |
from django.conf.urls.defaults import patterns, url
# URL routes for the show app, using the legacy Django `patterns()` helper:
# each view is referenced by a dotted string relative to the 'show.views'
# prefix, and each route is given a reversible name.
urlpatterns = patterns(
    'show.views',
    url(r'^radioshow/entrylist/$', 'radioshow_entryitem_list', name='radioshow_entryitem_list'),
    url(r'^showcontributor/list/(?P<slug>[\w-]+)/$', 'showcontributor_content_list', name='showcontributor_content_list'),
    url(r'^showcontributor/appearance/(?P<slug>[\w-]+)/$', 'showcontributor_appearance_list', name='showcontributor_appearance_list'),
    url(r'^showcontributor/(?P<slug>[\w-]+)/$', 'showcontributor_detail', name='showcontributor_detail'),
    url(r'^showcontributor/content/(?P<slug>[\w-]+)/$', 'showcontributor_content_detail', name='showcontributor_content_detail'),
    url(r'^showcontributor/contact/(?P<slug>[\w-]+)/$', 'showcontributor_contact', name='showcontributor_contact'),
)
| praekelt/panya-show | show/urls.py | Python | bsd-3-clause | 804 |
from nose.tools import eq_, ok_
from django.test import TestCase
from us_ignite.snippets.models import Snippet
from us_ignite.snippets.tests import fixtures
class TestSnippetModel(TestCase):
    """Exercises creation, defaults, and derived fields of the Snippet model."""
    def tearDown(self):
        # Keep the Snippet table empty between tests.
        Snippet.objects.all().delete()
    def get_instance(self):
        # Minimal valid payload for a Snippet row.
        payload = {
            'name': 'Gigabit snippets',
            'slug': 'featured',
            'url': 'http://us-ignite.org/',
        }
        return Snippet.objects.create(**payload)
    def test_instance_is_created_successfully(self):
        snippet = self.get_instance()
        # Explicitly provided fields:
        eq_(snippet.name, 'Gigabit snippets')
        eq_(snippet.slug, 'featured')
        eq_(snippet.url, 'http://us-ignite.org/')
        # Model defaults:
        eq_(snippet.status, Snippet.DRAFT)
        eq_(snippet.url_text, '')
        eq_(snippet.body, '')
        eq_(snippet.image, '')
        eq_(snippet.is_featured, False)
        eq_(snippet.notes, '')
        # Auto-populated bookkeeping fields:
        ok_(snippet.id)
        ok_(snippet.created)
        ok_(snippet.modified)
    def test_instance_name_is_used_as_title(self):
        # The title property mirrors the name field.
        instance = fixtures.get_snippet(name='About page')
        eq_(instance.title, 'About page')
| us-ignite/us_ignite | us_ignite/snippets/tests/models_tests.py | Python | bsd-3-clause | 1,160 |
"""
Tests for line search routines
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_warns,
suppress_warnings)
import scipy.optimize.linesearch as ls
from scipy.optimize.linesearch import LineSearchWarning
import numpy as np
def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
"""
Check that strong Wolfe conditions apply
"""
phi1 = phi(s)
phi0 = phi(0)
derphi0 = derphi(0)
derphi1 = derphi(s)
msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % (
s, phi0, phi1, derphi0, derphi1, err_msg)
assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg)
assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg)
def assert_armijo(s, phi, c1=1e-4, err_msg=""):
"""
Check that Armijo condition applies
"""
phi1 = phi(s)
phi0 = phi(0)
msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg)
assert_(phi1 <= (1 - c1*s)*phi0, msg)
def assert_line_wolfe(x, p, s, f, fprime, **kw):
    """Check the strong Wolfe conditions for f restricted to the ray x + s*p."""
    def phi(sp):
        return f(x + p*sp)
    def derphi(sp):
        # Directional derivative of f along p.
        return np.dot(fprime(x + p*sp), p)
    assert_wolfe(s, phi=phi, derphi=derphi, **kw)
def assert_line_armijo(x, p, s, f, **kw):
    """Check the Armijo condition for f restricted to the ray x + s*p."""
    def phi(sp):
        return f(x + p*sp)
    assert_armijo(s, phi=phi, **kw)
def assert_fp_equal(x, y, err_msg="", nulp=50):
"""Assert two arrays are equal, up to some floating-point rounding error"""
try:
assert_array_almost_equal_nulp(x, y, nulp)
except AssertionError as e:
raise AssertionError("%s\n%s" % (e, err_msg))
class TestLineSearch(object):
    """End-to-end checks of the scalar searches and line searches.

    ``_scalar_func_*`` methods are 1-D test problems returning (value,
    derivative); ``_line_func_*`` are n-D problems returning (value,
    gradient).  ``setup_method`` collects them by name and splits each into
    separate value/derivative callables sharing one call counter (fcount).
    """
    # -- scalar functions; must have dphi(0.) < 0
    def _scalar_func_1(self, s):
        self.fcount += 1
        p = -s - s**3 + s**4
        dp = -1 - 3*s**2 + 4*s**3
        return p, dp
    def _scalar_func_2(self, s):
        self.fcount += 1
        p = np.exp(-4*s) + s**2
        dp = -4*np.exp(-4*s) + 2*s
        return p, dp
    def _scalar_func_3(self, s):
        self.fcount += 1
        p = -np.sin(10*s)
        dp = -10*np.cos(10*s)
        return p, dp
    # -- n-d functions
    def _line_func_1(self, x):
        self.fcount += 1
        f = np.dot(x, x)
        df = 2*x
        return f, df
    def _line_func_2(self, x):
        self.fcount += 1
        f = np.dot(x, np.dot(self.A, x)) + 1
        df = np.dot(self.A + self.A.T, x)
        return f, df
    # --
    def setup_method(self):
        self.scalar_funcs = []
        self.line_funcs = []
        self.N = 20
        self.fcount = 0
        def bind_index(func, idx):
            # Remember Python's closure semantics!
            return lambda *a, **kw: func(*a, **kw)[idx]
        # Register (name, value-func, derivative-func) triples for every
        # test problem defined on this class.
        for name in sorted(dir(self)):
            if name.startswith('_scalar_func_'):
                value = getattr(self, name)
                self.scalar_funcs.append(
                    (name, bind_index(value, 0), bind_index(value, 1)))
            elif name.startswith('_line_func_'):
                value = getattr(self, name)
                self.line_funcs.append(
                    (name, bind_index(value, 0), bind_index(value, 1)))
        np.random.seed(1234)
        self.A = np.random.randn(self.N, self.N)
    def scalar_iter(self):
        # Yield each scalar problem with several random "previous" phi values.
        for name, phi, derphi in self.scalar_funcs:
            for old_phi0 in np.random.randn(3):
                yield name, phi, derphi, old_phi0
    def line_iter(self):
        # Yield 9 random descent-direction problems per line function.
        for name, f, fprime in self.line_funcs:
            k = 0
            while k < 9:
                x = np.random.randn(self.N)
                p = np.random.randn(self.N)
                if np.dot(p, fprime(x)) >= 0:
                    # always pick a descent direction
                    continue
                k += 1
                old_fv = float(np.random.randn())
                yield name, f, fprime, x, p, old_fv
    # -- Generic scalar searches
    def test_scalar_search_wolfe1(self):
        c = 0
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            c += 1
            s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
                                                    old_phi0, derphi(0))
            assert_fp_equal(phi0, phi(0), name)
            assert_fp_equal(phi1, phi(s), name)
            assert_wolfe(s, phi, derphi, err_msg=name)
        assert_(c > 3) # check that the iterator really works...
    def test_scalar_search_wolfe2(self):
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
                phi, derphi, phi(0), old_phi0, derphi(0))
            assert_fp_equal(phi0, phi(0), name)
            assert_fp_equal(phi1, phi(s), name)
            if derphi1 is not None:
                assert_fp_equal(derphi1, derphi(s), name)
            assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0))
    def test_scalar_search_wolfe2_with_low_amax(self):
        # With amax below the smallest acceptable step the search must fail
        # (return None for the step) and emit a LineSearchWarning.
        def phi(alpha):
            return (alpha - 5) ** 2
        def derphi(alpha):
            return 2 * (alpha - 5)
        s, _, _, _ = assert_warns(LineSearchWarning,
                                  ls.scalar_search_wolfe2, phi, derphi, amax=0.001)
        assert_(s is None)
    def test_scalar_search_armijo(self):
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
            assert_fp_equal(phi1, phi(s), name)
            assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0))
    # -- Generic line searches
    def test_line_search_wolfe1(self):
        c = 0
        smax = 100
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
                                                           g0, f0, old_f,
                                                           amax=smax)
            # Reported call counts must equal the actual evaluations made.
            assert_equal(self.fcount, fc+gc)
            assert_fp_equal(ofv, f(x))
            if s is None:
                continue
            assert_fp_equal(fv, f(x + s*p))
            assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
            if s < smax:
                c += 1
                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
        assert_(c > 3) # check that the iterator really works...
    def test_line_search_wolfe2(self):
        c = 0
        smax = 512
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            with suppress_warnings() as sup:
                sup.filter(LineSearchWarning,
                           "The line search algorithm could not find a solution")
                sup.filter(LineSearchWarning,
                           "The line search algorithm did not converge")
                s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
                                                               g0, f0, old_f,
                                                               amax=smax)
            assert_equal(self.fcount, fc+gc)
            assert_fp_equal(ofv, f(x))
            assert_fp_equal(fv, f(x + s*p))
            if gv is not None:
                assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
            if s < smax:
                c += 1
                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
        assert_(c > 3) # check that the iterator really works...
    def test_line_search_wolfe2_bounds(self):
        # See gh-7475
        # For this f and p, starting at a point on axis 0, the strong Wolfe
        # condition 2 is met if and only if the step length s satisfies
        # |x + s| <= c2 * |x|
        f = lambda x: np.dot(x, x)
        fp = lambda x: 2 * x
        p = np.array([1, 0])
        # Smallest s satisfying strong Wolfe conditions for these arguments is 30
        x = -60 * p
        c2 = 0.5
        s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
        assert_line_wolfe(x, p, s, f, fp)
        s, _, _, _, _, _ = assert_warns(LineSearchWarning,
                                        ls.line_search_wolfe2, f, fp, x, p,
                                        amax=29, c2=c2)
        assert_(s is None)
        # s=30 will only be tried on the 6th iteration, so this won't converge
        assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,
                     c2=c2, maxiter=5)
    def test_line_search_armijo(self):
        c = 0
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
            c += 1
            assert_equal(self.fcount, fc)
            assert_fp_equal(fv, f(x + s*p))
            assert_line_armijo(x, p, s, f, err_msg=name)
        assert_(c >= 9)
    # -- More specific tests
    def test_armijo_terminate_1(self):
        # Armijo should evaluate the function only once if the trial step
        # is already suitable
        count = [0]
        def phi(s):
            count[0] += 1
            return -s + 0.01*s**2
        s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
        assert_equal(s, 1)
        assert_equal(count[0], 2)
        assert_armijo(s, phi)
    def test_wolfe_terminate(self):
        # wolfe1 and wolfe2 should also evaluate the function only a few
        # times if the trial step is already suitable
        def phi(s):
            count[0] += 1
            return -s + 0.05*s**2
        def derphi(s):
            count[0] += 1
            return -1 + 0.05*2*s
        for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
            count = [0]
            r = func(phi, derphi, phi(0), None, derphi(0))
            assert_(r[0] is not None, (r, func))
            assert_(count[0] <= 2 + 2, (count, func))
            assert_wolfe(r[0], phi, derphi, err_msg=str(func))
| arokem/scipy | scipy/optimize/tests/test_linesearch.py | Python | bsd-3-clause | 10,206 |
# -*- coding: utf-8 -*-
import webview
"""
This example demonstrates how to localize GUI strings used by pywebview.
"""
if __name__ == '__main__':
    # Translated UI strings applied globally via webview.start(). Keys are
    # pywebview's built-in localization identifiers; values here are Russian.
    localization = {
        'global.saveFile': u'Сохранить файл',
        'cocoa.menu.about': u'О программе',
        'cocoa.menu.services': u'Cлужбы',
        'cocoa.menu.view': u'Вид',
        'cocoa.menu.hide': u'Скрыть',
        'cocoa.menu.hideOthers': u'Скрыть остальные',
        'cocoa.menu.showAll': u'Показать все',
        'cocoa.menu.quit': u'Завершить',
        'cocoa.menu.fullscreen': u'Перейти ',
        'windows.fileFilter.allFiles': u'Все файлы',
        'windows.fileFilter.otherFiles': u'Остальлные файльы',
        'linux.openFile': u'Открыть файл',
        'linux.openFiles': u'Открыть файлы',
        'linux.openFolder': u'Открыть папку',
    }
    # Per-window localization passed to create_window() overrides the global
    # strings for that window only.
    window_localization_override = {
        'global.saveFile': u'Save file',
    }
    webview.create_window(
        'Localization Example',
        'https://pywebview.flowrl.com/hello',
        localization=window_localization_override,
    )
    # The global localization dict is supplied when starting the GUI loop.
    webview.start(localization=localization)
| r0x0r/pywebview | examples/localization.py | Python | bsd-3-clause | 1,251 |
# Django settings for wireframe project.
import os
# Absolute path of the directory containing this settings module; used to
# build filesystem paths below so the project is location-independent.
PROJECT_DIR = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Legacy (pre-Django-1.2) single-database settings, kept alongside the newer
# DATABASES dict below for compatibility with older code.
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'wire.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'wire.db'
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
WIREFRAME_MEDIA_ROOT = os.path.join(PROJECT_DIR, os.pardir, 'wireframes', 'media', 'wireframes')
ADMIN_MEDIA_ROOT = os.path.join(PROJECT_DIR, os.pardir, 'admin_media', '')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/admin_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'p)vc32rphaob@!7nze8@6ih5c_@ygjc%@csf*6^+d^((+%$4p#'
# Cached loader wraps the filesystem and app-directory loaders so compiled
# templates are reused between requests.
TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    #'django.middleware.doc.XViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(os.path.dirname(__file__), "templates"),
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.i18n",
    "django.core.context_processors.debug",
    "django.core.context_processors.request",
    "django.core.context_processors.media",
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'wireframes',
)
| batiste/django-wireframes | example/settings.py | Python | bsd-3-clause | 3,055 |
import numpy as np
from menpo.model import PCAModel
from menpo.visualize import print_progress
def prune(weights, n_retained=50):
    """Rank samples by the squared norm of their first *n_retained* weights.

    Returns the per-sample squared norms and an index array ordering samples
    from worst (largest norm, likely problematic) to best (smallest norm).
    """
    retained = weights[:, :n_retained]
    w_norm = np.sum(retained ** 2, axis=1)
    # High weights here suggest problematic samples
    bad_to_good_index = w_norm.argsort()[::-1]
    return w_norm, bad_to_good_index
def pca_and_weights(meshes, retain_eig_cum_val=0.997, verbose=False):
    """Build a PCA model of *meshes* and project every mesh into it.

    The model is trimmed to the components whose cumulative eigenvalue ratio
    stays below *retain_eig_cum_val*; the returned weights are whitened by
    the square root of the retained eigenvalues.
    """
    model = PCAModel(meshes, verbose=verbose)
    n_comps_retained = (model.eigenvalues_cumulative_ratio() <
                        retain_eig_cum_val).sum()
    if verbose:
        print('\nRetaining {:.2%} of eigenvalues keeps {} components'.format(
            retain_eig_cum_val, n_comps_retained))
    model.trim_components(retain_eig_cum_val)
    if verbose:
        meshes = print_progress(meshes, prefix='Calculating weights')
    projections = [model.project(mesh) for mesh in meshes]
    # Whiten so that each component's weights are unit-variance.
    weights = np.vstack(projections) / np.sqrt(model.eigenvalues)
    return model, weights
| menpo/lsfm | lsfm/model.py | Python | bsd-3-clause | 968 |
# proxy module
from __future__ import absolute_import
from etsdevtools.developer.tools.vet.class_browser import *
| enthought/etsproxy | enthought/developer/tools/vet/class_browser.py | Python | bsd-3-clause | 114 |
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from builtins import input
from builtins import range
from past.utils import old_div
import sys
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pylab
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
def main():
    """
    NAME
        foldtest.py
    DESCRIPTION
        does a fold test (Tauxe, 2010) on data
    INPUT FORMAT
        dec inc dip_direction dip
    SYNTAX
        foldtest.py [command line options]
    OPTIONS
        -h prints help message and quits
        -f FILE file with input data
        -F FILE for confidence bounds on fold test
        -u ANGLE (circular standard deviation) for uncertainty on bedding poles
        -b MIN MAX bounds for quick search of percent untilting [default is -10 to 150%]
        -n NB number of bootstrap samples [default is 1000]
        -fmt FMT, specify format - default is svg
        -sav save figures and quit
    INPUT FILE
        Dec Inc Dip_Direction Dip in space delimited file
    OUTPUT PLOTS
        Geographic: is an equal area projection of the input data in
                    original coordinates
        Stratigraphic: is an equal area projection of the input data in
                    tilt adjusted coordinates
        % Untilting: The dashed (red) curves are representative plots of
                    maximum eigenvalue (tau_1) as a function of untilting
                    The solid line is the cumulative distribution of the
                    % Untilting required to maximize tau for all the
                    bootstrapped data sets.  The dashed vertical lines
                    are 95% confidence bounds on the % untilting that yields
                    the most clustered result (maximum tau_1).
                    Command line: prints out the bootstrapped iterations and
                    finally the confidence bounds on optimum untilting.
        If the 95% conf bounds include 0, then a post-tilt magnetization is indicated
        If the 95% conf bounds include 100, then a pre-tilt magnetization is indicated
        If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
                    possible as is vertical axis rotation or other pathologies
    OPTIONAL OUTPUT FILE:
        The output file has the % untilting within the 95% confidence bounds
        and the number of bootstrap samples
    """
    # Defaults: no bedding-pole uncertainty, svg output, interactive mode.
    kappa=0
    fmt,plot='svg',0
    nb=1000 # number of bootstraps
    min,max=-10,150
    if '-h' in sys.argv: # check if help is needed
        print(main.__doc__)
        sys.exit() # graceful quit
    if '-F' in sys.argv:
        ind=sys.argv.index('-F')
        outfile=open(sys.argv[ind+1],'w')
    else:
        outfile=""
    if '-f' in sys.argv:
        ind=sys.argv.index('-f')
        file=sys.argv[ind+1]
        DIDDs=numpy.loadtxt(file)
    else:
        print(main.__doc__)
        sys.exit()
    if '-fmt' in sys.argv:
        ind=sys.argv.index('-fmt')
        fmt=sys.argv[ind+1]
    if '-sav' in sys.argv:plot=1
    if '-b' in sys.argv:
        ind=sys.argv.index('-b')
        min=int(sys.argv[ind+1])
        max=int(sys.argv[ind+2])
    if '-n' in sys.argv:
        ind=sys.argv.index('-n')
        nb=int(sys.argv[ind+1])
    if '-u' in sys.argv:
        # Convert a circular standard deviation into a Fisher kappa.
        ind=sys.argv.index('-u')
        csd=float(sys.argv[ind+1])
        kappa=(old_div(81.,csd))**2
    #
    # get to work
    #
    PLTS={'geo':1,'strat':2,'taus':3} # make plot dictionary
    pmagplotlib.plot_init(PLTS['geo'],5,5)
    pmagplotlib.plot_init(PLTS['strat'],5,5)
    pmagplotlib.plot_init(PLTS['taus'],5,5)
    pmagplotlib.plotEQ(PLTS['geo'],DIDDs,'Geographic')
    # Tilt-correct the directions using the bedding columns.
    D,I=pmag.dotilt_V(DIDDs)
    TCs=numpy.array([D,I]).transpose()
    pmagplotlib.plotEQ(PLTS['strat'],TCs,'Stratigraphic')
    if plot==0:pmagplotlib.drawFIGS(PLTS)
    Percs=list(range(min,max))
    Cdf,Untilt=[],[]
    pylab.figure(num=PLTS['taus'])
    print('doing ',nb,' iterations...please be patient.....')
    for n in range(nb): # do bootstrap data sets - plot first 25 as dashed red line
        if n%50==0:print(n)
        Taus=[] # set up lists for taus
        PDs=pmag.pseudo(DIDDs)
        if kappa!=0:
            # Perturb each bedding pole by Fisher-distributed noise.
            for k in range(len(PDs)):
                d,i=pmag.fshdev(kappa)
                dipdir,dip=pmag.dodirot(d,i,PDs[k][2],PDs[k][3])
                PDs[k][2]=dipdir
                PDs[k][3]=dip
        for perc in Percs:
            # Partially untilt by perc percent and record the clustering (tau_1).
            tilt=numpy.array([1.,1.,1.,0.01*perc])
            D,I=pmag.dotilt_V(PDs*tilt)
            TCs=numpy.array([D,I]).transpose()
            ppars=pmag.doprinc(TCs) # get principal directions
            Taus.append(ppars['tau1'])
        if n<25:pylab.plot(Percs,Taus,'r--')
        Untilt.append(Percs[Taus.index(numpy.max(Taus))]) # tilt that gives maximum tau
        Cdf.append(old_div(float(n),float(nb)))
    pylab.plot(Percs,Taus,'k')
    pylab.xlabel('% Untilting')
    pylab.ylabel('tau_1 (red), CDF (green)')
    Untilt.sort() # now for CDF of tilt of maximum tau
    pylab.plot(Untilt,Cdf,'g')
    # 95% confidence bounds on the optimal untilting percentage.
    lower=int(.025*nb)
    upper=int(.975*nb)
    pylab.axvline(x=Untilt[lower],ymin=0,ymax=1,linewidth=1,linestyle='--')
    pylab.axvline(x=Untilt[upper],ymin=0,ymax=1,linewidth=1,linestyle='--')
    tit= '%i - %i %s'%(Untilt[lower],Untilt[upper],'Percent Unfolding')
    print(tit)
    print('range of all bootstrap samples: ', Untilt[0], ' - ', Untilt[-1])
    pylab.title(tit)
    outstring= '%i - %i; %i\n'%(Untilt[lower],Untilt[upper],nb)
    if outfile!="":outfile.write(outstring)
    files={}
    for key in list(PLTS.keys()):
        files[key]=('foldtest_'+'%s'%(key.strip()[:2])+'.'+fmt)
    if plot==0:
        pmagplotlib.drawFIGS(PLTS)
        ans= input('S[a]ve all figures, <Return> to quit   ')
        if ans!='a':
            print("Good bye")
            sys.exit()
    pmagplotlib.saveP(PLTS,files)
main()
| Caoimhinmg/PmagPy | programs/foldtest.py | Python | bsd-3-clause | 6,056 |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import sys
import numpy
from math import floor
def movingAverage(x, N):
cumsum = numpy.cumsum(numpy.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N])/N
filename = "reports/configuration.confrewardRecordReport.txt"
if (len(sys.argv) > 1):
filename = sys.argv[1]
with open(filename) as f:
print f.readline()
time = []
temp = []
avg = []
for line in f:
entry = line.split(":")
time.append(float(entry[0]))
temp.append(float(entry[1]))
windowSize = 100
avg = [0] * (windowSize - 1)
avg = avg + list( movingAverage(temp, windowSize))
ratio = 0.999
avg = avg[int(floor(len(avg )*ratio)): len(avg )-1]
time = time[int(floor(len(time)*ratio)): len(time)-1]
temp = temp[int(floor(len(temp)*ratio)): len(temp)-1]
plt.plot(time, temp, 'r-')
plt.plot(time, avg, 'ro')
plt.show()
| Jacques-Florence/schedSim | src/analysis/reward.py | Python | bsd-3-clause | 847 |
from matplotlib import pyplot
from .algo import _bs_fit
def axes_object(ax):
    """ Checks that a value is a matplotlib Axes; if None, a new one is
    created. Both the figure and axes are returned (in that order).
    """
    if ax is None:
        # Fall back to the current (or a fresh) axes.
        ax = pyplot.gca()
    elif not isinstance(ax, pyplot.Axes):
        raise ValueError("`ax` must be a matplotlib Axes instance or None")
    return ax.figure, ax
def axis_name(axis, axname):
    """
    Checks that an axis name is in ``{'x', 'y'}`` (case-insensitively).
    Raises a ValueError on an invalid value. Returns the lower case
    version of valid values.
    """
    valid_args = ["x", "y"]
    if axis.lower() not in valid_args:
        # Fixed typo in the error message ("on of" -> "one of").
        msg = "Invalid value for {} ({}). Must be one of {}."
        raise ValueError(msg.format(axname, axis, valid_args))
    return axis.lower()
def fit_argument(arg, argname):
    """
    Checks that an axis option is in ``{'x', 'y', 'both', None}``.
    Raises a ValueError on an invalid value. Returns the lower case
    version of valid values.

    NOTE(review): unlike ``axis_name``, membership is checked
    case-sensitively before lowering, so e.g. ``'X'`` is rejected; this
    preserves the original behavior.
    """
    valid_args = ["x", "y", "both", None]
    if arg not in valid_args:
        # Fixed typo in the error message ("on of" -> "one of").
        msg = "Invalid value for {} ({}). Must be one of {}."
        raise ValueError(msg.format(argname, arg, valid_args))
    elif arg is not None:
        arg = arg.lower()
    return arg
def axis_type(axtype):
    """
    Checks that a valid axis type is requested.
    - *pp* - percentile axis
    - *qq* - quantile axis
    - *prob* - probability axis
    Raises an error on an invalid value. Returns the lower case version
    of valid values.
    """
    lowered = axtype.lower()
    if lowered not in ("pp", "qq", "prob"):
        raise ValueError("invalid axtype: {}".format(axtype))
    return lowered
def axis_label(label):
    """
    Replaces None with an empty string for axis labels.
    """
    if label is None:
        return ""
    return label
def other_options(options):
    """
    Replaces None with an empty dict for plotting options; otherwise
    returns a shallow copy so the caller's dict is never mutated.
    """
    if options is None:
        return dict()
    return options.copy()
def estimator(value):
    """Map an estimator name to its bootstrap helper.

    Only the "fit"/"values" estimator is implemented; the residual
    variants are recognised but deliberately unsupported for now.
    """
    key = value.lower()
    if key in ("res", "resid", "resids", "residual", "residuals"):
        raise NotImplementedError("Bootstrapping the residuals is not ready yet")
    if key in ("fit", "values"):
        return _bs_fit
    raise ValueError('estimator must be either "resid" or "fit".')
| phobson/mpl-probscale | probscale/validate.py | Python | bsd-3-clause | 2,443 |
from storitell.tastypie.resources import ModelResource
from storitell.stories.models import Story
from storitell.stories.extra_methods import moderate_comment
from storitell.tastypie.validation import Validation
# Stories can be read through a REST-ful interface. It'd be nice
# to be able to POST as well, but that requires validation I haven't
# had time to code yet. Want to add it? Be my guest.
class StoryResource(ModelResource):
    # Read-only REST endpoint for Story objects: exposes only the listed
    # fields and accepts GET requests exclusively (see module comment above).
    class Meta:
        queryset = Story.objects.all()
        resource_name = 'story'
        fields = ['maintext','pub_date','upvotes']
        allowed_methods = ['get']
| esten/StoriTell | StoriTell/stories/api.py | Python | bsd-3-clause | 583 |
from Player import Player
import random
import numpy
import time
class Connect4_RandomAI(Player):
    """Connect-4 player that drops its disc into a uniformly random valid column."""

    def __init__(self, value):
        # Initialize the random number generator from a clock-derived value so
        # that we don't see the exact same AI behavior every time.
        # FIX: time.clock() was removed in Python 3.8; time.perf_counter() is
        # its documented replacement and serves the same seeding purpose.
        random.seed(time.perf_counter())
        # The disc value this player places on the board.
        # NOTE(review): this instance attribute shadows the value() method
        # below for all instances — preserved as-is for compatibility.
        self.value = value
    # end def init()

    def value(self):
        # Unreachable on instances (shadowed by the attribute set in __init__);
        # kept to preserve the original class interface.
        return self.value
    # end def value()

    def DetermineMove(self, board):
        """Return a random valid move as a numpy array [row, column]."""
        # Keep generating random column choices until one is legal.
        while True:
            move = numpy.array([0, random.randint(0, board.width - 1)])
            if board.IsValidMove(move):
                return move
    # end def DetermineMove()
# end class Connect4_RandomAI
| cmdunkers/DeeperMind | Players/Connect4_RandomAI.py | Python | bsd-3-clause | 894 |
from bokeh.plotting import figure, output_file, show
# Write the rendered plot to a standalone HTML document.
output_file("title.html")
p = figure(width=400, height=400, title="Some Title")
# Style the title text: color, font family, and slant.
p.title.text_color = "olive"
p.title.text_font = "times"
p.title.text_font_style = "italic"
p.circle([1, 2, 3, 4, 5], [2, 5, 8, 2, 7], size=10)
# Save and open the document in a browser.
show(p)
| bokeh/bokeh | sphinx/source/docs/user_guide/examples/styling_title.py | Python | bsd-3-clause | 289 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Gazetteer.proto
# NOTE: machine-generated protobuf bindings; regenerate from the .proto
# file rather than editing by hand.
import sys
# _b(): identity on Python 2, latin-1 encoder on Python 3, so the
# serialized descriptor below is always raw bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import DataStructures_pb2 as DataStructures__pb2
# The attribute name for the transitive FeatureTypes dependency differs
# between protobuf versions; try both spellings.
try:
  FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2
except AttributeError:
  FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2
from .DataStructures_pb2 import *
# File descriptor holding the serialized Gazetteer.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='Gazetteer.proto',
  package='CoreML.Specification.CoreMLModels',
  syntax='proto3',
  serialized_pb=_b('\n\x0fGazetteer.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\x9c\x01\n\tGazetteer\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12@\n\x11stringClassLabels\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3')
  ,
  dependencies=[DataStructures__pb2.DESCRIPTOR,],
  public_dependencies=[DataStructures__pb2.DESCRIPTOR,])
# Message descriptor for CoreML.Specification.CoreMLModels.Gazetteer.
_GAZETTEER = _descriptor.Descriptor(
  name='Gazetteer',
  full_name='CoreML.Specification.CoreMLModels.Gazetteer',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='revision', full_name='CoreML.Specification.CoreMLModels.Gazetteer.revision', index=0,
      number=1, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='language', full_name='CoreML.Specification.CoreMLModels.Gazetteer.language', index=1,
      number=10, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.Gazetteer.modelParameterData', index=2,
      number=100, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='stringClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.stringClassLabels', index=3,
      number=200, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='ClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.ClassLabels',
      index=0, containing_type=None, fields=[]),
  ],
  serialized_start=77,
  serialized_end=233,
)
# Wire up cross-references that cannot be expressed inline above.
_GAZETTEER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR
_GAZETTEER.oneofs_by_name['ClassLabels'].fields.append(
  _GAZETTEER.fields_by_name['stringClassLabels'])
_GAZETTEER.fields_by_name['stringClassLabels'].containing_oneof = _GAZETTEER.oneofs_by_name['ClassLabels']
DESCRIPTOR.message_types_by_name['Gazetteer'] = _GAZETTEER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class created from the descriptor via reflection.
Gazetteer = _reflection.GeneratedProtocolMessageType('Gazetteer', (_message.Message,), dict(
  DESCRIPTOR = _GAZETTEER,
  __module__ = 'Gazetteer_pb2'
  # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.Gazetteer)
  ))
_sym_db.RegisterMessage(Gazetteer)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003'))
# @@protoc_insertion_point(module_scope)
"""Tests for `cookiecutter.hooks` module."""
import os
import errno
import stat
import sys
import textwrap
import pytest
from cookiecutter import hooks, utils, exceptions
def _write_shell_hook(hook_dir, filename, touched_file):
    """Write an executable bash hook that touches *touched_file*."""
    filename = os.path.join(hook_dir, filename)
    with open(filename, 'w') as f:
        f.write("#!/bin/bash\n")
        f.write("\n")
        f.write("echo 'post generation hook';\n")
        f.write("touch '{}'\n".format(touched_file))
    # Set the execute bit so the hook can actually be run.
    os.chmod(filename, os.stat(filename).st_mode | stat.S_IXUSR)


def _write_batch_hook(hook_dir, filename, touched_file):
    """Write a Windows batch hook that creates *touched_file*."""
    with open(os.path.join(hook_dir, filename), 'w') as f:
        f.write("@echo off\n")
        f.write("\n")
        f.write("echo post generation hook\n")
        f.write("echo. >{}\n".format(touched_file))


def make_test_repo(name, multiple_hooks=False):
    """Create test repository for test setup methods.

    Creates ``name/`` containing a ``hooks/`` directory with pre/post
    generation hook scripts and an ``input{{hooks}}`` template directory.

    :param name: path of the repository directory to create.
    :param multiple_hooks: when True, also add a shell/batch pre hook in
        addition to the Python pre hook.
    :returns: filename of the post-generation hook that was written.
    """
    hook_dir = os.path.join(name, 'hooks')
    template = os.path.join(name, 'input{{hooks}}')
    os.mkdir(name)
    os.mkdir(hook_dir)
    os.mkdir(template)

    with open(os.path.join(template, 'README.rst'), 'w') as f:
        f.write("foo\n===\n\nbar\n")

    # Python pre-generation hook (always present).
    with open(os.path.join(hook_dir, 'pre_gen_project.py'), 'w') as f:
        f.write("#!/usr/bin/env python\n")
        f.write("# -*- coding: utf-8 -*-\n")
        f.write("from __future__ import print_function\n")
        f.write("\n")
        f.write("print('pre generation hook')\n")
        f.write("f = open('python_pre.txt', 'w')\n")
        f.write("f.close()\n")

    # Platform-appropriate post-generation hook.
    if sys.platform.startswith('win'):
        post = 'post_gen_project.bat'
        _write_batch_hook(hook_dir, post, 'shell_post.txt')
    else:
        post = 'post_gen_project.sh'
        _write_shell_hook(hook_dir, post, 'shell_post.txt')

    # Adding an additional pre script
    if multiple_hooks:
        if sys.platform.startswith('win'):
            _write_batch_hook(hook_dir, 'pre_gen_project.bat', 'shell_pre.txt')
        else:
            _write_shell_hook(hook_dir, 'pre_gen_project.sh', 'shell_pre.txt')

    return post
class TestFindHooks(object):
    """Class to unite find hooks related tests in one place."""

    repo_path = 'tests/test-hooks'

    def setup_method(self, method):
        """Find hooks related tests setup fixture."""
        self.post_hook = make_test_repo(self.repo_path)

    def teardown_method(self, method):
        """Find hooks related tests teardown fixture."""
        utils.rmtree(self.repo_path)

    def test_find_hook(self):
        """Finds the specified hook."""
        with utils.work_in(self.repo_path):
            # Both generation phases should resolve to scripts in hooks/.
            for hook_name, script_name in (
                ('pre_gen_project', 'pre_gen_project.py'),
                ('post_gen_project', self.post_hook),
            ):
                expected = os.path.abspath('hooks/{}'.format(script_name))
                assert hooks.find_hook(hook_name)[0] == expected

    def test_no_hooks(self):
        """`find_hooks` should return None if the hook could not be found."""
        with utils.work_in('tests/fake-repo'):
            assert hooks.find_hook('pre_gen_project') is None

    def test_unknown_hooks_dir(self):
        """`find_hooks` should return None if hook directory not found."""
        with utils.work_in(self.repo_path):
            found = hooks.find_hook('pre_gen_project', hooks_dir='hooks_dir')
            assert found is None

    def test_hook_not_found(self):
        """`find_hooks` should return None if the hook could not be found."""
        with utils.work_in(self.repo_path):
            assert hooks.find_hook('unknown_hook') is None
class TestExternalHooks(object):
    """Class to unite tests for hooks with different project paths."""
    repo_path = os.path.abspath('tests/test-hooks/')
    hooks_path = os.path.abspath('tests/test-hooks/hooks')
    def setup_method(self, method):
        """External hooks related tests setup fixture."""
        self.post_hook = make_test_repo(self.repo_path, multiple_hooks=True)
    def teardown_method(self, method):
        """External hooks related tests teardown fixture."""
        utils.rmtree(self.repo_path)
        # Remove every artifact a hook may have written to the various
        # working directories used by the tests below.
        if os.path.exists('python_pre.txt'):
            os.remove('python_pre.txt')
        if os.path.exists('shell_post.txt'):
            os.remove('shell_post.txt')
        if os.path.exists('shell_pre.txt'):
            os.remove('shell_pre.txt')
        if os.path.exists('tests/shell_post.txt'):
            os.remove('tests/shell_post.txt')
        if os.path.exists('tests/test-hooks/input{{hooks}}/python_pre.txt'):
            os.remove('tests/test-hooks/input{{hooks}}/python_pre.txt')
        if os.path.exists('tests/test-hooks/input{{hooks}}/shell_post.txt'):
            os.remove('tests/test-hooks/input{{hooks}}/shell_post.txt')
        if os.path.exists('tests/context_post.txt'):
            os.remove('tests/context_post.txt')
    def test_run_script(self):
        """Execute a hook script, independently of project generation."""
        hooks.run_script(os.path.join(self.hooks_path, self.post_hook))
        assert os.path.isfile('shell_post.txt')
    def test_run_failing_script(self, mocker):
        """Test correct exception raise if run_script fails."""
        # Make subprocess.Popen itself raise so run_script's error path runs.
        err = OSError()
        prompt = mocker.patch('subprocess.Popen')
        prompt.side_effect = err
        with pytest.raises(exceptions.FailedHookException) as excinfo:
            hooks.run_script(os.path.join(self.hooks_path, self.post_hook))
        assert 'Hook script failed (error: {})'.format(err) in str(excinfo.value)
    def test_run_failing_script_enoexec(self, mocker):
        """Test correct exception raise if run_script fails."""
        # ENOEXEC gets a more specific message (empty file / no shebang).
        err = OSError()
        err.errno = errno.ENOEXEC
        prompt = mocker.patch('subprocess.Popen')
        prompt.side_effect = err
        with pytest.raises(exceptions.FailedHookException) as excinfo:
            hooks.run_script(os.path.join(self.hooks_path, self.post_hook))
        assert 'Hook script failed, might be an empty file or missing a shebang' in str(
            excinfo.value
        )
    def test_run_script_cwd(self):
        """Change directory before running hook."""
        hooks.run_script(os.path.join(self.hooks_path, self.post_hook), 'tests')
        assert os.path.isfile('tests/shell_post.txt')
        # The cwd change must not leak out of run_script.
        assert 'tests' not in os.getcwd()
    def test_run_script_with_context(self):
        """Execute a hook script, passing a context."""
        # Rewrite the post hook so it contains a template variable that
        # must be rendered from the supplied context.
        hook_path = os.path.join(self.hooks_path, 'post_gen_project.sh')
        if sys.platform.startswith('win'):
            post = 'post_gen_project.bat'
            with open(os.path.join(self.hooks_path, post), 'w') as f:
                f.write("@echo off\n")
                f.write("\n")
                f.write("echo post generation hook\n")
                f.write("echo. >{{cookiecutter.file}}\n")
        else:
            with open(hook_path, 'w') as fh:
                fh.write("#!/bin/bash\n")
                fh.write("\n")
                fh.write("echo 'post generation hook';\n")
                fh.write("touch 'shell_post.txt'\n")
                fh.write("touch '{{cookiecutter.file}}'\n")
                os.chmod(hook_path, os.stat(hook_path).st_mode | stat.S_IXUSR)
        hooks.run_script_with_context(
            os.path.join(self.hooks_path, self.post_hook),
            'tests',
            {'cookiecutter': {'file': 'context_post.txt'}},
        )
        assert os.path.isfile('tests/context_post.txt')
        assert 'tests' not in os.getcwd()
    def test_run_hook(self):
        """Execute hook from specified template in specified output \
        directory."""
        tests_dir = os.path.join(self.repo_path, 'input{{hooks}}')
        with utils.work_in(self.repo_path):
            # Both pre hooks (python + shell) should have run.
            hooks.run_hook('pre_gen_project', tests_dir, {})
            assert os.path.isfile(os.path.join(tests_dir, 'python_pre.txt'))
            assert os.path.isfile(os.path.join(tests_dir, 'shell_pre.txt'))
            hooks.run_hook('post_gen_project', tests_dir, {})
            assert os.path.isfile(os.path.join(tests_dir, 'shell_post.txt'))
    def test_run_failing_hook(self):
        """Test correct exception raise if hook exit code is not zero."""
        hook_path = os.path.join(self.hooks_path, 'pre_gen_project.py')
        tests_dir = os.path.join(self.repo_path, 'input{{hooks}}')
        # Replace the pre hook with one that always exits non-zero.
        with open(hook_path, 'w') as f:
            f.write("#!/usr/bin/env python\n")
            f.write("import sys; sys.exit(1)\n")
        with utils.work_in(self.repo_path):
            with pytest.raises(exceptions.FailedHookException) as excinfo:
                hooks.run_hook('pre_gen_project', tests_dir, {})
            assert 'Hook script failed' in str(excinfo.value)
@pytest.fixture()
def dir_with_hooks(tmp_path):
    """Yield a directory that contains hook backup files."""
    hooks_dir = tmp_path.joinpath('hooks')
    hooks_dir.mkdir()

    # Create a `~` backup file for both generation phases.
    backup_files = []
    for phase in ('pre', 'post'):
        content = textwrap.dedent(
            """
            #!/usr/bin/env python
            # -*- coding: utf-8 -*-
            print('{phase}_gen_project.py~')
            """.format(phase=phase)
        )
        backup_file = hooks_dir.joinpath('{}_gen_project.py~'.format(phase))
        backup_file.write_text(content, encoding='utf8')
        backup_files.append(backup_file)

    # Yield the parent directory because `find_hooks()` looks into
    # `hooks/` relative to the current working directory.
    yield str(tmp_path)

    for backup_file in backup_files:
        backup_file.unlink()
def test_ignore_hook_backup_files(monkeypatch, dir_with_hooks):
    """Test `find_hook` correctly use `valid_hook` verification function."""
    # Run from the directory whose `hooks/` holds only `~` backup files.
    monkeypatch.chdir(dir_with_hooks)
    for hook_name in ('pre_gen_project', 'post_gen_project'):
        assert hooks.find_hook(hook_name) is None
| pjbull/cookiecutter | tests/test_hooks.py | Python | bsd-3-clause | 10,560 |
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class ConnectCommand(ModuleData, Command):
    """Core oper command that links a configured remote server."""
    implements(IPlugin, IModuleData, ICommand)

    name = "ConnectCommand"
    core = True

    def actions(self):
        # Gate CONNECT behind the oper permission check.
        return [ ("commandpermission-CONNECT", 1, self.canConnect) ]

    def userCommands(self):
        return [ ("CONNECT", 1, self) ]

    def canConnect(self, user, data):
        hasPermission = self.ircd.runActionUntilValue("userhasoperpermission", user, "command-connect", users=[user])
        if hasPermission:
            # Defer to other permission checks.
            return None
        user.sendMessage(irc.ERR_NOPRIVILEGES, "Permission denied - You do not have the correct operator privileges")
        return False

    def parseParams(self, user, params, prefix, tags):
        if not params:
            user.sendSingleError("ConnectParams", irc.ERR_NEEDMOREPARAMS, "CONNECT", "Not enough parameters")
            return None
        return { "server": params[0] }

    def execute(self, user, data):
        serverName = data["server"]
        if serverName in self.ircd.serverNames:
            user.sendMessage("NOTICE", "*** Server {} is already on the network".format(serverName))
            return True
        if self.ircd.connectServer(serverName):
            user.sendMessage("NOTICE", "*** Connecting to {}".format(serverName))
        else:
            user.sendMessage("NOTICE", "*** Failed to connect to {}; it's likely not configured.".format(serverName))
        return True

connectCmd = ConnectCommand()
from django.db import migrations
class Migration(migrations.Migration):
    # Renames CommCareCaseIndexSQL.relationship to relationship_id.
    dependencies = [
        ('form_processor', '0019_allow_closed_by_null'),
    ]
    operations = [
        migrations.RenameField(
            model_name='commcarecaseindexsql',
            old_name='relationship',
            new_name='relationship_id',
        ),
    ]
| dimagi/commcare-hq | corehq/form_processor/migrations/0020_rename_index_relationship.py | Python | bsd-3-clause | 352 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South migration: drops the obsolete `stage` column from BillStage.
    def forwards(self, orm):
        # Deleting field 'BillStage.stage'
        db.delete_column(u'bills_billstage', 'stage')
    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'BillStage.stage'
        raise RuntimeError("Cannot reverse this migration. 'BillStage.stage' and its values cannot be restored.")
    # Frozen ORM snapshot used by South to reconstruct model state.
    models = {
        u'bills.bill': {
            'Meta': {'object_name': 'Bill'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'bills.billstage': {
            'Meta': {'object_name': 'BillStage'},
            'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stages'", 'to': u"orm['bills.Bill']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'bills.ncopconcurrence': {
            'Meta': {'object_name': 'NCOPConcurrence', '_ormbases': [u'bills.BillStage']},
            u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'bills.parliamentfinalvote': {
            'Meta': {'object_name': 'ParliamentFinalVote', '_ormbases': [u'bills.BillStage']},
            u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'bills.parliamentfirstreading': {
            'Meta': {'object_name': 'ParliamentFirstReading', '_ormbases': [u'bills.BillStage']},
            u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'bills.parliamentportfoliocommittee': {
            'Meta': {'object_name': 'ParliamentPortfolioCommittee', '_ormbases': [u'bills.BillStage']},
            u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'bills.parliamentsecondreading': {
            'Meta': {'object_name': 'ParliamentSecondReading', '_ormbases': [u'bills.BillStage']},
            u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'bills.preparliamentarystage': {
            'Meta': {'object_name': 'PreparliamentaryStage', '_ormbases': [u'bills.BillStage']},
            u'billstage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bills.BillStage']", 'unique': 'True', 'primary_key': 'True'}),
            'comments_end': ('django.db.models.fields.DateField', [], {}),
            'comments_start': ('django.db.models.fields.DateField', [], {})
        }
    }
    complete_apps = ['bills']
import os
import mimetypes
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured
# The legacy S3 bindings are required; fail fast with a download pointer.
try:
    from S3 import AWSAuthConnection, QueryStringAuthGenerator, CallingFormat
except ImportError:
    raise ImproperlyConfigured("Could not load amazon's S3 bindings.\nSee "
        "http://developer.amazonwebservices.com/connect/entry.jspa?externalID=134")
# Settings lookups; AWS_S3_* names take precedence over the generic AWS_* ones.
ACCESS_KEY_NAME = getattr(settings, 'AWS_S3_ACCESS_KEY_ID', getattr(settings, 'AWS_ACCESS_KEY_ID', None))
SECRET_KEY_NAME = getattr(settings, 'AWS_S3_SECRET_ACCESS_KEY', getattr(settings, 'AWS_SECRET_ACCESS_KEY', None))
HEADERS = getattr(settings, 'AWS_HEADERS', {})
DEFAULT_ACL = getattr(settings, 'AWS_DEFAULT_ACL', 'public-read') #access control policy (private, or public-read)
QUERYSTRING_ACTIVE = getattr(settings, 'AWS_QUERYSTRING_ACTIVE', False)
QUERYSTRING_EXPIRE = getattr(settings, 'AWS_QUERYSTRING_EXPIRE', 60)
SECURE_URLS = getattr(settings, 'AWS_S3_SECURE_URLS', False)
BUCKET_PREFIX = getattr(settings, 'AWS_BUCKET_PREFIX', '')
CALLING_FORMAT = getattr(settings, 'AWS_CALLING_FORMAT', CallingFormat.PATH)
PRELOAD_METADATA = getattr(settings, 'AWS_PRELOAD_METADATA', False)
IS_GZIPPED = getattr(settings, 'AWS_IS_GZIPPED', False)
# Content types considered worth gzip-compressing before upload.
GZIP_CONTENT_TYPES = getattr(settings, 'GZIP_CONTENT_TYPES', (
    'text/css',
    'application/javascript',
    'application/x-javascript'
))
# Only import gzip support when compression is actually enabled.
if IS_GZIPPED:
    from gzip import GzipFile
class S3Storage(Storage):
    """Amazon Simple Storage Service backend (deprecated; use s3boto)."""
    def __init__(self, bucket=settings.AWS_STORAGE_BUCKET_NAME,
            access_key=None, secret_key=None, acl=DEFAULT_ACL,
            calling_format=CALLING_FORMAT, encrypt=False,
            gzip=IS_GZIPPED, gzip_content_types=GZIP_CONTENT_TYPES,
            preload_metadata=PRELOAD_METADATA):
        warnings.warn(
            "The s3 backend is deprecated and will be removed in version 1.2. "
            "Use the s3boto backend instead.",
            PendingDeprecationWarning
        )
        self.bucket = bucket
        self.acl = acl
        self.encrypt = encrypt
        self.gzip = gzip
        self.gzip_content_types = gzip_content_types
        self.preload_metadata = preload_metadata
        if encrypt:
            # Encryption is optional and needs the third-party ezPyCrypto.
            try:
                import ezPyCrypto
            except ImportError:
                raise ImproperlyConfigured("Could not load ezPyCrypto.\nSee "
                    "http://www.freenet.org.nz/ezPyCrypto/ to install it.")
            self.crypto_key = ezPyCrypto.key
        if not access_key and not secret_key:
            access_key, secret_key = self._get_access_keys()
        self.connection = AWSAuthConnection(access_key, secret_key,
                            calling_format=calling_format)
        # Separate generator used to build (optionally signed) URLs.
        self.generator = QueryStringAuthGenerator(access_key, secret_key,
                            calling_format=calling_format,
                            is_secure=SECURE_URLS)
        self.generator.set_expires_in(QUERYSTRING_EXPIRE)
        self.headers = HEADERS
        # Cache of bucket listing entries; filled lazily (see `entries`).
        self._entries = {}
    def _get_access_keys(self):
        """Return (access_key, secret_key) from settings or the environment."""
        access_key = ACCESS_KEY_NAME
        secret_key = SECRET_KEY_NAME
        if (access_key or secret_key) and (not access_key or not secret_key):
            # NOTE(review): this looks up os.environ using the settings
            # *values* as variable names, which only works if the settings
            # hold environment-variable names — confirm intended behavior.
            access_key = os.environ.get(ACCESS_KEY_NAME)
            secret_key = os.environ.get(SECRET_KEY_NAME)
        if access_key and secret_key:
            # Both were provided, so use them
            return access_key, secret_key
        return None, None
    @property
    def entries(self):
        # Lazily list the bucket once when metadata preloading is enabled.
        if self.preload_metadata and not self._entries:
            self._entries = dict((entry.key, entry)
                for entry in self.connection.list_bucket(self.bucket).entries)
        return self._entries
    def _get_connection(self):
        return AWSAuthConnection(*self._get_access_keys())
    def _clean_name(self, name):
        # Useful for windows' paths
        return os.path.join(BUCKET_PREFIX, os.path.normpath(name).replace('\\', '/'))
    def _compress_string(self, s):
        """Gzip a given string."""
        zbuf = StringIO()
        zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
        zfile.write(s)
        zfile.close()
        return zbuf.getvalue()
    def _put_file(self, name, content):
        """Upload *content* under *name*, optionally encrypting/gzipping."""
        if self.encrypt:
            # Create a key object
            key = self.crypto_key()
            # Read in a public key
            fd = open(settings.CRYPTO_KEYS_PUBLIC, "rb")
            public_key = fd.read()
            fd.close()
            # import this public key
            key.importKey(public_key)
            # Now encrypt some text against this public key
            content = key.encString(content)
        content_type = mimetypes.guess_type(name)[0] or "application/x-octet-stream"
        if self.gzip and content_type in self.gzip_content_types:
            content = self._compress_string(content)
            # NOTE(review): this mutates the shared self.headers dict, so
            # Content-Encoding: gzip persists for later uploads — confirm.
            self.headers.update({'Content-Encoding': 'gzip'})
        self.headers.update({
            'x-amz-acl': self.acl,
            'Content-Type': content_type,
            'Content-Length' : str(len(content)),
        })
        response = self.connection.put(self.bucket, name, content, self.headers)
        if response.http_response.status not in (200, 206):
            raise IOError("S3StorageError: %s" % response.message)
    def _open(self, name, mode='rb'):
        name = self._clean_name(name)
        remote_file = S3StorageFile(name, self, mode=mode)
        return remote_file
    def _read(self, name, start_range=None, end_range=None):
        """Fetch object bytes, optionally as an HTTP byte-range request."""
        name = self._clean_name(name)
        if start_range is None:
            headers = {}
        else:
            headers = {'Range': 'bytes=%s-%s' % (start_range, end_range)}
        response = self.connection.get(self.bucket, name, headers)
        if response.http_response.status not in (200, 206):
            raise IOError("S3StorageError: %s" % response.message)
        headers = response.http_response.msg
        if self.encrypt:
            # Read in a private key
            fd = open(settings.CRYPTO_KEYS_PRIVATE, "rb")
            private_key = fd.read()
            fd.close()
            # Create a key object, and auto-import private key
            key = self.crypto_key(private_key)
            # Decrypt this file
            response.object.data = key.decString(response.object.data)
        return response.object.data, headers.get('etag', None), headers.get('content-range', None)
    def _save(self, name, content):
        name = self._clean_name(name)
        content.open()
        if hasattr(content, 'chunks'):
            content_str = ''.join(chunk for chunk in content.chunks())
        else:
            content_str = content.read()
        self._put_file(name, content_str)
        return name
    def delete(self, name):
        name = self._clean_name(name)
        response = self.connection.delete(self.bucket, name)
        if response.http_response.status != 204:
            raise IOError("S3StorageError: %s" % response.message)
    def exists(self, name):
        name = self._clean_name(name)
        # Use the preloaded listing when available to avoid a HEAD request.
        if self.entries:
            return name in self.entries
        response = self.connection._make_request('HEAD', self.bucket, name)
        return response.status == 200
    def size(self, name):
        name = self._clean_name(name)
        if self.entries:
            entry = self.entries.get(name)
            if entry:
                return entry.size
            return 0
        response = self.connection._make_request('HEAD', self.bucket, name)
        content_length = response.getheader('Content-Length')
        return content_length and int(content_length) or 0
    def url(self, name):
        name = self._clean_name(name)
        if QUERYSTRING_ACTIVE:
            # Signed, expiring URL.
            return self.generator.generate_url('GET', self.bucket, name)
        else:
            # Plain public URL.
            return self.generator.make_bare_url(self.bucket, name)
    def modified_time(self, name):
        try:
            from dateutil import parser, tz
        except ImportError:
            raise NotImplementedError()
        name = self._clean_name(name)
        if self.entries:
            last_modified = self.entries.get(name).last_modified
        else:
            response = self.connection._make_request('HEAD', self.bucket, name)
            last_modified = response.getheader('Last-Modified')
        # convert to string to date
        last_modified_date = parser.parse(last_modified)
        # if the date has no timzone, assume UTC
        if last_modified_date.tzinfo == None:
            last_modified_date = last_modified_date.replace(tzinfo=tz.tzutc())
        # convert date to local time w/o timezone
        return last_modified_date.astimezone(tz.tzlocal()).replace(tzinfo=None)
    ## UNCOMMENT BELOW IF NECESSARY
    #def get_available_name(self, name):
    #    """ Overwrite existing file with the same name. """
    #    name = self._clean_name(name)
    #    return name
class PreloadingS3Storage(S3Storage):
    """Subclass of S3Storage with no behavioral changes; presumably kept
    for backwards compatibility with the AWS_PRELOAD_METADATA setting."""
    pass
class S3StorageFile(File):
    """File-like wrapper that reads/writes an S3 object through S3Storage."""
    def __init__(self, name, storage, mode):
        self._name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO()
        # Byte offset used for sequential ranged reads.
        self.start_range = 0
    @property
    def size(self):
        # Lazily fetch and cache the object size from S3.
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self._name)
        return self._size
    def read(self, num_bytes=None):
        # num_bytes=None reads the whole object; otherwise a ranged GET
        # starting at the current offset.
        if num_bytes is None:
            args = []
            self.start_range = 0
        else:
            args = [self.start_range, self.start_range+num_bytes-1]
        data, etags, content_range = self._storage._read(self._name, *args)
        if content_range is not None:
            # Parse "bytes start-end/size" to advance the read offset.
            current_range, size = content_range.split(' ', 1)[1].split('/', 1)
            start_range, end_range = current_range.split('-', 1)
            self._size, self.start_range = int(size), int(end_range)+1
        self.file = StringIO(data)
        return self.file.getvalue()
    def write(self, content):
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        # Buffer locally; the actual upload happens in close().
        self.file = StringIO(content)
        self._is_dirty = True
    def close(self):
        # Flush buffered writes back to S3 before closing.
        if self._is_dirty:
            self._storage._put_file(self._name, self.file.getvalue())
        self.file.close()
| rnoldo/django-avatar | storages/backends/s3.py | Python | bsd-3-clause | 10,680 |
"""Gradient descent
"""
import numpy as np
from frankenstein.tools.perf_utils import TIMER
from pyscf.lib import logger
""" Helper functions
"""
def get_gHp_fd(get_grad, p, order=1, eps=1.E-4):
    """Finite-difference approximation of the gradient-Hessian product.

    Args:
        get_grad (callable): grad(p) --> gradient for a direction p.
        p (np.ndarray): initial gradient / perturbation direction.
        order (int, default=1): 1 --> forward FD (err ~ O(eps));
            2 --> central FD (err ~ O(eps^2)).
        eps (float, default=1.E-4): strength of the perturbation.
    """
    forward = get_grad(eps*p)
    if order == 1:
        return 2. * (forward - p) / eps
    elif order == 2:
        backward = get_grad(-eps*p)
        return (forward - backward) / eps
    raise ValueError("Invalid order (must be 1 or 2)!")
# Newton-raphson (for debug)
class NR:
    """Newton-Raphson step driver with a finite-difference Hessian.

    Intended for debugging: the Hessian is built column-by-column by
    perturbing each orbital-rotation parameter, which is O(ov_size)
    gradient evaluations per step.
    """
    def __init__(self, mf, eps=1.E-3, fd=2):
        self.verbose = mf.verbose
        self.stdout = mf.stdout
        self.comment = ""
        # eps: FD perturbation strength; fd: FD order (1=forward, 2=central).
        self.eps = eps
        self.fd = fd
        try:
            stdout = mf.stdout
        except:
            stdout = None
        self.timer = TIMER(4, stdout=stdout)
        self.iteration = 0
    def next_step(self, mf):
        """Take one damped Newton step with an Armijo backtracking line search."""
        f = mf.get_value_gdm()
        g = mf.get_grad_gdm()
        # build fd hessian
        # dphi(i, eps): gradient after perturbing parameter i by eps.
        def dphi(i, eps):
            mf.back_to_origin()
            mf.ov = np.zeros([mf.ov_size])
            mf.ov[i] = eps
            mf.update_all()
            mf.ov[i] = 0.
            return mf.get_grad_gdm()
        self.timer.start(0)
        mf.save_new_origin()
        H = np.zeros([mf.ov_size]*2)
        for i in range(mf.ov_size):
            if self.fd == 1:
                H[i] = (dphi(i,self.eps) - g) / self.eps
            elif self.fd == 2:
                H[i] = (dphi(i,self.eps) - dphi(i,-self.eps)) / (2.*self.eps)
            else:
                raise ValueError("fd must be 1 or 2.")
        mf.back_to_origin()
        self.timer.stop(0)
        # get raw NR step
        self.timer.start(1)
        # Small level shift keeps the (possibly singular) Hessian invertible.
        lbd = 1.E-5
        du = -np.linalg.solve(H+lbd*np.eye(H.shape[1]), g)
        self.timer.stop(1)
        # line search
        # fc counts line-search function evaluations (closure over a list).
        fc = [0]
        def phi(alp):
            fc[0] += 1
            mf.back_to_origin()
            mf.ov = alp * mf.regularize_step_gdm(du)
            mf.update_all(skip_grad=True)
            return mf.get_value_gdm()
        self.timer.start(2)
        mf.save_new_origin()
        fold = f
        dphi0 = g @ du
        # NOTE(review): `scopt_linsrc` is not imported in the visible module
        # scope — likely scipy.optimize.linesearch; confirm the import.
        alp, fnew = scopt_linsrc.scalar_search_armijo(
            phi, fold, dphi0, c1=1.E-4, alpha0=1.)
        self.timer.stop(2)
        fc = fc[0]
        if alp is None:
            raise RuntimeError("Line search failed.")
        if fc == 1:
            self.comment = "NR"
        else:
            self.comment = "LnSr (%d,%.2f)"%(fc,alp)
        self.timer.start(3)
        mf.update_gdm()
        self.timer.stop(3)
        self.iteration += 1
    def report_timing(self):
        self.timer.report(tnames=["hess", "linsolve", "linsrch", "grad"])
# Direct minimization (for debug)
class DM:
    """Direct minimization along the gradient direction (debug tool).

    method="bf": brute-force scan of step sizes in `bounds`;
    method="interpolate": sample a few points and interpolate/fit,
    then minimize the surrogate on a fine grid.
    """
    def __init__(self, mf, bounds=[-1,0], method="bf", plot=False):
        # NOTE(review): mutable default `bounds` — harmless here since it
        # is only read (min/max/arange), but fragile if ever mutated.
        if method == "bf":
            self.alps = np.arange(*bounds, 0.05)
        elif method == "interpolate":
            self.amin = min(bounds)
            self.amax = max(bounds)
            # ninter: interpolation sample points; neval: surrogate grid size.
            self.ninter = 5
            self.neval = 100
        else:
            raise ValueError("Unknown method '%s'." % method)
        self.method = method
        self.plot = plot
        self.verbose = mf.verbose
        self.stdout = mf.stdout
        self.comment = ""
        try:
            stdout = mf.stdout
        except:
            stdout = None
        self.timer = TIMER(2, stdout=stdout)
        self.iteration = 0
    def next_step(self, mf):
        """Scan step sizes along the gradient and take the lowest-energy one."""
        from scipy import interpolate as itplt
        from matplotlib import pyplot as plt
        g = mf.get_grad_gdm()
        # phi(alp): energy after stepping alp*g from the current origin.
        def phi(alp):
            mf.back_to_origin()
            mf.ov = alp * g
            mf.update_all(skip_grad=True)
            mf.ov = np.zeros(mf.ov_size)
            return mf.get_value_gdm()
        mf.save_new_origin()
        E0 = mf.get_value_gdm()
        self.timer.start(0)
        if self.method == "bf":
            alps = self.alps
            Es = np.asarray([phi(alp) for alp in alps]) - E0
        elif self.method == "interpolate":
            amin = self.amin
            amax = self.amax
            err_g = np.mean(g**2)**0.5
            if err_g > 1.E-3:
                # Far from convergence: spline through ninter samples.
                xs = np.linspace(amin, amax, self.ninter)
                ys = np.asarray([phi(x) for x in xs])
                xyrep = itplt.splrep(xs, ys)
                fp = lambda x: itplt.splev(x, xyrep)
            else:
                # Near convergence: a quadratic fit through 3 points suffices.
                xs = np.linspace(amin, amax, 3)
                ys = np.asarray([phi(x) for x in xs])
                p = np.polyfit(xs, ys, 2)
                fp = np.poly1d(p)
            alps = np.linspace(amin, amax, self.neval)
            Es = fp(alps)
        idmin = np.argmin(Es)
        alp = alps[idmin]
        E = Es[idmin]
        self.timer.stop(0)
        if self.plot:
            plt.plot(alps, Es, "-")
            if self.method == "interpolate": plt.plot(xs, ys, "o")
            plt.plot(alp, E, "rx")
            plt.show()
        self.comment = "alp = % .2f" % alp
        self.timer.start(1)
        # Commit the winning step.
        mf.back_to_origin()
        mf.ov = alp * g
        mf.update_all()
        self.timer.stop(1)
        self.iteration += 1
    def report_timing(self):
        self.timer.report(["lnsrch", "update me"])
# Direct inversion of iterative subspace (DIIS)
from pyscf.lib.diis import DIIS as pyDIIS
class DIIS:
    """Wraps pyscf's DIIS extrapolation as an SCF step driver."""
    def __init__(self, mf, ndiis=50, diis_start=1):
        self.adiis = pyDIIS()
        # ndiis: max subspace size; diis_start: min vectors before extrapolating.
        self.adiis.space = ndiis
        self.adiis.min_space = diis_start
        self.iteration = 0
        self.comment = ""
        try:
            stdout = mf.stdout
        except:
            stdout = None
        self.timer = TIMER(4, stdout=stdout)
    def next_step(self, mf):
        """Extrapolate the Fock matrix with DIIS and take a Roothaan step."""
        self.iteration += 1
        self.timer.start(0)
        f = mf.get_fock_diis()
        ferr = mf.get_err_diis()
        self.timer.stop(0)
        self.timer.start(1)
        f = self.adiis.update(f, ferr)
        self.timer.stop(1)
        self.timer.start(2)
        # Maximum-overlap method kicks in once mf.mom_start is reached.
        if hasattr(mf, "mom_start"):
            mom = self.iteration >= mf.mom_start
        else:
            mom = False
        comment = mf.update_diis(f, mom=mom)
        self.timer.stop(2)
        self.timer.start(3)
        mf.update_all()
        self.timer.stop(3)
        # NOTE(review): iteration is incremented above, so this condition is
        # always true and "Roothaan" is never reported — confirm intent.
        self.comment = "DIIS" if self.iteration > 0 else "Roothaan"
        self.comment += " %s" % comment
    def report_timing(self):
        self.timer.report(tnames=["diis prep", "diis extrap", "roothaan",
            "fock build"])
from collections import OrderedDict
from django.contrib.auth.models import AnonymousUser
from kitsune.questions.forms import NewQuestionForm, WatchQuestionForm
from kitsune.questions.tests import TestCaseBase
from kitsune.users.tests import UserFactory
from nose.tools import eq_
class WatchQuestionFormTests(TestCaseBase):
    """Tests for WatchQuestionForm."""

    def test_anonymous_watch_with_email(self):
        """An anonymous user who supplies an email can watch a question."""
        data = {"email": "wo@ot.com", "event_type": "reply"}
        form = WatchQuestionForm(AnonymousUser(), data=data)
        assert form.is_valid()
        eq_("wo@ot.com", form.cleaned_data["email"])

    def test_anonymous_watch_without_email(self):
        """An anonymous user must supply an email address."""
        data = {"event_type": "reply"}
        form = WatchQuestionForm(AnonymousUser(), data=data)
        assert not form.is_valid()
        eq_("Please provide an email.", form.errors["email"][0])

    def test_registered_watch_with_email(self):
        """A supplied email is ignored for registered users."""
        data = {"email": "wo@ot.com", "event_type": "reply"}
        form = WatchQuestionForm(UserFactory(), data=data)
        assert form.is_valid()
        assert not form.cleaned_data["email"]

    def test_registered_watch_without_email(self):
        """Registered users do not need to supply an email address."""
        data = {"event_type": "reply"}
        form = WatchQuestionForm(UserFactory(), data=data)
        assert form.is_valid()
class TestNewQuestionForm(TestCaseBase):
    """Tests for the NewQuestionForm"""

    def setUp(self):
        super(TestNewQuestionForm, self).setUp()

    def _desktop_product(self):
        """Return the desktop product config shared by several tests.

        Previously this dict was duplicated verbatim in two test methods;
        it is factored out here so the fixtures cannot drift apart.
        """
        return {
            "key": "desktop",
            "name": "Firefox on desktop",
            "categories": OrderedDict(
                [
                    (
                        "cookies",
                        {
                            "name": "Cookies",
                            "topic": "cookies",
                            "tags": ["cookies"],
                        },
                    )
                ]
            ),
            "extra_fields": ["troubleshooting", "ff_version", "os", "plugins"],
        }

    def test_metadata_keys(self):
        """Test metadata_field_keys property."""
        # Test the default form
        form = NewQuestionForm()
        expected = ["category", "useragent"]
        actual = form.metadata_field_keys
        eq_(expected, actual)

        # Test the form with a product
        form = NewQuestionForm(product=self._desktop_product())
        expected = ["troubleshooting", "ff_version", "os", "plugins", "useragent", "category"]
        actual = form.metadata_field_keys
        eq_(sorted(expected), sorted(actual))

    def test_cleaned_metadata(self):
        """Test the cleaned_metadata property."""
        # Test with no metadata
        data = {"title": "Lorem", "content": "ipsum", "email": "t@t.com"}
        product = self._desktop_product()
        form = NewQuestionForm(product=product, data=data)
        form.is_valid()
        expected = {}
        actual = form.cleaned_metadata
        eq_(expected, actual)

        # Test with metadata
        data["os"] = "Linux"
        form = NewQuestionForm(product=product, data=data)
        form.is_valid()
        expected = {"os": "Linux"}
        actual = form.cleaned_metadata
        eq_(expected, actual)

        # An empty metadata value should be dropped from cleaned_metadata
        data["ff_version"] = ""
        form = NewQuestionForm(product=product, data=data)
        form.is_valid()
        expected = {"os": "Linux"}
        actual = form.cleaned_metadata
        eq_(expected, actual)
| mozilla/kitsune | kitsune/questions/tests/test_forms.py | Python | bsd-3-clause | 3,899 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for DALTON output files"""
from __future__ import print_function
import numpy
from . import logfileparser
from . import utils
class DALTON(logfileparser.Logfile):
"""A DALTON log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(DALTON, self).__init__(logname="DALTON", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "DALTON log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'DALTON("%s")' % (self.filename)
    def normalisesym(self, label):
        """Normalise the symmetries used by DALTON.

        DALTON already prints symmetry labels in the form cclib expects,
        so the label is returned unchanged.
        """
        # It appears that DALTON is using the correct labels.
        return label
def before_parsing(self):
# Used to decide whether to wipe the atomcoords clean.
self.firststdorient = True
# Use to track which section/program output we are parsing,
# since some programs print out the same headers, which we
# would like to use as triggers.
self.section = None
# If there is no symmetry, assume this.
self.symlabels = ['Ag']
# Is the basis set from a single library file? This is true
# when the first line is BASIS, false for INTGRL/ATOMBASIS.
self.basislibrary = True
def parse_geometry(self, lines):
"""Parse DALTON geometry lines into an atomcoords array."""
coords = []
for lin in lines:
# Without symmetry there are simply four columns, and with symmetry
# an extra label is printed after the atom type.
cols = lin.split()
if cols[1][0] == "_":
xyz = cols[2:]
else:
xyz = cols[1:]
# The assumption is that DALTON always print in atomic units.
xyz = [utils.convertor(float(x), 'bohr', 'Angstrom') for x in xyz]
coords.append(xyz)
return coords
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# extract the version number first
if line[4:30] == "This is output from DALTON":
if line.split()[5] == "release" or line.split()[5] == "(Release":
self.metadata["package_version"] = line.split()[6][6:]
else:
self.metadata["package_version"] = line.split()[5]
# Is the basis set from a single library file, or is it
# manually specified? See before_parsing().
if line[:6] == 'INTGRL'or line[:9] == 'ATOMBASIS':
self.basislibrary = False
# This section at the start of geometry optimization jobs gives us information
# about optimization targets (geotargets) and possibly other things as well.
# Notice how the number of criteria required to converge is set to 2 here,
# but this parameter can (probably) be tweaked in the input.
#
# Chosen parameters for *OPTIMI :
# -------------------------------
#
# Default 1st order method will be used: BFGS update.
# Optimization will be performed in redundant internal coordinates (by default).
# Model Hessian will be used as initial Hessian.
# The model Hessian parameters of Roland Lindh will be used.
#
#
# Trust region method will be used to control step (default).
#
# Convergence threshold for gradient set to : 1.00D-04
# Convergence threshold for energy set to : 1.00D-06
# Convergence threshold for step set to : 1.00D-04
# Number of convergence criteria set to : 2
#
if line.strip()[:25] == "Convergence threshold for":
if not hasattr(self, 'geotargets'):
self.geotargets = []
self.geotargets_names = []
target = self.float(line.split()[-1])
name = line.strip()[25:].split()[0]
self.geotargets.append(target)
self.geotargets_names.append(name)
# This is probably the first place where atomic symmetry labels are printed,
# somewhere afer the SYMGRP point group information section. We need to know
# which atom is in which symmetry, since this influences how some things are
# print later on. We can also get some generic attributes along the way.
#
# Isotopic Masses
# ---------------
#
# C _1 12.000000
# C _2 12.000000
# C _1 12.000000
# C _2 12.000000
# ...
#
# Note that when there is no symmetry there are only two columns here.
#
# It is also a good idea to keep in mind that DALTON, with symmetry on, operates
# in a specific point group, so symmetry atoms have no internal representation.
# Therefore only atoms marked as "_1" or "#1" in other places are actually
# represented in the model. The symmetry atoms (higher symmetry indices) are
# generated on the fly when writing the output. We will save the symmetry indices
# here for later use.
#
# Additional note: the symmetry labels are printed only for atoms that have
# symmetry images... so assume "_1" if a label is missing. For example, there will
# be no label for atoms on an axes, such as the oxygen in water in C2v:
#
# O 15.994915
# H _1 1.007825
# H _2 1.007825
#
if line.strip() == "Isotopic Masses":
self.skip_lines(inputfile, ['d', 'b'])
# Since some symmetry labels may be missing, read in all lines first.
lines = []
line = next(inputfile)
while line.strip():
lines.append(line)
line = next(inputfile)
# Split lines into columsn and dd any missing symmetry labels, if needed.
lines = [l.split() for l in lines]
if any([len(l) == 3 for l in lines]):
for il, l in enumerate(lines):
if len(l) == 2:
lines[il] = [l[0], "_1", l[1]]
atomnos = []
symmetry_atoms = []
atommasses = []
for cols in lines:
cols0 = ''.join([i for i in cols[0] if not i.isdigit()]) #remove numbers
atomnos.append(self.table.number[cols0])
if len(cols) == 3:
symmetry_atoms.append(int(cols[1][1]))
atommasses.append(float(cols[2]))
else:
atommasses.append(float(cols[1]))
self.set_attribute('atomnos', atomnos)
self.set_attribute('atommasses', atommasses)
self.set_attribute('natom', len(atomnos))
self.set_attribute('natom', len(atommasses))
# Save this for later if there were any labels.
self.symmetry_atoms = symmetry_atoms or None
# This section is close to the beginning of the file, and can be used
# to parse natom, nbasis and atomnos. We also construct atombasis here,
# although that is symmetry-dependent (see inline comments). Note that
# DALTON operates on the idea of atom type, which are not necessarily
# unique element-wise.
#
# Atoms and basis sets
# --------------------
#
# Number of atom types : 6
# Total number of atoms: 20
#
# Basis set used is "STO-3G" from the basis set library.
#
# label atoms charge prim cont basis
# ----------------------------------------------------------------------
# C 6 6.0000 15 5 [6s3p|2s1p]
# H 4 1.0000 3 1 [3s|1s]
# C 2 6.0000 15 5 [6s3p|2s1p]
# H 2 1.0000 3 1 [3s|1s]
# C 2 6.0000 15 5 [6s3p|2s1p]
# H 4 1.0000 3 1 [3s|1s]
# ----------------------------------------------------------------------
# total: 20 70.0000 180 60
# ----------------------------------------------------------------------
#
# Threshold for neglecting AO integrals: 1.00D-12
#
if line.strip() == "Atoms and basis sets":
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
assert "Number of atom types" in line
self.ntypes = int(line.split()[-1])
line = next(inputfile)
assert "Total number of atoms:" in line
self.set_attribute("natom", int(line.split()[-1]))
# When using the INTGRL keyword and not pulling from the
# basis set library, the "Basis set used" line doesn't
# appear.
if not self.basislibrary:
self.skip_line(inputfile, 'b')
else:
#self.skip_lines(inputfile, ['b', 'basisname', 'b'])
line = next(inputfile)
line = next(inputfile)
self.metadata["basis_set"] = line.split()[4].strip('\"')
line = next(inputfile)
line = next(inputfile)
cols = line.split()
# Detecting which columns things are in will be somewhat more robust
# to formatting changes in the future.
iatoms = cols.index('atoms')
icharge = cols.index('charge')
icont = cols.index('cont')
self.skip_line(inputfile, 'dashes')
atomnos = []
atombasis = []
nbasis = 0
for itype in range(self.ntypes):
line = next(inputfile)
cols = line.split()
atoms = int(cols[iatoms])
charge = float(cols[icharge])
assert int(charge) == charge
charge = int(charge)
cont = int(cols[icont])
for at in range(atoms):
atomnos.append(charge)
# If symmetry atoms are present, these will have basis functions
# printed immediately after the one unique atom, so for all
# practical purposes cclib can assume the ordering in atombasis
# follows this out-of order scheme to match the output.
if self.symmetry_atoms:
# So we extend atombasis only for the unique atoms (with a
# symmetry index of 1), interleaving the basis functions
# for this atoms with basis functions for all symmetry atoms.
if self.symmetry_atoms[at] == 1:
nsyms = 1
while (at + nsyms < self.natom) and self.symmetry_atoms[at + nsyms] == nsyms + 1:
nsyms += 1
for isym in range(nsyms):
istart = nbasis + isym
iend = nbasis + cont*nsyms + isym
atombasis.append(list(range(istart, iend, nsyms)))
nbasis += cont*nsyms
else:
atombasis.append(list(range(nbasis, nbasis + cont)))
nbasis += cont
self.set_attribute('atomnos', atomnos)
self.set_attribute('atombasis', atombasis)
self.set_attribute('nbasis', nbasis)
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
self.set_attribute('natom', int(line.split()[iatoms]))
self.set_attribute('nbasis', int(line.split()[icont]))
self.skip_line(inputfile, 'dashes')
# The Gaussian exponents and contraction coefficients are printed for each primitive
# and then the contraction information is printed separately (see below) Both segmented
# and general contractions are used, but we can parse them the same way since zeros are
# inserted for primitives that are not used. However, no atom index is printed here
# so we don't really know when a new atom is started without using information
# from other section (we should already have atombasis parsed at this point).
#
# Orbital exponents and contraction coefficients
# ----------------------------------------------
#
#
# C #1 1s 1 71.616837 0.1543 0.0000
# seg. cont. 2 13.045096 0.5353 0.0000
# 3 3.530512 0.4446 0.0000
# 4 2.941249 0.0000 -0.1000
# ...
#
# Here is a corresponding fragment for general contractions:
#
# C 1s 1 33980.000000 0.0001 -0.0000 0.0000 0.0000 0.0000
# 0.0000 0.0000 0.0000 0.0000
# gen. cont. 2 5089.000000 0.0007 -0.0002 0.0000 0.0000 0.0000
# 0.0000 0.0000 0.0000 0.0000
# 3 1157.000000 0.0037 -0.0008 0.0000 0.0000 0.0000
# 0.0000 0.0000 0.0000 0.0000
# 4 326.600000 0.0154 -0.0033 0.0000 0.0000 0.0000
# ...
#
if line.strip() == "Orbital exponents and contraction coefficients":
self.skip_lines(inputfile, ['d', 'b', 'b'])
# Here we simply want to save the numbers defining each primitive for later use,
# where the first number is the exponent, and the rest are coefficients which
# should be zero if the primitive is not used in a contraction. This list is
# symmetry agnostic, although primitives/contractions are not generally.
self.primitives = []
prims = []
line = next(inputfile)
while line.strip():
# Each contraction/section is separated by a blank line, and at the very
# end there is an extra blank line.
while line.strip():
# For generalized contraction it is typical to see the coefficients wrapped
# to new lines, so we must collect them until we are sure a primitive starts.
if line[:30].strip():
if prims:
self.primitives.append(prims)
prims = []
prims += [float(x) for x in line[20:].split()]
line = next(inputfile)
line = next(inputfile)
# At the end we have the final primitive to save.
self.primitives.append(prims)
# This is the corresponding section to the primitive definitions parsed above, so we
# assume those numbers are available in the variable 'primitives'. Here we read in the
# indicies of primitives, which we use to construct gbasis.
#
# Contracted Orbitals
# -------------------
#
# 1 C 1s 1 2 3 4 5 6 7 8 9 10 11 12
# 2 C 1s 1 2 3 4 5 6 7 8 9 10 11 12
# 3 C 1s 10
# 4 C 1s 11
# ...
#
# Here is an fragment with symmetry labels:
#
# ...
# 1 C #1 1s 1 2 3
# 2 C #2 1s 7 8 9
# 3 C #1 1s 4 5 6
# ...
#
if line.strip() == "Contracted Orbitals":
self.skip_lines(inputfile, ['d', 'b'])
# This is the reverse of atombasis, so that we can easily map from a basis functions
# to the corresponding atom for use in the loop below.
basisatoms = [None for i in range(self.nbasis)]
for iatom in range(self.natom):
for ibasis in self.atombasis[iatom]:
basisatoms[ibasis] = iatom
# Since contractions are not generally given in order (when there is symmetry),
# start with an empty list for gbasis.
gbasis = [[] for i in range(self.natom)]
# This will hold the number of contractions already printed for each orbital,
# counting symmetry orbitals separately.
orbitalcount = {}
for ibasis in range(self.nbasis):
line = next(inputfile)
cols = line.split()
# The first columns is always the basis function index, which we can assert.
assert int(cols[0]) == ibasis + 1
# The number of columns is differnet when symmetry is used. If there are further
# complications, it may be necessary to use exact slicing, since the formatting
# of this section seems to be fixed (although columns can be missing). Notice how
# We subtract one from the primitive indices here already to match cclib's
# way of counting from zero in atombasis.
if '#' in line:
sym = cols[2]
orbital = cols[3]
prims = [int(i) - 1 for i in cols[4:]]
else:
sym = None
orbital = cols[2]
prims = [int(i) - 1 for i in cols[3:]]
shell = orbital[0]
subshell = orbital[1].upper()
iatom = basisatoms[ibasis]
# We want to count the number of contractiong already parsed for each orbital,
# but need to make sure to differentiate between atoms and symmetry atoms.
orblabel = str(iatom) + '.' + orbital + (sym or "")
orbitalcount[orblabel] = orbitalcount.get(orblabel, 0) + 1
# Here construct the actual primitives for gbasis, which should be a list
# of 2-tuples containing an exponent an coefficient. Note how we are indexing
# self.primitives from zero although the printed numbering starts from one.
primitives = []
for ip in prims:
p = self.primitives[ip]
exponent = p[0]
coefficient = p[orbitalcount[orblabel]]
primitives.append((exponent, coefficient))
contraction = (subshell, primitives)
if contraction not in gbasis[iatom]:
gbasis[iatom].append(contraction)
self.skip_line(inputfile, 'blank')
self.set_attribute('gbasis', gbasis)
# Since DALTON sometimes uses symmetry labels (Ag, Au, etc.) and sometimes
# just the symmetry group index, we need to parse and keep a mapping between
# these two for later use.
#
# Symmetry Orbitals
# -----------------
#
# Number of orbitals in each symmetry: 25 5 25 5
#
#
# Symmetry Ag ( 1)
#
# 1 C 1s 1 + 2
# 2 C 1s 3 + 4
# ...
#
if line.strip() == "Symmetry Orbitals":
self.skip_lines(inputfile, ['d', 'b'])
line = inputfile.next()
self.symcounts = [int(c) for c in line.split(':')[1].split()]
self.symlabels = []
for sc in self.symcounts:
self.skip_lines(inputfile, ['b', 'b'])
# If the number of orbitals for a symmetry is zero, the printout
# is different (see MP2 unittest logfile for an example).
line = inputfile.next()
if sc == 0:
assert "No orbitals in symmetry" in line
else:
assert line.split()[0] == "Symmetry"
self.symlabels.append(line.split()[1])
self.skip_line(inputfile, 'blank')
for i in range(sc):
orbital = inputfile.next()
if "Starting in Wave Function Section (SIRIUS)" in line:
self.section = "SIRIUS"
# Orbital specifications
# ======================
# Abelian symmetry species All | 1 2 3 4
# | Ag Au Bu Bg
# --- | --- --- --- ---
# Total number of orbitals 60 | 25 5 25 5
# Number of basis functions 60 | 25 5 25 5
#
# ** Automatic occupation of RKS orbitals **
#
# -- Initial occupation of symmetries is determined from extended Huckel guess.
# -- Initial occupation of symmetries is :
# @ Occupied SCF orbitals 35 | 15 2 15 3
#
# Maximum number of Fock iterations 0
# Maximum number of DIIS iterations 60
# Maximum number of QC-SCF iterations 60
# Threshold for SCF convergence 1.00D-05
# This is a DFT calculation of type: B3LYP
# ...
#
if "Total number of orbitals" in line:
# DALTON 2015 adds a @ in front of number of orbitals
chomp = line.split()
index = 4
if "@" in chomp:
index = 5
self.set_attribute("nbasis", int(chomp[index]))
self.nmo_per_symmetry = list(map(int, chomp[index+2:]))
assert self.nbasis == sum(self.nmo_per_symmetry)
if "Threshold for SCF convergence" in line:
if not hasattr(self, "scftargets"):
self.scftargets = []
scftarget = self.float(line.split()[-1])
self.scftargets.append([scftarget])
# Wave function specification
# ============================
# @ Wave function type >>> KS-DFT <<<
# @ Number of closed shell electrons 70
# @ Number of electrons in active shells 0
# @ Total charge of the molecule 0
#
# @ Spin multiplicity and 2 M_S 1 0
# @ Total number of symmetries 4 (point group: C2h)
# @ Reference state symmetry 1 (irrep name : Ag )
#
# This is a DFT calculation of type: B3LYP
# ...
#
if line.strip() == "Wave function specification":
self.skip_line(inputfile, 'e')
line = next(inputfile)
# Must be a coupled cluster calculation.
if line.strip() == '':
self.skip_lines(inputfile, ['b', 'Coupled Cluster', 'b'])
else:
assert "wave function" in line.lower()
line = next(inputfile)
assert "Number of closed shell electrons" in line
self.paired_electrons = int(line.split()[-1])
line = next(inputfile)
assert "Number of electrons in active shells" in line
self.unpaired_electrons = int(line.split()[-1])
line = next(inputfile)
assert "Total charge of the molecule" in line
self.set_attribute("charge", int(line.split()[-1]))
self.skip_line(inputfile, 'b')
line = next(inputfile)
assert "Spin multiplicity and 2 M_S" in line
self.set_attribute("mult", int(line.split()[-2]))
# Dalton only has ROHF, no UHF
if self.mult != 1:
self.metadata["unrestricted"] = True
if not hasattr(self, 'homos'):
self.set_attribute('homos', [(self.paired_electrons // 2) - 1])
if self.unpaired_electrons > 0:
self.homos.append(self.homos[0])
self.homos[0] += self.unpaired_electrons
# *********************************************
# ***** DIIS optimization of Hartree-Fock *****
# *********************************************
#
# C1-DIIS algorithm; max error vectors = 8
#
# Automatic occupation of symmetries with 70 electrons.
#
# Iter Total energy Error norm Delta(E) SCF occupation
# -----------------------------------------------------------------------------
# K-S energy, electrons, error : -46.547567739269 69.9999799123 -2.01D-05
# @ 1 -381.645762476 4.00D+00 -3.82D+02 15 2 15 3
# Virial theorem: -V/T = 2.008993
# @ MULPOP C _1 0.15; C _2 0.15; C _1 0.12; C _2 0.12; C _1 0.11; C _2 0.11; H _1 -0.15; H _2 -0.15; H _1 -0.14; H _2 -0.14;
# @ C _1 0.23; C _2 0.23; H _1 -0.15; H _2 -0.15; C _1 0.08; C _2 0.08; H _1 -0.12; H _2 -0.12; H _1 -0.13; H _2 -0.13;
# -----------------------------------------------------------------------------
# K-S energy, electrons, error : -46.647668038900 69.9999810430 -1.90D-05
# @ 2 -381.949410128 1.05D+00 -3.04D-01 15 2 15 3
# Virial theorem: -V/T = 2.013393
# ...
#
# With and without symmetry, the "Total energy" line is shifted a little.
if self.section == "SIRIUS" and "Iter" in line and "Total energy" in line:
iteration = 0
converged = False
values = []
if not hasattr(self, "scfvalues"):
self.scfvalues = []
while not converged:
try:
line = next(inputfile)
except StopIteration:
self.logger.warning('File terminated before end of last SCF!')
break
# each iteration is bracketed by "-------------"
if "-------------------" in line:
iteration += 1
continue
# the first hit of @ n where n is the current iteration
strcompare = "@{0:>3d}".format(iteration)
if strcompare in line:
temp = line.split()
error_norm = self.float(temp[3])
values.append([error_norm])
if line[0] == "@" and "converged in" in line:
converged = True
# It seems DALTON does change the SCF convergence criteria during a
# geometry optimization, but also does not print them. So, assume they
# are unchanged and copy the initial values after the first step. However,
# it would be good to check up on this - perhaps it is possible to print.
self.scfvalues.append(values)
if len(self.scfvalues) > 1:
self.scftargets.append(self.scftargets[-1])
# DALTON organizes the energies by symmetry, so we need to parse first,
# and then sort the energies (and labels) before we store them.
#
# The formatting varies depending on RHF/DFT and/or version. Here is
# an example from a DFT job:
#
# *** SCF orbital energy analysis ***
#
# Only the five lowest virtual orbital energies printed in each symmetry.
#
# Number of electrons : 70
# Orbital occupations : 15 2 15 3
#
# Sym Kohn-Sham orbital energies
#
# 1 Ag -10.01616533 -10.00394288 -10.00288640 -10.00209612 -9.98818062
# -0.80583154 -0.71422407 -0.58487249 -0.55551093 -0.50630125
# ...
#
# Here is an example from an RHF job that only has symmetry group indices:
#
# *** SCF orbital energy analysis ***
#
# Only the five lowest virtual orbital energies printed in each symmetry.
#
# Number of electrons : 70
# Orbital occupations : 15 2 15 3
#
# Sym Hartree-Fock orbital energies
#
# 1 -11.04052518 -11.03158921 -11.02882211 -11.02858563 -11.01747921
# -1.09029777 -0.97492511 -0.79988247 -0.76282547 -0.69677619
# ...
#
if self.section == "SIRIUS" and "*** SCF orbital energy analysis ***" in line:
# to get ALL orbital energies, the .PRINTLEVELS keyword needs
# to be at least 0,10 (up from 0,5). I know, obvious, right?
# this, however, will conflict with the scfvalues output that
# changes into some weird form of DIIS debug output.
mosyms = []
moenergies = []
self.skip_line(inputfile, 'blank')
line = next(inputfile)
# There is some extra text between the section header and
# the number of electrons for open-shell calculations.
while "Number of electrons" not in line:
line = next(inputfile)
nelectrons = int(line.split()[-1])
line = next(inputfile)
occupations = [int(o) for o in line.split()[3:]]
nsym = len(occupations)
self.skip_lines(inputfile, ['b', 'header', 'b'])
# now parse nsym symmetries
for isym in range(nsym):
# For unoccupied symmetries, nothing is printed here.
if occupations[isym] == 0:
continue
# When there are exactly five energies printed (on just one line), it seems
# an extra blank line is printed after a block.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
cols = line.split()
# The first line has the orbital symmetry information, but sometimes
# it's the label and sometimes it's the index. There are always five
# energies per line, though, so we can deduce if we have the labels or
# not just the index. In the latter case, we depend on the labels
# being read earlier into the list `symlabels`. Finally, if no symlabels
# were read that implies there is only one symmetry, namely Ag.
if 'A' in cols[1] or 'B' in cols[1]:
sym = self.normalisesym(cols[1])
energies = [float(t) for t in cols[2:]]
else:
if hasattr(self, 'symlabels'):
sym = self.normalisesym(self.symlabels[int(cols[0]) - 1])
else:
assert cols[0] == '1'
sym = "Ag"
energies = [float(t) for t in cols[1:]]
while len(energies) > 0:
moenergies.extend(energies)
mosyms.extend(len(energies)*[sym])
line = next(inputfile)
energies = [float(col) for col in line.split()]
# now sort the data about energies and symmetries. see the following post for the magic
# http://stackoverflow.com/questions/19339/a-transpose-unzip-function-in-python-inverse-of-zip
sdata = sorted(zip(moenergies, mosyms), key=lambda x: x[0])
moenergies, mosyms = zip(*sdata)
self.moenergies = [[]]
self.moenergies[0] = [utils.convertor(moenergy, 'hartree', 'eV') for moenergy in moenergies]
self.mosyms = [[]]
self.mosyms[0] = mosyms
if not hasattr(self, "nmo"):
self.nmo = self.nbasis
if len(self.moenergies[0]) != self.nmo:
self.set_attribute('nmo', len(self.moenergies[0]))
# .-----------------------------------.
# | >>> Final results from SIRIUS <<< |
# `-----------------------------------'
#
#
# @ Spin multiplicity: 1
# @ Spatial symmetry: 1 ( irrep Ag in C2h )
# @ Total charge of molecule: 0
#
# @ Final DFT energy: -382.050716652387
# @ Nuclear repulsion: 445.936979976608
# @ Electronic energy: -827.987696628995
#
# @ Final gradient norm: 0.000003746706
# ...
#
if "Final HF energy" in line and not (hasattr(self, "mpenergies") or hasattr(self, "ccenergies")):
self.metadata["methods"].append("HF")
if "Final DFT energy" in line:
self.metadata["methods"].append("DFT")
if "This is a DFT calculation of type" in line:
self.metadata["functional"] = line.split()[-1]
if "Final DFT energy" in line or "Final HF energy" in line:
if not hasattr(self, "scfenergies"):
self.scfenergies = []
temp = line.split()
self.scfenergies.append(utils.convertor(float(temp[-1]), "hartree", "eV"))
if "@ = MP2 second order energy" in line:
self.metadata["methods"].append("MP2")
energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
self.mpenergies[-1].append(energ)
if "Total CCSD energy:" in line:
self.metadata["methods"].append("CCSD")
energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, "ccenergies"):
self.ccenergies = []
self.ccenergies.append(energ)
if "Total energy CCSD(T)" in line:
self.metadata["methods"].append("CCSD(T)")
energ = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, "ccenergies"):
self.ccenergies = []
self.ccenergies.append(energ)
# The molecular geometry requires the use of .RUN PROPERTIES in the input.
# Note that the second column is not the nuclear charge, but the atom type
# index used internally by DALTON.
#
# Molecular geometry (au)
# -----------------------
#
# C _1 1.3498778652 2.3494125195 0.0000000000
# C _2 -1.3498778652 -2.3494125195 0.0000000000
# C _1 2.6543517307 0.0000000000 0.0000000000
# ...
#
if "Molecular geometry (au)" in line:
if not hasattr(self, "atomcoords"):
self.atomcoords = []
if self.firststdorient:
self.firststdorient = False
self.skip_lines(inputfile, ['d', 'b'])
lines = [next(inputfile) for i in range(self.natom)]
atomcoords = self.parse_geometry(lines)
self.atomcoords.append(atomcoords)
if "Optimization Control Center" in line:
self.section = "OPT"
assert set(next(inputfile).strip()) == set(":")
# During geometry optimizations the geometry is printed in the section
# that is titles "Optimization Control Center". Note that after an optimizations
# finishes, DALTON normally runs another "static property section (ABACUS)",
# so the final geometry will be repeated in atomcoords.
#
# Next geometry (au)
# ------------------
#
# C _1 1.3203201560 2.3174808341 0.0000000000
# C _2 -1.3203201560 -2.3174808341 0.0000000000
# ...
if self.section == "OPT" and line.strip() == "Next geometry (au)":
self.skip_lines(inputfile, ['d', 'b'])
lines = [next(inputfile) for i in range(self.natom)]
coords = self.parse_geometry(lines)
self.atomcoords.append(coords)
# This section contains data for optdone and geovalues, although we could use
# it to double check some atttributes that were parsed before.
#
# Optimization information
# ------------------------
#
# Iteration number : 4
# End of optimization : T
# Energy at this geometry is : -379.777956
# Energy change from last geom. : -0.000000
# Predicted change : -0.000000
# Ratio, actual/predicted change : 0.952994
# Norm of gradient : 0.000058
# Norm of step : 0.000643
# Updated trust radius : 0.714097
# Total Hessian index : 0
#
if self.section == "OPT" and line.strip() == "Optimization information":
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
assert 'Iteration number' in line
iteration = int(line.split()[-1])
line = next(inputfile)
assert 'End of optimization' in line
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(line.split()[-1] == 'T')
# We need a way to map between lines here and the targets stated at the
# beginning of the file in 'Chosen parameters for *OPTIMI (see above),
# and this dictionary facilitates that. The keys are target names parsed
# in that initial section after input processing, and the values are
# substrings that should appear in the lines in this section. Make an
# exception for the energy at iteration zero where there is no gradient,
# and take the total energy for geovalues.
targets_labels = {
'gradient': 'Norm of gradient',
'energy': 'Energy change from last',
'step': 'Norm of step',
}
values = [numpy.nan] * len(self.geotargets)
while line.strip():
if iteration == 0 and "Energy at this geometry" in line:
index = self.geotargets_names.index('energy')
values[index] = self.float(line.split()[-1])
for tgt, lbl in targets_labels.items():
if lbl in line and tgt in self.geotargets_names:
index = self.geotargets_names.index(tgt)
values[index] = self.float(line.split()[-1])
line = next(inputfile)
# If we're missing something above, throw away the partial geovalues since
# we don't want artificial NaNs getting into cclib. Instead, fix the dictionary
# to make things work.
if not numpy.nan in values:
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append(values)
# -------------------------------------------------
# extract the center of mass line
if "Center-of-mass coordinates (a.u.):" in line:
temp = line.split()
reference = [utils.convertor(float(temp[i]), "bohr", "Angstrom") for i in [3, 4, 5]]
if not hasattr(self, 'moments'):
self.moments = [reference]
# -------------------------------------------------
# Extract the dipole moment
if "Dipole moment components" in line:
dipole = numpy.zeros(3)
line = next(inputfile)
line = next(inputfile)
line = next(inputfile)
if not "zero by symmetry" in line:
line = next(inputfile)
line = next(inputfile)
temp = line.split()
for i in range(3):
dipole[i] = float(temp[2]) # store the Debye value
if hasattr(self, 'moments'):
self.moments.append(dipole)
## 'vibfreqs', 'vibirs', and 'vibsyms' appear in ABACUS.
# Vibrational Frequencies and IR Intensities
# ------------------------------------------
#
# mode irrep frequency IR intensity
# ============================================================
# cm-1 hartrees km/mol (D/A)**2/amu
# ------------------------------------------------------------
# 1 A 3546.72 0.016160 0.000 0.0000
# 2 A 3546.67 0.016160 0.024 0.0006
# ...
if "Vibrational Frequencies and IR Intensities" in line:
self.skip_lines(inputfile, ['dashes', 'blank'])
line = next(inputfile)
assert line.strip() == "mode irrep frequency IR intensity"
self.skip_line(inputfile, 'equals')
line = next(inputfile)
assert line.strip() == "cm-1 hartrees km/mol (D/A)**2/amu"
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
# The normal modes are in order of decreasing IR
# frequency, so they can't be added directly to
# attributes; they must be grouped together first, sorted
# in order of increasing frequency, then added to their
# respective attributes.
vibdata = []
while line.strip():
sline = line.split()
vibsym = sline[1]
vibfreq = float(sline[2])
vibir = float(sline[4])
vibdata.append((vibfreq, vibir, vibsym))
line = next(inputfile)
vibdata.sort(key=lambda normalmode: normalmode[0])
self.vibfreqs = [normalmode[0] for normalmode in vibdata]
self.vibirs = [normalmode[1] for normalmode in vibdata]
self.vibsyms = [normalmode[2] for normalmode in vibdata]
# Now extract the normal mode displacements.
self.skip_lines(inputfile, ['b', 'b'])
line = next(inputfile)
assert line.strip() == "Normal Coordinates (bohrs*amu**(1/2)):"
# Normal Coordinates (bohrs*amu**(1/2)):
# --------------------------------------
#
#
# 1 3547 2 3547 3 3474 4 3471 5 3451
# ----------------------------------------------------------------------
#
# C x -0.000319 -0.000314 0.002038 0.000003 -0.001599
# C y -0.000158 -0.000150 -0.001446 0.003719 -0.002576
# C z 0.000000 -0.000000 -0.000000 0.000000 -0.000000
#
# C x 0.000319 -0.000315 -0.002038 0.000003 0.001600
# C y 0.000157 -0.000150 0.001448 0.003717 0.002577
# ...
self.skip_line(inputfile, 'd')
line = next(inputfile)
vibdisps = numpy.empty(shape=(len(self.vibirs), self.natom, 3))
ndisps = 0
while ndisps < len(self.vibirs):
# Skip two blank lines.
line = next(inputfile)
line = next(inputfile)
# Use the header with the normal mode indices and
# frequencies to update where we are.
ndisps_block = (len(line.split()) // 2)
mode_min, mode_max = ndisps, ndisps + ndisps_block
# Skip a line of dashes and a blank line.
line = next(inputfile)
line = next(inputfile)
for w in range(self.natom):
for coord in range(3):
line = next(inputfile)
vibdisps[mode_min:mode_max, w, coord] = [float(i) for i in line.split()[2:]]
# Skip a blank line.
line = next(inputfile)
ndisps += ndisps_block
# The vibrational displacements are in the wrong order;
# reverse them.
self.vibdisps = vibdisps[::-1, :, :]
## 'vibramans'
# Raman related properties for freq. 0.000000 au = Infinity nm
# ---------------------------------------------------------------
#
# Mode Freq. Alpha**2 Beta(a)**2 Pol.Int. Depol.Int. Dep. Ratio
#
# 1 3546.72 0.379364 16.900089 84.671721 50.700268 0.598786
# 2 3546.67 0.000000 0.000000 0.000000 0.000000 0.599550
if "Raman related properties for freq." in line:
self.skip_lines(inputfile, ['d', 'b'])
line = next(inputfile)
assert line[1:76] == "Mode Freq. Alpha**2 Beta(a)**2 Pol.Int. Depol.Int. Dep. Ratio"
self.skip_line(inputfile, 'b')
line = next(inputfile)
vibramans = []
# The Raman intensities appear under the "Pol.Int."
# (polarization intensity) column.
for m in range(len(self.vibfreqs)):
vibramans.append(float(line.split()[4]))
line = next(inputfile)
# All vibrational properties in DALTON appear in reverse
# order.
self.vibramans = vibramans[::-1]
# Static polarizability from **PROPERTIES/.POLARI.
if line.strip() == "Static polarizabilities (au)":
if not hasattr(self, 'polarizabilities'):
self.polarizabilities = []
polarizability = []
self.skip_lines(inputfile, ['d', 'b', 'directions', 'b'])
for _ in range(3):
line = next(inputfile)
polarizability.append(line.split()[1:])
self.polarizabilities.append(numpy.array(polarizability))
# Static and dynamic polarizability from **PROPERTIES/.ALPHA/*ABALNR.
if "Polarizability tensor for frequency" in line:
if not hasattr(self, 'polarizabilities'):
self.polarizabilities = []
polarizability = []
self.skip_lines(inputfile, ['d', 'directions', 'b'])
for _ in range(3):
line = next(inputfile)
polarizability.append(line.split()[1:])
self.polarizabilities.append(numpy.array(polarizability))
# Static and dynamic polarizability from **RESPONSE/*LINEAR.
# This section is *very* general and will need to be expanded later.
# For now, only form the matrix from dipole (length gauge) values.
if "@ FREQUENCY INDEPENDENT SECOND ORDER PROPERTIES" in line:
coord_to_idx = {'X': 0, 'Y': 1, 'Z': 2}
self.skip_line(inputfile, 'b')
line = next(inputfile)
polarizability_diplen = numpy.empty(shape=(3, 3))
while "Time used in linear response calculation is" not in line:
tokens = line.split()
if line.count("DIPLEN") == 2:
assert len(tokens) == 8
if not hasattr(self, 'polarizabilities'):
self.polarizabilities = []
i, j = coord_to_idx[tokens[2][0]], coord_to_idx[tokens[4][0]]
polarizability_diplen[i, j] = self.float(tokens[7])
line = next(inputfile)
polarizability_diplen = utils.symmetrize(polarizability_diplen, use_triangle='upper')
if hasattr(self, 'polarizabilities'):
self.polarizabilities.append(polarizability_diplen)
# Electronic excitations: single residues of the linear
# response equations.
if "Linear Response single residue calculation" in line:
etsyms = []
etenergies = []
# etoscs = []
etsecs = []
symmap = {"T": "Triplet", "F": "Singlet"}
while "End of Dynamic Property Section (RESPONS)" not in line:
line = next(inputfile)
if "Operator symmetry" in line:
do_triplet = line[-2]
if "@ Excited state no:" in line:
etsym = line.split()[9] # -2
etsyms.append(symmap[do_triplet] + "-" + etsym)
self.skip_lines(inputfile, ['d', 'b', 'Excitation energy in a.u.'])
line = next(inputfile)
etenergy = float(line.split()[1])
etenergies.append(etenergy)
while "The dominant contributions" not in line:
line = next(inputfile)
self.skip_line(inputfile, 'b')
line = next(inputfile)
# [0] is the starting (occupied) MO
# [1] is the ending (unoccupied) MO
# [2] and [3] are the excitation/deexcitation coefficients
# [4] is the orbital overlap
# [5] is the ...
# [6] is the ...
# [7] is the ...
assert "I A K_IA K_AI <|I|*|A|> <I^2*A^2> Weight Contrib" in line
self.skip_line(inputfile, 'b')
line = next(inputfile)
sec = []
while line.strip():
chomp = line.split()
startidx = int(chomp[0]) - 1
endidx = int(chomp[1]) - 1
contrib = float(chomp[2])
# Since DALTON is restricted open-shell only,
# there is not distinction between alpha and
# beta spin.
sec.append([(startidx, 0), (endidx, 0), contrib])
line = next(inputfile)
etsecs.append(sec)
self.set_attribute('etsyms', etsyms)
self.set_attribute('etenergies', etenergies)
# self.set_attribute('etoscs', etoscs)
self.set_attribute('etsecs', etsecs)
# TODO:
# aonames
# aooverlaps
# atomcharges
# atomspins
# coreelectrons
# enthalpy
# entropy
# etoscs
# etrotats
# freeenergy
# grads
# hessian
# mocoeffs
# nocoeffs
# nooccnos
# scancoords
# scanenergies
# scannames
# scanparm
# temperature
# vibanharms
# N/A:
# fonames
# fooverlaps
# fragnames
# frags
if __name__ == "__main__":
    # Command-line entry point.  With no arguments, run the module's
    # doctests; with a filename, parse it and print any requested
    # attributes named by the remaining arguments.
    import doctest
    import sys

    import daltonparser

    if len(sys.argv) == 1:
        doctest.testmod(daltonparser, verbose=False)
    else:
        parser = daltonparser.DALTON(sys.argv[1])
        data = parser.parse()
        # Remaining arguments name parsed attributes to display; names
        # that were not parsed are silently skipped (original behavior).
        for attr in sys.argv[2:]:
            if hasattr(data, attr):
                print(getattr(data, attr))
| Schamnad/cclib | src/cclib/parser/daltonparser.py | Python | bsd-3-clause | 52,550 |
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/8.20#ii
"""
import os
try:
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
    """Return the polynomials A_0..A_K from the recurrence in DLMF 8.20(ii).

    A_0 = 1 and A_{k+1} = (1 - 2*k*x)*A_k + x*(x + 1)*A_k'.
    """
    Ak = Poly(1, x)
    polys = [Ak]
    for k in range(K):
        Ak = Poly(1 - 2*k*x, x)*Ak + Poly(x*(x + 1))*Ak.diff()
        polys.append(Ak)
    return polys
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py.
* Do not edit it manually!
*/
"""
def main():
    """Generate the A_k coefficient tables and install them as cephes/expn.h."""
    print(__doc__)
    header_path = os.path.join('..', 'cephes', 'expn.h')
    K = 12
    polys = generate_A(K)

    lines = [WARNING, "#define nA {}\n".format(len(polys))]
    # One static C array of 18-significant-digit coefficients per polynomial.
    for k, poly in enumerate(polys):
        coeffs = ", ".join(str(c.evalf(18)) for c in poly.coeffs())
        lines.append("static const double A{}[] = {{{}}};\n".format(k, coeffs))
    names = ", ".join("A{}".format(k) for k in range(K + 1))
    lines.append("static const double *A[] = {{{}}};\n".format(names))
    degrees = ", ".join(str(poly.degree()) for poly in polys)
    lines.append("static const int Adegs[] = {{{}}};\n".format(degrees))

    # Write to a '.new' temporary first, then rename into place, so an
    # interrupted run never leaves a truncated header behind.
    with open(header_path + '.new', 'w') as f:
        f.writelines(lines)
    os.rename(header_path + '.new', header_path)
# Regenerate the header when invoked as a script.
if __name__ == "__main__":
    main()
| scipy/scipy | scipy/special/_precompute/expn_asy.py | Python | bsd-3-clause | 1,333 |
"""This tutorial introduces the LeNet5 neural network architecture
using Theano. LeNet5 is a convolutional neural network, good for
classifying images. This tutorial shows how to build the architecture,
and comes with all the hyper-parameters you need to reproduce the
paper's MNIST results.
This implementation simplifies the model in the following ways:
- LeNetConvPool doesn't implement location-specific gain and bias parameters
- LeNetConvPool doesn't implement pooling by average, it implements pooling
by max.
- Digit classification is implemented with a logistic regression rather than
an RBF network
- LeNet5 was not fully-connected convolutions at second layer
References:
- Y. LeCun, L. Bottou, Y. Bengio and P. Haffner:
Gradient-Based Learning Applied to Document
Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
"""
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
class LeNetConvPoolLayer(object):
    """Pool Layer of a convolutional network """
    # NOTE(review): Python 2 / old-Theano code; `theano.tensor.signal.downsample`
    # appears to be the pre-`pool` API — confirm against the installed Theano.
    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.
        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights
        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape
        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)
        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)
        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """
        # The declared input depth of the images must match the filters'
        # expected input depth.
        assert image_shape[1] == filter_shape[1]
        self.input = input
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        # (uniform "Xavier"-style bound derived from fan_in/fan_out)
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )
        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)
        # convolve input feature maps with filters
        conv_out = conv.conv2d(
            input=input,
            filters=self.W,
            filter_shape=filter_shape,
            image_shape=image_shape
        )
        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(
            input=conv_out,
            ds=poolsize,
            ignore_border=True
        )
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        # store parameters of this layer
        self.params = [self.W, self.b]
        # keep track of model input
        self.input = input
def evaluate_lenet5(learning_rate=0.1, n_epochs=200,
                    dataset='mnist.pkl.gz',
                    nkerns=[20, 50], batch_size=500):
    """ Demonstrates lenet on MNIST dataset
    :type learning_rate: float
    :param learning_rate: learning rate used (factor for the stochastic
                          gradient)
    :type n_epochs: int
    :param n_epochs: maximal number of epochs to run the optimizer
    :type dataset: string
    :param dataset: path to the dataset used for training /testing (MNIST here)
    :type nkerns: list of ints
    :param nkerns: number of kernels on each layer
    """
    # NOTE(review): Python 2 code (print statements, xrange).  The mutable
    # default `nkerns=[20, 50]` is harmless here because it is never mutated.
    rng = numpy.random.RandomState(23455)
    datasets = load_data(dataset)
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
    n_test_batches = test_set_x.get_value(borrow=True).shape[0]
    # Python 2 integer floor division: any partial final batch is dropped.
    n_train_batches /= batch_size
    n_valid_batches /= batch_size
    n_test_batches /= batch_size
    # debug print left in by the author: shape of one rasterized test image
    print test_set_x.get_value()[0].shape
    # allocate symbolic variables for the data
    index = T.lscalar()  # index to a [mini]batch
    # start-snippet-1
    x = T.matrix('x')   # the data is presented as rasterized images
    y = T.ivector('y')  # the labels are presented as 1D vector of
                        # [int] labels
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'
    # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)
    # to a 4D tensor, compatible with our LeNetConvPoolLayer
    # (28, 28) is the size of MNIST images.
    layer0_input = x.reshape((batch_size, 1, 28, 28))
    # Construct the first convolutional pooling layer:
    # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)
    # maxpooling reduces this further to (24/2, 24/2) = (12, 12)
    # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)
    layer0 = LeNetConvPoolLayer(
        rng,
        input=layer0_input,
        image_shape=(batch_size, 1, 28, 28),
        filter_shape=(nkerns[0], 1, 5, 5),
        poolsize=(2, 2)
    )
    # Construct the second convolutional pooling layer
    # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)
    # maxpooling reduces this further to (8/2, 8/2) = (4, 4)
    # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)
    layer1 = LeNetConvPoolLayer(
        rng,
        input=layer0.output,
        image_shape=(batch_size, nkerns[0], 12, 12),
        filter_shape=(nkerns[1], nkerns[0], 5, 5),
        poolsize=(2, 2)
    )
    # the HiddenLayer being fully-connected, it operates on 2D matrices of
    # shape (batch_size, num_pixels) (i.e matrix of rasterized images).
    # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),
    # or (500, 50 * 4 * 4) = (500, 800) with the default values.
    layer2_input = layer1.output.flatten(2)
    # construct a fully-connected sigmoidal layer
    layer2 = HiddenLayer(
        rng,
        input=layer2_input,
        n_in=nkerns[1] * 4 * 4,
        n_out=500,
        activation=T.tanh
    )
    # classify the values of the fully-connected sigmoidal layer
    layer3 = LogisticRegression(input=layer2.output, n_in=500, n_out=10)
    # the cost we minimize during training is the NLL of the model
    cost = layer3.negative_log_likelihood(y)
    # create a function to compute the mistakes that are made by the model
    test_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    validate_model = theano.function(
        [index],
        layer3.errors(y),
        givens={
            x: valid_set_x[index * batch_size: (index + 1) * batch_size],
            y: valid_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # create a list of all model parameters to be fit by gradient descent
    params = layer3.params + layer2.params + layer1.params + layer0.params
    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)
    # train_model is a function that updates the model parameters by
    # SGD Since this model has many parameters, it would be tedious to
    # manually create an update rule for each model parameter. We thus
    # create the updates list by automatically looping over all
    # (params[i], grads[i]) pairs.
    updates = [
        (param_i, param_i - learning_rate * grad_i)
        for param_i, grad_i in zip(params, grads)
    ]
    train_model = theano.function(
        [index],
        cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )
    # end-snippet-1
    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 10000  # look as this many examples regardless
    patience_increase = 2  # wait this much longer when a new best is
                           # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatche before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch
    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = timeit.default_timer()
    epoch = 0
    done_looping = False
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            # NOTE(review): `iter` shadows the builtin of the same name.
            iter = (epoch - 1) * n_train_batches + minibatch_index
            if iter % 100 == 0:
                print 'training @ iter = ', iter
            cost_ij = train_model(minibatch_index)
            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i
                                     in xrange(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))
                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    #improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * \
                       improvement_threshold:
                        patience = max(patience, iter * patience_increase)
                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter
                    # test it on the test set
                    test_losses = [
                        test_model(i)
                        for i in xrange(n_test_batches)
                    ]
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))
            if patience <= iter:
                done_looping = True
                break
    end_time = timeit.default_timer()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
# Run the full training demo when invoked as a script.
if __name__ == '__main__':
    evaluate_lenet5()
def experiment(state, channel):
    """Hook for an external experiment driver (state/channel signature --
    presumably jobman-style; confirm against the scheduler in use)."""
    evaluate_lenet5(state.learning_rate, dataset=state.dataset)
| webeng/DeepLearningTutorials | code/convolutional_mlp.py | Python | bsd-3-clause | 12,771 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-19 03:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (see header): adds AuthorRating and
    BookRating models and normalizes options/verbose names on Author/Book.

    Do not edit the operations of an applied migration -- create a new
    migration instead, or the recorded schema history will diverge.
    """
    dependencies = [
        ('library', '0001_initial'),
    ]
    operations = [
        # New rating models, each with a 1-4 star CharField choice set.
        migrations.CreateModel(
            name='AuthorRating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.CharField(choices=[('1', '1 Star'), ('2', '2 Stars'), ('3', '3 Stars'), ('4', '4 Stars')], max_length=1, verbose_name='Rating')),
            ],
            options={
                'ordering': ('author',),
                # NOTE(review): plural-looking text used as the *singular*
                # verbose_name -- fix in the model + a new migration if desired.
                'verbose_name': 'Author Ratings',
            },
        ),
        migrations.CreateModel(
            name='BookRating',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.CharField(choices=[('1', '1 Star'), ('2', '2 Stars'), ('3', '3 Stars'), ('4', '4 Stars')], max_length=1, verbose_name='Rating')),
            ],
            options={
                'ordering': ('book',),
                'verbose_name': 'Book Ratings',
            },
        ),
        # Ordering / verbose-name updates on the existing models.
        migrations.AlterModelOptions(
            name='author',
            options={'ordering': ('name',), 'verbose_name': 'Author', 'verbose_name_plural': 'Authors'},
        ),
        migrations.AlterModelOptions(
            name='book',
            options={'ordering': ('title',), 'verbose_name': 'Book', 'verbose_name_plural': 'Books'},
        ),
        migrations.AlterField(
            model_name='author',
            name='age',
            field=models.SmallIntegerField(blank=True, null=True, verbose_name='Age'),
        ),
        migrations.AlterField(
            model_name='author',
            name='name',
            field=models.CharField(max_length=128, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='author',
            name='penname',
            field=models.CharField(max_length=128, verbose_name='Pen Name'),
        ),
        migrations.AlterField(
            model_name='book',
            name='title',
            field=models.CharField(max_length=128, verbose_name='Title'),
        ),
        # Foreign keys linking each rating row to its rated object.
        migrations.AddField(
            model_name='bookrating',
            name='book',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='library.Book'),
        ),
        migrations.AddField(
            model_name='authorrating',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='library.Author'),
        ),
    ]
| harikvpy/django-popupcrud | demo/library/migrations/0002_auto_20170919_0319.py | Python | bsd-3-clause | 2,833 |
import unittest
import pylab as pl
import matplotlib as mpl
import itertools
import sys
import math
import timeit
import copy
from em import *
def generate_synthetic_data(N):
    """Generate ~N two-dimensional points drawn from three Gaussian clusters.

    Three equally sized groups of N//3 samples each are produced: two
    correlated Gaussians (obtained by multiplying standard-normal draws by
    the mixing matrices C1 and C) and one isotropic Gaussian shifted to
    (3, 3).  The RNG is reseeded to 0 on every call, so the returned data
    set is deterministic.

    Returns a float32 array of shape (3 * (N // 3), 2).
    """
    np.random.seed(0)
    # Linear maps that induce correlation in two of the clusters.
    C = np.array([[0., -0.7], [3.5, .7]])
    C1 = np.array([[-0.4, 1.7], [0.3, .7]])
    # Use floor division explicitly: identical to the original N/3 for
    # Python 2 ints, but also valid under Python 3, where N/3 would be a
    # float and break numpy's integer shape arguments.
    third = N // 3
    Y = np.r_[
        np.dot(np.random.randn(third, 2), C1),
        np.dot(np.random.randn(third, 2), C),
        np.random.randn(third, 2) + np.array([3, 3]),
    ]
    return Y.astype(np.float32)
class EMTester(object):
    """Driver for exercising GMM training / agglomerative clustering (AHC).

    NOTE(review): Python 2 code (print statements, dict.iteritems); the GMM
    class and compute_distance_BIC come from the wildcard `from em import *`.
    """
    def __init__(self, from_file, variant_param_spaces, device_id, num_subps, names_of_backends):
        # Results keyed by a plot label; values are tuples used by plot().
        self.results = {}
        self.variant_param_spaces = variant_param_spaces
        self.device_id = device_id
        self.num_subplots = num_subps
        self.names_of_backends = names_of_backends
        # Matplotlib-style 3-digit subplot id (rows*100 + cols*10 + index).
        self.plot_id = num_subps/2*100 + 21
        if from_file:
            # Observations loaded from a CSV feature file in the CWD.
            self.X = np.ndfromtxt('IS1000a.csv', delimiter=',', dtype=np.float32)
            self.N = self.X.shape[0]
            self.D = self.X.shape[1]
        else:
            N = 1000
            self.X = generate_synthetic_data(N)
            self.N = self.X.shape[0]
            self.D = self.X.shape[1]
    def new_gmm(self, M):
        """Create a single M-component GMM for the loaded dimensionality."""
        self.M = M
        self.gmm = GMM(self.M, self.D, names_of_backends_to_use=self.names_of_backends, variant_param_spaces=self.variant_param_spaces, device_id=self.device_id)
    def new_gmm_list(self, M, k):
        """Create k independent M-component GMMs (initial AHC clusters)."""
        self.M = M
        self.init_num_clusters = k
        self.gmm_list = [GMM(self.M, self.D, names_of_backends_to_use=self.names_of_backends, variant_param_spaces=self.variant_param_spaces, device_id=self.device_id) for i in range(k)]
    def test_speech_ahc(self):
        """Speaker-diarization-style AHC: resegment, retrain, BIC-merge."""
        # Get the events, divide them into an initial k clusters and train each GMM on a cluster
        per_cluster = self.N/self.init_num_clusters
        init_training = zip(self.gmm_list,np.vsplit(self.X, range(per_cluster, self.N, per_cluster)))
        for g, x in init_training:
            g.train(x)
        # Perform hierarchical agglomeration based on BIC scores
        best_BIC_score = 1.0
        while (best_BIC_score > 0 and len(self.gmm_list) > 1):
            # NOTE(review): best_BIC_score is a float but printed with %d (truncates).
            print "Num GMMs: %d, last score: %d" % (len(self.gmm_list), best_BIC_score)
            num_clusters = len(self.gmm_list)
            # Resegment data based on likelihood scoring
            likelihoods = self.gmm_list[0].score(self.X)
            for g in self.gmm_list[1:]:
                likelihoods = np.column_stack((likelihoods, g.score(self.X)))
            most_likely = likelihoods.argmax(axis=1)
            # Across 2.5 secs of observations, vote on which cluster they should be associated with
            # (250 frames per window -- presumably 10 ms frames; confirm upstream)
            iter_training = {}
            for i in range(250, self.N, 250):
                votes = np.zeros(num_clusters)
                for j in range(i-250, i):
                    votes[most_likely[j]] += 1
                #print votes.argmax()
                iter_training.setdefault(self.gmm_list[votes.argmax()],[]).append(self.X[i-250:i,:])
            # Handle the trailing partial window the same way.
            votes = np.zeros(num_clusters)
            for j in range((self.N/250)*250, self.N):
                votes[most_likely[j]] += 1
            #print votes.argmax()
            iter_training.setdefault(self.gmm_list[votes.argmax()],[]).append(self.X[(self.N/250)*250:self.N,:])
            # Retrain the GMMs on the clusters for which they were voted most likely and
            # make a list of candidates for merging
            iter_bic_list = []
            for g, data_list in iter_training.iteritems():
                cluster_data = data_list[0]
                for d in data_list[1:]:
                    cluster_data = np.concatenate((cluster_data, d))
                cluster_data = np.ascontiguousarray(cluster_data)
                g.train(cluster_data)
                iter_bic_list.append((g,cluster_data))
            # Keep any GMMs that lost all votes in candidate list for merging
            for g in self.gmm_list:
                if g not in iter_training.keys():
                    iter_bic_list.append((g,None))
            # Score all pairs of GMMs using BIC
            best_merged_gmm = None
            best_BIC_score = 0.0
            merged_tuple = None
            for gmm1idx in range(len(iter_bic_list)):
                for gmm2idx in range(gmm1idx+1, len(iter_bic_list)):
                    g1, d1 = iter_bic_list[gmm1idx]
                    g2, d2 = iter_bic_list[gmm2idx]
                    score = 0.0
                    if d1 is not None or d2 is not None:
                        if d1 is not None and d2 is not None:
                            new_gmm, score = compute_distance_BIC(g1, g2, np.concatenate((d1, d2)))
                        elif d1 is not None:
                            new_gmm, score = compute_distance_BIC(g1, g2, d1)
                        else:
                            new_gmm, score = compute_distance_BIC(g1, g2, d2)
                    print "Comparing BIC %d with %d: %f" % (gmm1idx, gmm2idx, score)
                    if score > best_BIC_score:
                        best_merged_gmm = new_gmm
                        merged_tuple = (g1, g2)
                        best_BIC_score = score
            # Merge the winning candidate pair if its deriable to do so
            if best_BIC_score > 0.0:
                self.gmm_list.remove(merged_tuple[0])
                self.gmm_list.remove(merged_tuple[1])
                self.gmm_list.append(best_merged_gmm)
        print "Final size of each cluster:", [ g.M for g in self.gmm_list]
    def test_cytosis_ahc(self):
        """AHC on a single GMM: repeatedly train, record plot data, and merge
        the Rissanen-closest component pair until one component remains."""
        M_start = self.M
        M_end = 0
        plot_counter = 2
        for M in reversed(range(M_end, M_start)):
            print "======================== AHC loop: M = ", M+1, " ==========================="
            self.gmm.train(self.X)
            #plotting
            means = self.gmm.components.means.reshape((self.gmm.M, self.gmm.D))
            covars = self.gmm.components.covars.reshape((self.gmm.M, self.gmm.D, self.gmm.D))
            Y = self.gmm.predict(self.X)
            if(self.plot_id % 10 <= self.num_subplots):
                self.results['_'.join(['ASP v',str(self.plot_id-(100*self.num_subplots+11)),'@',str(self.gmm.D),str(self.gmm.M),str(self.N)])] = (str(self.plot_id), copy.deepcopy(means), copy.deepcopy(covars), copy.deepcopy(Y))
                self.plot_id += 1
            #find closest components and merge
            if M > 0: #don't merge if there is only one component
                gmm_list = []
                for c1 in range(0, self.gmm.M):
                    for c2 in range(c1+1, self.gmm.M):
                        new_component, dist = self.gmm.compute_distance_rissanen(c1, c2)
                        gmm_list.append((dist, (c1, c2, new_component)))
                #print "gmm_list after append: ", gmm_list
                #compute minimum distance
                min_c1, min_c2, min_component = min(gmm_list, key=lambda gmm: gmm[0])[1]
                self.gmm.merge_components(min_c1, min_c2, min_component)
    def time_cytosis_ahc(self):
        """Same loop as test_cytosis_ahc but without plotting bookkeeping,
        for use under timeit."""
        M_start = self.M
        M_end = 0
        for M in reversed(range(M_end, M_start)):
            print "======================== AHC loop: M = ", M+1, " ==========================="
            self.gmm.train(self.X)
            #find closest components and merge
            if M > 0: #don't merge if there is only one component
                gmm_list = []
                for c1 in range(0, self.gmm.M):
                    for c2 in range(c1+1, self.gmm.M):
                        new_component, dist = self.gmm.compute_distance_rissanen(c1, c2)
                        gmm_list.append((dist, (c1, c2, new_component)))
                #compute minimum distance
                min_c1, min_c2, min_component = min(gmm_list, key=lambda gmm: gmm[0])[1]
                self.gmm.merge_components(min_c1, min_c2, min_component)
    def plot(self):
        """Scatter each recorded clustering with covariance ellipses."""
        for t, r in self.results.iteritems():
            splot = pl.subplot(r[0], title=t)
            color_iter = itertools.cycle (['r', 'g', 'b', 'c'])
            Y_ = r[3]
            for i, (mean, covar, color) in enumerate(zip(r[1], r[2], color_iter)):
                # Eigendecomposition gives ellipse axes; the leading
                # eigenvector fixes its orientation.
                v, w = np.linalg.eigh(covar)
                u = w[0] / np.linalg.norm(w[0])
                pl.scatter(self.X.T[0,Y_==i], self.X.T[1,Y_==i], .8, color=color)
                angle = np.arctan(u[1]/u[0])
                angle = 180 * angle / np.pi
                ell = mpl.patches.Ellipse (mean, v[0], v[1], 180 + angle, color=color)
                ell.set_clip_box(splot.bbox)
                ell.set_alpha(0.5)
                splot.add_artist(ell)
        pl.show()
if __name__ == '__main__':
    # Backend/tuning configuration passed through to the GMM constructor;
    # values are strings because they feed code-generation parameters.
    device_id = 0
    num_subplots = 6
    variant_param_spaces = {'base': {},
                            'cuda_boost': {'num_blocks_estep': ['16'],
                                           'num_threads_estep': ['512'],
                                           'num_threads_mstep': ['512'],
                                           'num_event_blocks': ['128'],
                                           'max_num_dimensions': ['50'],
                                           'max_num_components': ['122'],
                                           'max_num_dimensions_covar_v3': ['40'],
                                           'max_num_components_covar_v3': ['82'],
                                           'diag_only': ['0'],
                                           'max_iters': ['10'],
                                           'min_iters': ['10'],
                                           'covar_version_name': ['V1', 'V2A', 'V2B', 'V3'] },
                            'cilk_boost': {}
                            }
    # from_file=True: reads IS1000a.csv from the current directory.
    emt = EMTester(True, variant_param_spaces, device_id, num_subplots, ['cuda'])
    #emt.new_gmm(6)
    #t = timeit.Timer(emt.time_cytosis_ahc)
    #print t.timeit(number=1)
    #emt.test_cytosis_ahc()
    #emt.plot()
    # Run the speech-style AHC with 5 components per GMM, 16 initial clusters.
    emt.new_gmm_list(5, 16)
    emt.test_speech_ahc()
| mbdriscoll/asp-old | tests/em_ahc_test.py | Python | bsd-3-clause | 9,721 |
#
# Widgets.py -- wrapped Qt widgets and convenience functions
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os.path
from functools import reduce
from ginga.qtw.QtHelp import QtGui, QtCore, QTextCursor, \
QIcon, QPixmap, QImage, have_pyqt4
from ginga.qtw import QtHelp
from ginga.misc import Callback, Bunch
import ginga.icons
# path to our icons
icondir = os.path.split(ginga.icons.__file__)[0]
class WidgetError(Exception):
    """For errors thrown in this module."""
    pass
# Module-level application reference returned by WidgetBase.get_app();
# presumably assigned by the application wrapper at startup -- confirm.
_app = None
# BASE
class WidgetBase(Callback.Callbacks):
    """Common base for the Qt widget wrappers in this module.

    Subclasses create the concrete Qt widget and assign it to self.widget;
    the methods here delegate generic operations to that widget.
    """
    def __init__(self, rng=None):
        super(WidgetBase, self).__init__()
        # The wrapped Qt widget; set by the subclass constructor.
        self.widget = None
        self.changed = False
        # external data can be attached here
        self.extdata = Bunch.Bunch()
    def get_widget(self):
        """Return the underlying Qt widget."""
        return self.widget
    def set_tooltip(self, text):
        self.widget.setToolTip(text)
    def set_enabled(self, tf):
        self.widget.setEnabled(tf)
    def get_size(self):
        """Return the widget's current (width, height) in pixels."""
        wd, ht = self.widget.width(), self.widget.height()
        return (wd, ht)
    def get_app(self):
        """Return the module-level application object."""
        return _app
    def delete(self):
        # deleteLater() defers destruction to the Qt event loop.
        self.widget.deleteLater()
    def focus(self):
        # Raise the window first, then give this widget keyboard focus.
        self.widget.activateWindow()
        self.widget.setFocus()
        #self.widget.raise_()
    def resize(self, width, height):
        self.widget.resize(width, height)
    def show(self):
        self.widget.show()
    def hide(self):
        self.widget.hide()
    def get_font(self, font_family, point_size):
        """Return a Qt font object for the given family and point size."""
        font = QtHelp.get_font(font_family, point_size)
        return font
    def cfg_expand(self, horizontal=0, vertical=0):
        """Set the widget's Qt size policy from raw policy enum values."""
        h_policy = QtGui.QSizePolicy.Policy(horizontal)
        v_policy = QtGui.QSizePolicy.Policy(vertical)
        self.widget.setSizePolicy(QtGui.QSizePolicy(h_policy, v_policy))
# BASIC WIDGETS
class TextEntry(WidgetBase):
    """A single-line text entry widget.

    Callbacks:
        'activated': fired when Return is pressed in the entry.
    """
    def __init__(self, text='', editable=True):
        super(TextEntry, self).__init__()
        self.widget = QtGui.QLineEdit()
        self.widget.setText(text)
        self.widget.setReadOnly(not editable)
        self.widget.returnPressed.connect(self._cb_redirect)
        self.enable_callback('activated')
    def _cb_redirect(self, *args):
        # internal: forward the Qt signal to our callback system
        self.make_callback('activated')
    def get_text(self):
        """Return the current entry text."""
        return self.widget.text()
    def set_text(self, text):
        """Replace the entry text."""
        self.widget.setText(text)
    def set_editable(self, tf):
        """Allow (True) or disallow (False) user editing."""
        self.widget.setReadOnly(not tf)
    def set_font(self, font):
        self.widget.setFont(font)
    def set_length(self, numchars):
        # this is only supposed to set the visible length (but Qt doesn't
        # really have a good way to do that)
        #self.widget.setMaxLength(numchars)
        pass
class TextEntrySet(WidgetBase):
    """A single-line text entry paired with a "Set" button.

    Callbacks:
        'activated': fired on Return in the entry or a click on the button.
    """
    def __init__(self, text='', editable=True):
        super(TextEntrySet, self).__init__()
        # container holds the entry plus the button
        self.widget = QtHelp.HBox()
        self.entry = QtGui.QLineEdit()
        self.entry.setText(text)
        self.entry.setReadOnly(not editable)
        layout = self.widget.layout()
        layout.addWidget(self.entry, stretch=1)
        self.btn = QtGui.QPushButton('Set')
        self.entry.returnPressed.connect(self._cb_redirect)
        self.btn.clicked.connect(self._cb_redirect)
        layout.addWidget(self.btn, stretch=0)
        self.enable_callback('activated')
    def _cb_redirect(self, *args):
        # internal: forward either Qt signal to our callback system
        self.make_callback('activated')
    def get_text(self):
        """Return the current entry text."""
        return self.entry.text()
    def set_text(self, text):
        """Replace the entry text."""
        self.entry.setText(text)
    def set_editable(self, tf):
        """Allow (True) or disallow (False) user editing."""
        self.entry.setReadOnly(not tf)
    def set_font(self, font):
        self.widget.setFont(font)
    def set_length(self, numchars):
        # this is only supposed to set the visible length (but Qt doesn't
        # really have a good way to do that)
        #self.widget.setMaxLength(numchars)
        pass
    def set_enabled(self, tf):
        # enable/disable the entry as well as the container
        super(TextEntrySet, self).set_enabled(tf)
        self.entry.setEnabled(tf)
class GrowingTextEdit(QtGui.QTextEdit):
    """A QTextEdit that grows its maximum height to fit its document,
    within the [heightMin, heightMax] bounds."""
    def __init__(self, *args, **kwargs):
        super(GrowingTextEdit, self).__init__(*args, **kwargs)
        # track document size changes so we can grow to fit
        self.document().documentLayout().documentSizeChanged.connect(
            self.sizeChange)
        self.heightMin = 0
        self.heightMax = 65000
    def sizeChange(self):
        """Slot: adjust the widget's max height to the document height."""
        docHeight = self.document().size().height()
        # add some margin to prevent auto scrollbars
        docHeight += 20
        if self.heightMin <= docHeight <= self.heightMax:
            self.setMaximumHeight(docHeight)
class TextArea(WidgetBase):
    """A multi-line text display/editing widget with optional wrapping."""
    def __init__(self, wrap=False, editable=False):
        super(TextArea, self).__init__()
        area = GrowingTextEdit()
        area.setReadOnly(not editable)
        mode = QtGui.QTextEdit.WidgetWidth if wrap else QtGui.QTextEdit.NoWrap
        area.setLineWrapMode(mode)
        self.widget = area
    def append_text(self, text, autoscroll=True):
        """Append `text`; optionally scroll so the new text is visible."""
        # strip one trailing newline; append() adds its own paragraph break
        if text.endswith('\n'):
            text = text[:-1]
        self.widget.append(text)
        if autoscroll:
            self.widget.moveCursor(QTextCursor.End)
            self.widget.moveCursor(QTextCursor.StartOfLine)
            self.widget.ensureCursorVisible()
    def get_text(self):
        """Return the full contents as plain text."""
        return self.widget.document().toPlainText()
    def clear(self):
        """Remove all text."""
        self.widget.clear()
    def set_text(self, text):
        """Replace the entire contents with `text`."""
        self.clear()
        self.append_text(text)
    def set_editable(self, tf):
        self.widget.setReadOnly(not tf)
    def set_limit(self, numlines):
        # no-op: kept for API compatibility with other backends
        #self.widget.setMaximumBlockCount(numlines)
        pass
    def set_font(self, font):
        self.widget.setCurrentFont(font)
    def set_wrap(self, tf):
        """Turn line wrapping on (True) or off (False)."""
        mode = QtGui.QTextEdit.WidgetWidth if tf else QtGui.QTextEdit.NoWrap
        self.widget.setLineWrapMode(mode)
class Label(WidgetBase):
    """A text label, optionally clickable and with a context menu.

    Parameters:
        text: initial label text.
        halign: one of 'left', 'center', 'right'.
        style: 'clickable' draws a raised frame; anything else is plain.
        menu: optional Menu wrapper to pop up on right-click.

    Callbacks:
        'activated': fired when the label is pressed with the left button.
    """
    def __init__(self, text='', halign='left', style='normal', menu=None):
        super(Label, self).__init__()
        lbl = QtGui.QLabel(text)
        if halign == 'left':
            lbl.setAlignment(QtCore.Qt.AlignLeft)
        elif halign == 'center':
            lbl.setAlignment(QtCore.Qt.AlignHCenter)
        elif halign == 'right':
            # bug fix: this branch previously re-tested 'center', so the
            # right-alignment case was unreachable
            lbl.setAlignment(QtCore.Qt.AlignRight)
        self.widget = lbl
        lbl.mousePressEvent = self._cb_redirect
        if style == 'clickable':
            lbl.setSizePolicy(QtGui.QSizePolicy.Minimum,
                              QtGui.QSizePolicy.Minimum)
            lbl.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
        if menu is not None:
            lbl.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
            menu_w = menu.get_widget()
            def on_context_menu(point):
                menu_w.exec_(lbl.mapToGlobal(point))
            lbl.customContextMenuRequested.connect(on_context_menu)
        # Enable highlighting for copying
        #lbl.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
        self.enable_callback('activated')
    def _cb_redirect(self, event):
        # internal: only the left mouse button activates the label
        buttons = event.buttons()
        if buttons & QtCore.Qt.LeftButton:
            self.make_callback('activated')
    def get_text(self):
        """Return the current label text."""
        return self.widget.text()
    def set_text(self, text):
        """Replace the label text."""
        self.widget.setText(text)
    def set_font(self, font):
        self.widget.setFont(font)
    def set_color(self, fg=None, bg=None):
        """Set foreground and/or background color via a style sheet."""
        self.widget.setStyleSheet(
            "QLabel { background-color: %s; color: %s; }" % (bg, fg))
class Button(WidgetBase):
    """A plain push button.

    Callbacks:
        'activated': fired when the button is clicked.
    """
    def __init__(self, text=''):
        super(Button, self).__init__()
        btn = QtGui.QPushButton(text)
        btn.clicked.connect(self._cb_redirect)
        self.widget = btn
        self.enable_callback('activated')
    def _cb_redirect(self, *args):
        self.make_callback('activated')
class ComboBox(WidgetBase):
    """A drop-down selection box.

    Callbacks:
        'activated': fired with the selected index when the user
            chooses an item.
    """
    def __init__(self, editable=False):
        super(ComboBox, self).__init__()
        self.widget = QtHelp.ComboBox()
        self.widget.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
        self.widget.setEditable(editable)
        self.widget.activated.connect(self._cb_redirect)
        self.enable_callback('activated')
    def _cb_redirect(self):
        # internal: report the currently selected index
        idx = self.widget.currentIndex()
        self.make_callback('activated', idx)
    def insert_alpha(self, text):
        """Insert `text` keeping items in ascending alphabetical order."""
        index = 0
        while True:
            itemText = self.widget.itemText(index)
            if len(itemText) == 0:
                # walked past the last item without finding a larger one
                break
            if itemText > text:
                self.widget.insertItem(index, text)
                return
            index += 1
        self.widget.addItem(text)
    def delete_alpha(self, text):
        """Remove the item whose text equals `text`."""
        index = self.widget.findText(text)
        self.widget.removeItem(index)
    def get_alpha(self, idx):
        """Return the text of the item at index `idx`."""
        return self.widget.itemText(idx)
    def clear(self):
        """Remove all items."""
        self.widget.clear()
    def show_text(self, text):
        """Select the item whose text equals `text`."""
        index = self.widget.findText(text)
        self.set_index(index)
    def append_text(self, text):
        """Add `text` as the last item."""
        self.widget.addItem(text)
    def set_index(self, index):
        """Set the current selection by index."""
        self.widget.setCurrentIndex(index)
    def get_index(self):
        """Return the index of the current selection."""
        return self.widget.currentIndex()
class SpinBox(WidgetBase):
    """An integer or float spin box.

    Callbacks:
        'value-changed': fired with the new value on user changes
            (suppressed for programmatic set_value() calls).
    """
    def __init__(self, dtype=int):
        super(SpinBox, self).__init__()
        # choose the Qt widget variant by the requested value type
        if dtype == float:
            w = QtGui.QDoubleSpinBox()
        else:
            w = QtGui.QSpinBox()
        w.valueChanged.connect(self._cb_redirect)
        # should values wrap around
        w.setWrapping(False)
        self.widget = w
        self.enable_callback('value-changed')
    def _cb_redirect(self, val):
        # suppress the callback that Qt emits for programmatic changes
        if self.changed:
            self.changed = False
            return
        self.make_callback('value-changed', val)
    def get_value(self):
        """Return the current value."""
        return self.widget.value()
    def set_value(self, val):
        """Set the value programmatically (no callback is fired)."""
        self.changed = True
        self.widget.setValue(val)
    def set_decimals(self, num):
        """Set displayed decimal places (float spin boxes only)."""
        self.widget.setDecimals(num)
    def set_limits(self, minval, maxval, incr_value=1):
        """Set the valid range and the single-step increment."""
        adj = self.widget
        adj.setRange(minval, maxval)
        adj.setSingleStep(incr_value)
class Slider(WidgetBase):
    """A horizontal or vertical slider.

    Callbacks:
        'value-changed': fired with the new value; with track=False the
            callback fires only after the drag finishes.
    """
    def __init__(self, orientation='horizontal', track=False):
        super(Slider, self).__init__()
        if orientation == 'horizontal':
            w = QtGui.QSlider(QtCore.Qt.Horizontal)
            w.setTickPosition(QtGui.QSlider.TicksBelow)
        else:
            w = QtGui.QSlider(QtCore.Qt.Vertical)
            w.setTickPosition(QtGui.QSlider.TicksRight)
        #w.setTickPosition(QtGui.QSlider.NoTicks)
        # this controls whether the callbacks are made *as the user
        # moves the slider* or afterwards
        w.setTracking(track)
        self.widget = w
        w.valueChanged.connect(self._cb_redirect)
        self.enable_callback('value-changed')
    def _cb_redirect(self, val):
        # It appears that Qt uses set_value() to set the value of the
        # slider when it is dragged, so we cannot use the usual method
        # of setting a hidden "changed" variable to suppress the callback
        # when setting the value programmatically.
        ## if self.changed:
        ##     self.changed = False
        ##     return
        self.make_callback('value-changed', val)
    def get_value(self):
        """Return the current slider value."""
        return self.widget.value()
    def set_value(self, val):
        """Set the slider value; note the callback is NOT suppressed
        (see _cb_redirect)."""
        self.changed = True
        self.widget.setValue(val)
    def set_tracking(self, tf):
        """If True, fire callbacks continuously while dragging."""
        self.widget.setTracking(tf)
    def set_limits(self, minval, maxval, incr_value=1):
        """Set the valid range and the single-step increment."""
        adj = self.widget
        adj.setRange(minval, maxval)
        adj.setSingleStep(incr_value)
class ScrollBar(WidgetBase):
    """A standalone scroll bar.

    Callbacks:
        'activated': fired with the new value when the bar moves.
    """
    def __init__(self, orientation='horizontal'):
        super(ScrollBar, self).__init__()
        qt_orient = (QtCore.Qt.Horizontal if orientation == 'horizontal'
                     else QtCore.Qt.Vertical)
        self.widget = QtGui.QScrollBar(qt_orient)
        self.widget.valueChanged.connect(self._cb_redirect)
        self.enable_callback('activated')
    def _cb_redirect(self):
        self.make_callback('activated', self.widget.value())
class CheckBox(WidgetBase):
    """A check box.

    Callbacks:
        'activated': fired with the new boolean state on change.
    """
    def __init__(self, text=''):
        super(CheckBox, self).__init__()
        cbox = QtGui.QCheckBox(text)
        cbox.stateChanged.connect(self._cb_redirect)
        self.widget = cbox
        self.enable_callback('activated')
    def _cb_redirect(self, *args):
        self.make_callback('activated', self.get_state())
    def set_state(self, tf):
        """Check (True) or uncheck (False) the box."""
        self.widget.setChecked(tf)
    def get_state(self):
        """Return the state as a bool."""
        # Qt checkState() yields 0 (unchecked) or 2 (checked)
        return self.widget.checkState() != 0
class ToggleButton(WidgetBase):
    """A two-state (checkable) push button.

    Callbacks:
        'activated': fired with the new boolean state on click.
    """
    def __init__(self, text=''):
        super(ToggleButton, self).__init__()
        btn = QtGui.QPushButton(text)
        btn.setCheckable(True)
        btn.clicked.connect(self._cb_redirect)
        self.widget = btn
        self.enable_callback('activated')
    def _cb_redirect(self, val):
        self.make_callback('activated', val)
    def set_state(self, tf):
        """Depress (True) or release (False) the button."""
        self.widget.setChecked(tf)
    def get_state(self):
        """Return the current boolean state."""
        return self.widget.isChecked()
class RadioButton(WidgetBase):
    """A radio button.

    Callbacks:
        'activated': fired with the new boolean state on user toggles
            (suppressed for programmatic set_state() calls).
    """
    def __init__(self, text='', group=None):
        super(RadioButton, self).__init__()
        # NOTE(review): `group` is accepted but unused here -- presumably
        # Qt groups radio buttons by parent widget; confirm against the
        # other backends' implementations
        self.widget = QtGui.QRadioButton(text)
        self.widget.toggled.connect(self._cb_redirect)
        self.enable_callback('activated')
    def _cb_redirect(self, val):
        # suppress the callback Qt emits for programmatic changes
        if self.changed:
            self.changed = False
            return
        self.make_callback('activated', val)
    def set_state(self, tf):
        """Set the state programmatically (no callback is fired)."""
        if self.widget.isChecked() != tf:
            # toggled only fires when the value is toggled
            self.changed = True
            self.widget.setChecked(tf)
    def get_state(self):
        """Return the current boolean state."""
        return self.widget.isChecked()
class Image(WidgetBase):
    """A static image display, optionally clickable and with a menu.

    Callbacks:
        'activated': fired when the image is pressed with the left button.
    """
    def __init__(self, native_image=None, style='normal', menu=None):
        super(Image, self).__init__()
        # the image is rendered via a QLabel holding a pixmap
        lbl = QtGui.QLabel()
        self.widget = lbl
        if native_image is not None:
            self._set_image(native_image)
        lbl.mousePressEvent = self._cb_redirect
        if style == 'clickable':
            lbl.setSizePolicy(QtGui.QSizePolicy.Minimum,
                              QtGui.QSizePolicy.Minimum)
            #lbl.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
        if menu is not None:
            lbl.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
            menu_w = menu.get_widget()
            def on_context_menu(point):
                menu_w.exec_(lbl.mapToGlobal(point))
            lbl.customContextMenuRequested.connect(on_context_menu)
        self.enable_callback('activated')
    def _cb_redirect(self, event):
        # internal: only the left mouse button activates the image
        buttons = event.buttons()
        if buttons & QtCore.Qt.LeftButton:
            self.make_callback('activated')
    def _set_image(self, native_image):
        # convert the QImage to a pixmap for display in the label
        pixmap = QPixmap.fromImage(native_image)
        self.widget.setPixmap(pixmap)
class ProgressBar(WidgetBase):
    """A percentage progress bar; set_value() takes a fraction."""
    def __init__(self):
        super(ProgressBar, self).__init__()
        bar = QtGui.QProgressBar()
        bar.setRange(0, 100)
        bar.setTextVisible(True)
        self.widget = bar
    def set_value(self, pct):
        """Set progress; `pct` is a fraction in [0.0, 1.0]."""
        # Qt wants an integer percentage
        self.widget.setValue(int(pct * 100.0))
class StatusBar(WidgetBase):
    """A status bar showing transient messages."""
    def __init__(self):
        super(StatusBar, self).__init__()
        self.widget = QtGui.QStatusBar()
    def set_message(self, msg_str):
        """Display `msg_str`; it is cleared after about 10 seconds."""
        self.widget.showMessage(msg_str, 10000)
class TreeView(WidgetBase):
    """A multi-level tree/table view backed by a nested dict.

    The tree is mirrored in `self.shadow`, a nested dict of Bunches
    (node=<data>, item=<QTreeWidgetItem>, terminal=<bool>) used to map
    between widget items and data nodes.

    Callbacks:
        'selected': fired with a dict of the selected leaf nodes.
        'activated': fired with a dict containing a double-clicked item.
        'drag-start': fired with (drag_pkg, res_dict) when a drag begins.
    """
    def __init__(self, auto_expand=False, sortable=False,
                 selection='single', use_alt_row_color=False,
                 dragable=False):
        super(TreeView, self).__init__()
        self.auto_expand = auto_expand
        self.sortable = sortable
        self.dragable = dragable
        self.selection = selection
        # number of levels in the tree; leaves live at level `levels`
        self.levels = 1
        # data key whose column names a leaf row
        self.leaf_key = None
        self.leaf_idx = 0
        self.columns = []
        self.datakeys = []
        # shadow index
        self.shadow = {}
        tv = QtGui.QTreeWidget()
        self.widget = tv
        tv.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        if selection == 'multiple':
            tv.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
        tv.setAlternatingRowColors(use_alt_row_color)
        tv.itemDoubleClicked.connect(self._cb_redirect)
        tv.itemSelectionChanged.connect(self._selection_cb)
        if self.dragable:
            tv.setDragEnabled(True)
            tv.startDrag = self._start_drag
        for cbname in ('selected', 'activated', 'drag-start'):
            self.enable_callback(cbname)
    def setup_table(self, columns, levels, leaf_key):
        """Configure columns and depth; clears any existing contents.

        `columns` is either a list of header strings or a list of
        (header, datakey) pairs.
        """
        self.clear()
        self.columns = columns
        self.levels = levels
        self.leaf_key = leaf_key
        treeview = self.widget
        treeview.setColumnCount(len(columns))
        treeview.setSortingEnabled(self.sortable)
        # speeds things up a bit
        treeview.setUniformRowHeights(True)
        # create the column headers
        if not isinstance(columns[0], str):
            # columns specifies a mapping
            headers = [ col[0] for col in columns ]
            datakeys = [ col[1] for col in columns ]
        else:
            headers = datakeys = columns
        self.datakeys = datakeys
        self.leaf_idx = datakeys.index(self.leaf_key)
        if self.sortable:
            # Sort increasing by default
            treeview.sortByColumn(self.leaf_idx, QtCore.Qt.AscendingOrder)
        treeview.setHeaderLabels(headers)
    def set_tree(self, tree_dict):
        """Replace the tree contents with `tree_dict`."""
        self.clear()
        self.add_tree(tree_dict)
    def add_tree(self, tree_dict):
        """Merge `tree_dict` into the existing tree."""
        if self.sortable:
            # sorting is disabled during bulk insert for speed/stability
            self.widget.setSortingEnabled(False)
        for key in tree_dict:
            self._add_subtree(1, self.shadow,
                              self.widget, key, tree_dict[key])
        if self.sortable:
            self.widget.setSortingEnabled(True)
        # User wants auto expand?
        if self.auto_expand:
            self.widget.expandAll()
    def _add_subtree(self, level, shadow, parent_item, key, node):
        """Recursively insert `node` at `level`, maintaining the shadow
        index in parallel with the widget items."""
        if level >= self.levels:
            # leaf node
            values = [ '' if _key == 'icon' else str(node[_key])
                       for _key in self.datakeys ]
            try:
                bnch = shadow[key]
                item = bnch.item
                # TODO: update leaf item
            except KeyError:
                # new item
                item = QtGui.QTreeWidgetItem(parent_item, values)
                if level == 1:
                    parent_item.addTopLevelItem(item)
                else:
                    parent_item.addChild(item)
                shadow[key] = Bunch.Bunch(node=node, item=item, terminal=True)
                # hack for adding an image to a table
                # TODO: add types for columns
                if 'icon' in node:
                    i = self.datakeys.index('icon')
                    item.setIcon(i, node['icon'])
                # mark cell as non-editable
                item.setFlags(item.flags() & ~QtCore.Qt.ItemIsEditable)
        else:
            try:
                # node already exists
                bnch = shadow[key]
                item = bnch.item
                d = bnch.node
            except KeyError:
                # new node
                item = QtGui.QTreeWidgetItem(parent_item, [str(key)])
                if level == 1:
                    parent_item.addTopLevelItem(item)
                else:
                    parent_item.addChild(item)
                d = {}
                shadow[key] = Bunch.Bunch(node=d, item=item, terminal=False)
            # recurse for non-leaf interior node
            for key in node:
                self._add_subtree(level+1, d, item, key, node[key])
    def _selection_cb(self):
        # internal: forward Qt selection changes to 'selected'
        res_dict = self.get_selected()
        self.make_callback('selected', res_dict)
    def _cb_redirect(self, item):
        # internal: forward double-clicks to 'activated'
        res_dict = {}
        self._get_item(res_dict, item)
        self.make_callback('activated', res_dict)
    def _get_path(self, item):
        """Return the list of keys from the tree root down to `item`."""
        if item is None:
            return []
        if item.childCount() == 0:
            # leaf: its name lives in the leaf column
            path_rest = self._get_path(item.parent())
            myname = item.text(self.leaf_idx)
            path_rest.append(myname)
            return path_rest
        # interior node: its name lives in column 0
        myname = item.text(0)
        path_rest = self._get_path(item.parent())
        path_rest.append(myname)
        return path_rest
    def _get_item(self, res_dict, item):
        # from the QTreeViewItem `item`, return the item via a path
        # in the dictionary `res_dict`
        path = self._get_path(item)
        d, s = res_dict, self.shadow
        for name in path[:-1]:
            d = d.setdefault(name, {})
            s = s[name].node
        dst_key = path[-1]
        d[dst_key] = s[dst_key].node
    def get_selected(self):
        """Return the selected leaf nodes as a nested dict."""
        items = list(self.widget.selectedItems())
        res_dict = {}
        for item in items:
            if item.childCount() > 0:
                # only leaf nodes can be selected
                continue
            self._get_item(res_dict, item)
        return res_dict
    def clear(self):
        """Remove all items and reset the shadow index."""
        self.widget.clear()
        self.shadow = {}
    def clear_selection(self):
        """Deselect everything."""
        self.widget.clearSelection()
    def _path_to_item(self, path):
        """Resolve a key path to its QTreeWidgetItem via the shadow index."""
        s = self.shadow
        for name in path[:-1]:
            s = s[name].node
        item = s[path[-1]].item
        return item
    def select_path(self, path):
        """Select the item addressed by the key path `path`."""
        item = self._path_to_item(path)
        self.widget.setItemSelected(item, True)
    def highlight_path(self, path, onoff, font_color='green'):
        """Bold/color (onoff=True) or reset (False) the row at `path`."""
        item = self._path_to_item(path)
        # A little painfully inefficient, can we do better than this?
        font = QtHelp.QFont()
        if not onoff:
            color = QtHelp.QColor('black')
        else:
            font.setBold(True)
            color = QtHelp.QColor(font_color)
        brush = QtHelp.QBrush(color)
        for i in range(item.columnCount()):
            item.setForeground(i, brush)
            item.setFont(i, font)
    def scroll_to_path(self, path):
        # TODO: this doesn't give an error, but does not seem to be
        # working as the API indicates
        item = self._path_to_item(path)
        row = self.widget.indexOfTopLevelItem(item)
        midx = self.widget.indexAt(QtCore.QPoint(row, 0))
        self.widget.scrollTo(midx, QtGui.QAbstractItemView.PositionAtCenter)
    def sort_on_column(self, i):
        """Sort ascending by column `i`."""
        self.widget.sortByColumn(i, QtCore.Qt.AscendingOrder)
    def set_column_width(self, i, width):
        """Set the pixel width of column `i`."""
        self.widget.setColumnWidth(i, width)
    def set_column_widths(self, lwidths):
        """Set several column widths; None entries are skipped."""
        for i, width in enumerate(lwidths):
            if width is not None:
                self.set_column_width(i, width)
    def set_optimal_column_widths(self):
        """Resize every column to fit its contents."""
        for i in range(len(self.columns)):
            self.widget.resizeColumnToContents(i)
    def _start_drag(self, event):
        # internal: package the current selection and notify 'drag-start'
        res_dict = self.get_selected()
        drag_pkg = DragPackage(self.widget)
        self.make_callback('drag-start', drag_pkg, res_dict)
        drag_pkg.start_drag()
# CONTAINERS
class ContainerBase(WidgetBase):
    """Base class for widgets that hold child widgets.

    Children are tracked in `self.children` (wrapper objects, not
    native Qt widgets).
    """
    def __init__(self):
        super(ContainerBase, self).__init__()
        self.children = []
    def add_ref(self, ref):
        """Record `ref` as a child of this container."""
        # TODO: should this be a weakref?
        self.children.append(ref)
    def _remove(self, childw, delete=False):
        # detach the native widget from our layout and parent;
        # optionally schedule it for deletion
        layout = self.widget.layout()
        if layout is not None:
            layout.removeWidget(childw)
        childw.setParent(None)
        if delete:
            childw.deleteLater()
    def remove(self, w, delete=False):
        """Remove child wrapper `w`; raises ValueError if not a child."""
        if w not in self.children:
            raise ValueError("Widget is not a child of this container")
        self.children.remove(w)
        self._remove(w.get_widget(), delete=delete)
    def remove_all(self, delete=False):
        """Remove every child (iterates over a copy for safe mutation)."""
        for w in list(self.children):
            self.remove(w, delete=delete)
    def get_children(self):
        """Return the list of child wrappers."""
        return self.children
    def num_children(self):
        """Return the number of children."""
        return len(self.children)
    def _get_native_children(self):
        # native Qt widgets corresponding to our children, in order
        return [child.get_widget() for child in self.children]
    def _get_native_index(self, nchild):
        # index of native widget `nchild` among our children, or -1
        l = self._get_native_children()
        try:
            return l.index(nchild)
        except (IndexError, ValueError) as e:
            return -1
    def _native_to_child(self, nchild):
        # map a native Qt widget back to its wrapper, or None
        idx = self._get_native_index(nchild)
        if idx < 0:
            return None
        return self.children[idx]
    def set_margins(self, left, right, top, bottom):
        """Set the layout's content margins, in pixels."""
        layout = self.widget.layout()
        # bug fix: QLayout.setContentsMargins() takes (left, top, right,
        # bottom); the arguments were previously passed in this method's
        # own (left, right, top, bottom) order, swapping right and top
        layout.setContentsMargins(left, top, right, bottom)
    def set_border_width(self, pix):
        """Set a uniform margin of `pix` pixels on all four sides."""
        layout = self.widget.layout()
        layout.setContentsMargins(pix, pix, pix, pix)
class Box(ContainerBase):
    """A container that packs children in a single row or column."""
    def __init__(self, orientation='horizontal'):
        super(Box, self).__init__()
        self.widget = QtGui.QWidget()
        self.orientation = orientation
        if orientation == 'horizontal':
            self.layout = QtGui.QHBoxLayout()
        else:
            self.layout = QtGui.QVBoxLayout()
        # because of ridiculous defaults
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.widget.setLayout(self.layout)
    def add_widget(self, child, stretch=0.0):
        """Append `child` with the given stretch factor."""
        self.add_ref(child)
        child_w = child.get_widget()
        if self.orientation == 'horizontal':
            # horizontal boxes left-align their children
            self.layout.addWidget(child_w, stretch=stretch,
                                  alignment=QtCore.Qt.AlignLeft)
        else:
            self.layout.addWidget(child_w, stretch=stretch)
    def set_spacing(self, val):
        """Set the pixel spacing between children."""
        self.layout.setSpacing(val)
class HBox(Box):
    """Convenience subclass of Box preset to horizontal orientation."""
    def __init__(self):
        super(HBox, self).__init__(orientation='horizontal')
class VBox(Box):
    """Convenience subclass of Box preset to vertical orientation."""
    def __init__(self):
        super(VBox, self).__init__(orientation='vertical')
class Frame(ContainerBase):
    """A raised, boxed frame holding a single child, with an optional
    centered title label above the child."""
    def __init__(self, title=None):
        super(Frame, self).__init__()
        self.widget = QtGui.QFrame()
        self.widget.setFrameStyle(QtGui.QFrame.Box | QtGui.QFrame.Raised)
        vbox = QtGui.QVBoxLayout()
        self.layout = vbox
        # because of ridiculous defaults
        vbox.setContentsMargins(2, 2, 2, 2)
        self.widget.setLayout(vbox)
        if title:
            lbl = QtGui.QLabel(title)
            lbl.setAlignment(QtCore.Qt.AlignHCenter)
            #lbl.setAlignment(QtCore.Qt.AlignLeft)
            vbox.addWidget(lbl, stretch=0)
            self.label = lbl
        else:
            self.label = None
    def set_widget(self, child, stretch=1):
        """Set `child` as the frame's (only) content widget."""
        self.remove_all()
        self.add_ref(child)
        self.widget.layout().addWidget(child.get_widget(), stretch=stretch)
# Qt custom expander widget
# See http://stackoverflow.com/questions/10364589/equivalent-of-gtks-expander-in-pyqt4
#
class Expander(ContainerBase):
    """A collapsible container: a titled toggle button that shows or
    hides a single child widget."""
    # class-level arrow icons, created lazily (see __init__)
    r_arrow = None
    d_arrow = None
    # Note: add 'text-align: left;' if you want left adjusted labels
    widget_style = """
        QPushButton { margin: 1px,1px,1px,1px; padding: 0px;
                      border-width: 0px; border-style: solid; }
        """
    def __init__(self, title=''):
        super(Expander, self).__init__()
        # Qt doesn't seem to like it (segfault) if we actually construct
        # these icons in the class variable declarations
        if Expander.r_arrow is None:
            Expander.r_arrow = QtHelp.get_icon(os.path.join(icondir,
                                                            'triangle-right-48.png'),
                                               size=(12, 12))
        if Expander.d_arrow is None:
            Expander.d_arrow = QtHelp.get_icon(os.path.join(icondir,
                                                            'triangle-down-48.png'),
                                               size=(12, 12))
        self.widget = QtGui.QWidget()
        vbox = QtGui.QVBoxLayout()
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.setSpacing(0)
        self.layout = vbox
        # right-arrow icon indicates the collapsed state
        self.toggle = QtGui.QPushButton(Expander.r_arrow, title)
        self.toggle.setStyleSheet(Expander.widget_style)
        #self.toggle.setCheckable(True)
        self.toggle.clicked.connect(self._toggle_widget)
        vbox.addWidget(self.toggle, stretch=0)
        self.widget.setLayout(vbox)
    def set_widget(self, child, stretch=1):
        """Set `child` as the expandable content (initially hidden)."""
        self.remove_all()
        self.add_ref(child)
        child_w = child.get_widget()
        self.widget.layout().addWidget(child_w, stretch=stretch)
        child_w.setVisible(False)
    def _toggle_widget(self):
        # internal: flip child visibility and swap the arrow icon
        child = self.get_children()[0]
        child_w = child.get_widget()
        #if self.toggle.isChecked():
        if child_w.isVisible():
            self.toggle.setIcon(Expander.r_arrow)
            child_w.setVisible(False)
        else:
            self.toggle.setIcon(Expander.d_arrow)
            child_w.setVisible(True)
class TabWidget(ContainerBase):
    """A tabbed notebook container.

    Callbacks:
        'page-switch': fired with the newly current child.
        'page-close': fired with the child whose tab close was requested.
        'page-move', 'page-detach': declared for API compatibility;
            not emitted by this implementation.
    """
    def __init__(self, tabpos='top', reorderable=False, detachable=False,
                 group=0):
        super(TabWidget, self).__init__()
        self.reorderable = reorderable
        self.detachable = detachable
        w = QtGui.QTabWidget()
        w.currentChanged.connect(self._cb_redirect)
        w.tabCloseRequested.connect(self._tab_close)
        w.setUsesScrollButtons(True)
        #w.setTabsClosable(True)
        if self.reorderable:
            w.setMovable(True)
        ## w.tabInserted = self._tab_insert_cb
        ## w.tabRemoved = self._tab_remove_cb
        self.widget = w
        self.set_tab_position(tabpos)
        for name in ('page-switch', 'page-close', 'page-move', 'page-detach'):
            self.enable_callback(name)
    def set_tab_position(self, tabpos):
        """Place the tab bar at 'top', 'bottom', 'left' or 'right'."""
        w = self.widget
        if tabpos == 'top':
            w.setTabPosition(QtGui.QTabWidget.North)
        elif tabpos == 'bottom':
            w.setTabPosition(QtGui.QTabWidget.South)
        elif tabpos == 'left':
            w.setTabPosition(QtGui.QTabWidget.West)
        elif tabpos == 'right':
            w.setTabPosition(QtGui.QTabWidget.East)
    def _cb_redirect(self, index):
        # get new index, because passed index can be out of date
        index = self.get_index()
        child = self.index_to_widget(index)
        if child is not None:
            self.make_callback('page-switch', child)
    def _tab_close(self, index):
        # internal: forward Qt's close request to 'page-close'
        child = self.index_to_widget(index)
        self.make_callback('page-close', child)
    def add_widget(self, child, title=''):
        """Append `child` as a new tab labeled `title`."""
        self.add_ref(child)
        child_w = child.get_widget()
        self.widget.addTab(child_w, title)
        # attach title to child
        child.extdata.tab_title = title
    def _remove(self, nchild, delete=False):
        # override: remove the tab rather than a layout entry
        idx = self.widget.indexOf(nchild)
        self.widget.removeTab(idx)
        nchild.setParent(None)
        if delete:
            nchild.deleteLater()
    def get_index(self):
        """Return the index of the current tab."""
        return self.widget.currentIndex()
    def set_index(self, idx):
        """Make tab `idx` current."""
        self.widget.setCurrentIndex(idx)
        child = self.index_to_widget(idx)
        #child.focus()
    def index_of(self, child):
        """Return the tab index of wrapper `child`."""
        return self.widget.indexOf(child.get_widget())
    def index_to_widget(self, idx):
        """Returns child corresponding to `idx`"""
        nchild = self.widget.widget(idx)
        if nchild is None:
            return nchild
        return self._native_to_child(nchild)
    def highlight_tab(self, idx, tf):
        """Color tab `idx`'s text green (tf=True) or black (False)."""
        tabbar = self.widget.tabBar()
        if not tf:
            color = QtHelp.QColor('black')
        else:
            color = QtHelp.QColor('green')
        tabbar.setTabTextColor(idx, color)
class StackWidget(ContainerBase):
    """A container showing exactly one of its children at a time."""
    def __init__(self):
        super(StackWidget, self).__init__()
        self.widget = QtGui.QStackedWidget()
        # TODO: currently only provided for compatibility with other
        # like widgets
        self.enable_callback('page-switch')
    def add_widget(self, child, title=''):
        """Append `child` to the stack; `title` is stored on the child."""
        self.add_ref(child)
        child_w = child.get_widget()
        self.widget.addWidget(child_w)
        # attach title to child
        child.extdata.tab_title = title
    def get_index(self):
        """Return the index of the visible child."""
        return self.widget.currentIndex()
    def set_index(self, idx):
        """Show the child at index `idx`."""
        self.widget.setCurrentIndex(idx)
        #child = self.index_to_widget(idx)
        #child.focus()
    def index_of(self, child):
        """Return the stack index of wrapper `child`."""
        return self.widget.indexOf(child.get_widget())
    def index_to_widget(self, idx):
        """Return the child wrapper at index `idx`."""
        nchild = self.widget.widget(idx)
        return self._native_to_child(nchild)
class MDIWidget(ContainerBase):
    """A multiple-document-interface container of subwindows, switchable
    between free-floating ('mdi') and tabbed ('tabs') view modes.

    Subwindow move/resize/close events are intercepted by monkey-patching
    each QMdiSubWindow's event handlers (see add_widget); the original
    handlers are preserved with a leading-underscore name.

    Callbacks:
        'page-switch': fired with the newly activated child.
        'page-close': fired with the child whose subwindow close was
            requested (the close itself is vetoed; see _window_closed).
    """
    def __init__(self, tabpos='top', mode='mdi'):
        super(MDIWidget, self).__init__()
        w = QtGui.QMdiArea()
        w.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        w.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        w.subWindowActivated.connect(self._cb_redirect)
        ## w.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
        ##                                   QtGui.QSizePolicy.Expanding))
        w.setTabsClosable(True)
        w.setTabsMovable(False)
        self.widget = w
        # flag telling callers this backend supports true MDI
        self.true_mdi = True
        # remembers the last activated child index
        self.cur_index = -1
        for name in ('page-switch', 'page-close'):
            self.enable_callback(name)
        self.set_tab_position(tabpos)
        self.set_mode(mode)
    def set_tab_position(self, tabpos):
        """Place the tab bar (tabbed mode) at 'top'/'bottom'/'left'/'right'."""
        w = self.widget
        if tabpos == 'top':
            w.setTabPosition(QtGui.QTabWidget.North)
        elif tabpos == 'bottom':
            w.setTabPosition(QtGui.QTabWidget.South)
        elif tabpos == 'left':
            w.setTabPosition(QtGui.QTabWidget.West)
        elif tabpos == 'right':
            w.setTabPosition(QtGui.QTabWidget.East)
    def get_mode(self):
        """Return the current view mode: 'tabs' or 'mdi'."""
        if self.widget.viewMode() == QtGui.QMdiArea.TabbedView:
            return 'tabs'
        return 'mdi'
    def set_mode(self, mode):
        """Set the view mode to 'tabs' or 'mdi' (case-insensitive)."""
        mode = mode.lower()
        if mode == 'tabs':
            self.widget.setViewMode(QtGui.QMdiArea.TabbedView)
        elif mode == 'mdi':
            self.widget.setViewMode(QtGui.QMdiArea.SubWindowView)
        else:
            raise ValueError("Don't understand mode='%s'" % (mode))
    def _cb_redirect(self, subwin):
        # internal: forward subwindow activation to 'page-switch'
        if subwin is not None:
            nchild = subwin.widget()
            child = self._native_to_child(nchild)
            self.cur_index = self.children.index(child)
            self.make_callback('page-switch', child)
    def _window_resized(self, event, subwin, widget):
        # intercepted resizeEvent: remember the size, then chain to Qt
        qsize = event.size()
        wd, ht = qsize.width(), qsize.height()
        # save size
        widget.extdata.mdi_size = (wd, ht)
        subwin._resizeEvent(event)
    def _window_moved(self, event, subwin, widget):
        # intercepted moveEvent: remember the position, then chain to Qt
        qpos = event.pos()
        x, y = qpos.x(), qpos.y()
        # save position
        widget.extdata.mdi_pos = (x, y)
        subwin._moveEvent(event)
    def _window_closed(self, event, subwin, widget):
        # intercepted closeEvent: veto the close and notify the app
        nchild = subwin.widget()
        child = self._native_to_child(nchild)
        # let the application deal with this if desired in page-close
        # callback
        event.ignore()
        #self.widget.removeSubWindow(subwin)
        self.make_callback('page-close', child)
    def add_widget(self, child, title=''):
        """Add `child` in a new subwindow titled `title`, restoring any
        previously saved size/position from child.extdata."""
        self.add_ref(child)
        child_w = child.get_widget()
        subwin = QtGui.QMdiSubWindow(self.widget)
        subwin.setWidget(child_w)
        # attach title to child
        child.extdata.tab_title = title
        w = self.widget.addSubWindow(subwin)
        # hook close/resize/move, keeping the original handlers
        w._closeEvent = w.closeEvent
        w.closeEvent = lambda event: self._window_closed(event, w, child)
        # does child have a previously saved size
        size = child.extdata.get('mdi_size', None)
        if size is not None:
            wd, ht = size
            w.resize(wd, ht)
        # does child have a previously saved position
        pos = child.extdata.get('mdi_pos', None)
        if pos is not None:
            x, y = pos
            w.move(x, y)
        w._resizeEvent = w.resizeEvent
        w.resizeEvent = lambda event: self._window_resized(event, w, child)
        w._moveEvent = w.moveEvent
        w.moveEvent = lambda event: self._window_moved(event, w, child)
        w.setWindowTitle(title)
        child_w.show()
        w.show()
    def _remove(self, nchild, delete=False):
        # override: locate and remove the subwindow holding `nchild`
        subwins = list(self.widget.subWindowList())
        l = [ sw.widget() for sw in subwins ]
        try:
            idx = l.index(nchild)
            subwin = subwins[idx]
        except (IndexError, ValueError) as e:
            subwin = None
        if subwin is not None:
            self.widget.removeSubWindow(subwin)
            subwin.deleteLater()
        nchild.setParent(None)
        if delete:
            nchild.deleteLater()
    def get_index(self):
        """Return the index of the active child (last known if none)."""
        subwin = self.widget.activeSubWindow()
        if subwin is not None:
            return self._get_native_index(subwin.widget())
        return self.cur_index
    def _get_subwin(self, widget):
        # find the QMdiSubWindow containing native `widget`, or None
        for subwin in list(self.widget.subWindowList()):
            if subwin.widget() == widget:
                return subwin
        return None
    def set_index(self, idx):
        """Activate the subwindow of the child at index `idx`."""
        if 0 <= idx < len(self.children):
            child = self.children[idx]
            subwin = self._get_subwin(child.widget)
            if subwin is not None:
                self.widget.setActiveSubWindow(subwin)
    def index_of(self, child):
        """Return the index of wrapper `child`, or -1."""
        nchild = child.get_widget()
        return self._get_native_index(nchild)
    def index_to_widget(self, idx):
        """Return the child wrapper at index `idx`, or None."""
        if 0 <= idx < len(self.children):
            return self.children[idx]
        return None
    def tile_panes(self):
        """Tile the subwindows."""
        self.widget.tileSubWindows()
    def cascade_panes(self):
        """Cascade the subwindows."""
        self.widget.cascadeSubWindows()
    def use_tabs(self, tf):
        """Switch between tabbed (True) and subwindow (False) view."""
        if tf:
            self.widget.setViewMode(QtGui.QMdiArea.TabbedView)
        else:
            self.widget.setViewMode(QtGui.QMdiArea.SubWindowView)
class ScrollArea(ContainerBase):
    """A scrollable viewport around a single child widget.

    Callbacks:
        'configure': fired with (width, height) when the area is resized.
    """
    def __init__(self):
        super(ScrollArea, self).__init__()
        self.widget = QtGui.QScrollArea()
        self.widget.setWidgetResizable(True)
        # hook resizeEvent, keeping the original handler
        self.widget._resizeEvent = self.widget.resizeEvent
        self.widget.resizeEvent = self._resize_cb
        self.enable_callback('configure')
    def _resize_cb(self, event):
        # chain to Qt's handler, then report the new geometry
        self.widget._resizeEvent(event)
        rect = self.widget.geometry()
        x1, y1, x2, y2 = rect.getCoords()
        width = x2 - x1
        height = y2 - y1
        self.make_callback('configure', width, height)
    def set_widget(self, child):
        """Set `child` as the scrolled content."""
        self.add_ref(child)
        self.widget.setWidget(child.get_widget())
    def scroll_to_end(self, vertical=True, horizontal=False):
        """Scroll to the maximum extent in the selected direction(s)."""
        area = self.widget
        if vertical:
            area.verticalScrollBar().setValue(area.verticalScrollBar().maximum())
        if horizontal:
            area.horizontalScrollBar().setValue(area.horizontalScrollBar().maximum())
class Splitter(ContainerBase):
    """A container splitting children with draggable handles."""
    def __init__(self, orientation='horizontal'):
        super(Splitter, self).__init__()
        w = QtGui.QSplitter()
        self.orientation = orientation
        if orientation == 'horizontal':
            w.setOrientation(QtCore.Qt.Horizontal)
        else:
            w.setOrientation(QtCore.Qt.Vertical)
        self.widget = w
        # NOTE(review): QSplitter.setStretchFactor() documents an int
        # stretch factor; 0.5 is passed here -- confirm intended behavior
        w.setStretchFactor(0, 0.5)
        w.setStretchFactor(1, 0.5)
    def add_widget(self, child):
        """Append `child` as a new pane."""
        self.add_ref(child)
        child_w = child.get_widget()
        self.widget.addWidget(child_w)
    def get_sizes(self):
        """Return the current pane sizes as a list of ints."""
        return list(self.widget.sizes())
    def set_sizes(self, sizes):
        """Set the pane sizes from a list of ints."""
        return self.widget.setSizes(sizes)
class GridBox(ContainerBase):
    """A container laying out children in a grid."""
    def __init__(self, rows=1, columns=1):
        super(GridBox, self).__init__()
        # NOTE(review): `rows` and `columns` are accepted but unused --
        # the Qt grid layout grows on demand
        w = QtGui.QWidget()
        layout = QtGui.QGridLayout()
        w.setLayout(layout)
        self.widget = w
    def resize_grid(self, rows, columns):
        # no-op: kept for API compatibility with other backends
        pass
    def set_row_spacing(self, val):
        """Set vertical spacing between rows, in pixels."""
        self.widget.layout().setVerticalSpacing(val)
    def set_column_spacing(self, val):
        """Set horizontal spacing between columns, in pixels."""
        self.widget.layout().setHorizontalSpacing(val)
    def set_spacing(self, val):
        """Set both row and column spacing to `val` pixels."""
        self.set_row_spacing(val)
        self.set_column_spacing(val)
    def add_widget(self, child, row, col, stretch=0):
        """Place `child` at (row, col); `stretch` is ignored here."""
        self.add_ref(child)
        w = child.get_widget()
        self.widget.layout().addWidget(w, row, col)
class ToolbarAction(WidgetBase):
    """Wrapper for a QAction created by Toolbar.add_action().

    `self.widget` is the QAction, assigned by Toolbar after creation.

    Callbacks:
        'activated': fired on trigger; passes the checked state when the
            action is checkable.
    """
    def __init__(self):
        super(ToolbarAction, self).__init__()
        self.widget = None
        self.enable_callback('activated')
    def _cb_redirect(self, *args):
        # checkable actions report their new state; plain ones don't
        if self.widget.isCheckable():
            tf = self.widget.isChecked()
            self.make_callback('activated', tf)
        else:
            self.make_callback('activated')
    def set_state(self, tf):
        """Set the checked state (checkable actions)."""
        self.widget.setChecked(tf)
    def get_state(self):
        """Return the checked state (checkable actions)."""
        return self.widget.isChecked()
class Toolbar(ContainerBase):
    """A horizontal or vertical toolbar of actions and widgets."""
    def __init__(self, orientation='horizontal'):
        super(Toolbar, self).__init__()
        w = QtGui.QToolBar()
        if orientation == 'horizontal':
            w.setOrientation(QtCore.Qt.Horizontal)
        else:
            w.setOrientation(QtCore.Qt.Vertical)
        self.widget = w
    def add_action(self, text, toggle=False, iconpath=None):
        """Add an action button; returns the ToolbarAction wrapper.

        If `iconpath` is given, the image is scaled to 24x24 and used
        as the action's icon.
        """
        child = ToolbarAction()
        if iconpath:
            image = QImage(iconpath)
            qsize = QtCore.QSize(24, 24)
            image = image.scaled(qsize)
            pixmap = QPixmap.fromImage(image)
            iconw = QIcon(pixmap)
            action = self.widget.addAction(iconw, text,
                                           child._cb_redirect)
        else:
            action = self.widget.addAction(text, child._cb_redirect)
        action.setCheckable(toggle)
        child.widget = action
        self.add_ref(child)
        return child
    def add_widget(self, child):
        """Embed an arbitrary wrapped widget in the toolbar."""
        self.add_ref(child)
        w = child.get_widget()
        self.widget.addWidget(w)
    def add_menu(self, text, menu=None):
        """Add an action that pops up `menu` (created if None)."""
        if menu is None:
            menu = Menu()
        child = self.add_action(text)
        child.add_callback('activated', lambda w: menu.popup())
        return menu
    def add_separator(self):
        """Add a visual separator."""
        self.widget.addSeparator()
class MenuAction(WidgetBase):
    """A single menu entry, optionally checkable.

    The native QAction is attached as ``self.widget`` by
    ``Menu.add_widget`` after construction.
    """

    def __init__(self, text=None, checkable=False):
        super(MenuAction, self).__init__()
        self.widget = None
        self.text = text
        self.checkable = checkable
        self.enable_callback('activated')

    def set_state(self, tf):
        """Check/uncheck the entry; only legal for checkable items."""
        if not self.checkable:
            raise ValueError("Not a checkable menu item")
        self.widget.setChecked(tf)

    def get_state(self):
        """Return True if the entry is currently checked."""
        return self.widget.isChecked()

    def _cb_redirect(self, *args):
        # Checkable items report their toggle state; plain ones do not.
        if not self.widget.isCheckable():
            self.make_callback('activated')
        else:
            self.make_callback('activated', self.widget.isChecked())
class Menu(ContainerBase):
    """Wrapper around a QMenu holding MenuAction children."""

    def __init__(self):
        super(Menu, self).__init__()
        # this gets overwritten if created from Menubar
        self.widget = QtGui.QMenu()

    def add_widget(self, child):
        """Attach a MenuAction `child` as a native QAction on this menu."""
        action = self.widget.addAction(child.text, lambda: child._cb_redirect())
        if child.checkable:
            action.setCheckable(True)
        child.widget = action
        self.add_ref(child)

    def add_name(self, name, checkable=False):
        """Create, attach and return a MenuAction labeled `name`."""
        item = MenuAction(text=name, checkable=checkable)
        self.add_widget(item)
        return item

    def add_separator(self):
        self.widget.addSeparator()

    def popup(self, widget=None):
        """Show the menu at `widget`'s top-left corner, or at the cursor."""
        if widget is None:
            self.widget.exec_(QtGui.QCursor.pos())
        else:
            anchor = widget.get_widget()
            self.widget.exec_(anchor.mapToGlobal(QtCore.QPoint(0, 0)))
class Menubar(ContainerBase):
    """Wrapper around a QMenuBar whose children are Menu objects."""

    def __init__(self):
        super(Menubar, self).__init__()
        self.widget = QtGui.QMenuBar()

    def add_widget(self, child):
        """Attach an existing Menu `child` to the bar."""
        self.widget.addMenu(child.get_widget())
        self.add_ref(child)

    def add_name(self, name):
        """Create a new Menu titled `name`, attach it and return it."""
        menu = Menu()
        menu.widget = self.widget.addMenu(name)
        self.add_ref(menu)
        return menu
class TopLevel(ContainerBase):
    """A top-level application window holding a single content widget.

    Closing the window does not destroy it directly; the close request
    is forwarded through the 'close' callback so the application can
    decide what to do.
    """

    def __init__(self, title=None):
        super(TopLevel, self).__init__()
        widget = QtHelp.TopLevel()
        self.widget = widget
        box = QtGui.QVBoxLayout()
        box.setContentsMargins(0, 0, 0, 0)
        box.setSpacing(0)
        widget.setLayout(box)
        # intercept window-manager close requests
        widget.closeEvent = lambda event: self._quit(event)
        widget.destroyed = self._destroyed_cb
        if title is not None:
            widget.setWindowTitle(title)

        self.enable_callback('close')

    def set_widget(self, child):
        """Set `child` as the (single) content widget of this window."""
        self.add_ref(child)
        child_w = child.get_widget()
        self.widget.layout().addWidget(child_w)

    def _quit(self, event):
        # don't accept the event -- let the application decide how to
        # handle the close request via the 'close' callback
        event.ignore()
        self.close()

    def _closeEvent(self, *args):
        # BUG FIX: was ``def _closeEvent(*args)``, so the implicit
        # ``self`` landed in *args and the body's ``self`` reference
        # raised NameError whenever this handler was invoked.
        self.close()

    def close(self):
        """Notify listeners that the window wants to close."""
        self.make_callback('close')

    def _destroyed_cb(self, *args):
        # BUG FIX: previously referenced an undefined name ``event``
        # (guaranteed NameError).  Nothing needs accepting on a
        # 'destroyed' notification, so this is now a no-op.
        pass

    def raise_(self):
        self.widget.raise_()
        self.widget.activateWindow()

    def lower(self):
        self.widget.lower()

    def focus(self):
        self.widget.raise_()
        self.widget.activateWindow()

    def move(self, x, y):
        """Move the window so its top-left corner is at screen (x, y)."""
        self.widget.move(x, y)

    def maximize(self):
        self.widget.showMaximized()

    def unmaximize(self):
        self.widget.showNormal()

    def fullscreen(self):
        self.widget.showFullScreen()

    def unfullscreen(self):
        self.widget.showNormal()

    def is_fullscreen(self):
        return self.widget.isFullScreen()

    def iconify(self):
        # hide() is inherited from the widget wrapper base class
        self.hide()

    def uniconify(self):
        self.widget.showNormal()

    def set_title(self, title):
        self.widget.setWindowTitle(title)
class Application(Callback.Callbacks):
    """Wrapper around the QApplication; tracks top-level windows.

    Constructing one creates the QApplication and records this instance
    in the module-level ``_app`` global.
    """
    def __init__(self, logger=None):
        global _app
        super(Application, self).__init__()
        self.logger = logger
        self.window_list = []
        # wid (str) -> window wrapper
        self.window_dict = {}
        # counter used to auto-generate window ids in add_window()
        self.wincnt = 0
        # have_pyqt4 is presumably defined by the Qt compat import; the
        # 'raster' graphics system setting only exists under PyQt4
        if have_pyqt4:
            QtGui.QApplication.setGraphicsSystem('raster')
        app = QtGui.QApplication([])
        #app.lastWindowClosed.connect(lambda *args: self._quit())
        self._qtapp = app
        # record the singleton for module-level access
        _app = self
        # Get screen size
        desktop = self._qtapp.desktop()
        #rect = desktop.screenGeometry()
        rect = desktop.availableGeometry()
        size = rect.size()
        self.screen_wd = size.width()
        self.screen_ht = size.height()
        for name in ('shutdown', ):
            self.enable_callback(name)
    def get_screen_size(self):
        """Return (width, height) of the available desktop area, in pixels."""
        return (self.screen_wd, self.screen_ht)
    def process_events(self):
        """Run one iteration of the Qt event loop."""
        self._qtapp.processEvents()
    def process_end(self):
        """Ask the Qt event loop to exit."""
        self._qtapp.quit()
    def add_window(self, window, wid=None):
        """Register `window` under id `wid` ('winN' auto-generated if None)."""
        if wid is None:
            wid = 'win%d' % (self.wincnt)
            self.wincnt += 1
        window.wid = wid
        window.url = ''
        window.app = self
        self.window_dict[wid] = window
    def get_window(self, wid):
        """Return the window registered under `wid` (KeyError if absent)."""
        return self.window_dict[wid]
    def has_window(self, wid):
        return wid in self.window_dict
    def get_wids(self):
        return list(self.window_dict.keys())
    def make_window(self, title=None):
        """Create, register and return a new TopLevel window."""
        w = TopLevel(title=title)
        self.add_window(w)
        return w
class Dialog(WidgetBase):
    """Modal dialog with a content area and a row of buttons.

    `buttons` is a sequence of (label, value) pairs; clicking a button
    fires the 'activated' callback with that value.  `title` and
    `flags` are accepted for API compatibility but currently unused.
    """

    def __init__(self, title=None, flags=None, buttons=None,
                 parent=None):
        super(Dialog, self).__init__()
        # BUG FIX: `parent` defaults to None, but parent.get_widget()
        # was called unconditionally, raising AttributeError.
        parent_w = parent.get_widget() if parent is not None else None
        self.widget = QtGui.QDialog(parent_w)
        self.widget.setModal(True)
        vbox = QtGui.QVBoxLayout()
        self.widget.setLayout(vbox)

        self.content = VBox()
        vbox.addWidget(self.content.get_widget(), stretch=1)

        hbox_w = QtGui.QWidget()
        hbox = QtGui.QHBoxLayout()
        hbox_w.setLayout(hbox)

        # BUG FIX: guard against buttons=None (the default), which the
        # original tried to iterate (TypeError).
        for name, val in (buttons or []):
            btn = QtGui.QPushButton(name)

            def cb(val):
                # bind `val` now; a bare lambda would late-bind the loop var
                return lambda: self._cb_redirect(val)

            btn.clicked.connect(cb(val))
            hbox.addWidget(btn, stretch=0)

        vbox.addWidget(hbox_w, stretch=0)
        self.enable_callback('activated')

    def _cb_redirect(self, val):
        self.make_callback('activated', val)

    def get_content_area(self):
        """Return the VBox into which dialog content should be packed."""
        return self.content
class SaveDialog(QtGui.QFileDialog):
    """Native "save file" dialog; opens immediately on construction.

    `selectedfilter` is a glob pattern such as '*.png'; when given, the
    returned path is forced to carry that extension.
    """
    def __init__(self, title=None, selectedfilter=None):
        super(SaveDialog, self).__init__()
        self.selectedfilter = selectedfilter
        # NOTE(review): under PyQt5, getSaveFileName returns a
        # (filename, filter) tuple rather than a plain string, which
        # would break the endswith() logic below -- confirm which Qt
        # binding this code path targets.
        self.widget = self.getSaveFileName(self, title, '', selectedfilter)
    def get_path(self):
        # append the extension (the filter minus its leading '*') if missing;
        # an empty/cancelled result is returned unchanged
        if self.widget and self.selectedfilter is not None and not self.widget.endswith(self.selectedfilter[1:]):
            self.widget += self.selectedfilter[1:]
        return self.widget
class DragPackage(object):
    """Helper for initiating a drag-and-drop operation from a widget."""

    def __init__(self, src_widget):
        self.src_widget = src_widget
        self._drag = QtHelp.QDrag(self.src_widget)

    def set_urls(self, urls):
        """Attach a list of URL strings as the drag payload."""
        mime_data = QtCore.QMimeData()
        mime_data.setUrls([QtCore.QUrl(url) for url in urls])
        self._drag.setMimeData(mime_data)

    def start_drag(self):
        """Begin the drag as a move action (Qt5 renamed start() to exec_())."""
        if QtHelp.have_pyqt5:
            result = self._drag.exec_(QtCore.Qt.MoveAction)
        else:
            result = self._drag.start(QtCore.Qt.MoveAction)
# MODULE FUNCTIONS
def name_mangle(name, pfx=''):
    """Return `name` lowercased with every character that is not a
    letter, digit, or underscore replaced by '_', prefixed with `pfx`.
    """
    def keep(ch):
        # same predicate as str.isalpha/isdigit -- deliberately NOT
        # isalnum(), which also accepts numeric characters like '½'
        return ch.isalpha() or ch.isdigit() or ch == '_'

    return pfx + ''.join(ch if keep(ch) else '_' for ch in name.lower())
def make_widget(title, wtype):
    """Create and return a wrapped widget of kind `wtype`.

    `title` is used as the label/text for the widget kinds that take
    one.  Raises ValueError for an unrecognized `wtype`.
    """
    # label variants share a constructor and differ only in alignment
    if wtype in ('label', 'llabel'):
        w = Label(title)
        alignment = (QtCore.Qt.AlignRight if wtype == 'label'
                     else QtCore.Qt.AlignLeft)
        w.widget.setAlignment(alignment)
        return w

    factories = {
        'entry': TextEntry,
        'entryset': TextEntrySet,
        'combobox': ComboBox,
        'spinbutton': lambda: SpinBox(dtype=int),
        'spinfloat': lambda: SpinBox(dtype=float),
        'vbox': VBox,
        'hbox': HBox,
        'hscale': lambda: Slider(orientation='horizontal'),
        'vscale': lambda: Slider(orientation='vertical'),
        'checkbutton': lambda: CheckBox(title),
        'radiobutton': lambda: RadioButton(title),
        'togglebutton': lambda: ToggleButton(title),
        'button': lambda: Button(title),
        'spacer': lambda: Label(''),
        'textarea': lambda: TextArea(editable=True),
        'toolbar': Toolbar,
        'progress': ProgressBar,
        'menubar': Menubar,
    }
    try:
        factory = factories[wtype]
    except KeyError:
        raise ValueError("Bad wtype=%s" % wtype)
    return factory()
def hadjust(w, orientation):
    """For horizontal orientation, return `w` packed above an expanding
    spacer inside a VBox; otherwise return `w` unchanged.
    """
    if orientation != 'horizontal':
        return w
    padded = VBox()
    padded.add_widget(w)
    padded.add_widget(Label(''), stretch=1)
    return padded
def build_info(captions, orientation='vertical'):
    """Build a grid of labeled widgets from a caption spec.

    `captions` is a sequence of tuples of alternating (title, wtype)
    pairs, one tuple per row.  Returns (wrapped_widget, bunch) where
    `bunch` maps mangled widget names to their wrappers.
    """
    numrows = len(captions)
    # widest row determines the column count; each logical column is a
    # (title, wtype) pair, hence the even-number requirement
    # NOTE(review): `reduce` is used unqualified -- under Python 3 it
    # must come from functools; presumably imported elsewhere in the file.
    numcols = reduce(lambda acc, tup: max(acc, len(tup)), captions, 0)
    if (numcols % 2) != 0:
        raise ValueError("Column spec is not an even number")
    numcols = int(numcols // 2)
    widget = QtGui.QWidget()
    table = QtGui.QGridLayout()
    widget.setLayout(table)
    table.setVerticalSpacing(2)
    table.setHorizontalSpacing(4)
    table.setContentsMargins(2, 2, 2, 2)
    wb = Bunch.Bunch()
    row = 0
    for tup in captions:
        col = 0
        while col < numcols:
            idx = col * 2
            if idx < len(tup):
                title, wtype = tup[idx:idx+2]
                # titles ending in ':' are labels; key them as 'lbl_<title>'
                if not title.endswith(':'):
                    name = name_mangle(title)
                else:
                    name = name_mangle('lbl_'+title[:-1])
                w = make_widget(title, wtype)
                table.addWidget(w.widget, row, col)
                wb[name] = w
            col += 1
        row += 1
    w = wrap(widget)
    # in horizontal orientation, pad below so the grid stays compact
    w = hadjust(w, orientation=orientation)
    return w, wb
def wrap(native_widget):
    """Wrap a raw Qt widget in a generic WidgetBase shim."""
    shim = WidgetBase()
    shim.widget = native_widget
    return shim
def get_orientation(container):
    """Guess a layout orientation from `container`'s (width, height).

    Containers without a ``size`` attribute, or taller than wide, get
    'vertical'; otherwise 'horizontal'.
    """
    if not hasattr(container, 'size'):
        return 'vertical'
    wd, ht = container.size
    return 'vertical' if wd < ht else 'horizontal'
def get_oriented_box(container, scrolled=True, fill=False):
    """Build a box oriented to suit `container`'s shape.

    Returns (inner_box, outer_widget, orientation): pack content into
    `inner_box`; add `outer_widget` (a ScrollArea unless scrolled is
    False) to the container.  Unless `fill`, a stretchy spacer is
    appended so content stays compact.
    """
    orientation = get_orientation(container)
    inner = VBox() if orientation == 'vertical' else HBox()
    outer = VBox()
    outer.add_widget(inner, stretch=0)
    if not fill:
        outer.add_widget(Label(''), stretch=1)

    if not scrolled:
        return inner, outer, orientation

    sw = ScrollArea()
    sw.set_widget(outer)
    return inner, sw, orientation
#END
| Cadair/ginga | ginga/qtw/Widgets.py | Python | bsd-3-clause | 54,587 |
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from mymoney.core.utils.currencies import get_currencies
class BankAccountManager(models.Manager):
    """Manager adding per-user caching and orphan cleanup."""

    def get_user_bankaccounts(self, user):
        """Return `user`'s bank accounts ordered by label.

        The queryset is memoized on the user object so repeated calls
        within a request don't rebuild it.
        """
        try:
            return user._cache_bankaccounts
        except AttributeError:
            user._cache_bankaccounts = user.bankaccounts.order_by('label')
            return user._cache_bankaccounts

    def delete_orphans(self):
        """Delete bank accounts which have no more owners."""
        self.filter(owners__isnull=True).delete()
class BankAccount(models.Model):
    """A bank account shared by one or more (non-staff) owners.

    ``balance`` always incorporates ``balance_initial``; see save()
    for how edits to the initial balance are propagated.
    """
    label = models.CharField(max_length=255, verbose_name=_('Label'))
    balance = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        default=0,
        verbose_name=_('Balance'),
    )
    balance_initial = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        default=0,
        verbose_name=_('Initial balance'),
        help_text=_('Initial balance will automatically update the balance.'),
    )
    currency = models.CharField(
        max_length=3,
        choices=get_currencies(),
        verbose_name=_('Currency'),
    )
    owners = models.ManyToManyField(
        settings.AUTH_USER_MODEL,
        limit_choices_to={'is_staff': False, 'is_superuser': False},
        verbose_name=_('Owners'),
        related_name='bankaccounts',
        db_table='bankaccounts_owners',
    )
    objects = BankAccountManager()
    class Meta:
        db_table = 'bankaccounts'
        permissions = (("administer_owners", "Administer owners"),)
    def __str__(self):
        return self.label
    def save(self, *args, **kwargs):
        """Keep ``balance`` in sync with changes to ``balance_initial``."""
        # Init balance. Merge both just in case.
        if self.pk is None:
            self.balance += self.balance_initial
        # Otherwise update it with the new delta.
        else:
            # re-fetch the stored row so only the *change* in the
            # initial balance is applied to the running balance
            original = BankAccount.objects.get(pk=self.pk)
            self.balance += self.balance_initial - original.balance_initial
        super(BankAccount, self).save(*args, **kwargs)
    def get_absolute_url(self):
        """Return the URL of this account's bank transaction list."""
        return reverse('banktransactions:list', kwargs={
            'bankaccount_pk': self.pk,
        })
| ychab/mymoney | mymoney/apps/bankaccounts/models.py | Python | bsd-3-clause | 2,268 |
from collections import ChainMap

from py4jdbc.exceptions import dbapi2
from py4jdbc.exceptions import sqlstate
# Start from the generic SQLSTATE exception registry and layer a new
# (initially empty) map on top for the Vertica-specific classes below.
# NOTE(review): ``sqlstate`` is not imported in this module's visible
# imports -- presumably ``from py4jdbc.exceptions import sqlstate`` is
# required; confirm against the package layout.
exc_classes = ChainMap(sqlstate.exc_classes).new_child()
class _VCodeAgg(dbapi2.CodeAggregatorMeta):
    # metaclass shared by the classes below; binds them to this
    # module's layered registry
    exc_classes = exc_classes
# Vertica-specific SQLSTATE "class" exceptions.  Each carries the
# two-character ``code`` prefix of the server error codes it covers;
# the _VCodeAgg metaclass presumably records each class in
# ``exc_classes`` under that prefix (confirm in dbapi2).
class VerticaMultinodeError(dbapi2.DatabaseError, metaclass=_VCodeAgg):
    code = 'V1'
class VerticaMiscellaneousError(dbapi2.DatabaseError, metaclass=_VCodeAgg):
    code = 'V2'
class ConfigurationFileError(dbapi2.DatabaseError, metaclass=_VCodeAgg):
    code = 'VC'
class DbDesignerError(dbapi2.DatabaseError, metaclass=_VCodeAgg):
    code = 'VD'
class UserProcedureError(dbapi2.DatabaseError, metaclass=_VCodeAgg):
    code = 'VP'
class InternalError(dbapi2.DatabaseError, metaclass=_VCodeAgg):
    code = 'VX'
messages = (
('00000', 'ERRCODE_SUCCESSFUL_COMPLETION'),
('01000', 'ERRCODE_WARNING'),
('01003', 'ERRCODE_WARNING_NULL_VALUE_ELIMINATED_IN_SET_FUNCTION'),
('01004', 'ERRCODE_WARNING_STRING_DATA_RIGHT_TRUNCATION'),
('01006', 'ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED'),
('01007', 'ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED'),
('01008', 'ERRCODE_WARNING_PRIVILEGE_ALREADY_GRANTED'),
('01009', 'ERRCODE_WARNING_PRIVILEGE_ALREADY_REVOKED'),
('0100C', 'ERRCODE_WARNING_DYNAMIC_RESULT_SETS_RETURNED'),
('01V01', 'ERRCODE_WARNING_DEPRECATED_FEATURE'),
('01V02', 'ERRCODE_WARNING_QUERY_RETRIED'),
('02000', 'ERRCODE_NO_DATA'),
('02001', 'ERRCODE_NO_ADDITIONAL_DYNAMIC_RESULT_SETS_RETURNED'),
('03000', 'ERRCODE_SQL_STATEMENT_NOT_YET_COMPLETE'),
('08000', 'ERRCODE_CONNECTION_EXCEPTION'),
('08001', 'ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION'),
('08003', 'ERRCODE_CONNECTION_DOES_NOT_EXIST'),
('08004', 'ERRCODE_SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION'),
('08006', 'ERRCODE_CONNECTION_FAILURE'),
('08007', 'ERRCODE_TRANSACTION_RESOLUTION_UNKNOWN'),
('08V01', 'ERRCODE_PROTOCOL_VIOLATION'),
('09000', 'ERRCODE_TRIGGERED_ACTION_EXCEPTION'),
('0A000', 'ERRCODE_FEATURE_NOT_SUPPORTED'),
('0A005', 'ERRCODE_PLAN_TO_SQL_NOT_SUPPORTED'),
('0B000', 'ERRCODE_INVALID_TRANSACTION_INITIATION'),
('0F000', 'ERRCODE_LOCATOR_EXCEPTION'),
('0F001', 'ERRCODE_L_E_INVALID_SPECIFICATION'),
('0L000', 'ERRCODE_INVALID_GRANTOR'),
('0LV01', 'ERRCODE_INVALID_GRANT_OPERATION'),
('0P000', 'ERRCODE_INVALID_ROLE_SPECIFICATION'),
('21000', 'ERRCODE_CARDINALITY_VIOLATION'),
('22000', 'ERRCODE_DATA_EXCEPTION'),
('22001', 'ERRCODE_STRING_DATA_RIGHT_TRUNCATION'),
('22002', 'ERRCODE_NULL_VALUE_NO_INDICATOR_PARAMETER'),
('22003', 'ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE'),
('22004', 'ERRCODE_NULL_VALUE_NOT_ALLOWED'),
('22005', 'ERRCODE_ERROR_IN_ASSIGNMENT'),
('22007', 'ERRCODE_INVALID_DATETIME_FORMAT'),
('22008', 'ERRCODE_DATETIME_FIELD_OVERFLOW'),
('22009', 'ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE'),
('2200B', 'ERRCODE_ESCAPE_CHARACTER_CONFLICT'),
('2200C', 'ERRCODE_INVALID_USE_OF_ESCAPE_CHARACTER'),
('2200D', 'ERRCODE_INVALID_ESCAPE_OCTET'),
('2200F', 'ERRCODE_ZERO_LENGTH_CHARACTER_STRING'),
('2200G', 'ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH'),
('22010', 'ERRCODE_INVALID_INDICATOR_PARAMETER_VALUE'),
('22011', 'ERRCODE_SUBSTRING_ERROR'),
('22012', 'ERRCODE_DIVISION_BY_ZERO'),
('22015', 'ERRCODE_INTERVAL_FIELD_OVERFLOW'),
('22018', 'ERRCODE_INVALID_CHARACTER_VALUE_FOR_CAST'),
('22019', 'ERRCODE_INVALID_ESCAPE_CHARACTER'),
('2201B', 'ERRCODE_INVALID_REGULAR_EXPRESSION'),
('2201E', 'ERRCODE_INVALID_ARGUMENT_FOR_LOG'),
('2201F', 'ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION'),
('2201G', 'ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION'),
('22020', 'ERRCODE_INVALID_LIMIT_VALUE'),
('22021', 'ERRCODE_CHARACTER_NOT_IN_REPERTOIRE'),
('22022', 'ERRCODE_INDICATOR_OVERFLOW'),
('22023', 'ERRCODE_INVALID_PARAMETER_VALUE'),
('22024', 'ERRCODE_UNTERMINATED_C_STRING'),
('22025', 'ERRCODE_INVALID_ESCAPE_SEQUENCE'),
('22026', 'ERRCODE_STRING_DATA_LENGTH_MISMATCH'),
('22027', 'ERRCODE_TRIM_ERROR'),
('2202E', 'ERRCODE_ARRAY_ELEMENT_ERROR'),
('22906', 'ERRCODE_NONSTANDARD_USE_OF_ESCAPE_CHARACTER'),
('22V01', 'ERRCODE_FLOATING_POINT_EXCEPTION'),
('22V02', 'ERRCODE_INVALID_TEXT_REPRESENTATION'),
('22V03', 'ERRCODE_INVALID_BINARY_REPRESENTATION'),
('22V04', 'ERRCODE_BAD_COPY_FILE_FORMAT'),
('22V05', 'ERRCODE_UNTRANSLATABLE_CHARACTER'),
('22V0B', 'ERRCODE_ESCAPE_CHARACTER_ON_NOESCAPE'),
('22V21', 'ERRCODE_INVALID_EPOCH'),
('22V22', 'ERRCODE_PLPGSQL_ERROR'),
('22V23', 'ERRCODE_RAISE_EXCEPTION'),
('22V24', 'ERRCODE_COPY_PARSE_ERROR'),
('23000', 'ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION'),
('23001', 'ERRCODE_RESTRICT_VIOLATION'),
('23502', 'ERRCODE_NOT_NULL_VIOLATION'),
('23503', 'ERRCODE_FOREIGN_KEY_VIOLATION'),
('23505', 'ERRCODE_UNIQUE_VIOLATION'),
('23514', 'ERRCODE_CHECK_VIOLATION'),
('24000', 'ERRCODE_INVALID_CURSOR_STATE'),
('25000', 'ERRCODE_INVALID_TRANSACTION_STATE'),
('25001', 'ERRCODE_ACTIVE_SQL_TRANSACTION'),
('25002', 'ERRCODE_BRANCH_TRANSACTION_ALREADY_ACTIVE'),
('25003', 'ERRCODE_INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION'),
('25004', 'ERRCODE_INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION'),
('25005', 'ERRCODE_NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION'),
('25006', 'ERRCODE_READ_ONLY_SQL_TRANSACTION'),
('25007', 'ERRCODE_SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED'),
('25008', 'ERRCODE_HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL'),
('25V01', 'ERRCODE_NO_ACTIVE_SQL_TRANSACTION'),
('25V02', 'ERRCODE_IN_FAILED_SQL_TRANSACTION'),
('26000', 'ERRCODE_INVALID_SQL_STATEMENT_NAME'),
('27000', 'ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION'),
('28000', 'ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION'),
('28001', 'ERRCODE_ACCOUNT_LOCKED'),
('28002', 'ERRCODE_PASSWORD_EXPIRED'),
('28003', 'ERRCODE_PASSWORD_IN_GRACE_PERIOD'),
('2B000', 'ERRCODE_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST'),
('2BV01', 'ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST'),
('2D000', 'ERRCODE_INVALID_TRANSACTION_TERMINATION'),
('2F000', 'ERRCODE_SQL_ROUTINE_EXCEPTION'),
('2F002', 'ERRCODE_S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED'),
('2F003', 'ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED'),
('2F004', 'ERRCODE_S_R_E_READING_SQL_DATA_NOT_PERMITTED'),
('2F005', 'ERRCODE_S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT'),
('34000', 'ERRCODE_INVALID_CURSOR_NAME'),
('38000', 'ERRCODE_EXTERNAL_ROUTINE_EXCEPTION'),
('38001', 'ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED'),
('38002', 'ERRCODE_E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED'),
('38003', 'ERRCODE_E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED'),
('38004', 'ERRCODE_E_R_E_READING_SQL_DATA_NOT_PERMITTED'),
('39000', 'ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION'),
('39001', 'ERRCODE_E_R_I_E_INVALID_SQLSTATE_RETURNED'),
('39004', 'ERRCODE_E_R_I_E_NULL_VALUE_NOT_ALLOWED'),
('39V01', 'ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED'),
('39V02', 'ERRCODE_E_R_I_E_SRF_PROTOCOL_VIOLATED'),
('3B000', 'ERRCODE_SAVEPOINT_EXCEPTION'),
('3B001', 'ERRCODE_S_E_INVALID_SPECIFICATION'),
('3D000', 'ERRCODE_INVALID_CATALOG_NAME'),
('3F000', 'ERRCODE_INVALID_SCHEMA_NAME'),
('40000', 'ERRCODE_TRANSACTION_ROLLBACK'),
('40001', 'ERRCODE_T_R_SERIALIZATION_FAILURE'),
('40002', 'ERRCODE_T_R_INTEGRITY_CONSTRAINT_VIOLATION'),
('40003', 'ERRCODE_T_R_STATEMENT_COMPLETION_UNKNOWN'),
('40V01', 'ERRCODE_T_R_DEADLOCK_DETECTED'),
('42000', 'ERRCODE_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION'),
('42501', 'ERRCODE_INSUFFICIENT_PRIVILEGE'),
('42601', 'ERRCODE_SYNTAX_ERROR'),
('42602', 'ERRCODE_INVALID_NAME'),
('42611', 'ERRCODE_INVALID_COLUMN_DEFINITION'),
('42622', 'ERRCODE_NAME_TOO_LONG'),
('42701', 'ERRCODE_DUPLICATE_COLUMN'),
('42702', 'ERRCODE_AMBIGUOUS_COLUMN'),
('42703', 'ERRCODE_UNDEFINED_COLUMN'),
('42704', 'ERRCODE_UNDEFINED_OBJECT'),
('42710', 'ERRCODE_DUPLICATE_OBJECT'),
('42712', 'ERRCODE_DUPLICATE_ALIAS'),
('42723', 'ERRCODE_DUPLICATE_FUNCTION'),
('42725', 'ERRCODE_AMBIGUOUS_FUNCTION'),
('42803', 'ERRCODE_GROUPING_ERROR'),
('42804', 'ERRCODE_DATATYPE_MISMATCH'),
('42809', 'ERRCODE_WRONG_OBJECT_TYPE'),
('42830', 'ERRCODE_INVALID_FOREIGN_KEY'),
('42846', 'ERRCODE_CANNOT_COERCE'),
('42883', 'ERRCODE_UNDEFINED_FUNCTION'),
('42939', 'ERRCODE_RESERVED_NAME'),
('42P20', 'ERRCODE_WINDOWING_ERROR'),
('42V01', 'ERRCODE_UNDEFINED_TABLE'),
('42V02', 'ERRCODE_UNDEFINED_PARAMETER'),
('42V03', 'ERRCODE_DUPLICATE_CURSOR'),
('42V04', 'ERRCODE_DUPLICATE_DATABASE'),
('42V05', 'ERRCODE_DUPLICATE_PSTATEMENT'),
('42V06', 'ERRCODE_DUPLICATE_SCHEMA'),
('42V07', 'ERRCODE_DUPLICATE_TABLE'),
('42V08', 'ERRCODE_AMBIGUOUS_PARAMETER'),
('42V09', 'ERRCODE_AMBIGUOUS_ALIAS'),
('42V10', 'ERRCODE_INVALID_COLUMN_REFERENCE'),
('42V11', 'ERRCODE_INVALID_CURSOR_DEFINITION'),
('42V12', 'ERRCODE_INVALID_DATABASE_DEFINITION'),
('42V13', 'ERRCODE_INVALID_FUNCTION_DEFINITION'),
('42V14', 'ERRCODE_INVALID_PSTATEMENT_DEFINITION'),
('42V15', 'ERRCODE_INVALID_SCHEMA_DEFINITION'),
('42V16', 'ERRCODE_INVALID_TABLE_DEFINITION'),
('42V17', 'ERRCODE_INVALID_OBJECT_DEFINITION'),
('42V18', 'ERRCODE_INDETERMINATE_DATATYPE'),
('42V21', 'ERRCODE_UNDEFINED_PROJECTION'),
('42V22', 'ERRCODE_UNDEFINED_NODE'),
('42V23', 'ERRCODE_UNDEFINED_PERMUTATION'),
('42V24', 'ERRCODE_UNDEFINED_USER'),
('42V25', 'ERRCODE_PATTERN_MATCH_ERROR'),
('42V26', 'ERRCODE_DUPLICATE_NODE'),
('44000', 'ERRCODE_WITH_CHECK_OPTION_VIOLATION'),
('53000', 'ERRCODE_INSUFFICIENT_RESOURCES'),
('53100', 'ERRCODE_DISK_FULL'),
('53200', 'ERRCODE_OUT_OF_MEMORY'),
('53300', 'ERRCODE_TOO_MANY_CONNECTIONS'),
('54000', 'ERRCODE_PROGRAM_LIMIT_EXCEEDED'),
('54001', 'ERRCODE_STATEMENT_TOO_COMPLEX'),
('54011', 'ERRCODE_TOO_MANY_COLUMNS'),
('54023', 'ERRCODE_TOO_MANY_ARGUMENTS'),
('55000', 'ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE'),
('55006', 'ERRCODE_OBJECT_IN_USE'),
('55V02', 'ERRCODE_CANT_CHANGE_RUNTIME_PARAM'),
('55V03', 'ERRCODE_LOCK_NOT_AVAILABLE'),
('55V04', 'ERRCODE_TM_MARKER_NOT_AVAILABLE'),
('57000', 'ERRCODE_OPERATOR_INTERVENTION'),
('57014', 'ERRCODE_QUERY_CANCELED'),
('57015', 'ERRCODE_SLOW_DELETE'),
('57V01', 'ERRCODE_ADMIN_SHUTDOWN'),
('57V02', 'ERRCODE_CRASH_SHUTDOWN'),
('57V03', 'ERRCODE_CANNOT_CONNECT_NOW'),
('58030', 'ERRCODE_IO_ERROR'),
('58V01', 'ERRCODE_UNDEFINED_FILE'),
('58V02', 'ERRCODE_DUPLICATE_FILE'),
('V1001', 'ERRCODE_LOST_CONNECTIVITY'),
('V1002', 'ERRCODE_K_SAFETY_VIOLATION'),
('V1003', 'ERRCODE_CLUSTER_CHANGE'),
('V2000', 'ERRCODE_AUTH_FAILED'),
('V2001', 'ERRCODE_LICENSE_ISSUE'),
('V2002', 'ERRCODE_MOVEOUT_ABORTED'),
('VC001', 'ERRCODE_CONFIG_FILE_ERROR'),
('VC002', 'ERRCODE_LOCK_FILE_EXISTS'),
('VD001', 'ERRCODE_DESIGNER_FUNCTION_ERROR'),
('VP000', 'ERRCODE_USER_PROC_ERROR'),
('VP001', 'ERRCODE_USER_PROC_EXEC_ERROR'),
('VX001', 'ERRCODE_INTERNAL_ERROR'),
('VX002', 'ERRCODE_DATA_CORRUPTED'),
('VX003', 'ERRCODE_INDEX_CORRUPTED'),
('VX004', 'ERRCODE_PLAN_TO_SQL_INTERNAL_EROR'),
) | massmutual/py4jdbc | py4jdbc/exceptions/vertica.py | Python | bsd-3-clause | 11,366 |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import json
import os
import pkg_resources
import shutil
from urllib.parse import quote
import scipy
import numpy as np
import pandas as pd
import qiime2
from statsmodels.sandbox.stats.multicomp import multipletests
import q2templates
# Absolute path to this subpackage's HTML template/asset directory.
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_alpha')
def alpha_group_significance(output_dir: str, alpha_diversity: pd.Series,
                             metadata: qiime2.Metadata) -> None:
    """Test alpha diversity across groups of each categorical column.

    For every non-numeric metadata column, runs an overall and pairwise
    Kruskal-Wallis test (BH-FDR corrected) and writes an index.html
    plus per-category JSONP/CSV artifacts into `output_dir`.
    """
    metadata_df = metadata.to_dataframe()
    metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
    pre_filtered_cols = set(metadata_df.columns)
    # keep only non-numeric (categorical) columns; remember what was dropped
    metadata_df = metadata_df.select_dtypes(exclude=[np.number])
    post_filtered_cols = set(metadata_df.columns)
    filtered_numeric_categories = pre_filtered_cols - post_filtered_cols
    filtered_group_comparisons = []
    categories = metadata_df.columns
    metric_name = alpha_diversity.name
    if len(categories) == 0:
        raise ValueError('Only numeric data is present in metadata file.')
    filenames = []
    filtered_categories = []
    for category in categories:
        metadata_category = metadata.get_category(category).to_series()
        # restrict to samples that have diversity values; drop empties
        metadata_category = metadata_category.loc[alpha_diversity.index]
        metadata_category = metadata_category.replace(r'', np.nan).dropna()
        initial_data_length = alpha_diversity.shape[0]
        data = pd.concat([alpha_diversity, metadata_category], axis=1,
                         join='inner')
        filtered_data_length = data.shape[0]
        names = []
        groups = []
        for name, group in data.groupby(metadata_category.name):
            names.append('%s (n=%d)' % (name, len(group)))
            groups.append(list(group[alpha_diversity.name]))
        # only test categories with more than one group where at least one
        # group has more than one member
        if (len(groups) > 1 and len(groups) != len(data.index)):
            escaped_category = quote(category)
            filename = 'category-%s.jsonp' % escaped_category
            filenames.append(filename)
            # perform Kruskal-Wallis across all groups
            kw_H_all, kw_p_all = scipy.stats.mstats.kruskalwallis(*groups)
            # perform pairwise Kruskal-Wallis across all pairs of groups and
            # correct for multiple comparisons
            kw_H_pairwise = []
            for i in range(len(names)):
                for j in range(i):
                    try:
                        H, p = scipy.stats.mstats.kruskalwallis(groups[i],
                                                                groups[j])
                        kw_H_pairwise.append([names[j], names[i], H, p])
                    except ValueError:
                        # this pair could not be tested; report it as a
                        # filtered comparison instead of failing outright
                        filtered_group_comparisons.append(
                            ['%s:%s' % (category, names[i]),
                             '%s:%s' % (category, names[j])])
            kw_H_pairwise = pd.DataFrame(
                kw_H_pairwise, columns=['Group 1', 'Group 2', 'H', 'p-value'])
            kw_H_pairwise.set_index(['Group 1', 'Group 2'], inplace=True)
            # Benjamini-Hochberg FDR correction over the pairwise p-values
            kw_H_pairwise['q-value'] = multipletests(
                kw_H_pairwise['p-value'], method='fdr_bh')[1]
            kw_H_pairwise.sort_index(inplace=True)
            pairwise_fn = 'kruskal-wallis-pairwise-%s.csv' % escaped_category
            pairwise_path = os.path.join(output_dir, pairwise_fn)
            kw_H_pairwise.to_csv(pairwise_path)
            with open(os.path.join(output_dir, filename), 'w') as fh:
                # JSONP payload consumed by the visualization:
                # load_data(category, data, counts, overall KW result,
                #           pairwise HTML table, csv link, metric name)
                df = pd.Series(groups, index=names)
                fh.write("load_data('%s'," % category)
                df.to_json(fh, orient='split')
                fh.write(",")
                json.dump({'initial': initial_data_length,
                           'filtered': filtered_data_length}, fh)
                fh.write(",")
                json.dump({'H': kw_H_all, 'p': kw_p_all}, fh)
                fh.write(",'")
                table = kw_H_pairwise.to_html(classes="table table-striped "
                                              "table-hover")
                table = table.replace('border="1"', 'border="0"')
                fh.write(table.replace('\n', ''))
                fh.write("','%s', '%s');" % (quote(pairwise_fn), metric_name))
        else:
            filtered_categories.append(category)
    index = os.path.join(
        TEMPLATES, 'alpha_group_significance_assets', 'index.html')
    q2templates.render(index, output_dir, context={
        'categories': [quote(fn) for fn in filenames],
        'filtered_numeric_categories': ', '.join(filtered_numeric_categories),
        'filtered_categories': ', '.join(filtered_categories),
        'filtered_group_comparisons':
            '; '.join([' vs '.join(e) for e in filtered_group_comparisons])})
    shutil.copytree(
        os.path.join(TEMPLATES, 'alpha_group_significance_assets', 'dst'),
        os.path.join(output_dir, 'dist'))
# Supported correlation method names mapped to their scipy implementations.
_alpha_correlation_fns = {'spearman': scipy.stats.spearmanr,
                          'pearson': scipy.stats.pearsonr}
def alpha_correlation(output_dir: str,
                      alpha_diversity: pd.Series,
                      metadata: qiime2.Metadata,
                      method: str='spearman') -> None:
    """Correlate alpha diversity with each numeric metadata column.

    `method` is 'spearman' or 'pearson' (ValueError otherwise).  Writes
    an index.html plus one JSONP file per column into `output_dir`.
    """
    try:
        alpha_correlation_fn = _alpha_correlation_fns[method]
    except KeyError:
        raise ValueError('Unknown alpha correlation method %s. The available '
                         'options are %s.' %
                         (method, ', '.join(_alpha_correlation_fns.keys())))
    metadata_df = metadata.to_dataframe()
    metadata_df = metadata_df.apply(pd.to_numeric, errors='ignore')
    pre_filtered_cols = set(metadata_df.columns)
    # keep only numeric columns; remember which were dropped for the report
    metadata_df = metadata_df.select_dtypes(include=[np.number])
    post_filtered_cols = set(metadata_df.columns)
    filtered_categories = pre_filtered_cols - post_filtered_cols
    categories = metadata_df.columns
    if len(categories) == 0:
        raise ValueError('Only non-numeric data is present in metadata file.')
    filenames = []
    for category in categories:
        metadata_category = metadata_df[category]
        metadata_category = metadata_category.loc[alpha_diversity.index]
        metadata_category = metadata_category.dropna()
        # create a dataframe containing the data to be correlated, and drop
        # any samples that have no data in either column
        df = pd.concat([metadata_category, alpha_diversity], axis=1,
                       join='inner')
        # compute correlation
        correlation_result = alpha_correlation_fn(df[metadata_category.name],
                                                  df[alpha_diversity.name])
        # surface a warning in the visualization if samples were dropped
        warning = None
        if alpha_diversity.shape[0] != df.shape[0]:
            warning = {'initial': alpha_diversity.shape[0],
                       'method': method.title(),
                       'filtered': df.shape[0]}
        escaped_category = quote(category)
        filename = 'category-%s.jsonp' % escaped_category
        filenames.append(filename)
        with open(os.path.join(output_dir, filename), 'w') as fh:
            # JSONP payload: load_data(category, data, warning, stats)
            fh.write("load_data('%s'," % category)
            df.to_json(fh, orient='split')
            fh.write(",")
            json.dump(warning, fh)
            fh.write(",")
            json.dump({
                'method': method.title(),
                'testStat': '%1.4f' % correlation_result[0],
                'pVal': '%1.4f' % correlation_result[1],
                'sampleSize': df.shape[0]}, fh)
            fh.write(");")
    index = os.path.join(TEMPLATES, 'alpha_correlation_assets', 'index.html')
    q2templates.render(index, output_dir, context={
        'categories': [quote(fn) for fn in filenames],
        'filtered_categories': ', '.join(filtered_categories)})
    shutil.copytree(os.path.join(TEMPLATES, 'alpha_correlation_assets', 'dst'),
                    os.path.join(output_dir, 'dist'))
| maxvonhippel/q2-diversity | q2_diversity/_alpha/_visualizer.py | Python | bsd-3-clause | 8,213 |
# coding=utf-8
# Django settings for bluebottle project.
import os, datetime
# Import global settings for overriding without throwing away defaults
from django.conf import global_settings
from django.utils.translation import ugettext as _
from admin_dashboard import *
from .payments import *
# Set PROJECT_ROOT to the dir of the current file
# Find the project's containing directory and normalize it to refer to
# the project's root more easily
PROJECT_ROOT = os.path.dirname(os.path.normpath(os.path.join(__file__, '..', '..')))
# DJANGO_PROJECT: the short project name
# (defaults to the basename of PROJECT_ROOT)
DJANGO_PROJECT = os.path.basename(PROJECT_ROOT.rstrip('/'))
DEBUG = True
TEST_MEMCACHE = False
TEMPLATE_DEBUG = True
COMPRESS_TEMPLATES = False
ADMINS = (
('Team Error', 'errors@onepercentclub.com'),
)
CONTACT_EMAIL = 'info@onepercentclub.com'
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['.onepercentclub.com', '.1procentclub.nl', 'localhost']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Amsterdam'
# Available user interface translations
# Ref: https://docs.djangoproject.com/en/1.4/ref/settings/#languages
#
# Default language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
def gettext_noop(s):
    """Identity marker function: returns *s* unchanged so makemessages can find translatable strings."""
    return s
LANGUAGES = (
('nl', gettext_noop('Dutch')),
('en', gettext_noop('English'))
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# First one is for apps the second for the main templates
LOCALE_PATHS = ('../locale', 'locale')
# If you set this to False, Django will not use timezone-aware datetimes.
# pytz is in requirements.txt because it's "highly recommended" when using
# timezone support.
# https://docs.djangoproject.com/en/1.4/topics/i18n/timezones/
USE_TZ = True
# Static Files and Media
# ======================
#
# For staticfiles and media, the following convention is used:
#
# * '/static/media/': Application media default path
# * '/static/global/': Global static media
# * '/static/assets/<app_name>/': Static assets after running `collectstatic`
#
# The respective URL's (available only when `DEBUG=True`) are in `urls.py`.
#
# More information:
# https://docs.djangoproject.com/en/1.4/ref/contrib/staticfiles/
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'static', 'media')
# Absolute filesystem path to the directory that will hold PRIVATE user-uploaded files.
PRIVATE_MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'private', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/static/media/'
PRIVATE_MEDIA_URL = '/private/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static', 'assets')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/assets/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# You can also name this tuple like: ('css', '/path/to/css')
(os.path.join(PROJECT_ROOT, 'static', 'global')),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'apptemplates.Loader', # extend AND override templates
]
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
# These are basically the default values from the Django configuration, written
# as a list for easy manipulation. This way one can:
#
# 1. Easily add, remove or replace elements in the list, ie. overriding.
# 2. Know what the defaults are, if you want to change them right here. This
# way you won't have to look them up every time you want to change.
#
# Note: The first three middleware classes need to be in this order: Session, Locale, Common
# http://stackoverflow.com/questions/8092695/404-on-requests-without-trailing-slash-to-i18n-urls
MIDDLEWARE_CLASSES = [
'bluebottle.auth.middleware.UserJwtTokenMiddleware',
'apps.redirects.middleware.RedirectHashCompatMiddleware',
'bluebottle.auth.middleware.AdminOnlyCsrf',
# Have a middleware to make sure old cookies still work after we switch to domain-wide cookies.
'bluebottle.utils.middleware.SubDomainSessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'bluebottle.auth.middleware.AdminOnlySessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'bluebottle.auth.middleware.AdminOnlyAuthenticationMiddleware',
'bluebottle.bb_accounts.middleware.LocaleMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# https://docs.djangoproject.com/en/1.4/ref/clickjacking/
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'apps.redirects.middleware.RedirectFallbackMiddleware',
'apps.crawlable.middleware.HashbangMiddleware',
'django_tools.middlewares.ThreadLocal.ThreadLocalMiddleware',
'bluebottle.auth.middleware.SlidingJwtTokenMiddleware'
]
# Browsers will block our pages from loading in an iframe no matter which site
# made the request. This setting can be overridden on a per response or a per
# view basis with the @xframe decorators.
X_FRAME_OPTIONS = 'DENY'
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
# Makes the 'request' variable (the current HttpRequest) available in templates.
'django.core.context_processors.request',
'django.core.context_processors.i18n',
'bluebottle.utils.context_processors.installed_apps_context_processor',
'bluebottle.utils.context_processors.git_commit',
'bluebottle.utils.context_processors.conf_settings',
'bluebottle.utils.context_processors.google_maps_api_key',
'bluebottle.utils.context_processors.google_analytics_code',
'bluebottle.utils.context_processors.sentry_dsn',
'bluebottle.utils.context_processors.facebook_auth_settings',
'bluebottle.utils.context_processors.mixpanel_settings',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
ROOT_URLCONF = 'onepercentclub.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'onepercentclub.wsgi.application'
# Template source directories for the filesystem template loader.
# Always use forward slashes, even on Windows, and absolute paths.
# NOTE: the trailing comma is essential -- without it the parentheses are
# mere grouping and TEMPLATE_DIRS becomes a single *string*, which Django
# would then iterate character by character when searching for templates.
TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
# Django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps
'django_extensions',
'django_extensions.tests',
'raven.contrib.django.raven_compat',
'djcelery',
'south',
# 'django_nose',
'compressor',
'sorl.thumbnail',
'taggit',
'taggit_autocomplete_modified',
'micawber.contrib.mcdjango', # Embedding videos
'templatetag_handlebars',
'rest_framework',
'rest_framework.authtoken',
'polymorphic',
'registration',
'filetransfers',
'loginas',
#'social_auth',
'social.apps.django_app.default',
# Onepercent app to send POST requests to AFOM
'onepercent_afom',
#Widget
'bluebottle.widget',
# CMS page contents
'fluent_contents',
'fluent_contents.plugins.text',
'fluent_contents.plugins.oembeditem',
'fluent_contents.plugins.rawhtml',
'django_wysiwyg',
'tinymce',
'statici18n',
'django.contrib.humanize',
'django_tools',
# FB Auth
'bluebottle.auth',
# Password auth from old PHP site.
'legacyauth',
# Plain Bluebottle apps
'bluebottle.wallposts',
'bluebottle.utils',
'bluebottle.common',
'bluebottle.contentplugins',
'bluebottle.contact',
'bluebottle.geo',
'bluebottle.pages',
'bluebottle.news',
'bluebottle.slides',
'bluebottle.quotes',
'bluebottle.payments',
'bluebottle.payments_docdata',
'bluebottle.payments_logger',
'bluebottle.payments_voucher',
'bluebottle.redirects',
# Apps extending Bluebottle base models
    # These should be before their Bb parents so the templates are overridden
'apps.members',
'apps.tasks',
'apps.projects',
'apps.organizations',
'apps.payouts',
# apps overriding bluebottle functionality should come before the bluebottle entries
# (template loaders pick the first template they find)
'apps.core',
'apps.bluebottle_salesforce',
'apps.bluebottle_dashboard',
'apps.contentplugins',
'apps.campaigns',
'apps.hbtemplates',
'apps.statistics',
'apps.homepage',
'apps.partners',
'apps.crawlable',
'apps.mchanga',
'apps.recurring_donations',
# Bluebottle apps with abstract models
'bluebottle.bb_accounts',
'bluebottle.bb_organizations',
'bluebottle.bb_projects',
'bluebottle.bb_tasks',
'bluebottle.bb_fundraisers',
'bluebottle.bb_donations',
'bluebottle.bb_orders',
'bluebottle.bb_payouts',
# Basic Bb implementations
'bluebottle.fundraisers',
'bluebottle.donations',
'bluebottle.orders',
# FIXME: Keep these just for migrations
'apps.fund',
'apps.cowry',
'apps.cowry_docdata',
# FIXME: Reimplement these apps
'apps.vouchers',
# 'apps.sepa',
# 'apps.csvimport',
# 'apps.accounting',
# Custom dashboard
'fluent_dashboard',
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.admindocs',
)
# Custom User model
AUTH_USER_MODEL = 'members.Member'
PROJECTS_PROJECT_MODEL = 'projects.Project'
PROJECTS_PHASELOG_MODEL = 'projects.ProjectPhaseLog'
FUNDRAISERS_FUNDRAISER_MODEL = 'fundraisers.FundRaiser'
TASKS_TASK_MODEL = 'tasks.Task'
TASKS_SKILL_MODEL = 'tasks.Skill'
TASKS_TASKMEMBER_MODEL = 'tasks.TaskMember'
TASKS_TASKFILE_MODEL = 'tasks.TaskFile'
ORGANIZATIONS_ORGANIZATION_MODEL = 'organizations.Organization'
ORGANIZATIONS_DOCUMENT_MODEL = 'organizations.OrganizationDocument'
ORGANIZATIONS_MEMBER_MODEL = 'organizations.OrganizationMember'
ORDERS_ORDER_MODEL = 'orders.Order'
DONATIONS_DONATION_MODEL = 'donations.Donation'
PAYOUTS_PROJECTPAYOUT_MODEL = 'payouts.ProjectPayout'
PAYOUTS_ORGANIZATIONPAYOUT_MODEL = 'payouts.OrganizationPayout'
SOCIAL_AUTH_USER_MODEL = 'members.Member'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email', 'user_friends', 'public_profile', 'user_birthday']
SOCIAL_AUTH_FACEBOOK_EXTRA_DATA = [('birthday', 'birthday')]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'payment_logs': {
'level': 'INFO',
'class': 'bluebottle.payments_logger.handlers.PaymentLogHandler',
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'bluebottle.salesforce': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'payments.payment': {
'handlers': ['mail_admins', 'payment_logs'],
'level': 'INFO',
'propagate': True,
},
}
}
# log errors & warnings
import logging
logging.basicConfig(level=logging.WARNING, format='[%(asctime)s] %(levelname)-8s %(message)s', datefmt="%d/%b/%Y %H:%M:%S")
# Django Celery - asynchronous task server
import djcelery
djcelery.setup_loader()
SOCIAL_AUTH_STRATEGY = 'social.strategies.django_strategy.DjangoStrategy'
SOCIAL_AUTH_STORAGE = 'social.apps.django_app.default.models.DjangoStorage'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookAppOAuth2',
'social.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# We're using nose because it limits the tests to our apps (i.e. no Django and
# 3rd party app tests). We need this because tests in contrib.auth.user are
# failing in Django 1.4.1. Here's the ticket for the failing test:
# https://code.djangoproject.com/ticket/17966
# The new test runner in Django 1.5 will be more flexible:
#https://code.djangoproject.com/ticket/17365
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--detailed-errors',
'--nologcapture',
]
SKIP_BB_FUNCTIONAL_TESTS = True
SOUTH_TESTS_MIGRATE = False # Make south shut up during tests
# django-compressor http://pypi.python.org/pypi/django_compressor
# Compressor is enabled whenever DEBUG is False.
STATICFILES_FINDERS += [
# django-compressor staticfiles
'compressor.finders.CompressorFinder',
]
# TODO Enable compass here.
COMPRESS_OUTPUT_DIR = 'compressed'
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
#'compressor.filters.datauri.DataUriFilter',
'compressor.filters.cssmin.CSSMinFilter'
]
# Automagic CSS precompilation
#COMPRESS_PRECOMPILERS = (
# ('text/coffeescript', 'coffee --compile --stdio'),
# ('text/less', 'lessc {infile} {outfile}'),
# ('text/x-sass', 'sass {infile} {outfile}'),
# ('text/x-scss', 'sass --scss {infile} {outfile}'),
#)
# The default URL to send users to after login. This will be used when the
# 'next' URL parameter hasn't been set.
LOGIN_REDIRECT_URL = '/'
# Blog/news content configuration
FLUENT_CONTENTS_CACHE_OUTPUT = True
FLUENT_TEXT_CLEAN_HTML = True
FLUENT_TEXT_SANITIZE_HTML = True
DJANGO_WYSIWYG_FLAVOR = 'tinymce_advanced'
# Required for handlebars_template to work properly
USE_EMBER_STYLE_ATTRS = True
# Sorl Thumbnail settings
# http://sorl-thumbnail.readthedocs.org/en/latest/reference/settings.html
THUMBNAIL_QUALITY = 85
# TODO: Configure Sorl with Redis.
REST_FRAMEWORK = {
'FILTER_BACKEND': 'rest_framework.filters.DjangoFilterBackend',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
JWT_AUTH = {
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_LEEWAY': 0,
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_ALLOW_TOKEN_RENEWAL': True,
# After the renewal limit it isn't possible to request a token refresh
# => time token first created + renewal limit.
'JWT_TOKEN_RENEWAL_LIMIT': datetime.timedelta(days=90),
}
# Time between attempts to refresh the jwt token automatically on standard request
# TODO: move this setting into the JWT_AUTH settings.
JWT_TOKEN_RENEWAL_DELTA = datetime.timedelta(minutes=30)
COWRY_RETURN_URL_BASE = 'http://127.0.0.1:8000'
COWRY_PAYMENT_METHODS = {
'dd-webmenu': {
'profile': 'webmenu',
'name': 'DocData Web Menu',
'supports_recurring': False,
'supports_single': True,
},
'dd-webdirect': {
'profile': 'webdirect',
'name': 'DocData WebDirect Direct Debit',
'restricted_countries': ('NL',),
'supports_recurring': True,
'supports_single': False,
},
}
# Default VAT percentage as string (used in payouts)
VAT_RATE = '0.21'
# Settings for organization bank account. Please set this in secrets.py
# SEPA = {
# 'iban': '',
# 'bic': '',
# 'name': '',
# 'id': ''
# }
# Salesforce app settings
SALESFORCE_QUERY_TIMEOUT = 3
DATABASE_ROUTERS = [
"salesforce.router.ModelRouter"
]
# E-mail settings
DEFAULT_FROM_EMAIL = '<website@onepercentclub.com> 1%Club'
# Django-registration settings
ACCOUNT_ACTIVATION_DAYS = 4
HTML_ACTIVATION_EMAIL = True # Note this setting is from our forked version.
# Functional testing
# Selenium and Splinter settings
SELENIUM_TESTS = True
SELENIUM_WEBDRIVER = 'phantomjs' # Can be any of chrome, firefox, phantomjs
# Directories searched for test fixtures.
FIXTURE_DIRS = [
    # NOTE(review): DJANGO_PROJECT is just the basename of PROJECT_ROOT
    # (a bare directory name), so this path is relative to the current
    # working directory, not anchored at the project root. PROJECT_ROOT
    # was presumably intended -- confirm against how tests are launched
    # before changing.
    os.path.join(DJANGO_PROJECT, 'fixtures')
]
# PhantomJS for flat page generation.
# NOTE: This has nothing to do with testing against phantomjs.
CRAWLABLE_PHANTOMJS_DEDICATED_MODE = True
# If dedicated mode is enabled, configure the port:
CRAWLABLE_PHANTOMJS_DEDICATED_PORT = 8910
# If dedicated mode is disabled, you can specify arguments to start phantomjs.
CRAWLABLE_PHANTOMJS_ARGS = []
# Use HTTPS for PhantomJS requests.
CRAWLABLE_FORCE_HTTPS = True
# Send email to console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATICI18N_ROOT = os.path.join(PROJECT_ROOT, 'static', 'global')
SESSION_COOKIE_NAME = 'bb-session-id'
# Support legacy passwords
PASSWORD_HASHERS = global_settings.PASSWORD_HASHERS + (
'legacyauth.hashers.LegacyPasswordHasher',
)
# Twitter handles, per language
TWITTER_HANDLES = {
'nl': '1procentclub',
'en': '1percentclub',
}
DEFAULT_TWITTER_HANDLE = TWITTER_HANDLES['nl']
MINIMAL_PAYOUT_AMOUNT = 21.00
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'bluebottle.auth.utils.save_profile_picture',
'bluebottle.auth.utils.get_extra_facebook_data',
'bluebottle.auth.utils.send_welcome_mail_pipe'
)
AFOM_ENABLED = False
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email', 'first_name', 'last_name', ]
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True
SEND_WELCOME_MAIL = True
| onepercentclub/onepercentclub-site | onepercentclub/settings/base.py | Python | bsd-3-clause | 20,342 |
##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
class MeshAlgoTangentTest( unittest.TestCase ) :

	"""Tests for MeshAlgo.calculateTangents().

	The deprecated unittest aliases assert_() and failUnless() (removed in
	Python 3) have been replaced by assertTrue().
	"""

	def makeSingleTriangleMesh( self ):
		"""Return a one-triangle mesh carrying a primary UV set (s/t), a secondary
		UV set (foo_s/foo_t) and an alternative position variable (Pref)."""
		verticesPerFace = IntVectorData( [ 3 ] )
		vertexIds = IntVectorData( [ 0, 1, 2 ] )
		p = V3fVectorData( [ V3f( 0, 0, 0 ), V3f( 1, 0, 0 ), V3f( 0, 1, 0 ) ] )
		s = FloatVectorData( [ 0, 1, 0 ] )
		t = FloatVectorData( [ 0, 0, 1 ] )
		mesh = MeshPrimitive( verticesPerFace, vertexIds, "linear", p )
		mesh["s"] = PrimitiveVariable( PrimitiveVariable.Interpolation.FaceVarying, s )
		mesh["t"] = PrimitiveVariable( PrimitiveVariable.Interpolation.FaceVarying, t )
		mesh["foo_s"] = PrimitiveVariable( PrimitiveVariable.Interpolation.FaceVarying, FloatVectorData( [0, 0, 1] ) )
		mesh["foo_t"] = PrimitiveVariable( PrimitiveVariable.Interpolation.FaceVarying, FloatVectorData( [0, 1, 0] ) )
		prefData = V3fVectorData( [V3f( 0, 0, 0 ), V3f( 0, -1, 0 ), V3f( 1, 0, 0 )] )
		mesh["Pref"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, prefData )
		return mesh

	def makeSingleBadUVTriangleMesh( self ) :
		"""Return a one-triangle mesh whose UVs have Uniform interpolation,
		which calculateTangents() should reject."""
		verticesPerFace = IntVectorData( [3] )
		vertexIds = IntVectorData( [0, 1, 2] )
		p = V3fVectorData( [V3f( 0, 0, 0 ), V3f( 1, 0, 0 ), V3f( 0, 1, 0 )] )
		s = FloatVectorData( [0] )
		t = FloatVectorData( [0] )
		mesh = MeshPrimitive( verticesPerFace, vertexIds, "linear", p )
		mesh["s"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Uniform, s )
		mesh["t"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Uniform, t )
		return mesh

	def testSingleTriangleGeneratesCorrectTangents( self ) :
		"""Tangents of an axis-aligned triangle are the X and Y axes."""
		triangle = self.makeSingleTriangleMesh()
		tangentPrimVar, bitangentPrimVar = MeshAlgo.calculateTangents( triangle )
		self.assertEqual(tangentPrimVar.interpolation, PrimitiveVariable.Interpolation.FaceVarying)
		self.assertEqual(bitangentPrimVar.interpolation, PrimitiveVariable.Interpolation.FaceVarying)
		tangents = V3fVectorData( tangentPrimVar.data )
		bitangent = V3fVectorData( bitangentPrimVar.data )
		self.assertEqual( len( tangents ), 3 )
		self.assertEqual( len( bitangent ), 3 )
		for t in tangents :
			self.assertAlmostEqual( t[0], 1.0 )
			self.assertAlmostEqual( t[1], 0.0 )
			self.assertAlmostEqual( t[2], 0.0 )
		for b in bitangent :
			self.assertAlmostEqual( b[0], 0.0 )
			self.assertAlmostEqual( b[1], 1.0 )
			self.assertAlmostEqual( b[2], 0.0 )

	def testJoinedUVEdges( self ) :
		"""Triangles sharing UVs along an edge get consistent tangents."""
		mesh = ObjectReader( "test/IECore/data/cobFiles/twoTrianglesWithSharedUVs.cob" ).read()
		self.assertTrue( mesh.arePrimitiveVariablesValid() )
		tangentPrimVar, bitangentPrimVar = MeshAlgo.calculateTangents( mesh )
		self.assertEqual( tangentPrimVar.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
		self.assertEqual( bitangentPrimVar.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
		for v in tangentPrimVar.data :
			self.assertTrue( v.equalWithAbsError( V3f( 1, 0, 0 ), 0.000001 ) )
		for v in bitangentPrimVar.data :
			self.assertTrue( v.equalWithAbsError( V3f( 0, 0, -1 ), 0.000001 ) )

	def testSplitAndOpposedUVEdges( self ) :
		"""Triangles with split, opposed UVs get mirrored tangents per face."""
		mesh = ObjectReader( "test/IECore/data/cobFiles/twoTrianglesWithSplitAndOpposedUVs.cob" ).read()
		tangentPrimVar, bitangentPrimVar = MeshAlgo.calculateTangents( mesh )
		self.assertEqual( tangentPrimVar.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
		self.assertEqual( bitangentPrimVar.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
		for v in tangentPrimVar.data[:3] :
			self.assertTrue( v.equalWithAbsError( V3f( -1, 0, 0 ), 0.000001 ) )
		for v in tangentPrimVar.data[3:] :
			self.assertTrue( v.equalWithAbsError( V3f( 1, 0, 0 ), 0.000001 ) )
		for v in bitangentPrimVar.data[:3] :
			self.assertTrue( v.equalWithAbsError( V3f( 0, 0, 1 ), 0.000001 ) )
		for v in bitangentPrimVar.data[3:] :
			self.assertTrue( v.equalWithAbsError( V3f( 0, 0, -1 ), 0.000001 ) )

	def testNonTriangulatedMeshRaisesException( self ):
		"""A quad mesh (non-triangulated) is rejected."""
		plane = MeshPrimitive.createPlane( Box2f( V2f( -0.1 ), V2f( 0.1 ) ) )
		self.assertRaises( RuntimeError, lambda : MeshAlgo.calculateTangents( plane ) )

	def testInvalidPositionPrimVarRaisesException( self ) :
		"""An unknown position primvar name is rejected."""
		triangle = self.makeSingleTriangleMesh()
		self.assertRaises( RuntimeError, lambda : MeshAlgo.calculateTangents( triangle, position = "foo" ) )

	def testMissingUVsetPrimVarsRaisesException ( self ):
		"""An unknown UV set name is rejected."""
		triangle = self.makeSingleTriangleMesh()
		self.assertRaises( RuntimeError, lambda : MeshAlgo.calculateTangents( triangle, uvSet = "bar") )

	def testIncorrectUVPrimVarInterpolationRaisesException ( self ):
		"""UVs with Uniform (rather than FaceVarying) interpolation are rejected."""
		triangle = self.makeSingleBadUVTriangleMesh()
		self.assertRaises( RuntimeError, lambda : MeshAlgo.calculateTangents( triangle ) )

	def testCanUseSecondUVSet( self ) :
		"""Tangents can be computed from a secondary UV set via uvSet=."""
		triangle = self.makeSingleTriangleMesh()
		uTangent, vTangent = MeshAlgo.calculateTangents( triangle , uvSet = "foo" )
		self.assertEqual( len( uTangent.data ), 3 )
		self.assertEqual( len( vTangent.data ), 3 )
		for v in uTangent.data :
			self.assertTrue( v.equalWithAbsError( V3f( 0, 1, 0 ), 0.000001 ) )
		# really I'd expect the naive answer to the vTangent to be V3f( 1, 0, 0 )
		# but the code forces the triple of n, uT, vT to flip the direction of vT if we don't have a correctly handed set of basis vectors
		for v in vTangent.data :
			self.assertTrue( v.equalWithAbsError( V3f( -1, 0, 0 ), 0.000001 ) )

	def testCanUsePref( self ) :
		"""Tangents can be computed from an alternative position variable via position=."""
		triangle = self.makeSingleTriangleMesh()
		uTangent, vTangent = MeshAlgo.calculateTangents( triangle , position = "Pref")
		self.assertEqual( len( uTangent.data ), 3 )
		self.assertEqual( len( vTangent.data ), 3 )
		for v in uTangent.data :
			self.assertTrue( v.equalWithAbsError( V3f( 0, -1, 0 ), 0.000001 ) )
		for v in vTangent.data :
			self.assertTrue( v.equalWithAbsError( V3f( 1, 0, 0 ), 0.000001 ) )
class MeshAlgoPrimitiveVariableTest( unittest.TestCase ) :
@classmethod
def makeMesh( cls ) :
testObject = MeshPrimitive.createPlane( Box2f( V2f( 0 ), V2f( 10 ) ), V2i( 2 ) )
testObject["a"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 0.5 ) )
testObject["b"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Vertex, FloatVectorData( range( 0, 9 ) ) )
testObject["c"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Uniform, FloatVectorData( range( 0, 4 ) ) )
testObject["d"] = PrimitiveVariable( PrimitiveVariable.Interpolation.Varying, FloatVectorData( range( 0, 9 ) ) )
testObject["e"] = PrimitiveVariable( PrimitiveVariable.Interpolation.FaceVarying, FloatVectorData( range( 0, 16 ) ) )
return testObject
@classmethod
def setUpClass(cls):
cls.mesh = cls.makeMesh()
def testMeshConstantToVertex( self ) :
p = self.mesh["a"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Vertex );
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p.data, FloatVectorData( [ 0.5 ] * 9 ) )
def testMeshConstantToUniform( self ) :
p = self.mesh["a"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( p.data, FloatVectorData( [ 0.5 ] * 4 ) )
def testMeshConstantToVarying( self ) :
p = self.mesh["a"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Varying )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Varying )
self.assertEqual( p.data, FloatVectorData( [ 0.5 ] * 9 ) )
def testMeshConstantToFaceVarying( self ) :
p = self.mesh["a"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( p.data, FloatVectorData( [ 0.5 ] * 16 ) )
def testMeshVertexToConstant( self ) :
p = self.mesh["b"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.data, FloatData( sum(range(0,9))/9. ) )
def testMeshVertexToUniform( self ) :
p = self.mesh["b"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( p.data, FloatVectorData( [ 2, 3, 5, 6 ] ) )
def testMeshVertexToVarying( self ) :
p = self.mesh["b"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Varying )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Varying )
self.assertEqual( p.data, FloatVectorData( range( 0, 9 ) ) )
def testMeshVertexToFaceVarying( self ) :
p = self.mesh["b"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
orig = range( 0, 9 )
self.assertEqual( p.data, FloatVectorData( [ orig[x] for x in self.mesh.vertexIds ] ) )
def testMeshUniformToConstant( self ) :
p = self.mesh["c"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.data, FloatData( sum(range(0,4))/4. ) )
def testMeshUniformToVertex( self ) :
p = self.mesh["c"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p.data, FloatVectorData( [ 0, 0.5, 1, 1, 1.5, 2, 2, 2.5, 3 ] ) )
def testMeshUniformToVarying( self ) :
p = self.mesh["c"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Varying )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Varying )
self.assertEqual( p.data, FloatVectorData( [ 0, 0.5, 1, 1, 1.5, 2, 2, 2.5, 3 ] ) )
def testMeshUniformToFaceVarying( self ) :
p = self.mesh["c"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( p.data, FloatVectorData( ( [ 0 ] * 4 ) + ( [ 1 ] * 4 ) + ( [ 2 ] * 4 ) + ( [ 3 ] * 4 ) ) )
def testMeshVaryingToConstant( self ) :
p = self.mesh["d"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.data, FloatData( sum(range(0,9))/9. ) )
def testMeshVaryingToVertex( self ) :
p = self.mesh["d"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p.data, FloatVectorData( range( 0, 9 ) ) )
def testMeshVaryingToUniform( self ) :
p = self.mesh["d"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( p.data, FloatVectorData( [ 2, 3, 5, 6 ] ) )
def testMeshVaryingToFaceVarying( self ) :
p = self.mesh["d"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.FaceVarying )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.FaceVarying )
orig = range( 0, 9 )
self.assertEqual( p.data, FloatVectorData( [ orig[x] for x in self.mesh.vertexIds ] ) )
def testMeshFaceVaryingToConstant( self ) :
p = self.mesh["e"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Constant )
self.assertEqual( p.data, FloatData( sum(range(0,16))/16. ) )
def testMeshFaceVaryingToVertex( self ) :
p = self.mesh["e"]
MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Vertex )
self.assertEqual( p.data, FloatVectorData( [ 0, 2.5, 5, 5.5, 7.5, 9.5, 11, 12.5, 14 ] ) )
def testMeshFaceVaryingToUniform( self ) :
	# FaceVarying -> Uniform averages the four face-vertex values of each face.
	p = self.mesh["e"]
	MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Uniform )
	self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Uniform )
	self.assertEqual( p.data, FloatVectorData( [ 1.5, 5.5, 9.5, 13.5 ] ) )
def testMeshFaceVaryingToVarying( self ) :
	# FaceVarying -> Varying produces the same per-vertex averages as the
	# FaceVarying -> Vertex conversion above.
	p = self.mesh["e"]
	MeshAlgo.resamplePrimitiveVariable( self.mesh, p, PrimitiveVariable.Interpolation.Varying )
	self.assertEqual( p.interpolation, PrimitiveVariable.Interpolation.Varying )
	self.assertEqual( p.data, FloatVectorData( [ 0, 2.5, 5, 5.5, 7.5, 9.5, 11, 12.5, 14 ] ) )
# Allow this test module to be run directly from the command line.
if __name__ == "__main__":
	unittest.main()
| hradec/cortex | test/IECore/MeshAlgoTest.py | Python | bsd-3-clause | 14,716 |
# Class that represents a King DF
import numpy
from scipy import special, integrate, interpolate
from ..util import conversion
from .df import df
from .sphericaldf import isotropicsphericaldf
_FOURPI= 4.*numpy.pi
_TWOOVERSQRTPI= 2./numpy.sqrt(numpy.pi)
class kingdf(isotropicsphericaldf):
    """Class that represents a King DF.

    The model is solved once in a (mass, tidal-radius)-scale-free form
    (see ``_scalefreekingdf``) and then rescaled to the requested total
    mass ``M`` and tidal radius ``rt``.
    """
    def __init__(self,W0,M=1.,rt=1.,npt=1001,ro=None,vo=None):
        """
        NAME:

           __init__

        PURPOSE:

           Initialize a King DF

        INPUT:

           W0 - dimensionless central potential W0 = Psi(0)/sigma^2 (in practice, needs to be <~ 200, where the DF is essentially isothermal)

           M= (1.) total mass (can be a Quantity)

           rt= (1.) tidal radius (can be a Quantity)

           npt= (1001) number of points to use to solve for Psi(r)

           ro=, vo= standard galpy unit scaling parameters

        OUTPUT:

           (none; sets up instance)

        HISTORY:

           2020-07-09 - Written - Bovy (UofT)

        """
        # Just run df init to set up unit-conversion parameters
        df.__init__(self,ro=ro,vo=vo)
        self.W0= W0
        self.M= conversion.parse_mass(M,ro=self._ro,vo=self._vo)
        self.rt= conversion.parse_length(rt,ro=self._ro)
        # Solve (mass,rtidal)-scale-free model, which is the basis for
        # the full solution
        self._scalefree_kdf= _scalefreekingdf(self.W0)
        self._scalefree_kdf.solve(npt)
        # Set up scaling factors that map the scale-free solution onto the
        # requested (M, rt); velocity scale follows from G=1 units.
        self._radius_scale= self.rt/self._scalefree_kdf.rt
        self._mass_scale= self.M/self._scalefree_kdf.mass
        self._velocity_scale= numpy.sqrt(self._mass_scale/self._radius_scale)
        self._density_scale= self._mass_scale/self._radius_scale**3.
        # Store central density, r0...
        self.rho0= self._scalefree_kdf.rho0*self._density_scale
        self.r0= self._scalefree_kdf.r0*self._radius_scale
        self.c= self._scalefree_kdf.c # invariant
        self.sigma= self._velocity_scale
        self._sigma2= self.sigma**2.
        self.rho1= self._density_scale
        # Setup the potential, use original params in case they had units
        # because then the initialization will turn on units for this object
        from ..potential import KingPotential
        pot= KingPotential(W0=self.W0,M=M,rt=rt,_sfkdf=self._scalefree_kdf,
                           ro=ro,vo=vo)
        # Now initialize the isotropic DF
        isotropicsphericaldf.__init__(self,pot=pot,scale=self.r0,
                                      rmax=self.rt,ro=ro,vo=vo)
        self._potInf= self._pot(self.rt,0.,use_physical=False)
        # Setup inverse cumulative mass function for radius sampling
        self._icmf= interpolate.InterpolatedUnivariateSpline(\
            self._mass_scale*self._scalefree_kdf._cumul_mass/self.M,
            self._radius_scale*self._scalefree_kdf._r,
            k=3)
        # Setup velocity DF interpolator for velocity sampling here
        # (self._scale is set by the isotropicsphericaldf initialization above)
        self._rmin_sampling= 0.
        self._v_vesc_pvr_interpolator= self._make_pvr_interpolator(\
            r_a_end=numpy.log10(self.rt/self._scale))

    def dens(self,r):
        # Mass density at radius r: rescale the scale-free solution.
        return self._scalefree_kdf.dens(r/self._radius_scale)\
            *self._density_scale

    def fE(self,E):
        # Distribution function f(E); lowered-isothermal form, non-zero only
        # for energies bound relative to the potential at the tidal radius.
        out= numpy.zeros(numpy.atleast_1d(E).shape)
        varE= self._potInf-E
        if numpy.sum(varE > 0.) > 0:
            out[varE > 0.]= (numpy.exp(varE[varE > 0.]/self._sigma2)-1.)\
                *(2.*numpy.pi*self._sigma2)**-1.5*self.rho1
        return out# mass density, not /self.M as for number density
class _scalefreekingdf(object):
    """Internal helper class to solve the scale-free King DF model, that is, the one that only depends on W = Psi/sigma^2"""
    def __init__(self,W0):
        # W0: dimensionless central potential of the model.
        self.W0= W0
    def solve(self,npt=1001):
        """Solve the model W(r) at npt points (note: not equally spaced in
        either r or W, because combination of two ODEs for different r ranges)"""
        # Set up arrays for outputs
        r= numpy.zeros(npt)
        W= numpy.zeros(npt)
        dWdr= numpy.zeros(npt)
        # Initialize (r[0]=0 already)
        W[0]= self.W0
        # Determine central density and r0
        self.rho0= self._dens_W(self.W0)
        self.r0= numpy.sqrt(9./4./numpy.pi/self.rho0)
        # First solve Poisson equation ODE from r=0 to r0 using form
        # d^2 Psi / dr^2 = ... (d psi / dr = v, r^2 dv / dr = RHS-2*r*v)
        if self.W0 < 2.:
            rbreak= self.r0/100.
        else:
            rbreak= self.r0
        #Using linspace focuses on what happens ~rbreak rather than on <<rbreak
        # which is what you want, because W ~ constant at r <~ r0
        r[:npt//2]= numpy.linspace(0.,rbreak,npt//2)
        # Guard against the 2*v/t singularity at t=0 by returning 0 there.
        sol= integrate.solve_ivp(\
                lambda t,y: [y[1],-_FOURPI*self._dens_W(y[0])
                             -(2.*y[1]/t if t > 0. else 0.)],
                [0.,rbreak],[self.W0,0.],method='LSODA',t_eval=r[:npt//2])
        W[:npt//2]= sol.y[0]
        dWdr[:npt//2]= sol.y[1]
        # Then solve Poisson equation ODE from Psi(r0) to Psi=0 using form
        # d^2 r / d Psi^2 = ... (d r / d psi = 1/v, dv / dpsi = 1/v(RHS-2*r*v))
        # Added advantage that this becomes ~log-spaced in r, which is what
        # you want
        W[npt//2-1:]= numpy.linspace(sol.y[0,-1],0.,npt-npt//2+1)
        sol= integrate.solve_ivp(\
                lambda t,y: [1./y[1],
                             -1./y[1]*(_FOURPI*self._dens_W(t)
                                       +2.*y[1]/y[0])],
                [sol.y[0,-1],0.],[rbreak,sol.y[1,-1]],
                method='LSODA',t_eval=W[npt//2-1:])
        r[npt//2-1:]= sol.y[0]
        dWdr[npt//2-1:]= sol.y[1]
        # Store solution
        self._r= r
        self._W= W
        self._dWdr= dWdr
        # Also store density at these points, and the tidal radius
        # (W reaches 0 at the last grid point by construction)
        self._rho= self._dens_W(self._W)
        self.rt= r[-1]
        self.c= numpy.log10(self.rt/self.r0)
        # Interpolate solution
        self._W_from_r=\
            interpolate.InterpolatedUnivariateSpline(self._r,self._W,k=3)
        # Compute the cumulative mass and store the total mass
        mass_shells= numpy.array([\
            integrate.quad(lambda r: _FOURPI*r**2*self.dens(r),
                           rlo,rhi)[0] for rlo,rhi in zip(r[:-1],r[1:])])
        self._cumul_mass= numpy.hstack((\
            integrate.quad(lambda r: _FOURPI*r**2*self.dens(r),0.,r[0])[0],
            numpy.cumsum(mass_shells)))
        self.mass= self._cumul_mass[-1]
        return None
    def _dens_W(self,W):
        """Density as a function of W"""
        sqW= numpy.sqrt(W)
        return numpy.exp(W)*special.erf(sqW)-_TWOOVERSQRTPI*sqW*(1.+2./3.*W)
    def dens(self,r):
        # Density as a function of radius, via the interpolated W(r).
        return self._dens_W(self._W_from_r(r))
| jobovy/galpy | galpy/df/kingdf.py | Python | bsd-3-clause | 6,898 |
# -*- coding: utf-8 -*-
# vispy: gallery 2
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
+----+-------------+
| | |
| y | viewbox |
| | |
+----+-------------+
| sp | x |
+----+-------------+
"""
import sys

from vispy import scene, app

canvas = scene.SceneCanvas(keys='interactive')
canvas.size = 600, 600
canvas.show()

# 2x2 grid laid out as in the module docstring: y-axis | viewbox on the top
# row, spacer | x-axis on the bottom row. Background colors make each cell
# visible.
grid = canvas.central_widget.add_grid()

widget_y_axis = grid.add_widget(row=0, col=0)
widget_y_axis.bgcolor = "#999999"

widget_viewbox = grid.add_widget(row=0, col=1)
widget_viewbox.bgcolor = "#dd0000"

widget_spacer_bottom = grid.add_widget(row=1, col=0)
widget_spacer_bottom.bgcolor = "#efefef"

widget_x_axis = grid.add_widget(row=1, col=1)
widget_x_axis.bgcolor = "#0000dd"

# Pin both axis widgets to a fixed 50-pixel thickness; the viewbox cell
# stretches to fill the remaining space.
widget_y_axis.width_min = 50
widget_y_axis.width_max = 50
widget_x_axis.height_min = 50
widget_x_axis.height_max = 50

# Only start the event loop when run as a script (not in an interactive shell).
if __name__ == '__main__' and sys.flags.interactive == 0:
    app.run()
| Eric89GXL/vispy | examples/basics/plotting/grid_x_y_viewbox.py | Python | bsd-3-clause | 1,194 |
# -*- coding: utf-8 -*-
"""Test audit columns"""
import pytest
from pyrseas.testutils import AugmentToMapTestCase
# DDL for the table to which the audit-column augmentations are applied.
CREATE_STMT = "CREATE TABLE t1 (c1 integer, c2 text)"

# Expected body of the generated trigger function for the 'default' audit
# columns (modified_by_user / modified_timestamp).
FUNC_SRC1 = """
BEGIN
NEW.modified_by_user = SESSION_USER;
NEW.modified_timestamp = CURRENT_TIMESTAMP;
RETURN NEW;
END"""

# Expected body when modified_timestamp is renamed to 'updated'.
FUNC_SRC2 = """
BEGIN
NEW.updated = CURRENT_TIMESTAMP;
RETURN NEW;
END"""
class AuditColumnsTestCase(AugmentToMapTestCase):
    """Test mapping of audit column augmentations"""

    def test_predef_column(self):
        "Add predefined audit column"
        augmap = {'schema public': {'table t1': {
            'audit_columns': 'created_date_only'}}}
        dbmap = self.to_map([CREATE_STMT], augmap)
        # 'created_date_only' adds a single NOT NULL date column with a
        # 'now' default; no trigger is needed.
        expmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'created_date': {'type': 'date', 'not_null': True,
                              'default': "('now'::text)::date"}}]}
        assert expmap == dbmap['schema public']['table t1']

    def test_unknown_table(self):
        "Error on non-existent table"
        augmap = {'schema public': {'table t2': {
            'audit_columns': 'created_date_only'}}}
        with pytest.raises(KeyError):
            self.to_map([CREATE_STMT], augmap)

    def test_bad_audit_spec(self):
        "Error on bad audit column specification"
        # 'audit_column' (singular) is not a recognized augmentation key.
        augmap = {'schema public': {'table t1': {
            'audit_column': 'created_date_only'}}}
        with pytest.raises(KeyError):
            self.to_map([CREATE_STMT], augmap)

    def test_unknown_audit_spec(self):
        "Error on non-existent audit column specification"
        augmap = {'schema public': {'table t1': {
            'audit_columns': 'created_date'}}}
        with pytest.raises(KeyError):
            self.to_map([CREATE_STMT], augmap)

    def test_new_column(self):
        "Add new (non-predefined) audit column"
        # Declare the column and the audit-column spec in the 'augmenter'
        # section, then reference the spec from the table.
        augmap = {'augmenter': {'columns': {
            'modified_date': {'type': 'date', 'not_null': True,
                              'default': "('now'::text)::date"}},
            'audit_columns': {'modified_date_only': {
                'columns': ['modified_date']}}},
            'schema public': {'table t1': {
                'audit_columns': 'modified_date_only'}}}
        dbmap = self.to_map([CREATE_STMT], augmap)
        expmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'modified_date': {'type': 'date', 'not_null': True,
                               'default': "('now'::text)::date"}}]}
        assert expmap == dbmap['schema public']['table t1']

    def test_rename_column(self):
        "Add predefined audit column but with new name"
        # Override the predefined column name; the generated trigger function
        # must then use the new name (FUNC_SRC2).
        augmap = {'augmenter': {'columns': {
            'modified_timestamp': {'name': 'updated'}}},
            'schema public': {'table t1': {
                'audit_columns': 'modified_only'}}}
        dbmap = self.to_map([CREATE_STMT], augmap)
        colmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'updated': {'type': 'timestamp with time zone',
                         'not_null': True}}],
            'triggers': {'t1_20_audit_modified_only': {
                'events': ['insert', 'update'], 'level': 'row',
                'procedure': 'audit_modified()', 'timing': 'before'}}}
        funcmap = {'language': 'plpgsql', 'returns': 'trigger',
                   'security_definer': True, 'description':
                   'Provides modified_timestamp values for audit columns.',
                   'source': FUNC_SRC2}
        assert dbmap['schema public']['table t1'] == colmap
        assert dbmap['schema public']['function audit_modified()'] == funcmap

    def test_change_column_type(self):
        "Add predefined audit column but with changed datatype"
        augmap = {'augmenter': {'columns': {'created_date': {'type': 'text'}}},
                  'schema public': {'table t1': {
                      'audit_columns': 'created_date_only'}}}
        dbmap = self.to_map([CREATE_STMT], augmap)
        expmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'created_date': {'type': 'text', 'not_null': True,
                              'default': "('now'::text)::date"}}]}
        assert expmap == dbmap['schema public']['table t1']

    def test_columns_with_trigger(self):
        "Add predefined audit columns with trigger"
        augmap = {'schema public': {'table t1': {'audit_columns': 'default'}}}
        dbmap = self.to_map([CREATE_STMT], augmap)
        expmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'modified_by_user': {'type': 'character varying(63)',
                                  'not_null': True}},
            {'modified_timestamp': {'type': 'timestamp with time zone',
                                    'not_null': True}}],
            'triggers': {'t1_20_audit_default': {
                'events': ['update'], 'level': 'row',
                'procedure': 'audit_default()', 'timing': 'before'}}}
        assert expmap == dbmap['schema public']['table t1']
        assert dbmap['schema public']['function audit_default()'][
            'returns'] == 'trigger'
        assert dbmap['schema public']['function audit_default()'][
            'source'] == FUNC_SRC1

    def test_nonpublic_schema_with_trigger(self):
        "Add predefined audit columns with trigger in a non-public schema"
        # The trigger procedure must be schema-qualified (s1.audit_default()).
        stmts = ["CREATE SCHEMA s1",
                 "CREATE TABLE s1.t1 (c1 integer, c2 text)"]
        augmap = {'schema s1': {'table t1': {'audit_columns': 'default'}}}
        dbmap = self.to_map(stmts, augmap)
        expmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'modified_by_user': {'type': 'character varying(63)',
                                  'not_null': True}},
            {'modified_timestamp': {'type': 'timestamp with time zone',
                                    'not_null': True}}],
            'triggers': {'t1_20_audit_default': {
                'events': ['update'], 'level': 'row',
                'procedure': 's1.audit_default()', 'timing': 'before'}}}
        assert expmap == dbmap['schema s1']['table t1']
        assert dbmap['schema s1']['function audit_default()']['returns'] == \
            'trigger'
        assert dbmap['schema s1']['function audit_default()'][
            'source'] == FUNC_SRC1

    def test_skip_existing_columns(self):
        "Do not add already existing audit columns"
        stmts = [CREATE_STMT,
                 "ALTER TABLE t1 ADD modified_by_user varchar(63) NOT NULL",
                 "ALTER TABLE t1 ADD modified_timestamp "
                 "timestamp with time zone NOT NULL"]
        augmap = {'schema public': {'table t1': {
            'audit_columns': 'default'}}}
        dbmap = self.to_map(stmts, augmap)
        expmap = [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                  {'modified_by_user': {'type': 'character varying(63)',
                                        'not_null': True}},
                  {'modified_timestamp': {'type': 'timestamp with time zone',
                                          'not_null': True}}]
        assert expmap == dbmap['schema public']['table t1']['columns']

    def test_change_existing_columns(self):
        "Change already existing audit columns"
        # modified_by_user exists with the wrong type/nullability; the
        # augmentation is expected to bring it in line with the spec.
        stmts = [CREATE_STMT, "ALTER TABLE t1 ADD modified_by_user text ",
                 "ALTER TABLE t1 ADD modified_timestamp "
                 "timestamp with time zone NOT NULL"]
        augmap = {'schema public': {'table t1': {'audit_columns': 'default'}}}
        dbmap = self.to_map(stmts, augmap)
        expmap = [{'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
                  {'modified_by_user': {'type': 'character varying(63)',
                                        'not_null': True}},
                  {'modified_timestamp': {'type': 'timestamp with time zone',
                                          'not_null': True}}]
        assert expmap == dbmap['schema public']['table t1']['columns']

    def test_custom_function_template(self):
        "Add new (non-predefined) audit trigger using a function template"
        # {{column}} placeholders in the template are substituted with the
        # (possibly renamed) audit column names; the function source refers to
        # the template by name via {{custom_template}}.
        template = """
BEGIN
NEW.{{modified_by_user}} = SESSION_USER;
NEW.{{modified_timestamp}} = CURRENT_TIMESTAMP::timestamp(0);
RETURN NEW;
END"""
        source = """
BEGIN
NEW.modified_by_user = SESSION_USER;
NEW.modified_timestamp = CURRENT_TIMESTAMP::timestamp(0);
RETURN NEW;
END"""
        augmap = {
            'augmenter': {
                'audit_columns': {'custom': {
                    'columns': ['modified_by_user', 'modified_timestamp'],
                    'triggers': ['custom_audit']}},
                'function_templates': {'custom_template': template},
                'functions': {'custom_audit()': {
                    'description': 'Maintain custom audit columns',
                    'language': 'plpgsql',
                    'returns': 'trigger',
                    'security_definer': True,
                    'source': '{{custom_template}}'}},
                'triggers': {'custom_audit': {
                    'events': ['insert', 'update'],
                    'level': 'row',
                    'name': '{{table_name}}_20_custom_audit',
                    'procedure': 'custom_audit()',
                    'timing': 'before'}}},
            'schema public': {'table t1': {
                'audit_columns': 'custom'}}}
        dbmap = self.to_map([CREATE_STMT], augmap)
        expmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'modified_by_user': {'type': 'character varying(63)',
                                  'not_null': True}},
            {'modified_timestamp': {'type': 'timestamp with time zone',
                                    'not_null': True}}],
            'triggers': {'t1_20_custom_audit': {
                'events': ['insert', 'update'], 'level': 'row',
                'procedure': 'custom_audit()', 'timing': 'before'}}}
        assert expmap == dbmap['schema public']['table t1']
        assert dbmap['schema public']['function custom_audit()'][
            'returns'] == 'trigger'
        assert dbmap['schema public']['function custom_audit()'][
            'source'] == source

    def test_custom_function_inline_with_column_substitution(self):
        "Add new (non-predefined) audit trigger using an inline definition"
        # Same as above, but the template text is given inline as the function
        # source instead of via 'function_templates'.
        template = """
BEGIN
NEW.{{modified_by_user}} = SESSION_USER;
NEW.{{modified_timestamp}} = CURRENT_TIMESTAMP::timestamp(0);
RETURN NEW;
END"""
        source = """
BEGIN
NEW.modified_by_user = SESSION_USER;
NEW.modified_timestamp = CURRENT_TIMESTAMP::timestamp(0);
RETURN NEW;
END"""
        augmap = {
            'augmenter': {
                'audit_columns': {'custom': {
                    'columns': ['modified_by_user', 'modified_timestamp'],
                    'triggers': ['custom_audit']}},
                'functions': {'custom_audit()': {
                    'description': 'Maintain custom audit columns',
                    'language': 'plpgsql',
                    'returns': 'trigger',
                    'security_definer': True,
                    'source': template}},
                'triggers': {'custom_audit': {
                    'events': ['insert', 'update'],
                    'level': 'row',
                    'name': '{{table_name}}_20_custom_audit',
                    'procedure': 'custom_audit()',
                    'timing': 'before'}}},
            'schema public': {'table t1': {
                'audit_columns': 'custom'}}}
        dbmap = self.to_map([CREATE_STMT], augmap)
        expmap = {'columns': [
            {'c1': {'type': 'integer'}}, {'c2': {'type': 'text'}},
            {'modified_by_user': {'type': 'character varying(63)',
                                  'not_null': True}},
            {'modified_timestamp': {'type': 'timestamp with time zone',
                                    'not_null': True}}],
            'triggers': {'t1_20_custom_audit': {
                'events': ['insert', 'update'], 'level': 'row',
                'procedure': 'custom_audit()', 'timing': 'before'}}}
        assert expmap == dbmap['schema public']['table t1']
        assert dbmap['schema public']['function custom_audit()'][
            'returns'] == 'trigger'
        assert dbmap['schema public']['function custom_audit()'][
            'source'] == source
| dvarrazzo/Pyrseas | tests/augment/test_audit.py | Python | bsd-3-clause | 12,757 |
import heapq
class AbstractSimulation(object):
    """Event-driven simulation core: ant moves are kept in a heap ordered by
    arrival time and processed one at a time (NOTE: this module is Python 2 --
    a subclass below uses the print statement)."""
    def __init__(self, reality, antmoves, stats):
        self.reality = reality
        self.antmoves = antmoves
        # heapify is in place, so the caller's list becomes the event heap.
        heapq.heapify(antmoves)
        self.stats = stats
        # number of processed events so far
        self.ticks = 0
    def tick(self):
        """Process the next ant arrival; returns (changed items, stats)."""
        ant_move = heapq.heappop(self.antmoves)
        self.reality.world.elapsed_time = ant_move.end_time # simulation is now at the point of ant_move.ant arriving at ant_move.destination
        new_antmove, changed_items_end = ant_move.process_end(self.reality, self.stats)
        # Time must never move backwards relative to the event just processed.
        assert not self.reality.world.elapsed_time > ant_move.end_time
        changed_items_start = new_antmove.process_start()
        assert changed_items_start is not None, new_antmove
        heapq.heappush(self.antmoves, new_antmove)
        self.ticks += 1
        # Intersection: items touched by both the arrival and the departure.
        return changed_items_start & changed_items_end, self.stats
class TickStepSimulation(AbstractSimulation):
    """Advances the simulation one event (tick) at a time."""
    def advance(self):
        """Return (changed items, resolved flag, last route) for one tick."""
        if self.reality.is_resolved():
            return [], True, None
        tick_changed_items, stats = self.tick()
        print 'ticks: %d, food_discovered: %d' % (self.ticks, stats.food_discovered)
        return tick_changed_items, False, stats.last_route
class MultiSpawnStepSimulation(AbstractSimulation):
    """Advances the simulation in bursts, running ticks until roughly
    ``spawn_amount`` changes in the anthill food total have been observed."""
    def __init__(self, reality, *args, **kwargs):
        super(MultiSpawnStepSimulation, self).__init__(reality, *args, **kwargs)
        # number of food-delivery events per advance() call
        self.spawn_amount = 50
        self.anthills = reality.world.get_anthills()
    def _anthill_food_sum(self):
        # Total food currently stored across all anthills.
        return sum(anthill.food for anthill in self.anthills)
    def advance(self):
        if self.reality.is_resolved():
            return [], True, None
        anthill_food_pre_tick = self._anthill_food_sum()
        changed_items = set()
        amount = 0
        while amount <= self.spawn_amount:
            tick_changed_items, stats = self.tick()
            changed_items.update(tick_changed_items)
            anthill_food_post_tick = self._anthill_food_sum()
            # NOTE(review): 'amount' only advances when the food total differs
            # from pre-tick + amount -- presumably counting individual food
            # deliveries; confirm against AntMove.process_end semantics.
            if anthill_food_post_tick != anthill_food_pre_tick+amount:
                if self.reality.is_resolved():
                    break
                amount += 1
        return changed_items, False, stats.last_route
class SpawnStepSimulation(MultiSpawnStepSimulation):
    """Burst simulation that stops after a single food-delivery event."""
    def __init__(self, reality, *args, **kwargs):
        super(SpawnStepSimulation, self).__init__(reality, *args, **kwargs)
        self.spawn_amount = 1
class LastSpawnStepSimulation(MultiSpawnStepSimulation):
    """Burst simulation sized to the world's total food, i.e. it runs until
    the scenario is effectively finished."""
    def __init__(self, reality, *args, **kwargs):
        super(LastSpawnStepSimulation, self).__init__(reality, *args, **kwargs)
        self.spawn_amount = reality.world.get_total_food()
| ppolewicz/ant-colony | antcolony/simulation.py | Python | bsd-3-clause | 2,667 |
from django import forms
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from djadmin2.site import djadmin2_site
from ..admin2 import UserAdmin2
class UserAdminTest(TestCase):
    """Checks that the djadmin2 User admin forms use floppyforms widgets,
    both when instantiated directly and when rendered through the views."""
    def setUp(self):
        self.factory = RequestFactory()
        # A superuser so the admin views do not reject the request.
        self.user = User(
            username='admin',
            is_staff=True,
            is_superuser=True)
        self.user.set_password('admin')
        self.user.save()

    def test_create_form_uses_floppyform_widgets(self):
        # Direct instantiation of the create form.
        form = UserAdmin2.create_form_class()
        self.assertTrue(
            isinstance(form.fields['username'].widget,
                       forms.TextInput))
        # Same check on the form produced by the create view.
        request = self.factory.get(reverse('admin2:auth_user_create'))
        request.user = self.user
        model_admin = UserAdmin2(User, djadmin2_site)
        view = model_admin.create_view.view.as_view(
            **model_admin.get_create_kwargs())
        response = view(request)
        form = response.context_data['form']
        self.assertTrue(
            isinstance(form.fields['username'].widget,
                       forms.TextInput))

    def test_update_form_uses_floppyform_widgets(self):
        # Direct instantiation of the update form.
        form = UserAdmin2.update_form_class()
        self.assertTrue(
            isinstance(form.fields['username'].widget,
                       forms.TextInput))
        self.assertTrue(
            isinstance(form.fields['date_joined'].widget,
                       forms.DateTimeInput))
        # Same checks on the form produced by the update view.
        request = self.factory.get(
            reverse('admin2:auth_user_update', args=(self.user.pk,)))
        request.user = self.user
        model_admin = UserAdmin2(User, djadmin2_site)
        view = model_admin.update_view.view.as_view(
            **model_admin.get_update_kwargs())
        response = view(request, pk=self.user.pk)
        form = response.context_data['form']
        self.assertTrue(
            isinstance(form.fields['username'].widget,
                       forms.TextInput))
        self.assertTrue(
            isinstance(form.fields['date_joined'].widget,
                       forms.DateTimeInput))
| andrewsmedina/django-admin2 | djadmin2/tests/test_auth_admin.py | Python | bsd-3-clause | 2,211 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 9.3 from Kane 1985."""
from __future__ import division
from sympy import cos, diff, expand, pi, solve, symbols
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dot, dynamicsymbols
from util import msprint, subs, partial_velocities
from util import generalized_active_forces, potential_energy
# Symbols: gravity, mass, contact-force components, disk radius, time.
g, m, Px, Py, Pz, R, t = symbols('g m Px Py Pz R t')
q = dynamicsymbols('q1:5')
| nouiz/pydy | examples/Kane1985/Chapter5/Ex9.3.py | Python | bsd-3-clause | 3,590 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['PolyTrend'] , ['Seasonal_Minute'] , ['NoAR'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_PolyTrend_Seasonal_Minute_NoAR.py | Python | bsd-3-clause | 161 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/configuration.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import copy
import json
import os
try:
import yaml
except ImportError:
has_yaml = False
"""Whether the :py:mod:`yaml` module is available."""
else:
has_yaml = True
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
SERIALIZER_DRIVERS = {}
"""A dictionary containing a mapping of driver names to serialization function data."""
SERIALIZER_DRIVERS['json'] = {'load': json.load, 'dumps': lambda obj: json.dumps(obj, sort_keys=True, indent=4)}
SERIALIZER_DRIVERS['jsn'] = {'load': json.load, 'dumps': lambda obj: json.dumps(obj, sort_keys=True, indent=4)}
SERIALIZER_DRIVERS['yaml'] = {'load': lambda file_obj: yaml.load(file_obj, Loader=Loader), 'dumps': lambda obj: yaml.dumps(obj, default_flow_style=False, Dumper=Dumper)}
SERIALIZER_DRIVERS['yml'] = {'load': lambda file_obj: yaml.load(file_obj, Loader=Loader), 'dumps': lambda obj: yaml.dumps(obj, default_flow_style=False, Dumper=Dumper)}
class Configuration(object):
"""
This class provides a generic object for parsing configuration files
in multiple formats.
"""
def __init__(self, configuration_file, prefix=''):
"""
:param str configuration_file: The configuration file to parse.
:param str prefix: String to be prefixed to all option names.
"""
self.prefix = prefix
self.seperator = '.'
self.configuration_file = configuration_file
file_h = open(self.configuration_file, 'r')
self._storage = dict(self._serializer('load', file_h))
file_h.close()
@property
def configuration_file_ext(self):
"""
The extension of the current configuration file.
"""
return os.path.splitext(self.configuration_file)[1][1:]
def _serializer(self, operation, *args):
if not self.configuration_file_ext in SERIALIZER_DRIVERS:
raise ValueError('unknown file type \'' + self.configuration_file_ext + '\'')
function = SERIALIZER_DRIVERS[self.configuration_file_ext][operation]
return function(*args)
def get(self, item_name):
"""
Retrieve the value of an option.
:param str item_name: The name of the option to retrieve.
:return: The value of *item_name* in the configuration.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
node = self._storage
for item_name in item_names:
node = node[item_name]
return node
def get_if_exists(self, item_name, default_value=None):
"""
Retrieve the value of an option if it exists, otherwise
return *default_value* instead of raising an error:
:param str item_name: The name of the option to retrieve.
:param default_value: The value to return if *item_name* does not exist.
:return: The value of *item_name* in the configuration.
"""
if self.has_option(item_name):
return self.get(item_name)
return default_value
def get_storage(self):
"""
Get a copy of the internal configuration. Changes made to the returned
copy will not affect this object.
:return: A copy of the internal storage object.
:rtype: dict
"""
return copy.deepcopy(self._storage)
def get_missing(self, verify_file):
"""
Use a verification configuration which has a list of required options
and their respective types. This information is used to identify missing
and incompatbile options in the loaded configuration.
:param str verify_file: The file to load for verification data.
:return: A dictionary of missing and incompatible settings.
:rtype: dict
"""
vconf = Configuration(verify_file)
missing = {}
for setting, setting_type in vconf.get('settings').items():
if not self.has_option(setting):
missing['missing'] = missing.get('settings', [])
missing['missing'].append(setting)
elif not type(self.get(setting)).__name__ == setting_type:
missing['incompatible'] = missing.get('incompatible', [])
missing['incompatible'].append((setting, setting_type))
return missing
def has_option(self, option_name):
"""
Check that an option exists.
:param str option_name: The name of the option to check.
:return: True of the option exists in the configuration.
:rtype: bool
"""
if self.prefix:
option_name = self.prefix + self.seperator + option_name
item_names = option_name.split(self.seperator)
node = self._storage
for item_name in item_names:
if not item_name in node:
return False
node = node[item_name]
return True
def has_section(self, section_name):
"""
Checks that an option exists and that it contains sub options.
:param str section_name: The name of the section to check.
:return: True if the section exists.
:rtype: dict
"""
if not self.has_option(section_name):
return False
return isinstance(self.get(section_name), dict)
def set(self, item_name, item_value):
"""
Sets the value of an option in the configuration.
:param str item_name: The name of the option to set.
:param item_value: The value of the option to set.
"""
if self.prefix:
item_name = self.prefix + self.seperator + item_name
item_names = item_name.split(self.seperator)
item_last = item_names.pop()
node = self._storage
for item_name in item_names:
if not item_name in node:
node[item_name] = {}
node = node[item_name]
node[item_last] = item_value
return
def save(self):
"""
Save the current configuration to disk.
"""
file_h = open(self.configuration_file, 'wb')
file_h.write(self._serializer('dumps', self._storage))
file_h.close()
| 0x0mar/king-phisher | king_phisher/configuration.py | Python | bsd-3-clause | 7,045 |
import re
from collections import namedtuple, deque
from .regex import RE_SPLIT, RE_KEY, RE_INDEX
from .utils import Enum, _missing
# A single step in a parsed attribute path: ``name`` is the attribute
# name, dictionary key or list index, ``access_method`` is an
# AccessorType value describing how it should be resolved.
StackItem = namedtuple('StackItem', ['name', 'access_method'])
# Accessor methods:
# INDEX means this accessor is index or key-based, eg. [1] or ['foo']
# DEFAULT means property (plain attribute access, with a key lookup fallback)
AccessorType = Enum(['INDEX', 'DEFAULT'])
def first(*vals):
    """Return the first argument that is not ``_missing``.

    Falls back to ``_missing`` itself when every argument is missing.
    """
    return next((candidate for candidate in vals if candidate is not _missing),
                _missing)
def get_key(obj, index):
    """Return ``obj[index]``, or ``_missing`` when the key or index
    cannot be resolved on *obj*.
    """
    try:
        value = obj[index]
    except (KeyError, IndexError, TypeError):
        return _missing
    return value
def get_attribute(obj, attr):
    """Return ``getattr(obj, attr)``, or ``_missing`` when *obj* has no
    such attribute.
    """
    # the three-argument getattr form swallows AttributeError exactly
    # like the explicit try/except would
    return getattr(obj, attr, _missing)
class StringAttribute(object):
    """
    Used to access a deeply nested attributes of a Python data structure
    using a string representation of Python-like syntax.

    Eg.:

        # Data structure
        my_dict = {
            'foo': {
                'bar': [
                    {'baz': 'wee'},
                    {'baz': 'woo'}
                ]
            }
        }

        # Return 'woo'
        StringAttribute('foo.bar[1].baz').get(my_dict)
    """
    def __init__(self, string_attr_path=None, default=_missing):
        """Store the default and, when given, pre-parse the path.

        :param string_attr_path: dotted/indexed path such as
            ``'foo.bar[1].baz'``; may be omitted and supplied later to
            :meth:`get`/:meth:`set`.
        :param default: value returned by :meth:`get` when the path does
            not resolve (``_missing`` means "raise instead").
        """
        self._default = default
        # NOTE(review): when string_attr_path is None, self._stack is never
        # assigned; get()/set() must then be called with an explicit path.
        if string_attr_path is not None:
            self._stack = self._parse(string_attr_path)
        self._string_attr_path = string_attr_path
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self._string_attr_path)
    def __str__(self):
        return '%r' % self._string_attr_path
    def _get(self, obj, stack, default=_missing):
        """Retrieve value from an object structure given a list of
        attributes (a deque of StackItem accessors)."""
        pointer = obj
        # Try all access methods
        for accessor in stack:
            # Key or index accessors
            if accessor.access_method == AccessorType.INDEX:
                pointer = get_key(pointer, accessor.name)
            # Default accessor
            elif accessor.access_method == AccessorType.DEFAULT:
                # Attempt to get the object attribute first, or if that fails
                # try to get a key with that name or list index
                pointer = first(get_attribute(pointer, accessor.name),
                                get_key(pointer, accessor.name))
            # If nothing could be accessed return None or raise an error
            if pointer is _missing:
                if default is not _missing:
                    return default
                else:
                    self._raise_exception(obj, accessor.name)
        return pointer
    def _parse(self, string_attr_path):
        """Parse string_attr_path into a stack of accessors."""
        stack = deque()
        for node in self._split(string_attr_path):
            # Node is a list index (eg. '[2]')
            if re.match(RE_INDEX, node):
                # Convert into integer; str.translate with a None table is
                # the Python 2 idiom for deleting the '[' and ']' characters.
                list_index = int(node.translate(None, '[]'))
                stack.append(StackItem(list_index, AccessorType.INDEX))
            # Node is a key (string-based index)
            elif re.match(RE_KEY, node):
                key = re.match(RE_KEY, node).groups()[0]
                stack.append(StackItem(key, AccessorType.INDEX))
            else:
                # Default accessor method
                stack.append(StackItem(node, AccessorType.DEFAULT))
        return stack
    def _raise_exception(self, obj, node):
        """Raise exception."""
        raise Exception('%r object has no key or attribute at path %r' % (obj.__class__.__name__, node))
    @classmethod
    def _split(cls, string_attr_path):
        """Split string into list of accessor nodes."""
        # Split string at '.' and '[0]'
        nodes = re.split(RE_SPLIT, string_attr_path)
        # Filter out empty position params from the split
        nodes = filter(lambda x: x, nodes)
        return nodes
    def get(self, obj, string_attr_path=None, default=_missing):
        """Retrieve value from an object structure using string
        representation of attributes path.

        A path given here overrides the one supplied to the constructor.
        """
        # Get defaults
        if default is _missing:
            default = self._default
        if string_attr_path is not None:
            stack = self._parse(string_attr_path)
        else:
            string_attr_path = self._string_attr_path
            stack = self._stack
        return self._get(obj, stack, default)
    def set(self, base_obj, value, string_attr_path=None):
        """Set value on an object structure using string representation
        of attributes path."""
        if string_attr_path is not None:
            stack = self._parse(string_attr_path)
        else:
            string_attr_path = self._string_attr_path
            stack = self._stack
        # Get the name of the attribute we're setting (the last item in
        # the stack)
        attr = stack.pop()
        # Get the actual object we're going to operate on
        target_obj = self._get(base_obj, stack)
        # Set the attribute or key value
        if attr.access_method == AccessorType.INDEX:
            target_obj[attr.name] = value
        else:
            setattr(target_obj, attr.name, value)
# Wrapper functions for a builtin-esque feel...
def getstrattr(obj, attr, default=_missing):
    """Retrieve value from an object structure using string
    representation of attributes path."""
    accessor = StringAttribute()
    return accessor.get(obj, attr, default)
def setstrattr(obj, attr, val):
    """Set value on an object structure using string representation
    of attributes path."""
    accessor = StringAttribute()
    return accessor.set(obj, val, attr)
| dansimau/pystringattr | stringattr.py | Python | bsd-3-clause | 5,928 |
# Copyright 2017 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base application-platform build configuration.
If applications wish to customize or extend build configuration information by
platform, they should add an <application-name>/configuration.py file to the
port directory of the platform they wish to customize
(e.g. linux/x64x11/cobalt/configuration.py for the `cobalt` application and the
`linux-x64x11` platform). This module should contain an class that extends the
class defined here.
"""
import os
class ApplicationConfiguration(object):
  """Base build configuration class for all Starboard applications.

  Application-specific configurations should subclass this and override
  the hooks they need; every hook here returns an empty/neutral value.
  """

  def __init__(self, platform_configuration, application_name,
               application_directory):
    """Initialize ApplicationConfiguration.

    Args:
      platform_configuration: An instance of StarboardBuildConfiguration for
        the platform being built.
      application_name: The name of the application that is loading this
        configuration.
      application_directory: The absolute path of the directory containing
        the application configuration being loaded.
    """
    self._application_name = application_name
    self._application_directory = application_directory
    self._platform_configuration = platform_configuration

  def GetName(self):
    """Gets the application name."""
    return self._application_name

  def GetDirectory(self):
    """Gets the directory of the application configuration."""
    return self._application_directory

  def GetPreIncludes(self):
    """Gets absolute paths of .gypi files GYP should include first.

    Returns:
      An ordered list containing absolute paths to .gypi files.
    """
    return []

  def GetPostIncludes(self):
    """Gets absolute paths of .gypi files GYP should include last.

    By default this is the application directory's configuration.gypi,
    when such a file exists.

    Returns:
      An ordered list containing absolute paths to .gypi files.
    """
    candidate = os.path.join(self.GetDirectory(), 'configuration.gypi')
    return [candidate] if os.path.isfile(candidate) else []

  def GetEnvironmentVariables(self):
    """Gets a Mapping of environment variable overrides.

    These are set before calling GYP or GN and override any variables of
    the same name from the calling environment or the platform defaults.

    Returns:
      A dictionary containing environment variables.
    """
    return {}

  def GetVariables(self, config_name):
    """Gets a Mapping of GYP/GN variable overrides.

    Args:
      config_name: The name of the starboard.tools.config build type.

    Returns:
      A Mapping of GYP/GN variables merged into the global Mapping.
    """
    del config_name  # Unused by the base implementation.
    return {}

  def GetGeneratorVariables(self, config_name):
    """Gets a Mapping of generator variable overrides.

    Args:
      config_name: The name of the starboard.tools.config build type.

    Returns:
      A Mapping of generator variable names and values.
    """
    del config_name  # Unused by the base implementation.
    return {}

  def GetTestEnvVariables(self):
    """Gets a dict of environment variables needed by unit test binaries."""
    return {}

  def GetTestFilters(self):
    """Gets all tests to be excluded from a unit test run.

    Returns:
      A list of initialized starboard.tools.testing.TestFilter objects.
    """
    return []

  def GetTestTargets(self):
    """Gets all tests to be run in a unit test run.

    Returns:
      A list of strings of test target names.
    """
    return []

  def GetDefaultTargetBuildFile(self):
    """Gets the build file to build by default."""
    return None
| youtube/cobalt | starboard/build/application_configuration.py | Python | bsd-3-clause | 4,981 |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
import collections
from django.db import models
from django.utils import six
from django.utils.six.moves import zip
from yepes.contrib.datamigrations.exceptions import (
UnableToCreateError,
UnableToExportError,
UnableToImportError,
UnableToUpdateError,
)
from yepes.contrib.datamigrations.fields import (
BooleanField,
DateField, DateTimeField, TimeField,
FileField,
FloatField, IntegerField, NumberField,
TextField,
)
from yepes.contrib.datamigrations.importation_plans import importation_plans
from yepes.contrib.datamigrations.serializers import serializers
from yepes.contrib.datamigrations.utils import ModelFieldsCache
from yepes.types import Undefined
from yepes.utils.properties import cached_property
class DataMigration(object):
    """Base class describing a two-way (export/import) data migration.

    Subclasses declare ``fields`` and implement ``get_data_to_export``;
    the ``can_create``/``can_update`` flags gate which importation plans
    may be used.
    """

    # Class-level defaults; subclasses or properties override these.
    can_create = False
    can_update = False
    fields = []

    def export_data(self, file=None, serializer=None):
        """Serialize one row per record for ``fields_to_export``.

        :raises UnableToExportError: when no fields are exportable.
        """
        if not self.can_export:
            raise UnableToExportError
        serializer = self.get_serializer(serializer)
        headers = [fld.name for fld in self.fields_to_export]
        data = self.get_data_to_export(serializer)
        return serializer.serialize(headers, data, file)

    def get_data_to_export(self, serializer):
        """Hook: return an iterable of rows (one sequence per record)."""
        raise NotImplementedError('Subclasses of DataMigration must override get_data_to_export() method')

    def get_data_to_import(self, source, serializer):
        """Lazily deserialize *source* into dicts keyed by field path.

        Values that deserialize to ``Undefined`` are dropped from the row.
        """
        fields = self.fields_to_import
        headers = [fld.name for fld in fields]
        data = serializer.deserialize(headers, source)
        return (
            {
                fld.path: fld.import_value(val, serializer)
                for val, fld
                in zip(row, fields)
                if val is not Undefined
            }
            for row
            in data
        )

    def get_importation_plan(self, plan_class):
        """Resolve *plan_class* (registry name or class) to an instance."""
        if isinstance(plan_class, six.string_types):
            plan_class = importation_plans.get_plan(plan_class)
        return plan_class(self)

    def get_serializer(self, serializer=None):
        """Resolve *serializer* (name, class or instance); default 'json'."""
        if serializer is None:
            serializer = 'json'
        if isinstance(serializer, six.string_types):
            serializer = serializers.get_serializer(serializer)
        if isinstance(serializer, collections.Callable):
            # NOTE(review): collections.Callable only exists on Python 2 /
            # <3.10; newer Pythons require collections.abc.Callable.
            serializer = serializer()
        return serializer

    def import_data(self, source, serializer=None, plan=None, batch_size=100):
        """Load records from *source* via an importation *plan*.

        :raises UnableToImportError: importing is not supported at all.
        :raises UnableToCreateError: the plan inserts but creation is off.
        :raises UnableToUpdateError: the plan updates but updating is off.
        """
        if not self.can_import:
            raise UnableToImportError
        plan = self.get_importation_plan(plan)
        if not self.can_create and getattr(plan, 'inserts_data', True):
            raise UnableToCreateError
        if not self.can_update and getattr(plan, 'updates_data', True):
            raise UnableToUpdateError
        serializer = self.get_serializer(serializer)
        data = self.get_data_to_import(source, serializer)
        plan.run(data, batch_size)

    @property
    def can_export(self):
        # Exportable as soon as at least one field is declared.
        return bool(self.fields_to_export)

    @property
    def can_import(self):
        # Importable when fields exist and some write mode is allowed.
        return self.fields_to_import and (self.can_create or self.can_update)

    @cached_property
    def fields_to_export(self):
        # By default every declared field is exported.
        return self.fields

    @cached_property
    def fields_to_import(self):
        # Importing is pointless unless records can be written somehow.
        return self.fields if self.can_create or self.can_update else []
class BaseModelMigration(DataMigration):
    """DataMigration bound to a Django model class."""

    def __init__(self, model, use_base_manager=False,
            ignore_missing_foreign_keys=False):
        """
        :param model: the Django model class to migrate.
        :param bool use_base_manager: export through ``_base_manager``
            instead of ``_default_manager`` (bypasses default filtering).
        :param bool ignore_missing_foreign_keys: consumed by importation
            plans — presumably skips rows whose FK targets are absent;
            TODO confirm against the plan implementations.
        """
        self.model = model
        self.use_base_manager = use_base_manager
        self.ignore_missing_foreign_keys = ignore_missing_foreign_keys

    def get_data_to_export(self, serializer):
        """Return an iterable of rows covering the whole model table."""
        if self.use_base_manager:
            manager = self.model._base_manager
        else:
            manager = self.model._default_manager
        qs = manager.get_queryset()
        # values_list() is cheaper, but paths backed by object properties
        # require real model instances.
        if self.requires_model_instances:
            return self._data_from_objects(qs, serializer)
        else:
            return self._data_from_values(qs, serializer)

    def _data_from_objects(self, queryset, serializer):
        """Build export rows from full model instances."""
        fields = self.fields_to_export
        # Stream with iterator() unless results are already cached or
        # prefetch_related() would be defeated by it.
        if (queryset._result_cache is None
                and not queryset._prefetch_related_lookups):
            queryset = queryset.iterator()
        return (
            [
                fld.export_value(
                    fld.value_from_object(obj),
                    serializer,
                )
                for fld
                in fields
            ]
            for obj
            in queryset
        )

    def _data_from_values(self, queryset, serializer):
        """Build export rows from values_list() tuples (no instances)."""
        fields = self.fields_to_export
        return (
            [
                fld.export_value(val, serializer)
                for val, fld
                in zip(row, fields)
            ]
            for row
            in queryset.values_list(*[
                fld.path
                for fld
                in fields
            ]).iterator()
        )

    def get_importation_plan(self, plan_class=None):
        """Pick a default plan matching the allowed write modes."""
        if plan_class is None:
            if self.can_create and self.can_update:
                plan_class = 'update_or_create'
            elif self.can_create:
                plan_class = 'create'
            elif self.can_update:
                plan_class = 'update'
        return super(BaseModelMigration, self).get_importation_plan(plan_class)

    @cached_property
    def can_create(self):
        """True when every required model field is covered by the import."""
        included_fields = {
            self.model_fields[fld][0]
            for fld
            in self.fields_to_import
        }
        # Required = concrete, non-blank, and without a default value.
        required_fields = {
            f
            for f
            in self.model._meta.get_fields()
            if not (f.is_relation and f.auto_created)
            and not f.blank
            and not f.has_default()
        }
        return (required_fields <= included_fields)

    @property
    def can_update(self):
        # Updating requires some way of identifying existing rows.
        return (self.primary_key is not None)

    @cached_property
    def fields_to_import(self):
        """Importable fields: direct fields, plus two-hop natural keys."""
        fields = []
        for fld, model_fields in six.iteritems(self.model_fields):
            if (len(model_fields) == 1
                    and '__' not in fld.path):
                fields.append(fld)
            elif (len(model_fields) == 2
                    and fld.path.count('__') == 1):
                f1, f2 = model_fields
                # FK followed by a non-null unique scalar on the target.
                if (f2.unique and not f2.null
                        and f1.remote_field is not None and f2.remote_field is None):
                    fields.append(fld)  # This allows use of natural keys.
        return fields

    @cached_property
    def model_fields(self):
        """Ordered mapping of migration field -> list of model fields
        traversed by its (possibly multi-hop) path."""
        cache = ModelFieldsCache()
        fields = []
        for fld in self.fields:
            model_fields = cache.get_model_fields(
                self.model,
                fld.path.split('__'),
            )
            fields.append((fld, model_fields))
        return collections.OrderedDict(fields)

    @cached_property
    def natural_foreign_keys(self):
        # Multi-hop import paths are, by construction, natural FK lookups.
        return [
            fld
            for fld
            in self.fields_to_import
            if '__' in fld.path
        ] or None

    @cached_property
    def primary_key(self):
        """Field (or tuple of fields) usable to identify existing rows.

        Preference order: real primary key, then any non-null unique
        field, then the first fully-covered ``unique_together`` group.
        """
        key = None
        opts = self.model._meta
        for fld in self.fields_to_import:
            f = self.model_fields[fld][0]
            if f.primary_key:
                return fld
            if key is None and f.unique and not f.null:
                key = fld
        if key is None and opts.unique_together:
            available_model_fields = {
                model_fields[0].name: fld
                for fld, model_fields
                in six.iteritems(self.model_fields)
                if fld in self.fields_to_import
            }
            # NOTE(review): the loop variable 'set' shadows the builtin
            # within this scope; harmless here but worth renaming.
            for set in opts.unique_together:
                try:
                    key = tuple(
                        available_model_fields[name]
                        for name
                        in set
                    )
                except KeyError:
                    continue
                else:
                    break
        return key

    @cached_property
    def requires_model_instances(self):
        """True when any path resolves past the model fields, i.e. ends
        in an object property that values_list() cannot reach."""
        for fld, model_fields in six.iteritems(self.model_fields):
            if len(model_fields) < (fld.path.count('__') + 1):
                return True  # Field path points to an object property.
        return False
class ModelMigration(BaseModelMigration):
    """Migration whose field list is derived from the model itself.

    Fields may be explicitly selected or excluded, and primary/foreign
    keys can optionally be replaced by natural keys.
    """

    def __init__(self, model, fields=None, exclude=None,
            use_natural_primary_keys=False,
            use_natural_foreign_keys=False,
            use_base_manager=False,
            ignore_missing_foreign_keys=False):
        """
        :param fields: optional ordered iterable of field names to
            include ('pk' is normalized to the real pk name).
        :param exclude: optional iterable of field names to leave out.
        :param bool use_natural_primary_keys: replace auto primary keys
            with a natural key when one can be found.
        :param bool use_natural_foreign_keys: export FKs through a unique
            field of the target model instead of the raw id.
        """
        super(ModelMigration, self).__init__(model, use_base_manager,
                                             ignore_missing_foreign_keys)
        if not fields:
            self.selected_fields = None
        else:
            self.selected_fields = [  # Field order matters
                name if name != 'pk' else model._meta.pk.name
                for name
                in fields
            ]
        if not exclude:
            self.excluded_fields = None
        else:
            self.excluded_fields = {
                name if name != 'pk' else model._meta.pk.name
                for name
                in exclude
            }
        self.use_natural_primary_keys = use_natural_primary_keys
        self.use_natural_foreign_keys = use_natural_foreign_keys

    def build_field(self, model_field, path=None, name=None, attname=None):
        """Map a Django model field to a migration field instance.

        Returns None for model field types with no known mapping.
        """
        # A field can provide its own migration field implementation.
        if hasattr(model_field, 'migrationfield'):
            return model_field.migrationfield(path, name, attname)
        if path is None:
            path = model_field.attname
        if name is None:
            name = model_field.name
        if attname is None:
            attname = path
        # Order matters below: subclasses must be tested before their
        # base classes (e.g. DateTimeField before DateField).
        if isinstance(model_field, (models.BooleanField, models.NullBooleanField)):
            field_class = BooleanField
        elif isinstance(model_field, models.DateTimeField):
            field_class = DateTimeField
        elif isinstance(model_field, models.DateField):
            field_class = DateField
        elif isinstance(model_field, models.FileField):
            field_class = FileField
        elif isinstance(model_field, models.FloatField):
            field_class = FloatField
        elif isinstance(model_field, (models.IntegerField, models.AutoField)):
            field_class = IntegerField
        elif isinstance(model_field, models.DecimalField):
            field_class = NumberField
        elif isinstance(model_field, (models.CharField, models.TextField,
                                      models.FilePathField,
                                      models.IPAddressField, models.GenericIPAddressField)):
            field_class = TextField
        elif isinstance(model_field, models.TimeField):
            field_class = TimeField
        else:
            return None
        return field_class(path, name, attname)

    def build_relation(self, model_field, path=None, name=None, attname=None):
        """Map a ForeignKey to one or more migration fields.

        Returns a list of (migration_field, [model fields traversed])
        pairs, or None when the relation cannot be migrated.
        """
        # Discard ManyToManyFields and GenericForeignKeys
        if not isinstance(model_field, models.ForeignKey):
            return None
        if path is None:
            path = model_field.attname
        if name is None:
            name = model_field.name
        if attname is None:
            attname = path
        target_field = model_field.target_field
        if self.use_natural_foreign_keys:
            opts = target_field.model._meta
            natural_key = self.find_natural_key(
                opts.get_fields(),
                opts.unique_together)
            if natural_key is not None:
                # NOTE(review): collections.Iterable is Python 2 / <3.10
                # only; newer Pythons need collections.abc.Iterable.
                if not isinstance(natural_key, collections.Iterable):
                    # Single-field natural key.
                    fld = self.build_field(
                        natural_key,
                        ''.join((name, '__', natural_key.attname)),
                        ''.join((name, '__', natural_key.name)),
                        attname)
                    if fld is not None:
                        return [(fld, [model_field, natural_key])]
                else:
                    # Composite natural key from unique_together.
                    flds = [
                        self.build_field(
                            key,
                            ''.join((name, '__', key.attname)),
                            ''.join((name, '__', key.name)),
                            attname)
                        for key
                        in natural_key
                    ]
                    if all(fld is not None for fld in flds):
                        return [
                            (fld, [model_field, key])
                            for fld, key
                            in zip(flds, natural_key)
                        ]
        # Fall back to exporting the raw target (usually the pk) value.
        fld = self.build_field(target_field, path, name, attname)
        return [(fld, [model_field])]

    def find_natural_key(self, model_fields, unique_together=()):
        """Find a unique non-pk field, or a fully available
        unique_together tuple, usable as a natural key."""
        for f in model_fields:
            if not f.is_relation and f.unique and not f.primary_key:
                return f
        if unique_together:
            available_model_fields = {
                f.name: f
                for f
                in model_fields
            }
            # NOTE(review): the loop variable 'set' shadows the builtin
            # within this scope; harmless here but worth renaming.
            for set in unique_together:
                try:
                    return tuple(
                        available_model_fields[name]
                        for name
                        in set
                    )
                except KeyError:
                    continue
        return None

    @cached_property
    def fields(self):
        # The migration fields are the keys of the resolved mapping.
        return [fld for fld in self.model_fields]

    @cached_property
    def model_fields(self):
        """Resolve the selected/excluded names into an ordered mapping of
        migration field -> list of traversed model fields."""
        cache = ModelFieldsCache()
        selected_fields = self.selected_fields or [
            f.name
            for f
            in cache.get_all_model_fields(self.model)
        ]
        if self.excluded_fields:
            selected_fields = [
                field_name
                for field_name
                in selected_fields
                if field_name not in self.excluded_fields
            ]
        # Replace auto primary keys with a natural key, but only when the
        # caller did not pin an explicit field selection.
        if self.use_natural_primary_keys and not self.selected_fields:
            model_fields = [
                f
                for f
                in cache.get_all_model_fields(self.model)
                if f.name in selected_fields
            ]
            natural_key = self.find_natural_key(
                model_fields,
                self.model._meta.unique_together
            )
            if natural_key is not None:
                excluded_fields = [
                    f.name
                    for f
                    in model_fields
                    if f.primary_key
                ]
                selected_fields = [
                    field_name
                    for field_name
                    in selected_fields
                    if field_name not in excluded_fields
                ]
        fields = []
        for name in selected_fields:
            path = name.split('__')
            model_fields = cache.get_model_fields(self.model, path)
            if not model_fields or len(model_fields) < len(path):
                continue  # ModelMigration cannot handle properties.
            model_field = model_fields[-1]
            path = '__'.join(f.attname for f in model_fields)
            name = '__'.join(f.name for f in model_fields)
            attname = path
            if not model_field.is_relation:
                fld = self.build_field(model_field, path, name, attname)
                if fld is not None:
                    fields.append((fld, model_fields))
            else:
                rel = self.build_relation(model_field, path, name, attname)
                if rel is not None:
                    # Prepend any intermediate hops leading to the FK.
                    if len(model_fields) > 1:
                        previous_model_fields = model_fields[:-1]
                        rel = [
                            (fld, previous_model_fields + rel_model_fields)
                            for fld, rel_model_fields
                            in rel
                        ]
                    fields.extend(rel)
        return collections.OrderedDict(fields)
class QuerySetExportation(ModelMigration):
    """Read-only migration bound to one concrete queryset.

    Field selection is recovered from the queryset's only()/defer()
    state; importing is disabled entirely.
    """

    can_create = False
    can_update = False
    fields_to_import = []

    def __init__(self, queryset):
        model = queryset.model
        opts = model._meta
        fields = exclude = None
        field_names, defer = queryset.query.deferred_loading
        if field_names:
            # Restore declaration order for a stable export layout.
            ordered_names = sorted(
                field_names,
                key=lambda n: opts.get_field(n).creation_counter)
            if defer:
                exclude = ordered_names
            else:
                fields = ordered_names
        super(QuerySetExportation, self).__init__(model, fields, exclude)
        self.queryset = queryset

    def get_data_to_export(self, serializer):
        if self.requires_model_instances:
            return self._data_from_objects(self.queryset, serializer)
        return self._data_from_values(self.queryset, serializer)

    @cached_property
    def requires_model_instances(self):
        qs = self.queryset
        # Cached results or prefetches force instance-based export.
        return (qs._result_cache is not None
                or qs._prefetch_related_lookups)
| samuelmaudo/yepes | yepes/contrib/datamigrations/data_migrations.py | Python | bsd-3-clause | 17,481 |
import unittest
from tornadowebapi.renderers import JSONRenderer
class TestJSONRenderer(unittest.TestCase):
    """Unit tests for JSONRenderer."""

    def test_basic_rendering(self):
        """An empty dict renders to '{}'; None passes through unchanged."""
        renderer = JSONRenderer()
        rendered = renderer.render({})
        self.assertEqual(rendered, "{}")
        self.assertEqual(renderer.render(None), None)
| simphony/tornado-webapi | tornadowebapi/renderers/tests/test_json_renderer.py | Python | bsd-3-clause | 287 |
# -*- coding: utf-8 -*-
import os
import os.path
from StringIO import StringIO
from time import time
__all__ = ['Parser', 'IncrementalParser', 'DispatchParser']
import xml.dom as xd
import xml.dom.minidom as xdm
import xml.sax as xs
import xml.sax.handler as xsh
import xml.sax.saxutils as xss
from xml.sax.saxutils import quoteattr, escape, unescape
from bridge import Element, ENCODING, Attribute, PI, Comment, Document
from bridge.common import ANY_NAMESPACE
class Parser(object):
    """Serializes bridge trees to XML text and deserializes XML (via
    xml.dom.minidom) into bridge ``Document`` trees.

    Note: this module is Python 2 code (``unicode``/``basestring``).
    """
    def __init__(self):
        # Accumulates output fragments during serialize(); reset after use.
        self.buffer = []

    def __deserialize_fragment(self, current, parent):
        """Recursively copy a minidom node's attributes and children onto
        the bridge element *parent*."""
        if current.attributes:
            for key in iter(current.attributes.keys()):
                attr = current.attributes[key]
                Attribute(attr.localName, attr.value,
                          attr.prefix, attr.namespaceURI, parent)

        children_num = len(current.childNodes)
        children = iter(current.childNodes)

        for child in children:
            nt = child.nodeType
            if nt == xd.Node.TEXT_NODE:
                data = escape(child.data)
                # An only-child text node becomes xml_text; otherwise it is
                # mixed content appended to xml_children.
                if children_num == 1:
                    parent.xml_text = data
                else:
                    parent.xml_children.append(data)
            elif nt == xd.Node.CDATA_SECTION_NODE:
                parent.as_cdata = True
                data = child.data
                if children_num == 1:
                    parent.xml_text = data
                else:
                    parent.xml_children.append(data)
            elif nt == xd.Node.COMMENT_NODE:
                Comment(data=unicode(child.data), parent=parent)
            elif nt == xd.Node.PROCESSING_INSTRUCTION_NODE:
                PI(target=unicode(child.target), data=unicode(child.data), parent=parent)
            elif nt == xd.Node.ELEMENT_NODE:
                element = Element(name=child.localName, prefix=child.prefix,
                                  namespace=child.namespaceURI, parent=parent)
                self.__deserialize_fragment(child, element)

    def __qname(self, name, prefix=None):
        """Return 'prefix:name', or just 'name' without a prefix."""
        if prefix:
            return "%s:%s" % (prefix, name)
        return name

    def __attrs(self, node):
        """Yield (ns, name, prefix, value) for each serializable
        attribute, skipping the default xmlns declaration."""
        for attr_ns, attr_name in iter(node.xml_attributes):
            if attr_ns == xd.XMLNS_NAMESPACE and attr_name == 'xmlns':
                continue
            attr = node.xml_attributes[(attr_ns, attr_name)]
            ns = attr.xml_ns
            prefix = attr.xml_prefix
            name = attr.xml_name
            yield ns, name, prefix, attr.xml_text or ''

    def __append_namespace(self, prefix, ns):
        """Emit an xmlns (or xmlns:prefix) declaration into the buffer."""
        if prefix:
            self.buffer.append(' xmlns:%s="%s"' % (prefix, ns))
        elif ns is not None:
            self.buffer.append(' xmlns="%s"' % (ns, ))

    def __build_ns_map(self, ns_map, element):
        # NOTE(review): not called from any code visible here, and the
        # membership test mixes namespace/prefix keys — verify before use.
        for child in element.xml_children:
            if isinstance(child, Element):
                if child.xml_ns not in ns_map:
                    ns_map[child.xml_prefix] = child.xml_ns
                for attr_ns, attr_name in child.xml_attributes:
                    if attr_ns not in ns_map:
                        ns_map[attr_ns] = child.xml_attributes[(attr_ns, attr_name)].xml_prefix

    def __is_known(self, ns_map, prefix, ns):
        """Return True if (prefix, ns) was already declared; otherwise
        record it in *ns_map* and return False."""
        if prefix in ns_map and ns_map[prefix] == ns:
            return True
        ns_map[prefix] = ns
        return False

    def __append_text(self, text, as_cdata):
        """Append text to the buffer, wrapping it in CDATA when asked."""
        if as_cdata:
            self.buffer.append('<![CDATA[')
        self.buffer.append(text)
        if as_cdata:
            self.buffer.append(']]>')

    def __serialize_element(self, element, parent_ns_map=None):
        """Recursively write *element*'s children as XML text, tracking
        which namespace declarations are already in scope."""
        for child in iter(element.xml_children):
            if isinstance(child, basestring):
                child = child.strip().strip('\n').strip('\r\n')
                if not child:
                    continue
                self.__append_text(child, element.as_cdata)
            elif isinstance(child, Element):
                # Child inherits the namespaces declared by its ancestors.
                ns_map = {}
                ns_map.update(parent_ns_map or {})

                prefix = ns = name = None
                if child.xml_prefix:
                    prefix = child.xml_prefix
                if child.xml_ns:
                    ns = child.xml_ns
                name = child.xml_name
                qname = self.__qname(name, prefix=prefix)
                self.buffer.append('<%s' % qname)
                if not self.__is_known(ns_map, prefix, ns):
                    self.__append_namespace(prefix, ns)

                for ns, name, prefix, value in self.__attrs(child):
                    if ns is None:
                        pass
                    elif ns == xd.XML_NAMESPACE:
                        name = 'xml:%s' % name
                    elif ns == xd.XMLNS_NAMESPACE:
                        # Explicit namespace declaration attribute.
                        if not self.__is_known(ns_map, name, value):
                            self.__append_namespace(name, value)
                        continue
                    else:
                        name = '%s:%s' % (prefix, name)
                        if not self.__is_known(ns_map, prefix, ns):
                            self.__append_namespace(prefix, ns)

                    self.buffer.append(' %s=%s' % (name, quoteattr(value)))

                if child.xml_text or child.xml_children:
                    self.buffer.append('>')
                    if child.xml_text:
                        self.__append_text(child.xml_text, child.as_cdata)
                    if child.xml_children:
                        self.__serialize_element(child, ns_map)
                    self.buffer.append('</%s>' % (qname, ))
                else:
                    # No content: emit a self-closing tag.
                    self.buffer.append(' />')
            elif isinstance(child, Comment):
                self.buffer.append('<!--%s-->\n' % (child.data,))
            elif isinstance(child, PI):
                self.buffer.append('<?%s %s?>\n' % (child.target, child.data))

    def serialize(self, document, indent=False, encoding=ENCODING, prefixes=None, omit_declaration=False):
        """Serialize a bridge Document (or element, which is wrapped in
        one) to an encoded byte string.

        :param indent: only strips the trailing newline; no pretty print.
        :param prefixes: accepted for API compatibility; unused here.
        """
        if not isinstance(document, Document):
            root = document
            document = Document()
            document.xml_children.append(root)
        self.__serialize_element(document)
        if not omit_declaration:
            self.buffer.insert(0, '<?xml version="1.0" encoding="%s"?>%s' % (encoding, os.linesep))
        content = ''.join(self.buffer)
        self.buffer = []
        if indent:
            return content.rstrip(os.linesep).encode(encoding)
        return content.encode(encoding)

    def deserialize(self, source, prefixes=None, strict=False):
        """Parse *source* (a path, an XML string, or a file-like object)
        into a bridge Document.

        :param prefixes: accepted for API compatibility; unused here.
        :param strict: accepted for API compatibility; unused here.
        """
        doc = None
        if isinstance(source, basestring):
            # A string is either a filesystem path or raw XML text.
            if os.path.exists(source):
                doc = xdm.parse(source)
            else:
                doc = xdm.parseString(source)
        elif hasattr(source, 'read'):
            doc = xdm.parse(source)

        document = Document()
        self.__deserialize_fragment(doc, document)
        if doc:
            try:
                # Break minidom's internal cycles to free memory promptly.
                doc.unlink()
            except KeyError:
                pass
        return document
import xml.sax as xs
import xml.sax.saxutils as xss
from xml.parsers import expat
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from time import time
class IncrementalHandler(xss.XMLGenerator):
    """SAX content handler that incrementally builds a bridge ``Document``.

    Namespace-aware start/end element events append ``Element`` instances
    to the tree; character data, comments, PIs and CDATA sections are
    attached to the element currently being built.
    """

    def __init__(self, out, encoding=ENCODING):
        xss.XMLGenerator.__init__(self, out, encoding)
        self._root = Document()
        self._current_el = self._root
        self._current_level = 0
        self._as_cdata = False

    def reset(self):
        """Drop the current tree and start over with a fresh Document."""
        if self._root:
            self._root.forget()
            self._root = None
        if self._current_el:
            self._current_el.forget()
            self._current_el = None
        self._root = Document()
        self._current_el = self._root
        self._current_level = 0

    def startDocument(self):
        self._root = Document()
        self._current_el = self._root
        self._current_level = 0
        self._as_cdata = False

    # see http://www.xml.com/pub/a/2003/03/10/python.html
    def _split_qname(self, qname):
        """Split a qualified name into a ``(prefix, local_name)`` pair.

        ``prefix`` is None when *qname* carries no prefix.
        """
        qname_split = qname.split(':')
        if len(qname_split) == 2:
            prefix, local = qname_split
        else:
            prefix = None
            # Bug fix: the local name is the string itself, not the
            # single-element list returned by str.split().
            local = qname_split[0]
        return prefix, local

    def processingInstruction(self, target, data):
        PI(target, data, self._current_el)

    def startElementNS(self, name, qname, attrs):
        """Open a new Element beneath the current element."""
        uri, local_name = name
        prefix = None
        # _current_context maps namespace URIs to prefixes and is
        # maintained by the base XMLGenerator prefix-mapping events.
        if uri and uri in self._current_context:
            prefix = self._current_context[uri]
        e = Element(local_name, prefix=prefix, namespace=uri, parent=self._current_el)
        for name, value in iter(attrs.items()):
            (namespace, local_name) = name
            qname = attrs.getQNameByName(name)
            prefix = self._split_qname(qname)[0]
            Attribute(local_name, value, prefix, namespace, e)
        self._current_el = e
        self._current_level = self._current_level + 1

    def endElementNS(self, name, qname):
        """Close the current element and step back up one level."""
        self._current_level = self._current_level - 1
        self._current_el = self._current_el.xml_parent

    def characters(self, content):
        # The first run of plain text becomes xml_text; subsequent runs
        # (and any CDATA) are appended as mixed-content children.
        self._current_el.as_cdata = self._as_cdata
        if not self._as_cdata and not self._current_el.xml_text:
            self._current_el.xml_text = content
        else:
            self._current_el.xml_children.append(content)
        self._as_cdata = False

    def comment(self, data):
        Comment(data, self._current_el)

    def startCDATA(self):
        # Flag the next characters() call as CDATA content.
        self._as_cdata = True

    def endCDATA(self):
        pass

    def startDTD(self, name, public_id, system_id):
        # DTDs are intentionally ignored.
        pass

    def endDTD(self):
        pass

    def doc(self):
        """Returns the root Document instance of the parsed
        document. You have to call the close() method of the
        parser first.
        """
        return self._root
class IncrementalParser(object):
    """Namespace-aware SAX parser that accepts a document in chunks and
    builds a bridge tree through an IncrementalHandler."""

    def __init__(self, out=None, encoding=ENCODING):
        if not out:
            out = StringIO.StringIO()
        self.out = out
        self.parser = xs.make_parser()
        self.parser.setFeature(xs.handler.feature_namespaces, True)
        self.handler = IncrementalHandler(self.out, encoding)
        self.parser.setContentHandler(self.handler)
        # The lexical handler receives comments, CDATA and DTD events.
        self.parser.setProperty(xs.handler.property_lexical_handler, self.handler)

    def feed(self, chunk):
        """Feed one chunk of the document to the underlying parser."""
        self.parser.feed(chunk)

    def reset(self):
        """Discard the partially built tree so the parser can be reused."""
        self.handler.reset()
        self.parser.reset()
class DispatchHandler(IncrementalHandler):
    """This handler allows the incremental parsing of an XML document
    while providing simple ways to dispatch at precise point of the
    parsing back to the caller.

    Here's an example:

    >>> from parser import DispatchParser
    >>> p = DispatchParser()
    >>> def dispatch(e):
    ...     print e.xml()
    ...
    >>> p.register_at_level(1, dispatch)
    >>> p.feed('<r')
    >>> p.feed('><b')
    >>> p.feed('/></r>')
    <?xml version="1.0" encoding="UTF-8"?>
    <b xmlns=""></b>

    Alternatively this can even be used as a generic parser. If you
    don't need dispatching you simply call ``disable_dispatching``.

    >>> from parser import DispatchParser
    >>> p = DispatchParser()
    >>> p.disable_dispatching()
    >>> p.feed('<r><b/></r>')

    Note that this handler has limitations as it doesn't
    manage DTDs.

    Note also that this class is not thread-safe.
    """
    def __init__(self, out=None, encoding='UTF-8'):
        """Initializes the handler.

        ``out`` and ``encoding`` are forwarded to ``IncrementalHandler``.
        """
        # BUG FIX: the original implementation ignored both parameters and
        # always called IncrementalHandler.__init__(self, out=None,
        # encoding=ENCODING), so the values passed by DispatchParser
        # (its output buffer and encoding) never took effect.
        IncrementalHandler.__init__(self, out=out, encoding=encoding)
        self._level_dispatchers = {}
        self._element_dispatchers = {}
        self._element_level_dispatchers = {}
        self._path_dispatchers = {}
        self.default_dispatcher = None
        self.default_dispatcher_start_element = None
        self.disable_dispatching()

    def register_default(self, handler):
        """Registers a fallback dispatcher invoked for every closed element
        that no per-element dispatcher handled."""
        self.default_dispatcher = handler

    def unregister_default(self):
        self.default_dispatcher = None

    def register_default_start_element(self, handler):
        """Registers a dispatcher invoked for every element as soon as its
        start tag has been parsed."""
        self.default_dispatcher_start_element = handler

    def unregister_default_start_element(self):
        self.default_dispatcher_start_element = None

    def disable_dispatching(self):
        """Turns off every kind of dispatching, default ones included."""
        self.default_dispatcher = None
        self.default_dispatcher_start_element = None
        self.enable_level_dispatching = False
        self.enable_element_dispatching = False
        self.enable_element_by_level_dispatching = False
        self.enable_dispatching_by_path = False

    def enable_dispatching(self):
        """Turns every registered kind of dispatching back on."""
        self.enable_level_dispatching = True
        self.enable_element_dispatching = True
        self.enable_element_by_level_dispatching = True
        self.enable_dispatching_by_path = True

    def register_at_level(self, level, dispatcher):
        """Registers a dispatcher at a given level within the
        XML tree of elements being built.

        The ``level``, an integer, is zero-based. So the root
        element of the XML tree is 0 and its direct children
        are at level 1.

        The ``dispatcher`` is a callable object only taking
        one parameter, a Element instance.
        """
        self.enable_level_dispatching = True
        self._level_dispatchers[level] = dispatcher

    def unregister_at_level(self, level):
        """Unregisters a dispatcher at a given level
        """
        if level in self._level_dispatchers:
            del self._level_dispatchers[level]
        if len(self._level_dispatchers) == 0:
            self.enable_level_dispatching = False

    def register_on_element(self, local_name, dispatcher, namespace=None):
        """Registers a dispatcher on a given element met during
        the parsing.

        The ``local_name`` is the local name of the element. This
        element can be namespaced if you provide the ``namespace``
        parameter.

        The ``dispatcher`` is a callable object only taking
        one parameter, a Element instance.
        """
        self.enable_element_dispatching = True
        self._element_dispatchers[(namespace, local_name)] = dispatcher

    def unregister_on_element(self, local_name, namespace=None):
        """Unregisters a dispatcher for a specific element.
        """
        key = (namespace, local_name)
        if key in self._element_dispatchers:
            del self._element_dispatchers[key]
        if len(self._element_dispatchers) == 0:
            self.enable_element_dispatching = False

    def register_on_element_per_level(self, local_name, level, dispatcher, namespace=None):
        """Registers a dispatcher at a given level within the
        XML tree of elements being built as well as for a
        specific element.

        The ``level``, an integer, is zero-based. So the root
        element of the XML tree is 0 and its direct children
        are at level 1.

        The ``local_name`` is the local name of the element. This
        element can be namespaced if you provide the ``namespace``
        parameter.

        The ``dispatcher`` is a callable object only taking
        one parameter, a Element instance.
        """
        self.enable_element_by_level_dispatching = True
        self._element_level_dispatchers[(level, (namespace, local_name))] = dispatcher

    def unregister_on_element_per_level(self, local_name, level, namespace=None):
        """Unregisters a dispatcher at a given level for a specific
        element.
        """
        key = (level, (namespace, local_name))
        if key in self._element_level_dispatchers:
            del self._element_level_dispatchers[key]
        if len(self._element_level_dispatchers) == 0:
            self.enable_element_by_level_dispatching = False

    def register_by_path(self, path, dispatcher):
        """Registers a dispatcher for the element at ``path``."""
        self.enable_dispatching_by_path = True
        self._path_dispatchers[path] = dispatcher

    def unregister_by_path(self, path):
        """Unregisters the dispatcher attached to ``path``."""
        if path in self._path_dispatchers:
            del self._path_dispatchers[path]
        if len(self._path_dispatchers) == 0:
            self.enable_dispatching_by_path = False

    def startElementNS(self, name, qname, attrs):
        IncrementalHandler.startElementNS(self, name, qname, attrs)
        if self.default_dispatcher_start_element:
            self.default_dispatcher_start_element(self._current_el)

    def endElementNS(self, name, qname):
        self._current_level -= 1
        if not self._current_el:
            return
        current_element = self._current_el
        parent_element = self._current_el.xml_parent
        # NOTE(review): only per-element dispatchers and the default
        # dispatcher are consulted here; level/element-per-level/path
        # dispatchers are registered but never dispatched in this method --
        # confirm they are handled elsewhere (e.g. in a subclass).
        dispatched = False
        if self.enable_element_dispatching:
            pattern = (current_element.xml_ns, current_element.xml_name)
            if pattern in self._element_dispatchers:
                self._element_dispatchers[pattern](current_element)
                dispatched = True
        if not dispatched and self.default_dispatcher:
            self.default_dispatcher(current_element)
        self._current_el = parent_element
class DispatchParser(object):
    """Facade over a SAX parser driving a ``DispatchHandler``.

    Exposes the handler's dispatching facilities while feeding the
    document incrementally through ``feed``.
    """
    def __init__(self, out=None, encoding=ENCODING):
        self.parser = xs.make_parser()
        self.parser.setFeature(xs.handler.feature_namespaces, True)
        if not out:
            out = StringIO.StringIO()
        self.out = out
        self.handler = DispatchHandler(self.out, encoding)
        self.parser.setContentHandler(self.handler)
        self.parser.setProperty(xs.handler.property_lexical_handler, self.handler)

    def feed(self, chunk):
        """Feeds a chunk of the XML document to the underlying parser."""
        self.parser.feed(chunk)

    def register_default(self, handler):
        self.handler.register_default(handler)

    def unregister_default(self):
        self.handler.unregister_default()

    def register_default_start_element(self, handler):
        self.handler.register_default_start_element(handler)

    def unregister_default_start_element(self):
        self.handler.unregister_default_start_element()

    def reset(self):
        """Resets the handler and the parser for a new document."""
        self.handler.reset()
        self.parser.reset()

    def disable_dispatching(self):
        self.handler.disable_dispatching()

    def enable_dispatching(self):
        self.handler.enable_dispatching()

    def register_at_level(self, level, dispatcher):
        """Registers a dispatcher at a given level within the
        XML tree of elements being built.

        The ``level``, an integer, is zero-based. So the root
        element of the XML tree is 0 and its direct children
        are at level 1.

        The ``dispatcher`` is a callable object only taking
        one parameter, a Element instance.
        """
        self.handler.register_at_level(level, dispatcher)

    def unregister_at_level(self, level):
        """Unregisters a dispatcher at a given level
        """
        # BUG FIX: the original forwarded an undefined name ``dispatcher``
        # as a second argument, raising NameError on every call.
        self.handler.unregister_at_level(level)

    def register_on_element(self, local_name, dispatcher, namespace=None):
        """Registers a dispatcher on a given element met during
        the parsing.

        The ``local_name`` is the local name of the element. This
        element can be namespaced if you provide the ``namespace``
        parameter.

        The ``dispatcher`` is a callable object only taking
        one parameter, a Element instance.
        """
        self.handler.register_on_element(local_name, dispatcher, namespace)

    def unregister_on_element(self, local_name, namespace=None):
        """Unregisters a dispatcher for a specific element.
        """
        self.handler.unregister_on_element(local_name, namespace)

    def register_on_element_per_level(self, local_name, level, dispatcher, namespace=None):
        """Registers a dispatcher at a given level within the
        XML tree of elements being built as well as for a
        specific element.

        The ``level``, an integer, is zero-based. So the root
        element of the XML tree is 0 and its direct children
        are at level 1.

        The ``local_name`` is the local name of the element. This
        element can be namespaced if you provide the ``namespace``
        parameter.

        The ``dispatcher`` is a callable object only taking
        one parameter, a Element instance.
        """
        self.handler.register_on_element_per_level(local_name, level, dispatcher, namespace)

    def unregister_on_element_per_level(self, local_name, level, namespace=None):
        """Unregisters a dispatcher at a given level for a specific
        element.
        """
        self.handler.unregister_on_element_per_level(local_name, level, namespace)

    def register_by_path(self, path, dispatcher):
        self.handler.register_by_path(path, dispatcher)

    def unregister_by_path(self, path):
        self.handler.unregister_by_path(path)
| Lawouach/bridge | bridge/parser/bridge_default.py | Python | bsd-3-clause | 21,896 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the ``available_size_kb`` column to
    the ``physical_diskoffering`` table."""
    def forwards(self, orm):
        """Applies the migration."""
        # Adding field 'DiskOffering.available_size_kb'
        db.add_column(u'physical_diskoffering', 'available_size_kb',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)
    def backwards(self, orm):
        """Reverts the migration."""
        # Deleting field 'DiskOffering.available_size_kb'
        db.delete_column(u'physical_diskoffering', 'available_size_kb')
    # Frozen ORM state auto-generated by South; it is used to build the
    # ``orm`` object handed to forwards()/backwards().  Do not edit by hand.
    models = {
        u'physical.databaseinfra': {
            'Meta': {'object_name': 'DatabaseInfra'},
            'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
            'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'physical.diskoffering': {
            'Meta': {'object_name': 'DiskOffering'},
            'available_size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.engine': {
            'Meta': {'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'physical.enginetype': {
            'Meta': {'object_name': 'EngineType'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environment': {
            'Meta': {'object_name': 'Environment'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'equivalent_environment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.host': {
            'Meta': {'object_name': 'Host'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.instance': {
            'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
            'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_arbiter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.plan': {
            'Meta': {'object_name': 'Plan'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
            'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['physical.Environment']", 'symmetrical': 'False'}),
            'equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Plan']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.planattribute': {
            'Meta': {'object_name': 'PlanAttribute'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.replicationtopology': {
            'Meta': {'object_name': 'ReplicationTopology'},
            'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }
complete_apps = ['physical'] | globocom/database-as-a-service | dbaas/physical/migrations/0025_auto__add_field_diskoffering_available_size_kb.py | Python | bsd-3-clause | 11,926 |
# -*- coding: utf-8 -*-
#
# flask_slack documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 19 16:31:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'flask_slack'
copyright = u'2014, VeryCB'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.5'
# The full version, including alpha/beta/rc tags.
release = '0.1.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'index_logo': None
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'flask_slackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'flask_slack.tex', u'flask\\_slack Documentation',
     u'VeryCB', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'flask_slack', u'flask_slack Documentation',
     [u'VeryCB'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'flask_slack', u'flask_slack Documentation',
     u'VeryCB', 'flask_slack',
     # FIX: replaced the sphinx-quickstart placeholder
     # 'One line description of project.' with a real description.
     'Slack extension for the Flask web framework.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| VeryCB/flask-slack | docs/conf.py | Python | bsd-3-clause | 8,287 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import json
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.core import util
# Paths to the Chromium src/ root and to the bundled WebGL conformance suite.
src_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
conformance_path = os.path.join(src_path, 'third_party', 'webgl_conformance')
# JavaScript injected into every conformance page at commit time.  It installs
# a reporting harness (window.webglTestHarness) that the test pages call into;
# the Python side later reads its fields via EvaluateJavaScript.
conformance_harness_script = r"""
var testHarness = {};
testHarness._allTestSucceeded = true;
testHarness._messages = '';
testHarness._failures = 0;
testHarness._finished = false;
testHarness.reportResults = function(success, msg) {
testHarness._allTestSucceeded = testHarness._allTestSucceeded && !!success;
if(!success) {
testHarness._failures++;
if(msg) {
testHarness._messages += msg + "\n";
}
}
};
testHarness.notifyFinished = function() {
testHarness._finished = true;
};
testHarness.navigateToPage = function(src) {
var testFrame = document.getElementById("test-frame");
testFrame.src = src;
};
window.webglTestHarness = testHarness;
window.parent.webglTestHarness = testHarness;
console.log("Harness injected.");
"""
def _DidWebGLTestSucceed(tab):
return tab.EvaluateJavaScript('webglTestHarness._allTestSucceeded')
def _WebGLTestMessages(tab):
return tab.EvaluateJavaScript('webglTestHarness._messages')
class WebGLConformanceTest(page_test.PageTest):
  """Telemetry page test that runs the bundled WebGL conformance suite.

  Each conformance page gets the reporting harness injected on commit;
  ValidatePage then reads the harness state to decide pass/fail.
  """
  def __init__(self):
    super(WebGLConformanceTest, self).__init__('ValidatePage')
  def CreatePageSet(self, options):
    """Builds a page set with one page per conformance test file."""
    tests = WebGLConformanceTest._ParseTests('00_test_list.txt', '1.0.1')
    page_set_dict = {
      'description': 'Executes WebGL conformance tests',
      'user_agent_type': 'desktop',
      'serving_dirs': [
        '../../../../third_party/webgl_conformance'
      ],
      'pages': []
    }
    pages = page_set_dict['pages']
    for test in tests:
      pages.append({
        'url': 'file:///../../../../third_party/webgl_conformance/' + test,
        'script_to_evaluate_on_commit': conformance_harness_script,
        # Block until the harness signals completion.
        'wait_for_javascript_expression': 'webglTestHarness._finished'
      })
    return page_set.PageSet.FromDict(page_set_dict, __file__)
  def ValidatePage(self, page, tab, results):
    """Records success or the harness' failure messages for one page."""
    if _DidWebGLTestSucceed(tab):
      results.AddSuccess(page)
    else:
      results.AddFailureMessage(page, _WebGLTestMessages(tab))
  def CustomizeBrowserOptions(self, options):
    options.AppendExtraBrowserArg('--enable-webgl')
  @staticmethod
  def _ParseTests(path, version = None):
    """Recursively parses a 00_test_list.txt-style manifest.

    Returns the list of test paths (relative to the conformance root),
    skipping entries whose --min-version exceeds ``version``.
    """
    test_paths = []
    current_dir = os.path.dirname(path)
    full_path = os.path.normpath(os.path.join(conformance_path, path))
    if not os.path.exists(full_path):
      raise Exception('The WebGL conformance test path specified ' +
                      'does not exist: ' + full_path)
    with open(full_path, 'r') as f:
      for line in f:
        line = line.strip()
        if not line:
          continue
        if line.startswith('//') or line.startswith('#'):
          continue
        line_tokens = line.split(' ')
        i = 0
        min_version = None
        while i < len(line_tokens):
          token = line_tokens[i]
          if token == '--min-version':
            i += 1
            min_version = line_tokens[i]
          i += 1
        # NOTE(review): this is a lexicographic string comparison; it works
        # for versions like '1.0.1' vs '1.0.2' but would misorder e.g.
        # '1.10' vs '1.9' -- confirm acceptable for the suite's versioning.
        if version and min_version and version < min_version:
          continue
        test_name = line_tokens[-1]
        # A .txt entry is a nested manifest; recurse into it.
        if '.txt' in test_name:
          include_path = os.path.join(current_dir, test_name)
          test_paths += WebGLConformanceTest._ParseTests(
              include_path, version)
        else:
          test = os.path.join(current_dir, test_name)
          test_paths.append(test)
    return test_paths
| loopCM/chromium | content/test/gpu/gpu_tests/webgl_conformance_test.py | Python | bsd-3-clause | 3,821 |
#!/usr/bin/env python
# coding=UTF-8
__author__ = "Pierre-Yves Langlois"
__copyright__ = "https://github.com/pylanglois/uwsa/blob/master/LICENCE"
__credits__ = ["Pierre-Yves Langlois"]
__license__ = "BSD"
__maintainer__ = "Pierre-Yves Langlois"
from uwsas.common import *
from uwsas.commands.abstract_command import AbstractCommand
class CommandManager(AbstractCommand):
    """Top-level command dispatcher for the uwsa tool."""

    NAME = 'CommandManager'

    # Usage text shown to the user; %s is filled in with the known commands.
    _USAGE = """
        Usage: uwsa cmd param
        where cmd in %s
        """

    def __init__(self):
        AbstractCommand.__init__(self)
        self.help = t(self._USAGE)

    def get_log_name(self):
        """Name of the logger this command writes to."""
        return 'uwsas'
# Module-level singleton instance used by the rest of uwsas.
cmanager = CommandManager()
| pylanglois/uwsa | uwsas/commands/command_manager.py | Python | bsd-3-clause | 619 |
# Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <michaelpeng@tencent.com>
# Date: January 09, 2012
"""
This is the configuration parse module which parses
the BLADE_ROOT as a configuration file.
"""
import os
import sys
import console
from blade_util import var_to_list
from cc_targets import HEAP_CHECK_VALUES
from proto_library_target import ProtocPlugin
# Global config object
blade_config = None
def config_items(**kwargs):
    """Collect keyword arguments into a dict.

    Used in config functions for config file, to construct an appended
    items dict, and then make syntax more pretty.
    """
    return dict(kwargs)
class BladeConfig(object):
"""BladeConfig. A configuration parser class. """
    def __init__(self, current_source_dir):
        """Initializes the parser with built-in defaults for every
        supported config section; user config files later override or
        append onto these via update_config()."""
        self.current_source_dir = current_source_dir
        self.current_file_name = ''
        # Maps section name -> dict of item name -> default value.  The
        # value's type (list vs scalar) also drives how user values are
        # merged: list items are appended, scalars are replaced.
        self.configs = {
            'global_config' : {
                'build_path_template': 'build${m}_${profile}',
                'duplicated_source_action': 'warning', # Can be 'warning', 'error', 'none'
                'test_timeout': None,
            },
            'cc_test_config': {
                'dynamic_link': False,
                'heap_check': '',
                'gperftools_libs': [],
                'gperftools_debug_libs': [],
                'gtest_libs': [],
                'gtest_main_libs': [],
                'pprof_path': '',
            },
            'cc_binary_config': {
                'extra_libs': [],
                'run_lib_paths' : [],
            },
            'distcc_config': {
                'enabled': False
            },
            'link_config': {
                'link_on_tmp': False,
                'enable_dccc': False
            },
            'java_config': {
                'version': '1.6',
                'source_version': '',
                'target_version': '',
                'maven': 'mvn',
                'maven_central': '',
                'warnings':['-Werror', '-Xlint:all'],
                'source_encoding': None,
                'java_home':''
            },
            'java_binary_config': {
                'one_jar_boot_jar' : '',
            },
            'java_test_config': {
                'junit_libs' : [],
                'jacoco_home' : '',
                'coverage_reporter' : '',
            },
            'scala_config': {
                'scala_home' : '',
                'target_platform' : '',
                'warnings' : '',
                'source_encoding' : None,
            },
            'scala_test_config': {
                'scalatest_libs' : '',
            },
            'go_config' : {
                'go' : '',
                'go_home' : '', # GOPATH
            },
            'thrift_config': {
                'thrift': 'thrift',
                'thrift_libs': [],
                'thrift_incs': [],
            },
            'fbthrift_config': {
                'fbthrift1': 'thrift1',
                'fbthrift2': 'thrift2',
                'fbthrift_libs': [],
                'fbthrift_incs': [],
            },
            'proto_library_config': {
                'protoc': 'thirdparty/protobuf/bin/protoc',
                'protoc_java': '',
                'protobuf_libs': [],
                'protobuf_path': '',
                'protobuf_incs': [],
                'protobuf_php_path': '',
                'protoc_php_plugin': '',
                'protobuf_java_libs' : [],
                'protoc_go_plugin': '',
                # All the generated go source files will be placed
                # into $GOPATH/src/protobuf_go_path
                'protobuf_go_path': '',
            },
            'protoc_plugin_config' : {
            },
            'cc_config': {
                'extra_incs': [],
                'cppflags': [],
                'cflags': [],
                'cxxflags': [],
                'linkflags': [],
                'c_warnings': [],
                'cxx_warnings': [],
                'warnings': [],
                'cpplint': 'cpplint.py',
                'optimize': [],
                'benchmark_libs': [],
                'benchmark_main_libs': [],
                'securecc' : None,
            },
            'cc_library_config': {
                'generate_dynamic' : None,
                # Options passed to ar/ranlib to control how
                # the archive is created, such as, let ar operate
                # in deterministic mode discarding timestamps
                'arflags': [],
                'ranlibflags': [],
            }
        }
    def _try_parse_file(self, filename):
        """Execute the config file *filename* if it exists.

        The file is run with execfile() so that the config functions defined
        at module level (cc_config, global_config, ...) are invoked directly
        by the file's contents.  A SystemExit raised from inside a config
        function (via console.error_exit) is reported as a parse error.
        """
        try:
            # Record which file is being parsed so that error messages
            # emitted by the config functions can name it.
            self.current_file_name = filename
            if os.path.exists(filename):
                execfile(filename)
        except SystemExit:
            console.error_exit('Parse error in config file %s, exit...' % filename)
def parse(self):
"""load the configuration file and parse. """
self._try_parse_file(os.path.join(os.path.dirname(sys.argv[0]), 'blade.conf'))
self._try_parse_file(os.path.expanduser('~/.bladerc'))
self._try_parse_file(os.path.join(self.current_source_dir, 'BLADE_ROOT'))
def update_config(self, section_name, append, user_config):
"""update config section by name. """
config = self.configs.get(section_name, {})
if config:
if append:
self._append_config(section_name, config, append)
self._replace_config(section_name, config, user_config)
else:
console.error('%s: %s: unknown config section name' % (
self.current_file_name, section_name))
def _append_config(self, section_name, config, append):
"""Append config section items"""
if not isinstance(append, dict):
console.error('%s: %s: append must be a dict' %
(self.current_file_name, section_name))
else:
for k in append:
if k in config:
if isinstance(config[k], list):
config[k] += var_to_list(append[k])
else:
console.warning('%s: %s: config item %s is not a list' %
(self.current_file_name, section_name, k))
else:
console.warning('%s: %s: unknown config item name: %s' %
(self.current_file_name, section_name, k))
def _replace_config(self, section_name, config, user_config):
"""Replace config section items"""
unknown_keys = []
for k in user_config:
if k in config:
if isinstance(config[k], list):
user_config[k] = var_to_list(user_config[k])
else:
console.warning('%s: %s: unknown config item name: %s' %
(self.current_file_name, section_name, k))
unknown_keys.append(k)
for k in unknown_keys:
del user_config[k]
config.update(user_config)
def get_config(self, section_name):
"""get config section, returns default values if not set """
return self.configs.get(section_name, {})
def cc_test_config(append=None, **kwargs):
    """Parse the cc_test_config section of a config file."""
    heap_check = kwargs.get('heap_check')
    # heap_check is optional, but when given it must be a known value.
    if not (heap_check is None or heap_check in HEAP_CHECK_VALUES):
        console.error_exit('cc_test_config: heap_check can only be in %s' %
                           HEAP_CHECK_VALUES)
    blade_config.update_config('cc_test_config', append, kwargs)
def cc_binary_config(append=None, **kwargs):
    """Parse the cc_binary_config section of a config file."""
    section = 'cc_binary_config'
    blade_config.update_config(section, append, kwargs)
def cc_library_config(append=None, **kwargs):
    """Parse the cc_library_config section of a config file."""
    section = 'cc_library_config'
    blade_config.update_config(section, append, kwargs)
__DUPLICATED_SOURCE_ACTION_VALUES = set(['warning', 'error', 'none', None])
def global_config(append=None, **kwargs):
    """Parse the global_config section of a config file."""
    action = kwargs.get('duplicated_source_action')
    # None means "not supplied" and is explicitly allowed.
    if action not in __DUPLICATED_SOURCE_ACTION_VALUES:
        console.error_exit('Invalid global_config.duplicated_source_action '
                           'value, can only be in %s' % __DUPLICATED_SOURCE_ACTION_VALUES)
    blade_config.update_config('global_config', append, kwargs)
def distcc_config(append=None, **kwargs):
    """Parse the distcc_config section of a config file."""
    blade_config.update_config('distcc_config', append, kwargs)
def link_config(append=None, **kwargs):
    """Parse the link_config section of a config file."""
    blade_config.update_config('link_config', append, kwargs)
def java_config(append=None, **kwargs):
    """Parse the java_config section of a config file."""
    blade_config.update_config('java_config', append, kwargs)
def java_binary_config(append=None, **kwargs):
    """Parse the java_binary_config section of a config file."""
    blade_config.update_config('java_binary_config', append, kwargs)
def java_test_config(append=None, **kwargs):
    """Parse the java_test_config section of a config file."""
    blade_config.update_config('java_test_config', append, kwargs)
def scala_config(append=None, **kwargs):
    """Parse the scala_config section of a config file."""
    blade_config.update_config('scala_config', append, kwargs)
def scala_test_config(append=None, **kwargs):
    """Parse the scala_test_config section of a config file."""
    blade_config.update_config('scala_test_config', append, kwargs)
def go_config(append=None, **kwargs):
    """Parse the go_config section of a config file."""
    blade_config.update_config('go_config', append, kwargs)
def proto_library_config(append=None, **kwargs):
    """Parse the proto_library_config section of a config file.

    Transparently migrates the deprecated scalar 'protobuf_include_path'
    item into the list-valued 'protobuf_incs'.
    """
    legacy_path = kwargs.get('protobuf_include_path')
    if legacy_path:
        console.warning(('%s: proto_library_config: protobuf_include_path has '
                         'been renamed to protobuf_incs, and become a list') %
                        blade_config.current_file_name)
        del kwargs['protobuf_include_path']
        if isinstance(legacy_path, basestring) and ' ' in legacy_path:
            kwargs['protobuf_incs'] = legacy_path.split()
        else:
            kwargs['protobuf_incs'] = [legacy_path]
    blade_config.update_config('proto_library_config', append, kwargs)
def protoc_plugin(**kwargs):
    """Register a protoc plugin in protoc_plugin_config, keyed by name."""
    if 'name' not in kwargs:
        console.error_exit("Missing 'name' in protoc_plugin parameters: %s" % kwargs)
    section = blade_config.get_config('protoc_plugin_config')
    section[kwargs['name']] = ProtocPlugin(**kwargs)
def thrift_library_config(append=None, **kwargs):
    """Parse the thrift_config section of a config file."""
    blade_config.update_config('thrift_config', append, kwargs)
def fbthrift_library_config(append=None, **kwargs):
    """Parse the fbthrift_config section of a config file."""
    blade_config.update_config('fbthrift_config', append, kwargs)
def cc_config(append=None, **kwargs):
    """Parse the cc_config section (extra compiler configuration).

    A deprecated space-separated string for 'extra_incs' is migrated
    to a list with a warning.
    """
    extra_incs = kwargs.get('extra_incs')
    if extra_incs is not None:
        if isinstance(extra_incs, basestring) and ' ' in extra_incs:
            console.warning('%s: cc_config: extra_incs has been changed to list' %
                            blade_config.current_file_name)
            kwargs['extra_incs'] = extra_incs.split()
    blade_config.update_config('cc_config', append, kwargs)
| project-zerus/blade | src/blade/configparse.py | Python | bsd-3-clause | 11,273 |
import tests.periodicities.period_test as per

# Exercise the cycle-detection model: cycle length 60, daily frequency,
# 100 data points.
cycle_spec = (60, 'D', 100)
per.buildModel(cycle_spec)
| antoinecarme/pyaf | tests/periodicities/Day/Cycle_Day_100_D_60.py | Python | bsd-3-clause | 82 |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import weakref
import IECore
import Gaffer
import GafferUI
def appendDefinitions( menuDefinition, prefix ) :
    """Add the standard application menu items under the given prefix."""
    items = (
        ( "/About Gaffer...", { "command" : about } ),
        ( "/Preferences...", { "command" : preferences } ),
        ( "/Documentation...", { "command" : IECore.curry( GafferUI.showURL, os.path.expandvars( "$GAFFER_ROOT/doc/gaffer/html/index.html" ) ) } ),
        ( "/Quit", { "command" : quit, "shortCut" : "Ctrl+Q" } ),
    )
    for suffix, kw in items :
        menuDefinition.append( prefix + suffix, kw )
def quit( menu ) :
    """Quit the application, first prompting about any unsaved changes."""
    scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
    application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )

    unsavedNames = []
    for script in application["scripts"].children() :
        if not script["unsavedChanges"].getValue() :
            continue
        fileName = script["fileName"].getValue()
        unsavedNames.append( fileName.rpartition( "/" )[2] if fileName else "untitled" )

    if unsavedNames :
        dialogue = GafferUI.ConfirmationDialogue(
            "Discard Unsaved Changes?",
            "The following files have unsaved changes : \n\n" +
            "\n".join( [ " - " + n for n in unsavedNames ] ) +
            "\n\nDo you want to discard the changes and quit?",
            confirmLabel = "Discard and Quit"
        )
        if not dialogue.waitForConfirmation( parentWindow=scriptWindow ) :
            return

    # Defer the actual removal of scripts till an idle event - removing all
    # the scripts will result in the removal of the window our menu item is
    # parented to, which would cause a crash as it's deleted away from over us.
    GafferUI.EventLoop.addIdleCallback( IECore.curry( __removeAllScripts, application ) )
def __removeAllScripts( application ) :
    """Idle callback that closes every open script.

    Returning False removes the callback after a single invocation.
    """
    scripts = application["scripts"]
    for script in scripts.children() :
        scripts.removeChild( script )
    return False
# Weak reference to the (single) About window, so it is reused while alive.
__aboutWindow = None
def about( menu ) :
    """Show the About window, creating it lazily."""
    global __aboutWindow

    window = __aboutWindow() if __aboutWindow is not None else None
    if window is None :
        window = GafferUI.AboutWindow( Gaffer.About )
        __aboutWindow = weakref.ref( window )

    scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
    scriptWindow.addChildWindow( window )
    window.setVisible( True )
# One preferences window per application, held weakly so that closing the
# application releases the window.
__preferencesWindows = weakref.WeakKeyDictionary()
def preferences( menu ) :
    """Show the Preferences dialogue for the current application."""
    scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
    application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )

    global __preferencesWindows
    windowRef = __preferencesWindows.get( application, None )
    window = windowRef() if windowRef is not None else None
    if window is None :
        window = GafferUI.Dialogue( "Preferences" )
        closeButton = window._addButton( "Close" )
        window.__closeButtonConnection = closeButton.clickedSignal().connect( __closePreferences )
        saveButton = window._addButton( "Save" )
        window.__saveButtonConnection = saveButton.clickedSignal().connect( __savePreferences )
        nodeUI = GafferUI.NodeUI.create( application["preferences"] )
        window._setWidget( nodeUI )
        __preferencesWindows[application] = weakref.ref( window )

    scriptWindow.addChildWindow( window )
    window.setVisible( True )
def __closePreferences( button ) :
    """Hide the Preferences window without saving."""
    window = button.ancestor( type=GafferUI.Window )
    window.setVisible( False )
def __savePreferences( button ) :
    """Persist the application preferences, then hide the window."""
    scriptWindow = button.ancestor( GafferUI.ScriptWindow )
    application = scriptWindow.scriptNode().ancestor( Gaffer.ApplicationRoot )
    application.savePreferences()
    window = button.ancestor( type=GafferUI.Window )
    window.setVisible( False )
| cedriclaunay/gaffer | python/GafferUI/ApplicationMenu.py | Python | bsd-3-clause | 5,283 |
import string
from random import choice
from django.contrib.auth.models import User
def get_random_id():
    """Return a username of the form AA12345678 that is not yet taken.

    Builds candidates from two random uppercase letters followed by eight
    random digits and loops until one does not collide with an existing
    User.  Raises on database errors instead of silently retrying.
    """
    while True:
        letters = ''.join(choice(string.ascii_uppercase) for _ in range(2))
        digits = ''.join(choice(string.digits) for _ in range(8))
        candidate = u'%s%s' % (letters, digits)
        try:
            User.objects.get(username=candidate)
        except User.DoesNotExist:
            # The bare `except:` this replaces also swallowed database
            # errors, treating any failure as "name is free".  Only a
            # genuinely missing user means the candidate is available.
            return candidate
| pocketone/django-shoppy | shoppy/util/randomuserid.py | Python | bsd-3-clause | 499 |
from __future__ import unicode_literals
import numpy as np
import param
from ..core import (HoloMap, DynamicMap, CompositeOverlay, Layout,
GridSpace, NdLayout, Store)
from ..core.util import (match_spec, is_number, wrap_tuple, basestring,
get_overlay_spec, unique_iterator, safe_unicode)
def displayable(obj):
    """
    Return True if obj obeys the recommended nesting hierarchy and can
    therefore be displayed directly, False otherwise.
    """
    if isinstance(obj, HoloMap):
        # A HoloMap may not contain container types.
        return obj.type not in [Layout, GridSpace, NdLayout]
    if isinstance(obj, (GridSpace, Layout, NdLayout)):
        return all(displayable(el) for el in obj.values())
    return True
# Minimal Parameterized subclass used purely for its warning() machinery.
# NOTE: deliberately shadows the builtin Warning within this module.
class Warning(param.Parameterized): pass
# Shared instance through which all user-facing warnings below are emitted.
display_warning = Warning(name='Warning')
def collate(obj):
    """
    Collate a HoloMap of containers (or a (Nd)Layout holding such
    HoloMaps) into the recommended nesting structure, warning the user
    that the supplied structure is deprecated.

    Raises an Exception with an explanatory message for objects that
    cannot be collated.
    """
    if isinstance(obj, HoloMap):
        display_warning.warning("Nesting %ss within a HoloMap makes it difficult "
                                "to access your data or control how it appears; "
                                "we recommend calling .collate() on the HoloMap "
                                "in order to follow the recommended nesting "
                                "structure shown in the Composing Data tutorial"
                                "(http://git.io/vtIQh)" % obj.type.__name__)
        return obj.collate()
    elif isinstance(obj, (Layout, NdLayout)):
        try:
            display_warning.warning(
                "Layout contains HoloMaps which are not nested in the "
                "recommended format for accessing your data; calling "
                ".collate() on these objects will resolve any violations "
                "of the recommended nesting presented in the Composing Data "
                "tutorial (http://git.io/vqs03)")
            expanded = []
            for el in obj.values():
                if isinstance(el, HoloMap) and not displayable(el):
                    collated_layout = Layout.from_values(el.collate())
                    expanded.extend(collated_layout.values())
            return Layout(expanded)
        except Exception:
            # Was a bare `except:`, which also masked KeyboardInterrupt and
            # SystemExit; only genuine errors should become the friendly
            # "undisplayable" message.
            raise Exception(undisplayable_info(obj))
    else:
        raise Exception(undisplayable_info(obj))
def undisplayable_info(obj, html=False):
    """Generate a helpful message regarding an undisplayable object.

    When html=True the message is wrapped in simple HTML markup for
    notebook display.
    """
    collate = '<tt>collate</tt>' if html else 'collate'
    info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)"
    if isinstance(obj, HoloMap):
        error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__
        remedy = "Please call the %s method to generate a displayable object" % collate
    elif isinstance(obj, Layout):
        error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed."
        remedy = "Please call the %s method on the appropriate elements." % collate
    elif isinstance(obj, GridSpace):
        error = "GridSpace containing HoloMaps of Layouts cannot be displayed."
        remedy = "Please call the %s method on the appropriate elements." % collate
    else:
        # Previously any other type fell through and raised a NameError on
        # the unbound `error`/`remedy` names; give a generic message instead.
        error = "%s object cannot be displayed." % type(obj).__name__
        remedy = "Please call the %s method on the appropriate elements." % collate
    if not html:
        return '\n'.join([error, remedy, info])
    else:
        return "<center>{msg}</center>".format(msg=('<br>'.join(
            ['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
def compute_sizes(sizes, size_fn, scaling_factor, scaling_method, base_size):
    """
    Scale point sizes by a scaling factor and base size, applying
    size_fn to the raw sizes first.  scaling_method may be 'area'
    (factor applied as given) or 'width' (factor squared so that the
    rendered width, not area, scales linearly).
    """
    if scaling_method == 'width':
        factor = scaling_factor ** 2
    elif scaling_method == 'area':
        factor = scaling_factor
    else:
        raise ValueError(
            'Invalid value for argument "scaling_method": "{}". '
            'Valid values are: "width", "area".'.format(scaling_method))
    return base_size * factor * size_fn(sizes)
def get_sideplot_ranges(plot, element, main, ranges):
    """
    Utility to find the range for an adjoined
    plot given the plot, the element, the
    Element the plot is adjoined to and the
    dictionary of ranges.

    Returns a (range_item, main_range, dim) triple: the element whose
    style should be looked up, the numeric range along dim, and the
    dimension name itself.
    """
    key = plot.current_key
    dims = element.dimensions(label=True)
    # Use the second dimension unless it is the synthetic 'Frequency'
    # dimension (histogram case), where the first holds the data.
    dim = dims[1] if dims[1] != 'Frequency' else dims[0]
    range_item = main
    if isinstance(main, HoloMap):
        if issubclass(main.type, CompositeOverlay):
            # Pick the overlay layer that actually declares this dimension.
            range_item = [hm for hm in main.split_overlays()[1]
                          if dim in hm.dimensions('all', label=True)][0]
    else:
        # Wrap a bare element so the range lookup below is uniform.
        range_item = HoloMap({0: main}, kdims=['Frame'])
    ranges = match_spec(range_item.last, ranges)
    if dim in ranges:
        main_range = ranges[dim]
    else:
        # No precomputed range: use the current frame's range under
        # framewise normalization, otherwise the range over all frames.
        framewise = plot.lookup_options(range_item.last, 'norm').options.get('framewise')
        if framewise and range_item.get(key, False):
            main_range = range_item[key].range(dim)
        else:
            main_range = range_item.range(dim)
    # If .main is an NdOverlay or a HoloMap of Overlays get the correct style
    if isinstance(range_item, HoloMap):
        range_item = range_item.last
    if isinstance(range_item, CompositeOverlay):
        range_item = [ov for ov in range_item
                      if dim in ov.dimensions('all', label=True)][0]
    return range_item, main_range, dim
def within_range(range1, range2):
    """Checks whether range1 is within the range specified by range2.

    A None bound on either side is treated as unbounded and always
    satisfies the comparison.
    """
    lower_ok = range1[0] is None or range2[0] is None or range1[0] >= range2[0]
    upper_ok = range1[1] is None or range2[1] is None or range1[1] <= range2[1]
    return lower_ok and upper_ok
def validate_sampled_mode(holomaps, dynmaps):
    """Validate that sampled-mode DynamicMaps are compatible with the
    HoloMaps defining the sampling: their key dimensions must be a subset
    of the HoloMap dimensions and the HoloMap keys must lie within the
    DynamicMap ranges."""
    composite = HoloMap(enumerate(holomaps), kdims=['testing_kdim'])
    holomap_kdims = set(unique_iterator([kd.name for dm in holomaps for kd in dm.kdims]))
    hmranges = {d: composite.range(d) for d in holomap_kdims}
    if any(not set(d.name for d in dm.kdims) <= holomap_kdims
           for dm in dynmaps):
        raise Exception('In sampled mode DynamicMap key dimensions must be a '
                        'subset of dimensions of the HoloMap(s) defining the sampling.')
    elif not all(within_range(hmrange, dm.range(d)) for dm in dynmaps
                 for d, hmrange in hmranges.items() if d in dm.kdims):
        raise Exception('HoloMap(s) have keys outside the ranges specified on '
                        'the DynamicMap(s).')
def get_dynamic_mode(composite):
    "Returns the common mode of the dynamic maps in given composite object"
    # Collect all DynamicMaps and HoloMaps nested anywhere in the composite.
    dynmaps = composite.traverse(lambda x: x, [DynamicMap])
    holomaps = composite.traverse(lambda x: x, ['HoloMap'])
    dynamic_modes = [m.call_mode for m in dynmaps]
    dynamic_sampled = any(m.sampled for m in dynmaps)
    if holomaps:
        # HoloMaps define the sampling; ensure the DynamicMaps agree with it.
        validate_sampled_mode(holomaps, dynmaps)
    elif dynamic_sampled and not holomaps:
        raise Exception("DynamicMaps in sampled mode must be displayed alongside "
                        "a HoloMap to define the sampling.")
    if len(set(dynamic_modes)) > 1:
        raise Exception("Cannot display composites of DynamicMap objects "
                        "with different interval modes (i.e open or bounded mode).")
    elif dynamic_modes and not holomaps:
        # 'key' call mode maps to bounded display, everything else to open.
        return 'bounded' if dynamic_modes[0] == 'key' else 'open', dynamic_sampled
    else:
        return None, dynamic_sampled
def initialize_sampled(obj, dimensions, key):
    """
    Initialize any DynamicMaps in sampled mode by selecting the given
    key along the supplied dimensions.
    """
    selection = {dim.name: val for dim, val in zip(dimensions, key)}
    try:
        obj.select([DynamicMap], **selection)
    except KeyError:
        # Keys outside the sampled range are simply ignored.
        pass
def save_frames(obj, filename, fmt=None, backend=None, options=None):
    """
    Utility to export object to files frame by frame, numbered individually.
    Will use default backend and figure format by default.
    """
    if backend is None:
        backend = Store.current_backend
    renderer = Store.renderers[backend]
    if fmt is None:
        fmt = renderer.params('fig').objects[0]
    plot = renderer.get_plot(obj)
    for frame in range(len(plot)):
        plot.update(frame)
        renderer.save(plot, '%s_%s' % (filename, frame), fmt=fmt, options=options)
def dynamic_update(plot, subplot, key, overlay, items):
    """
    Given a plot, subplot and dynamically generated (Nd)Overlay
    find the closest matching Element for that plot.
    """
    target = get_overlay_spec(overlay, wrap_tuple(key), subplot.current_frame)
    candidates = [(idx, get_overlay_spec(overlay, wrap_tuple(k), el))
                  for idx, (k, el) in enumerate(items)]
    return closest_match(target, candidates)
def closest_match(match, specs, depth=0):
    """
    Recursively iterates over type, group, label and overlay key,
    finding the closest matching spec.

    specs is a list of (index, spec-tuple) pairs; the index of the best
    match is returned, or None when nothing matches at the top level.
    """
    new_specs = []
    match_lengths = []
    for i, spec in specs:
        if spec[0] == match[0]:
            # Exact match on this component: keep for the next level.
            new_specs.append((i, spec[1:]))
        else:
            # Score near-misses: numeric distance for numbers, longest
            # common prefix for strings, zero otherwise.
            if is_number(match[0]) and is_number(spec[0]):
                match_length = -abs(match[0]-spec[0])
            elif all(isinstance(s[0], basestring) for s in [spec, match]):
                match_length = max(i for i in range(len(match[0]))
                                   if match[0].startswith(spec[0][:i]))
            else:
                match_length = 0
            match_lengths.append((i, match_length, spec[0]))

    if len(new_specs) == 1:
        return new_specs[0][0]
    elif new_specs:
        # Several exact matches: recurse on the next spec component.
        depth = depth+1
        return closest_match(match[1:], new_specs, depth)
    else:
        if depth == 0 or not match_lengths:
            return None
        else:
            # Fall back to the highest-scoring near-miss.
            return sorted(match_lengths, key=lambda x: -x[1])[0][0]
def map_colors(arr, crange, cmap, hex=True):
    """
    Map an array of values through a colormap, normalizing by the given
    color range (or the finite data extent when crange is falsy).
    Returns RGB hex strings when hex=True, otherwise the raw RGBA array.
    """
    if crange:
        cmin, cmax = crange
    else:
        cmin, cmax = np.nanmin(arr), np.nanmax(arr)
    normed = (arr - cmin) / (cmax - cmin)
    # Mask non-finite values so the colormap ignores them.
    masked = np.ma.array(normed, mask=np.logical_not(np.isfinite(normed)))
    colors = cmap(masked)
    if not hex:
        return colors
    colors *= 255
    return ["#{0:02x}{1:02x}{2:02x}".format(*(int(v) for v in c[:-1]))
            for c in colors]
def dim_axis_label(dimensions, separator=', '):
    """
    Returns an axis label for one or more dimensions.
    """
    dims = dimensions if isinstance(dimensions, list) else [dimensions]
    return separator.join(safe_unicode(d.pprint_label) for d in dims)
| vascotenner/holoviews | holoviews/plotting/util.py | Python | bsd-3-clause | 10,673 |
from sympy.core import *
def test_rational():
    # Identical fractional powers must construct equal objects, and powers
    # with exponent > 1 must split into integer and fractional parts.
    a = Rational(1, 5)
    assert a**Rational(1, 2) == a**Rational(1, 2)
    assert 2 * a**Rational(1, 2) == 2 * a**Rational(1, 2)
    assert a**Rational(3, 2) == a * a**Rational(1, 2)
    assert 2 * a**Rational(3, 2) == 2*a * a**Rational(1, 2)
    assert a**Rational(17, 3) == a**5 * a**Rational(2, 3)
    assert 2 * a**Rational(17, 3) == 2*a**5 * a**Rational(2, 3)
def test_large_rational():
    # Cube root of a huge exact rational should simplify symbolically
    # rather than being evaluated numerically.
    e = (Rational(123712**12-1,7)+Rational(1,7))**Rational(1,3)
    assert e == 234232585392159195136 * (Rational(1,7)**Rational(1,3))
def test_negative_real():
    def feq(a,b):
        # Approximate float equality to 1e-10.
        return abs(a - b) < 1E-10
    # Dividing by a negative Real must produce the expected negative value.
    assert feq(Basic.One() / Real(-0.5), -Integer(2))
def test_expand():
    # Negative exponents should expand into a rational coefficient.
    x = Symbol('x')
    assert (2**(-1-x)).expand() == Rational(1,2)*2**(-x)
def test_issue153():
    #test that is runs:
    a = Basic.sqrt(2*(1+Basic.sqrt(2)))
def test_issue350():
    #test if powers are simplified correctly
    # For an unrestricted symbol, (a**3)**(2/5) must NOT auto-simplify to
    # a**(6/5); once `a` is declared real the simplification is valid.
    a = Symbol('a')
    assert ((a**Rational(1,3))**Rational(2)) == a**Rational(2,3)
    assert ((a**Rational(3))**Rational(2,5)) != a**Rational(6,5)
    a = Symbol('a', real = True)
    assert (a**Rational(3))**Rational(2,5) == a**Rational(6,5)
    #assert Number(5)**Rational(2,3)==Number(25)**Rational(1,3)
# Run the regression test at import time as well (historical behaviour).
test_issue350()
| certik/sympy-oldcore | sympy/core/tests/test_eval_power.py | Python | bsd-3-clause | 1,302 |
##
##
# File auto-generated against equivalent DynamicSerialize Java class
from .Property import Property
class Header(object):
    """Message header holding a flat list of Property objects.

    Mirrors the equivalent DynamicSerialize Java class.  A multimap of
    key -> list-of-values may be supplied and is flattened into
    individual Property(key, value) entries.
    """

    def __init__(self, properties=None, multimap=None):
        # Alias the caller's list when given; otherwise start empty
        # (avoiding a shared mutable default).
        self.properties = properties if properties is not None else []
        if multimap is not None:
            for key, values in multimap.items():
                for value in values:
                    self.properties.append(Property(key, value))

    def getProperties(self):
        return self.properties

    def setProperties(self, properties):
        self.properties = properties
| mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/message/Header.py | Python | bsd-3-clause | 611 |
"""
See LICENSE file for copyright and license details.
"""
from app import app
from flask import render_template, flash, redirect
#from app.forms import LoginForm
from app.modules.constant import *
@app.route("/")
@app.route("/index")
@app.route("/index/")
@app.route("/<app_profile>/index")
@app.route("/<app_profile>/index/")
@app.route("/<app_profile>")
@app.route("/<app_profile>/")
def index(app_profile = AppProfile.PERSONAL):
"""
Index page
"""
user = { 'login': 'rockwolf' } # fake user
if app_profile == '':
app_profile = 'personal'
return render_template("index.html",
title = 'Central command entity',
user = user,
app_profile = app_profile.lower())
@app.route("/report_finance")
@app.route("/report_finance/")
@app.route("/<app_profile>/report_finance")
@app.route("/<app_profile>/report_finance/")
def report_finance(app_profile = AppProfile.PERSONAL):
"""
Financial reports.
"""
# Make reports per year in pdf (gnucash) and put links to them here.
return('TBD');
@app.route("/trading_journal")
@app.route("/trading_journal/")
@app.route("/<app_profile>/trading_journal")
@app.route("/<app_profile>/trading_journal/")
def trading_journal(app_profile = AppProfile.PERSONAL):
"""
Trading Journal
"""
if app_profile == AppProfile.ZIVLE:
return render_template("trading_journal.html",
title = 'Trading Journal',
user = user,
app_profile = app_profile.lower())
else:
return render_template("404.html",
title = '404')
@app.route("/contact")
@app.route("/contact/")
@app.route("/<app_profile>/contact")
@app.route("/<app_profile>/contact/")
def contact(app_profile = AppProfile.PERSONAL):
"""
Address book.
"""
# Try to sync this with abook? Can abook export them?
return('TBD');
@app.route("/task")
@app.route("/task/")
@app.route("/<app_profile>/task")
@app.route("/<app_profile>/task/")
def task(app_profile = AppProfile.PERSONAL):
"""
Task and schedule information.
"""
# TODO: generate output of reminders and put it in a new text-file,
# e.g. remind ~/.reminders -c etc.
# TODO: where to schedule the reminders.txt generation?
if app_profile == AppProfile.ZIVLE:
task_file = TaskFile.ZIVLE
reminder_file = ReminderFile.ZIVLE
elif app_profile == AppProfile.PERSONAL:
task_file = TaskFile.PERSONAL
reminder_file = ReminderFile.PERSONAL
else:
error = true
if not error:
return render_template("task.html",
title = 'Tasks',
user = user,
app_profile = app_profile.lower(),
tasks = load_lines(task_file),
reminders = load_lines(reminder_file)
)
else:
return render_template("404.html",
title = '404')
@app.route('/login', methods = ['GET', 'POST'])
@app.route('/login/', methods = ['GET', 'POST'])
def login():
    # NOTE(review): the `from app.forms import LoginForm` import at the top
    # of this file is commented out, so hitting this route raises a
    # NameError.  Restore the import or remove the route - TODO confirm.
    form = LoginForm()
    return render_template('login.html',
        title = 'Sign In',
        form = form)
@app.route("/links")
@app.route("/links/")
@app.route("/<app_profile>/links")
@app.route("/<app_profile>/links/")
def links(app_profile = AppProfile.PERSONAL):
"""
Link bookmarks.
"""
user = { 'login': 'rockwolf' } # fake user
# Try to read from text-files and build links dynamically
# Format: data/<profile>/links.txt
# Textfile format: <url>;<name>;<description>
#TODO: put links_file in constant.py
#or find a more general way to configure files?
#links_file = 'C:\\Users\\AN\\home\\other\\Dropbox\\cece\\app\\data\\' + app_profile + '\\links.txt'
links_file = '/home/rockwolf/Dropbox/cece/app/data/' + app_profile + '/links.txt'
links_full = load_lines(links_file)
links = []
for link_full in links_full:
links.append(link_full.split(';'))
links.sort(key=lambda k: k[1])
categories = []
for link in links:
if link[1] not in categories:
categories.append(link[1])
return render_template("links.html",
title = 'Bookmarks',
user = user,
app_profile = app_profile.lower(),
categories = categories,
total = len(links),
links = links
)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page for unknown routes."""
    return render_template('404.html', title = '404'), 404
def load_lines(text_file):
    """
    Read the UTF-8 text file and return a list of whitespace-stripped lines.
    """
    with open(text_file, encoding='utf-8') as text:
        return [line.strip() for line in text]
| rockwolf/python | cece/app/views.py | Python | bsd-3-clause | 4,840 |
# -*- coding: utf-8 -*-
from datetime import datetime
import json
from urlparse import urlparse
from django.core.urlresolvers import reverse
from django.db import reset_queries
from django.http import QueryDict
from django.test.utils import override_settings
import mock
from mock import patch
from nose.tools import eq_, ok_
import mkt
import mkt.regions
from mkt.api.tests.test_oauth import RestOAuth
from mkt.developers.models import ActivityLog
from mkt.prices.models import AddonPurchase
from mkt.ratings.models import Review, ReviewFlag
from mkt.site.fixtures import fixture
from mkt.site.utils import app_factory, version_factory
from mkt.webapps.models import AddonExcludedRegion, AddonUser, Webapp
from mkt.users.models import UserProfile
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest', mock.Mock)
class TestRatingResource(RestOAuth, mkt.site.tests.MktPaths):
fixtures = fixture('user_2519', 'webapp_337141')
    def setUp(self):
        # Shared fixtures: the canonical webapp (pk 337141), a primary and a
        # secondary user, and the ratings list endpoint URL.
        super(TestRatingResource, self).setUp()
        self.app = Webapp.objects.get(pk=337141)
        self.user = UserProfile.objects.get(pk=2519)
        self.user2 = UserProfile.objects.get(pk=31337)
        self.list_url = reverse('ratings-list')
def _get_url(self, url, client=None, **kwargs):
if client is None:
client = self.client
res = client.get(url, kwargs)
data = json.loads(res.content)
return res, data
def _get_filter(self, client=None, expected_status=200, **params):
res, data = self._get_url(self.list_url, client=client, **params)
eq_(res.status_code, expected_status)
if expected_status == 200:
eq_(len(data['objects']), 1)
return res, data
    def _compare_review_data(self, client, data, review):
        # Check every serialized field of a review against the model
        # instance; 'is_author' is only exposed to authenticated clients.
        self.assertApiUrlEqual(data['app'], '/apps/app/337141/')
        eq_(data['body'], review.body)
        self.assertCloseToNow(data['created'], now=review.created)
        self.assertCloseToNow(data['modified'], now=review.modified)
        eq_(data['rating'], review.rating)
        eq_(data['report_spam'],
            reverse('ratings-flag', kwargs={'pk': review.pk}))
        eq_(data['resource_uri'],
            reverse('ratings-detail', kwargs={'pk': review.pk}))
        eq_(data['user']['display_name'], review.user.display_name)
        eq_(data['version']['version'], review.version.version)
        eq_(data['version']['resource_uri'],
            reverse('version-detail', kwargs={'pk': review.version.pk}))
        if client != self.anon:
            eq_(data['is_author'], review.user == self.user)
        else:
            ok_('is_author' not in data)
    def test_has_cors(self):
        # The list endpoint must expose CORS headers for all four verbs.
        self.assertCORS(self.client.get(self.list_url),
                        'get', 'post', 'put', 'delete')

    def test_options(self):
        # OPTIONS must advertise JSON rendering and parsing.
        res = self.anon.options(self.list_url)
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        ok_('application/json' in data['renders'])
        ok_('application/json' in data['parses'])

    def test_get_empty_with_app(self):
        # App authors see metadata but can neither rate nor have rated.
        AddonUser.objects.create(user=self.user, addon=self.app)
        res, data = self._get_url(self.list_url, app=self.app.pk)
        eq_(res.status_code, 200)
        eq_(data['info']['average'], self.app.average_rating)
        eq_(data['info']['slug'], self.app.app_slug)
        assert not data['user']['can_rate']
        assert not data['user']['has_rated']
    def test_get(self, client=None):
        # Two reviews on the app (newest first in the response) plus a
        # review on an unrelated app that must not leak into the listing.
        first_version = self.app.current_version
        rev = Review.objects.create(addon=self.app, user=self.user,
                                    version=first_version,
                                    body=u'I lôve this app',
                                    rating=5)
        rev.update(created=self.days_ago(2))
        rev2 = Review.objects.create(addon=self.app, user=self.user2,
                                     version=first_version,
                                     body=u'I also lôve this app',
                                     rating=4)
        # Extra review for another app, should be ignored.
        extra_app = app_factory()
        Review.objects.create(addon=extra_app, user=self.user,
                              version=extra_app.current_version,
                              body=u'I häte this extra app',
                              rating=1)
        self.app.total_reviews = 2
        ver = version_factory(addon=self.app, version='2.0',
                              file_kw=dict(status=mkt.STATUS_PUBLIC))
        self.app.update_version()
        reset_queries()
        res, data = self._get_url(self.list_url, app=self.app.pk,
                                  client=client)
        eq_(len(data['objects']), 2)
        self._compare_review_data(client, data['objects'][0], rev2)
        self._compare_review_data(client, data['objects'][1], rev)
        eq_(data['info']['average'], self.app.average_rating)
        eq_(data['info']['slug'], self.app.app_slug)
        eq_(data['info']['current_version'], ver.version)
        if client != self.anon:
            eq_(data['user']['can_rate'], True)
            eq_(data['user']['has_rated'], True)
        # Returned so test_get_304 can reuse the response's ETag.
        return res

    def test_get_304(self):
        # A matching If-None-Match ETag must short-circuit to 304.
        etag = self.test_get(client=self.anon)['ETag']
        res = self.anon.get(self.list_url, {'app': self.app.pk},
                            HTTP_IF_NONE_MATCH='%s' % etag)
        eq_(res.status_code, 304)
    @override_settings(DEBUG=True)
    def test_get_anonymous_queries(self):
        # Guard against query-count regressions for the anonymous listing.
        first_version = self.app.current_version
        Review.objects.create(addon=self.app, user=self.user,
                              version=first_version,
                              body=u'I lôve this app',
                              rating=5)
        Review.objects.create(addon=self.app, user=self.user2,
                              version=first_version,
                              body=u'I also lôve this app',
                              rating=4)
        self.app.total_reviews = 2
        version_factory(addon=self.app, version='2.0',
                        file_kw=dict(status=mkt.STATUS_PUBLIC))
        self.app.update_version()
        reset_queries()
        with self.assertNumQueries(7):
            # 7 queries:
            # - 1 SAVEPOINT
            # - 2 for the Reviews queryset and the translations
            # - 2 for the Version associated to the reviews (qs + translations)
            # - 1 for the File attached to the Version
            # - 1 RELEASE SAVEPOINT
            #
            # Notes:
            # - In prod, we actually do COMMIT/ROLLBACK and not
            # SAVEPOINT/RELEASE SAVEPOINT. It would be nice to avoid those for
            # all GET requests in the API, but it's not trivial to do for
            # ViewSets which implement multiple actions through the same view
            # function (non_atomic_requests() really want to be applied to the
            # view function).
            #
            # - The query count is slightly higher in prod. In tests, we patch
            # get_app() to avoid the app queries to pollute the queries count.
            #
            # Once we are on django 1.7, we'll be able to play with Prefetch
            # to reduce the number of queries further by customizing the
            # queryset used for the complex related objects like versions and
            # webapp.
            with patch('mkt.ratings.views.RatingViewSet.get_app') as get_app:
                get_app.return_value = self.app
                res, data = self._get_url(self.list_url, client=self.anon,
                                          app=self.app.pk)
def test_is_flagged_false(self):
Review.objects.create(addon=self.app, user=self.user2, body='yes')
res, data = self._get_url(self.list_url, app=self.app.pk)
eq_(data['objects'][0]['is_author'], False)
eq_(data['objects'][0]['has_flagged'], False)
def test_is_flagged_is_author(self):
Review.objects.create(addon=self.app, user=self.user, body='yes')
res, data = self._get_url(self.list_url, app=self.app.pk)
eq_(data['objects'][0]['is_author'], True)
eq_(data['objects'][0]['has_flagged'], False)
def test_is_flagged_true(self):
rat = Review.objects.create(addon=self.app, user=self.user2, body='ah')
ReviewFlag.objects.create(review=rat, user=self.user,
flag=ReviewFlag.SPAM)
res, data = self._get_url(self.list_url, app=self.app.pk)
eq_(data['objects'][0]['is_author'], False)
eq_(data['objects'][0]['has_flagged'], True)
def test_get_detail(self):
fmt = '%Y-%m-%dT%H:%M:%S'
Review.objects.create(addon=self.app, user=self.user2, body='no')
rev = Review.objects.create(addon=self.app, user=self.user, body='yes')
url = reverse('ratings-detail', kwargs={'pk': rev.pk})
res, data = self._get_url(url)
self.assertCloseToNow(datetime.strptime(data['modified'], fmt))
self.assertCloseToNow(datetime.strptime(data['created'], fmt))
eq_(data['body'], 'yes')
    def test_filter_self(self):
        """Filtering by one's own user pk returns only that user's review."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        Review.objects.create(addon=self.app, user=self.user2, body='no')
        self._get_filter(user=self.user.pk)
    def test_filter_mine(self):
        """The special user='mine' value filters to the requester's review."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        Review.objects.create(addon=self.app, user=self.user2, body='no')
        self._get_filter(user='mine')
    def test_filter_mine_anonymous(self):
        """user='mine' makes no sense anonymously and is rejected (403)."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        self._get_filter(user='mine', client=self.anon, expected_status=403)
def test_filter_by_app_slug(self):
self.app2 = app_factory()
Review.objects.create(addon=self.app2, user=self.user, body='no')
Review.objects.create(addon=self.app, user=self.user, body='yes')
res, data = self._get_filter(app=self.app.app_slug)
eq_(data['info']['slug'], self.app.app_slug)
eq_(data['info']['current_version'], self.app.current_version.version)
def test_filter_by_app_pk(self):
self.app2 = app_factory()
Review.objects.create(addon=self.app2, user=self.user, body='no')
Review.objects.create(addon=self.app, user=self.user, body='yes')
res, data = self._get_filter(app=self.app.pk)
eq_(data['info']['slug'], self.app.app_slug)
eq_(data['info']['current_version'], self.app.current_version.version)
    def test_filter_by_invalid_app(self):
        """Unknown app slug or pk yields a 404, not an empty list."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        self._get_filter(app='wrongslug', expected_status=404)
        self._get_filter(app=2465478, expected_status=404)
@patch('mkt.ratings.views.get_region')
def test_filter_by_nonpublic_app(self, get_region_mock):
Review.objects.create(addon=self.app, user=self.user, body='yes')
self.app.update(status=mkt.STATUS_PENDING)
get_region_mock.return_value = mkt.regions.USA
res, data = self._get_filter(
app=self.app.app_slug, expected_status=403)
eq_(data['detail'], 'The app requested is not public or not available '
'in region "us".')
    def test_filter_by_nonpublic_app_admin(self):
        """Admins (Apps:Edit) may list ratings of a non-public app."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        self.grant_permission(self.user, 'Apps:Edit')
        self.app.update(status=mkt.STATUS_PENDING)
        self._get_filter(app=self.app.app_slug)
    def test_filter_by_nonpublic_app_owner(self):
        """App authors may list ratings of their own non-public app."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        AddonUser.objects.create(user=self.user, addon=self.app)
        self.app.update(status=mkt.STATUS_PENDING)
        self._get_filter(app=self.app.app_slug)
@patch('mkt.ratings.views.get_region')
def test_filter_by_app_excluded_in_region(self, get_region_mock):
Review.objects.create(addon=self.app, user=self.user, body='yes')
AddonExcludedRegion.objects.create(addon=self.app,
region=mkt.regions.BRA.id)
get_region_mock.return_value = mkt.regions.BRA
res, data = self._get_filter(
app=self.app.app_slug, expected_status=403)
eq_(data['detail'], 'The app requested is not public or not available '
'in region "br".')
    @patch('mkt.ratings.views.get_region')
    def test_filter_by_app_excluded_in_region_admin(self, get_region_mock):
        """Admins bypass regional exclusion when listing ratings."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        self.grant_permission(self.user, 'Apps:Edit')
        AddonExcludedRegion.objects.create(addon=self.app,
                                           region=mkt.regions.BRA.id)
        get_region_mock.return_value = mkt.regions.BRA
        self._get_filter(app=self.app.app_slug)
    @patch('mkt.ratings.views.get_region')
    def test_filter_by_app_excluded_in_region_owner(self, get_region_mock):
        """App authors bypass regional exclusion when listing ratings."""
        Review.objects.create(addon=self.app, user=self.user, body='yes')
        AddonUser.objects.create(user=self.user, addon=self.app)
        AddonExcludedRegion.objects.create(addon=self.app,
                                           region=mkt.regions.BRA.id)
        get_region_mock.return_value = mkt.regions.BRA
        self._get_filter(app=self.app.app_slug)
def test_anonymous_get_list_without_app(self):
Review.objects.create(addon=self.app, user=self.user, body='yes')
res, data = self._get_url(self.list_url, client=self.anon)
eq_(res.status_code, 200)
assert 'user' not in data
eq_(len(data['objects']), 1)
eq_(data['objects'][0]['body'], 'yes')
def test_anonymous_get_list_app(self):
res, data = self._get_url(self.list_url, app=self.app.app_slug,
client=self.anon)
eq_(res.status_code, 200)
eq_(data['user'], None)
def test_non_owner(self):
res, data = self._get_url(self.list_url, app=self.app.app_slug)
assert data['user']['can_rate']
assert not data['user']['has_rated']
@patch('mkt.webapps.models.Webapp.get_excluded_region_ids')
def test_can_rate_unpurchased(self, exclude_mock):
exclude_mock.return_value = []
self.app.update(premium_type=mkt.ADDON_PREMIUM)
res, data = self._get_url(self.list_url, app=self.app.app_slug)
assert not res.json['user']['can_rate']
@patch('mkt.webapps.models.Webapp.get_excluded_region_ids')
def test_can_rate_purchased(self, exclude_mock):
exclude_mock.return_value = []
self.app.update(premium_type=mkt.ADDON_PREMIUM)
AddonPurchase.objects.create(addon=self.app, user=self.user)
res, data = self._get_url(self.list_url, app=self.app.app_slug)
assert res.json['user']['can_rate']
def test_isowner_true(self):
Review.objects.create(addon=self.app, user=self.user, body='yes')
res, data = self._get_url(self.list_url, app=self.app.app_slug)
data = json.loads(res.content)
eq_(data['objects'][0]['is_author'], True)
def test_isowner_false(self):
Review.objects.create(addon=self.app, user=self.user2, body='yes')
res, data = self._get_url(self.list_url, app=self.app.app_slug)
data = json.loads(res.content)
eq_(data['objects'][0]['is_author'], False)
def test_isowner_anonymous(self):
Review.objects.create(addon=self.app, user=self.user, body='yes')
res, data = self._get_url(self.list_url, app=self.app.app_slug,
client=self.anon)
data = json.loads(res.content)
self.assertNotIn('is_author', data['objects'][0])
def test_already_rated(self):
Review.objects.create(addon=self.app, user=self.user, body='yes')
res, data = self._get_url(self.list_url, app=self.app.app_slug)
data = json.loads(res.content)
assert data['user']['can_rate']
assert data['user']['has_rated']
def test_already_rated_version(self):
self.app.update(is_packaged=True)
Review.objects.create(addon=self.app, user=self.user, body='yes')
version_factory(addon=self.app, version='3.0')
self.app.update_version()
res, data = self._get_url(self.list_url, app=self.app.app_slug)
data = json.loads(res.content)
assert data['user']['can_rate']
assert not data['user']['has_rated']
    def _create(self, data=None, anonymous=False, version=None):
        """POST a review and return ``(response, parsed_body)``.

        ``data`` overrides fields of the default payload; ``anonymous``
        switches to the unauthenticated client; ``version`` defaults to
        the app's current version.
        """
        version = version or self.app.current_version
        default_data = {
            'app': self.app.id,
            'body': 'Rocking the free web.',
            'rating': 5,
            'version': version.id
        }
        if data:
            default_data.update(data)
        json_data = json.dumps(default_data)
        client = self.anon if anonymous else self.client
        res = client.post(self.list_url, data=json_data)
        try:
            res_data = json.loads(res.content)
        except ValueError:
            # Some error responses are not JSON; hand back the raw body.
            res_data = res.content
        return res, res_data
def test_anonymous_create_fails(self):
res, data = self._create(anonymous=True)
eq_(res.status_code, 403)
    @patch('mkt.ratings.views.record_action')
    def test_create(self, record_action):
        """Creating a review returns 201, links detail/flag URLs, records
        a metric and an ADD_REVIEW activity log entry.

        Returns ``(res, data)`` so other tests can build on it.
        """
        log_review_id = mkt.LOG.ADD_REVIEW.id
        eq_(ActivityLog.objects.filter(action=log_review_id).count(), 0)
        res, data = self._create()
        eq_(201, res.status_code)
        pk = Review.objects.latest('pk').pk
        eq_(data['body'], 'Rocking the free web.')
        eq_(data['rating'], 5)
        eq_(data['resource_uri'], reverse('ratings-detail', kwargs={'pk': pk}))
        eq_(data['report_spam'], reverse('ratings-flag', kwargs={'pk': pk}))
        eq_(record_action.call_count, 1)
        eq_(record_action.call_args[0][0], 'new-review')
        # 337141 is the pk of the webapp_337141 fixture app.
        eq_(record_action.call_args[0][2], {'app-id': 337141})
        eq_(ActivityLog.objects.filter(action=log_review_id).count(), 1)
        return res, data
def test_create_packaged(self):
self.app.update(is_packaged=True)
res, data = self.test_create()
eq_(data['version']['version'], '1.0')
def test_create_bad_data(self):
res, data = self._create({'body': None})
eq_(400, res.status_code)
assert 'body' in data
def test_create_nonexistent_app(self):
res, data = self._create({'app': -1})
eq_(400, res.status_code)
assert 'app' in data
@patch('mkt.ratings.serializers.get_region')
def test_create_for_nonregion(self, get_region_mock):
AddonExcludedRegion.objects.create(addon=self.app,
region=mkt.regions.BRA.id)
get_region_mock.return_value = mkt.regions.BRA
res, data = self._create()
eq_(403, res.status_code)
def test_create_for_nonpublic(self):
self.app.update(status=mkt.STATUS_PENDING)
res, data = self._create(version=self.app.latest_version)
eq_(403, res.status_code)
def test_create_duplicate_rating(self):
self._create()
res, data = self._create()
eq_(409, res.status_code)
def test_new_rating_for_new_version(self):
self.app.update(is_packaged=True)
self._create()
version = version_factory(addon=self.app, version='3.0')
self.app.update_version()
eq_(self.app.reload().current_version, version)
res, data = self._create()
eq_(201, res.status_code)
eq_(data['version']['version'], '3.0')
def test_create_duplicate_rating_packaged(self):
self.app.update(is_packaged=True)
self._create()
res, data = self._create()
eq_(409, res.status_code)
def test_create_own_app(self):
AddonUser.objects.create(user=self.user, addon=self.app)
res, data = self._create()
eq_(403, res.status_code)
@patch('mkt.webapps.models.Webapp.get_excluded_region_ids')
def test_rate_unpurchased_premium(self, exclude_mock):
exclude_mock.return_value = []
self.app.update(premium_type=mkt.ADDON_PREMIUM)
res, data = self._create()
eq_(403, res.status_code)
@patch('mkt.webapps.models.Webapp.get_excluded_region_ids')
def test_rate_purchased_premium(self, exclude_mock):
exclude_mock.return_value = []
self.app.update(premium_type=mkt.ADDON_PREMIUM)
AddonPurchase.objects.create(addon=self.app, user=self.user)
res, data = self._create()
eq_(201, res.status_code)
def _create_default_review(self):
# Create the original review
default_data = {
'body': 'Rocking the free web.',
'rating': 5
}
res, res_data = self._create(default_data)
return res, res_data
def test_patch_not_implemented(self):
self._create_default_review()
pk = Review.objects.latest('id').pk
json_data = json.dumps({
'body': 'Totally rocking the free web.',
})
res = self.client.patch(reverse('ratings-detail', kwargs={'pk': pk}),
data=json_data)
# Should return a 405 but permission check is done first. It's fine.
eq_(res.status_code, 403)
def _update(self, updated_data, pk=None):
# Update the review
if pk is None:
pk = Review.objects.latest('id').pk
json_data = json.dumps(updated_data)
res = self.client.put(reverse('ratings-detail', kwargs={'pk': pk}),
data=json_data)
try:
res_data = json.loads(res.content)
except ValueError:
res_data = res.content
return res, res_data
    def test_update(self):
        """Editing one's own review updates body/rating, preserves the
        original user and ip_address, and logs an EDIT_REVIEW activity."""
        rev = Review.objects.create(addon=self.app, user=self.user,
                                    body='abcd', ip_address='1.2.3.4')
        new_data = {
            'body': 'Totally rocking the free web.',
            'rating': 4,
        }
        log_review_id = mkt.LOG.EDIT_REVIEW.id
        eq_(ActivityLog.objects.filter(action=log_review_id).count(), 0)
        res, data = self._update(new_data)
        eq_(res.status_code, 200)
        eq_(data['body'], new_data['body'])
        eq_(data['rating'], new_data['rating'])
        rev.reload()
        eq_(rev.body, new_data['body'])
        eq_(rev.rating, new_data['rating'])
        eq_(rev.user, self.user)
        # The IP recorded at creation time must survive the edit.
        eq_(rev.ip_address, '1.2.3.4')
        eq_(ActivityLog.objects.filter(action=log_review_id).count(), 1)
    def test_update_admin(self):
        """Admins (Apps:Edit) may edit another user's review; authorship
        and the original ip_address are preserved."""
        self.grant_permission(self.user, 'Apps:Edit')
        rev = Review.objects.create(addon=self.app, user=self.user2,
                                    body='abcd', ip_address='1.2.3.4')
        new_data = {
            'body': 'Edited by admin',
            'rating': 1,
        }
        log_review_id = mkt.LOG.EDIT_REVIEW.id
        res = self.client.put(reverse('ratings-detail', kwargs={'pk': rev.pk}),
                              json.dumps(new_data))
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(data['body'], new_data['body'])
        eq_(data['rating'], new_data['rating'])
        rev.reload()
        eq_(rev.body, new_data['body'])
        eq_(rev.rating, new_data['rating'])
        # The review still belongs to its original author, not the admin.
        eq_(rev.user, self.user2)
        eq_(rev.ip_address, '1.2.3.4')
        eq_(ActivityLog.objects.filter(action=log_review_id).count(), 1)
def test_update_bad_data(self):
self._create_default_review()
res, data = self._update({'body': None})
eq_(400, res.status_code)
assert 'body' in data
def test_update_change_app(self):
_, previous_data = self._create_default_review()
self.app2 = app_factory()
new_data = {
'body': 'Totally rocking the free web.',
'rating': 4,
'app': self.app2.pk
}
res, data = self._update(new_data)
eq_(res.status_code, 200)
eq_(data['body'], new_data['body'])
eq_(data['rating'], new_data['rating'])
eq_(data['app'], previous_data['app'])
def test_update_comment_not_mine(self):
rev = Review.objects.create(addon=self.app, user=self.user2,
body='yes')
res = self.client.put(reverse('ratings-detail', kwargs={'pk': rev.pk}),
json.dumps({'body': 'no', 'rating': 1}))
eq_(res.status_code, 403)
rev.reload()
eq_(rev.body, 'yes')
def test_delete_app_mine(self):
AddonUser.objects.filter(addon=self.app).update(user=self.user)
rev = Review.objects.create(addon=self.app, user=self.user2,
body='yes')
url = reverse('ratings-detail', kwargs={'pk': rev.pk})
res = self.client.delete(url)
eq_(res.status_code, 204)
eq_(Review.objects.count(), 0)
log_review_id = mkt.LOG.DELETE_REVIEW.id
eq_(ActivityLog.objects.filter(action=log_review_id).count(), 1)
def test_delete_comment_mine(self):
rev = Review.objects.create(addon=self.app, user=self.user, body='yes')
url = reverse('ratings-detail', kwargs={'pk': rev.pk})
res = self.client.delete(url)
eq_(res.status_code, 204)
eq_(Review.objects.count(), 0)
log_review_id = mkt.LOG.DELETE_REVIEW.id
eq_(ActivityLog.objects.filter(action=log_review_id).count(), 1)
def test_delete_addons_admin(self):
self.grant_permission(self.user, 'Apps:Edit')
rev = Review.objects.create(addon=self.app, user=self.user2,
body='yes')
url = reverse('ratings-detail', kwargs={'pk': rev.pk})
res = self.client.delete(url)
eq_(res.status_code, 204)
eq_(Review.objects.count(), 0)
log_review_id = mkt.LOG.DELETE_REVIEW.id
eq_(ActivityLog.objects.filter(action=log_review_id).count(), 1)
def test_delete_users_admin(self):
self.grant_permission(self.user, 'Users:Edit')
rev = Review.objects.create(addon=self.app, user=self.user2,
body='yes')
url = reverse('ratings-detail', kwargs={'pk': rev.pk})
res = self.client.delete(url)
eq_(res.status_code, 204)
eq_(Review.objects.count(), 0)
log_review_id = mkt.LOG.DELETE_REVIEW.id
eq_(ActivityLog.objects.filter(action=log_review_id).count(), 1)
def test_delete_not_mine(self):
rev = Review.objects.create(addon=self.app, user=self.user2,
body='yes')
url = reverse('ratings-detail', kwargs={'pk': rev.pk})
self.app.authors.clear()
res = self.client.delete(url)
eq_(res.status_code, 403)
eq_(Review.objects.count(), 1)
log_review_id = mkt.LOG.DELETE_REVIEW.id
eq_(ActivityLog.objects.filter(action=log_review_id).count(), 0)
def test_delete_not_there(self):
url = reverse('ratings-detail', kwargs={'pk': 123})
res = self.client.delete(url)
eq_(res.status_code, 404)
log_review_id = mkt.LOG.DELETE_REVIEW.id
eq_(ActivityLog.objects.filter(action=log_review_id).count(), 0)
class TestRatingResourcePagination(RestOAuth, mkt.site.tests.MktPaths):
    """Pagination behaviour of the ratings list endpoint."""
    fixtures = fixture('user_2519', 'user_999', 'webapp_337141')

    def setUp(self):
        super(TestRatingResourcePagination, self).setUp()
        self.app = Webapp.objects.get(pk=337141)
        self.user = UserProfile.objects.get(pk=2519)
        self.user2 = UserProfile.objects.get(pk=31337)
        self.user3 = UserProfile.objects.get(pk=999)
        self.url = reverse('ratings-list')

    def test_pagination(self):
        """Reviews paginate newest-first with correct next/previous meta.

        Bug fix: the second page previously re-asserted ``next.path``
        instead of checking the freshly parsed previous-page URL
        (``prev.path``); ``next``/``prev`` were also shadowing builtins.
        """
        first_version = self.app.current_version
        rev1 = Review.objects.create(addon=self.app, user=self.user,
                                     version=first_version,
                                     body=u'I häte this app',
                                     rating=0)
        rev2 = Review.objects.create(addon=self.app, user=self.user2,
                                     version=first_version,
                                     body=u'I lôve this app',
                                     rating=5)
        rev3 = Review.objects.create(addon=self.app, user=self.user3,
                                     version=first_version,
                                     body=u'Blurp.',
                                     rating=3)
        # Age the first two reviews so rev3 is the newest.
        rev1.update(created=self.days_ago(3))
        rev2.update(created=self.days_ago(2))
        self.app.update(total_reviews=3)

        # First page: the two newest reviews, a next link, no previous.
        res = self.client.get(self.url, {'app': self.app.pk, 'limit': 2})
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(len(data['objects']), 2)
        eq_(data['objects'][0]['body'], rev3.body)
        eq_(data['objects'][1]['body'], rev2.body)
        eq_(data['meta']['total_count'], 3)
        eq_(data['meta']['limit'], 2)
        eq_(data['meta']['previous'], None)
        eq_(data['meta']['offset'], 0)
        next_url = urlparse(data['meta']['next'])
        eq_(next_url.path, self.url)
        eq_(QueryDict(next_url.query).dict(),
            {'app': str(self.app.pk), 'limit': '2', 'offset': '2'})

        # Second page: the remaining review, a previous link, no next.
        res = self.client.get(self.url,
                              {'app': self.app.pk, 'limit': 2, 'offset': 2})
        eq_(res.status_code, 200)
        data = json.loads(res.content)
        eq_(len(data['objects']), 1)
        eq_(data['objects'][0]['body'], rev1.body)
        eq_(data['meta']['total_count'], 3)
        eq_(data['meta']['limit'], 2)
        prev_url = urlparse(data['meta']['previous'])
        eq_(prev_url.path, self.url)
        eq_(QueryDict(prev_url.query).dict(),
            {'app': str(self.app.pk), 'limit': '2', 'offset': '0'})
        eq_(data['meta']['offset'], 2)
        eq_(data['meta']['next'], None)

    def test_total_count(self):
        """With an app filter, meta.total_count uses the app's denormalized
        total_reviews; without one, it reflects the actual queryset."""
        Review.objects.create(addon=self.app, user=self.user,
                              version=self.app.current_version,
                              body=u'I häte this app',
                              rating=0)
        self.app.update(total_reviews=42)
        res = self.client.get(self.url)
        data = json.loads(res.content)
        # We are not passing an app, so the app's total_reviews isn't used.
        eq_(data['meta']['total_count'], 1)
        # With an app however, it should be used as the total count.
        res = self.client.get(self.url, data={'app': self.app.pk})
        data = json.loads(res.content)
        eq_(data['meta']['total_count'], 42)

    def test_pagination_invalid(self):
        """A garbage offset value is ignored instead of erroring out."""
        res = self.client.get(self.url, data={'offset': '%E2%98%83'})
        eq_(res.status_code, 200)
class TestReviewFlagResource(RestOAuth, mkt.site.tests.MktPaths):
    """Tests for the review flagging endpoint (ratings-flag)."""
    fixtures = fixture('user_2519', 'webapp_337141')
    def setUp(self):
        super(TestReviewFlagResource, self).setUp()
        self.app = Webapp.objects.get(pk=337141)
        self.user = UserProfile.objects.get(pk=2519)
        self.user2 = UserProfile.objects.get(pk=31337)
        self.rating = Review.objects.create(addon=self.app,
                                            user=self.user2, body='yes')
        self.flag_url = reverse('ratings-flag', kwargs={'pk': self.rating.pk})
    def test_has_cors(self):
        """The flag endpoint advertises CORS for POST."""
        self.assertCORS(self.client.post(self.flag_url), 'post')
    def test_flag(self):
        """Flagging creates a ReviewFlag owned by the flagging user."""
        data = json.dumps({'flag': ReviewFlag.SPAM})
        res = self.client.post(self.flag_url, data=data)
        eq_(res.status_code, 201)
        rf = ReviewFlag.objects.get(review=self.rating)
        eq_(rf.user, self.user)
        eq_(rf.flag, ReviewFlag.SPAM)
        eq_(rf.note, '')
    def test_flag_note(self):
        """Flagging with a note stores the note.
        NOTE(review): SPAM is posted but the stored flag is asserted to
        be OTHER -- presumably providing a note coerces the flag to
        OTHER; confirm against the flag view/serializer logic.
        """
        note = 'do not want'
        data = json.dumps({'flag': ReviewFlag.SPAM, 'note': note})
        res = self.client.post(self.flag_url, data=data)
        eq_(res.status_code, 201)
        rf = ReviewFlag.objects.get(review=self.rating)
        eq_(rf.user, self.user)
        eq_(rf.flag, ReviewFlag.OTHER)
        eq_(rf.note, note)
    def test_flag_anon(self):
        """Anonymous users may flag; the flag then has no user."""
        data = json.dumps({'flag': ReviewFlag.SPAM})
        res = self.anon.post(self.flag_url, data=data)
        eq_(res.status_code, 201)
        rf = ReviewFlag.objects.get(review=self.rating)
        eq_(rf.user, None)
        eq_(rf.flag, ReviewFlag.SPAM)
        eq_(rf.note, '')
    def test_flag_conflict(self):
        """Flagging the same review twice by one user yields a 409."""
        data = json.dumps({'flag': ReviewFlag.SPAM})
        res = self.client.post(self.flag_url, data=data)
        res = self.client.post(self.flag_url, data=data)
        eq_(res.status_code, 409)
| clouserw/zamboni | mkt/ratings/tests/test_views.py | Python | bsd-3-clause | 32,940 |