| code (stringlengths 2-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2-1.05M) |
|---|---|---|---|---|---|
import pymel.core as pm
from ..errors import errors
from ..maya_testing import general
import control
# plane
def build_plane(name=None, crv=None, spans=None,
direction='u', width_axis=None,
width=1, log=True):
    '''Given a valid curve, build a nurbs plane
    Attributes:
        name -- Prefix name for plane. Str
        crv -- Curve to use as build guide. nt.Transform
        spans -- Spans for the plane. Int
        direction -- Build direction. 'u' or 'v'
        width_axis -- Plane width axis. 'x', 'y' or 'z'
        width -- Width of plane. Float
        log -- Output logging messages. Bool
    '''
general.check_type(name, 'name', [str])
general.check_type(crv, 'crv', [pm.nt.Transform])
general.check_type(spans, 'spans', [int])
general.check_type(direction, 'direction', [str])
general.check_type(width_axis, 'width_axis', [str])
general.check_type(width, 'width', [float, int])
if direction not in ['u', 'v']:
raise errors.InputError('direction', direction, "'u' or 'v'")
if width_axis not in ['x', 'y', 'z']:
raise errors.InputError('width_axis', width_axis, "'x', 'y' or 'z'")
d1 = crv.duplicate()
d2 = crv.duplicate()
move_amt = []
if width_axis == 'x':
move_amt = [width, 0, 0]
elif width_axis == 'y':
move_amt = [0, width, 0]
elif width_axis == 'z':
move_amt = [0, 0, width]
    if log:
        str_1 = 'move_amt: %s' % move_amt
        general.logging.debug(str_1)
pm.move(move_amt[0],
move_amt[1],
move_amt[2],
d1, r=1)
pm.move(-move_amt[0],
-move_amt[1],
-move_amt[2],
d2, r=1)
p = pm.loft(d1, d2, n=name+'_plane', ch=0)[0]
if direction == 'u':
pm.rebuildSurface(p, dir=2, su=spans, sv=2)
if direction == 'v':
pm.rebuildSurface(p, dir=2, sv=spans, su=2)
pm.delete(d1)
pm.delete(d2)
return p
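# A minimal usage sketch (not part of the original module): 'spine_crv' is a
# hypothetical curve transform assumed to already exist in the open Maya scene.
def _example_build_plane():
    crv = pm.PyNode('spine_crv')
    return build_plane(name='spine', crv=crv, spans=8,
                       direction='u', width_axis='x', width=2.0)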
# reg_node, chain[]
def build_joint_chain(name=None, crv=None,
order=None, num=None,
loc=None, reg_node=None, log=False):
    '''Given a valid curve, build a joint chain along the curve, num joints long
    Attributes:
        name -- Prefix name for chain. Str
        crv -- Curve to use as build guide. pm.nt.Transform
        order -- ['xyz','xzy','yxz','yzx','zxy','zyx']
        num -- Number of joints. 3 - 50, Int
        loc -- Used to set aim of secondary axis. nt.Transform
        reg_node -- Registration node. nt.Transform
        log -- Output logging messages. Bool
    '''
general.check_type(name, 'name', [str])
general.check_type(crv, 'crv', [pm.nt.Transform])
general.check_type(order, 'order', [str])
general.check_type(num, 'num', [int])
general.check_type(loc, 'loc', [pm.nt.Transform])
orders = ['xyz', 'xzy', 'yxz', 'yzx', 'zxy', 'zyx']
if order not in orders:
raise errors.InputError('order', order, orders)
if num < 3 or num > 50:
raise errors.InputError('num', num, 'Range: 3 - 50')
loc = loc.duplicate()[0]
chain = []
loc_v = None
incr = float(1.0/num)
    if log:
        str_1 = 'Curve Length: %s' % pm.arclen(crv)
        str_2 = 'Increment: %s' % incr
        general.logging.debug(str_1)
        general.logging.debug(str_2)
param = 0
pm.select(clear=1)
for i in range(num):
pos = pm.pointOnCurve(crv, pr=param, p=True, top=True)
if i == 0: # Get vector to locator
pos_v = pm.dt.Vector(pos)
loc_pos = pm.dt.Vector(pm.xform(loc, q=1, ws=1, rp=1))
loc_v = loc_pos - pos_v
            if log:
                str_1 = 'Jnt Pos: %s' % pos_v
                str_2 = 'Loc Pos: %s' % loc_pos
                str_3 = 'Loc Vec: %s' % loc_v
                general.logging.debug(str_1)
                general.logging.debug(str_2)
                general.logging.debug(str_3)
j = pm.joint(p=pos, name='%s_Jnt_%s' % (name, (i+1)))
chain.append(j)
        if log:
            str_1 = 'Created Joint: %s' % j
            str_2 = 'Parameter: %s' % param
            str_3 = 'Pos: %s' % pos
            str_4 = 'Curve: %s' % crv
            general.logging.debug(str_1)
            general.logging.debug(str_2)
            general.logging.debug(str_3)
            general.logging.debug(str_4)
param += incr
    if log:
        str_1 = 'Chain: %s' % chain
        general.logging.debug(str_1)
# aim vector
aim_v = []
if order[0].lower() == 'x':
aim_v = [1, 0, 0]
if order[0].lower() == 'y':
aim_v = [0, 1, 0]
if order[0].lower() == 'z':
aim_v = [0, 0, 1]
# up vector
up_v = []
if order[1].lower() == 'x':
up_v = [1, 0, 0]
if order[1].lower() == 'y':
up_v = [0, 1, 0]
if order[1].lower() == 'z':
up_v = [0, 0, 1]
for jnt in chain[:-1]:
# Snap/parent locator to jnt
pm.parent(loc, jnt)
        loc.setTranslation([0, 0, 0])
        loc.setRotation([0, 0, 0])
# move by loc_v
pm.move(loc_v[0],
loc_v[1],
loc_v[2],
loc, r=1)
pm.parent(loc, w=1)
# Remove joint from hierarchy
p = jnt.getParent()
c = jnt.getChildren()
        try:
            pm.parent(jnt, w=1)
        except RuntimeError:
            pass  # jnt is already a world-space root
pm.parent(c, w=1)
# Aim to child
pm.delete(pm.aimConstraint(c,
jnt,
aim=aim_v,
wu=up_v,
wut='object',
wuo=loc,
mo=0))
        # Reinsert into hierarchy
if c:
pm.parent(c, jnt)
if p:
pm.parent(jnt, p)
    # Orient last joint to none
pm.joint(chain[-1], e=1, oj='none', zso=True)
if not reg_node:
reg_node = control.create_register_node(name)
control.register_object(reg_node,
'%s_chain_root' % name,
chain[0])
pm.select(clear=1)
pm.delete(loc)
return reg_node, chain
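# A minimal usage sketch (hypothetical node names, not from the original
# module): requires a guide curve and a locator marking the up direction.
def _example_build_joint_chain():
    crv = pm.PyNode('spine_crv')
    loc = pm.PyNode('spine_up_loc')
    return build_joint_chain(name='spine', crv=crv, order='xyz',
                             num=5, loc=loc)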
| Mauricio3000/fk_ik_sine_rig | tool/rig/utils.py | Python | gpl-3.0 | 6,142 |
import pymysql
from pymysql import converters
BITCOIN_API = "http://api.coindesk.com/v1/bpi/historical/close.json?start={start_date}&end={end_date}&currency={currency}"
conv = converters.conversions.copy()
conv[246] = float # convert decimals to floats
conv[10] = str
conv[12] = str
DATABASE = {
'user': 'root',
'password': '',
'host': 'localhost',
'database': 'dark_web',
'conv': conv,
}
connection = pymysql.connect(**DATABASE)
connection.commit()
cursor = connection.cursor()
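# A minimal usage sketch (the 'bitcoin_prices' table and its columns are
# assumptions; the settings module above only opens the connection).
def _example_query(min_price=1000):
    cursor.execute("SELECT date, price FROM bitcoin_prices WHERE price > %s",
                   (min_price,))
    return cursor.fetchall()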
| mauromsl/dark_web_datamining | python_src/default_settings.py | Python | gpl-3.0 | 507 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
import json
class SimpleClass:
def __init__(self, a=None, b=None, c=None):
self.a = a
self.b = b
self.c = c
def serialize_json(instance=None, path=None):
dt = {}
dt.update(vars(instance))
with open(path, "w") as file:
json.dump(dt, file)
def deserialize_json(cls=None, path=None):
def read_json(_path):
with open(_path, "r") as file:
return json.load(file)
data = read_json(path)
instance = object.__new__(cls)
for key, value in data.items():
setattr(instance, key, value)
return instance
# Usage: create an instance and serialize it to a JSON file.
write_settings = SimpleClass(a=1, b=2, c=3)
serialize_json(write_settings, r"object.json")
# Read back and rehydrate.
read_settings = deserialize_json(SimpleClass, r"object.json")
print("The object is written in object.json")
| davidam/python-examples | basics/json/object2json.py | Python | gpl-3.0 | 1,785 |
#!/usr/bin/env python
"""
=========================================
dummyActuator.py - Dummy Actuator Handler
=========================================
Does nothing more than print the actuator name and state; for testing purposes.
"""
import subprocess, os, time, socket
import sys
class actuatorHandler:
def __init__(self, proj, shared_data):
self.proj = proj
self.p_gui = None
def _stop(self):
if self.p_gui is not None:
print >>sys.__stderr__, "(SENS) Killing dummyactuator GUI..."
try:
self.p_gui.stdin.write(":QUIT\n")
self.p_gui.stdin.close()
except IOError:
# Probably already closed by user
pass
    def setActuator(self, name, actuatorVal, initial):
        """
        Pretends to set actuator ``name`` to state ``actuatorVal`` (bool).
        name (string): Name of the actuator
        """
if initial:
if self.p_gui is None:
# Prepare to receive initialization signal
host = 'localhost'
port = 23559
buf = 1024
addr = (host,port)
UDPSock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
UDPSock.bind(addr)
except:
print "ERROR: Cannot bind to port. Try killing all Python processes and trying again."
return
# Create a subprocess
print "(SENS) Starting actuatorHandler window..."
self.p_gui = subprocess.Popen(["python", "-u", os.path.join(self.proj.ltlmop_root,"lib","handlers","share","_ActuatorHandler.py")], stderr=subprocess.PIPE, stdin=subprocess.PIPE)
data = ''
while "Hello!" not in data:
# Wait for and receive a message from the subwindow
try:
data,addrFrom = UDPSock.recvfrom(1024)
except socket.timeout:
print "Waiting for GUI..."
continue
UDPSock.close()
self.p_gui.stdin.write(name + ",init\n")
else:
if actuatorVal:
time.sleep(0.1) # Fake some time lag for the actuator to enable
self.p_gui.stdin.write("{},{}\n".format(name,int(actuatorVal)))
print "(ACT) Actuator %s is now %s!" % tuple(map(str, (name, actuatorVal)))
| jadecastro/LTLMoP | src/lib/handlers/share/dummyActuator.py | Python | gpl-3.0 | 2,618 |
"""Regresssion tests for urllib"""
import urllib.parse
import urllib.request
import http.client
import email.message
import io
import unittest
from test import support
import os
import tempfile
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = urllib.request.FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertTrue(isinstance(file_num, int),
"fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
        # Test close() by calling it here and then having it be called again
        # by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertTrue(isinstance(self.returned_obj.info(), email.message.Message))
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertEqual(self.returned_obj.getcode(), None)
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison
for line in self.returned_obj.__iter__():
self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in os.environ.keys():
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
        # getproxies_environment() uses lowercased, truncated keys (no '_proxy')
        self.assertEqual('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
"""Test urlopen() opening a fake http connection."""
def fakehttp(self, fakedata):
class FakeSocket(io.BytesIO):
def sendall(self, str): pass
def makefile(self, *args, **kwds):
return self
def read(self, amt=None):
if self.closed: return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed: return b""
return io.BytesIO.readline(self, length)
class FakeHTTPConnection(http.client.HTTPConnection):
def connect(self):
self.sock = FakeSocket(fakedata)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = FakeHTTPConnection
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
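    # fakehttp()/unfakehttp() monkeypatch http.client.HTTPConnection so that
    # urlopen() reads the canned bytes above instead of touching the network.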
def test_read(self):
self.fakehttp(b"Hello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_read_bogus(self):
# urlopen() should raise IOError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
try:
self.assertRaises(IOError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises IOError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(IOError, urlopen, "http://something")
finally:
self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to makes sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
return "file://%s" % urllib.request.pathname2url(
os.path.abspath(filePath))
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertTrue(isinstance(result[1], email.message.Message),
"did not get a email.message.Message instance as second "
"returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(count, block_size, total_size, count_holder=[0]):
self.assertTrue(isinstance(count, int))
self.assertTrue(isinstance(block_size, int))
self.assertTrue(isinstance(total_size, int))
self.assertEqual(count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read). Since the block size is 8192 bytes, only one block read is
# required to read the entire file.
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(count, block_size, total_size, _report=report):
_report.append((count, block_size, total_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 2396 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
                                "abcdefghijklmnopqrstuvwxyz",
                                "0123456789",
                                "_.-"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append('<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escapes to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded by with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
    See the doc string for QuotingTests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters escaped: "
"%s" % result)
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when have non-quoted characters
# interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
        Test cannot assume anything about order; the docs make no guarantee,
        and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertTrue(expected in result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertTrue(expect in result,
"%s not found in %s" % (expect, result))
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": {"a": 1, "b": 1}}, True))
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathame() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
        # Test automatic quoting and unquoting works for pathname2url() and
        # url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_splitpasswd(self):
"""Some of password examples are not sensible, but it is added to
confirming to RFC2617 and addressing issue4675.
"""
        self.assertEqual(('user', 'ab'), urllib.parse.splitpasswd('user:ab'))
        self.assertEqual(('user', 'a\nb'), urllib.parse.splitpasswd('user:a\nb'))
        self.assertEqual(('user', 'a\tb'), urllib.parse.splitpasswd('user:a\tb'))
        self.assertEqual(('user', 'a\rb'), urllib.parse.splitpasswd('user:a\rb'))
        self.assertEqual(('user', 'a\fb'), urllib.parse.splitpasswd('user:a\fb'))
        self.assertEqual(('user', 'a\vb'), urllib.parse.splitpasswd('user:a\vb'))
        self.assertEqual(('user', 'a:b'), urllib.parse.splitpasswd('user:a:b'))
class URLopener_Tests(unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines, sometimes they
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests go ok.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertTrue(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
support.run_unittest(
urlopen_FileTests,
urlopen_HttpTests,
urlretrieve_FileTests,
ProxyTests,
QuotingTests,
UnquotingTests,
urlencode_Tests,
Pathname_Tests,
Utility_Tests,
URLopener_Tests,
#FTPWrapperTests,
)
if __name__ == '__main__':
test_main()
| mancoast/CPythonPyc_test | fail/312_test_urllib.py | Python | gpl-3.0 | 40,650 |
# -*- coding: utf-8 -*-
{
'!langcode!': 'pt-br',
'!langname!': 'Português (do Brasil)',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN',
'%s %%{row} deleted': '%s linhas apagadas',
'%s %%{row} updated': '%s linhas atualizadas',
'%s selected': '%s selecionado',
'%Y-%m-%d': '%d-%m-%Y',
'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'?': '?',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Ocorreu um erro, por favor [[reload %s]] a página',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'About': 'Sobre',
'Access Control': 'Controle de Acesso',
'Add user': 'Add user',
'Administrative Interface': 'Interface Administrativa',
'Administrative interface': 'Interface administrativa',
'Ajax Recipes': 'Receitas de Ajax',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'Ansible': 'Ansible',
'appadmin is disabled because insecure channel': 'Administração desativada porque o canal não é seguro',
'Aprobacion': 'Aprobacion',
'Are you sure you want to delete this object?': 'Você está certo que deseja apagar este objeto?',
'Available Databases and Tables': 'Bancos de dados e tabelas disponíveis',
'Back': 'Back',
'Buy this book': 'Compre o livro',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Chaves de cache',
'Cannot be empty': 'Não pode ser vazio',
'change password': 'modificar senha',
'Check to delete': 'Marque para apagar',
'Choose One': 'Choose One',
'Clear CACHE?': 'Limpar CACHE?',
'Clear DISK': 'Limpar DISCO',
'Clear RAM': 'Limpar memória RAM',
'Client IP': 'IP do cliente',
'Code Course': 'Code Course',
'Code Group': 'Code Group',
'Community': 'Comunidade',
'Components and Plugins': 'Componentes e Plugins',
'Config.ini': 'Config.ini',
'Controller': 'Controlador',
'Copy Files': 'Copy Files',
'Copyright': 'Copyright',
'Course': 'Course',
'Course Code': 'Course Code',
'Course Create!': 'Course Create!',
'Course Description': 'Course Description',
'Course Name': 'Course Name',
'Courses': 'Courses',
'Create User': 'Create User',
'Current request': 'Requisição atual',
'Current response': 'Resposta atual',
'Current session': 'Sessão atual',
'customize me!': 'Personalize-me!',
'data uploaded': 'dados enviados',
'Database': 'banco de dados',
'Database %s select': 'Selecionar banco de dados %s',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'bd',
'DB Model': 'Modelo BD',
'Delete User': 'Delete User',
'Delete:': 'Apagar:',
'Demo': 'Demo',
'Deployment Recipes': 'Receitas de deploy',
'Description': 'Descrição',
'design': 'projeto',
'Design': 'Design',
'DISK': 'DISK',
'Disk Cache Keys': 'Chaves do Cache de Disco',
'Disk Cleared': 'Disco Limpo',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Do you really whish to restart the selected machines?': 'Do you really whish to restart the selected machines?',
'Documentation': 'Documentação',
"Don't know what to do?": 'Não sabe o que fazer?',
'done!': 'concluído!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Editar',
'Edit current record': 'Editar o registro atual',
'edit profile': 'editar perfil',
'Edit This App': 'Editar esta aplicação',
'Email and SMS': 'Email e SMS',
'Enter an integer between %(min)g and %(max)g': 'Informe um valor inteiro entre %(min)g e %(max)g',
'Errors': 'Erros',
'export as csv file': 'exportar como um arquivo csv',
'Fabric': 'Fabric',
'FAQ': 'Perguntas frequentes',
'First name': 'Nome',
'Forms and Validators': 'Formulários e Validadores',
'Free Applications': 'Aplicações gratuitas',
'Graph Model': 'Graph Model',
'Group ID': 'ID do Grupo',
'Group Number': 'Group Number',
'Groups': 'Grupos',
'Hello People UD': 'Hello People UD',
'Hello World': 'Olá Mundo',
'Helping web2py': 'Helping web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Hola ': 'Hola ',
'Home': 'Principal',
'Hostname': 'Hostname',
'How did you get here?': 'Como você chegou aqui?',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'Index': 'Início',
'insert new': 'inserir novo',
'insert new %s': 'inserir novo %s',
'Internal State': 'Estado Interno',
'Introduction': 'Introdução',
'Invalid email': 'Email inválido',
'Invalid Query': 'Consulta Inválida',
'invalid request': 'requisição inválida',
'IPv4 Machine': 'IPv4 Machine',
'Job Name': 'Job Name',
'Key': 'Chave',
'Last name': 'Sobrenome',
'Layout': 'Layout',
'Layout Plugins': 'Plugins de Layout',
'Layouts': 'Layouts',
'Live chat': 'Chat ao vivo',
'Live Chat': 'Chat ao vivo',
'Log In': 'Log In',
'login': 'Entrar',
'Login': 'Autentique-se',
'logout': 'Sair',
'Lost Password': 'Esqueceu sua senha?',
'lost password?': 'esqueceu sua senha?',
'Machine': 'Machine',
'Machines': 'Machines',
'Main Menu': 'Menu Principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gerenciar Cache',
'Memberships': 'Memberships',
'Memory RAM': 'Memory RAM',
'Menu Model': 'Modelo de Menu',
'My jobs': 'My jobs',
'My Sites': 'Meus sites',
'Name': 'Nome',
'New Record': 'Novo Registro',
'new record inserted': 'novo registro inserido',
'next %s rows': 'next %s rows',
'next 100 rows': 'próximas 100 linhas',
'No databases in this application': 'Não há bancos de dados nesta aplicação',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'Nome do objeto ou da tabela',
'of': 'of',
'Online book': 'Online book',
'Online examples': 'Exemplos online',
'Operative System': 'Operative System',
'or import from csv file': 'ou importar de um arquivo csv',
'Origin': 'Origem',
'Other': 'Other',
'Other MB': 'Other MB',
'Other Plugins': 'Outros Plugins',
'Other Recipes': 'Outras Receitas',
'Overview': 'Visão Geral',
'Password': 'Senha',
'Permission': 'Permission',
'Permissions': 'Permissions',
'Please select at least one machine': 'Please select at least one machine',
'Please select at leats one id': 'Please select at leats one id',
'Plugins': 'Plugins',
'Port': 'Port',
'Powered by': 'Desenvolvido com',
'Preface': 'Prefácio',
'previous %s rows': 'previous %s rows',
'previous 100 rows': '100 linhas anteriores',
'Processor': 'Processor',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query:': 'Consulta:',
'Quick Examples': 'Exemplos rápidos',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Recipes': 'Receitas',
'Record': 'Registro',
'record does not exist': 'registro não existe',
'Record ID': 'ID do Registro',
'Record id': 'id do registro',
'Register': 'Registre-se',
'register': 'Registre-se',
'Register Machine': 'Register Machine',
'Registration identifier': 'Idenficador de registro',
'Registration key': 'Chave de registro',
'Reset Password key': 'Resetar chave de senha',
'Resources': 'Recursos',
'Restart': 'Restart',
'Role': 'Papel',
'Roles': 'Roles',
'Rows in Table': 'Linhas na tabela',
'Rows selected': 'Linhas selecionadas',
'Save model as...': 'Save model as...',
'Select One': 'Select One',
'Select one group': 'Select one group',
'Semantic': 'Semântico',
'Semester': 'Semester',
'Services': 'Serviço',
'Sign Up': 'Sign Up',
'Size of cache:': 'Tamanho do cache:',
'SSH Tests': 'SSH Tests',
'state': 'estado',
'State': 'State',
'Statistics': 'Estatísticas',
'Stylesheet': 'Folha de estilo',
'submit': 'enviar',
'Support': 'Suporte',
'Sure you want to delete this object?': 'Está certo(a) que deseja apagar este objeto?',
'Table': 'Tabela',
'Table name': 'Nome da tabela',
'Task ID': 'Task ID',
'Teacher': 'Teacher',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'A saída do arquivo é um dicionário que foi apresentado pela visão %s',
'The Views': 'As views',
'These are your machines': 'These are your machines',
'This App': 'Esta aplicação',
'This email already has an account': 'Este email já tem uma conta',
'This is a copy of the scaffolding application': 'Isto é uma cópia da aplicação modelo',
'Time in Cache (h:m:s)': 'Tempo em Cache (h:m:s)',
'Timestamp': 'Timestamp',
'To register': 'To register',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'unable to parse csv file': 'não foi possível analisar arquivo csv',
'Update:': 'Atualizar:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) para NOT para construir consultas mais complexas.',
'User': 'User',
'User ID': 'ID do Usuário',
'User name': 'User name',
'User Voice': 'Opinião dos usuários',
'Users': 'Users',
'Videos': 'Vídeos',
'View': 'Visualização',
'Web2py': 'Web2py',
'Welcome': 'Bem-vindo',
'Welcome %s': 'Bem-vindo %s',
'Welcome to web2py': 'Bem-vindo ao web2py',
'Welcome to web2py!': 'Bem-vindo ao web2py!',
'Which called the function %s located in the file %s': 'Que chamou a função %s localizada no arquivo %s',
'Working...': 'Trabalhando...',
'You are successfully running web2py': 'Você está executando o web2py com sucesso',
'You are successfully running web2py.': 'Você está executando o web2py com sucesso.',
'You can modify this application and adapt it to your needs': 'Você pode modificar esta aplicação e adaptá-la às suas necessidades',
'You visited the url %s': 'Você acessou a url %s',
}
| urrego093/proyecto_mv | applications/datos/languages/pt-br.py | Python | gpl-3.0 | 11,305 |
#!/usr/bin/env python
'''
Using ciscoconfparse find the crypto maps that are not using AES (based-on the
transform set name). Print these entries and their corresponding transform set
name.
'''
import re
from ciscoconfparse import CiscoConfParse
def main():
'''
Using ciscoconfparse find the crypto maps that are not using AES (based-on
the transform set name). Print these entries and their corresponding
transform set name.
'''
cisco_file = 'Exercise8ConfigFile'
cisco_cfg = CiscoConfParse(cisco_file)
crypto_maps = cisco_cfg.find_objects_wo_child(parentspec=r'crypto map CRYPTO',
childspec=r'AES')
print "\nCrypto maps not using AES:"
for entry in crypto_maps:
for child in entry.children:
if 'transform' in child.text:
match = re.search(r"set transform-set (.*)$", child.text)
encryption = match.group(1)
print " {0} >>> {1}".format(entry.text.strip(), encryption)
print
if __name__ == "__main__":
main()
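# Expected output shape (a sketch; the crypto map entry and transform-set
# name shown are hypothetical, depending on what Exercise8ConfigFile holds):
#
#   Crypto maps not using AES:
#     crypto map CRYPTO 10 ipsec-isakmp >>> ESP-3DES-SHA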
| shahalimurtaza/Pynet | Week1/Exercise10-KirkSolution.py | Python | gpl-3.0 | 1,072 |
from vsg.vhdlFile.extract import tokens
from vsg.vhdlFile.extract import utils
def get_consecutive_lines_starting_with_token(search_token, min_num_lines, lAllTokens, oTokenMap):
lReturn = []
lSearchLines = get_line_numbers_of_tokens_which_start_line(search_token, oTokenMap)
lGroups = group_lines(lSearchLines)
lFilteredGroups = filter_groups_based_on_number_of_lines(lGroups, min_num_lines)
for lGroup in lFilteredGroups:
iStartLine = lGroup[0]
iEndLine = lGroup[-1]
iStartToken = oTokenMap.get_index_of_line(iStartLine)
iEndToken = oTokenMap.get_index_of_carriage_return_after_index(oTokenMap.get_index_of_line(iEndLine))
lTemp = lAllTokens[iStartToken:iEndToken]
lReturn.append(tokens.New(iStartToken, iStartLine, lTemp))
return lReturn
def get_line_numbers_of_tokens_which_start_line(search_token, oTokenMap):
lSearchIndexes = utils.filter_tokens_which_start_a_line(search_token, oTokenMap)
lSearchLines = []
for iSearchIndex in lSearchIndexes:
lSearchLines.append(oTokenMap.get_line_number_of_index(iSearchIndex))
return lSearchLines
def group_lines(lLines):
lReturn = []
if len(lLines) == 0:
return []
lTemp = [lLines[0]]
for iLine in lLines[1:]:
if iLine == lTemp[-1] + 1:
lTemp.append(iLine)
else:
lReturn.append(lTemp)
lTemp = [iLine]
lReturn.append(lTemp)
return lReturn
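# Worked example: group_lines([1, 2, 3, 7, 8, 10]) returns
# [[1, 2, 3], [7, 8], [10]] -- runs of consecutive line numbers are grouped.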
def filter_groups_based_on_number_of_lines(lGroups, min_num_lines):
lReturn = []
for lGroup in lGroups:
if len(lGroup) >= min_num_lines:
lReturn.append(lGroup)
return lReturn
| jeremiah-c-leary/vhdl-style-guide | vsg/vhdlFile/extract/get_consecutive_lines_starting_with_token.py | Python | gpl-3.0 | 1,683 |
from __future__ import absolute_import
from pimlico.datatypes.corpora.data_points import DataPointType, InvalidDocument, is_invalid_doc, \
invalid_document_or_raw, invalid_document
from .base import IterableCorpus
from .grouped import GroupedCorpus
from . import data_points
from . import tokenized
from . import floats
from . import ints
from . import json
from . import table
from . import word_annotations
from . import strings
"""
This list collects together all the built-in data point types. These can be specified
using only their class name, rather than requiring a fully-qualified path, when giving
a data point type in a config file
"""
DATA_POINT_TYPES = [
data_points.RawDocumentType,
data_points.RawTextDocumentType, data_points.TextDocumentType,
tokenized.TokenizedDocumentType, tokenized.CharacterTokenizedDocumentType, tokenized.SegmentedLinesDocumentType,
    floats.FloatListDocumentType, floats.VectorDocumentType,
ints.IntegerListsDocumentType, ints.IntegerListDocumentType,
json.JsonDocumentType,
table.IntegerTableDocumentType,
word_annotations.WordAnnotationsDocumentType, word_annotations.AddAnnotationField, word_annotations.AddAnnotationFields,
strings.LabelDocumentType,
]
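# For example, per the note above, a config file can name "JsonDocumentType"
# directly instead of the fully-qualified
# "pimlico.datatypes.corpora.json.JsonDocumentType".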
| markgw/pimlico | src/python/pimlico/datatypes/corpora/__init__.py | Python | gpl-3.0 | 1,270 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-13 07:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('street_agitation_bot', '0030_auto_20170813_0756'),
]
operations = [
migrations.RenameField(
model_name='conversationstate',
old_name='agitator2',
new_name='agitator',
),
migrations.RenameField(
model_name='cubeusageinevent',
old_name='delivered_by2',
new_name='delivered_by',
),
migrations.RenameField(
model_name='cubeusageinevent',
old_name='shipped_by2',
new_name='shipped_by',
),
]
| Kurpilyansky/street-agitation-telegram-bot | street_agitation_bot/migrations/0031_auto_20170813_0757.py | Python | gpl-3.0 | 775 |
#!/usr/bin/env python
# simple partition tool with purpose on pyparted testing
import parted
import getopt
import sys
import rfparted
import partedprint
def check_device(devpath):
"""Given the operating system level path to a device node, set
the value of self.dev and self.disk. Return false if an
invalid path is given."""
try:
parted.getDevice(devpath)
except parted.DeviceException:
return False
return True
def usage():
print "%s: [OPTION]... [DEVICE [CMD [PARAM]...]...]" % (sys.argv[0], )
print "Option:"
#print "\t-d,--dev device operate only on device specified"
print "\t-h,--help \t\t show help information"
print "\t-l,--list[free|json] \t lists partition layout on all block devices"
print
print "Command:"
print "\tmklabel LABEL-TYPE create a new disklabel (partition table)"
print "\tmkpart PART-TYPE START END [FS-TYPE] make a partition"
print "\trm NUMBER delete partition NUMBER"
print "\tprint [free|json] display the partition table,free space "
print "\t or get the result with json"
def print_handler(opts,args):
#disks = {devpath:disks}
disks = {}
isjson = False
withfree = False
for opt in opts:
op, arg = opt[0],opt[1]
if op == '-l' or op == '--list':
disks = None
elif op == '-h' or op == '--help':
usage()
sys.exit(0)
else:
usage()
sys.exit(1)
for arg in args:
if arg == 'json':
isjson = True
elif arg == 'free':
withfree = True
        elif arg == 'print' and disks == {}:
            if not check_device(args[0]):
                usage()
                sys.exit(1)
            else:
                dev = parted.getDevice(args[0])
                try:
                    disks[dev.path] = parted.disk.Disk(dev)
                except Exception:
                    disks[dev.path] = None
if isjson:
print partedprint.parted_print(disks,isjson,withfree)
else:
partedprint.parted_print(disks,isjson,withfree)
if __name__ == "__main__":
# check uid
try:
opts, args = getopt.getopt(sys.argv[1:], "hl", ["help", "list"])
except getopt.GetoptError:
usage()
sys.exit(2)
#print_handler
    if opts or (len(args) > 1 and "print".startswith(args[1])):
        print_handler(opts,args)
        sys.exit(0)
    if not args or not check_device(args[0]):
        sys.exit(1)
if len(args) > 1:
cmd = args[1]
dev = parted.getDevice(args[0])
        try:
            disk = parted.disk.Disk(dev)
        except Exception:
            print "the dev's type is unknown."
            sys.exit(1)
del args[1]
del args[0]
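        # dispatch() and print_disk_helper() are presumably provided by the
        # companion modules (rfparted/partedprint); they are not defined here.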
new_disk = dispatch(cmd, args, dev, disk)
if new_disk:
new_disk.commit()
print_disk_helper(new_disk)
|
sonald/Red-Flag-Linux-Installer
|
hippo/services/lib/fdisk.py
|
Python
|
gpl-3.0
| 3,072
|
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import Group
CATEGORY = (('', '----'), ('1', _('Student')),
('2', _('Teaching Staff')), ('3', _('Employees')))
ENGINEERING = (('0', '----'), ('1', _('Software')), ('2', _('Eletronic')),
('3', _('Energy')), ('4', _('Automotive')),
('5', _('Aerospace')), ('6', _('Engineerings')))
class UserProfile(models.Model):
registration_number = models.CharField(
max_length=20, unique=True,
error_messages={'unique': _('Registration Number already used.')})
user = models.OneToOneField(User, on_delete=models.CASCADE,
related_name="profile_user")
category = models.CharField(choices=CATEGORY, max_length=20)
engineering = models.CharField(choices=ENGINEERING, max_length=15,
default=1)
def create_user(self):
if not hasattr(self, "user"):
self.user = User()
def name(self, name):
self.create_user()
names = name.split()
self.user.first_name = names.pop(0)
self.user.last_name = str.join(" ", names)
def full_name(self):
self.create_user()
name = str.join(" ", [self.user.first_name, self.user.last_name])
return name
def clean_fields(self, exclude=None):
validation = Validation()
# Registration Number validation
registration_number = self.registration_number
        if len(registration_number) != 9:
            raise ValidationError({'registration_number': [
                _('Registration number must have 9 digits.'), ]})
        if validation.hasLetters(registration_number):
            raise ValidationError({'registration_number': [
                _('Registration number cannot contain letters.'), ]})
        if validation.hasSpecialCharacters(registration_number):
            raise ValidationError({'registration_number': [
                _('Registration number cannot contain special characters.'), ]})
def save(self, *args, **kwargs):
self.user.save()
self.user_id = self.user.pk
super(UserProfile, self).save(*args, **kwargs)
def make_as_academic_staff(self):
        academic_staff, _created = Group.objects.get_or_create(
            name="academic_staff")
self.create_user()
self.user.groups.add(academic_staff)
def make_as_admin(self):
        admin, _created = Group.objects.get_or_create(name="admin")
self.create_user()
self.user.groups.add(admin)
def is_admin(self):
try:
self.user.groups.get(name="admin")
return True
except Group.DoesNotExist:
return False
def is_academic_staff(self):
try:
self.user.groups.get(name="academic_staff")
return True
except Group.DoesNotExist:
return False
@staticmethod
def get_users():
users = UserProfile.objects.all()
choices = []
for user in users:
new_choice = (user, user)
choices.append(new_choice)
choices = sorted(choices, key=lambda user_tuple:
user_tuple[0].full_name())
choices.insert(0, ('', ''))
return choices
def __str__(self):
return '\n'.join((self.full_name(), '<' + self.user.username + '>'))
class Settings(models.Model):
start_semester = models.DateField(null=False, blank=False)
end_semester = models.DateField(null=False, blank=False)
def get_start(self):
return Settings.objects.last().start_semester
def get_end(self):
return Settings.objects.last().end_semester
class Validation():
def hasNumbers(self, string):
if (string is not None):
if any(char.isdigit() for char in string):
return True
return False
else:
return False
def hasLetters(self, number):
if (number is not None):
if any(char.isalpha() for char in number):
return True
return False
else:
return False
def hasSpecialCharacters(self, string):
if (string is not None):
            for character in r'@#$%^&+=/\{[]()}-_+=*!§|':
if character in string:
return True
return False
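# Example: Validation().hasLetters('12345678a') returns True, so
# clean_fields() above would reject that registration number.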
|
fga-gpp-mds/2016.2-SAS_FGA
|
sas/user/models.py
|
Python
|
gpl-3.0
| 4,935
|
from urllib.request import urlopen
import xml.etree.ElementTree as ET
#--testing
url = 'http://py4e-data.dr-chuck.net/comments_42.xml'
#--actual
url = 'http://py4e-data.dr-chuck.net/comments_35445.xml'
data = urlopen(url).read()
tree = ET.fromstring(data)
counts = tree.findall('.//count')
counts = [int(count.text) for count in counts]
print(str(sum(counts)))
|
rlmitchell/coursera
|
py4e/3_using_python_to_access_web_data/ex-5.py
|
Python
|
gpl-3.0
| 364
|
from PySpice.Netlist import SubCircuit, Circuit
from PySpice.Units import *
subcircuit_1N4148 = SubCircuit('1N4148', 1, 2)
subcircuit_1N4148.R('1', 1, 2, 5.827E+9)
subcircuit_1N4148.D('1', 1, 2, '1N4148')
subcircuit_1N4148.model('1N4148', 'D',
IS=4.352E-9, N=1.906, BV=110, IBV=0.0001, RS=0.6458, CJO=7.048E-13,
VJ=0.869, M=0.03, FC=0.5, TT=3.48E-9)
# print str(subcircuit_1N4148)
frequence = 50
period = 1. / frequence
step_time = period / 200
end_time = period * 10
line_peak_voltage = 10
circuit = Circuit('Simple Rectifier', global_nodes=(0, 'out'))
circuit.subcircuit(subcircuit_1N4148)
circuit.V('input', 'in', circuit.gnd, 'DC 0V', 'SIN(0V {}V {}Hz)'.format(line_peak_voltage, frequence))
circuit.X('D', '1N4148', 'in', 'out')
circuit.C('load', 'out', circuit.gnd, micro(100))
circuit.R('load', 'out', circuit.gnd, kilo(1), ac='1k')
circuit.Cload.plus.add_current_probe(circuit)
simulation = circuit.simulation(temperature=25, nominal_temperature=25, pipe=True)
simulation.options(filetype='binary')
simulation.save('V(in)', 'V(out)')
simulation.tran(step_time, end_time)
print(circuit.nodes)
for node in circuit.nodes:
print(repr(node), ':', ' '.join(element.name for element in node.elements))
print(circuit.Cload.plus)
# print repr(circuit.Cload)
# # print circuit.1N4148
# print subcircuit_1N4148['1N4148']
# print circuit.out
print('\n\n')
print(str(simulation))
# python PySpice/test_netlist.py | ngspice -s > data
####################################################################################################
#
# End
#
####################################################################################################
|
thomaslima/PySpice
|
unit-test/todo/test_netlist.py
|
Python
|
gpl-3.0
| 1,702
|
from pyramid.events import NewRequest, subscriber, BeforeRender
from pyramid.i18n import get_localizer, TranslationStringFactory, \
TranslationString
from pyramid.security import authenticated_userid
from lokp.models import DBSession
from lokp.config.customization import get_customization_name
from lokp.models import User
tsf = TranslationStringFactory('lokp')
tsf_deform = TranslationStringFactory('deform')
tsf_colander = TranslationStringFactory('colander')
@subscriber(BeforeRender)
def add_renderer_globals(event):
"""
Thanks to Alexandre Bourget:
http://blog.abourget.net/2011/1/13/pyramid-and-mako:-how-to-do-i18n-the-
pylons-way/
"""
request = event['request']
event['_'] = request.translate
event['localizer'] = request.localizer
@subscriber(NewRequest)
def add_localizer(event):
"""
Thanks to Alexandre Bourget:
http://blog.abourget.net/2011/1/13/pyramid-and-mako:-how-to-do-i18n-the-
pylons-way/
"""
request = event.request
localizer = get_localizer(request)
# Create the customized TranslationFactory
tsf_custom = TranslationStringFactory(
get_customization_name(request=request))
    def auto_translate(string):
        def is_translated(translation):
            # A lookup counts as a translation if the result differs from
            # the (interpolated) original string.
            if isinstance(string, TranslationString):
                return translation != string.interpolate()
            return translation != string
        # Try the translation domains in order: first the customized domain,
        # then the 'lokp', 'deform' and 'colander' domains.
        for factory in (tsf_custom, tsf, tsf_deform, tsf_colander):
            translation = localizer.translate(factory(string))
            if is_translated(translation):
                return translation
        # If no translation was found, return the string as it is.
        if isinstance(string, TranslationString):
            # If it is a TranslationString, return it interpolated
            return string.interpolate()
        return string
request.localizer = localizer
request.translate = auto_translate
@subscriber(NewRequest)
def add_user(event):
def _get_user(request):
userid = authenticated_userid(request)
# log.debug("Found user: %s" % userid)
if userid is not None:
user = DBSession.query(User).filter(User.username == userid).first()
return user
request = event.request
request.set_property(_get_user, 'user', reify=True)
|
CDE-UNIBE/lokp
|
lokp/subscribers.py
|
Python
|
gpl-3.0
| 3,518
|
#!/usr/bin/python2
import sys
lines = sys.stdin.readlines()
sorted_lines = sorted(lines)
sys.stdout.writelines(sorted_lines)
|
jmahler/shootout
|
sort/python2/sort.py
|
Python
|
gpl-3.0
| 129
|
import json
import pytest
from randovania.game_description import data_reader, data_writer, game_migration
def test_round_trip_small(test_files_dir):
# Setup
with test_files_dir.joinpath("prime2_small_v1.json").open("r") as data_file:
original_data = game_migration.migrate_to_current(json.load(data_file))
game = data_reader.decode_data(original_data)
encoded_data = data_writer.write_game_description(game)
assert encoded_data == original_data
def test_round_trip_area_conflict(test_files_dir):
with test_files_dir.joinpath("prime2_small_v1.json").open("r") as data_file:
json_data = json.load(data_file)
json_data["worlds"][0]["areas"].append(dict(json_data["worlds"][0]["areas"][0]))
with pytest.raises(ValueError):
game_migration.migrate_to_current(json_data)
def test_round_trip_node_conflict(test_files_dir):
with test_files_dir.joinpath("prime2_small_v1.json").open("r") as data_file:
json_data = json.load(data_file)
json_data["worlds"][0]["areas"][0]["nodes"].append(dict(json_data["worlds"][0]["areas"][0]["nodes"][0]))
with pytest.raises(ValueError):
game_migration.migrate_to_current(json_data)
|
henriquegemignani/randovania
|
test/game_description/test_schema_migration.py
|
Python
|
gpl-3.0
| 1,219
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Interface for Dynamips NIO bridge module ("nio_bridge").
http://github.com/GNS3/dynamips/blob/master/README.hypervisor#L538
"""
import asyncio
from .device import Device
class Bridge(Device):
"""
Dynamips bridge.
:param name: name for this bridge
:param node_id: Node instance identifier
:param project: Project instance
:param manager: Parent VM Manager
:param hypervisor: Dynamips hypervisor instance
"""
def __init__(self, name, node_id, project, manager, hypervisor=None):
super().__init__(name, node_id, project, manager, hypervisor)
self._nios = []
@asyncio.coroutine
def create(self):
if self._hypervisor is None:
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
self._hypervisor = yield from self.manager.start_new_hypervisor(working_dir=module_workdir)
yield from self._hypervisor.send('nio_bridge create "{}"'.format(self._name))
self._hypervisor.devices.append(self)
@asyncio.coroutine
def set_name(self, new_name):
"""
Renames this bridge.
:param new_name: New name for this bridge
"""
yield from self._hypervisor.send('nio_bridge rename "{name}" "{new_name}"'.format(name=self._name,
new_name=new_name))
self._name = new_name
@property
def nios(self):
"""
Returns all the NIOs member of this bridge.
:returns: nio list
"""
return self._nios
@asyncio.coroutine
def delete(self):
"""
Deletes this bridge.
"""
if self._hypervisor and self in self._hypervisor.devices:
self._hypervisor.devices.remove(self)
if self._hypervisor and not self._hypervisor.devices:
yield from self._hypervisor.send('nio_bridge delete "{}"'.format(self._name))
@asyncio.coroutine
def add_nio(self, nio):
"""
Adds a NIO as new port on this bridge.
:param nio: NIO instance to add
"""
yield from self._hypervisor.send('nio_bridge add_nio "{name}" {nio}'.format(name=self._name, nio=nio))
self._nios.append(nio)
@asyncio.coroutine
def remove_nio(self, nio):
"""
Removes the specified NIO as member of this bridge.
:param nio: NIO instance to remove
"""
yield from self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
self._nios.remove(nio)
|
harrijs/gns3-server
|
gns3server/modules/dynamips/nodes/bridge.py
|
Python
|
gpl-3.0
| 3,328
|
def function6(a, *param):
print "variadicArgs"
def function7(a, *args, **kwargs):
print "variadicAndKeywordArgs"
|
bblfsh/python-driver
|
fixtures/vararg_python2.py
|
Python
|
gpl-3.0
| 121
|
#!/usr/bin/env python
"""
================================================
playerLocomotionCommand.py - Player Pos2D Client
================================================
Sends velocity commands to player
"""
import sys
class locomotionCommandHandler:
def __init__(self, proj, shared_data):
try:
self.p = shared_data['PlayerPos2D']
except KeyError:
print "(LOCO) ERROR: Player doesn't seem to be initialized!"
sys.exit(-1)
pass
def sendCommand(self, cmd):
""" Send command to player. Arguments depend on robot model. """
self.p.set_cmd_vel(cmd[0], cmd[1], 0, 1)
|
jadecastro/LTLMoP
|
src/lib/handlers/deprecated/locomotionCommand/playerLocomotionCommand.py
|
Python
|
gpl-3.0
| 664
|
from DE_code import DE_opt, numpy
from optTune import get_F_vals_at_specified_OFE_budgets
def Ros_ND(x) :
"gerneralised Rossenbrock function"
return sum([100*(x[ii+1]-x[ii]**2)**2 + (1-x[ii])**2 for ii in range(len(x)-1)])
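# The generalised Rosenbrock function has its global minimum f(x) = 0 at
# x = (1, ..., 1), which makes it a standard benchmark for optimisers.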
prob_d = 6
def run_DE_on_Ros_ND(CPV_tuple, OFE_budgets, randomSeed):
X_min, f_best_hist, X_hist, F_hist = DE_opt(
objfun = Ros_ND,
x_lb = -5.0 * numpy.ones(prob_d),
x_ub = 5.0 * numpy.ones(prob_d),
Np = int(CPV_tuple[0]),
Cr = CPV_tuple[1],
F = CPV_tuple[2],
evals = max(OFE_budgets),
printLevel=0
)
F = numpy.array(f_best_hist)
OFEs_made = int(CPV_tuple[0])*numpy.arange(1,len(X_hist)+1)
return get_F_vals_at_specified_OFE_budgets(F, OFEs_made, OFE_budgets)
def CPV_validity_checks(CPV_array, OFE_budget):
'check tuning constraints'
N, Cr, F = CPV_array
if OFE_budget < N :
return False, 'OFE_budget < N'
if N < 5:
return False, 'N < 5'
if Cr < 0 or Cr > 1 :
return False, 'Cr not in [0,1]'
if F < 0:
return False, 'F < 0'
return True, ""
# initialization bounds
CPV_lb = numpy.array([ 5, 0.0, 0.0 ])
CPV_ub = numpy.array([ 50, 1.0, 1.0 ])
OFE_budgets_to_tune_under = numpy.logspace(1,3,30).astype(int)
sampleSizes = [2,8,20]
tuningBudget = 50*1000*30 # tuning budget is equivalent to assessing 50 CPV tuples up to 1000 OFEs using 30 resampling runs each
|
hamish2014/optTune
|
examples/DE_tuning_setup.py
|
Python
|
gpl-3.0
| 1,445
|
from plone.indexer.decorator import indexer
from Products.CMFCore.interfaces import IContentish
from Products.CMFCore.utils import getToolByName
@indexer(IContentish)
def get_last_modified_user(object, **kw):
pr = getToolByName(object, "portal_repository")
history = pr.getHistoryMetadata(object)
if history:
revisions = history.getLength(countPurged=False)
vdatafull = history.retrieve(revisions-1, countPurged=False)
vdata = vdatafull['metadata']
modifier = vdata['sys_metadata']['principal']
return modifier
return None
|
esteele/Plone-Users-Group-Browser-Views-Demo
|
src/demo.testproduct/src/demo/testproduct/index.py
|
Python
|
gpl-3.0
| 582
|
"""Runs all tests."""
import os
import sys
import glob
import unittest
def main():
"""Runs the tests."""
failurestrings = []
for test in glob.glob('test_*.py'):
sys.stderr.write('\nRunning tests in {0}...\n'.format(test))
test = os.path.splitext(test)[0]
suite = unittest.TestLoader().loadTestsFromName(test)
result = unittest.TestResult()
suite.run(result)
if result.wasSuccessful():
sys.stderr.write('All tests were successful.\n')
else:
sys.stderr.write('Test(s) FAILED!\n')
for (_testcase, failstring) in result.failures + result.errors:
failurestrings.append(failstring)
if not failurestrings:
sys.stderr.write('\nTesting complete. All passed successfully.\n')
else:
sys.stderr.write('\nTesting complete. Failed on the following:\n')
for fstring in failurestrings:
sys.stderr.write('\n*********\n{0}\n********\n'.format(fstring))
if __name__ == '__main__':
main()
|
jbloomlab/phydms
|
tests/run_tests.py
|
Python
|
gpl-3.0
| 1,044
|
import operator
import re
from collections import namedtuple
# BEGIN operator flags
# operator precedence
PREC_ADD = 0
PREC_MUL = 1
PREC_MOD = 2
PREC_EXP = 3
# associativity
ASSOC_LEFT = 0
ASSOC_RIGHT = 1
#END operator flags
# grouping tokens
GROUP_OPEN_TOKENS = ('(',)
GROUP_CLOSE_TOKENS = (')',)
# operators dictionary
# { <operator> : (<precedence>, <associativity>, <operator>)}
OPERATORS = {
'+' : (PREC_ADD, ASSOC_LEFT, operator.add),
    '-' : (PREC_ADD, ASSOC_LEFT, lambda x,y: operator.sub(y,x)), # work around subtrahend-first popping
'*' : (PREC_MUL, ASSOC_LEFT, operator.mul),
'/' : (PREC_MUL, ASSOC_LEFT, lambda x,y: operator.truediv(y,x)), # work around denominator-first popping
'%' : (PREC_MOD, ASSOC_LEFT, lambda x,y: operator.mod(y,x)), # workaround divisor-first popping
'^' : (PREC_EXP, ASSOC_LEFT, lambda x,y: operator.pow(y,x)) # workaround power-first popping
}
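# The argument swaps above compensate for pop order during evaluation: for
# '8/2' the RPN form is [8, 2, '/'], the stack pops 2 first and 8 second,
# and truediv(y, x) therefore computes 8/2 = 4.0 instead of 2/8.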
# evaluate an operator given its token and a tuple of arguments
def evaluate_operation(o, arg):
if is_operator(o):
return OPERATORS[o][2](*arg)
else:
raise ValueError("can't evaluate invalid operator " + o)
# operator comparison methods
def compare_precedence(o1, o2):
    if is_operator(o1) and is_operator(o2):
        return OPERATORS[o1][0] - OPERATORS[o2][0]
    else:
        raise ValueError("can't compare precedence between invalid operator " + o1 + " or " + o2)
def compare_associativity(o1, o2):
    if is_operator(o1) and is_operator(o2):
        return OPERATORS[o1][1] - OPERATORS[o2][1]
    else:
        raise ValueError("can't compare associativity between invalid operator " + o1 + " or " + o2)
# validation methods
def is_operator(o):
return o in OPERATORS
def is_group_open_token(t):
return t in GROUP_OPEN_TOKENS
def is_group_close_token(t):
return t in GROUP_CLOSE_TOKENS
def is_group_token(t):
return is_group_open_token(t) or is_group_close_token(t)
def is_numerical_token(t):
    try:
        int(t)
        return True
    except ValueError:
        try:
            float(t)
            return True
        except ValueError:
            return False
# evaluate an rpn iterator
def evaluate_rpn(rpn_iterator):
eval_stack = []
for t in rpn_iterator:
if is_operator(t):
if len(eval_stack) < 2:
if len(eval_stack) == 0:
raise ValueError("Invalid operation " + t + " on empty stack.")
else:
raise ValueError("invalid operation" + t + " on stack (" + " ".join([str(i) for i in eval_stack]) + "): evaluation stack is too short")
else:
eval_stack.append(evaluate_operation(t, (eval_stack.pop(), eval_stack.pop())))
else:
eval_stack.append(t)
return eval_stack[-1]
# generate an iterator that returns tokens in RPN order
def rpn_iterator(token_iterator):
operator_stack = []
for t in token_iterator:
if is_numerical_token(t):
try:
yield int(t)
except ValueError:
yield float(t)
elif is_group_token(t):
if is_group_open_token(t):
operator_stack.append(t)
else:
while not is_group_open_token(operator_stack[-1]):
yield operator_stack.pop()
operator_stack.pop()
elif is_operator(t):
if not operator_stack:
operator_stack.append(t)
else:
while operator_stack and is_operator(operator_stack[-1]):
                    if (compare_associativity(t, operator_stack[-1]) == 0 and compare_precedence(t, operator_stack[-1]) == 0) or compare_precedence(t, operator_stack[-1]) < 0:
yield operator_stack.pop()
else:
break
operator_stack.append(t)
else:
raise ValueError("invalid token " + t)
while operator_stack:
yield operator_stack.pop()
# return an iterator of parsed input
def token_iterator(expression):
tokenization_pattern = re.compile(r"""
(?:(?:(?<!\d)-)?\d+(?:\.\d+)?) # matches any positive or negative real number
| (?:\D) # matches any operators
""", re.VERBOSE)
return tokenization_pattern.finditer(expression)
def calculate(expression):
try:
return evaluate_rpn(rpn_iterator(t.group(0) for t in token_iterator(expression)))
except ValueError as E:
print(E)
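# Minimal usage sketch. The tokenizer treats every non-digit character,
# including spaces, as a token, so expressions must be written without
# spaces. Note that '^' is declared left-associative above, so '2^3^2'
# evaluates as (2^3)^2.
#
#   calculate('3+4*2/(1-5)^2')  # -> 3.5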
|
ringulreith/tidbits
|
ShuntingYardCalculator.py
|
Python
|
gpl-3.0
| 4,491
|
import sys, os
sys.path.append(os.path.join(sys.path[0],'core','glsl', '__global__'))
import pkgutil
import inspect
__all__ = []
for loader, name, is_pkg in pkgutil.walk_packages(__path__):
module = loader.find_module(name).load_module(name)
for name, value in inspect.getmembers(module):
if name.startswith('__'):
continue
globals()[name] = value
__all__.append(name)
|
elmeunick9/UPBGE-CommunityAddon
|
project/core/glsl/__init__.py
|
Python
|
gpl-3.0
| 390
|
#!/usr/bin/env python3
'''
Export database tables to json files.
Copyright 2000-2016 GoodCrypto
Last modified: 2016-05-30
'''
from __future__ import unicode_literals
import os, re, sys, tarfile
from traceback import format_exc
from django.conf import settings
from django.core import serializers
from django.db.models.loading import get_models
import django
django.setup()
from reinhardt import get_json_dir
from syr import text_file
from syr.log import get_log
from syr.fs import match_parent_owner
log = get_log()
def reloadable_models():
''' Returns reloadable models.
Returns a list of tuples, (app_name, model, table_name). '''
models = []
for model in get_models():
app_name = model._meta.app_label.lower()
table_name = model._meta.object_name.lower()
if app_name == 'admin' or app_name == 'contenttypes' or app_name == 'sessions':
pass
elif app_name == 'auth' and (table_name == 'message' or table_name == 'permission'):
pass
else:
models.append((app_name, model, table_name,))
log('reloadable models: {}'.format(models))
return models
def non_django_models():
''' Returns all non django models.
Returns a list of tuples, (app_name, model, table_name). '''
models = []
for model in get_models():
app_name = model._meta.app_label.lower()
table_name = model._meta.object_name.lower()
model_string = '{}'.format(model)
m = re.match("<class '(.*?)'>", model_string)
if m:
model_name = m.group(1)
else:
model_name = None
if (model_name and model_name.startswith('django')):
pass
else:
models.append((app_name, model, table_name,))
log('non-django models: {}'.format(models))
return models
def export_reloadable_models():
'''Export the models which can be reloaded to create the database.
The admin, auth.message, auth.permission, contenttypes, and sessions
models and tables cause challenges when reloaded.
The auth.group is stripped of permissions as the permissions are non importable.
'''
def strip_group_permissions():
'''Strip the permissions from the group table.
The permissions must be manually recreated when you reload
the database from scratch.'''
try:
new_lines = []
permissions_started = False
fullname = os.path.join(get_json_dir(), get_json_filename('auth', 'group'))
lines = text_file.read(fullname)
for line in lines:
if line.find('"permissions": [') >= 0:
permissions_started = True
new_lines.append(line)
elif line.find(']') >= 0:
permissions_started = False
new_lines.append(line)
elif not permissions_started:
new_lines.append(line)
text_file.write(fullname, new_lines)
log('group permissions stripped')
except:
log(format_exc())
print(format_exc())
clean_json_files()
    ok = export_models(reloadable_models())
    if ok:
        log('all models exported ok')
    else:
        log("one or more models didn't export")
    strip_group_permissions()
    return ok
def export_non_django_models():
    '''Export the non-django models.'''
django.setup()
clean_json_files()
return export_models(non_django_models())
def clean_json_files():
'''Erase all the json files.'''
create_json_dir()
filenames = os.listdir(get_json_dir())
if filenames:
for filename in filenames:
if filename.endswith('.json'):
os.remove(os.path.join(get_json_dir(), filename))
def export_models(models):
'''Export the models to json files.'''
ok = True
if models:
for app_name, model, table_name in models:
if not export_data(model, app_name, table_name):
ok = False
log('unable to dump %r:%r' % (app_name, table_name))
else:
ok = False
log('no models defined')
return ok
def export_data(model, app_name, table_name):
ok = True
try:
create_json_dir()
data = serializers.serialize('json', model.objects.all(), indent=4)
log('exported %r from %r' % (len(model.objects.all()), table_name))
full_path = os.path.join(get_json_dir(), get_json_filename(app_name, table_name))
        with open(full_path, 'w') as out:
            out.write(data)
match_parent_owner(full_path)
os.chmod(full_path, 0o660)
log('finished exporting %s' % table_name)
except:
ok = False
log('unable to export {} {} {}'.format(model, app_name, table_name))
return ok
def compress_files(compress_name=settings.TOP_LEVEL_DOMAIN):
ok = True
try:
'''
# a little magic knowledge to keep good crypto's databases separate
if compress_name.lower() == 'goodcrypto':
database_name = settings.DATABASES['default']['NAME']
if database_name.lower().endswith('server.sqlite'):
compress_name = '{}-server'.format(settings.TOP_LEVEL_DOMAIN)
'''
compressed_filename = os.path.join(get_json_dir(), '{}.json.tgz'.format(compress_name))
log('started to compress json files to %s' % compressed_filename)
tar = tarfile.open(compressed_filename, 'w:gz')
# compress all of the json files, without their directory structure
file_list = os.listdir(get_json_dir())
for filename in file_list:
if filename.endswith('.json'):
full_path = os.path.join(get_json_dir(), filename)
tar.add(full_path, arcname=filename, recursive=False)
tar.close()
match_parent_owner(compressed_filename)
os.chmod(compressed_filename, 0o660)
ok = True
# verify the files were compressed successfully
tar = tarfile.open(compressed_filename, 'r:gz')
for tarinfo in tar:
try:
full_path = os.path.join(get_json_dir(), tarinfo.name)
statinfo = os.stat(full_path)
if statinfo.st_size != tarinfo.size:
ok = False
log(full_path + ' size does not match tar file')
else:
os.remove(full_path)
except:
ok = False
log(format_exc())
tar.close()
log('finished compressing json files ok: %r' % ok)
if ok and file_list is not None:
# remove uncompressed files
for filename in file_list:
if filename.endswith('.json'):
try:
os.remove(os.path.join(get_json_dir(), filename))
except:
pass
log('finished deleting json files')
except:
ok = False
log(format_exc())
return ok
def get_json_filename(app_name, table_name):
return '%s.%s.json' % (app_name, table_name)
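# e.g. get_json_filename('auth', 'group') -> 'auth.group.json'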
def create_json_dir():
dir = get_json_dir()
if not os.path.exists(dir):
os.mkdir(dir)
match_parent_owner(get_json_dir())
os.chmod(get_json_dir(), 0o770)
def main(arg='reloadable'):
django.setup()
if arg == 'reloadable':
ok = export_reloadable_models()
if ok:
compress_files()
elif arg == 'non-django':
ok = export_non_django_models()
return ok
if __name__ == '__main__':
if len(sys.argv) > 1:
arg = sys.argv[1]
else:
arg = 'reloadable'
ok = main(arg=arg)
code = int(not ok)
sys.exit(code)
|
goodcrypto/goodcrypto-libs
|
reinhardt/export_db_to_json.py
|
Python
|
gpl-3.0
| 7,886
|
# -*- coding: utf-8 -*-
#
# django-ponies
# Sample Django project to get started
#
# Inspired by mibou
#
# Copyright (C) 2016 euhmeuh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Django settings for ponies project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
APP_VERSION = '0.2'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = str(os.environ.get('DJANGO_SECRET_KEY', ''))
# SECURITY WARNING: don't run with debug turned on in production!
# Note: bool() on any non-empty string such as 'False' would be True, so an
# explicit comparison against known truthy values is used instead.
DEBUG = os.environ.get('DJANGO_DEBUG', '').lower() in ('1', 'true', 'yes')
ALLOWED_HOSTS = [
]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ponies.apps.main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_ROOT, 'templates'),
],
'APP_DIRS': False,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'ponies.context_processors.settings'
],
'builtins': ['pyjade.ext.django.templatetags'],
'loaders': [
('pyjade.ext.django.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
))
]
},
},
]
WSGI_APPLICATION = 'ponies.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default=os.environ.get('DATABASE_URL'))
}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static/')
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
euhmeuh/django-ponies
|
ponies/settings.py
|
Python
|
gpl-3.0
| 4,406
|
# -*- coding: utf-8 -*-
"""
Retrieves a list of scanning algorithms from the given web server, and allows the user to execute one of them
"""
import os
import urllib
import scanconfig
def import_URL(URL):
exec urllib.urlopen(URL).read() in globals()
menurl = scanconfig.cte_web_root+"/scan_exes"
print "Running on "+os.name
print "The menu chooser will be downloaded from "+menurl+".py"
print "The list of avaliable algorithms are here: "+menurl
import_URL(menurl+".py")
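# scan_id_to_execute is expected to be defined by the menu script downloaded
# above; judging from the check below, 0 presumably means no selection.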
if (scan_id_to_execute != 0):
scanurl=scanconfig.cte_web_root+"/scan_exes/"+str(scan_id_to_execute)
print "The algorithm to be downloaded is "+scanurl+".py"
print "It corresponds to the one described at "+scanurl
import_URL(scanurl+".py")
|
EST-EAST/FoVScanTester
|
launcher.py
|
Python
|
gpl-3.0
| 738
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import copy
from functools import partial
from operator import attrgetter
from future_builtins import map
from calibre.ebooks.metadata import author_to_author_sort
from calibre.library.field_metadata import TagsIcons
from calibre.utils.config_base import tweaks
from calibre.utils.icu import sort_key
CATEGORY_SORTS = ('name', 'popularity', 'rating') # This has to be a tuple not a set
class Tag(object):
__slots__ = ('name', 'original_name', 'id', 'count', 'state', 'is_hierarchical',
'is_editable', 'is_searchable', 'id_set', 'avg_rating', 'sort',
'use_sort_as_name', 'tooltip', 'icon', 'category')
def __init__(self, name, id=None, count=0, state=0, avg=0, sort=None,
tooltip=None, icon=None, category=None, id_set=None,
is_editable=True, is_searchable=True, use_sort_as_name=False):
self.name = self.original_name = name
self.id = id
self.count = count
self.state = state
self.is_hierarchical = ''
self.is_editable = is_editable
self.is_searchable = is_searchable
self.id_set = id_set if id_set is not None else set([])
self.avg_rating = avg/2.0 if avg is not None else 0
self.sort = sort
self.use_sort_as_name = use_sort_as_name
if tooltip is None:
tooltip = '(%s:%s)'%(category, name)
if self.avg_rating > 0:
if tooltip:
tooltip = tooltip + ': '
tooltip = _('%(tt)sAverage rating is %(rating)3.1f')%dict(
tt=tooltip, rating=self.avg_rating)
self.tooltip = tooltip
self.icon = icon
self.category = category
def __unicode__(self):
return u'%s:%s:%s:%s:%s:%s'%(self.name, self.count, self.id, self.state,
self.category, self.tooltip)
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
return str(self)
def find_categories(field_metadata):
for category, cat in field_metadata.iteritems():
if (cat['is_category'] and cat['kind'] not in {'user', 'search'}):
yield (category, cat['is_multiple'].get('cache_to_list', None), False)
elif (cat['datatype'] == 'composite' and
cat['display'].get('make_category', False)):
yield (category, cat['is_multiple'].get('cache_to_list', None), True)
def create_tag_class(category, fm, icon_map):
cat = fm[category]
dt = cat['datatype']
icon = None
label = fm.key_to_label(category)
if icon_map:
if not fm.is_custom_field(category):
if category in icon_map:
icon = icon_map[label]
else:
icon = icon_map['custom:']
icon_map[category] = icon
is_editable = category not in {'news', 'rating', 'languages', 'formats',
'identifiers'} and dt != 'composite'
if (tweaks['categories_use_field_for_author_name'] == 'author_sort' and
(category == 'authors' or
(cat['display'].get('is_names', False) and
cat['is_custom'] and cat['is_multiple'] and
dt == 'text'))):
use_sort_as_name = True
else:
use_sort_as_name = False
return partial(Tag, use_sort_as_name=use_sort_as_name, icon=icon,
is_editable=is_editable, category=category)
def clean_user_categories(dbcache):
user_cats = dbcache.pref('user_categories', {})
new_cats = {}
for k in user_cats:
comps = [c.strip() for c in k.split('.') if c.strip()]
if len(comps) == 0:
i = 1
while True:
if unicode(i) not in user_cats:
new_cats[unicode(i)] = user_cats[k]
break
i += 1
else:
new_cats['.'.join(comps)] = user_cats[k]
try:
if new_cats != user_cats:
dbcache.set_pref('user_categories', new_cats)
except:
pass
return new_cats
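# e.g. a user category keyed ' a . b ' is normalised to 'a.b'; keys that
# reduce to nothing are renamed to the first unused numeric name ('1', '2', ...).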
def sort_categories(items, sort):
reverse = True
if sort == 'popularity':
key=attrgetter('count')
elif sort == 'rating':
key=attrgetter('avg_rating')
else:
key=lambda x:sort_key(x.sort or x.name)
reverse=False
items.sort(key=key, reverse=reverse)
return items
def get_categories(dbcache, sort='name', book_ids=None, icon_map=None):
if icon_map is not None and type(icon_map) != TagsIcons:
        raise TypeError('icon_map passed to get_categories must be of type TagsIcons')
if sort not in CATEGORY_SORTS:
raise ValueError('sort ' + sort + ' not a valid value')
fm = dbcache.field_metadata
book_rating_map = dbcache.fields['rating'].book_value_map
lang_map = dbcache.fields['languages'].book_value_map
categories = {}
book_ids = frozenset(book_ids) if book_ids else book_ids
pm_cache = {}
def get_metadata(book_id):
ans = pm_cache.get(book_id)
if ans is None:
ans = pm_cache[book_id] = dbcache._get_proxy_metadata(book_id)
return ans
bids = None
for category, is_multiple, is_composite in find_categories(fm):
tag_class = create_tag_class(category, fm, icon_map)
if is_composite:
if bids is None:
bids = dbcache._all_book_ids() if book_ids is None else book_ids
cats = dbcache.fields[category].get_composite_categories(
tag_class, book_rating_map, bids, is_multiple, get_metadata)
elif category == 'news':
cats = dbcache.fields['tags'].get_news_category(tag_class, book_ids)
else:
cat = fm[category]
brm = book_rating_map
if cat['datatype'] == 'rating' and category != 'rating':
brm = dbcache.fields[category].book_value_map
cats = dbcache.fields[category].get_categories(
tag_class, brm, lang_map, book_ids)
if (category != 'authors' and cat['datatype'] == 'text' and
cat['is_multiple'] and cat['display'].get('is_names', False)):
for item in cats:
item.sort = author_to_author_sort(item.sort)
sort_categories(cats, sort)
categories[category] = cats
# Needed for legacy databases that have multiple ratings that
# map to n stars
for r in categories['rating']:
for x in tuple(categories['rating']):
if r.name == x.name and r.id != x.id:
r.id_set |= x.id_set
r.count = r.count + x.count
categories['rating'].remove(x)
break
# User categories
user_categories = clean_user_categories(dbcache).copy()
if user_categories:
# We want to use same node in the user category as in the source
# category. To do that, we need to find the original Tag node. There is
# a time/space tradeoff here. By converting the tags into a map, we can
# do the verification in the category loop much faster, at the cost of
# temporarily duplicating the categories lists.
taglist = {}
for c, items in categories.iteritems():
taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), items))
muc = dbcache.pref('grouped_search_make_user_categories', [])
gst = dbcache.pref('grouped_search_terms', {})
for c in gst:
if c not in muc:
continue
user_categories[c] = []
for sc in gst[c]:
if sc in categories.keys():
for t in categories[sc]:
user_categories[c].append([t.name, sc, 0])
gst_icon = icon_map['gst'] if icon_map else None
for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
items = []
names_seen = {}
for name, label, ign in user_categories[user_cat]:
n = icu_lower(name)
if label in taglist and n in taglist[label]:
if user_cat in gst:
# for gst items, make copy and consolidate the tags by name.
if n in names_seen:
t = names_seen[n]
t.id_set |= taglist[label][n].id_set
t.count += taglist[label][n].count
t.tooltip = t.tooltip.replace(')', ', ' + label + ')')
else:
t = copy.copy(taglist[label][n])
t.icon = gst_icon
names_seen[t.name] = t
items.append(t)
else:
items.append(taglist[label][n])
                # else: do nothing, so as not to include nodes with zero counts
cat_name = '@' + user_cat # add the '@' to avoid name collision
# Not a problem if we accumulate entries in the icon map
if icon_map is not None:
icon_map[cat_name] = icon_map['user:']
categories[cat_name] = sort_categories(items, sort)
# ### Finally, the saved searches category ####
items = []
icon = None
if icon_map and 'search' in icon_map:
icon = icon_map['search']
queries = dbcache._search_api.saved_searches.queries
for srch in sorted(queries, key=sort_key):
items.append(Tag(srch, tooltip=queries[srch], sort=srch, icon=icon,
category='search', is_editable=False))
if len(items):
categories['search'] = items
return categories
|
sharad/calibre
|
src/calibre/db/categories.py
|
Python
|
gpl-3.0
| 9,987
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009-2021 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Classes and functions for interfacing with a Davis VantagePro, VantagePro2,
or VantageVue weather station"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import logging
import struct
import sys
import time
import six
from six import int2byte, indexbytes, byte2int
from six.moves import map
from six.moves import zip
import weeutil.weeutil
import weewx.drivers
import weewx.engine
import weewx.units
from weeutil.weeutil import to_int, to_sorted_string
from weewx.crc16 import crc16
log = logging.getLogger(__name__)
DRIVER_NAME = 'Vantage'
DRIVER_VERSION = '3.2.3'
def loader(config_dict, engine):
return VantageService(engine, config_dict)
def configurator_loader(config_dict): # @UnusedVariable
return VantageConfigurator()
def confeditor_loader():
return VantageConfEditor()
# A few handy constants:
_ack = b'\x06'
_resend = b'\x15' # NB: The Davis documentation gives this code as 0x21, but it's actually decimal 21
#===============================================================================
# class BaseWrapper
#===============================================================================
class BaseWrapper(object):
"""Base class for (Serial|Ethernet)Wrapper"""
def __init__(self, wait_before_retry, command_delay):
self.wait_before_retry = wait_before_retry
self.command_delay = command_delay
def read(self, nbytes=1):
raise NotImplementedError
def write(self, buf):
raise NotImplementedError
def flush_input(self):
raise NotImplementedError
#===============================================================================
# Primitives for working with the Davis Console
#===============================================================================
def wakeup_console(self, max_tries=3):
"""Wake up a Davis Vantage console.
This call has three purposes:
1. Wake up a sleeping console;
2. Cancel pending LOOP data (if any);
3. Flush the input buffer
Note: a flushed buffer is important before sending a command; we want to make sure
the next received character is the expected ACK.
If unsuccessful, an exception of type weewx.WakeupError is thrown"""
for count in range(max_tries):
try:
# Wake up console and cancel pending LOOP data.
# First try a gentle wake up
self.write(b'\n')
_resp = self.read(2)
if _resp == b'\n\r': # LF, CR = 0x0a, 0x0d
# We're done; the console accepted our cancel LOOP command; nothing to flush
log.debug("Gentle wake up of console successful")
return
# That didn't work. Try a rude wake up.
# Flush any pending LOOP packets
self.flush_input()
# Look for the acknowledgment of the sent '\n'
_resp = self.read(2)
if _resp == b'\n\r':
log.debug("Rude wake up of console successful")
return
except weewx.WeeWxIOError:
pass
log.debug("Retry #%d failed", count)
print("Unable to wake up console... sleeping")
time.sleep(self.wait_before_retry)
print("Unable to wake up console... retrying")
log.error("Unable to wake up console")
raise weewx.WakeupError("Unable to wake up Vantage console")
def send_data(self, data):
"""Send data to the Davis console, waiting for an acknowledging <ACK>
If the <ACK> is not received, no retry is attempted. Instead, an exception
of type weewx.WeeWxIOError is raised
data: The data to send, as a byte string"""
self.write(data)
# Look for the acknowledging ACK character
_resp = self.read()
if _resp != _ack:
log.error("No <ACK> received from console")
raise weewx.WeeWxIOError("No <ACK> received from Vantage console")
def send_data_with_crc16(self, data, max_tries=3):
"""Send data to the Davis console along with a CRC check, waiting for an acknowledging <ack>.
If none received, resend up to max_tries times.
data: The data to send, as a byte string"""
# Calculate the crc for the data:
_crc = crc16(data)
# ...and pack that on to the end of the data in big-endian order:
_data_with_crc = data + struct.pack(">H", _crc)
# Retry up to max_tries times:
for count in range(max_tries):
try:
self.write(_data_with_crc)
# Look for the acknowledgment.
_resp = self.read()
if _resp == _ack:
return
except weewx.WeeWxIOError:
pass
log.debug("send_data_with_crc16; try #%d", count + 1)
log.error("Unable to pass CRC16 check while sending data")
raise weewx.CRCError("Unable to pass CRC16 check while sending data to Vantage console")
def send_command(self, command, max_tries=3):
"""Send a command to the console, then look for the byte string 'OK' in the response.
Any response from the console is split on \n\r characters and returned as a list."""
for count in range(max_tries):
try:
self.wakeup_console(max_tries=max_tries)
self.write(command)
# Takes some time for the Vantage to react and fill up the buffer. Sleep for a bit:
time.sleep(self.command_delay)
# Can't use function serial.readline() because the VP responds with \n\r, not just \n.
# So, instead find how many bytes are waiting and fetch them all
nc = self.queued_bytes()
_buffer = self.read(nc)
# Split the buffer on the newlines
_buffer_list = _buffer.strip().split(b'\n\r')
# The first member should be the 'OK' in the VP response
if _buffer_list[0] == b'OK':
# Return the rest:
return _buffer_list[1:]
except weewx.WeeWxIOError:
# Caught an error. Keep trying...
pass
log.debug("send_command; try #%d failed", count + 1)
log.error("Max retries exceeded while sending command %s", command)
raise weewx.RetriesExceeded("Max retries exceeded while sending command %s" % command)
def get_data_with_crc16(self, nbytes, prompt=None, max_tries=3):
"""Get a packet of data and do a CRC16 check on it, asking for retransmit if necessary.
It is guaranteed that the length of the returned data will be of the requested length.
An exception of type CRCError will be thrown if the data cannot pass the CRC test
in the requested number of retries.
nbytes: The number of bytes (including the 2 byte CRC) to get.
prompt: Any string to be sent before requesting the data. Default=None
max_tries: Number of tries before giving up. Default=3
returns: the packet data as a byte string. The last 2 bytes will be the CRC"""
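        # Note: because the sender appends the big-endian CRC to the data,
        # the CRC of an intact buffer (data + CRC) is zero, which is what the
        # crc16(_buffer) == 0 test below relies on.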
if prompt:
self.write(prompt)
first_time = True
_buffer = b''
for count in range(max_tries):
try:
if not first_time:
self.write(_resend)
_buffer = self.read(nbytes)
if crc16(_buffer) == 0:
return _buffer
log.debug("Get_data_with_crc16; try #%d failed. CRC error", count + 1)
except weewx.WeeWxIOError as e:
log.debug("Get_data_with_crc16; try #%d failed: %s", count + 1, e)
first_time = False
if _buffer:
log.error("Unable to pass CRC16 check while getting data")
raise weewx.CRCError("Unable to pass CRC16 check while getting data")
else:
log.debug("Timeout in get_data_with_crc16")
raise weewx.WeeWxIOError("Timeout in get_data_with_crc16")
#===============================================================================
# class Serial Wrapper
#===============================================================================
def guard_termios(fn):
"""Decorator function that converts termios exceptions into weewx exceptions."""
# Some functions in the module 'serial' can raise undocumented termios
# exceptions. This catches them and converts them to weewx exceptions.
try:
import termios
def guarded_fn(*args, **kwargs):
try:
return fn(*args, **kwargs)
except termios.error as e:
raise weewx.WeeWxIOError(e)
except ImportError:
def guarded_fn(*args, **kwargs):
return fn(*args, **kwargs)
return guarded_fn
class SerialWrapper(BaseWrapper):
"""Wraps a serial connection returned from package serial"""
def __init__(self, port, baudrate, timeout, wait_before_retry, command_delay):
super(SerialWrapper, self).__init__(wait_before_retry=wait_before_retry,
command_delay=command_delay)
self.port = port
self.baudrate = baudrate
self.timeout = timeout
@guard_termios
def flush_input(self):
self.serial_port.flushInput()
@guard_termios
def flush_output(self):
self.serial_port.flushOutput()
@guard_termios
def queued_bytes(self):
return self.serial_port.inWaiting()
def read(self, chars=1):
import serial
try:
_buffer = self.serial_port.read(chars)
except serial.serialutil.SerialException as e:
log.error("SerialException on read.")
log.error(" **** %s", e)
log.error(" **** Is there a competing process running??")
# Reraise as a Weewx error I/O error:
raise weewx.WeeWxIOError(e)
N = len(_buffer)
if N != chars:
raise weewx.WeeWxIOError("Expected to read %d chars; got %d instead" % (chars, N))
return _buffer
def write(self, data):
import serial
try:
N = self.serial_port.write(data)
except serial.serialutil.SerialException as e:
log.error("SerialException on write.")
log.error(" **** %s", e)
# Reraise as a Weewx error I/O error:
raise weewx.WeeWxIOError(e)
# Python version 2.5 and earlier returns 'None', so it cannot be used to test for completion.
if N is not None and N != len(data):
raise weewx.WeeWxIOError("Expected to write %d chars; sent %d instead" % (len(data), N))
def openPort(self):
import serial
# Open up the port and store it
self.serial_port = serial.Serial(self.port, self.baudrate, timeout=self.timeout)
log.debug("Opened up serial port %s; baud %d; timeout %.2f", self.port, self.baudrate, self.timeout)
def closePort(self):
try:
# This will cancel any pending loop:
self.write(b'\n')
except:
pass
self.serial_port.close()
#===============================================================================
# class EthernetWrapper
#===============================================================================
class EthernetWrapper(BaseWrapper):
"""Wrap a socket"""
def __init__(self, host, port, timeout, tcp_send_delay, wait_before_retry, command_delay):
super(EthernetWrapper, self).__init__(wait_before_retry=wait_before_retry,
command_delay=command_delay)
self.host = host
self.port = port
self.timeout = timeout
self.tcp_send_delay = tcp_send_delay
def openPort(self):
import socket
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(self.timeout)
self.socket.connect((self.host, self.port))
except (socket.error, socket.timeout, socket.herror) as ex:
log.error("Socket error while opening port %d to ethernet host %s.", self.port, self.host)
# Reraise as a weewx I/O error:
raise weewx.WeeWxIOError(ex)
except:
log.error("Unable to connect to ethernet host %s on port %d.", self.host, self.port)
raise
log.debug("Opened up ethernet host %s on port %d. timeout=%s, tcp_send_delay=%s",
self.host, self.port, self.timeout, self.tcp_send_delay)
def closePort(self):
import socket
try:
# This will cancel any pending loop:
self.write(b'\n')
except:
pass
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
def flush_input(self):
"""Flush the input buffer from WeatherLinkIP"""
import socket
try:
# This is a bit of a hack, but there is no analogue to pyserial's flushInput()
# Set socket timeout to 0 to get immediate result
self.socket.settimeout(0)
self.socket.recv(4096)
except (socket.timeout, socket.error):
pass
finally:
# set socket timeout back to original value
self.socket.settimeout(self.timeout)
def flush_output(self):
"""Flush the output buffer to WeatherLinkIP
This function does nothing as there should never be anything left in
the buffer when using socket.sendall()"""
pass
def queued_bytes(self):
"""Determine how many bytes are in the buffer"""
import socket
length = 0
try:
self.socket.settimeout(0)
length = len(self.socket.recv(8192, socket.MSG_PEEK))
except socket.error:
pass
finally:
self.socket.settimeout(self.timeout)
return length
def read(self, chars=1):
"""Read bytes from WeatherLinkIP"""
import socket
_buffer = b''
_remaining = chars
while _remaining:
_N = min(4096, _remaining)
try:
_recv = self.socket.recv(_N)
except (socket.timeout, socket.error) as ex:
log.error("ip-read error: %s", ex)
# Reraise as a weewx I/O error:
raise weewx.WeeWxIOError(ex)
_nread = len(_recv)
if _nread == 0:
raise weewx.WeeWxIOError("Expected %d characters; got zero instead" % (_N,))
_buffer += _recv
_remaining -= _nread
return _buffer
def write(self, data):
"""Write to a WeatherLinkIP"""
import socket
try:
self.socket.sendall(data)
# A delay of 0.0 gives socket write error; 0.01 gives no ack error; 0.05 is OK for weewx program
# Note: a delay of 0.5 s is required for wee_device --logger=logger_info
time.sleep(self.tcp_send_delay)
except (socket.timeout, socket.error) as ex:
log.error("ip-write error: %s", ex)
# Reraise as a weewx I/O error:
raise weewx.WeeWxIOError(ex)
#===============================================================================
# class Vantage
#===============================================================================
class Vantage(weewx.drivers.AbstractDevice):
"""Class that represents a connection to a Davis Vantage console.
The connection to the console will be open after initialization"""
# Various codes used internally by the VP2:
barometer_unit_dict = {0:'inHg', 1:'mmHg', 2:'hPa', 3:'mbar'}
temperature_unit_dict = {0:'degree_F', 1:'degree_10F', 2:'degree_C', 3:'degree_10C'}
altitude_unit_dict = {0:'foot', 1:'meter'}
rain_unit_dict = {0:'inch', 1:'mm'}
wind_unit_dict = {0:'mile_per_hour', 1:'meter_per_second', 2:'km_per_hour', 3:'knot'}
wind_cup_dict = {0:'small', 1:'large'}
rain_bucket_dict = {0:'0.01 inches', 1:'0.2 mm', 2:'0.1 mm'}
transmitter_type_dict = {0:'iss', 1:'temp', 2:'hum', 3:'temp_hum', 4:'wind',
5:'rain', 6:'leaf', 7:'soil', 8:'leaf_soil',
9:'sensorlink', 10:'none'}
repeater_dict = {0:'none', 1:'A', 2:'B', 3:'C', 4:'D',
5:'E', 6:'F', 7:'G', 8:'H'}
listen_dict = {0:'inactive', 1:'active'}
def __init__(self, **vp_dict):
"""Initialize an object of type Vantage.
NAMED ARGUMENTS:
connection_type: The type of connection (serial|ethernet) [Required]
port: The serial port of the VP. [Required if serial/USB
communication]
host: The Vantage network host [Required if Ethernet communication]
baudrate: Baudrate of the port. [Optional. Default 19200]
tcp_port: TCP port to connect to [Optional. Default 22222]
tcp_send_delay: Block after sending data to WeatherLinkIP to allow it
to process the command [Optional. Default is 0.5]
timeout: How long to wait before giving up on a response from the
serial port. [Optional. Default is 4]
wait_before_retry: How long to wait before retrying. [Optional.
Default is 1.2 seconds]
command_delay: How long to wait after sending a command before looking
for acknowledgement. [Optional. Default is 0.5 seconds]
max_tries: How many times to try again before giving up. [Optional.
Default is 4]
iss_id: The station number of the ISS [Optional. Default is 1]
model_type: Vantage Pro model type. 1=Vantage Pro; 2=Vantage Pro2
[Optional. Default is 2]
loop_request: Requested packet type. 1=LOOP; 2=LOOP2; 3=both. [Optional. Default is 1]
"""
log.debug('Driver version is %s', DRIVER_VERSION)
self.hardware_type = None
# These come from the configuration dictionary:
self.max_tries = to_int(vp_dict.get('max_tries', 4))
self.iss_id = to_int(vp_dict.get('iss_id'))
self.model_type = to_int(vp_dict.get('model_type', 2))
if self.model_type not in list(range(1, 3)):
raise weewx.UnsupportedFeature("Unknown model_type (%d)" % self.model_type)
self.loop_request = to_int(vp_dict.get('loop_request', 1))
log.debug("Option loop_request=%d", self.loop_request)
self.save_day_rain = None
self.max_dst_jump = 7200
# Get an appropriate port, depending on the connection type:
self.port = Vantage._port_factory(vp_dict)
# Open it up:
self.port.openPort()
# Read the EEPROM and fill in properties in this instance
self._setup()
log.debug("Hardware name: %s", self.hardware_name)
def openPort(self):
"""Open up the connection to the console"""
self.port.openPort()
def closePort(self):
"""Close the connection to the console. """
self.port.closePort()
def genLoopPackets(self):
"""Generator function that returns loop packets"""
while True:
# Get LOOP packets in big batches. This is necessary because there is
# an undocumented limit to how many LOOP records you can request
# on the VP (somewhere around 220).
for _loop_packet in self.genDavisLoopPackets(200):
yield _loop_packet
def genDavisLoopPackets(self, N=1):
"""Generator function to return N loop packets from a Vantage console
N: The number of packets to generate [default is 1]
yields: up to N loop packets (could be less in the event of a
read or CRC error).
"""
log.debug("Requesting %d LOOP packets.", N)
self.port.wakeup_console(self.max_tries)
if self.loop_request == 1:
# If asking for old-fashioned LOOP1 data, send the older command in case the
# station does not support the LPS command:
self.port.send_data(b"LOOP %d\n" % N)
else:
# Request N packets of type "loop_request":
self.port.send_data(b"LPS %d %d\n" % (self.loop_request, N))
for loop in range(N):
for count in range(self.max_tries):
try:
loop_packet = self._get_packet()
except weewx.WeeWxIOError as e:
log.error("LOOP try #%d; error: %s", count + 1, e)
else:
yield loop_packet
break
else:
log.error("LOOP max tries (%d) exceeded.", self.max_tries)
raise weewx.RetriesExceeded("Max tries exceeded while getting LOOP data.")
def _get_packet(self):
"""Get a single LOOP packet"""
# Fetch a packet...
_buffer = self.port.read(99)
# ... see if it passes the CRC test ...
crc = crc16(_buffer)
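# (By the CRC-16 arithmetic used here, running crc16 over the data plus
# its trailing CRC bytes yields 0 for an intact buffer, so any non-zero
# result signals corruption.)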
if crc:
if weewx.debug > 1:
log.error("LOOP buffer failed CRC check. Calculated CRC=%d" % crc)
if six.PY2:
log.error("Buffer: " + "".join("\\x%02x" % ord(c) for c in _buffer))
else:
log.error("Buffer: %s", _buffer)
raise weewx.CRCError("LOOP buffer failed CRC check")
# ... decode it ...
loop_packet = self._unpackLoopPacket(_buffer[:95])
# .. then return it
return loop_packet
def genArchiveRecords(self, since_ts):
"""A generator function to return archive packets from a Davis Vantage station.
since_ts: A timestamp. All data since (but not including) this time will be returned.
Pass in None for all data
yields: a sequence of dictionaries containing the data
"""
count = 0
while count < self.max_tries:
try:
for _record in self.genDavisArchiveRecords(since_ts):
# Successfully retrieved record. Set count back to zero.
count = 0
since_ts = _record['dateTime']
yield _record
# The generator loop exited. We're done.
return
except weewx.WeeWxIOError as e:
# Problem. Increment retry count
count += 1
log.error("DMPAFT try #%d; error: %s", count, e)
log.error("DMPAFT max tries (%d) exceeded.", self.max_tries)
raise weewx.RetriesExceeded("Max tries exceeded while getting archive data.")
def genDavisArchiveRecords(self, since_ts):
"""A generator function to return archive records from a Davis Vantage station.
This version does not catch any exceptions."""
if since_ts:
since_tt = time.localtime(since_ts)
# NB: note that some of the Davis documentation gives the year offset as 1900.
# From experimentation, 2000 seems to be right, at least for the newer models:
_vantageDateStamp = since_tt[2] + (since_tt[1] << 5) + ((since_tt[0] - 2000) << 9)
_vantageTimeStamp = since_tt[3] * 100 + since_tt[4]
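# A worked example with an assumed date: 2021-06-15 13:45 packs as
#   _vantageDateStamp = 15 + (6 << 5) + ((2021 - 2000) << 9) = 10959
#   _vantageTimeStamp = 13 * 100 + 45 = 1345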
log.debug('Getting archive packets since %s', weeutil.weeutil.timestamp_to_string(since_ts))
else:
_vantageDateStamp = _vantageTimeStamp = 0
log.debug('Getting all archive packets')
# Pack the date and time into a string, little-endian order
_datestr = struct.pack("<HH", _vantageDateStamp, _vantageTimeStamp)
# Save the last good time:
_last_good_ts = since_ts if since_ts else 0
# Get the starting page and index. First, wake up the console...
self.port.wakeup_console(self.max_tries)
# ... request a dump...
self.port.send_data(b'DMPAFT\n')
# ... from the designated date (allow only one try because that's all the console allows):
self.port.send_data_with_crc16(_datestr, max_tries=1)
# Get the response with how many pages and starting index and decode it. Again, allow only one try:
_buffer = self.port.get_data_with_crc16(6, max_tries=1)
(_npages, _start_index) = struct.unpack("<HH", _buffer[:4])
log.debug("Retrieving %d page(s); starting index= %d", _npages, _start_index)
# Cycle through the pages...
for ipage in range(_npages):
# ... get a page of archive data
_page = self.port.get_data_with_crc16(267, prompt=_ack, max_tries=1)
# Now extract each record from the page
for _index in range(_start_index, 5):
# Get the record string buffer for this index:
_record_string = _page[1 + 52 * _index:53 + 52 * _index]
# If the console has been recently initialized, there will
# be unused records, which are filled with 0xff. Detect this
# by looking at the first 4 bytes (the date and time):
if _record_string[0:4] == 4 * b'\xff' or _record_string[0:4] == 4 * b'\x00':
# This record has never been used. We're done.
log.debug("Empty record page %d; index %d", ipage, _index)
return
# Unpack the archive packet from the string buffer:
_record = self._unpackArchivePacket(_record_string)
# Check to see if the time stamps are declining, which would
# signal that we are done.
if _record['dateTime'] is None or _record['dateTime'] <= _last_good_ts - self.max_dst_jump:
# The time stamp is declining. We're done.
log.debug("DMPAFT complete: page timestamp %s less than final timestamp %s",
weeutil.weeutil.timestamp_to_string(_record['dateTime']),
weeutil.weeutil.timestamp_to_string(_last_good_ts))
log.debug("Catch up complete.")
return
# Set the last time to the current time, and yield the packet
_last_good_ts = _record['dateTime']
yield _record
# The starting index for pages other than the first is always zero
_start_index = 0
def genArchiveDump(self, progress_fn=None):
"""
A generator function to return all archive packets in the memory of a Davis Vantage station.
Args:
progress_fn: A function that will be called before every page request. It should have
a single argument: the page number. If set to None, no progress will be reported.
Yields: a sequence of dictionaries containing the data
"""
import weewx.wxformulas
# Wake up the console...
self.port.wakeup_console(self.max_tries)
# ... request a dump...
self.port.send_data(b'DMP\n')
log.debug("Dumping all records.")
# Cycle through the pages...
for ipage in range(512):
# If requested, provide users with some feedback:
if progress_fn:
progress_fn(ipage)
# ... get a page of archive data
_page = self.port.get_data_with_crc16(267, prompt=_ack, max_tries=self.max_tries)
# Now extract each record from the page
for _index in range(5):
# Get the record string buffer for this index:
_record_string = _page[1 + 52 * _index:53 + 52 * _index]
# If the console has been recently initialized, there will
# be unused records, which are filled with 0xff. Detect this
# by looking at the first 4 bytes (the date and time):
if _record_string[0:4] == 4 * b'\xff' or _record_string[0:4] == 4 * b'\x00':
# This record has never been used. Skip it
log.debug("Empty record page %d; index %d", ipage, _index)
continue
# Unpack the raw archive packet:
_record = self._unpackArchivePacket(_record_string)
# Because the dump command does not go through the normal weewx
# engine pipeline, we have to add these important software derived
# variables here.
try:
T = _record['outTemp']
R = _record['outHumidity']
W = _record['windSpeed']
_record['dewpoint'] = weewx.wxformulas.dewpointF(T, R)
_record['heatindex'] = weewx.wxformulas.heatindexF(T, R)
_record['windchill'] = weewx.wxformulas.windchillF(T, W)
except KeyError:
pass
yield _record
def genLoggerSummary(self):
"""A generator function to return a summary of each page in the logger.
yields: An 8-way tuple containing (page, index, year, month, day, hour, minute, timestamp)
"""
# Wake up the console...
self.port.wakeup_console(self.max_tries)
# ... request a dump...
self.port.send_data(b'DMP\n')
log.debug("Starting logger summary.")
# Cycle through the pages...
for _ipage in range(512):
# ... get a page of archive data
_page = self.port.get_data_with_crc16(267, prompt=_ack, max_tries=self.max_tries)
# Now extract each record from the page
for _index in range(5):
# Get the record string buffer for this index:
_record_string = _page[1 + 52 * _index:53 + 52 * _index]
# If the console has been recently initialized, there will
# be unused records, which are filled with 0xff. Detect this
# by looking at the first 4 bytes (the date and time):
if _record_string[0:4] == 4 * b'\xff' or _record_string[0:4] == 4 * b'\x00':
# This record has never been used.
y = mo = d = h = mn = time_ts = None
else:
# Extract the date and time from the raw buffer:
datestamp, timestamp = struct.unpack("<HH", _record_string[0:4])
time_ts = _archive_datetime(datestamp, timestamp)
y = (0xfe00 & datestamp) >> 9 # year
mo = (0x01e0 & datestamp) >> 5 # month
d = (0x001f & datestamp) # day
h = timestamp // 100 # hour
mn = timestamp % 100 # minute
yield (_ipage, _index, y, mo, d, h, mn, time_ts)
log.debug("Vantage: Finished logger summary.")
def getTime(self):
"""Get the current time from the console, returning it as timestamp"""
time_dt = self.getConsoleTime()
return time.mktime(time_dt.timetuple())
def getConsoleTime(self):
"""Return the raw time on the console, uncorrected for DST or timezone."""
# Try up to max_tries times:
for unused_count in range(self.max_tries):
try:
# Wake up the console...
self.port.wakeup_console(max_tries=self.max_tries)
# ... request the time...
self.port.send_data(b'GETTIME\n')
# ... get the binary data. No prompt, only one try:
_buffer = self.port.get_data_with_crc16(8, max_tries=1)
(sec, minute, hr, day, mon, yr, unused_crc) = struct.unpack("<bbbbbbH", _buffer)
return datetime.datetime(yr + 1900, mon, day, hr, minute, sec)
except weewx.WeeWxIOError:
# Caught an error. Keep retrying...
continue
log.error("Max retries exceeded while getting time")
raise weewx.RetriesExceeded("While getting console time")
def setTime(self):
"""Set the clock on the Davis Vantage console"""
for unused_count in range(self.max_tries):
try:
# Wake the console and begin the setTime command
self.port.wakeup_console(max_tries=self.max_tries)
self.port.send_data(b'SETTIME\n')
# Unfortunately, clock resolution is only 1 second, and transmission takes a
# little while to complete, so round the clock up: 0.5 for clock resolution
# and 0.25 for transmission delay
newtime_tt = time.localtime(int(time.time() + 0.75))
# The Davis expects the time in reversed order, and the year is since 1900
_buffer = struct.pack("<bbbbbb", newtime_tt[5], newtime_tt[4], newtime_tt[3], newtime_tt[2],
newtime_tt[1], newtime_tt[0] - 1900)
# Complete the setTime command
self.port.send_data_with_crc16(_buffer, max_tries=1)
log.info("Clock set to %s", weeutil.weeutil.timestamp_to_string(time.mktime(newtime_tt)))
return
except weewx.WeeWxIOError:
# Caught an error. Keep retrying...
continue
log.error("Max retries exceeded while setting time")
raise weewx.RetriesExceeded("While setting console time")
def setDST(self, dst='auto'):
"""Turn DST on or off, or set it to auto.
dst: One of 'auto', 'on' or 'off' """
_dst = dst.strip().lower()
if _dst not in ['auto', 'on', 'off']:
raise weewx.ViolatedPrecondition("Invalid DST setting %s" % dst)
# Set flag whether DST is auto or manual:
man_auto = 0 if _dst == 'auto' else 1
self.port.send_data(b"EEBWR 12 01\n")
self.port.send_data_with_crc16(int2byte(man_auto))
# If DST is manual, set it on or off:
if _dst in ['on', 'off']:
on_off = 0 if _dst == 'off' else 1
self.port.send_data(b"EEBWR 13 01\n")
self.port.send_data_with_crc16(int2byte(on_off))
def setTZcode(self, code):
"""Set the console's time zone code. See the Davis Vantage manual for the table
of preset time zones."""
if code < 0 or code > 46:
raise weewx.ViolatedPrecondition("Invalid time zone code %d" % code)
# Set the GMT_OR_ZONE byte to use TIME_ZONE value
self.port.send_data(b"EEBWR 16 01\n")
self.port.send_data_with_crc16(int2byte(0))
# Set the TIME_ZONE value
self.port.send_data(b"EEBWR 11 01\n")
self.port.send_data_with_crc16(int2byte(code))
def setTZoffset(self, offset):
"""Set the console's time zone to a custom offset.
offset: Offset. This is an integer in hundredths of hours. E.g., -175 would be 1h45m negative offset."""
# Set the GMT_OR_ZONE byte to use GMT_OFFSET value
self.port.send_data(b"EEBWR 16 01\n")
self.port.send_data_with_crc16(int2byte(1))
# Set the GMT_OFFSET value
self.port.send_data(b"EEBWR 14 02\n")
self.port.send_data_with_crc16(struct.pack("<h", offset))
def setWindCupType(self, new_wind_cup_code):
"""Set the wind cup type.
new_wind_cup_code: The new wind cup type. Must be one of 0 or 1
"""
if new_wind_cup_code not in (0, 1):
raise weewx.ViolatedPrecondition("Invalid wind cup code %d" % new_wind_cup_code)
old_setup_bits = self._getEEPROM_value(0x2B)[0]
new_setup_bits = (old_setup_bits & 0xF7) | (new_wind_cup_code << 3)
# Tell the console to put one byte in hex location 0x2B
self.port.send_data(b"EEBWR 2B 01\n")
# Follow it up with the data:
self.port.send_data_with_crc16(int2byte(new_setup_bits), max_tries=1)
# Then call NEWSETUP to get it to stick:
self.port.send_data(b"NEWSETUP\n")
self._setup()
log.info("Wind cup type set to %d (%s)", self.wind_cup_type, self.wind_cup_size)
def setBucketType(self, new_bucket_code):
"""Set the rain bucket type.
new_bucket_code: The new bucket type. Must be one of 0, 1, or 2
"""
if new_bucket_code not in (0, 1, 2):
raise weewx.ViolatedPrecondition("Invalid bucket code %d" % new_bucket_code)
old_setup_bits = self._getEEPROM_value(0x2B)[0]
new_setup_bits = (old_setup_bits & 0xCF) | (new_bucket_code << 4)
# Tell the console to put one byte in hex location 0x2B
self.port.send_data(b"EEBWR 2B 01\n")
# Follow it up with the data:
self.port.send_data_with_crc16(int2byte(new_setup_bits), max_tries=1)
# Then call NEWSETUP to get it to stick:
self.port.send_data(b"NEWSETUP\n")
self._setup()
log.info("Rain bucket type set to %d (%s)", self.rain_bucket_type, self.rain_bucket_size)
def setRainYearStart(self, new_rain_year_start):
"""Set the start of the rain season.
new_rain_year_start: Must be in the closed range 1...12
"""
if not 1 <= new_rain_year_start <= 12:
raise weewx.ViolatedPrecondition("Invalid rain season start %d" % (new_rain_year_start,))
# Tell the console to put one byte in hex location 0x2C
self.port.send_data(b"EEBWR 2C 01\n")
# Follow it up with the data:
self.port.send_data_with_crc16(int2byte(new_rain_year_start), max_tries=1)
self._setup()
log.info("Rain year start set to %d", self.rain_year_start)
def setBarData(self, new_barometer_inHg, new_altitude_foot):
"""Set the internal barometer calibration and altitude settings in the console.
new_barometer_inHg: The local, reference barometric pressure in inHg.
new_altitude_foot: The new altitude in feet."""
new_barometer = int(new_barometer_inHg * 1000.0)
new_altitude = int(new_altitude_foot)
command = b"BAR=%d %d\n" % (new_barometer, new_altitude)
self.port.send_command(command)
self._setup()
log.info("Set barometer calibration.")
def setLatitude(self, latitude_dg):
"""Set the stations latitude.
latitude_dg: Must be in the closed range -90.0...90.0
"""
latitude = int(round((latitude_dg * 10), 0))
if not -900 <= latitude <= 900:
raise weewx.ViolatedPrecondition("Invalid latitude %.1f degree" % (latitude_dg,))
# Tell the console to put one byte in hex location 0x0B
self.port.send_data(b"EEBWR 0B 02\n")
# Follow it up with the data:
self.port.send_data_with_crc16(struct.pack('<BB', latitude & 0xff, (latitude // 256) & 0xff), max_tries=1)
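# (The floor division and masking above yield the little-endian two's-
# complement byte pair; struct.pack('<h', latitude) would be equivalent.)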
# Then call NEWSETUP to get it to stick:
self.port.send_data(b"NEWSETUP\n")
log.info("Station latitude set to %.1f degree", latitude_dg)
def setLongitude(self, longitude_dg):
"""Set the stations longitude.
longitude_dg: Must be in the closed range -180.0...180.0
"""
longitude = int(round((longitude_dg * 10), 0))
if not -1800 <= longitude <= 1800:
raise weewx.ViolatedPrecondition("Invalid longitude %.1f degree" % (longitude_dg,))
# Tell the console to put one byte in hex location 0x0D
self.port.send_data(b"EEBWR 0D 02\n")
# Follow it up with the data:
self.port.send_data_with_crc16(struct.pack('<BB', longitude & 0xff, (longitude // 256) & 0xff), max_tries = 1)
# Then call NEWSETUP to get it to stick:
self.port.send_data(b"NEWSETUP\n")
log.info("Station longitude set to %.1f degree", longitude_dg)
def setArchiveInterval(self, archive_interval_seconds):
"""Set the archive interval of the Vantage.
archive_interval_seconds: The new interval to use in seconds. Must be one of
60, 300, 600, 900, 1800, 3600, or 7200
"""
if archive_interval_seconds not in (60, 300, 600, 900, 1800, 3600, 7200):
raise weewx.ViolatedPrecondition("Invalid archive interval (%d)" % (archive_interval_seconds,))
# The console expects the interval in minutes. Divide by 60.
command = b'SETPER %d\n' % int(archive_interval_seconds // 60)
self.port.send_command(command, max_tries=self.max_tries)
self._setup()
log.info("Archive interval set to %d seconds", archive_interval_seconds)
def setLamp(self, onoff='OFF'):
"""Set the lamp on or off"""
try:
_setting = {'off': b'0', 'on': b'1'}[onoff.lower()]
except KeyError:
raise ValueError("Unknown lamp setting '%s'" % onoff)
_command = b"LAMPS %s\n" % _setting
self.port.send_command(_command, max_tries=self.max_tries)
log.info("Lamp set to '%s'", onoff)
def setTransmitterType(self, new_channel, new_transmitter_type, new_extra_temp, new_extra_hum, new_repeater):
"""Set the transmitter type for one of the eight channels."""
# Default value just for tidiness.
new_temp_hum_bits = 0xFF
# Check arguments are consistent.
if new_channel not in list(range(1, 9)):
raise weewx.ViolatedPrecondition("Invalid channel %d" % new_channel)
if new_repeater not in list(range(0, 9)):
raise weewx.ViolatedPrecondition("Invalid repeater %d" % new_repeater)
if new_transmitter_type not in list(range(0, 11)):
raise weewx.ViolatedPrecondition("Invalid transmitter type %d" % new_transmitter_type)
if self.transmitter_type_dict[new_transmitter_type] in ['temp', 'temp_hum']:
if new_extra_temp not in list(range(1, 9)):
raise weewx.ViolatedPrecondition("Invalid extra temperature number %d" % new_extra_temp)
# Extra temp is origin 0.
new_temp_hum_bits = new_temp_hum_bits & 0xF0 | (new_extra_temp - 1)
if self.transmitter_type_dict[new_transmitter_type] in ['hum', 'temp_hum']:
if new_extra_hum not in list(range(1, 9)):
raise weewx.ViolatedPrecondition("Invalid extra humidity number %d" % new_extra_hum)
# Extra humidity is origin 1.
new_temp_hum_bits = new_temp_hum_bits & 0x0F | (new_extra_hum << 4)
if new_repeater == 0:
new_type_bits = (new_transmitter_type & 0x0F)
else:
new_type_bits = ((new_repeater + 7) << 4) | (new_transmitter_type & 0x0F)
# A transmitter type of 10 indicates that channel does not have a transmitter.
# So, turn off its usetx bit as well. Otherwise, turn it on.
usetx = 1 if new_transmitter_type != 10 else 0
old_usetx_bits = self._getEEPROM_value(0x17)[0]
new_usetx_bits = old_usetx_bits & ~(1 << (new_channel - 1)) | usetx * (1 << (new_channel - 1))
# Each channel uses two bytes. Find the correct starting byte for this channel
start_byte = 0x19 + (new_channel - 1) * 2
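# (For example, channel 1 maps to 0x19, channel 2 to 0x1B, ..., channel 8 to 0x27.)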
# Tell the console to put two bytes in that location.
self.port.send_data(b"EEBWR %X 02\n" % start_byte)
# Follow it up with the two bytes of data, little-endian order:
self.port.send_data_with_crc16(struct.pack('<BB', new_type_bits, new_temp_hum_bits), max_tries=1)
# Now tell the console to put the one byte "usetx" in hex location 0x17
self.port.send_data(b"EEBWR 17 01\n")
# Follow it up with the usetx data:
self.port.send_data_with_crc16(struct.pack('>B', new_usetx_bits), max_tries=1)
# Then call NEWSETUP to get it all to stick:
self.port.send_data(b"NEWSETUP\n")
self._setup()
log.info("Transmitter type for channel %d set to %d (%s), repeater: %s, %s",
new_channel, new_transmitter_type,
self.transmitter_type_dict[new_transmitter_type],
self.repeater_dict[new_repeater], self.listen_dict[usetx])
def setRetransmit(self, new_channel):
"""Set console retransmit channel."""
# Tell the console to put one byte in hex location 0x18
self.port.send_data(b"EEBWR 18 01\n")
# Follow it up with the data:
self.port.send_data_with_crc16(int2byte(new_channel), max_tries=1)
# Then call NEWSETUP to get it to stick:
self.port.send_data(b"NEWSETUP\n")
self._setup()
if new_channel != 0:
log.info("Retransmit set to 'ON' at channel: %d", new_channel)
else:
log.info("Retransmit set to 'OFF'")
def setTempLogging(self, new_tempLogging='AVERAGE'):
"""Set console temperature logging to 'AVERAGE' or 'LAST'."""
try:
_setting = {'LAST': 1, 'AVERAGE': 0}[new_tempLogging.upper()]
except KeyError:
raise ValueError("Unknown console temperature logging setting '%s'" % new_tempLogging.upper())
# Tell the console to put one byte in hex location 0xFFC
self.port.send_data(b"EEBWR FFC 01\n")
# Follow it up with the data:
self.port.send_data_with_crc16(int2byte(_setting), max_tries=1)
# Then call NEWSETUP to get it to stick:
self.port.send_data(b"NEWSETUP\n")
log.info("Console temperature logging set to '%s'", new_tempLogging.upper())
def setCalibrationWindDir(self, offset):
"""Set the on-board wind direction calibration."""
if not -359 <= offset <= 359:
raise weewx.ViolatedPrecondition("Offset %d out of range [-359, 359]." % offset)
# Tell the console to put two bytes in hex location 0x4D
self.port.send_data(b"EEBWR 4D 02\n")
# Follow it up with the data:
self.port.send_data_with_crc16(struct.pack("<h", offset), max_tries=1)
log.info("Wind calibration set to %d", offset)
def setCalibrationTemp(self, variable, offset):
"""Set an on-board temperature calibration."""
# Offset is in tenths of degree Fahrenheit.
if not -12.8 <= offset <= 12.7:
raise weewx.ViolatedPrecondition("Offset %.1f out of range [-12.8, 12.7]." % offset)
byte = struct.pack("b", int(round(offset * 10)))
variable_dict = { 'outTemp': 0x34 }
for i in range(1, 8): variable_dict['extraTemp%d' % i] = 0x34 + i
for i in range(1, 5): variable_dict['soilTemp%d' % i] = 0x3B + i
for i in range(1, 5): variable_dict['leafTemp%d' % i] = 0x3F + i
if variable == "inTemp":
# Inside temp is special: it needs the ones' complement in the next byte.
complement_byte = struct.pack("B", ~int(round(offset * 10)) & 0xFF)
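# A worked example with an assumed offset of +1.5 degrees F:
# round(1.5 * 10) = 15 = 0x0F, so the complement byte is ~15 & 0xFF = 0xF0.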
self.port.send_data(b"EEBWR 32 02\n")
self.port.send_data_with_crc16(byte + complement_byte, max_tries=1)
elif variable in variable_dict:
# Other variables are just sent as-is.
self.port.send_data(b"EEBWR %X 01\n" % variable_dict[variable])
self.port.send_data_with_crc16(byte, max_tries=1)
else:
raise weewx.ViolatedPrecondition("Variable name %s not known" % variable)
log.info("Temperature calibration %s set to %.1f", variable, offset)
def setCalibrationHumid(self, variable, offset):
"""Set an on-board humidity calibration."""
# Offset is in percentage points.
if not -100 <= offset <= 100:
raise weewx.ViolatedPrecondition("Offset %d out of range [-100, 100]." % offset)
byte = struct.pack("b", offset)
variable_dict = { 'inHumid': 0x44, 'outHumid': 0x45 }
for i in range(1, 8):
variable_dict['extraHumid%d' % i] = 0x45 + i
if variable in variable_dict:
self.port.send_data(b"EEBWR %X 01\n" % variable_dict[variable])
self.port.send_data_with_crc16(byte, max_tries=1)
else:
raise weewx.ViolatedPrecondition("Variable name %s not known" % variable)
log.info("Humidity calibration %s set to %d", variable, offset)
def clearLog(self):
"""Clear the internal archive memory in the Vantage."""
for unused_count in range(self.max_tries):
try:
self.port.wakeup_console(max_tries=self.max_tries)
self.port.send_data(b"CLRLOG\n")
log.info("Archive memory cleared.")
return
except weewx.WeeWxIOError:
# Caught an error. Keep retrying...
continue
log.error("Max retries exceeded while clearing log")
raise weewx.RetriesExceeded("While clearing log")
def getRX(self):
"""Returns reception statistics from the console.
Returns a tuple with 5 values: (# of packets, # of missed packets,
# of resynchronizations, the max # of packets received w/o an error,
the # of CRC errors detected.)"""
rx_list = self.port.send_command(b'RXCHECK\n')
if weewx.debug:
assert(len(rx_list) == 1)
# The following is a list of the reception statistics, but the elements are byte strings
rx_list_str = rx_list[0].split()
# Convert to numbers and return as a tuple:
rx_list = tuple(int(x) for x in rx_list_str)
return rx_list
def getBarData(self):
"""Gets barometer calibration data. Returns as a 9 element list."""
_bardata = self.port.send_command(b"BARDATA\n")
_barometer = float(_bardata[0].split()[1])/1000.0
_altitude = float(_bardata[1].split()[1])
_dewpoint = float(_bardata[2].split()[2])
_virt_temp = float(_bardata[3].split()[2])
_c = float(_bardata[4].split()[1])/10.0
_r = float(_bardata[5].split()[1])/1000.0
_barcal = float(_bardata[6].split()[1])/1000.0
_gain = float(_bardata[7].split()[1])
_offset = float(_bardata[8].split()[1])
return (_barometer, _altitude, _dewpoint, _virt_temp,
_c, _r, _barcal, _gain, _offset)
def getFirmwareDate(self):
"""Return the firmware date as a string. """
return self.port.send_command(b'VER\n')[0]
def getFirmwareVersion(self):
"""Return the firmware version as a string."""
return self.port.send_command(b'NVER\n')[0]
def getStnInfo(self):
"""Return lat / lon, time zone, etc."""
(stnlat, stnlon) = self._getEEPROM_value(0x0B, "<2h")
stnlat /= 10.0
stnlon /= 10.0
man_or_auto = "MANUAL" if self._getEEPROM_value(0x12)[0] else "AUTO"
dst = "ON" if self._getEEPROM_value(0x13)[0] else "OFF"
gmt_or_zone = "GMT_OFFSET" if self._getEEPROM_value(0x16)[0] else "ZONE_CODE"
zone_code = self._getEEPROM_value(0x11)[0]
gmt_offset = self._getEEPROM_value(0x14, "<h")[0] / 100.0
tempLogging = "LAST" if self._getEEPROM_value(0xffc)[0] else "AVERAGE"
retransmit_channel = self._getEEPROM_value(0x18)[0]
return (stnlat, stnlon, man_or_auto, dst, gmt_or_zone, zone_code, gmt_offset,
tempLogging, retransmit_channel)
def getStnTransmitters(self):
""" Get the types of transmitters on the eight channels."""
transmitters = [ ]
use_tx = self._getEEPROM_value(0x17)[0]
transmitter_data = self._getEEPROM_value(0x19, "16B")
for transmitter_id in range(8):
transmitter_type = self.transmitter_type_dict[transmitter_data[transmitter_id * 2] & 0x0F]
repeater = transmitter_data[transmitter_id * 2] & 0xF0
repeater = (repeater >> 4) - 7 if repeater > 127 else 0
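# Worked example with an assumed raw byte of 0x83: the type nibble 0x3
# decodes to 'temp_hum'; the repeater nibble 0x80 is > 127, so
# (0x80 >> 4) - 7 = 1, i.e. repeater 'A'.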
transmitter = {"transmitter_type": transmitter_type,
"repeater": self.repeater_dict[repeater],
"listen": self.listen_dict[(use_tx >> transmitter_id) & 1] }
if transmitter_type in ['temp', 'temp_hum']:
# Extra temperature is origin 0.
transmitter['temp'] = (transmitter_data[transmitter_id * 2 + 1] & 0xF) + 1
if transmitter_type in ['hum', 'temp_hum']:
# Extra humidity is origin 1.
transmitter['hum'] = transmitter_data[transmitter_id * 2 + 1] >> 4
transmitters.append(transmitter)
return transmitters
def getStnCalibration(self):
""" Get the temperature/humidity/wind calibrations built into the console. """
(inTemp, inTempComp, outTemp,
extraTemp1, extraTemp2, extraTemp3, extraTemp4, extraTemp5, extraTemp6, extraTemp7,
soilTemp1, soilTemp2, soilTemp3, soilTemp4, leafTemp1, leafTemp2, leafTemp3, leafTemp4,
inHumid,
outHumid, extraHumid1, extraHumid2, extraHumid3, extraHumid4, extraHumid5, extraHumid6, extraHumid7,
wind) = self._getEEPROM_value(0x32, "<27bh")
# inTempComp is 1's complement of inTemp.
if inTemp + inTempComp != -1:
log.error("Inconsistent EEPROM calibration values")
return None
# Temperatures are in tenths of a degree F; Humidity in 1 percent.
return {
"inTemp": inTemp / 10.0,
"outTemp": outTemp / 10.0,
"extraTemp1": extraTemp1 / 10.0,
"extraTemp2": extraTemp2 / 10.0,
"extraTemp3": extraTemp3 / 10.0,
"extraTemp4": extraTemp4 / 10.0,
"extraTemp5": extraTemp5 / 10.0,
"extraTemp6": extraTemp6 / 10.0,
"extraTemp7": extraTemp7 / 10.0,
"soilTemp1": soilTemp1 / 10.0,
"soilTemp2": soilTemp2 / 10.0,
"soilTemp3": soilTemp3 / 10.0,
"soilTemp4": soilTemp4 / 10.0,
"leafTemp1": leafTemp1 / 10.0,
"leafTemp2": leafTemp2 / 10.0,
"leafTemp3": leafTemp3 / 10.0,
"leafTemp4": leafTemp4 / 10.0,
"inHumid": inHumid,
"outHumid": outHumid,
"extraHumid1": extraHumid1,
"extraHumid2": extraHumid2,
"extraHumid3": extraHumid3,
"extraHumid4": extraHumid4,
"extraHumid5": extraHumid5,
"extraHumid6": extraHumid6,
"extraHumid7": extraHumid7,
"wind": wind
}
def startLogger(self):
self.port.send_command(b"START\n")
def stopLogger(self):
self.port.send_command(b'STOP\n')
#===========================================================================
# Davis Vantage utility functions
#===========================================================================
@property
def hardware_name(self):
if self.hardware_type == 16:
if self.model_type == 1:
return "Vantage Pro"
else:
return "Vantage Pro2"
elif self.hardware_type == 17:
return "Vantage Vue"
else:
raise weewx.UnsupportedFeature("Unknown hardware type %d" % self.hardware_type)
@property
def archive_interval(self):
return self.archive_interval_
def _determine_hardware(self):
# Determine the type of hardware:
for count in range(self.max_tries):
try:
self.port.send_data(b"WRD\x12\x4d\n")
self.hardware_type = byte2int(self.port.read())
log.debug("Hardware type is %d", self.hardware_type)
# 16 = Pro, Pro2, 17 = Vue
return self.hardware_type
except weewx.WeeWxIOError as e:
log.error("_determine_hardware; retry #%d: '%s'", count, e)
log.error("Unable to read hardware type; raise WeeWxIOError")
raise weewx.WeeWxIOError("Unable to read hardware type")
def _setup(self):
"""Retrieve the EEPROM data block from a VP2 and use it to set various properties"""
self.port.wakeup_console(max_tries=self.max_tries)
# Get hardware type, if not done yet.
if self.hardware_type is None:
self.hardware_type = self._determine_hardware()
# Overwrite model_type if we have Vantage Vue.
if self.hardware_type == 17:
self.model_type = 2
unit_bits = self._getEEPROM_value(0x29)[0]
setup_bits = self._getEEPROM_value(0x2B)[0]
self.rain_year_start = self._getEEPROM_value(0x2C)[0]
self.archive_interval_ = self._getEEPROM_value(0x2D)[0] * 60
self.altitude = self._getEEPROM_value(0x0F, "<h")[0]
self.altitude_vt = weewx.units.ValueTuple(self.altitude, "foot", "group_altitude")
barometer_unit_code = unit_bits & 0x03
temperature_unit_code = (unit_bits & 0x0C) >> 2
altitude_unit_code = (unit_bits & 0x10) >> 4
rain_unit_code = (unit_bits & 0x20) >> 5
wind_unit_code = (unit_bits & 0xC0) >> 6
self.wind_cup_type = (setup_bits & 0x08) >> 3
self.rain_bucket_type = (setup_bits & 0x30) >> 4
self.barometer_unit = Vantage.barometer_unit_dict[barometer_unit_code]
self.temperature_unit = Vantage.temperature_unit_dict[temperature_unit_code]
self.altitude_unit = Vantage.altitude_unit_dict[altitude_unit_code]
self.rain_unit = Vantage.rain_unit_dict[rain_unit_code]
self.wind_unit = Vantage.wind_unit_dict[wind_unit_code]
self.wind_cup_size = Vantage.wind_cup_dict[self.wind_cup_type]
self.rain_bucket_size = Vantage.rain_bucket_dict[self.rain_bucket_type]
# Try to guess the ISS ID for gauging reception strength.
if self.iss_id is None:
stations = self.getStnTransmitters()
# Wind retransmitter is best candidate.
for station_id in range(0, 8):
if stations[station_id]['transmitter_type'] == 'wind':
self.iss_id = station_id + 1 # Origin 1.
break
else:
# ISS is next best candidate.
for station_id in range(0, 8):
if stations[station_id]['transmitter_type'] == 'iss':
self.iss_id = station_id + 1 # Origin 1.
break
else:
# On Vue, can use VP2 ISS, which reports as "rain"
for station_id in range(0, 8):
if stations[station_id]['transmitter_type'] == 'rain':
self.iss_id = station_id + 1 # Origin 1.
break
else:
self.iss_id = 1 # Pick a reasonable default.
log.debug("ISS ID is %s", self.iss_id)
def _getEEPROM_value(self, offset, v_format="B"):
"""Return a list of values from the EEPROM starting at a specified offset, using a specified format"""
nbytes = struct.calcsize(v_format)
# Don't bother waking up the console for the first try. It's probably
# already awake from opening the port. However, if we fail, then do a
# wakeup.
firsttime = True
command = b"EEBRD %X %X\n" % (offset, nbytes)
for unused_count in range(self.max_tries):
try:
if not firsttime:
self.port.wakeup_console(max_tries=self.max_tries)
firsttime = False
self.port.send_data(command)
_buffer = self.port.get_data_with_crc16(nbytes + 2, max_tries=1)
_value = struct.unpack(v_format, _buffer[:-2])
return _value
except weewx.WeeWxIOError:
continue
log.error("Max retries exceeded while getting EEPROM data at address 0x%X", offset)
raise weewx.RetriesExceeded("While getting EEPROM data value at address 0x%X" % offset)
@staticmethod
def _port_factory(vp_dict):
"""Produce a serial or ethernet port object"""
timeout = float(vp_dict.get('timeout', 4.0))
wait_before_retry = float(vp_dict.get('wait_before_retry', 1.2))
command_delay = float(vp_dict.get('command_delay', 0.5))
# Get the connection type. If it is not specified, assume 'serial':
connection_type = vp_dict.get('type', 'serial').lower()
if connection_type == "serial":
port = vp_dict['port']
baudrate = int(vp_dict.get('baudrate', 19200))
return SerialWrapper(port, baudrate, timeout,
wait_before_retry, command_delay)
elif connection_type == "ethernet":
hostname = vp_dict['host']
tcp_port = int(vp_dict.get('tcp_port', 22222))
tcp_send_delay = float(vp_dict.get('tcp_send_delay', 0.5))
return EthernetWrapper(hostname, tcp_port, timeout, tcp_send_delay,
wait_before_retry, command_delay)
raise weewx.UnsupportedFeature(vp_dict['type'])
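# A minimal usage sketch of this factory via the Vantage constructor. The
# host and port below are illustrative assumptions, not prescribed values:
#
#   station = Vantage(type='ethernet', host='192.168.1.50', tcp_port=22222)
#   for packet in station.genLoopPackets():
#       print(packet['dateTime'], packet.get('outTemp'))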
def _unpackLoopPacket(self, raw_loop_buffer):
"""Decode a raw Davis LOOP packet, returning the results as a dictionary in physical units.
raw_loop_buffer: The loop packet data buffer, passed in as
a string (Python 2), or a byte array (Python 3).
returns:
A dictionary. The key will be an observation type, the value will be
the observation in physical units."""
# Get the packet type. It's in byte 4.
packet_type = indexbytes(raw_loop_buffer, 4)
if packet_type == 0:
loop_struct = loop1_struct
loop_types = loop1_types
elif packet_type == 1:
loop_struct = loop2_struct
loop_types = loop2_types
else:
raise weewx.WeeWxIOError("Unknown LOOP packet type %s" % packet_type)
# Unpack the data, using the appropriate compiled struct.Struct object.
# The result will be a long tuple with just the raw values from the console.
data_tuple = loop_struct.unpack(raw_loop_buffer)
# Combine it with the data types. The result will be a long iterable of
# 2-tuples: (type, raw-value)
raw_loop_tuples = zip(loop_types, data_tuple)
# Convert to a dictionary:
raw_loop_packet = dict(raw_loop_tuples)
# Add the bucket type. It's needed to decode rain bucket tips.
raw_loop_packet['bucket_type'] = self.rain_bucket_type
loop_packet = {
'dateTime': int(time.time() + 0.5),
'usUnits' : weewx.US
}
# Now we need to map the raw values to physical units.
for _type in raw_loop_packet:
# Get the mapping function for this type. If there is
# no such function, supply a lambda function that returns None
func = _loop_map.get(_type, lambda p, k: None)
# Apply the function
val = func(raw_loop_packet, _type)
# Ignore None values:
if val is not None:
loop_packet[_type] = val
# Adjust sunrise and sunset:
start_of_day = weeutil.weeutil.startOfDay(loop_packet['dateTime'])
if 'sunrise' in loop_packet:
loop_packet['sunrise'] += start_of_day
if 'sunset' in loop_packet:
loop_packet['sunset'] += start_of_day
# Because the Davis stations do not offer bucket tips in LOOP data, we
# must calculate it by looking for changes in rain totals. This won't
# work for the very first rain packet.
if self.save_day_rain is None:
delta = None
else:
delta = loop_packet['dayRain'] - self.save_day_rain
# If the difference is negative, the daily rain total has been reset (new day).
if delta < 0: delta = None
loop_packet['rain'] = delta
self.save_day_rain = loop_packet['dayRain']
return loop_packet
def _unpackArchivePacket(self, raw_archive_buffer):
"""Decode a Davis archive packet, returning the results as a dictionary.
raw_archive_buffer: The archive record data buffer, passed in as
a string (Python 2), or a byte array (Python 3).
returns:
A dictionary. The key will be an observation type, the value will be
the observation in physical units."""
# Get the record type. It's in byte 42.
record_type = indexbytes(raw_archive_buffer, 42)
if record_type == 0xff:
# Rev A packet type:
rec_struct = rec_A_struct
rec_types = rec_types_A
elif record_type == 0x00:
# Rev B packet type:
rec_struct = rec_B_struct
rec_types = rec_types_B
else:
raise weewx.UnknownArchiveType("Unknown archive type = 0x%x" % (record_type,))
data_tuple = rec_struct.unpack(raw_archive_buffer)
raw_archive_record = dict(zip(rec_types, data_tuple))
raw_archive_record['bucket_type'] = self.rain_bucket_type
archive_record = {
'dateTime': _archive_datetime(raw_archive_record['date_stamp'],
raw_archive_record['time_stamp']),
'usUnits': weewx.US,
# Divide archive interval by 60 to keep consistent with wview
'interval': int(self.archive_interval // 60),
}
archive_record['rxCheckPercent'] = _rxcheck(self.model_type,
archive_record['interval'],
self.iss_id,
raw_archive_record['wind_samples'])
for _type in raw_archive_record:
# Get the mapping function for this type. If there is no such
# function, supply a lambda function that will just return None
func = _archive_map.get(_type, lambda p, k: None)
# Call the function:
val = func(raw_archive_record, _type)
# Skip all null values
if val is not None:
archive_record[_type] = val
return archive_record
#===============================================================================
# LOOP packet
#===============================================================================
# A list of all the types held in a Vantage LOOP packet in their native order.
loop1_schema = [
('loop', '3s'), ('rev_type', 'b'), ('packet_type', 'B'),
('next_record', 'H'), ('barometer', 'H'), ('inTemp', 'h'),
('inHumidity', 'B'), ('outTemp', 'h'), ('windSpeed', 'B'),
('windSpeed10', 'B'), ('windDir', 'H'), ('extraTemp1', 'B'),
('extraTemp2', 'B'), ('extraTemp3', 'B'), ('extraTemp4', 'B'),
('extraTemp5', 'B'), ('extraTemp6', 'B'), ('extraTemp7', 'B'),
('soilTemp1', 'B'), ('soilTemp2', 'B'), ('soilTemp3', 'B'),
('soilTemp4', 'B'), ('leafTemp1', 'B'), ('leafTemp2', 'B'),
('leafTemp3', 'B'), ('leafTemp4', 'B'), ('outHumidity', 'B'),
('extraHumid1', 'B'), ('extraHumid2', 'B'), ('extraHumid3', 'B'),
('extraHumid4', 'B'), ('extraHumid5', 'B'), ('extraHumid6', 'B'),
('extraHumid7', 'B'), ('rainRate', 'H'), ('UV', 'B'),
('radiation', 'H'), ('stormRain', 'H'), ('stormStart', 'H'),
('dayRain', 'H'), ('monthRain', 'H'), ('yearRain', 'H'),
('dayET', 'H'), ('monthET', 'H'), ('yearET', 'H'),
('soilMoist1', 'B'), ('soilMoist2', 'B'), ('soilMoist3', 'B'),
('soilMoist4', 'B'), ('leafWet1', 'B'), ('leafWet2', 'B'),
('leafWet3', 'B'), ('leafWet4', 'B'), ('insideAlarm', 'B'),
('rainAlarm', 'B'), ('outsideAlarm1', 'B'), ('outsideAlarm2', 'B'),
('extraAlarm1', 'B'), ('extraAlarm2', 'B'), ('extraAlarm3', 'B'),
('extraAlarm4', 'B'), ('extraAlarm5', 'B'), ('extraAlarm6', 'B'),
('extraAlarm7', 'B'), ('extraAlarm8', 'B'), ('soilLeafAlarm1', 'B'),
('soilLeafAlarm2', 'B'), ('soilLeafAlarm3', 'B'), ('soilLeafAlarm4', 'B'),
('txBatteryStatus', 'B'), ('consBatteryVoltage', 'H'), ('forecastIcon', 'B'),
('forecastRule', 'B'), ('sunrise', 'H'), ('sunset', 'H')
]
loop2_schema = [
('loop', '3s'), ('trendIcon', 'b'), ('packet_type', 'B'),
('_unused', 'H'), ('barometer', 'H'), ('inTemp', 'h'),
('inHumidity', 'B'), ('outTemp', 'h'), ('windSpeed', 'B'),
('_unused', 'B'), ('windDir', 'H'), ('windSpeed10', 'H'),
('windSpeed2', 'H'), ('windGust10', 'H'), ('windGustDir10', 'H'),
('_unused', 'H'), ('_unused', 'H'), ('dewpoint', 'h'),
('_unused', 'B'), ('outHumidity', 'B'), ('_unused', 'B'),
('heatindex', 'h'), ('windchill', 'h'), ('THSW', 'h'),
('rainRate', 'H'), ('UV', 'B'), ('radiation', 'H'),
('stormRain', 'H'), ('stormStart', 'H'), ('dayRain', 'H'),
('rain15', 'H'), ('hourRain', 'H'), ('dayET', 'H'),
('rain24', 'H'), ('bar_reduction', 'B'), ('bar_offset', 'h'),
('bar_calibration', 'h'), ('pressure_raw', 'H'), ('pressure', 'H'),
('altimeter', 'H'), ('_unused', 'B'), ('_unused', 'B'),
('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'),
('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'),
('_unused_graph', 'B'), ('_unused_graph', 'B'), ('_unused_graph', 'B'),
('_unused_graph', 'B'), ('_unused', 'H'), ('_unused', 'H'),
('_unused', 'H'), ('_unused', 'H'), ('_unused', 'H'),
('_unused', 'H')
]
# Extract the types and struct.Struct formats for the two types of LOOP packets
loop1_types, loop1_code = list(zip(*loop1_schema))
loop1_struct = struct.Struct('<' + ''.join(loop1_code))
loop2_types, loop2_code = list(zip(*loop2_schema))
loop2_struct = struct.Struct('<' + ''.join(loop2_code))
#===============================================================================
# archive packet
#===============================================================================
rec_A_schema = [
('date_stamp', 'H'), ('time_stamp', 'H'), ('outTemp', 'h'),
('highOutTemp', 'h'), ('lowOutTemp', 'h'), ('rain', 'H'),
('rainRate', 'H'), ('barometer', 'H'), ('radiation', 'H'),
('wind_samples', 'H'), ('inTemp', 'h'), ('inHumidity', 'B'),
('outHumidity', 'B'), ('windSpeed', 'B'), ('windGust', 'B'),
('windGustDir', 'B'), ('windDir', 'B'), ('UV', 'B'),
('ET', 'B'), ('invalid_data', 'B'), ('soilMoist1', 'B'),
('soilMoist2', 'B'), ('soilMoist3', 'B'), ('soilMoist4', 'B'),
('soilTemp1', 'B'), ('soilTemp2', 'B'), ('soilTemp3', 'B'),
('soilTemp4', 'B'), ('leafWet1', 'B'), ('leafWet2', 'B'),
('leafWet3', 'B'), ('leafWet4', 'B'), ('extraTemp1', 'B'),
('extraTemp2', 'B'), ('extraHumid1', 'B'), ('extraHumid2','B'),
('readClosed', 'H'), ('readOpened', 'H'), ('unused', 'B')
]
rec_B_schema = [
('date_stamp', 'H'), ('time_stamp', 'H'), ('outTemp', 'h'),
('highOutTemp', 'h'), ('lowOutTemp', 'h'), ('rain', 'H'),
('rainRate', 'H'), ('barometer', 'H'), ('radiation', 'H'),
('wind_samples', 'H'), ('inTemp', 'h'), ('inHumidity', 'B'),
('outHumidity', 'B'), ('windSpeed', 'B'), ('windGust', 'B'),
('windGustDir', 'B'), ('windDir', 'B'), ('UV', 'B'),
('ET', 'B'), ('highRadiation', 'H'), ('highUV', 'B'),
('forecastRule', 'B'), ('leafTemp1', 'B'), ('leafTemp2', 'B'),
('leafWet1', 'B'), ('leafWet2', 'B'), ('soilTemp1', 'B'),
('soilTemp2', 'B'), ('soilTemp3', 'B'), ('soilTemp4', 'B'),
('download_record_type', 'B'), ('extraHumid1', 'B'), ('extraHumid2','B'),
('extraTemp1', 'B'), ('extraTemp2', 'B'), ('extraTemp3', 'B'),
('soilMoist1', 'B'), ('soilMoist2', 'B'), ('soilMoist3', 'B'),
('soilMoist4', 'B')
]
# Extract the types and struct.Struct formats for the two types of archive packets:
rec_types_A, fmt_A = list(zip(*rec_A_schema))
rec_types_B, fmt_B = list(zip(*rec_B_schema))
rec_A_struct = struct.Struct('<' + ''.join(fmt_A))
rec_B_struct = struct.Struct('<' + ''.join(fmt_B))
def _rxcheck(model_type, interval, iss_id, number_of_wind_samples):
"""Gives an estimate of the fraction of packets received.
Ref: Vantage Serial Protocol doc, V2.1.0, released 25-Jan-05; p42"""
# The formula for the expected # of packets varies with model number.
if model_type == 1:
_expected_packets = float(interval * 60) / ( 2.5 + (iss_id-1) / 16.0) -\
float(interval * 60) / (50.0 + (iss_id-1) * 1.25)
elif model_type == 2:
_expected_packets = 960.0 * interval / float(41 + iss_id - 1)
else:
return None
_frac = number_of_wind_samples * 100.0 / _expected_packets
if _frac > 100.0:
_frac = 100.0
return _frac
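# A worked example with assumed values: model_type=2, interval=5 (minutes),
# iss_id=1 gives _expected_packets = 960.0 * 5 / 41 ~= 117.1, so 110 wind
# samples decode to a reception fraction of about 94%.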
#===============================================================================
# Decoding routines
#===============================================================================
def _archive_datetime(datestamp, timestamp):
"""Returns the epoch time of the archive packet."""
try:
# Construct a time tuple from Davis time. Unfortunately, as timestamps come
# off the Vantage logger, there is no way of telling whether or not DST is
# in effect. So, have the operating system guess by using a '-1' in the last
# position of the time tuple. It's the best we can do...
time_tuple = (((0xfe00 & datestamp) >> 9) + 2000, # year
(0x01e0 & datestamp) >> 5, # month
(0x001f & datestamp), # day
timestamp // 100, # hour
timestamp % 100, # minute
0, # second
0, 0, -1) # have OS guess DST
# Convert to epoch time:
ts = int(time.mktime(time_tuple))
except (OverflowError, ValueError, TypeError):
ts = None
return ts
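# A worked example with assumed stamps: datestamp=10959, timestamp=1345
# decode as year (10959 >> 9) + 2000 = 2021, month (10959 & 0x01e0) >> 5 = 6,
# day 10959 & 0x001f = 15, hour 13, minute 45: 2021-06-15 13:45 local time.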
def _loop_date(p, k):
"""Returns the epoch time stamp of a time encoded in the LOOP packet,
which, for some reason, uses a different encoding scheme than the archive packet.
Also, the Davis documentation isn't clear whether "bit 0" refers to the least-significant
bit, or the most-significant bit. I'm assuming the former, which is the usual
in little-endian machines."""
v = p[k]
if v == 0xffff:
return None
time_tuple = ((0x007f & v) + 2000, # year
(0xf000 & v) >> 12, # month
(0x0f80 & v) >> 7, # day
0, 0, 0, # h, m, s
0, 0, -1)
# Convert to epoch time:
try:
ts = int(time.mktime(time_tuple))
except (OverflowError, ValueError):
ts = None
return ts
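# A worked example with an assumed date: June 15, 2021 encodes as
# v = (6 << 12) | (15 << 7) | (2021 - 2000) = 26517 (0x6795).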
def _decode_rain(p, k):
if p['bucket_type'] == 0:
# 0.01 inch bucket
return p[k] / 100.0
elif p['bucket_type'] == 1:
# 0.2 mm bucket
return p[k] * 0.0078740157
elif p['bucket_type'] == 2:
# 0.1 mm bucket
return p[k] * 0.00393700787
else:
log.warning("Unknown bucket type $s" % p['bucket_type'])
def _decode_windSpeed_H(p, k):
"""Decode 10-min average wind speed. It is encoded slightly
differently between type 0 and type 1 LOOP packets."""
if p['packet_type'] == 0:
return float(p[k]) if p[k] != 0xff else None
elif p['packet_type'] == 1:
return float(p[k]) / 10.0 if p[k] != 0xffff else None
else:
log.warning("Unknown LOOP packet type %s" % p['packet_type'])
# This dictionary maps a type key to a function. The function should be able to
# decode a sensor value held in the LOOP packet in the internal, Davis form into US
# units and return it.
_loop_map = {
'altimeter' : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
'bar_calibration' : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
'bar_offset' : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
'bar_reduction' : lambda p, k: p[k],
'barometer' : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
'consBatteryVoltage': lambda p, k: float((p[k] * 300) >> 9) / 100.0,
'dayET' : lambda p, k: float(p[k]) / 1000.0,
'dayRain' : _decode_rain,
'dewpoint' : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
'extraAlarm1' : lambda p, k: p[k],
'extraAlarm2' : lambda p, k: p[k],
'extraAlarm3' : lambda p, k: p[k],
'extraAlarm4' : lambda p, k: p[k],
'extraAlarm5' : lambda p, k: p[k],
'extraAlarm6' : lambda p, k: p[k],
'extraAlarm7' : lambda p, k: p[k],
'extraAlarm8' : lambda p, k: p[k],
'extraHumid1' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraHumid2' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraHumid3' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraHumid4' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraHumid5' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraHumid6' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraHumid7' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraTemp1' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp2' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp3' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp4' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp5' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp6' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp7' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'forecastIcon' : lambda p, k: p[k],
'forecastRule' : lambda p, k: p[k],
'heatindex' : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
'hourRain' : _decode_rain,
'inHumidity' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'insideAlarm' : lambda p, k: p[k],
'inTemp' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
'leafTemp1' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'leafTemp2' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'leafTemp3' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'leafTemp4' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'leafWet1' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'leafWet2' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'leafWet3' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'leafWet4' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'monthET' : lambda p, k: float(p[k]) / 100.0,
'monthRain' : _decode_rain,
'outHumidity' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'outsideAlarm1' : lambda p, k: p[k],
'outsideAlarm2' : lambda p, k: p[k],
'outTemp' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
'pressure' : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
'pressure_raw' : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
'radiation' : lambda p, k: float(p[k]) if p[k] != 0x7fff else None,
'rain15' : _decode_rain,
'rain24' : _decode_rain,
'rainAlarm' : lambda p, k: p[k],
'rainRate' : _decode_rain,
'soilLeafAlarm1' : lambda p, k: p[k],
'soilLeafAlarm2' : lambda p, k: p[k],
'soilLeafAlarm3' : lambda p, k: p[k],
'soilLeafAlarm4' : lambda p, k: p[k],
'soilMoist1' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilMoist2' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilMoist3' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilMoist4' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilTemp1' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'soilTemp2' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'soilTemp3' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'soilTemp4' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'stormRain' : _decode_rain,
'stormStart' : _loop_date,
'sunrise' : lambda p, k: 3600 * (p[k] // 100) + 60 * (p[k] % 100),
'sunset' : lambda p, k: 3600 * (p[k] // 100) + 60 * (p[k] % 100),
'THSW' : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
'trendIcon' : lambda p, k: p[k],
'txBatteryStatus' : lambda p, k: int(p[k]),
'UV' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0xff else None,
'windchill' : lambda p, k: float(p[k]) if p[k] & 0xff != 0xff else None,
'windDir' : lambda p, k: (float(p[k]) if p[k] != 360 else 0) if p[k] and p[k] != 0x7fff else None,
'windGust10' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'windGustDir10' : lambda p, k: (float(p[k]) if p[k] != 360 else 0) if p[k] and p[k] != 0x7fff else None,
'windSpeed' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'windSpeed10' : _decode_windSpeed_H,
'windSpeed2' : _decode_windSpeed_H,
'yearET' : lambda p, k: float(p[k]) / 100.0,
'yearRain' : _decode_rain,
}
# This dictionary maps a type key to a function. The function should be able to
# decode a sensor value held in the archive packet in the internal, Davis form into US
# units and return it.
_archive_map = {
'barometer' : lambda p, k: float(p[k]) / 1000.0 if p[k] else None,
'ET' : lambda p, k: float(p[k]) / 1000.0,
'extraHumid1' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraHumid2' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'extraTemp1' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp2' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'extraTemp3' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'forecastRule' : lambda p, k: p[k] if p[k] != 193 else None,
'highOutTemp' : lambda p, k: float(p[k] / 10.0) if p[k] != -32768 else None,
'highRadiation' : lambda p, k: float(p[k]) if p[k] != 0x7fff else None,
'highUV' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0xff else None,
'inHumidity' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'inTemp' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
'leafTemp1' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'leafTemp2' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'leafWet1' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'leafWet2' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'leafWet3' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'leafWet4' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'lowOutTemp' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
'outHumidity' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'outTemp' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0x7fff else None,
'radiation' : lambda p, k: float(p[k]) if p[k] != 0x7fff else None,
'rain' : _decode_rain,
'rainRate' : _decode_rain,
'readClosed' : lambda p, k: p[k],
'readOpened' : lambda p, k: p[k],
'soilMoist1' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilMoist2' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilMoist3' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilMoist4' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
'soilTemp1' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'soilTemp2' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'soilTemp3' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'soilTemp4' : lambda p, k: float(p[k] - 90) if p[k] != 0xff else None,
'UV' : lambda p, k: float(p[k]) / 10.0 if p[k] != 0xff else None,
'wind_samples' : lambda p, k: float(p[k]) if p[k] else None,
'windDir' : lambda p, k: float(p[k]) * 22.5 if p[k] != 0xff else None,
'windGust' : lambda p, k: float(p[k]),
'windGustDir' : lambda p, k: float(p[k]) * 22.5 if p[k] != 0xff else None,
'windSpeed' : lambda p, k: float(p[k]) if p[k] != 0xff else None,
}
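# A quick illustration of applying these maps (raw values assumed):
#
#   _archive_map['outTemp']({'outTemp': 723}, 'outTemp')  # -> 72.3 degrees F
#   _loop_map['sunrise']({'sunrise': 615}, 'sunrise')     # 06:15 -> 22500 seconds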
#===============================================================================
# class VantageService
#===============================================================================
# This class uses multiple inheritance:
class VantageService(Vantage, weewx.engine.StdService):
"""Weewx service for the Vantage weather stations."""
def __init__(self, engine, config_dict):
Vantage.__init__(self, **config_dict[DRIVER_NAME])
weewx.engine.StdService.__init__(self, engine, config_dict)
self.max_loop_gust = 0.0
self.max_loop_gustdir = None
self.bind(weewx.STARTUP, self.startup)
self.bind(weewx.NEW_LOOP_PACKET, self.new_loop_packet)
self.bind(weewx.END_ARCHIVE_PERIOD, self.end_archive_period)
self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)
def startup(self, event): # @UnusedVariable
self.max_loop_gust = 0.0
self.max_loop_gustdir = None
self.loop_data = {'txBatteryStatus': None,
'consBatteryVoltage': None}
def closePort(self):
# Now close my superclass's port:
Vantage.closePort(self)
def new_loop_packet(self, event):
"""Calculate the max gust seen since the last archive record."""
# Calculate the max gust seen since the start of this archive record
# and put it in the packet.
windSpeed = event.packet.get('windSpeed')
windDir = event.packet.get('windDir')
if windSpeed is not None and windSpeed > self.max_loop_gust:
self.max_loop_gust = windSpeed
self.max_loop_gustdir = windDir
event.packet['windGust'] = self.max_loop_gust
event.packet['windGustDir'] = self.max_loop_gustdir
# Save the battery statuses:
for k in self.loop_data:
self.loop_data[k] = event.packet.get(k)
def end_archive_period(self, event): # @UnusedVariable
"""Zero out the max gust seen since the start of the record"""
self.max_loop_gust = 0.0
self.max_loop_gustdir = None
def new_archive_record(self, event):
"""Add the battery status to the archive record."""
# Add the last battery status:
event.record.update(self.loop_data)
#===============================================================================
# Class VantageConfigurator
#===============================================================================
class VantageConfigurator(weewx.drivers.AbstractConfigurator):
@property
def description(self):
return "Configures the Davis Vantage weather station."
@property
def usage(self):
return """%prog --help
%prog --info [config_file]
%prog --current [config_file]
%prog --clear-memory [config_file] [-y]
%prog --set-interval=MINUTES [config_file] [-y]
%prog --set-latitude=DEGREE [config_file] [-y]
%prog --set-longitude=DEGREE [config_file] [-y]
%prog --set-altitude=FEET [config_file] [-y]
%prog --set-barometer=inHg [config_file] [-y]
%prog --set-wind-cup=CODE [config_file] [-y]
%prog --set-bucket=CODE [config_file] [-y]
%prog --set-rain-year-start=MM [config_file] [-y]
%prog --set-offset=VARIABLE,OFFSET [config_file] [-y]
%prog --set-transmitter-type=CHANNEL,TYPE,TEMP,HUM,REPEATER_ID [config_file] [-y]
%prog --set-retransmit=[OFF|ON|ON,CHANNEL] [config_file] [-y]
%prog --set-temperature-logging=[LAST|AVERAGE] [config_file] [-y]
%prog --set-time [config_file] [-y]
%prog --set-dst=[AUTO|ON|OFF] [config_file] [-y]
%prog --set-tz-code=TZCODE [config_file] [-y]
%prog --set-tz-offset=HHMM [config_file] [-y]
%prog --set-lamp=[ON|OFF] [config_file]
%prog --dump [--batch-size=BATCH_SIZE] [config_file] [-y]
%prog --logger-summary=FILE [config_file] [-y]
%prog [--start | --stop] [config_file]"""
def add_options(self, parser):
super(VantageConfigurator, self).add_options(parser)
parser.add_option("--info", action="store_true", dest="info",
help="To print configuration, reception, and barometer "
"calibration information about your weather station.")
parser.add_option("--current", action="store_true",
help="To print current LOOP information.")
parser.add_option("--clear-memory", action="store_true", dest="clear_memory",
help="To clear the memory of your weather station.")
parser.add_option("--set-interval", type=int, dest="set_interval",
metavar="MINUTES",
help="Sets the archive interval to the specified number of minutes. "
"Valid values are 1, 5, 10, 15, 30, 60, or 120.")
parser.add_option("--set-latitude", type=float, dest="set_latitude",
metavar="DEGREE",
help="Sets the latitude of the station to the specified number of tenth degree.")
parser.add_option("--set-longitude", type=float, dest="set_longitude",
metavar="DEGREE",
help="Sets the longitude of the station to the specified number of tenth degree.")
parser.add_option("--set-altitude", type=float, dest="set_altitude",
metavar="FEET",
help="Sets the altitude of the station to the specified number of feet.")
parser.add_option("--set-barometer", type=float, dest="set_barometer",
metavar="inHg",
help="Sets the barometer reading of the station to a known correct "
"value in inches of mercury. Specify 0 (zero) to have the console "
"pick a sensible value.")
parser.add_option("--set-wind-cup", type=int, dest="set_wind_cup",
metavar="CODE",
help="Set the type of wind cup. Specify '0' for small size; '1' for large size")
parser.add_option("--set-bucket", type=int, dest="set_bucket",
metavar="CODE",
help="Set the type of rain bucket. Specify '0' for 0.01 inches; "
"'1' for 0.2 mm; '2' for 0.1 mm")
parser.add_option("--set-rain-year-start", type=int,
dest="set_rain_year_start", metavar="MM",
help="Set the rain year start (1=Jan, 2=Feb, etc.).")
parser.add_option("--set-offset", type=str,
dest="set_offset", metavar="VARIABLE,OFFSET",
help="Set the onboard offset for VARIABLE inTemp, outTemp, extraTemp[1-7], "
"inHumid, outHumid, extraHumid[1-7], soilTemp[1-4], leafTemp[1-4], windDir) "
"to OFFSET (Fahrenheit, %, degrees)")
parser.add_option("--set-transmitter-type", type=str,
dest="set_transmitter_type",
metavar="CHANNEL,TYPE,TEMP,HUM,REPEATER_ID",
help="Set the transmitter type for CHANNEL (1-8), TYPE (0=iss, 1=temp, 2=hum, "
"3=temp_hum, 4=wind, 5=rain, 6=leaf, 7=soil, 8=leaf_soil, 9=sensorlink, 10=none), "
"as extra TEMP station and extra HUM station (both 1-7, if applicable), "
"REPEATER_ID ('A'-'H', if used)")
parser.add_option("--set-retransmit", type=str, dest="set_retransmit",
metavar="OFF|ON|ON,CHANNEL",
help="Turn console retransmit function 'ON' or 'OFF'.")
parser.add_option("--set-temperature-logging", dest="set_temp_logging",
metavar="LAST|AVERAGE",
help="Set console temperature logging to either 'LAST' or 'AVERAGE'.")
parser.add_option("--set-time", action="store_true", dest="set_time",
help="Set the onboard clock to the current time.")
parser.add_option("--set-dst", dest="set_dst",
metavar="AUTO|ON|OFF",
help="Set DST to 'ON', 'OFF', or 'AUTO'")
parser.add_option("--set-tz-code", type=int, dest="set_tz_code",
metavar="TZCODE",
help="Set timezone code to TZCODE. See your Vantage manual for "
"valid codes.")
parser.add_option("--set-tz-offset", dest="set_tz_offset",
help="Set timezone offset to HHMM. E.g. '-0800' for U.S. Pacific Time.",
metavar="HHMM")
parser.add_option("--set-lamp", dest="set_lamp",
metavar="ON|OFF",
help="Turn the console lamp 'ON' or 'OFF'.")
parser.add_option("--dump", action="store_true",
help="Dump all data to the archive. "
"NB: This may result in many duplicate primary key errors.")
parser.add_option("--batch-size", type=int, default=1, metavar="BATCH_SIZE",
help="Use with option --dump. Pages are read off the console in batches "
"of BATCH_SIZE. A BATCH_SIZE of zero means dump all data first, "
"then put it in the database. This can improve performance in "
"high-latency environments, but requires sufficient memory to "
"hold all station data. Default is 1 (one).")
parser.add_option("--logger-summary", type="string",
dest="logger_summary", metavar="FILE",
help="Save diagnostic summary to FILE (for debugging the logger).")
parser.add_option("--start", action="store_true",
help="Start the logger.")
parser.add_option("--stop", action="store_true",
help="Stop the logger.")
def do_options(self, options, parser, config_dict, prompt):
if options.start and options.stop:
parser.error("Cannot specify both --start and --stop")
if options.set_tz_code and options.set_tz_offset:
parser.error("Cannot specify both --set-tz-code and --set-tz-offset")
station = Vantage(**config_dict[DRIVER_NAME])
if options.info:
self.show_info(station)
if options.current:
self.current(station)
if options.set_interval is not None:
self.set_interval(station, options.set_interval, options.noprompt)
if options.set_latitude is not None:
self.set_latitude(station, options.set_latitude, options.noprompt)
if options.set_longitude is not None:
self.set_longitude(station, options.set_longitude, options.noprompt)
if options.set_altitude is not None:
self.set_altitude(station, options.set_altitude, options.noprompt)
if options.set_barometer is not None:
self.set_barometer(station, options.set_barometer, options.noprompt)
if options.clear_memory:
self.clear_memory(station, options.noprompt)
if options.set_wind_cup is not None:
self.set_wind_cup(station, options.set_wind_cup, options.noprompt)
if options.set_bucket is not None:
self.set_bucket(station, options.set_bucket, options.noprompt)
if options.set_rain_year_start is not None:
self.set_rain_year_start(station, options.set_rain_year_start, options.noprompt)
if options.set_offset is not None:
self.set_offset(station, options.set_offset, options.noprompt)
if options.set_transmitter_type is not None:
self.set_transmitter_type(station, options.set_transmitter_type, options.noprompt)
if options.set_retransmit is not None:
self.set_retransmit(station, options.set_retransmit, options.noprompt)
if options.set_temp_logging is not None:
self.set_temp_logging(station, options.set_temp_logging, options.noprompt)
if options.set_time:
self.set_time(station)
if options.set_dst:
self.set_dst(station, options.set_dst)
if options.set_tz_code:
self.set_tz_code(station, options.set_tz_code)
if options.set_tz_offset:
self.set_tz_offset(station, options.set_tz_offset)
if options.set_lamp:
self.set_lamp(station, options.set_lamp)
if options.dump:
self.dump_logger(station, config_dict, options.noprompt, options.batch_size)
if options.logger_summary:
self.logger_summary(station, options.logger_summary)
if options.start:
self.start_logger(station)
if options.stop:
self.stop_logger(station)
@staticmethod
def show_info(station, dest=sys.stdout):
"""Query the configuration of the Vantage, printing out status
information"""
print("Querying...")
try:
_firmware_date = station.getFirmwareDate().decode('ascii')
except weewx.RetriesExceeded:
_firmware_date = "<Unavailable>"
try:
_firmware_version = station.getFirmwareVersion().decode('ascii')
except weewx.RetriesExceeded:
_firmware_version = '<Unavailable>'
console_time = station.getConsoleTime()
altitude_converted = weewx.units.convert(station.altitude_vt, station.altitude_unit)[0]
print("""Davis Vantage EEPROM settings:
CONSOLE TYPE: %s
CONSOLE FIRMWARE:
Date: %s
Version: %s
CONSOLE SETTINGS:
Archive interval: %d (seconds)
Altitude: %d (%s)
Wind cup type: %s
Rain bucket type: %s
Rain year start: %d
Onboard time: %s
CONSOLE DISPLAY UNITS:
Barometer: %s
Temperature: %s
Rain: %s
Wind: %s
""" % (station.hardware_name, _firmware_date, _firmware_version,
station.archive_interval,
altitude_converted, station.altitude_unit,
station.wind_cup_size, station.rain_bucket_size,
station.rain_year_start, console_time,
station.barometer_unit, station.temperature_unit,
station.rain_unit, station.wind_unit), file=dest)
try:
(stnlat, stnlon, man_or_auto, dst, gmt_or_zone, zone_code, gmt_offset,
tempLogging, retransmit_channel) = station.getStnInfo()
if man_or_auto == 'AUTO':
dst = 'N/A'
if gmt_or_zone == 'ZONE_CODE':
gmt_offset_str = 'N/A'
else:
gmt_offset_str = "%+.1f hours" % gmt_offset
zone_code = 'N/A'
on_off = "ON" if retransmit_channel else "OFF"
print(""" CONSOLE STATION INFO:
Latitude (onboard): %+0.1f
Longitude (onboard): %+0.1f
Use manual or auto DST? %s
DST setting: %s
Use GMT offset or zone code? %s
Time zone code: %s
GMT offset: %s
Temperature logging: %s
Retransmit channel: %s (%d)
""" % (stnlat, stnlon, man_or_auto, dst, gmt_or_zone, zone_code, gmt_offset_str,
tempLogging, on_off, retransmit_channel), file=dest)
except weewx.RetriesExceeded:
pass
# Add transmitter types for each channel, if we can:
transmitter_list = None
try:
transmitter_list = station.getStnTransmitters()
print(" TRANSMITTERS: ", file=dest)
print(" Channel Receive Repeater Type", file=dest)
for transmitter_id in range(0, 8):
comment = ""
transmitter_type = transmitter_list[transmitter_id]["transmitter_type"]
repeater = transmitter_list[transmitter_id]["repeater"]
listen = transmitter_list[transmitter_id]["listen"]
if transmitter_type == 'temp_hum':
comment = "(as extra temperature %d and extra humidity %d)" % \
(transmitter_list[transmitter_id]["temp"], transmitter_list[transmitter_id]["hum"])
elif transmitter_type == 'temp':
comment = "(as extra temperature %d)" % transmitter_list[transmitter_id]["temp"]
elif transmitter_type == 'hum':
comment = "(as extra humidity %d)" % transmitter_list[transmitter_id]["hum"]
elif transmitter_type == 'none':
transmitter_type = "(N/A)"
print(" %d %-8s %-4s %s %s"
% (transmitter_id + 1, listen, repeater, transmitter_type, comment), file=dest)
print("", file=dest)
except weewx.RetriesExceeded:
pass
# Add reception statistics if we can:
try:
_rx_list = station.getRX()
print(""" RECEPTION STATS:
Total packets received: %d
Total packets missed: %d
Number of resynchronizations: %d
Longest good stretch: %d
Number of CRC errors: %d
""" % _rx_list, file=dest)
except:
pass
# Add barometer calibration data if we can.
try:
_bar_list = station.getBarData()
print(""" BAROMETER CALIBRATION DATA:
Current barometer reading: %.3f inHg
Altitude: %.0f feet
Dew point: %.0f F
Virtual temperature: %.0f F
Humidity correction factor: %.1f
Correction ratio: %.3f
Correction constant: %+.3f inHg
Gain: %.3f
Offset: %.3f
""" % _bar_list, file=dest)
except weewx.RetriesExceeded:
pass
# Add temperature/humidity/wind calibration if we can.
calibration_dict = station.getStnCalibration()
print(""" OFFSETS:
Wind direction: %(wind)+.0f deg
Inside Temperature: %(inTemp)+.1f F
Inside Humidity: %(inHumid)+.0f %%
Outside Temperature: %(outTemp)+.1f F
Outside Humidity: %(outHumid)+.0f %%""" % calibration_dict, file=dest)
if transmitter_list is not None:
# Only print the calibrations for channels that we are
# listening to.
for extraTemp in range(1, 8):
for t_id in range(0, 8):
t_type = transmitter_list[t_id]["transmitter_type"]
if t_type in ['temp', 'temp_hum'] and \
extraTemp == transmitter_list[t_id]["temp"]:
print(" Extra Temperature %d: %+.1f F"
% (extraTemp, calibration_dict["extraTemp%d" % extraTemp]), file=dest)
for extraHumid in range(1, 8):
for t_id in range(0, 8):
t_type = transmitter_list[t_id]["transmitter_type"]
if t_type in ['hum', 'temp_hum'] and \
extraHumid == transmitter_list[t_id]["hum"]:
print(" Extra Humidity %d: %+.1f F"
% (extraHumid, calibration_dict["extraHumid%d" % extraHumid]), file=dest)
for t_id in range(0, 8):
t_type = transmitter_list[t_id]["transmitter_type"]
if t_type in ['soil', 'leaf_soil']:
for soil in range(1, 5):
print(" Soil Temperature %d: %+.1f F"
% (soil, calibration_dict["soilTemp%d" % soil]), file=dest)
for t_id in range(0, 8):
t_type = transmitter_list[t_id]["transmitter_type"]
if t_type in ['leaf', 'leaf_soil']:
for leaf in range(1, 5):
print(" Leaf Temperature %d: %+.1f F"
% (leaf, calibration_dict["leafTemp%d" % leaf]), file=dest)
print("", file=dest)
@staticmethod
def current(station):
"""Print a single, current LOOP packet."""
print('Querying the station for current weather data...')
for pack in station.genDavisLoopPackets(1):
print(weeutil.weeutil.timestamp_to_string(pack['dateTime']),
to_sorted_string(pack))
@staticmethod
def set_interval(station, new_interval_minutes, noprompt):
"""Set the console archive interval."""
old_interval_minutes = station.archive_interval // 60
print("Old archive interval is %d minutes, new one will be %d minutes."
% (station.archive_interval // 60, new_interval_minutes))
if old_interval_minutes == new_interval_minutes:
print("Old and new archive intervals are the same. Nothing done.")
else:
ans = weeutil.weeutil.y_or_n("Proceeding will change the archive interval "
"as well as erase all old archive records.\n"
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setArchiveInterval(new_interval_minutes * 60)
print("Archive interval now set to %d seconds." % (station.archive_interval,))
# The Davis documentation implies that the log is
# cleared after changing the archive interval, but that
# doesn't seem to be the case. Clear it explicitly:
station.clearLog()
print("Archive records erased.")
else:
print("Nothing done.")
@staticmethod
def set_latitude(station, latitude_dg, noprompt):
"""Set the console station latitude"""
ans = weeutil.weeutil.y_or_n("Proceeding will set the latitude value to %.1f degree.\n"
"Are you sure you wish to proceed (y/n)? " % latitude_dg,
noprompt)
if ans == 'y':
station.setLatitude(latitude_dg)
print("Station latitude set to %.1f degree." % latitude_dg)
else:
print("Nothing done.")
@staticmethod
def set_longitude(station, longitude_dg, noprompt):
"""Set the console station longitude"""
ans = weeutil.weeutil.y_or_n("Proceeding will set the longitude value to %.1f degree.\n"
"Are you sure you wish to proceed (y/n)? " % longitude_dg,
noprompt)
if ans == 'y':
station.setLongitude(longitude_dg)
print("Station longitude set to %.1f degree." % longitude_dg)
else:
print("Nothing done.")
@staticmethod
def set_altitude(station, altitude_ft, noprompt):
"""Set the console station altitude"""
ans = weeutil.weeutil.y_or_n("Proceeding will set the station altitude to %.0f feet.\n"
"Are you sure you wish to proceed (y/n)? " % altitude_ft,
noprompt)
if ans == 'y':
# Hit the console to get the current barometer calibration data and preserve it:
_bardata = station.getBarData()
_barcal = _bardata[6]
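# Index 6 of the bar data is the correction constant (inHg); see the
# BAROMETER CALIBRATION DATA layout printed by show_info above.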
# Set new altitude to station and clear previous _barcal value
station.setBarData(0.0, altitude_ft)
if _barcal != 0.0:
# Hit the console again to get the new barometer data:
_bardata = station.getBarData()
# Set previous _barcal value
station.setBarData(_bardata[0] + _barcal, altitude_ft)
else:
print("Nothing done.")
@staticmethod
def set_barometer(station, barometer_inHg, noprompt):
"""Set the barometer reading to a known correct value."""
# Hit the console to get the current barometer calibration data:
_bardata = station.getBarData()
if barometer_inHg:
msg = "Proceeding will set the barometer value to %.3f and " \
"the station altitude to %.0f feet.\n" % (barometer_inHg, _bardata[1])
else:
msg = "Proceeding will have the console pick a sensible barometer " \
"calibration and set the station altitude to %.0f feet.\n" % (_bardata[1],)
ans = weeutil.weeutil.y_or_n(msg + "Are you sure you wish to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setBarData(barometer_inHg, _bardata[1])
else:
print("Nothing done.")
@staticmethod
def clear_memory(station, noprompt):
"""Clear the archive memory of a VantagePro"""
ans = weeutil.weeutil.y_or_n("Proceeding will erase all archive records.\n"
"Are you sure you wish to proceed (y/n)? ",
noprompt)
if ans == 'y':
print("Erasing all archive records ...")
station.clearLog()
print("Archive records erased.")
else:
print("Nothing done.")
@staticmethod
def set_wind_cup(station, new_wind_cup_type, noprompt):
"""Set the wind cup type on the console."""
if station.hardware_type != 16:
print("Unable to set new wind cup type.")
print ("Reason: command only valid with Vantage Pro or Vantage Pro2 station.", file=sys.stderr)
return
print("Old rain wind cup type is %d (%s), new one is %d (%s)."
% (station.wind_cup_type,
station.wind_cup_size,
new_wind_cup_type,
Vantage.wind_cup_dict[new_wind_cup_type]))
if station.wind_cup_type == new_wind_cup_type:
print("Old and new wind cup types are the same. Nothing done.")
else:
ans = weeutil.weeutil.y_or_n("Proceeding will change the wind cup type.\n"
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setWindCupType(new_wind_cup_type)
print("Wind cup type set to %d (%s)." % (station.wind_cup_type, station.wind_cup_size))
else:
print("Nothing done.")
@staticmethod
def set_bucket(station, new_bucket_type, noprompt):
"""Set the bucket type on the console."""
print("Old rain bucket type is %d (%s), new one is %d (%s)."
% (station.rain_bucket_type,
station.rain_bucket_size,
new_bucket_type,
Vantage.rain_bucket_dict[new_bucket_type]))
if station.rain_bucket_type == new_bucket_type:
print("Old and new bucket types are the same. Nothing done.")
else:
ans = weeutil.weeutil.y_or_n("Proceeding will change the rain bucket type.\n"
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setBucketType(new_bucket_type)
print("Bucket type now set to %d." % (station.rain_bucket_type,))
else:
print("Nothing done.")
@staticmethod
def set_rain_year_start(station, rain_year_start, noprompt):
print("Old rain season start is %d, new one is %d." % (station.rain_year_start, rain_year_start))
if station.rain_year_start == rain_year_start:
print("Old and new rain season starts are the same. Nothing done.")
else:
ans = weeutil.weeutil.y_or_n("Proceeding will change the rain season start.\n"
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setRainYearStart(rain_year_start)
print("Rain year start now set to %d." % (station.rain_year_start,))
else:
print("Nothing done.")
@staticmethod
def set_offset(station, offset_list, noprompt):
"""Set the on-board offset for a temperature, humidity or wind direction variable."""
(variable, offset_str) = offset_list.split(',')
# These variables may be calibrated.
temp_variables = ['inTemp', 'outTemp' ] + \
['extraTemp%d' % i for i in range(1, 8)] + \
['soilTemp%d' % i for i in range(1, 5)] + \
['leafTemp%d' % i for i in range(1, 5)]
humid_variables = ['inHumid', 'outHumid'] + \
['extraHumid%d' % i for i in range(1, 8)]
# Wind direction can also be calibrated.
if variable == "windDir":
offset = int(offset_str)
if not -359 <= offset <= 359:
print("Wind direction offset %d is out of range." % offset, file=sys.stderr)
else:
ans = weeutil.weeutil.y_or_n("Proceeding will set offset for wind direction to %+d.\n" % offset +
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setCalibrationWindDir(offset)
print("Wind direction offset now set to %+d." % offset)
else:
print("Nothing done.")
elif variable in temp_variables:
offset = float(offset_str)
if not -12.8 <= offset <= 12.7:
print("Temperature offset %+.1f is out of range." % (offset), file=sys.stderr)
else:
ans = weeutil.weeutil.y_or_n("Proceeding will set offset for "
"temperature %s to %+.1f.\n" % (variable, offset) +
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setCalibrationTemp(variable, offset)
print("Temperature offset %s now set to %+.1f." % (variable, offset))
else:
print("Nothing done.")
elif variable in humid_variables:
offset = int(offset_str)
if not 0 <= offset <= 100:
print("Humidity offset %+d is out of range." % (offset), file=sys.stderr)
else:
ans = weeutil.weeutil.y_or_n("Proceeding will set offset for "
"humidity %s to %+d.\n" % (variable, offset) +
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setCalibrationHumid(variable, offset)
print("Humidity offset %s now set to %+d." % (variable, offset))
else:
print("Nothing done.")
else:
print("Unknown variable %s" % variable, file=sys.stderr)
@staticmethod
def set_transmitter_type(station, transmitter_list, noprompt):
"""Set the transmitter type for one of the eight channels."""
transmitter_list = list(map((lambda x: int(x) if x.isdigit() else x if x != "" else None),
transmitter_list.split(',')))
channel = transmitter_list[0]
if not 1 <= channel <= 8:
print("Channel number must be between 1 and 8.")
return
# Check new channel against retransmit channel.
# Warn and stop if new channel is used as retransmit channel.
retransmit_channel = station._getEEPROM_value(0x18)[0]
if retransmit_channel == channel:
print("This channel is used as retransmit channel. "
"Please turn off retransmit function or choose another channel.")
return
# Init repeater to 'no repeater'
repeater = 0
# Check the last entry in transmitter_list to see if it is a repeater letter
try:
if transmitter_list[len(transmitter_list)-1].isalpha():
repeater_id = transmitter_list[len(transmitter_list)-1].upper()
del transmitter_list[len(transmitter_list)-1]
# Check with repeater_dict and get the ID number
for key in list(station.repeater_dict.keys()):
if station.repeater_dict[key] == repeater_id:
repeater = key
break
if repeater == 0:
print("Repeater ID must be between 'A' and 'H'.")
return
except AttributeError:
# No repeater letter
pass
transmitter_type = transmitter_list[1]
extra_temp = transmitter_list[2] if len(transmitter_list) > 2 else None
extra_hum = transmitter_list[3] if len(transmitter_list) > 3 else None
usetx = 1 if transmitter_type != 10 else 0
try:
transmitter_type_name = station.transmitter_type_dict[transmitter_type]
except KeyError:
print("Unknown transmitter type (%s)" % transmitter_type)
return
if transmitter_type_name in ['temp', 'temp_hum'] and extra_temp not in list(range(1, 8)):
print("Transmitter type %s requires extra_temp in range 1-7'" % transmitter_type_name)
return
if transmitter_type_name in ['hum', 'temp_hum'] and extra_hum not in list(range(1, 8)):
print("Transmitter type %s requires extra_hum in range 1-7'" % transmitter_type_name)
return
msg = "Proceeding will set channel %d to type %d (%s), repeater: %s, %s.\n" \
% (channel,
transmitter_type,
transmitter_type_name,
station.repeater_dict[repeater],
station.listen_dict[usetx])
ans = weeutil.weeutil.y_or_n(msg + "Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setTransmitterType(channel, transmitter_type, extra_temp, extra_hum, repeater)
print("Transmitter type for channel %d set to %d (%s), repeater: %s, %s."
% (channel,
transmitter_type,
transmitter_type_name,
station.repeater_dict[repeater],
station.listen_dict[usetx]))
else:
print("Nothing done.")
@staticmethod
def set_retransmit(station, channel_on_off, noprompt):
"""Set console retransmit channel."""
channel = 0
channel_on_off = channel_on_off.strip().upper()
channel_on_off_list = channel_on_off.split(',')
on_off = channel_on_off_list[0]
if on_off != "OFF":
if len(channel_on_off_list) > 1:
channel = int(channel_on_off_list[1])
if not 0 < channel < 9:
print("Channel out of range 1..8. Nothing done.")
return
transmitter_list = station.getStnTransmitters()
if channel:
if transmitter_list[channel-1]["listen"] == "active":
print("Channel %d in use. Please select another channel. Nothing done." % channel)
return
else:
for i in range(0, 7):
if transmitter_list[i]["listen"] == "inactive":
channel = i+1
break
if channel == 0:
print("All Channels in use. Retransmit can't be enabled. Nothing done.")
return
old_channel = station._getEEPROM_value(0x18)[0]
if old_channel == channel:
print("Old and new retransmit settings are the same. Nothing done.")
return
if channel:
msg = "Proceeding will set retransmit to 'ON' at channel: %d.\n" % channel
else:
msg = "Proceeding will set retransmit to 'OFF'\n."
ans = weeutil.weeutil.y_or_n(msg + "Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setRetransmit(channel)
if channel:
print("Retransmit set to 'ON' at channel: %d." % channel)
else:
print("Retransmit set to 'OFF'.")
else:
print("Nothing done.")
@staticmethod
def set_temp_logging(station, tempLogging, noprompt):
"""Set console temperature logging to 'LAST' or 'AVERAGE'."""
msg = "Proceeding will change the console temperature logging to '%s'.\n" % tempLogging.upper()
ans = weeutil.weeutil.y_or_n(msg + "Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
station.setTempLogging(tempLogging)
print("Console temperature logging set to '%s'." % (tempLogging.upper()))
else:
print("Nothing done.")
@staticmethod
def set_time(station):
print("Setting time on console...")
station.setTime()
newtime_ts = station.getTime()
print("Current console time is %s" % weeutil.weeutil.timestamp_to_string(newtime_ts))
@staticmethod
def set_dst(station, dst):
station.setDST(dst)
print("Set DST on console to '%s'" % dst)
@staticmethod
def set_tz_code(station, tz_code):
print("Setting time zone code to %d..." % tz_code)
station.setTZcode(tz_code)
new_tz_code = station.getStnInfo()[5]
print("Set time zone code to %s" % new_tz_code)
@staticmethod
def set_tz_offset(station, tz_offset):
offset_int = int(tz_offset)
h = abs(offset_int) // 100
m = abs(offset_int) % 100
if h > 12 or m >= 60:
raise ValueError("Invalid time zone offset: %s" % tz_offset)
offset = h * 100 + (100 * m // 60)
if offset_int < 0:
offset = -offset
station.setTZoffset(offset)
new_offset = station.getStnInfo()[6]
print("Set time zone offset to %+.1f hours" % new_offset)
@staticmethod
def set_lamp(station, onoff):
print("Setting lamp on console...")
station.setLamp(onoff)
@staticmethod
def start_logger(station):
print("Starting logger ...")
station.startLogger()
print("Logger started")
@staticmethod
def stop_logger(station):
print("Stopping logger ...")
station.stopLogger()
print("Logger stopped")
@staticmethod
def dump_logger(station, config_dict, noprompt, batch_size=1):
import weewx.manager
ans = weeutil.weeutil.y_or_n("Proceeding will dump all data in the logger.\n"
"Are you sure you want to proceed (y/n)? ",
noprompt)
if ans == 'y':
with weewx.manager.open_manager_with_config(config_dict, 'wx_binding',
initialize=True) as archive:
nrecs = 0
# Determine whether to use something to show our progress:
progress_fn = print_page if batch_size == 0 else None
# Wrap the Vantage generator function in a converter, which will convert the units
# to the same units used by the database:
converted_generator = weewx.units.GenWithConvert(
station.genArchiveDump(progress_fn=progress_fn),
archive.std_unit_system)
# Wrap it again, to dump in the requested batch size
converted_generator = weeutil.weeutil.GenByBatch(converted_generator, batch_size)
print("Starting dump ...")
for record in converted_generator:
archive.addRecord(record)
nrecs += 1
print("Records processed: %d; Timestamp: %s\r"
% (nrecs, weeutil.weeutil.timestamp_to_string(record['dateTime'])),
end=' ',
file=sys.stdout)
sys.stdout.flush()
print("\nFinished dump. %d records added" % (nrecs,))
else:
print("Nothing done.")
@staticmethod
def logger_summary(station, dest_path):
with open(dest_path, mode="w") as dest:
VantageConfigurator.show_info(station, dest)
print("Starting download of logger summary...")
nrecs = 0
for (page, index, y, mo, d, h, mn, time_ts) in station.genLoggerSummary():
if time_ts:
print("%4d %4d %4d | %4d-%02d-%02d %02d:%02d | %s"
% (nrecs, page, index, y + 2000, mo, d, h, mn,
weeutil.weeutil.timestamp_to_string(time_ts)), file=dest)
else:
print("%4d %4d %4d [*** Unused index ***]"
% (nrecs, page, index), file=dest)
nrecs += 1
if nrecs % 10 == 0:
print("Records processed: %d; Timestamp: %s\r"
% (nrecs, weeutil.weeutil.timestamp_to_string(time_ts)), end=' ', file=sys.stdout)
sys.stdout.flush()
print("\nFinished download of logger summary to file '%s'. %d records processed." % (dest_path, nrecs))
# =============================================================================
# Class VantageConfEditor
# =============================================================================
class VantageConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[Vantage]
# This section is for the Davis Vantage series of weather stations.
# Connection type: serial or ethernet
# serial (the classic VantagePro)
# ethernet (the WeatherLinkIP or Serial-Ethernet bridge)
type = serial
# If the connection type is serial, a port must be specified:
# Debian, Ubuntu, Redhat, Fedora, and SuSE:
# /dev/ttyUSB0 is a common USB port name
# /dev/ttyS0 is a common serial port name
# BSD:
# /dev/cuaU0 is a common serial port name
port = /dev/ttyUSB0
# If the connection type is ethernet, an IP Address/hostname is required:
host = 1.2.3.4
######################################################
# The rest of this section rarely needs any attention.
# You can safely leave it "as is."
######################################################
# Serial baud rate (usually 19200)
baudrate = 19200
# TCP port (when using the WeatherLinkIP)
tcp_port = 22222
# TCP send delay (when using the WeatherLinkIP):
tcp_send_delay = 0.5
# The type of LOOP packet to request: 1 = LOOP1; 2 = LOOP2; 3 = both
loop_request = 1
# The id of your ISS station (usually 1). If you use a wind meter connected
# to an anemometer transmitter kit, use its id
iss_id = 1
# How long to wait for a response from the station before giving up (in
# seconds; must be greater than 2)
timeout = 4
# How long to wait before trying again (in seconds)
wait_before_retry = 1.2
# How many times to try before giving up:
max_tries = 4
# Vantage model Type: 1 = Vantage Pro; 2 = Vantage Pro2
model_type = 2
# The driver to use:
driver = weewx.drivers.vantage
"""
def prompt_for_settings(self):
settings = dict()
print("Specify the hardware interface, either 'serial' or 'ethernet'.")
print("If the station is connected by serial, USB, or serial-to-USB")
print("adapter, specify serial. Specify ethernet for stations with")
print("WeatherLinkIP interface.")
settings['type'] = self._prompt('type', 'serial', ['serial', 'ethernet'])
if settings['type'] == 'serial':
print("Specify a port for stations with a serial interface, for")
print("example /dev/ttyUSB0 or /dev/ttyS0.")
settings['port'] = self._prompt('port', '/dev/ttyUSB0')
else:
print("Specify the IP address (e.g., 192.168.0.10) or hostname")
print("(e.g., console or console.example.com) for stations with")
print("an ethernet interface.")
settings['host'] = self._prompt('host')
return settings
def print_page(ipage):
print("Requesting page %d/512\r" % ipage, end=' ', file=sys.stdout)
sys.stdout.flush()
# Define a main entry point for basic testing of the station without weewx
# engine and service overhead. Invoke this as follows from the weewx root directory:
#
# PYTHONPATH=bin python -m weewx.drivers.vantage
if __name__ == '__main__':
import optparse
import weewx
import weeutil.logger
weewx.debug = 1
weeutil.logger.setup('vantage', {})
usage = """Usage: python -m weewx.drivers.vantage --help
python -m weewx.drivers.vantage --version
python -m weewx.drivers.vantage [--port=PORT]"""
parser = optparse.OptionParser(usage=usage)
parser.add_option('--version', action='store_true',
help='Display driver version')
parser.add_option('--port', default='/dev/ttyUSB0',
help='Serial port to use. Default is "/dev/ttyUSB0"',
metavar="PORT")
(options, args) = parser.parse_args()
if options.version:
print("Vantage driver version %s" % DRIVER_VERSION)
exit(0)
vantage = Vantage(connection_type='serial', port=options.port)
for packet in vantage.genLoopPackets():
print(packet)
|
weewx/weewx
|
bin/weewx/drivers/vantage.py
|
Python
|
gpl-3.0
| 134,839
|
#!/usr/bin/python
#
# test_run.py
#
# Author:
# keldzh <keldzh@gmail.com>
#
# Copyright (c) 2015 Anton Kovalyov
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from unittest.mock import MagicMock
from unittest.mock import patch
import pygroot
class text_run(unittest.TestCase):
def setUp(self):
self.e = pygroot.executor()
@patch('pygroot.print', create=True)
@patch('pygroot.lex')
def test_simple_run(self, mock, print_):
mock.get_token.side_effect = ["inc", "inc", "inc", "out", None]
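# Three 'inc' tokens raise the current cell to 3, then 'out' prints chr(3);
# None ends the token stream.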
self.e.run()
print_.assert_called_with(chr(3))
@patch('pygroot.print', create=True)
@patch('pygroot.lex')
def test_run_with_jump(self, mock, print_):
mock.get_token.side_effect = [
"inc", "inc", "inc", "jump", "right", "inc",
"inc", "inc", "inc", "inc", "left", "dec",
"jump_back", "right", "out", None]
self.e.run()
print_.assert_called_with(chr(15))
@patch('pygroot.print_error')
@patch('pygroot.lex')
def test_run_jump_back_error(self, mock, print_):
mock.get_token.side_effect = ["inc", "jump_back"]
self.e.run()
print_.assert_called_with(
"There is no 'I'm Groot' previously than 'We are Groot'",
True)
if __name__ == '__main__':
unittest.main()
|
fAntel/pygroot
|
tests/test_run.py
|
Python
|
gpl-3.0
| 1,823
|
#! /usr/bin/env python
"""Test script for the binhex C module
Uses the mechanism of the python binhex module
Roger E. Masse
"""
import binhex
import tempfile
from test_support import verbose
def test():
try:
fname1 = tempfile.mktemp()
fname2 = tempfile.mktemp()
f = open(fname1, 'w')
except:
raise ImportError, "Cannot test binhex without a temp file"
start = 'Jack is my hero'
f.write(start)
f.close()
binhex.binhex(fname1, fname2)
if verbose:
print 'binhex'
binhex.hexbin(fname2, fname1)
if verbose:
print 'hexbin'
f = open(fname1, 'r')
finish = f.readline()
if start <> finish:
print 'Error: binhex <> hexbin'
elif verbose:
print 'binhex == hexbin'
try:
import os
os.unlink(fname1)
os.unlink(fname2)
except:
pass
test()
|
mancoast/CPythonPyc_test
|
cpython/161_test_binhex.py
|
Python
|
gpl-3.0
| 898
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib_venn import venn2, venn3, venn2_unweighted, venn3_unweighted
# `general` is a project-local helper used below; its exact import path is an assumption.
from . import general
def venn_2d(dataframe, setOne='BPT', setTwo='He_II', compareBy='AGN', unweighted=False):
df = dataframe
setSizes = {}
#groupOne = (setOne + ':P(Mixed)')
groupOne = (setOne + ':P(' + compareBy + ')')
groupTwo = (setTwo + ':P(' + compareBy + ')')
maskBPT = df[(df[groupOne] > 0)]
maskHeII = df[(df[groupTwo] > 0)]
onlyBPT = maskBPT[(maskBPT[groupTwo] == 0)]
onlyHeII = maskHeII[(maskHeII[groupOne] == 0)]
BPTandHeII = df[(np.logical_and(
(df[groupOne] > 0), (df[groupTwo] > 0)))]
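# matplotlib_venn subset ids: '10' = only in set one, '01' = only in set two, '11' = in both.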
setSizes['10'] = onlyBPT.shape[0]
setSizes['11'] = BPTandHeII.shape[0]
setSizes['01'] = onlyHeII.shape[0]
if not unweighted:
v = venn2(subsets=setSizes, set_labels=(setOne, setTwo))
else:
v = venn2_unweighted(subsets=setSizes, set_labels=(setOne, setTwo))
titleString = ('Overlap of ' + compareBy +
' objects found by each method')
plt.title(titleString)
def venn_3d(dataframe, compareBy='AGN', unweighted=False):
df = dataframe
df = general.set_Pointwise_by_NeIII(df)
if compareBy == 'AGN':
op = 'Starforming'
maskBPT = df[(df['BPT:P(SF)'] > 0)]
maskHeII = df[(df['He_II:P(SF)'] > 0)]
maskNeIII = df[(df['Pointwise Ne_III Location'] == 'Starforming')]
onlyBPT = maskBPT[np.logical_and(
(maskBPT['He_II:P(SF)'] == 0), (maskBPT['Pointwise Ne_III Location'] == 'AGN'))]
onlyHeII = maskHeII[np.logical_and(
(maskHeII['BPT:P(SF)'] == 0), (maskHeII['Pointwise Ne_III Location'] == 'AGN'))]
onlyNeIII = maskNeIII[np.logical_and(
(maskNeIII['He_II:P(SF)'] == 0), (maskNeIII['BPT:P(SF)'] == 0))]
BPTandHeII = df[np.logical_and((np.logical_and(
(df['BPT:P(SF)'] > 0), (df['He_II:P(SF)'] > 0))), (df['Pointwise Ne_III Location'] == 'AGN'))]
BPTandNeIII = maskBPT[np.logical_and(
(maskBPT['He_II:P(SF)'] == 0), (maskBPT['Pointwise Ne_III Location'] == 'Starforming'))]
HeIIandNeIII = maskHeII[np.logical_and(
(maskHeII['BPT:P(SF)'] == 0), (maskHeII['Pointwise Ne_III Location'] == 'Starforming'))]
allThree = maskBPT[np.logical_and(
(maskBPT['He_II:P(SF)'] > 0), (maskBPT['Pointwise Ne_III Location'] == 'Starforming'))]
setSizes = {}
setSizes['100'] = onlyBPT.shape[0]
setSizes['110'] = BPTandHeII.shape[0]
setSizes['010'] = onlyHeII.shape[0]
setSizes['011'] = HeIIandNeIII.shape[0]
setSizes['001'] = onlyNeIII.shape[0]
setSizes['101'] = BPTandNeIII.shape[0]
setSizes['111'] = allThree.shape[0]
if not unweighted:
venn3(subsets=setSizes, set_labels=('BPT', 'HeII', 'NeIII'))
else:
venn3_unweighted(subsets=setSizes, set_labels=('BPT', 'HeII', 'NeIII'))
plt.title("Number of SF objects found by each method")
|
HowDoIUseThis/AGNClassification
|
utils/graphing/venn.py
|
Python
|
gpl-3.0
| 2,887
|
#!/usr/bin/python
'''
convert images from PGM to other formats
'''
import os, sys, glob, cv2, argparse
def parse_args():
'''parse command line arguments'''
parser = argparse.ArgumentParser("Convert pgm image to png or jpg")
parser.add_argument("directory", default=None,
help="directory containing PGM image files")
parser.add_argument("--output-directory", default=None,
help="directory to use for converted files")
parser.add_argument("--format", default='png', choices=['png', 'jpg'], help="type of file to convert to (png or jpg)")
return parser.parse_args()
def process(args):
'''process a set of files'''
files = []
if os.path.isdir(args.directory):
files.extend(glob.glob(os.path.join(args.directory, '*.pgm')))
else:
if args.directory.find('*') != -1:
files.extend(glob.glob(args.directory))
else:
files.append(args.directory)
files.sort()
for f in files:
im_orig = cv2.imread(f,-1)
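# Demosaic the raw Bayer-pattern (GR ordering) image into a 3-channel BGR image.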
im_colour = cv2.cvtColor(im_orig, cv2.COLOR_BAYER_GR2BGR)
if not args.output_directory:
outdir = os.path.dirname(f)
else:
outdir = args.output_directory
basename = os.path.basename(f)[:-4]
new_name = os.path.join(outdir, basename + '.' + args.format)
print("Creating %s" % new_name)
cv2.imwrite(new_name, im_colour)
if __name__ == '__main__':
args = parse_args()
# main program
process(args)
|
CanberraUAV/cuav
|
capturescripts/chameleon/pgm_convert.py
|
Python
|
gpl-3.0
| 1,457
|
import xml.etree.ElementTree as ET
from sets import Set
from com.uva.preprocess.dataset import DataSet
from com.uva.data import Data
class NetScience(DataSet):
""" Process netscience data set """
def __init__(self):
pass
def _process(self):
max_n = 500
"""
The netscience data is stored in xml format. The function just reads all the vertices
and edges.
* if vertices are not recorded in the format 0,1,2,3,..., we need to do some
processing. Fortunately, there is no such issue with the netscience data set.
"""
# V stores the mapping between node ID and attribute. i.e title, name. etc
# i.e {0: "WU, C", 1 :CHUA, L"}
V = {}
# file path of netscience data set.
tree = ET.parse("/home/liwenzhe/workspace/SGRLDForMMSB/datasets/netscience.xml")
for node in tree.iter("node"):
attrs = node.attrib
V[attrs['id']] = attrs['title']
N = min(len(V), max_n)
# iterate every link in the graph, and store those links into Set<Edge> object.
E = Set()
for link in tree.iter("link"):
attrs = link.attrib
if int(attrs['target']) >= N or int(attrs['source'])>=N:
continue
E.add((int(attrs['target']), int(attrs['source'])))
return Data(V, E, N)
# file path of netscience data set.
tree = ET.parse("/home/liwenzhe/workspace/SGRLDForMMSB/datasets/netscience.xml")
f = open('netscience.txt', 'wb')
# iterate every link in the graph, and store those links into Set<Edge> object.
for link in tree.iter("link"):
attrs = link.attrib
f.write(attrs['target']+"\t"+attrs['source']+"\n")
f.close()
|
wenzheli/python_new
|
com/uva/preprocess/netscience.py
|
Python
|
gpl-3.0
| 1,859
|
import numpy as np
from .abstract_model import Model
class Graph(Model):
def __init__(self, input_shape, nodes, connectivity):
super().__init__(input_shape)
self.nodes = nodes # some iterable containing Model instances (Layerstacks and/or Graphs)
self.conn = connectivity # 0/1 matrix defining connectivity vs timestep
def feedforward(self, X):
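# Each row of the connectivity structure is used as a membership test over
# node indices: the outputs of the selected nodes are concatenated and fed forward.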
for mask in self.conn:
X = np.concatenate(
[node.feedforward(X) for ix, node in enumerate(self.nodes) if ix in mask]
)
return X
@property
def outshape(self):
return self.nodes[-1].outshape
@property
def nparams(self):
return sum(node.num_params for node in self.nodes)
def get_weights(self, unfold=True):
return []
def set_weights(self, fold=True):
pass
|
csxeba/brainforge
|
brainforge/model/graph.py
|
Python
|
gpl-3.0
| 854
|
#!/usr/bin/env python
""" __init__.py
retrieves WikiPedia articles """
# (C)opyrights 2008 by Albert Weichselbraun <albert@weichselbraun.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from future import standard_library
standard_library.install_aliases()
from builtins import object
__version__ = "$Header$"
from urllib.parse import quote
from urllib.error import HTTPError
from eWRT.access.http import Retrieve
WIKIPEDIA_SEARCH_QUERY = 'http://%s.wikipedia.org/wiki/%s'
class WikiPedia(object):
""" returns a wikipedia article """
def __init__(self):
self.r = Retrieve( WikiPedia.__name__ )
def getDescriptor(self, synonym, lang='en'):
""" returns the descriptor for the given synonym in the diven language """
assert( len(lang)==2 )
try:
result = self.getWikipediaSearchResults(synonym, lang)
return result[0]
except (HTTPError, IndexError):
return None
def getWikipediaSearchResults(self, term, lang):
""" returns a list of wikipedia search results for the given term
or None if nothing was found
"""
search_query = WIKIPEDIA_SEARCH_QUERY % (lang, quote(term) )
f=self.r.open(search_query)
results = WikiPedia._parse_wikipedia_search_results( f.read() )
f.close()
return results
@staticmethod
def _parse_wikipedia_search_results( text ):
result = []
for line in text.split("\n"):
# only consider lines containing search results
if not "class='searchresult'" in line: continue
(prefix, tmp) = line.split("title=\"", 1)
(descriptor, suffix ) = tmp.split("\"", 1)
result.append(descriptor)
return result
|
weblyzard/ewrt
|
src/eWRT/ws/wikipedia/descriptor.py
|
Python
|
gpl-3.0
| 2,380
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
###############################################################
# CLAM: Computational Linguistics Application Mediator
# -- Service Configuration File (Template) --
# by Maarten van Gompel (proycon)
# Centre for Language and Speech Technology / Language Machines
# Radboud University Nijmegen
#
# https://proycon.github.io/clam
#
# Licensed under GPLv3
#
###############################################################
#Consult the CLAM manual for extensive documentation
from clam.common.parameters import *
from clam.common.formats import *
from clam.common.converters import *
from clam.common.viewers import *
from clam.common.data import *
from clam.common.digestauth import pwhash
import clam
import sys
import os
REQUIRE_VERSION = 3.0
CLAMDIR = clam.__path__[0] #directory where CLAM is installed, detected automatically
WEBSERVICEDIR = os.path.dirname(os.path.abspath(__file__)) #directory where this webservice is installed, detected automatically
# ======== GENERAL INFORMATION ===========
# General information concerning your system.
#The System ID, a short alphanumeric identifier for internal use only (mandatory!)
SYSTEM_ID = ""
#System name, the way the system is presented to the world
SYSTEM_NAME = ""
#An informative description for this system (this should be fairly short, about one paragraph, and may not contain HTML)
SYSTEM_DESCRIPTION = "Enter a nice description for your system"
#A version label of the underlying tool and/or this CLAM wrapper
#(If you can derive this dynamically then that is strongly recommended!)
#SYSTEM_VERSION = 0.1
#The author(s) of the underlying tool and/or this CLAM wrapper
#(If you can derive this dynamically then that is strongly recommended!)
#SYSTEM_AUTHOR = ""
#How to reach the authors?
#SYSTEM_EMAIL = ""
#Does this system have a homepage (or possibly a source repository otherwise)
#SYSTEM_URL = ""
#Is this webservice embedded in a larger system? Like part of an institution or particular portal site. If so, mention the URL here.
#SYSTEM_PARENT_URL = ""
#The URL of a cover image to prominently display in the header of the interface. You may also want to set INTERFACEOPTIONS="centercover" to center it horizontally.
#SYSTEM_COVER_URL = ""
#URL to a website where users can register an account for use with this webservice. This link is only for human end
#users, not an API endpoint.
#SYSTEM_REGISTER_URL = ""
# ======== LOCATION ===========
#Either add a section for your host here, or
#specify these variables in an external yaml file
#called $hostname.yaml or config.yaml and use the loadconfig() mechanism.
#Such an external file will be looked for by default and is the recommended way.
host = os.uname()[1]
if host == "yourhostname":
#The root directory for CLAM, all project files, (input & output) and
#pre-installed corpora will be stored here. Set to an absolute path:
ROOT = "/tmp/clam.projects/"
#The port number of the system (If you start clam with the built-in webserver, you can override this with -P)
PORT = 8080
#The hostname of the system. Will be automatically determined if not set. (If you start clam with the built-in webserver, you can override this with -H)
#Users *must* make use of this hostname and no other (even if it points to the same IP) for the web application to work.
HOST = 'yourhostname'
#If the webservice runs in another webserver (e.g. apache, nginx, lighttpd), and it
#doesn't run at the root of the server, you can specify a URL prefix here:
#URLPREFIX = "/myservice/"
#If you run behind a reverse proxy, you can autodetect your host if you run
#if your reverse proxy properly sets the X-Forwarded-Host and X-Forwarded-Proto headers.
#Setting this when you are NOT behind a reverse proxy that output these headers, is a security risk:
#USE_FORWARDED_HOST = False
#Alternatively to the above, you can force the full URL CLAM has to use, rather than rely on any autodetected measures:
#FORCEURL = "http://yourhostname.com"
# ======== AUTHENTICATION & SECURITY ===========
#Users and passwords
#set security realm, a required component for hashing passwords (will default to SYSTEM_ID if not set)
#REALM = SYSTEM_ID
USERS = None #no user authentication/security (this is not recommended for production environments!)
#If you want to enable user-based security, you can define a dictionary
#of users and (hashed) passwords here. The actual authentication will proceed
#as HTTP Digest Authentication. Although being a convenient shortcut,
#using pwhash and plaintext password in this code is not secure!!
#USERS = { 'user1': '4f8dh8337e2a5a83734b', 'user2': pwhash('username', REALM, 'secret') }
ADMINS = None #List of usernames that are administrator and can access the administrative web-interface (on URL /admin/)
else:
#This invokes the automatic loader, do not change it;
#it will try to find a file named $system_id.$hostname.yml or just $hostname.yml, where $hostname
#is the auto-detected hostname of this system. Alternatively, it tries a static $system_id.config.yml or just config.yml .
#You can also set an environment variable CONFIGFILE to specify the exact file to load at run-time.
#It will look in several paths including the current working directory and the path this settings script is loaded from.
#Such an external configuration file simply defines variables that will be imported here. If it fails to find
#a configuration file, an exception will be raised.
loadconfig(__name__)
#Amount of free memory required prior to starting a new process (in MB!), Free Memory + Cached (without swap!). Set to 0 to disable this check (not recommended)
REQUIREMEMORY = 10
#Maximum load average at which processes are still started (first number reported by 'uptime'). Set to 0 to disable this check (not recommended)
#MAXLOADAVG = 4.0
#Minimum amount of free diskspace in MB. Set to 0 to disable this check (not recommended)
#DISK = '/dev/sda1' #set this to the disk where ROOT is on
#MINDISKSPACE = 10
#The amount of diskspace a user may use (in MB), this is a soft quota which can be exceeded, but creation of new projects is blocked until usage drops below the quota again
#USERQUOTA = 100
#The secret key is used internally for cryptographically signing session data, in production environments, you'll want to set this to a persistent value. If not set it will be randomly generated.
#SECRET_KEY = 'mysecret'
#Allow Asynchronous HTTP requests from **web browsers** in following domains (sets Access-Control-Allow-Origin HTTP headers), by default this is unrestricted
#ALLOW_ORIGIN = "*"
# ======== WEB-APPLICATION STYLING =============
#Choose a style (has to be defined as a CSS file in clam/style/ ). You can copy, rename and adapt it to make your own style
STYLE = 'classic'
# ======== ENABLED FORMATS ===========
#In CUSTOM_FORMATS you can specify a list of Python classes corresponding to extra formats.
#You can define the classes first, and then put them in CUSTOM_FORMATS, as shown in this example:
#class MyXMLFormat(CLAMMetaData):
# attributes = {}
# name = "My XML format"
# mimetype = 'text/xml'
# CUSTOM_FORMATS = [ MyXMLFormat ]
# ======== ENABLED VIEWERS ===========
#In CUSTOM_VIEWERS you can specify a list of Python classes corresponding to extra viewers.
#You can define the classes first, and then put them in CUSTOM_VIEWERS, as shown in this example:
# CUSTOM_VIEWERS = [ MyXMLViewer ]
# ======= INTERFACE OPTIONS ===========
#Here you can specify additional interface options (space separated list), see the documentation for all allowed options
#INTERFACEOPTIONS = "inputfromweb" #allow CLAM to download its input from a user-specified url
# ======== PROJECTS: PREINSTALLED DATA ===========
#INPUTSOURCES = [
# InputSource(id='sampledocs',label='Sample texts',path=ROOT+'/inputsources/sampledata',defaultmetadata=PlainTextFormat(None, encoding='utf-8') ),
#]
# ======== PROJECTS: PROFILE DEFINITIONS ===========
#Define your profiles here. This is required for the project paradigm, but can be set to an empty list if you only use the action paradigm.
PROFILES = [
Profile(
InputTemplate('replace-with-a-unique-identifier', PlainTextFormat,"Replace with human label for this input template",
StaticParameter(id='encoding',name='Encoding',description='The character encoding of the file', value='utf-8'), #note that encoding is required if you work with PlainTextFormat
#ChoiceParameter(id='language',name='Language',description='The language the text is in', choices=[('en','English'),('nl','Dutch'),('fr','French')]),
#StringParameter(id='author',name='Author',description="The author's name", maxlength=100),
#InputSource(id='sampledoc', label="Sample Document", path=ROOT+'/inputsources/sampledoc.txt', metadata=PlainTextFormat(None, encoding='utf-8',language='en')),
#CharEncodingConverter(id='latin1',label='Convert from Latin-1',charset='iso-8859-1'),
#PDFtoTextConverter(id='pdfconv',label='Convert from PDF Document'),
#MSWordConverter(id='docconv',label='Convert from MS Word Document'),
#RequireMeta(somefield="somevalue") #constraint implementation (only works if the format implements a validator)
#ForbidMeta(somefield="somevalue") #constraint implementation (only works if the format implements a validator)
extension='.txt',
#filename='filename.txt',
unique=True #set unique=True if the user may only upload a file for this input template once. Set multi=True if the user may upload multiple such files
),
#------------------------------------------------------------------------------------------------------------------------
OutputTemplate('replace-with-a-unique-identifier',PlainTextFormat,'Replace with human label for this output template',
SetMetaField('encoding','ascii'), #note that encoding is required if you work with PlainTextFormat
removeextensions=[".txt"], #remove these extensions from the associated input prior to appending the output extension
extension='.stats', #set an output extension or set a filename:
#filename='filename.stats',
unique=True,
#If you want to associate any viewers with your output, then this is the place to do so!
),
)
]
# ======== PROJECTS: COMMAND ===========
#The system command for the project paradigm.
#It is recommended you set this to small wrapper
#script around your actual system. Full shell syntax is supported. Using
#absolute paths is preferred. The current working directory will be
#set to the project directory.
#
#You can make use of the following special variables,
#which will be automatically set by CLAM:
# $INPUTDIRECTORY - The directory where input files are uploaded.
# $OUTPUTDIRECTORY - The directory where the system should output
# its output files.
# $TMPDIRECTORY - The directory where the system should output
# its temporary files.
# $STATUSFILE - Filename of the .status file where the system
# should output status messages.
# $DATAFILE - Filename of the clam.xml file describing the
# system and chosen configuration.
# $USERNAME - The username of the currently logged in user
# (set to "anonymous" if there is none)
# $PARAMETERS - List of chosen parameters, using the specified flags
#
COMMAND = WEBSERVICEDIR + "/your-wrapper-script.py $DATAFILE $STATUSFILE $OUTPUTDIRECTORY"
#Or for the shell variant:
#COMMAND = WEBSERVICEDIR + "/your-wrapper-script.sh $STATUSFILE $INPUTDIRECTORY $OUTPUTDIRECTORY $PARAMETERS"
#Or if you only use the action paradigm, set COMMAND = None
# ======== PARAMETER DEFINITIONS ===========
#The global parameters (for the project paradigm) are subdivided into several
#groups. In the form of a list of (groupname, parameters) tuples. The parameters
#are a list of instances from common/parameters.py
PARAMETERS = [
('Group title', [ #change or comment this
#BooleanParameter(id='createlexicon',name='Create Lexicon',description='Generate a separate overall lexicon?'),
#ChoiceParameter(id='casesensitive',name='Case Sensitivity',description='Enable case sensitive behaviour?', choices=['yes','no'],default='no'),
#StringParameter(id='author',name='Author',description='Sign output metadata with the specified author name',maxlength=255),
] )
]
# ======= ACTIONS =============
#The action paradigm is an independent Remote-Procedure-Call mechanism that
#allows you to tie scripts (command=) or Python functions (function=) to URLs.
#It has no notion of projects or files and must respond in real-time. The syntax
#for commands is equal to those of COMMAND above, any file or project specific
#variables are not available though, so there is no $DATAFILE, $STATUSFILE, $INPUTDIRECTORY, $OUTPUTDIRECTORY or $PROJECT.
ACTIONS = [
#Action(id='multiply',name='Multiply',parameters=[
# IntegerParameter(id='x',name='Value'),
# IntegerParameter(id='y',name='Multiplier'),
# ],
# command=sys.path[0] + "/actions/multiply.sh $PARAMETERS"
#   tmpdir=False, #if your command writes intermediate files, you need to set this to True
#(or to a specific directory), so temporary files can be written.
#You can pass the actual directory in the command above by adding the parameter $TMPDIRECTORY.
# allowanonymous=False,
#),
#Action(id='multiply',name='Multiply',parameters=[
# IntegerParameter(id='x',name='Value'),
# IntegerParameter(id='y',name='Multiplier')
# ],
#   function=lambda x,y: x*y,
# allowanonymous=False,
#),
#Action(id="tabler",
# name="Tabler",
# allowanonymous=True, #allow unauthenticated access to this action
# description="Puts a comma separated list in a table (viewer example)",
# function=lambda x: x, #as you see, this function doesn't really do anything, we just demonstrate the viewer
# parameters=[
# TextParameter(id="text", name="Text", required=True),
# ],
# viewer=SimpleTableViewer(id="simpletableviewer",delimiter=",")
# )
]
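#Once defined, each action is exposed as its own URL endpoint on the service
#(e.g. the 'multiply' sketch above would be reachable under /actions/multiply,
#with its parameters passed as query parameters).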
# ======= FORWARDERS =============
#Global forwarders call a remote service, passing along a backlink from which the remote
#service can download an archive of ALL the output data. The remote service is expected
#to return a redirect (HTTP 302). CLAM will insert the backlink where you put $BACKLINK in the url:
#FORWARDERS = [
#Forwarder(id='otherservice', name="Other service", description="", url="https://my.service.com/grabfrom=$BACKLINK")
#]
# ======== DISPATCHING (ADVANCED! YOU CAN SAFELY SKIP THIS!) ========
#The dispatcher to use (defaults to clamdispatcher.py); you almost never want to change this
#DISPATCHER = 'clamdispatcher.py'
#DISPATCHER_POLLINTERVAL = 30 #interval at which the dispatcher polls for resource consumption (default: 30 secs)
#DISPATCHER_MAXRESMEM = 0 #maximum consumption of resident memory (in megabytes), processes that exceed this will be automatically aborted. (0 = unlimited, default)
#DISPATCHER_MAXTIME = 0 #maximum number of seconds a process may run, it will be aborted if this duration is exceeded. (0=unlimited, default)
#DISPATCHER_PYTHONPATH = [] #list of extra directories to add to the python path prior to launch of dispatcher
#Run background process on a remote host? Then set the following (leave the lambda in):
#REMOTEHOST = lambda: 'some.remote.host'
#REMOTEUSER = 'username'
#For this to work, the user under which CLAM runs must have (passwordless) ssh access (use ssh keys) to the remote host using the specified username (ssh REMOTEUSER@REMOTEHOST)
#Moreover, both systems must have access to the same filesystem (ROOT) under the same mountpoint.
|
proycon/clam
|
clam/config/template.py
|
Python
|
gpl-3.0
| 16,253
|
#########################################################################
#
# Astronomy Club Membership
# file: membership/membership_log.py
#
# Copyright (C) 2017 Teruo Utsumi, San Jose Astronomical Association
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Contributors:
# 2017-06-01 Teruo Utsumi, initial code
#
#########################################################################
import pdb
import logging
import logging.handlers
FILENAME_LOG = 'membership.log'
current_user = 'sam'
class ContextFilter(logging.Filter):
"""
    This filter injects contextual information (the current user) into each log record.
"""
def filter(self, record):
record.user = current_user
return True
def setup_log():
    handler = logging.handlers.RotatingFileHandler(FILENAME_LOG, maxBytes=2**20, backupCount=5)
    formatter = logging.Formatter('%(levelname)-7s %(asctime)s %(user)-8s %(message)s', '%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    membership_log = logging.getLogger('Membership logger')
    membership_log.setLevel(logging.INFO)
    logging.basicConfig(level=logging.INFO)  # also echo INFO and above to the console via the root logger
    membership_log.addFilter(ContextFilter())  # inject record.user so the formatter above can print it
    membership_log.addHandler(handler)
    return membership_log
membership_log = setup_log()
# in files
# from membership_log import *
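# A minimal usage sketch: a module that has done the import above can write
#   membership_log.info('renewed membership for %s', current_user)
# which appends a line such as
#   INFO    2017-06-01 12:00:00 sam      renewed membership for sam
# to membership.log (and echoes it to the console via the root logger).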
|
sjaa/scheduler
|
membership/membership_log.py
|
Python
|
gpl-3.0
| 1,723
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Student"),
"items": [
{
"type": "doctype",
"name": "Student"
},
{
"type": "doctype",
"name": "Guardian"
},
{
"type": "doctype",
"name": "Student Log"
},
{
"type": "doctype",
"name": "Student Batch"
},
{
"type": "doctype",
"name": "Student Group"
},
{
"type": "doctype",
"name": "Student Group Creation Tool"
},
{
"type": "report",
"is_query_report": True,
"name": "Student and Guardian Contact Details",
"doctype": "Program Enrollment"
}
]
},
{
"label": _("Admission"),
"items": [
{
"type": "doctype",
"name": "Student Applicant"
},
{
"type": "doctype",
"name": "Student Admission"
},
{
"type": "doctype",
"name": "Program Enrollment"
},
{
"type": "doctype",
"name": "Program Enrollment Tool"
},
{
"type": "doctype",
"name": "Student Batch Creation Tool"
}
]
},
{
"label": _("Attendance"),
"items": [
{
"type": "doctype",
"name": "Student Attendance"
},
{
"type": "doctype",
"name": "Student Leave Application"
},
{
"type": "doctype",
"name": "Student Attendance Tool"
},
{
"type": "report",
"is_query_report": True,
"name": "Absent Student Report",
"doctype": "Student Attendance"
},
{
"type": "report",
"is_query_report": True,
"name": "Student Batch-Wise Attendance",
"doctype": "Student Attendance"
},
{
"type": "report",
"is_query_report": True,
"name": "Student Monthly Attendance Sheet",
"doctype": "Student Attendance"
}
]
},
{
"label": _("Schedule"),
"items": [
{
"type": "doctype",
"name": "Course Schedule",
"route": "Calendar/Course Schedule"
},
{
"type": "doctype",
"name": "Course Scheduling Tool"
}
]
},
{
"label": _("Assessment"),
"items": [
{
"type": "doctype",
"name": "Assessment Plan"
},
{
"type": "doctype",
"name": "Assessment Group",
"link": "Tree/Assessment Group",
},
{
"type": "doctype",
"name": "Assessment Result"
},
{
"type": "doctype",
"name": "Grading Scale"
},
{
"type": "doctype",
"name": "Assessment Criteria"
},
{
"type": "doctype",
"name": "Assessment Criteria Group"
},
{
"type": "doctype",
"name": "Assessment Result Tool"
}
]
},
{
"label": _("Fees"),
"items": [
{
"type": "doctype",
"name": "Fees"
},
{
"type": "doctype",
"name": "Fee Structure"
},
{
"type": "doctype",
"name": "Fee Category"
},
{
"type": "report",
"name": "Student Fee Collection",
"doctype": "Fees",
"is_query_report": True
}
]
},
{
"label": _("Setup"),
"items": [
{
"type": "doctype",
"name": "Course"
},
{
"type": "doctype",
"name": "Program"
},
{
"type": "doctype",
"name": "Instructor"
},
{
"type": "doctype",
"name": "Room"
},
{
"type": "doctype",
"name": "Student Category"
},
{
"type": "doctype",
"name": "Student Batch Name"
},
{
"type": "doctype",
"name": "Academic Term"
},
{
"type": "doctype",
"name": "Academic Year"
}
]
},
]
|
RandyLowery/erpnext
|
erpnext/config/schools.py
|
Python
|
gpl-3.0
| 3,604
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2012 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import *
import re
import types
pattern_color = re.compile(r'#[0-9a-fA-F]{6}')
pattern_vector3D = re.compile(
r'\([ ]*-?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ ]+-?([0-9]+(\.[0-9]*)?|\.[0-9]+)){2}[ ]*\)')
def make_NCName(arg):
for c in (':', ' '):
arg = arg.replace(c, "_%x_" % ord(c))
return arg
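# For example: make_NCName('draw:frame 1') returns 'draw_3a_frame_20_1',
# since ':' (hex 3a) and ' ' (hex 20) are each escaped as '_<hex>_'.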
def cnv_anyURI(attribute, arg, element):
return unicode(arg)
def cnv_boolean(attribute, arg, element):
""" XML Schema Part 2: Datatypes Second Edition
An instance of a datatype that is defined as boolean can have the
following legal literals {true, false, 1, 0}
"""
if str(arg).lower() in ("0", "false", "no"):
return "false"
if str(arg).lower() in ("1", "true", "yes"):
return "true"
raise ValueError(
"'%s' not allowed as Boolean value for %s" %
(str(arg), attribute))
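# Illustration of the mapping above:
#   cnv_boolean('attr', True, None)  -> "true"
#   cnv_boolean('attr', 0, None)     -> "false"
#   cnv_boolean('attr', 'Yes', None) -> "true"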
# Potentially accept color values
def cnv_color(attribute, arg, element):
""" A RGB color in conformance with §5.9.11 of [XSL], that is a RGB color in notation “#rrggbb”, where
rr, gg and bb are 8-bit hexadecimal digits.
"""
return str(arg)
def cnv_configtype(attribute, arg, element):
if str(arg) not in ("boolean", "short", "int", "long",
"double", "string", "datetime", "base64Binary"):
raise ValueError("'%s' not allowed" % str(arg))
return str(arg)
def cnv_data_source_has_labels(attribute, arg, element):
if str(arg) not in ("none", "row", "column", "both"):
raise ValueError("'%s' not allowed" % str(arg))
return str(arg)
# Understand different date formats
def cnv_date(attribute, arg, element):
""" A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime
value.
"""
return str(arg)
def cnv_dateTime(attribute, arg, element):
""" A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime
value.
"""
return str(arg)
def cnv_double(attribute, arg, element):
return str(arg)
def cnv_duration(attribute, arg, element):
return str(arg)
def cnv_family(attribute, arg, element):
""" A style family """
if str(arg) not in (
"text",
"paragraph",
"section",
"ruby",
"table",
"table-column",
"table-row",
"table-cell",
"graphic",
"presentation",
"drawing-page",
"chart"):
raise ValueError("'%s' not allowed" % str(arg))
return str(arg)
def __save_prefix(attribute, arg, element):
prefix = arg.split(':', 1)[0]
if prefix == arg:
return unicode(arg)
namespace = element.get_knownns(prefix)
if namespace is None:
#raise ValueError, "'%s' is an unknown prefix" % str(prefix)
return unicode(arg)
p = element.get_nsprefix(namespace)
return unicode(arg)
def cnv_formula(attribute, arg, element):
""" A string containing a formula. Formulas do not have a predefined syntax, but the string should
begin with a namespace prefix, followed by a “:” (COLON, U+003A) separator, followed by the text
of the formula. The namespace bound to the prefix determines the syntax and semantics of the
formula.
"""
return __save_prefix(attribute, arg, element)
def cnv_ID(attribute, arg, element):
return str(arg)
def cnv_IDREF(attribute, arg, element):
return str(arg)
def cnv_integer(attribute, arg, element):
return str(arg)
def cnv_legend_position(attribute, arg, element):
if str(arg) not in (
"start",
"end",
"top",
"bottom",
"top-start",
"bottom-start",
"top-end",
"bottom-end"):
raise ValueError("'%s' not allowed" % str(arg))
return str(arg)
pattern_length = re.compile(
r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)((cm)|(mm)|(in)|(pt)|(pc)|(px))')
def cnv_length(attribute, arg, element):
""" A (positive or negative) physical length, consisting of magnitude and unit, in conformance with the
Units of Measure defined in §5.9.13 of [XSL].
"""
global pattern_length
if not pattern_length.match(arg):
raise ValueError("'%s' is not a valid length" % arg)
return arg
def cnv_lengthorpercent(attribute, arg, element):
    try:
        return cnv_length(attribute, arg, element)
    except (ValueError, TypeError):
        pass
    try:
        return cnv_percent(attribute, arg, element)
    except (ValueError, TypeError):
        raise ValueError("'%s' is not a valid length or percent" % arg)
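# E.g. '0.5in' is accepted as a length and '-12%' as a percent,
# while 'abc' fails both patterns and raises ValueError.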
def cnv_metavaluetype(attribute, arg, element):
if str(arg) not in ("float", "date", "time", "boolean", "string"):
raise ValueError("'%s' not allowed" % str(arg))
return str(arg)
def cnv_major_minor(attribute, arg, element):
    if arg not in ('major', 'minor'):
        raise ValueError("'%s' is not either 'minor' or 'major'" % arg)
    return arg
pattern_namespacedToken = re.compile(r'[0-9a-zA-Z_]+:[0-9a-zA-Z._\-]+')
def cnv_namespacedToken(attribute, arg, element):
global pattern_namespacedToken
if not pattern_namespacedToken.match(arg):
raise ValueError("'%s' is not a valid namespaced token" % arg)
return __save_prefix(attribute, arg, element)
def cnv_NCName(attribute, arg, element):
""" NCName is defined in http://www.w3.org/TR/REC-xml-names/#NT-NCName
Essentially an XML name minus ':'
"""
if type(arg) in types.StringTypes:
return make_NCName(arg)
else:
return arg.getAttrNS(STYLENS, 'name')
# This function takes either an instance of a style (preferred)
# or a text string naming the style. If it is a text string, then it must
# already have been converted to an NCName
# The text-string argument is mainly for when we build a structure from XML
def cnv_StyleNameRef(attribute, arg, element):
try:
return arg.getAttrNS(STYLENS, 'name')
except:
return arg
# This function takes either an instance of a style (preferred)
# or a text string naming the style. If it is a text string, then it must
# already have been converted to an NCName
# The text-string argument is mainly for when we build a structure from XML
def cnv_DrawNameRef(attribute, arg, element):
try:
return arg.getAttrNS(DRAWNS, 'name')
except:
return arg
# Must accept list of Style objects
def cnv_NCNames(attribute, arg, element):
return ' '.join(arg)
def cnv_nonNegativeInteger(attribute, arg, element):
return str(arg)
pattern_percent = re.compile(r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)%')
def cnv_percent(attribute, arg, element):
global pattern_percent
if not pattern_percent.match(arg):
raise ValueError("'%s' is not a valid length" % arg)
return arg
# Real one doesn't allow floating point values
pattern_points = re.compile(r'-?[0-9]+,-?[0-9]+([ ]+-?[0-9]+,-?[0-9]+)*')
#pattern_points = re.compile(r'-?[0-9.]+,-?[0-9.]+([ ]+-?[0-9.]+,-?[0-9.]+)*')
def cnv_points(attribute, arg, element):
global pattern_points
if type(arg) in types.StringTypes:
if not pattern_points.match(arg):
raise ValueError(
"x,y are separated by a comma and the points are separated by white spaces")
return arg
else:
try:
strarg = ' '.join(["%d,%d" % p for p in arg])
except:
raise ValueError(
"Points must be string or [(0,0),(1,1)] - not %s" %
arg)
return strarg
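# E.g. cnv_points('points', [(0, 0), (10, 20)], None) yields '0,0 10,20',
# while a string argument such as '0,0 10,20' is validated and returned as-is.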
def cnv_positiveInteger(attribute, arg, element):
return str(arg)
def cnv_string(attribute, arg, element):
return unicode(arg)
def cnv_textnoteclass(attribute, arg, element):
if str(arg) not in ("footnote", "endnote"):
raise ValueError("'%s' not allowed" % str(arg))
return str(arg)
# Understand different time formats
def cnv_time(attribute, arg, element):
return str(arg)
def cnv_token(attribute, arg, element):
return str(arg)
pattern_viewbox = re.compile(r'-?[0-9]+([ ]+-?[0-9]+){3}$')
def cnv_viewbox(attribute, arg, element):
global pattern_viewbox
if not pattern_viewbox.match(arg):
raise ValueError(
"viewBox must be four integers separated by whitespaces")
return arg
def cnv_xlinkshow(attribute, arg, element):
if str(arg) not in ("new", "replace", "embed"):
raise ValueError("'%s' not allowed" % str(arg))
return str(arg)
attrconverters = {
((ANIMNS, u'audio-level'), None): cnv_double,
((ANIMNS, u'color-interpolation'), None): cnv_string,
((ANIMNS, u'color-interpolation-direction'), None): cnv_string,
((ANIMNS, u'command'), None): cnv_string,
((ANIMNS, u'formula'), None): cnv_string,
((ANIMNS, u'id'), None): cnv_ID,
((ANIMNS, u'iterate-interval'), None): cnv_duration,
((ANIMNS, u'iterate-type'), None): cnv_string,
((ANIMNS, u'name'), None): cnv_string,
((ANIMNS, u'sub-item'), None): cnv_string,
((ANIMNS, u'value'), None): cnv_string,
# ((DBNS,u'type'), None): cnv_namespacedToken,
((CHARTNS, u'attached-axis'), None): cnv_string,
((CHARTNS, u'class'), (CHARTNS, u'grid')): cnv_major_minor,
((CHARTNS, u'class'), None): cnv_namespacedToken,
((CHARTNS, u'column-mapping'), None): cnv_string,
((CHARTNS, u'connect-bars'), None): cnv_boolean,
((CHARTNS, u'data-label-number'), None): cnv_string,
((CHARTNS, u'data-label-symbol'), None): cnv_boolean,
((CHARTNS, u'data-label-text'), None): cnv_boolean,
((CHARTNS, u'data-source-has-labels'), None): cnv_data_source_has_labels,
((CHARTNS, u'deep'), None): cnv_boolean,
((CHARTNS, u'dimension'), None): cnv_string,
((CHARTNS, u'display-label'), None): cnv_boolean,
((CHARTNS, u'error-category'), None): cnv_string,
((CHARTNS, u'error-lower-indicator'), None): cnv_boolean,
((CHARTNS, u'error-lower-limit'), None): cnv_string,
((CHARTNS, u'error-margin'), None): cnv_string,
((CHARTNS, u'error-percentage'), None): cnv_string,
((CHARTNS, u'error-upper-indicator'), None): cnv_boolean,
((CHARTNS, u'error-upper-limit'), None): cnv_string,
((CHARTNS, u'gap-width'), None): cnv_string,
((CHARTNS, u'interpolation'), None): cnv_string,
((CHARTNS, u'interval-major'), None): cnv_string,
((CHARTNS, u'interval-minor-divisor'), None): cnv_string,
((CHARTNS, u'japanese-candle-stick'), None): cnv_boolean,
((CHARTNS, u'label-arrangement'), None): cnv_string,
((CHARTNS, u'label-cell-address'), None): cnv_string,
((CHARTNS, u'legend-align'), None): cnv_string,
((CHARTNS, u'legend-position'), None): cnv_legend_position,
((CHARTNS, u'lines'), None): cnv_boolean,
((CHARTNS, u'link-data-style-to-source'), None): cnv_boolean,
((CHARTNS, u'logarithmic'), None): cnv_boolean,
((CHARTNS, u'maximum'), None): cnv_string,
((CHARTNS, u'mean-value'), None): cnv_boolean,
((CHARTNS, u'minimum'), None): cnv_string,
((CHARTNS, u'name'), None): cnv_string,
((CHARTNS, u'origin'), None): cnv_string,
((CHARTNS, u'overlap'), None): cnv_string,
((CHARTNS, u'percentage'), None): cnv_boolean,
((CHARTNS, u'pie-offset'), None): cnv_string,
((CHARTNS, u'regression-type'), None): cnv_string,
((CHARTNS, u'repeated'), None): cnv_nonNegativeInteger,
((CHARTNS, u'row-mapping'), None): cnv_string,
((CHARTNS, u'scale-text'), None): cnv_boolean,
((CHARTNS, u'series-source'), None): cnv_string,
((CHARTNS, u'solid-type'), None): cnv_string,
((CHARTNS, u'spline-order'), None): cnv_string,
((CHARTNS, u'spline-resolution'), None): cnv_string,
((CHARTNS, u'stacked'), None): cnv_boolean,
((CHARTNS, u'style-name'), None): cnv_StyleNameRef,
((CHARTNS, u'symbol-height'), None): cnv_string,
((CHARTNS, u'symbol-name'), None): cnv_string,
((CHARTNS, u'symbol-type'), None): cnv_string,
((CHARTNS, u'symbol-width'), None): cnv_string,
((CHARTNS, u'text-overlap'), None): cnv_boolean,
((CHARTNS, u'three-dimensional'), None): cnv_boolean,
((CHARTNS, u'tick-marks-major-inner'), None): cnv_boolean,
((CHARTNS, u'tick-marks-major-outer'), None): cnv_boolean,
((CHARTNS, u'tick-marks-minor-inner'), None): cnv_boolean,
((CHARTNS, u'tick-marks-minor-outer'), None): cnv_boolean,
((CHARTNS, u'values-cell-range-address'), None): cnv_string,
((CHARTNS, u'vertical'), None): cnv_boolean,
((CHARTNS, u'visible'), None): cnv_boolean,
((CONFIGNS, u'name'), None): cnv_formula,
((CONFIGNS, u'type'), None): cnv_configtype,
((DR3DNS, u'ambient-color'), None): cnv_string,
((DR3DNS, u'back-scale'), None): cnv_string,
((DR3DNS, u'backface-culling'), None): cnv_string,
((DR3DNS, u'center'), None): cnv_string,
((DR3DNS, u'close-back'), None): cnv_boolean,
((DR3DNS, u'close-front'), None): cnv_boolean,
((DR3DNS, u'depth'), None): cnv_length,
((DR3DNS, u'diffuse-color'), None): cnv_string,
((DR3DNS, u'direction'), None): cnv_string,
((DR3DNS, u'distance'), None): cnv_length,
((DR3DNS, u'edge-rounding'), None): cnv_string,
((DR3DNS, u'edge-rounding-mode'), None): cnv_string,
((DR3DNS, u'emissive-color'), None): cnv_string,
((DR3DNS, u'enabled'), None): cnv_boolean,
((DR3DNS, u'end-angle'), None): cnv_string,
((DR3DNS, u'focal-length'), None): cnv_length,
((DR3DNS, u'horizontal-segments'), None): cnv_string,
((DR3DNS, u'lighting-mode'), None): cnv_boolean,
((DR3DNS, u'max-edge'), None): cnv_string,
((DR3DNS, u'min-edge'), None): cnv_string,
((DR3DNS, u'normals-direction'), None): cnv_string,
((DR3DNS, u'normals-kind'), None): cnv_string,
((DR3DNS, u'projection'), None): cnv_string,
((DR3DNS, u'shade-mode'), None): cnv_string,
((DR3DNS, u'shadow'), None): cnv_string,
((DR3DNS, u'shadow-slant'), None): cnv_nonNegativeInteger,
((DR3DNS, u'shininess'), None): cnv_string,
((DR3DNS, u'size'), None): cnv_string,
((DR3DNS, u'specular'), None): cnv_boolean,
((DR3DNS, u'specular-color'), None): cnv_string,
((DR3DNS, u'texture-filter'), None): cnv_string,
((DR3DNS, u'texture-generation-mode-x'), None): cnv_string,
((DR3DNS, u'texture-generation-mode-y'), None): cnv_string,
((DR3DNS, u'texture-kind'), None): cnv_string,
((DR3DNS, u'texture-mode'), None): cnv_string,
((DR3DNS, u'transform'), None): cnv_string,
((DR3DNS, u'vertical-segments'), None): cnv_string,
((DR3DNS, u'vpn'), None): cnv_string,
((DR3DNS, u'vrp'), None): cnv_string,
((DR3DNS, u'vup'), None): cnv_string,
((DRAWNS, u'align'), None): cnv_string,
((DRAWNS, u'angle'), None): cnv_integer,
((DRAWNS, u'archive'), None): cnv_string,
((DRAWNS, u'auto-grow-height'), None): cnv_boolean,
((DRAWNS, u'auto-grow-width'), None): cnv_boolean,
((DRAWNS, u'background-size'), None): cnv_string,
((DRAWNS, u'blue'), None): cnv_string,
((DRAWNS, u'border'), None): cnv_string,
((DRAWNS, u'caption-angle'), None): cnv_string,
((DRAWNS, u'caption-angle-type'), None): cnv_string,
((DRAWNS, u'caption-escape'), None): cnv_string,
((DRAWNS, u'caption-escape-direction'), None): cnv_string,
((DRAWNS, u'caption-fit-line-length'), None): cnv_boolean,
((DRAWNS, u'caption-gap'), None): cnv_string,
((DRAWNS, u'caption-line-length'), None): cnv_length,
((DRAWNS, u'caption-point-x'), None): cnv_string,
((DRAWNS, u'caption-point-y'), None): cnv_string,
((DRAWNS, u'caption-id'), None): cnv_IDREF,
((DRAWNS, u'caption-type'), None): cnv_string,
((DRAWNS, u'chain-next-name'), None): cnv_string,
((DRAWNS, u'class-id'), None): cnv_string,
((DRAWNS, u'class-names'), None): cnv_NCNames,
((DRAWNS, u'code'), None): cnv_string,
((DRAWNS, u'color'), None): cnv_string,
((DRAWNS, u'color-inversion'), None): cnv_boolean,
((DRAWNS, u'color-mode'), None): cnv_string,
((DRAWNS, u'concave'), None): cnv_string,
((DRAWNS, u'concentric-gradient-fill-allowed'), None): cnv_boolean,
((DRAWNS, u'contrast'), None): cnv_string,
((DRAWNS, u'control'), None): cnv_IDREF,
((DRAWNS, u'copy-of'), None): cnv_string,
((DRAWNS, u'corner-radius'), None): cnv_length,
((DRAWNS, u'corners'), None): cnv_positiveInteger,
((DRAWNS, u'cx'), None): cnv_string,
((DRAWNS, u'cy'), None): cnv_string,
((DRAWNS, u'data'), None): cnv_string,
((DRAWNS, u'decimal-places'), None): cnv_string,
((DRAWNS, u'display'), None): cnv_string,
((DRAWNS, u'display-name'), None): cnv_string,
((DRAWNS, u'distance'), None): cnv_lengthorpercent,
((DRAWNS, u'dots1'), None): cnv_integer,
((DRAWNS, u'dots1-length'), None): cnv_lengthorpercent,
((DRAWNS, u'dots2'), None): cnv_integer,
((DRAWNS, u'dots2-length'), None): cnv_lengthorpercent,
((DRAWNS, u'end-angle'), None): cnv_double,
((DRAWNS, u'end'), None): cnv_string,
((DRAWNS, u'end-color'), None): cnv_string,
((DRAWNS, u'end-glue-point'), None): cnv_nonNegativeInteger,
((DRAWNS, u'end-guide'), None): cnv_length,
((DRAWNS, u'end-intensity'), None): cnv_string,
((DRAWNS, u'end-line-spacing-horizontal'), None): cnv_string,
((DRAWNS, u'end-line-spacing-vertical'), None): cnv_string,
((DRAWNS, u'end-shape'), None): cnv_IDREF,
((DRAWNS, u'engine'), None): cnv_namespacedToken,
((DRAWNS, u'enhanced-path'), None): cnv_string,
((DRAWNS, u'escape-direction'), None): cnv_string,
((DRAWNS, u'extrusion-allowed'), None): cnv_boolean,
((DRAWNS, u'extrusion-brightness'), None): cnv_string,
((DRAWNS, u'extrusion'), None): cnv_boolean,
((DRAWNS, u'extrusion-color'), None): cnv_boolean,
((DRAWNS, u'extrusion-depth'), None): cnv_double,
((DRAWNS, u'extrusion-diffusion'), None): cnv_string,
((DRAWNS, u'extrusion-first-light-direction'), None): cnv_string,
((DRAWNS, u'extrusion-first-light-harsh'), None): cnv_boolean,
((DRAWNS, u'extrusion-first-light-level'), None): cnv_string,
((DRAWNS, u'extrusion-light-face'), None): cnv_boolean,
((DRAWNS, u'extrusion-metal'), None): cnv_boolean,
((DRAWNS, u'extrusion-number-of-line-segments'), None): cnv_integer,
((DRAWNS, u'extrusion-origin'), None): cnv_double,
((DRAWNS, u'extrusion-rotation-angle'), None): cnv_double,
((DRAWNS, u'extrusion-rotation-center'), None): cnv_string,
((DRAWNS, u'extrusion-second-light-direction'), None): cnv_string,
((DRAWNS, u'extrusion-second-light-harsh'), None): cnv_boolean,
((DRAWNS, u'extrusion-second-light-level'), None): cnv_string,
((DRAWNS, u'extrusion-shininess'), None): cnv_string,
((DRAWNS, u'extrusion-skew'), None): cnv_double,
((DRAWNS, u'extrusion-specularity'), None): cnv_string,
((DRAWNS, u'extrusion-viewpoint'), None): cnv_string,
((DRAWNS, u'fill'), None): cnv_string,
((DRAWNS, u'fill-color'), None): cnv_string,
((DRAWNS, u'fill-gradient-name'), None): cnv_string,
((DRAWNS, u'fill-hatch-name'), None): cnv_string,
((DRAWNS, u'fill-hatch-solid'), None): cnv_boolean,
((DRAWNS, u'fill-image-height'), None): cnv_lengthorpercent,
((DRAWNS, u'fill-image-name'), None): cnv_DrawNameRef,
((DRAWNS, u'fill-image-ref-point'), None): cnv_string,
((DRAWNS, u'fill-image-ref-point-x'), None): cnv_string,
((DRAWNS, u'fill-image-ref-point-y'), None): cnv_string,
((DRAWNS, u'fill-image-width'), None): cnv_lengthorpercent,
((DRAWNS, u'filter-name'), None): cnv_string,
((DRAWNS, u'fit-to-contour'), None): cnv_boolean,
((DRAWNS, u'fit-to-size'), None): cnv_string, # ODF 1.2 says boolean
((DRAWNS, u'formula'), None): cnv_string,
((DRAWNS, u'frame-display-border'), None): cnv_boolean,
((DRAWNS, u'frame-display-scrollbar'), None): cnv_boolean,
((DRAWNS, u'frame-margin-horizontal'), None): cnv_string,
((DRAWNS, u'frame-margin-vertical'), None): cnv_string,
((DRAWNS, u'frame-name'), None): cnv_string,
((DRAWNS, u'gamma'), None): cnv_string,
((DRAWNS, u'glue-point-leaving-directions'), None): cnv_string,
((DRAWNS, u'glue-point-type'), None): cnv_string,
((DRAWNS, u'glue-points'), None): cnv_string,
((DRAWNS, u'gradient-step-count'), None): cnv_string,
((DRAWNS, u'green'), None): cnv_string,
((DRAWNS, u'guide-distance'), None): cnv_string,
((DRAWNS, u'guide-overhang'), None): cnv_length,
((DRAWNS, u'handle-mirror-horizontal'), None): cnv_boolean,
((DRAWNS, u'handle-mirror-vertical'), None): cnv_boolean,
((DRAWNS, u'handle-polar'), None): cnv_string,
((DRAWNS, u'handle-position'), None): cnv_string,
((DRAWNS, u'handle-radius-range-maximum'), None): cnv_string,
((DRAWNS, u'handle-radius-range-minimum'), None): cnv_string,
((DRAWNS, u'handle-range-x-maximum'), None): cnv_string,
((DRAWNS, u'handle-range-x-minimum'), None): cnv_string,
((DRAWNS, u'handle-range-y-maximum'), None): cnv_string,
((DRAWNS, u'handle-range-y-minimum'), None): cnv_string,
((DRAWNS, u'handle-switched'), None): cnv_boolean,
# ((DRAWNS,u'id'), None): cnv_ID,
# ((DRAWNS,u'id'), None): cnv_nonNegativeInteger, # ?? line 6581 in RNG
((DRAWNS, u'id'), None): cnv_string,
((DRAWNS, u'image-opacity'), None): cnv_string,
((DRAWNS, u'kind'), None): cnv_string,
((DRAWNS, u'layer'), None): cnv_string,
((DRAWNS, u'line-distance'), None): cnv_string,
((DRAWNS, u'line-skew'), None): cnv_string,
((DRAWNS, u'luminance'), None): cnv_string,
((DRAWNS, u'marker-end-center'), None): cnv_boolean,
((DRAWNS, u'marker-end'), None): cnv_string,
((DRAWNS, u'marker-end-width'), None): cnv_length,
((DRAWNS, u'marker-start-center'), None): cnv_boolean,
((DRAWNS, u'marker-start'), None): cnv_string,
((DRAWNS, u'marker-start-width'), None): cnv_length,
((DRAWNS, u'master-page-name'), None): cnv_StyleNameRef,
((DRAWNS, u'may-script'), None): cnv_boolean,
((DRAWNS, u'measure-align'), None): cnv_string,
((DRAWNS, u'measure-vertical-align'), None): cnv_string,
((DRAWNS, u'mime-type'), None): cnv_string,
((DRAWNS, u'mirror-horizontal'), None): cnv_boolean,
((DRAWNS, u'mirror-vertical'), None): cnv_boolean,
((DRAWNS, u'modifiers'), None): cnv_string,
((DRAWNS, u'name'), None): cnv_NCName,
# ((DRAWNS,u'name'), None): cnv_string,
((DRAWNS, u'nav-order'), None): cnv_IDREF,
((DRAWNS, u'nohref'), None): cnv_string,
((DRAWNS, u'notify-on-update-of-ranges'), None): cnv_string,
((DRAWNS, u'object'), None): cnv_string,
((DRAWNS, u'ole-draw-aspect'), None): cnv_string,
((DRAWNS, u'opacity'), None): cnv_string,
((DRAWNS, u'opacity-name'), None): cnv_string,
((DRAWNS, u'page-number'), None): cnv_positiveInteger,
((DRAWNS, u'parallel'), None): cnv_boolean,
((DRAWNS, u'path-stretchpoint-x'), None): cnv_double,
((DRAWNS, u'path-stretchpoint-y'), None): cnv_double,
((DRAWNS, u'placing'), None): cnv_string,
((DRAWNS, u'points'), None): cnv_points,
((DRAWNS, u'protected'), None): cnv_boolean,
((DRAWNS, u'recreate-on-edit'), None): cnv_boolean,
((DRAWNS, u'red'), None): cnv_string,
((DRAWNS, u'rotation'), None): cnv_integer,
((DRAWNS, u'secondary-fill-color'), None): cnv_string,
((DRAWNS, u'shadow'), None): cnv_string,
((DRAWNS, u'shadow-color'), None): cnv_string,
((DRAWNS, u'shadow-offset-x'), None): cnv_length,
((DRAWNS, u'shadow-offset-y'), None): cnv_length,
((DRAWNS, u'shadow-opacity'), None): cnv_string,
((DRAWNS, u'shape-id'), None): cnv_IDREF,
((DRAWNS, u'sharpness'), None): cnv_string,
((DRAWNS, u'show-unit'), None): cnv_boolean,
((DRAWNS, u'start-angle'), None): cnv_double,
((DRAWNS, u'start'), None): cnv_string,
((DRAWNS, u'start-color'), None): cnv_string,
((DRAWNS, u'start-glue-point'), None): cnv_nonNegativeInteger,
((DRAWNS, u'start-guide'), None): cnv_length,
((DRAWNS, u'start-intensity'), None): cnv_string,
((DRAWNS, u'start-line-spacing-horizontal'), None): cnv_string,
((DRAWNS, u'start-line-spacing-vertical'), None): cnv_string,
((DRAWNS, u'start-shape'), None): cnv_IDREF,
((DRAWNS, u'stroke'), None): cnv_string,
((DRAWNS, u'stroke-dash'), None): cnv_string,
((DRAWNS, u'stroke-dash-names'), None): cnv_string,
((DRAWNS, u'stroke-linejoin'), None): cnv_string,
((DRAWNS, u'style'), None): cnv_string,
((DRAWNS, u'style-name'), None): cnv_StyleNameRef,
((DRAWNS, u'symbol-color'), None): cnv_string,
((DRAWNS, u'text-areas'), None): cnv_string,
((DRAWNS, u'text-path-allowed'), None): cnv_boolean,
((DRAWNS, u'text-path'), None): cnv_boolean,
((DRAWNS, u'text-path-mode'), None): cnv_string,
((DRAWNS, u'text-path-same-letter-heights'), None): cnv_boolean,
((DRAWNS, u'text-path-scale'), None): cnv_string,
((DRAWNS, u'text-rotate-angle'), None): cnv_double,
((DRAWNS, u'text-style-name'), None): cnv_StyleNameRef,
((DRAWNS, u'textarea-horizontal-align'), None): cnv_string,
((DRAWNS, u'textarea-vertical-align'), None): cnv_string,
((DRAWNS, u'tile-repeat-offset'), None): cnv_string,
((DRAWNS, u'transform'), None): cnv_string,
((DRAWNS, u'type'), None): cnv_string,
((DRAWNS, u'unit'), None): cnv_string,
((DRAWNS, u'value'), None): cnv_string,
((DRAWNS, u'visible-area-height'), None): cnv_string,
((DRAWNS, u'visible-area-left'), None): cnv_string,
((DRAWNS, u'visible-area-top'), None): cnv_string,
((DRAWNS, u'visible-area-width'), None): cnv_string,
((DRAWNS, u'wrap-influence-on-position'), None): cnv_string,
((DRAWNS, u'z-index'), None): cnv_nonNegativeInteger,
((FONS, u'background-color'), None): cnv_string,
((FONS, u'border-bottom'), None): cnv_string,
((FONS, u'border'), None): cnv_string,
((FONS, u'border-left'), None): cnv_string,
((FONS, u'border-right'), None): cnv_string,
((FONS, u'border-top'), None): cnv_string,
((FONS, u'break-after'), None): cnv_string,
((FONS, u'break-before'), None): cnv_string,
((FONS, u'clip'), None): cnv_string,
((FONS, u'color'), None): cnv_string,
((FONS, u'column-count'), None): cnv_positiveInteger,
((FONS, u'column-gap'), None): cnv_length,
((FONS, u'country'), None): cnv_token,
((FONS, u'end-indent'), None): cnv_length,
((FONS, u'font-family'), None): cnv_string,
((FONS, u'font-size'), None): cnv_string,
((FONS, u'font-style'), None): cnv_string,
((FONS, u'font-variant'), None): cnv_string,
((FONS, u'font-weight'), None): cnv_string,
((FONS, u'height'), None): cnv_string,
((FONS, u'hyphenate'), None): cnv_boolean,
((FONS, u'hyphenation-keep'), None): cnv_string,
((FONS, u'hyphenation-ladder-count'), None): cnv_string,
((FONS, u'hyphenation-push-char-count'), None): cnv_string,
((FONS, u'hyphenation-remain-char-count'), None): cnv_string,
((FONS, u'keep-together'), None): cnv_string,
((FONS, u'keep-with-next'), None): cnv_string,
((FONS, u'language'), None): cnv_token,
((FONS, u'letter-spacing'), None): cnv_string,
((FONS, u'line-height'), None): cnv_string,
((FONS, u'margin-bottom'), None): cnv_string,
((FONS, u'margin'), None): cnv_string,
((FONS, u'margin-left'), None): cnv_string,
((FONS, u'margin-right'), None): cnv_string,
((FONS, u'margin-top'), None): cnv_string,
((FONS, u'max-height'), None): cnv_string,
((FONS, u'max-width'), None): cnv_string,
((FONS, u'min-height'), None): cnv_length,
((FONS, u'min-width'), None): cnv_string,
((FONS, u'orphans'), None): cnv_string,
((FONS, u'padding-bottom'), None): cnv_string,
((FONS, u'padding'), None): cnv_string,
((FONS, u'padding-left'), None): cnv_string,
((FONS, u'padding-right'), None): cnv_string,
((FONS, u'padding-top'), None): cnv_string,
((FONS, u'page-height'), None): cnv_length,
((FONS, u'page-width'), None): cnv_length,
((FONS, u'space-after'), None): cnv_length,
((FONS, u'space-before'), None): cnv_length,
((FONS, u'start-indent'), None): cnv_length,
((FONS, u'text-align'), None): cnv_string,
((FONS, u'text-align-last'), None): cnv_string,
((FONS, u'text-indent'), None): cnv_string,
((FONS, u'text-shadow'), None): cnv_string,
((FONS, u'text-transform'), None): cnv_string,
((FONS, u'widows'), None): cnv_string,
((FONS, u'width'), None): cnv_string,
((FONS, u'wrap-option'), None): cnv_string,
((FORMNS, u'allow-deletes'), None): cnv_boolean,
((FORMNS, u'allow-inserts'), None): cnv_boolean,
((FORMNS, u'allow-updates'), None): cnv_boolean,
((FORMNS, u'apply-design-mode'), None): cnv_boolean,
((FORMNS, u'apply-filter'), None): cnv_boolean,
((FORMNS, u'auto-complete'), None): cnv_boolean,
((FORMNS, u'automatic-focus'), None): cnv_boolean,
((FORMNS, u'bound-column'), None): cnv_string,
((FORMNS, u'button-type'), None): cnv_string,
((FORMNS, u'command'), None): cnv_string,
((FORMNS, u'command-type'), None): cnv_string,
((FORMNS, u'control-implementation'), None): cnv_namespacedToken,
((FORMNS, u'convert-empty-to-null'), None): cnv_boolean,
((FORMNS, u'current-selected'), None): cnv_boolean,
((FORMNS, u'current-state'), None): cnv_string,
# ((FORMNS,u'current-value'), None): cnv_date,
# ((FORMNS,u'current-value'), None): cnv_double,
((FORMNS, u'current-value'), None): cnv_string,
# ((FORMNS,u'current-value'), None): cnv_time,
((FORMNS, u'data-field'), None): cnv_string,
((FORMNS, u'datasource'), None): cnv_string,
((FORMNS, u'default-button'), None): cnv_boolean,
((FORMNS, u'delay-for-repeat'), None): cnv_duration,
((FORMNS, u'detail-fields'), None): cnv_string,
((FORMNS, u'disabled'), None): cnv_boolean,
((FORMNS, u'dropdown'), None): cnv_boolean,
((FORMNS, u'echo-char'), None): cnv_string,
((FORMNS, u'enctype'), None): cnv_string,
((FORMNS, u'escape-processing'), None): cnv_boolean,
((FORMNS, u'filter'), None): cnv_string,
((FORMNS, u'focus-on-click'), None): cnv_boolean,
((FORMNS, u'for'), None): cnv_string,
((FORMNS, u'id'), None): cnv_ID,
((FORMNS, u'ignore-result'), None): cnv_boolean,
((FORMNS, u'image-align'), None): cnv_string,
((FORMNS, u'image-data'), None): cnv_anyURI,
((FORMNS, u'image-position'), None): cnv_string,
((FORMNS, u'is-tristate'), None): cnv_boolean,
((FORMNS, u'label'), None): cnv_string,
((FORMNS, u'list-source'), None): cnv_string,
((FORMNS, u'list-source-type'), None): cnv_string,
((FORMNS, u'master-fields'), None): cnv_string,
((FORMNS, u'max-length'), None): cnv_nonNegativeInteger,
# ((FORMNS,u'max-value'), None): cnv_date,
# ((FORMNS,u'max-value'), None): cnv_double,
((FORMNS, u'max-value'), None): cnv_string,
# ((FORMNS,u'max-value'), None): cnv_time,
((FORMNS, u'method'), None): cnv_string,
# ((FORMNS,u'min-value'), None): cnv_date,
# ((FORMNS,u'min-value'), None): cnv_double,
((FORMNS, u'min-value'), None): cnv_string,
# ((FORMNS,u'min-value'), None): cnv_time,
((FORMNS, u'multi-line'), None): cnv_boolean,
((FORMNS, u'multiple'), None): cnv_boolean,
((FORMNS, u'name'), None): cnv_string,
((FORMNS, u'navigation-mode'), None): cnv_string,
((FORMNS, u'order'), None): cnv_string,
((FORMNS, u'orientation'), None): cnv_string,
((FORMNS, u'page-step-size'), None): cnv_positiveInteger,
((FORMNS, u'printable'), None): cnv_boolean,
((FORMNS, u'property-name'), None): cnv_string,
((FORMNS, u'readonly'), None): cnv_boolean,
((FORMNS, u'selected'), None): cnv_boolean,
((FORMNS, u'size'), None): cnv_nonNegativeInteger,
((FORMNS, u'state'), None): cnv_string,
((FORMNS, u'step-size'), None): cnv_positiveInteger,
((FORMNS, u'tab-cycle'), None): cnv_string,
((FORMNS, u'tab-index'), None): cnv_nonNegativeInteger,
((FORMNS, u'tab-stop'), None): cnv_boolean,
((FORMNS, u'text-style-name'), None): cnv_StyleNameRef,
((FORMNS, u'title'), None): cnv_string,
((FORMNS, u'toggle'), None): cnv_boolean,
((FORMNS, u'validation'), None): cnv_boolean,
# ((FORMNS,u'value'), None): cnv_date,
# ((FORMNS,u'value'), None): cnv_double,
((FORMNS, u'value'), None): cnv_string,
# ((FORMNS,u'value'), None): cnv_time,
((FORMNS, u'visual-effect'), None): cnv_string,
((FORMNS, u'xforms-list-source'), None): cnv_string,
((FORMNS, u'xforms-submission'), None): cnv_string,
((MANIFESTNS, 'algorithm-name'), None): cnv_string,
((MANIFESTNS, 'checksum'), None): cnv_string,
((MANIFESTNS, 'checksum-type'), None): cnv_string,
((MANIFESTNS, 'full-path'), None): cnv_string,
((MANIFESTNS, 'initialisation-vector'), None): cnv_string,
((MANIFESTNS, 'iteration-count'), None): cnv_nonNegativeInteger,
((MANIFESTNS, 'key-derivation-name'), None): cnv_string,
((MANIFESTNS, 'media-type'), None): cnv_string,
((MANIFESTNS, 'salt'), None): cnv_string,
((MANIFESTNS, 'size'), None): cnv_nonNegativeInteger,
((METANS, u'cell-count'), None): cnv_nonNegativeInteger,
((METANS, u'character-count'), None): cnv_nonNegativeInteger,
((METANS, u'date'), None): cnv_dateTime,
((METANS, u'delay'), None): cnv_duration,
((METANS, u'draw-count'), None): cnv_nonNegativeInteger,
((METANS, u'frame-count'), None): cnv_nonNegativeInteger,
((METANS, u'image-count'), None): cnv_nonNegativeInteger,
((METANS, u'name'), None): cnv_string,
((METANS, u'non-whitespace-character-count'), None): cnv_nonNegativeInteger,
((METANS, u'object-count'), None): cnv_nonNegativeInteger,
((METANS, u'ole-object-count'), None): cnv_nonNegativeInteger,
((METANS, u'page-count'), None): cnv_nonNegativeInteger,
((METANS, u'paragraph-count'), None): cnv_nonNegativeInteger,
((METANS, u'row-count'), None): cnv_nonNegativeInteger,
((METANS, u'sentence-count'), None): cnv_nonNegativeInteger,
((METANS, u'syllable-count'), None): cnv_nonNegativeInteger,
((METANS, u'table-count'), None): cnv_nonNegativeInteger,
((METANS, u'value-type'), None): cnv_metavaluetype,
((METANS, u'word-count'), None): cnv_nonNegativeInteger,
((NUMBERNS, u'automatic-order'), None): cnv_boolean,
((NUMBERNS, u'calendar'), None): cnv_string,
((NUMBERNS, u'country'), None): cnv_token,
((NUMBERNS, u'decimal-places'), None): cnv_integer,
((NUMBERNS, u'decimal-replacement'), None): cnv_string,
((NUMBERNS, u'denominator-value'), None): cnv_integer,
((NUMBERNS, u'display-factor'), None): cnv_double,
((NUMBERNS, u'format-source'), None): cnv_string,
((NUMBERNS, u'grouping'), None): cnv_boolean,
((NUMBERNS, u'language'), None): cnv_token,
((NUMBERNS, u'min-denominator-digits'), None): cnv_integer,
((NUMBERNS, u'min-exponent-digits'), None): cnv_integer,
((NUMBERNS, u'min-integer-digits'), None): cnv_integer,
((NUMBERNS, u'min-numerator-digits'), None): cnv_integer,
((NUMBERNS, u'position'), None): cnv_integer,
((NUMBERNS, u'possessive-form'), None): cnv_boolean,
((NUMBERNS, u'style'), None): cnv_string,
((NUMBERNS, u'textual'), None): cnv_boolean,
((NUMBERNS, u'title'), None): cnv_string,
((NUMBERNS, u'transliteration-country'), None): cnv_token,
((NUMBERNS, u'transliteration-format'), None): cnv_string,
((NUMBERNS, u'transliteration-language'), None): cnv_token,
((NUMBERNS, u'transliteration-style'), None): cnv_string,
((NUMBERNS, u'truncate-on-overflow'), None): cnv_boolean,
((OFFICENS, u'automatic-update'), None): cnv_boolean,
((OFFICENS, u'boolean-value'), None): cnv_boolean,
((OFFICENS, u'conversion-mode'), None): cnv_string,
((OFFICENS, u'currency'), None): cnv_string,
((OFFICENS, u'date-value'), None): cnv_dateTime,
((OFFICENS, u'dde-application'), None): cnv_string,
((OFFICENS, u'dde-item'), None): cnv_string,
((OFFICENS, u'dde-topic'), None): cnv_string,
((OFFICENS, u'display'), None): cnv_boolean,
((OFFICENS, u'mimetype'), None): cnv_string,
((OFFICENS, u'name'), None): cnv_string,
((OFFICENS, u'process-content'), None): cnv_boolean,
((OFFICENS, u'server-map'), None): cnv_boolean,
((OFFICENS, u'string-value'), None): cnv_string,
((OFFICENS, u'target-frame'), None): cnv_string,
((OFFICENS, u'target-frame-name'), None): cnv_string,
((OFFICENS, u'time-value'), None): cnv_duration,
((OFFICENS, u'title'), None): cnv_string,
((OFFICENS, u'value'), None): cnv_double,
((OFFICENS, u'value-type'), None): cnv_string,
((OFFICENS, u'version'), None): cnv_string,
((PRESENTATIONNS, u'action'), None): cnv_string,
((PRESENTATIONNS, u'animations'), None): cnv_string,
((PRESENTATIONNS, u'background-objects-visible'), None): cnv_boolean,
((PRESENTATIONNS, u'background-visible'), None): cnv_boolean,
((PRESENTATIONNS, u'class'), None): cnv_string,
((PRESENTATIONNS, u'class-names'), None): cnv_NCNames,
((PRESENTATIONNS, u'delay'), None): cnv_duration,
((PRESENTATIONNS, u'direction'), None): cnv_string,
((PRESENTATIONNS, u'display-date-time'), None): cnv_boolean,
((PRESENTATIONNS, u'display-footer'), None): cnv_boolean,
((PRESENTATIONNS, u'display-header'), None): cnv_boolean,
((PRESENTATIONNS, u'display-page-number'), None): cnv_boolean,
((PRESENTATIONNS, u'duration'), None): cnv_string,
((PRESENTATIONNS, u'effect'), None): cnv_string,
((PRESENTATIONNS, u'endless'), None): cnv_boolean,
((PRESENTATIONNS, u'force-manual'), None): cnv_boolean,
((PRESENTATIONNS, u'full-screen'), None): cnv_boolean,
((PRESENTATIONNS, u'group-id'), None): cnv_string,
((PRESENTATIONNS, u'master-element'), None): cnv_IDREF,
((PRESENTATIONNS, u'mouse-as-pen'), None): cnv_boolean,
((PRESENTATIONNS, u'mouse-visible'), None): cnv_boolean,
((PRESENTATIONNS, u'name'), None): cnv_string,
((PRESENTATIONNS, u'node-type'), None): cnv_string,
((PRESENTATIONNS, u'object'), None): cnv_string,
((PRESENTATIONNS, u'pages'), None): cnv_string,
((PRESENTATIONNS, u'path-id'), None): cnv_string,
((PRESENTATIONNS, u'pause'), None): cnv_duration,
((PRESENTATIONNS, u'placeholder'), None): cnv_boolean,
((PRESENTATIONNS, u'play-full'), None): cnv_boolean,
((PRESENTATIONNS, u'presentation-page-layout-name'), None): cnv_StyleNameRef,
((PRESENTATIONNS, u'preset-class'), None): cnv_string,
((PRESENTATIONNS, u'preset-id'), None): cnv_string,
((PRESENTATIONNS, u'preset-sub-type'), None): cnv_string,
((PRESENTATIONNS, u'show'), None): cnv_string,
((PRESENTATIONNS, u'show-end-of-presentation-slide'), None): cnv_boolean,
((PRESENTATIONNS, u'show-logo'), None): cnv_boolean,
((PRESENTATIONNS, u'source'), None): cnv_string,
((PRESENTATIONNS, u'speed'), None): cnv_string,
((PRESENTATIONNS, u'start-page'), None): cnv_string,
((PRESENTATIONNS, u'start-scale'), None): cnv_string,
((PRESENTATIONNS, u'start-with-navigator'), None): cnv_boolean,
((PRESENTATIONNS, u'stay-on-top'), None): cnv_boolean,
((PRESENTATIONNS, u'style-name'), None): cnv_StyleNameRef,
((PRESENTATIONNS, u'transition-on-click'), None): cnv_string,
((PRESENTATIONNS, u'transition-speed'), None): cnv_string,
((PRESENTATIONNS, u'transition-style'), None): cnv_string,
((PRESENTATIONNS, u'transition-type'), None): cnv_string,
((PRESENTATIONNS, u'use-date-time-name'), None): cnv_string,
((PRESENTATIONNS, u'use-footer-name'), None): cnv_string,
((PRESENTATIONNS, u'use-header-name'), None): cnv_string,
((PRESENTATIONNS, u'user-transformed'), None): cnv_boolean,
((PRESENTATIONNS, u'verb'), None): cnv_nonNegativeInteger,
((PRESENTATIONNS, u'visibility'), None): cnv_string,
((SCRIPTNS, u'event-name'), None): cnv_formula,
((SCRIPTNS, u'language'), None): cnv_formula,
((SCRIPTNS, u'macro-name'), None): cnv_string,
((SMILNS, u'accelerate'), None): cnv_double,
((SMILNS, u'accumulate'), None): cnv_string,
((SMILNS, u'additive'), None): cnv_string,
((SMILNS, u'attributeName'), None): cnv_string,
((SMILNS, u'autoReverse'), None): cnv_boolean,
((SMILNS, u'begin'), None): cnv_string,
((SMILNS, u'by'), None): cnv_string,
((SMILNS, u'calcMode'), None): cnv_string,
((SMILNS, u'decelerate'), None): cnv_double,
((SMILNS, u'direction'), None): cnv_string,
((SMILNS, u'dur'), None): cnv_string,
((SMILNS, u'end'), None): cnv_string,
((SMILNS, u'endsync'), None): cnv_string,
((SMILNS, u'fadeColor'), None): cnv_string,
((SMILNS, u'fill'), None): cnv_string,
((SMILNS, u'fillDefault'), None): cnv_string,
((SMILNS, u'from'), None): cnv_string,
((SMILNS, u'keySplines'), None): cnv_string,
((SMILNS, u'keyTimes'), None): cnv_string,
((SMILNS, u'mode'), None): cnv_string,
((SMILNS, u'repeatCount'), None): cnv_nonNegativeInteger,
((SMILNS, u'repeatDur'), None): cnv_string,
((SMILNS, u'restart'), None): cnv_string,
((SMILNS, u'restartDefault'), None): cnv_string,
((SMILNS, u'subtype'), None): cnv_string,
((SMILNS, u'targetElement'), None): cnv_IDREF,
((SMILNS, u'to'), None): cnv_string,
((SMILNS, u'type'), None): cnv_string,
((SMILNS, u'values'), None): cnv_string,
((STYLENS, u'adjustment'), None): cnv_string,
((STYLENS, u'apply-style-name'), None): cnv_StyleNameRef,
((STYLENS, u'auto-text-indent'), None): cnv_boolean,
((STYLENS, u'auto-update'), None): cnv_boolean,
((STYLENS, u'background-transparency'), None): cnv_string,
((STYLENS, u'base-cell-address'), None): cnv_string,
((STYLENS, u'border-line-width-bottom'), None): cnv_string,
((STYLENS, u'border-line-width'), None): cnv_string,
((STYLENS, u'border-line-width-left'), None): cnv_string,
((STYLENS, u'border-line-width-right'), None): cnv_string,
((STYLENS, u'border-line-width-top'), None): cnv_string,
((STYLENS, u'cell-protect'), None): cnv_string,
((STYLENS, u'char'), None): cnv_string,
((STYLENS, u'class'), None): cnv_string,
((STYLENS, u'color'), None): cnv_string,
((STYLENS, u'column-width'), None): cnv_string,
((STYLENS, u'condition'), None): cnv_string,
((STYLENS, u'country-asian'), None): cnv_string,
((STYLENS, u'country-complex'), None): cnv_string,
((STYLENS, u'data-style-name'), None): cnv_StyleNameRef,
((STYLENS, u'decimal-places'), None): cnv_string,
((STYLENS, u'default-outline-level'), None): cnv_positiveInteger,
((STYLENS, u'diagonal-bl-tr'), None): cnv_string,
((STYLENS, u'diagonal-bl-tr-widths'), None): cnv_string,
((STYLENS, u'diagonal-tl-br'), None): cnv_string,
((STYLENS, u'diagonal-tl-br-widths'), None): cnv_string,
((STYLENS, u'direction'), None): cnv_string,
((STYLENS, u'display'), None): cnv_boolean,
((STYLENS, u'display-name'), None): cnv_string,
((STYLENS, u'distance-after-sep'), None): cnv_length,
((STYLENS, u'distance-before-sep'), None): cnv_length,
((STYLENS, u'distance'), None): cnv_length,
((STYLENS, u'dynamic-spacing'), None): cnv_boolean,
((STYLENS, u'editable'), None): cnv_boolean,
((STYLENS, u'family'), None): cnv_family,
((STYLENS, u'filter-name'), None): cnv_string,
((STYLENS, u'first-page-number'), None): cnv_string,
((STYLENS, u'flow-with-text'), None): cnv_boolean,
((STYLENS, u'font-adornments'), None): cnv_string,
((STYLENS, u'font-charset'), None): cnv_string,
((STYLENS, u'font-charset-asian'), None): cnv_string,
((STYLENS, u'font-charset-complex'), None): cnv_string,
((STYLENS, u'font-family-asian'), None): cnv_string,
((STYLENS, u'font-family-complex'), None): cnv_string,
((STYLENS, u'font-family-generic-asian'), None): cnv_string,
((STYLENS, u'font-family-generic'), None): cnv_string,
((STYLENS, u'font-family-generic-complex'), None): cnv_string,
((STYLENS, u'font-independent-line-spacing'), None): cnv_boolean,
((STYLENS, u'font-name-asian'), None): cnv_string,
((STYLENS, u'font-name'), None): cnv_string,
((STYLENS, u'font-name-complex'), None): cnv_string,
((STYLENS, u'font-pitch-asian'), None): cnv_string,
((STYLENS, u'font-pitch'), None): cnv_string,
((STYLENS, u'font-pitch-complex'), None): cnv_string,
((STYLENS, u'font-relief'), None): cnv_string,
((STYLENS, u'font-size-asian'), None): cnv_string,
((STYLENS, u'font-size-complex'), None): cnv_string,
((STYLENS, u'font-size-rel-asian'), None): cnv_length,
((STYLENS, u'font-size-rel'), None): cnv_length,
((STYLENS, u'font-size-rel-complex'), None): cnv_length,
((STYLENS, u'font-style-asian'), None): cnv_string,
((STYLENS, u'font-style-complex'), None): cnv_string,
((STYLENS, u'font-style-name-asian'), None): cnv_string,
((STYLENS, u'font-style-name'), None): cnv_string,
((STYLENS, u'font-style-name-complex'), None): cnv_string,
((STYLENS, u'font-weight-asian'), None): cnv_string,
((STYLENS, u'font-weight-complex'), None): cnv_string,
((STYLENS, u'footnote-max-height'), None): cnv_length,
((STYLENS, u'glyph-orientation-vertical'), None): cnv_string,
((STYLENS, u'height'), None): cnv_string,
((STYLENS, u'horizontal-pos'), None): cnv_string,
((STYLENS, u'horizontal-rel'), None): cnv_string,
((STYLENS, u'justify-single-word'), None): cnv_boolean,
((STYLENS, u'language-asian'), None): cnv_string,
((STYLENS, u'language-complex'), None): cnv_string,
((STYLENS, u'layout-grid-base-height'), None): cnv_length,
((STYLENS, u'layout-grid-color'), None): cnv_string,
((STYLENS, u'layout-grid-display'), None): cnv_boolean,
((STYLENS, u'layout-grid-lines'), None): cnv_string,
((STYLENS, u'layout-grid-mode'), None): cnv_string,
((STYLENS, u'layout-grid-print'), None): cnv_boolean,
((STYLENS, u'layout-grid-ruby-below'), None): cnv_boolean,
((STYLENS, u'layout-grid-ruby-height'), None): cnv_length,
((STYLENS, u'leader-char'), None): cnv_string,
((STYLENS, u'leader-color'), None): cnv_string,
((STYLENS, u'leader-style'), None): cnv_string,
((STYLENS, u'leader-text'), None): cnv_string,
((STYLENS, u'leader-text-style'), None): cnv_StyleNameRef,
((STYLENS, u'leader-type'), None): cnv_string,
((STYLENS, u'leader-width'), None): cnv_string,
((STYLENS, u'legend-expansion-aspect-ratio'), None): cnv_double,
((STYLENS, u'legend-expansion'), None): cnv_string,
((STYLENS, u'length'), None): cnv_positiveInteger,
((STYLENS, u'letter-kerning'), None): cnv_boolean,
((STYLENS, u'line-break'), None): cnv_string,
((STYLENS, u'line-height-at-least'), None): cnv_string,
((STYLENS, u'line-spacing'), None): cnv_length,
((STYLENS, u'line-style'), None): cnv_string,
((STYLENS, u'lines'), None): cnv_positiveInteger,
((STYLENS, u'list-style-name'), None): cnv_StyleNameRef,
((STYLENS, u'master-page-name'), None): cnv_StyleNameRef,
((STYLENS, u'may-break-between-rows'), None): cnv_boolean,
((STYLENS, u'min-row-height'), None): cnv_string,
((STYLENS, u'mirror'), None): cnv_string,
((STYLENS, u'name'), None): cnv_NCName,
((STYLENS, u'name'), (STYLENS, u'font-face')): cnv_string,
((STYLENS, u'next-style-name'), None): cnv_StyleNameRef,
((STYLENS, u'num-format'), None): cnv_string,
((STYLENS, u'num-letter-sync'), None): cnv_boolean,
((STYLENS, u'num-prefix'), None): cnv_string,
((STYLENS, u'num-suffix'), None): cnv_string,
((STYLENS, u'number-wrapped-paragraphs'), None): cnv_string,
((STYLENS, u'overflow-behavior'), None): cnv_string,
((STYLENS, u'page-layout-name'), None): cnv_StyleNameRef,
((STYLENS, u'page-number'), None): cnv_string,
((STYLENS, u'page-usage'), None): cnv_string,
((STYLENS, u'paper-tray-name'), None): cnv_string,
((STYLENS, u'parent-style-name'), None): cnv_StyleNameRef,
((STYLENS, u'position'), (STYLENS, u'tab-stop')): cnv_length,
((STYLENS, u'position'), None): cnv_string,
((STYLENS, u'print'), None): cnv_string,
((STYLENS, u'print-content'), None): cnv_boolean,
((STYLENS, u'print-orientation'), None): cnv_string,
((STYLENS, u'print-page-order'), None): cnv_string,
((STYLENS, u'protect'), (STYLENS, u'section-properties')): cnv_boolean,
((STYLENS, u'protect'), (STYLENS, u'graphic-properties')): cnv_string,
# ((STYLENS,u'protect'), None): cnv_boolean,
((STYLENS, u'punctuation-wrap'), None): cnv_string,
((STYLENS, u'register-true'), None): cnv_boolean,
((STYLENS, u'register-truth-ref-style-name'), None): cnv_string,
((STYLENS, u'rel-column-width'), None): cnv_string,
((STYLENS, u'rel-height'), None): cnv_string,
((STYLENS, u'rel-width'), None): cnv_string,
((STYLENS, u'repeat'), None): cnv_string,
((STYLENS, u'repeat-content'), None): cnv_boolean,
((STYLENS, u'rotation-align'), None): cnv_string,
((STYLENS, u'rotation-angle'), None): cnv_string,
((STYLENS, u'row-height'), None): cnv_string,
((STYLENS, u'ruby-align'), None): cnv_string,
((STYLENS, u'ruby-position'), None): cnv_string,
((STYLENS, u'run-through'), None): cnv_string,
((STYLENS, u'scale-to'), None): cnv_string,
((STYLENS, u'scale-to-pages'), None): cnv_string,
((STYLENS, u'script-type'), None): cnv_string,
((STYLENS, u'shadow'), None): cnv_string,
((STYLENS, u'shrink-to-fit'), None): cnv_boolean,
((STYLENS, u'snap-to-layout-grid'), None): cnv_boolean,
((STYLENS, u'style'), None): cnv_string,
((STYLENS, u'style-name'), None): cnv_StyleNameRef,
((STYLENS, u'tab-stop-distance'), None): cnv_string,
((STYLENS, u'table-centering'), None): cnv_string,
((STYLENS, u'text-align-source'), None): cnv_string,
((STYLENS, u'text-autospace'), None): cnv_string,
((STYLENS, u'text-blinking'), None): cnv_boolean,
((STYLENS, u'text-combine'), None): cnv_string,
((STYLENS, u'text-combine-end-char'), None): cnv_string,
((STYLENS, u'text-combine-start-char'), None): cnv_string,
((STYLENS, u'text-emphasize'), None): cnv_string,
((STYLENS, u'text-line-through-color'), None): cnv_string,
((STYLENS, u'text-line-through-mode'), None): cnv_string,
((STYLENS, u'text-line-through-style'), None): cnv_string,
((STYLENS, u'text-line-through-text'), None): cnv_string,
((STYLENS, u'text-line-through-text-style'), None): cnv_string,
((STYLENS, u'text-line-through-type'), None): cnv_string,
((STYLENS, u'text-line-through-width'), None): cnv_string,
((STYLENS, u'text-outline'), None): cnv_boolean,
((STYLENS, u'text-position'), None): cnv_string,
((STYLENS, u'text-rotation-angle'), None): cnv_string,
((STYLENS, u'text-rotation-scale'), None): cnv_string,
((STYLENS, u'text-scale'), None): cnv_string,
((STYLENS, u'text-underline-color'), None): cnv_string,
((STYLENS, u'text-underline-mode'), None): cnv_string,
((STYLENS, u'text-underline-style'), None): cnv_string,
((STYLENS, u'text-underline-type'), None): cnv_string,
((STYLENS, u'text-underline-width'), None): cnv_string,
((STYLENS, u'type'), None): cnv_string,
((STYLENS, u'use-optimal-column-width'), None): cnv_boolean,
((STYLENS, u'use-optimal-row-height'), None): cnv_boolean,
((STYLENS, u'use-window-font-color'), None): cnv_boolean,
((STYLENS, u'vertical-align'), None): cnv_string,
((STYLENS, u'vertical-pos'), None): cnv_string,
((STYLENS, u'vertical-rel'), None): cnv_string,
((STYLENS, u'volatile'), None): cnv_boolean,
((STYLENS, u'width'), None): cnv_string,
((STYLENS, u'wrap'), None): cnv_string,
((STYLENS, u'wrap-contour'), None): cnv_boolean,
((STYLENS, u'wrap-contour-mode'), None): cnv_string,
((STYLENS, u'wrap-dynamic-threshold'), None): cnv_length,
((STYLENS, u'writing-mode-automatic'), None): cnv_boolean,
((STYLENS, u'writing-mode'), None): cnv_string,
((SVGNS, u'accent-height'), None): cnv_integer,
((SVGNS, u'alphabetic'), None): cnv_integer,
((SVGNS, u'ascent'), None): cnv_integer,
((SVGNS, u'bbox'), None): cnv_string,
((SVGNS, u'cap-height'), None): cnv_integer,
((SVGNS, u'cx'), None): cnv_string,
((SVGNS, u'cy'), None): cnv_string,
((SVGNS, u'd'), None): cnv_string,
((SVGNS, u'descent'), None): cnv_integer,
((SVGNS, u'fill-rule'), None): cnv_string,
((SVGNS, u'font-family'), None): cnv_string,
((SVGNS, u'font-size'), None): cnv_string,
((SVGNS, u'font-stretch'), None): cnv_string,
((SVGNS, u'font-style'), None): cnv_string,
((SVGNS, u'font-variant'), None): cnv_string,
((SVGNS, u'font-weight'), None): cnv_string,
((SVGNS, u'fx'), None): cnv_string,
((SVGNS, u'fy'), None): cnv_string,
((SVGNS, u'gradientTransform'), None): cnv_string,
((SVGNS, u'gradientUnits'), None): cnv_string,
((SVGNS, u'hanging'), None): cnv_integer,
((SVGNS, u'height'), None): cnv_length,
((SVGNS, u'ideographic'), None): cnv_integer,
((SVGNS, u'mathematical'), None): cnv_integer,
((SVGNS, u'name'), None): cnv_string,
((SVGNS, u'offset'), None): cnv_string,
((SVGNS, u'origin'), None): cnv_string,
((SVGNS, u'overline-position'), None): cnv_integer,
((SVGNS, u'overline-thickness'), None): cnv_integer,
((SVGNS, u'panose-1'), None): cnv_string,
((SVGNS, u'path'), None): cnv_string,
((SVGNS, u'r'), None): cnv_length,
((SVGNS, u'rx'), None): cnv_length,
((SVGNS, u'ry'), None): cnv_length,
((SVGNS, u'slope'), None): cnv_integer,
((SVGNS, u'spreadMethod'), None): cnv_string,
((SVGNS, u'stemh'), None): cnv_integer,
((SVGNS, u'stemv'), None): cnv_integer,
((SVGNS, u'stop-color'), None): cnv_string,
((SVGNS, u'stop-opacity'), None): cnv_double,
((SVGNS, u'strikethrough-position'), None): cnv_integer,
((SVGNS, u'strikethrough-thickness'), None): cnv_integer,
((SVGNS, u'string'), None): cnv_string,
((SVGNS, u'stroke-color'), None): cnv_string,
((SVGNS, u'stroke-opacity'), None): cnv_string,
((SVGNS, u'stroke-width'), None): cnv_length,
((SVGNS, u'type'), None): cnv_string,
((SVGNS, u'underline-position'), None): cnv_integer,
((SVGNS, u'underline-thickness'), None): cnv_integer,
((SVGNS, u'unicode-range'), None): cnv_string,
((SVGNS, u'units-per-em'), None): cnv_integer,
((SVGNS, u'v-alphabetic'), None): cnv_integer,
((SVGNS, u'v-hanging'), None): cnv_integer,
((SVGNS, u'v-ideographic'), None): cnv_integer,
((SVGNS, u'v-mathematical'), None): cnv_integer,
((SVGNS, u'viewBox'), None): cnv_viewbox,
((SVGNS, u'width'), None): cnv_length,
((SVGNS, u'widths'), None): cnv_string,
((SVGNS, u'x'), None): cnv_length,
((SVGNS, u'x-height'), None): cnv_integer,
((SVGNS, u'x1'), None): cnv_lengthorpercent,
((SVGNS, u'x2'), None): cnv_lengthorpercent,
((SVGNS, u'y'), None): cnv_length,
((SVGNS, u'y1'), None): cnv_lengthorpercent,
((SVGNS, u'y2'), None): cnv_lengthorpercent,
((TABLENS, u'acceptance-state'), None): cnv_string,
((TABLENS, u'add-empty-lines'), None): cnv_boolean,
((TABLENS, u'algorithm'), None): cnv_formula,
((TABLENS, u'align'), None): cnv_string,
((TABLENS, u'allow-empty-cell'), None): cnv_boolean,
((TABLENS, u'application-data'), None): cnv_string,
((TABLENS, u'automatic-find-labels'), None): cnv_boolean,
((TABLENS, u'base-cell-address'), None): cnv_string,
((TABLENS, u'bind-styles-to-content'), None): cnv_boolean,
((TABLENS, u'border-color'), None): cnv_string,
((TABLENS, u'border-model'), None): cnv_string,
((TABLENS, u'buttons'), None): cnv_string,
    # ((TABLENS, u'case-sensitive'), None): cnv_boolean,  # shadowed by the cnv_string entry below
((TABLENS, u'case-sensitive'), None): cnv_string,
((TABLENS, u'cell-address'), None): cnv_string,
((TABLENS, u'cell-range-address'), None): cnv_string,
((TABLENS, u'cell-range'), None): cnv_string,
((TABLENS, u'column'), None): cnv_integer,
((TABLENS, u'comment'), None): cnv_string,
((TABLENS, u'condition'), None): cnv_formula,
((TABLENS, u'condition-source'), None): cnv_string,
((TABLENS, u'condition-source-range-address'), None): cnv_string,
((TABLENS, u'contains-error'), None): cnv_boolean,
((TABLENS, u'contains-header'), None): cnv_boolean,
((TABLENS, u'content-validation-name'), None): cnv_string,
((TABLENS, u'copy-back'), None): cnv_boolean,
((TABLENS, u'copy-formulas'), None): cnv_boolean,
((TABLENS, u'copy-styles'), None): cnv_boolean,
((TABLENS, u'count'), None): cnv_positiveInteger,
((TABLENS, u'country'), None): cnv_token,
((TABLENS, u'data-cell-range-address'), None): cnv_string,
((TABLENS, u'data-field'), None): cnv_string,
((TABLENS, u'data-type'), None): cnv_string,
((TABLENS, u'database-name'), None): cnv_string,
((TABLENS, u'database-table-name'), None): cnv_string,
((TABLENS, u'date-end'), None): cnv_string,
((TABLENS, u'date-start'), None): cnv_string,
((TABLENS, u'date-value'), None): cnv_date,
((TABLENS, u'default-cell-style-name'), None): cnv_StyleNameRef,
((TABLENS, u'direction'), None): cnv_string,
((TABLENS, u'display-border'), None): cnv_boolean,
((TABLENS, u'display'), None): cnv_boolean,
((TABLENS, u'display-duplicates'), None): cnv_boolean,
((TABLENS, u'display-filter-buttons'), None): cnv_boolean,
((TABLENS, u'display-list'), None): cnv_string,
((TABLENS, u'display-member-mode'), None): cnv_string,
((TABLENS, u'drill-down-on-double-click'), None): cnv_boolean,
((TABLENS, u'enabled'), None): cnv_boolean,
((TABLENS, u'end-cell-address'), None): cnv_string,
((TABLENS, u'end'), None): cnv_string,
((TABLENS, u'end-column'), None): cnv_integer,
((TABLENS, u'end-position'), None): cnv_integer,
((TABLENS, u'end-row'), None): cnv_integer,
((TABLENS, u'end-table'), None): cnv_integer,
((TABLENS, u'end-x'), None): cnv_length,
((TABLENS, u'end-y'), None): cnv_length,
((TABLENS, u'execute'), None): cnv_boolean,
((TABLENS, u'expression'), None): cnv_formula,
((TABLENS, u'field-name'), None): cnv_string,
    # ((TABLENS, u'field-number'), None): cnv_nonNegativeInteger,  # shadowed by the cnv_string entry below
((TABLENS, u'field-number'), None): cnv_string,
((TABLENS, u'filter-name'), None): cnv_string,
((TABLENS, u'filter-options'), None): cnv_string,
((TABLENS, u'formula'), None): cnv_formula,
((TABLENS, u'function'), None): cnv_string,
((TABLENS, u'grand-total'), None): cnv_string,
((TABLENS, u'group-by-field-number'), None): cnv_nonNegativeInteger,
((TABLENS, u'grouped-by'), None): cnv_string,
((TABLENS, u'has-persistent-data'), None): cnv_boolean,
((TABLENS, u'id'), None): cnv_string,
((TABLENS, u'identify-categories'), None): cnv_boolean,
((TABLENS, u'ignore-empty-rows'), None): cnv_boolean,
((TABLENS, u'index'), None): cnv_nonNegativeInteger,
((TABLENS, u'is-active'), None): cnv_boolean,
((TABLENS, u'is-data-layout-field'), None): cnv_string,
((TABLENS, u'is-selection'), None): cnv_boolean,
((TABLENS, u'is-sub-table'), None): cnv_boolean,
((TABLENS, u'label-cell-range-address'), None): cnv_string,
((TABLENS, u'language'), None): cnv_token,
((TABLENS, u'last-column-spanned'), None): cnv_positiveInteger,
((TABLENS, u'last-row-spanned'), None): cnv_positiveInteger,
((TABLENS, u'layout-mode'), None): cnv_string,
((TABLENS, u'link-to-source-data'), None): cnv_boolean,
((TABLENS, u'marked-invalid'), None): cnv_boolean,
((TABLENS, u'matrix-covered'), None): cnv_boolean,
((TABLENS, u'maximum-difference'), None): cnv_double,
((TABLENS, u'member-count'), None): cnv_nonNegativeInteger,
((TABLENS, u'member-name'), None): cnv_string,
((TABLENS, u'member-type'), None): cnv_string,
((TABLENS, u'message-type'), None): cnv_string,
((TABLENS, u'mode'), None): cnv_string,
((TABLENS, u'multi-deletion-spanned'), None): cnv_integer,
((TABLENS, u'name'), None): cnv_string,
((TABLENS, u'null-year'), None): cnv_positiveInteger,
((TABLENS, u'number-columns-repeated'), None): cnv_positiveInteger,
((TABLENS, u'number-columns-spanned'), None): cnv_positiveInteger,
((TABLENS, u'number-matrix-columns-spanned'), None): cnv_positiveInteger,
((TABLENS, u'number-matrix-rows-spanned'), None): cnv_positiveInteger,
((TABLENS, u'number-rows-repeated'), None): cnv_positiveInteger,
((TABLENS, u'number-rows-spanned'), None): cnv_positiveInteger,
((TABLENS, u'object-name'), None): cnv_string,
((TABLENS, u'on-update-keep-size'), None): cnv_boolean,
((TABLENS, u'on-update-keep-styles'), None): cnv_boolean,
((TABLENS, u'operator'), None): cnv_string,
((TABLENS, u'order'), None): cnv_string,
((TABLENS, u'orientation'), None): cnv_string,
((TABLENS, u'page-breaks-on-group-change'), None): cnv_boolean,
((TABLENS, u'parse-sql-statement'), None): cnv_boolean,
((TABLENS, u'password'), None): cnv_string,
((TABLENS, u'position'), None): cnv_integer,
((TABLENS, u'precision-as-shown'), None): cnv_boolean,
((TABLENS, u'print'), None): cnv_boolean,
((TABLENS, u'print-ranges'), None): cnv_string,
((TABLENS, u'protect'), None): cnv_boolean,
((TABLENS, u'protected'), None): cnv_boolean,
((TABLENS, u'protection-key'), None): cnv_string,
((TABLENS, u'query-name'), None): cnv_string,
((TABLENS, u'range-usable-as'), None): cnv_string,
    # ((TABLENS, u'refresh-delay'), None): cnv_boolean,  # shadowed by the cnv_duration entry below
((TABLENS, u'refresh-delay'), None): cnv_duration,
((TABLENS, u'rejecting-change-id'), None): cnv_string,
((TABLENS, u'row'), None): cnv_integer,
((TABLENS, u'scenario-ranges'), None): cnv_string,
((TABLENS, u'search-criteria-must-apply-to-whole-cell'), None): cnv_boolean,
((TABLENS, u'selected-page'), None): cnv_string,
((TABLENS, u'show-details'), None): cnv_boolean,
    # ((TABLENS, u'show-empty'), None): cnv_boolean,  # shadowed by the cnv_string entry below
((TABLENS, u'show-empty'), None): cnv_string,
((TABLENS, u'show-filter-button'), None): cnv_boolean,
((TABLENS, u'sort-mode'), None): cnv_string,
((TABLENS, u'source-cell-range-addresses'), None): cnv_string,
((TABLENS, u'source-field-name'), None): cnv_string,
((TABLENS, u'source-name'), None): cnv_string,
((TABLENS, u'sql-statement'), None): cnv_string,
((TABLENS, u'start'), None): cnv_string,
((TABLENS, u'start-column'), None): cnv_integer,
((TABLENS, u'start-position'), None): cnv_integer,
((TABLENS, u'start-row'), None): cnv_integer,
((TABLENS, u'start-table'), None): cnv_integer,
((TABLENS, u'status'), None): cnv_string,
((TABLENS, u'step'), None): cnv_double,
((TABLENS, u'steps'), None): cnv_positiveInteger,
((TABLENS, u'structure-protected'), None): cnv_boolean,
((TABLENS, u'style-name'), None): cnv_StyleNameRef,
((TABLENS, u'table-background'), None): cnv_boolean,
((TABLENS, u'table'), None): cnv_integer,
((TABLENS, u'table-name'), None): cnv_string,
((TABLENS, u'target-cell-address'), None): cnv_string,
((TABLENS, u'target-range-address'), None): cnv_string,
((TABLENS, u'title'), None): cnv_string,
((TABLENS, u'track-changes'), None): cnv_boolean,
((TABLENS, u'type'), None): cnv_string,
((TABLENS, u'use-labels'), None): cnv_string,
((TABLENS, u'use-regular-expressions'), None): cnv_boolean,
((TABLENS, u'used-hierarchy'), None): cnv_integer,
((TABLENS, u'user-name'), None): cnv_string,
((TABLENS, u'value'), None): cnv_string,
((TABLENS, u'value-type'), None): cnv_string,
((TABLENS, u'visibility'), None): cnv_string,
((TEXTNS, u'active'), None): cnv_boolean,
((TEXTNS, u'address'), None): cnv_string,
((TEXTNS, u'alphabetical-separators'), None): cnv_boolean,
((TEXTNS, u'anchor-page-number'), None): cnv_positiveInteger,
((TEXTNS, u'anchor-type'), None): cnv_string,
((TEXTNS, u'animation'), None): cnv_string,
((TEXTNS, u'animation-delay'), None): cnv_string,
((TEXTNS, u'animation-direction'), None): cnv_string,
((TEXTNS, u'animation-repeat'), None): cnv_string,
((TEXTNS, u'animation-start-inside'), None): cnv_boolean,
((TEXTNS, u'animation-steps'), None): cnv_length,
((TEXTNS, u'animation-stop-inside'), None): cnv_boolean,
((TEXTNS, u'annote'), None): cnv_string,
((TEXTNS, u'author'), None): cnv_string,
((TEXTNS, u'bibliography-data-field'), None): cnv_string,
((TEXTNS, u'bibliography-type'), None): cnv_string,
((TEXTNS, u'booktitle'), None): cnv_string,
((TEXTNS, u'bullet-char'), None): cnv_string,
((TEXTNS, u'bullet-relative-size'), None): cnv_string,
((TEXTNS, u'c'), None): cnv_nonNegativeInteger,
((TEXTNS, u'capitalize-entries'), None): cnv_boolean,
((TEXTNS, u'caption-sequence-format'), None): cnv_string,
((TEXTNS, u'caption-sequence-name'), None): cnv_string,
((TEXTNS, u'change-id'), None): cnv_IDREF,
((TEXTNS, u'chapter'), None): cnv_string,
((TEXTNS, u'citation-body-style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'citation-style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'class-names'), None): cnv_NCNames,
((TEXTNS, u'column-name'), None): cnv_string,
((TEXTNS, u'combine-entries'), None): cnv_boolean,
((TEXTNS, u'combine-entries-with-dash'), None): cnv_boolean,
((TEXTNS, u'combine-entries-with-pp'), None): cnv_boolean,
((TEXTNS, u'comma-separated'), None): cnv_boolean,
((TEXTNS, u'cond-style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'condition'), None): cnv_formula,
((TEXTNS, u'connection-name'), None): cnv_string,
((TEXTNS, u'consecutive-numbering'), None): cnv_boolean,
((TEXTNS, u'continue-numbering'), None): cnv_boolean,
((TEXTNS, u'copy-outline-levels'), None): cnv_boolean,
((TEXTNS, u'count-empty-lines'), None): cnv_boolean,
((TEXTNS, u'count-in-text-boxes'), None): cnv_boolean,
((TEXTNS, u'current-value'), None): cnv_boolean,
((TEXTNS, u'custom1'), None): cnv_string,
((TEXTNS, u'custom2'), None): cnv_string,
((TEXTNS, u'custom3'), None): cnv_string,
((TEXTNS, u'custom4'), None): cnv_string,
((TEXTNS, u'custom5'), None): cnv_string,
((TEXTNS, u'database-name'), None): cnv_string,
((TEXTNS, u'date-adjust'), None): cnv_duration,
((TEXTNS, u'date-value'), None): cnv_date,
# ((TEXTNS,u'date-value'), None): cnv_dateTime,
((TEXTNS, u'default-style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'description'), None): cnv_string,
((TEXTNS, u'display'), None): cnv_string,
((TEXTNS, u'display-levels'), None): cnv_positiveInteger,
((TEXTNS, u'display-outline-level'), None): cnv_nonNegativeInteger,
((TEXTNS, u'dont-balance-text-columns'), None): cnv_boolean,
((TEXTNS, u'duration'), None): cnv_duration,
((TEXTNS, u'edition'), None): cnv_string,
((TEXTNS, u'editor'), None): cnv_string,
((TEXTNS, u'filter-name'), None): cnv_string,
((TEXTNS, u'first-row-end-column'), None): cnv_string,
((TEXTNS, u'first-row-start-column'), None): cnv_string,
((TEXTNS, u'fixed'), None): cnv_boolean,
((TEXTNS, u'footnotes-position'), None): cnv_string,
((TEXTNS, u'formula'), None): cnv_formula,
((TEXTNS, u'global'), None): cnv_boolean,
((TEXTNS, u'howpublished'), None): cnv_string,
((TEXTNS, u'id'), None): cnv_ID,
# ((TEXTNS,u'id'), None): cnv_string,
((TEXTNS, u'identifier'), None): cnv_string,
((TEXTNS, u'ignore-case'), None): cnv_boolean,
((TEXTNS, u'increment'), None): cnv_nonNegativeInteger,
((TEXTNS, u'index-name'), None): cnv_string,
((TEXTNS, u'index-scope'), None): cnv_string,
((TEXTNS, u'institution'), None): cnv_string,
((TEXTNS, u'is-hidden'), None): cnv_boolean,
((TEXTNS, u'is-list-header'), None): cnv_boolean,
((TEXTNS, u'isbn'), None): cnv_string,
((TEXTNS, u'issn'), None): cnv_string,
((TEXTNS, u'journal'), None): cnv_string,
((TEXTNS, u'key'), None): cnv_string,
((TEXTNS, u'key1'), None): cnv_string,
((TEXTNS, u'key1-phonetic'), None): cnv_string,
((TEXTNS, u'key2'), None): cnv_string,
((TEXTNS, u'key2-phonetic'), None): cnv_string,
((TEXTNS, u'kind'), None): cnv_string,
((TEXTNS, u'label'), None): cnv_string,
((TEXTNS, u'last-row-end-column'), None): cnv_string,
((TEXTNS, u'last-row-start-column'), None): cnv_string,
((TEXTNS, u'level'), None): cnv_positiveInteger,
((TEXTNS, u'line-break'), None): cnv_boolean,
((TEXTNS, u'line-number'), None): cnv_string,
((TEXTNS, u'main-entry'), None): cnv_boolean,
((TEXTNS, u'main-entry-style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'master-page-name'), None): cnv_StyleNameRef,
((TEXTNS, u'min-label-distance'), None): cnv_string,
((TEXTNS, u'min-label-width'), None): cnv_string,
((TEXTNS, u'month'), None): cnv_string,
((TEXTNS, u'name'), None): cnv_string,
((TEXTNS, u'note-class'), None): cnv_textnoteclass,
((TEXTNS, u'note'), None): cnv_string,
((TEXTNS, u'number'), None): cnv_string,
((TEXTNS, u'number-lines'), None): cnv_boolean,
((TEXTNS, u'number-position'), None): cnv_string,
((TEXTNS, u'numbered-entries'), None): cnv_boolean,
((TEXTNS, u'offset'), None): cnv_string,
((TEXTNS, u'organizations'), None): cnv_string,
((TEXTNS, u'outline-level'), None): cnv_string,
((TEXTNS, u'page-adjust'), None): cnv_integer,
((TEXTNS, u'pages'), None): cnv_string,
((TEXTNS, u'paragraph-style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'placeholder-type'), None): cnv_string,
((TEXTNS, u'prefix'), None): cnv_string,
((TEXTNS, u'protected'), None): cnv_boolean,
((TEXTNS, u'protection-key'), None): cnv_string,
((TEXTNS, u'publisher'), None): cnv_string,
((TEXTNS, u'ref-name'), None): cnv_string,
((TEXTNS, u'reference-format'), None): cnv_string,
((TEXTNS, u'relative-tab-stop-position'), None): cnv_boolean,
((TEXTNS, u'report-type'), None): cnv_string,
((TEXTNS, u'restart-numbering'), None): cnv_boolean,
((TEXTNS, u'restart-on-page'), None): cnv_boolean,
((TEXTNS, u'row-number'), None): cnv_nonNegativeInteger,
((TEXTNS, u'school'), None): cnv_string,
((TEXTNS, u'section-name'), None): cnv_string,
((TEXTNS, u'select-page'), None): cnv_string,
((TEXTNS, u'separation-character'), None): cnv_string,
((TEXTNS, u'series'), None): cnv_string,
((TEXTNS, u'sort-algorithm'), None): cnv_string,
((TEXTNS, u'sort-ascending'), None): cnv_boolean,
((TEXTNS, u'sort-by-position'), None): cnv_boolean,
((TEXTNS, u'space-before'), None): cnv_string,
((TEXTNS, u'start-numbering-at'), None): cnv_string,
    # ((TEXTNS, u'start-value'), None): cnv_nonNegativeInteger,  # shadowed by the cnv_positiveInteger entry below
((TEXTNS, u'start-value'), None): cnv_positiveInteger,
((TEXTNS, u'string-value'), None): cnv_string,
((TEXTNS, u'string-value-if-false'), None): cnv_string,
((TEXTNS, u'string-value-if-true'), None): cnv_string,
((TEXTNS, u'string-value-phonetic'), None): cnv_string,
((TEXTNS, u'style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'suffix'), None): cnv_string,
((TEXTNS, u'tab-ref'), None): cnv_nonNegativeInteger,
((TEXTNS, u'table-name'), None): cnv_string,
((TEXTNS, u'table-type'), None): cnv_string,
((TEXTNS, u'time-adjust'), None): cnv_duration,
    # ((TEXTNS, u'time-value'), None): cnv_dateTime,  # shadowed by the cnv_time entry below
((TEXTNS, u'time-value'), None): cnv_time,
((TEXTNS, u'title'), None): cnv_string,
((TEXTNS, u'track-changes'), None): cnv_boolean,
((TEXTNS, u'url'), None): cnv_string,
((TEXTNS, u'use-caption'), None): cnv_boolean,
((TEXTNS, u'use-chart-objects'), None): cnv_boolean,
((TEXTNS, u'use-draw-objects'), None): cnv_boolean,
((TEXTNS, u'use-floating-frames'), None): cnv_boolean,
((TEXTNS, u'use-graphics'), None): cnv_boolean,
((TEXTNS, u'use-index-marks'), None): cnv_boolean,
((TEXTNS, u'use-index-source-styles'), None): cnv_boolean,
((TEXTNS, u'use-keys-as-entries'), None): cnv_boolean,
((TEXTNS, u'use-math-objects'), None): cnv_boolean,
((TEXTNS, u'use-objects'), None): cnv_boolean,
((TEXTNS, u'use-other-objects'), None): cnv_boolean,
((TEXTNS, u'use-outline-level'), None): cnv_boolean,
((TEXTNS, u'use-soft-page-breaks'), None): cnv_boolean,
((TEXTNS, u'use-spreadsheet-objects'), None): cnv_boolean,
((TEXTNS, u'use-tables'), None): cnv_boolean,
((TEXTNS, u'value'), None): cnv_nonNegativeInteger,
((TEXTNS, u'visited-style-name'), None): cnv_StyleNameRef,
((TEXTNS, u'volume'), None): cnv_string,
((TEXTNS, u'year'), None): cnv_string,
((XFORMSNS, u'bind'), None): cnv_string,
((XLINKNS, u'actuate'), None): cnv_string,
((XLINKNS, u'href'), None): cnv_anyURI,
((XLINKNS, u'show'), None): cnv_xlinkshow,
((XLINKNS, u'title'), None): cnv_string,
((XLINKNS, u'type'), None): cnv_string,
}
class AttrConverters:
def convert(self, attribute, value, element):
""" Based on the element, figures out how to check/convert the attribute value
All values are converted to string
"""
conversion = attrconverters.get((attribute, element.qname), None)
if conversion is not None:
return conversion(attribute, value, element)
else:
conversion = attrconverters.get((attribute, None), None)
if conversion is not None:
return conversion(attribute, value, element)
return unicode(value)
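# A minimal usage sketch (hypothetical element and value): convert() first
# looks up a converter registered for the exact (attribute, element.qname)
# pair, then falls back to the generic (attribute, None) entries in the
# table above, and finally to plain unicode() when nothing is registered.
#
#   converters = AttrConverters()
#   # -> routed through cnv_boolean via ((TABLENS, u'display'), None)
#   value = converters.convert((TABLENS, u'display'), u'true', table_element)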
|
walterbender/portfolio
|
odf/attrconverters.py
|
Python
|
gpl-3.0
| 75,243
|
## plugin.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: client.py,v 1.52 2006/01/02 19:40:55 normanr Exp $
"""
Provides PlugIn class functionality to develop extensions for xmpppy
"""
import logging
log = logging.getLogger('nbxmpp.plugin')
class PlugIn(object):
"""
Abstract xmpppy plugin infrastructure code, providing plugging in/out and
debugging functionality
    Inherit to develop pluggable objects. No code change is required on the
    owner class (the object we plug into)
    For every instance of PlugIn, the 'owner' is the object into which the
    plugin was plugged.
"""
def __init__(self):
self._exported_methods=[]
def PlugIn(self, owner, *args, **kwargs):
"""
Attach to owner and register ourself and our _exported_methods in it.
If defined by a subclass, call self.plugin(owner) to execute hook
code after plugging
"""
self._owner=owner
log.info('Plugging %s __INTO__ %s' % (self, self._owner))
if self.__class__.__name__ in owner.__dict__:
log.debug('Plugging ignored: another instance already plugged.')
return
self._old_owners_methods=[]
for method in self._exported_methods:
if method.__name__ in owner.__dict__:
self._old_owners_methods.append(owner.__dict__[method.__name__])
owner.__dict__[method.__name__]=method
if self.__class__.__name__.endswith('Dispatcher'):
# FIXME: I need BOSHDispatcher or XMPPDispatcher on .Dispatcher
# there must be a better way..
owner.__dict__['Dispatcher']=self
else:
owner.__dict__[self.__class__.__name__]=self
# Execute hook
if hasattr(self, 'plugin'):
return self.plugin(owner, *args, **kwargs)
def PlugOut(self, *args, **kwargs):
"""
Unregister our _exported_methods from owner and detach from it.
If defined by a subclass, call self.plugout() after unplugging to execute
hook code
"""
log.info('Plugging %s __OUT__ of %s.' % (self, self._owner))
for method in self._exported_methods:
del self._owner.__dict__[method.__name__]
for method in self._old_owners_methods:
self._owner.__dict__[method.__name__]=method
# FIXME: Dispatcher workaround
if self.__class__.__name__.endswith('Dispatcher'):
del self._owner.__dict__['Dispatcher']
else:
del self._owner.__dict__[self.__class__.__name__]
        # Execute hook, then drop the owner reference
        ret = None
        if hasattr(self, 'plugout'):
            ret = self.plugout(*args, **kwargs)
        del self._owner
        return ret
@classmethod
def get_instance(cls, *args, **kwargs):
"""
Factory Method for object creation
Use this instead of directly initializing the class in order to make
unit testing easier. For testing, this method can be patched to inject
mock objects.
"""
return cls(*args, **kwargs)
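# A minimal subclass sketch (hypothetical names) showing the intended life
# cycle: methods listed in _exported_methods are copied onto the owner at
# PlugIn() time and removed again at PlugOut().
#
#   class Ping(PlugIn):
#       def __init__(self):
#           PlugIn.__init__(self)
#           self._exported_methods = [self.ping]
#       def ping(self):
#           return 'pong'
#
#   ping = Ping.get_instance()
#   ping.PlugIn(owner)   # owner.ping() becomes available
#   ping.PlugOut()       # owner.ping is removed again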
|
jabber-at/python-nbxmpp
|
nbxmpp/plugin.py
|
Python
|
gpl-3.0
| 3,577
|
import services
from entities import EventLog
from entities import enum
from entities import IpAddress
import math
from datetime import date, datetime, timedelta
import logging
Weight = enum(HOURS=10, DAYS=10, SERVER=15, SUCCESS=35, VPN=0, INT=10, EXT=15, IP=15)
Threshold = enum(CRITICAL=50, SCARY=30, SCARECOUNT=2, SCAREDATEEXPIRE=1)
updateService = None
emailService = None
def setServices(conf=None):
global updateService
global emailService
updateService = services.UpdateService(conf)
emailService = services.EmailService(conf)
def testProcess():
eventLog = EventLog(date.today(), 'nrhine', '127.0.0.1', True, 'ae1-app80-prd')
processEventLog(eventLog)
def processEventLog(eventLog):
auditEventLog(eventLog)
score = calculateNewScore(eventLog)
user = updateService.fetchUser(eventLog)
timeDiff = eventLog.date - user.lastScareDate
updateService.updateUserScore(user, score)
if score > Threshold.CRITICAL:
processAlert(user, eventLog)
elif score > Threshold.SCARY:
if user.scareCount >= Threshold.SCARECOUNT:
processAlert(user, eventLog)
user = updateService.updateUserScareCount(user)
elif abs(timeDiff.days) >= Threshold.SCAREDATEEXPIRE:
updateService.resetUserScareCount(user)
def calculateNewScore(eventLog):
successScore = calculateSuccessScore(eventLog.success)
ipLocationScore = calculateIpLocationScore(eventLog.ipAddress)
serverScore = calculateServerScore(eventLog)
ipScore = calculateIpScore(eventLog)
dayScore = calculateDaysScore(eventLog)
hourScore = calculateHoursScore(eventLog)
totalScore = successScore + ipLocationScore + serverScore + ipScore + dayScore + hourScore
logging.debug("Total Score: %s" % totalScore)
return totalScore
def auditEventLog(eventLog):
updateService.auditEventLog(eventLog)
def processAlert(user, eventLog):
emailService.sendEmailAlert(user, eventLog)
def calculateHoursScore(eventLog):
hourFreq = updateService.updateAndReturnHourFreqForUser(eventLog)
hourScore = calculateSubscore(hourFreq)*Weight.HOURS
return hourScore
def calculateDaysScore(eventLog):
dayFreq = updateService.updateAndReturnDayFreqForUser(eventLog)
dayScore = calculateSubscore(dayFreq)*Weight.DAYS
return dayScore
def calculateServerScore(eventLog):
serverFreq = updateService.updateAndReturnServerFreqForUser(eventLog)
serverScore = calculateSubscore(serverFreq) * Weight.SERVER
return serverScore
def calculateIpScore(eventLog):
ipFreq = updateService.updateAndReturnIpFreqForUser(eventLog)
ipScore = calculateSubscore(ipFreq) * Weight.IP
return ipScore
def calculateSubscore(freq):
    subscore = math.log(freq, 2)
    subscore = subscore * -10
    if subscore > 100:
        return 1.0  # cap at the normalized maximum so the scale stays 0..1
    return float(subscore)/100
def calculateSuccessScore(success):
successScore = Weight.SUCCESS
if success:
successScore = 0
return successScore
def calculateIpLocationScore(ipAddress):
ipScore = Weight.EXT
if IpAddress.checkIpForVpn(ipAddress):
ipScore=Weight.VPN
if IpAddress.checkIpForInternal(ipAddress):
ipScore=Weight.INT
return ipScore
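# A worked example (hypothetical inputs): a failed login from an external
# address starts at Weight.SUCCESS + Weight.EXT = 35 + 15 = 50, already at
# Threshold.CRITICAL before any frequency subscores, so any hour/day/server/IP
# contribution pushes it past the alert cutoff; a successful VPN login
# contributes 0 + 0 and depends entirely on the frequency subscores, each of
# which is bounded by its Weight.
#
#   calculateSuccessScore(False)   # -> 35
#   calculateSuccessScore(True)    # -> 0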
|
dandb/hacklog
|
hacklog/algorithm.py
|
Python
|
gpl-3.0
| 3,083
|
# coding=utf-8
"""Tests for medusa.clients.torrent module."""
import medusa.clients.torrent as sut
from medusa.clients.torrent import (
deluge_client, deluged_client, download_station_client, mlnet_client,
qbittorrent_client, rtorrent_client, transmission_client, utorrent_client
)
import pytest
@pytest.mark.parametrize('p', [
{ # p0
'client': 'deluge',
'expected': deluge_client
},
{ # p1
'client': 'deluged',
'expected': deluged_client
},
{ # p2
'client': 'download_station',
'expected': download_station_client
},
{ # p3
'client': 'mlnet',
'expected': mlnet_client
},
{ # p4
'client': 'qbittorrent',
'expected': qbittorrent_client
},
{ # p5
'client': 'rtorrent',
'expected': rtorrent_client
},
{ # p6
'client': 'transmission',
'expected': transmission_client
},
{ # p7
'client': 'utorrent',
'expected': utorrent_client
}
])
def test_get_client_module(p):
# Given
client_name = p['client']
expected = p['expected']
# When
actual = sut.get_client_module(client_name)
# Then
assert expected == actual
def test_get_client_module__non_existent():
# Given
client_name = 'strangeonehere'
with pytest.raises(ImportError): # Then
# When
sut.get_client_module(client_name)
@pytest.mark.parametrize('p', [
{ # p0
'client': 'deluge',
'expected': deluge_client.DelugeAPI
},
{ # p1
'client': 'deluged',
'expected': deluged_client.DelugeDAPI
},
{ # p2
'client': 'download_station',
'expected': download_station_client.DownloadStationAPI
},
{ # p3
'client': 'mlnet',
'expected': mlnet_client.MLNetAPI
},
{ # p4
'client': 'qbittorrent',
'expected': qbittorrent_client.QBittorrentAPI
},
{ # p5
'client': 'rtorrent',
'expected': rtorrent_client.RTorrentAPI
},
{ # p6
'client': 'transmission',
'expected': transmission_client.TransmissionAPI
},
{ # p7
'client': 'utorrent',
'expected': utorrent_client.UTorrentAPI
}
])
def test_get_client_class(p):
# Given
client_name = p['client']
expected = p['expected']
# When
actual = sut.get_client_class(client_name)
# Then
assert expected == actual
|
fernandog/Medusa
|
tests/test_clients.py
|
Python
|
gpl-3.0
| 2,480
|
# TODO: Fix typing in this file.
# mypy: ignore-errors
import chess.svg
class WidgetError(Exception):
"""
    Raised when ipywidgets is not installed
"""
class NotJupyter(Exception):
"""
    Raised when InteractiveViewer is instantiated from a non-Jupyter shell
"""
try:
from ipywidgets import Button, GridBox, Layout, HTML, Output, HBox, Select
from IPython.display import display, clear_output
except ModuleNotFoundError:
raise WidgetError("You need to have ipywidgets installed and running from Jupyter")
class InteractiveViewer:
def __new__(cls, game):
jupyter = True
try:
if get_ipython().__class__.__name__ != "ZMQInteractiveShell":
jupyter = False
except NameError:
jupyter = False
if not jupyter:
raise NotJupyter("The interactive viewer only runs in Jupyter shell")
return object.__new__(cls)
def __init__(self, game):
self.game = game
self.__board = game.board()
self.__moves = list(game.mainline_moves())
self.__white_moves = [str(move) for (i, move) in enumerate(self.__moves) if i % 2 == 0]
self.__black_moves = [str(move) for (i, move) in enumerate(self.__moves) if i % 2 == 1]
self.__move_list_len = len(self.__white_moves)
self.__num_moves = len(self.__moves)
self.__next_move = 0 if self.__moves else None
self.__out = Output()
def __next_click(self, _):
move = self.__moves[self.__next_move]
self.__next_move += 1
self.__board.push(move)
self.show()
def __prev_click(self, _):
self.__board.pop()
self.__next_move -= 1
self.show()
def __reset_click(self, _):
self.__board.reset()
self.__next_move = 0
self.show()
def __white_select_change(self, change):
new = change["new"]
if (isinstance(new, dict)) and ("index" in new):
target = new["index"] * 2
self.__seek(target)
self.show()
def __black_select_change(self, change):
new = change["new"]
if (isinstance(new, dict)) and ("index" in new):
target = new["index"] * 2 + 1
self.__seek(target)
self.show()
def __seek(self, target):
while self.__next_move <= target:
move = self.__moves[self.__next_move]
self.__next_move += 1
self.__board.push(move)
while self.__next_move > target + 1:
self.__board.pop()
self.__next_move -= 1
def show(self):
display(self.__out)
next_move = Button(
icon="step-forward",
layout=Layout(width="60px", grid_area="right"),
disabled=self.__next_move >= self.__num_moves,
)
prev_move = Button(
icon="step-backward",
layout=Layout(width="60px", grid_area="left"),
disabled=self.__next_move == 0,
)
reset = Button(
icon="stop",
layout=Layout(width="60px", grid_area="middle"),
disabled=self.__next_move == 0,
)
if self.__next_move == 0:
white_move = None
black_move = None
else:
white_move = (
self.__white_moves[self.__next_move // 2]
if (self.__next_move % 2) == 1
else None
)
black_move = (
self.__black_moves[self.__next_move // 2 - 1]
if (self.__next_move % 2) == 0
else None
)
white_move_list = Select(
options=self.__white_moves,
value=white_move,
rows=max(self.__move_list_len, 24),
disabled=False,
layout=Layout(width="80px"),
)
black_move_list = Select(
options=self.__black_moves,
value=black_move,
rows=max(self.__move_list_len, 24),
disabled=False,
layout=Layout(width="80px"),
)
white_move_list.observe(self.__white_select_change)
black_move_list.observe(self.__black_select_change)
move_number_width = 3 + len(str(self.__move_list_len)) * 10
move_number = Select(
options=range(1, self.__move_list_len + 1),
value=None,
disabled=True,
rows=max(self.__move_list_len, 24),
layout=Layout(width=f"{move_number_width}px"),
)
move_list = HBox(
[move_number, white_move_list, black_move_list],
layout=Layout(height="407px", grid_area="moves"),
)
next_move.on_click(self.__next_click)
prev_move.on_click(self.__prev_click)
reset.on_click(self.__reset_click)
with self.__out:
grid_box = GridBox(
children=[next_move, prev_move, reset, self.svg, move_list],
layout=Layout(
width=f"{390+move_number_width+160}px",
grid_template_rows="90% 10%",
grid_template_areas="""
"top top top top top moves"
". left middle right . moves"
""",
),
)
clear_output(wait=True)
display(grid_box)
@property
def svg(self) -> HTML:
svg = chess.svg.board(
board=self.__board,
size=390,
lastmove=self.__board.peek() if self.__board.move_stack else None,
check=self.__board.king(self.__board.turn)
if self.__board.is_check()
else None,
)
svg_widget = HTML(value=svg, layout=Layout(grid_area="top"))
return svg_widget
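# A minimal usage sketch (assumes a Jupyter notebook kernel and a PGN file
# named game.pgn; both are hypothetical here):
#
#   import chess.pgn
#   with open("game.pgn") as f:
#       game = chess.pgn.read_game(f)
#   viewer = InteractiveViewer(game)
#   viewer.show()   # renders the board plus prev/next/reset and move lists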
|
niklasf/python-chess
|
chess/_interactive.py
|
Python
|
gpl-3.0
| 5,843
|
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from PyQt5.QtWidgets import QApplication
from hscommon import trans
from core.app import AppMode
from core.scanner import ScanType
from qtlib.preferences import Preferences as PreferencesBase
class Preferences(PreferencesBase):
def _load_values(self, settings):
get = self.get_value
self.filter_hardness = get("FilterHardness", self.filter_hardness)
self.mix_file_kind = get("MixFileKind", self.mix_file_kind)
self.ignore_hardlink_matches = get(
"IgnoreHardlinkMatches", self.ignore_hardlink_matches
)
self.use_regexp = get("UseRegexp", self.use_regexp)
self.remove_empty_folders = get("RemoveEmptyFolders", self.remove_empty_folders)
self.debug_mode = get("DebugMode", self.debug_mode)
self.destination_type = get("DestinationType", self.destination_type)
self.custom_command = get("CustomCommand", self.custom_command)
self.language = get("Language", self.language)
if not self.language and trans.installed_lang:
self.language = trans.installed_lang
self.tableFontSize = get("TableFontSize", self.tableFontSize)
        self.reference_bold_font = get("ReferenceBoldFont", self.reference_bold_font)
self.resultWindowIsMaximized = get(
"ResultWindowIsMaximized", self.resultWindowIsMaximized
)
self.resultWindowRect = self.get_rect("ResultWindowRect", self.resultWindowRect)
self.mainWindowIsMaximized = get(
"MainWindowIsMaximized", self.mainWindowIsMaximized
)
self.mainWindowRect = self.get_rect("MainWindowRect", self.mainWindowRect)
self.directoriesWindowRect = self.get_rect(
"DirectoriesWindowRect", self.directoriesWindowRect
)
self.recentResults = get("RecentResults", self.recentResults)
self.recentFolders = get("RecentFolders", self.recentFolders)
self.tabs_default_pos = get("TabsDefaultPosition", self.tabs_default_pos)
self.word_weighting = get("WordWeighting", self.word_weighting)
self.match_similar = get("MatchSimilar", self.match_similar)
self.ignore_small_files = get("IgnoreSmallFiles", self.ignore_small_files)
self.small_file_threshold = get("SmallFileThreshold", self.small_file_threshold)
self.scan_tag_track = get("ScanTagTrack", self.scan_tag_track)
self.scan_tag_artist = get("ScanTagArtist", self.scan_tag_artist)
self.scan_tag_album = get("ScanTagAlbum", self.scan_tag_album)
self.scan_tag_title = get("ScanTagTitle", self.scan_tag_title)
self.scan_tag_genre = get("ScanTagGenre", self.scan_tag_genre)
self.scan_tag_year = get("ScanTagYear", self.scan_tag_year)
self.match_scaled = get("MatchScaled", self.match_scaled)
self.picture_cache_type = get("PictureCacheType", self.picture_cache_type)
def reset(self):
self.filter_hardness = 95
self.mix_file_kind = True
self.use_regexp = False
self.ignore_hardlink_matches = False
self.remove_empty_folders = False
self.debug_mode = False
self.destination_type = 1
self.custom_command = ""
self.language = trans.installed_lang if trans.installed_lang else ""
self.tableFontSize = QApplication.font().pointSize()
self.reference_bold_font = True
self.resultWindowIsMaximized = False
self.resultWindowRect = None
self.directoriesWindowRect = None
self.mainWindowRect = None
self.mainWindowIsMaximized = False
self.recentResults = []
self.recentFolders = []
self.tabs_default_pos = True
self.word_weighting = True
self.match_similar = False
self.ignore_small_files = True
self.small_file_threshold = 10 # KB
self.scan_tag_track = False
self.scan_tag_artist = True
self.scan_tag_album = True
self.scan_tag_title = True
self.scan_tag_genre = False
self.scan_tag_year = False
self.match_scaled = False
self.picture_cache_type = "sqlite"
def _save_values(self, settings):
set_ = self.set_value
set_("FilterHardness", self.filter_hardness)
set_("MixFileKind", self.mix_file_kind)
set_("IgnoreHardlinkMatches", self.ignore_hardlink_matches)
set_("UseRegexp", self.use_regexp)
set_("RemoveEmptyFolders", self.remove_empty_folders)
set_("DebugMode", self.debug_mode)
set_("DestinationType", self.destination_type)
set_("CustomCommand", self.custom_command)
set_("Language", self.language)
set_("TableFontSize", self.tableFontSize)
        set_("ReferenceBoldFont", self.reference_bold_font)
set_("ResultWindowIsMaximized", self.resultWindowIsMaximized)
set_("MainWindowIsMaximized", self.mainWindowIsMaximized)
self.set_rect("ResultWindowRect", self.resultWindowRect)
self.set_rect("MainWindowRect", self.mainWindowRect)
self.set_rect("DirectoriesWindowRect", self.directoriesWindowRect)
set_("RecentResults", self.recentResults)
set_("RecentFolders", self.recentFolders)
set_("TabsDefaultPosition", self.tabs_default_pos)
set_("WordWeighting", self.word_weighting)
set_("MatchSimilar", self.match_similar)
set_("IgnoreSmallFiles", self.ignore_small_files)
set_("SmallFileThreshold", self.small_file_threshold)
set_("ScanTagTrack", self.scan_tag_track)
set_("ScanTagArtist", self.scan_tag_artist)
set_("ScanTagAlbum", self.scan_tag_album)
set_("ScanTagTitle", self.scan_tag_title)
set_("ScanTagGenre", self.scan_tag_genre)
set_("ScanTagYear", self.scan_tag_year)
set_("MatchScaled", self.match_scaled)
set_("PictureCacheType", self.picture_cache_type)
# scan_type is special because we save it immediately when we set it.
def get_scan_type(self, app_mode):
if app_mode == AppMode.Picture:
return self.get_value("ScanTypePicture", ScanType.FuzzyBlock)
elif app_mode == AppMode.Music:
return self.get_value("ScanTypeMusic", ScanType.Tag)
else:
return self.get_value("ScanTypeStandard", ScanType.Contents)
def set_scan_type(self, app_mode, value):
if app_mode == AppMode.Picture:
self.set_value("ScanTypePicture", value)
elif app_mode == AppMode.Music:
self.set_value("ScanTypeMusic", value)
else:
self.set_value("ScanTypeStandard", value)
|
hsoft/dupeguru
|
qt/preferences.py
|
Python
|
gpl-3.0
| 6,879
|
# -*- coding:Utf-8 -*-
"""Misc widgets used in the GUI."""
import os
from gi.repository import Gtk, GObject, Pango, GdkPixbuf, GLib
from gi.repository.GdkPixbuf import Pixbuf
from lutris.downloader import Downloader
from lutris.util import datapath
# from lutris.util.log import logger
from lutris import settings
PADDING = 5
DEFAULT_BANNER = os.path.join(datapath.get(), 'media/default_banner.png')
DEFAULT_ICON = os.path.join(datapath.get(), 'media/default_icon.png')
UNAVAILABLE_GAME_OVERLAY = os.path.join(datapath.get(),
'media/unavailable.png')
BANNER_SIZE = (184, 69)
BANNER_SMALL_SIZE = (120, 45)
ICON_SIZE = (32, 32)
(
COL_ID,
COL_NAME,
COL_ICON,
COL_YEAR,
COL_RUNNER,
COL_INSTALLED,
) = range(6)
def get_runner_icon(runner_name, format='image', size=None):
icon_path = os.path.join(datapath.get(), 'media/runner_icons',
runner_name + '.png')
if format == 'image':
icon = Gtk.Image()
icon.set_from_file(icon_path)
elif format == 'pixbuf' and size:
icon = GdkPixbuf.Pixbuf.new_from_file_at_size(icon_path,
size[0], size[1])
else:
raise ValueError("Invalid arguments")
return icon
def sort_func(store, a_iter, b_iter, _user_data):
"""Default sort function."""
a_name = store.get(a_iter, COL_NAME)
b_name = store.get(b_iter, COL_NAME)
if a_name > b_name:
return 1
elif a_name < b_name:
return -1
else:
return 0
def get_pixbuf_for_game(game_slug, icon_type="banner", is_installed=True):
if icon_type in ("banner", "banner_small"):
size = BANNER_SIZE if icon_type == "banner" else BANNER_SMALL_SIZE
default_icon = DEFAULT_BANNER
icon_path = os.path.join(settings.BANNER_PATH,
"%s.jpg" % game_slug)
elif icon_type == "icon":
size = ICON_SIZE
default_icon = DEFAULT_ICON
icon_path = os.path.join(settings.ICON_PATH,
"lutris_%s.png" % game_slug)
if not os.path.exists(icon_path):
icon_path = default_icon
try:
pixbuf = Pixbuf.new_from_file_at_size(icon_path, size[0], size[1])
except GLib.GError:
pixbuf = Pixbuf.new_from_file_at_size(default_icon, size[0], size[1])
if not is_installed:
transparent_pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
UNAVAILABLE_GAME_OVERLAY, size[0], size[1]
)
transparent_pixbuf = transparent_pixbuf.scale_simple(
size[0], size[1], GdkPixbuf.InterpType.NEAREST
)
pixbuf.composite(transparent_pixbuf, 0, 0, size[0], size[1],
0, 0, 1, 1, GdkPixbuf.InterpType.NEAREST, 100)
return transparent_pixbuf
return pixbuf
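# A minimal usage sketch (hypothetical slug): falls back to the default
# banner/icon when no cached artwork exists, and composites the
# "unavailable" overlay on top for games that are not installed.
#
#   pixbuf = get_pixbuf_for_game('quake', icon_type='banner',
#                                is_installed=False)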
class ContextualMenu(Gtk.Menu):
menu_labels = {
'play': "Play",
'install': "Install",
'add': "Add manually",
'configure': "Configure",
'browse': "Browse files",
'desktop-shortcut': "Create desktop shortcut",
'menu-shortcut': "Create application menu shortcut",
'remove': "Remove",
}
def __init__(self, callbacks):
super(ContextualMenu, self).__init__()
for callback in callbacks:
name = callback[0]
label = self.menu_labels[name]
action = Gtk.Action(name=name, label=label,
tooltip=None, stock_id=None)
action.connect('activate', callback[1])
menuitem = action.create_menu_item()
menuitem.action_id = name
self.append(menuitem)
self.show_all()
def popup(self, event, game_row):
is_installed = game_row[COL_INSTALLED]
hide_when_installed = ('install', 'add')
hide_when_not_installed = ('play', 'configure', 'desktop-shortcut',
'menu-shortcut', 'browse')
for menuitem in self.get_children():
action = menuitem.action_id
if is_installed:
menuitem.set_visible(action not in hide_when_installed)
else:
menuitem.set_visible(action not in hide_when_not_installed)
super(ContextualMenu, self).popup(None, None, None, None,
event.button, event.time)
class GridViewCellRenderer(Gtk.CellRendererText):
def __init__(self, width=None, *args, **kwargs):
super(GridViewCellRenderer, self).__init__(*args, **kwargs)
self.props.alignment = Pango.Alignment.CENTER
self.props.wrap_mode = Pango.WrapMode.WORD
self.props.xalign = 0.5
self.props.yalign = 0
self.props.width = width
self.props.wrap_width = width
class GameStore(object):
def __init__(self, games, filter_text='', filter_runner='', icon_type=None):
self.filter_text = filter_text
self.filter_runner = filter_runner
self.icon_type = icon_type
self.store = Gtk.ListStore(str, str, Pixbuf, str, str, bool)
self.store.set_default_sort_func(sort_func)
self.store.set_sort_column_id(-1, Gtk.SortType.ASCENDING)
self.fill_store(games)
self.modelfilter = self.store.filter_new()
self.modelfilter.set_visible_func(self.filter_view)
def filter_view(self, model, _iter, filter_data=None):
"""Filter the game list."""
name = model.get_value(_iter, COL_NAME)
runner = model.get_value(_iter, COL_RUNNER)
if self.filter_text:
name_matches = self.filter_text.lower() in name.lower()
else:
name_matches = True
if self.filter_runner:
runner_matches = self.filter_runner == runner
else:
runner_matches = True
return name_matches and runner_matches
def fill_store(self, games):
self.store.clear()
for game in games:
self.add_game(game)
def add_game(self, game):
"""Add a game into the store."""
if not game.name:
return
pixbuf = get_pixbuf_for_game(game.slug, self.icon_type,
is_installed=game.is_installed)
        name = game.name.replace('&', "&amp;")  # escape for Pango markup
self.store.append(
(game.slug, name, pixbuf, str(game.year), game.runner_name,
game.is_installed)
)
class GameView(object):
__gsignals__ = {
"game-selected": (GObject.SIGNAL_RUN_FIRST, None, ()),
"game-activated": (GObject.SIGNAL_RUN_FIRST, None, ()),
"game-installed": (GObject.SIGNAL_RUN_FIRST, None, (str,)),
"filter-updated": (GObject.SIGNAL_RUN_FIRST, None, ()),
}
selected_game = None
current_path = None
contextual_menu = None
def connect_signals(self):
"""Signal handlers common to all views"""
self.connect('filter-updated', self.update_filter)
self.connect('button-press-event', self.popup_contextual_menu)
@property
def n_games(self):
return len(self.game_store.store)
def get_row_by_slug(self, game_slug):
game_row = None
for model_row in self.game_store.store:
if model_row[COL_ID] == game_slug:
game_row = model_row
return game_row
def add_game(self, game):
self.game_store.add_game(game)
def remove_game(self, removed_id):
row = self.get_row_by_slug(removed_id)
if row:
self.remove_row(row.iter)
def remove_row(self, model_iter):
"""Remove a game from the view."""
store = self.game_store.store
store.remove(model_iter)
def set_installed(self, game):
"""Update a game row to show as installed"""
row = self.get_row_by_slug(game.slug)
if not row:
self.add_game(game)
else:
row[COL_RUNNER] = game.runner_name
self.update_image(game.slug, is_installed=True)
def set_uninstalled(self, game_slug):
"""Update a game row to show as uninstalled"""
row = self.get_row_by_slug(game_slug)
row[COL_RUNNER] = ''
self.update_image(game_slug, is_installed=False)
def update_filter(self, widget):
self.game_store.modelfilter.refilter()
def update_row(self, game):
"""Update game informations.
:param dict game: Dict holding game details
"""
row = self.get_row_by_slug(game['slug'])
if row:
row[COL_YEAR] = str(game['year'])
self.update_image(game['slug'], row[COL_INSTALLED])
def update_image(self, game_slug, is_installed=False):
"""Update game icon."""
row = self.get_row_by_slug(game_slug)
if row:
            game_pixbuf = get_pixbuf_for_game(game_slug, self.icon_type,
                                              is_installed=is_installed)
            row[COL_ICON] = game_pixbuf
row[COL_INSTALLED] = is_installed
if type(self) is GameGridView:
GLib.idle_add(self.queue_draw)
def popup_contextual_menu(self, view, event):
"""Contextual menu."""
if event.button != 3:
return
try:
view.current_path = view.get_path_at_pos(event.x, event.y)
if view.current_path:
if type(view) is GameGridView:
view.select_path(view.current_path)
elif type(view) is GameListView:
view.set_cursor(view.current_path[0])
except ValueError:
(_, path) = view.get_selection().get_selected()
view.current_path = path
if view.current_path:
game_row = self.get_row_by_slug(self.selected_game)
self.contextual_menu.popup(event, game_row)
class GameListView(Gtk.TreeView, GameView):
"""Show the main list of games."""
__gsignals__ = GameView.__gsignals__
def __init__(self, games, filter_text='', filter_runner='', icon_type=None):
self.icon_type = icon_type
self.game_store = GameStore(games, icon_type=icon_type,
filter_text=filter_text,
filter_runner=filter_runner)
self.model = self.game_store.modelfilter.sort_new_with_model()
super(GameListView, self).__init__(self.model)
self.set_rules_hint(True)
# Icon column
image_cell = Gtk.CellRendererPixbuf()
column = Gtk.TreeViewColumn("", image_cell, pixbuf=COL_ICON)
column.set_reorderable(True)
self.append_column(column)
# Text columns
default_text_cell = self.set_text_cell()
name_cell = self.set_text_cell()
name_cell.set_padding(5, 0)
column = self.set_column(name_cell, "Name", COL_NAME)
self.append_column(column)
column = self.set_column(default_text_cell, "Year", COL_YEAR)
self.append_column(column)
column = self.set_column(default_text_cell, "Runner", COL_RUNNER)
self.append_column(column)
self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)
self.connect_signals()
self.connect('row-activated', self.get_selected_game, True)
self.connect('cursor-changed', self.get_selected_game, False)
def set_text_cell(self):
text_cell = Gtk.CellRendererText()
text_cell.set_padding(10, 0)
text_cell.set_property("ellipsize", Pango.EllipsizeMode.END)
return text_cell
def set_column(self, cell, header, column_id):
column = Gtk.TreeViewColumn(header, cell, markup=column_id)
column.set_sort_indicator(True)
column.set_sort_column_id(column_id)
column.set_resizable(True)
column.set_reorderable(True)
return column
def get_selected_game(self, widget, line=None, column=None, launch=False):
selection = self.get_selection()
if not selection:
return
model, select_iter = selection.get_selected()
self.selected_game = model.get_value(select_iter, COL_ID)
if launch:
self.emit("game-activated")
else:
self.emit("game-selected")
def set_selected_game(self, game):
row = self.get_row_by_slug(game.slug)
if row:
self.set_cursor(row.path)
class GameGridView(Gtk.IconView, GameView):
__gsignals__ = GameView.__gsignals__
icon_padding = 1
def __init__(self, games, filter_text='', filter_runner='', icon_type=None):
self.icon_type = icon_type
self.game_store = GameStore(games, icon_type=icon_type,
filter_text=filter_text,
filter_runner=filter_runner)
self.model = self.game_store.modelfilter
super(GameGridView, self).__init__(model=self.model)
self.set_columns(1)
self.set_column_spacing(1)
self.set_pixbuf_column(COL_ICON)
self.cell_width = BANNER_SIZE[0] if icon_type == "banner" \
else BANNER_SMALL_SIZE[0]
gridview_cell_renderer = GridViewCellRenderer(width=self.cell_width)
self.pack_end(gridview_cell_renderer, False)
self.add_attribute(gridview_cell_renderer, 'markup', COL_NAME)
self.set_item_padding(self.icon_padding)
self.connect_signals()
self.connect('item-activated', self.on_item_activated)
self.connect('selection-changed', self.on_selection_changed)
self.connect('size-allocate', self.on_size_allocate)
def set_fluid_columns(self, width):
cell_width = self.cell_width + self.icon_padding * 2
nb_columns = (width / cell_width)
self.set_columns(nb_columns)
def on_size_allocate(self, widget, rect):
"""Recalculate the colum spacing based on total widget width."""
width = self.get_parent().get_allocated_width()
self.set_fluid_columns(width - 20)
self.do_size_allocate(widget, rect)
def on_item_activated(self, view, path):
self.get_selected_game(True)
def on_selection_changed(self, view):
self.get_selected_game(False)
def get_selected_game(self, launch=False):
selection = self.get_selected_items()
if not selection:
return
self.current_path = selection[0]
store = self.get_model()
self.selected_game = store.get(store.get_iter(self.current_path),
COL_ID)[0]
if launch:
self.emit("game-activated")
else:
self.emit("game-selected")
def set_selected_game(self, game):
row = self.get_row_by_slug(game.slug)
if row:
self.select_path(row.path)
class DownloadProgressBox(Gtk.HBox):
"""Progress bar used to monitor a file download."""
__gsignals__ = {
'complete': (GObject.SignalFlags.RUN_LAST, None,
(GObject.TYPE_PYOBJECT,)),
'cancelrequested': (GObject.SignalFlags.RUN_LAST, None,
(GObject.TYPE_PYOBJECT,))
}
def __init__(self, params, cancelable=True):
super(DownloadProgressBox, self).__init__()
self.downloader = None
self.progress_box = Gtk.VBox()
self.progressbar = Gtk.ProgressBar()
self.progress_box.pack_start(self.progressbar, True, True, 10)
self.progress_label = Gtk.Label()
self.progress_box.pack_start(self.progress_label, True, True, 10)
self.pack_start(self.progress_box, True, True, 10)
self.progress_box.show_all()
self.cancel_button = Gtk.Button(stock=Gtk.STOCK_CANCEL)
if cancelable:
self.cancel_button.show()
self.cancel_button.set_sensitive(False)
self.cancel_button.connect('clicked', self.cancel)
self.pack_end(self.cancel_button, False, False, 10)
self.url = params['url']
self.dest = params['dest']
def start(self):
"""Start downloading a file."""
self.downloader = Downloader(self.url, self.dest)
timer_id = GLib.timeout_add(100, self.progress)
self.cancel_button.set_sensitive(True)
self.downloader.start()
return timer_id
def progress(self):
"""Show download progress."""
progress = min(self.downloader.progress, 1)
if self.downloader.cancelled:
self.progressbar.set_fraction(0)
self.progress_label.set_text("Download canceled")
self.emit('cancelrequested', {})
return False
self.progressbar.set_fraction(progress)
megabytes = 1024 * 1024
progress_text = (
"%0.2fMb out of %0.2fMb (%0.2fMb/s), %d seconds remaining" % (
float(self.downloader.downloaded_bytes) / megabytes,
float(self.downloader.total_bytes) / megabytes,
float(self.downloader.speed) / megabytes,
self.downloader.time_remaining
)
)
self.progress_label.set_text(progress_text)
self.progressbar.set_fraction(progress)
if progress >= 1.0:
self.cancel_button.set_sensitive(False)
self.emit('complete', {})
return False
return True
def cancel(self, _widget):
"""Cancel the current download."""
if self.downloader:
self.downloader.cancel()
self.cancel_button.set_sensitive(False)
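# A minimal usage sketch (hypothetical URL and destination): start() spawns
# the Downloader and polls progress() every 100 ms through GLib.timeout_add.
#
#   box = DownloadProgressBox({'url': 'http://example.com/f.zip',
#                              'dest': '/tmp/f.zip'})
#   box.connect('complete', lambda widget, data: widget.hide())
#   box.start()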
class FileChooserEntry(Gtk.Box):
def __init__(self, action=Gtk.FileChooserAction.SELECT_FOLDER,
default=None):
super(FileChooserEntry, self).__init__()
self.entry = Gtk.Entry()
if default:
self.entry.set_text(default)
self.pack_start(self.entry, True, True, 0)
self.path_completion = Gtk.ListStore(str)
completion = Gtk.EntryCompletion()
completion.set_model(self.path_completion)
completion.set_text_column(0)
self.entry.set_completion(completion)
self.entry.connect("changed", self.entry_changed)
self.file_chooser_dlg = Gtk.FileChooserDialog(
title="Select folder",
transient_for=None,
action=action
)
self.file_chooser_dlg.add_buttons(
Gtk.STOCK_CLOSE, Gtk.ResponseType.CLOSE,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK
)
if default:
self.file_chooser_dlg.set_current_folder(default)
button = Gtk.Button()
button.set_label("Browse...")
button.connect('clicked', self.open_filechooser)
self.add(button)
def open_filechooser(self, widget):
self.file_chooser_dlg.connect('response', self.select_file)
self.file_chooser_dlg.run()
def entry_changed(self, widget):
self.path_completion.clear()
current_path = widget.get_text()
if not current_path:
current_path = "/"
if not os.path.exists(current_path):
current_path, filefilter = os.path.split(current_path)
else:
filefilter = None
if os.path.isdir(current_path):
index = 0
for filename in sorted(os.listdir(current_path)):
if filename.startswith("."):
continue
if filefilter is not None \
and not filename.startswith(filefilter):
continue
self.path_completion.append(
[os.path.join(current_path, filename)]
)
index += 1
if index > 15:
break
def select_file(self, dialog, response):
if response == Gtk.ResponseType.OK:
target_path = dialog.get_filename()
if target_path:
self.file_chooser_dlg.set_current_folder(target_path)
self.entry.set_text(target_path)
dialog.hide()
def get_text(self):
return self.entry.get_text()
class Label(Gtk.Label):
"""Standardised label for config vboxes."""
def __init__(self, message=None):
"""Custom init of label"""
super(Label, self).__init__(label=message)
self.set_alignment(0.1, 0.0)
self.set_padding(PADDING, 0)
self.set_line_wrap(True)
class VBox(Gtk.VBox):
def __init__(self):
GObject.GObject.__init__(self)
self.set_margin_top(20)
class Dialog(Gtk.Dialog):
def __init__(self, title=None, parent=None):
super(Dialog, self).__init__()
self.set_border_width(10)
if title:
self.set_title(title)
if parent:
self.set_transient_for(parent)
self.set_destroy_with_parent(True)
|
malkavi/lutris
|
lutris/gui/widgets.py
|
Python
|
gpl-3.0
| 20,833
|
# BlenderFDS, an open tool for the NIST Fire Dynamics Simulator
# Copyright (C) 2013 Emanuele Gissi, http://www.blenderfds.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bpy
# Shortcut
# TODO
# from . import bl_info
# module = bl_info['name']
# PKG = __package__
# mod = addon_utils.addons_fake_modules.get(PKG)
# mod.bl_info['show_expanded'] = True
## Supported file version
supported_file_version = 5, 0, 0
## Default SURFs
default_mas = { # name: diffuse_color
"Dummy Color1": ((1.0, 1.0, 1.0, 0.05),), # white
"Dummy Color2": ((1.0, 1.0, 0.0, 0.05),), # yellow
"Dummy Color3": ((1.0, 0.0, 1.0, 0.05),), # purple
"INERT": ((0.8, 0.8, 0.2, 1.0),),
"HVAC": ((0.2, 0.2, 0.8, 0.5),),
"MIRROR": ((1.0, 0.0, 1.0, 0.2),),
"OPEN": ((0.2, 0.8, 0.8, 0.05),),
"PERIODIC": ((1.0, 0.0, 1.0, 0.2),),
}
## Frequently used output QUANTITYs (FDS User's guide, table 16.3)
quantities = ( # name, description, units, qtype, subject
("ACTUATED SPRINKLERS", "Number of activated sprinklers", "", "D", "Det"),
(
"ADIABATIC SURFACE TEMPERATURE",
"Adiabatic surface temperature (AST), a quantity that is representative of the heat flux to a solid surface",
"°C",
"B,D",
"Wall",
),
(
"ASPIRATION",
"Central detector of aspiration detection system",
"%/m",
"D",
"Det",
),
("BACKGROUND PRESSURE", "Background pressure", "Pa", "D,I,P,S", "Pressure"),
(
"BACK WALL TEMPERATURE",
"Temperature of the back of an EXPOSED surface.\nThe coordinates XYZ, and orientation IOR, refer to the front surface",
"°C",
"B,D",
"Wall",
),
("BURNING RATE", "Mass loss rate of fuel", "kg/(m²·s)", "B,D", "Fire"),
("CHAMBER OBSCURATION", "Smoke detector chamber obscuration", "%/m", "D", "Det"),
("CONDUCTIVITY", "Thermal conductivity", "W/(m·K)", "D,I,P,S", "Gas"),
(
"CONVECTIVE HEAT FLUX",
"Convective component of NET HEAT FLUX",
"kW/m²",
"B,D",
"Heat",
),
("CPU TIME", "Elapsed CPU time since the start of the simulation", "s", "D", "Sim"),
("DENSITY", "Density", "kg/m³", "D,I,P,S", "Gas"),
("DEPOSITION VELOCITY", "Deposition velocity at the wall", "m/s", "B,D", "Wall"),
("DIVERGENCE", "Divergence", "1/s", "D,I,P,S", "Sim"),
("EXTINCTION COEFFICIENT", "", "1/m", "D,I,P,S", "Visibility"),
(
"FED",
"The fractional effective dose index (FED), developed by Purser,\nis a commonly used measure of human incapacitation due to the combustion gases",
"",
"D",
"Tenability",
),
(
"FIC",
"The fractional irritant concentration (FIC), developed by Purser,\nrepresents the toxic effect which depends upon the immediate concentrations of irritants.",
"",
"D,S",
"Tenability",
),
(
"GAUGE HEAT FLUX",
"This quantity simulates a measurement made with a cold water heat flux gauge",
"kW/m²",
"B,D",
"Det",
),
(
"HEAT FLOW",
"Net flow of energy into or out of a planar surface",
"kW",
"D",
"Heat",
),
(
"HEAT FLOW WALL",
"Net flow of energy into or out of a solid boundary",
"kW",
"D",
"Wall",
),
(
"NET HEAT FLUX",
"Sum of the emitted and absorbed radiation at a solid surface",
"kW/m²",
"B,D",
"Heat",
),
("HRR", "Heat release rate", "kW", "D", "Fire"),
("HRRPUA", "Heat release rate per unit area", "kW/m²", "D", "Fire"),
("HRRPUV", "Heat release rate per unit volume", "kW/m³", "D,I,P,S", "Fire"),
("INCIDENT HEAT FLUX", "Incident term of NET HEAT FLUX", "kW/m²", "B,D", "Heat"),
(
"INSIDE WALL TEMPERATURE",
"Temperature inside a solid surface",
"°C",
"D",
"Wall",
),
("INSIDE WALL DEPTH", "Depth inside a solid surface", "m", "D", "Wall"),
(
"ITERATION",
"Number of time steps completed at the given time of the simulation",
"",
"D",
"Sim",
),
(
"LAYER HEIGHT",
"Layer height, location of the interface between the hot, smoke-laden upper layer and the cooler lower layer in a burning compartment",
"m",
"D",
"Zones",
),
(
"LINK TEMPERATURE",
"Defines a heat detector, which uses essentially the same activation algorithm as a sprinkler, without the water spray",
"°C",
"D",
"Det",
),
("LOWER TEMPERATURE", "Lower layer temperature", "°C", "D", "Zones"),
(
"MASS FLOW",
"Net flow of mass into or out of a planar surface",
"kg/s",
"D",
"Gas",
),
(
"MASS FLOW WALL",
"Net flow of mass into or out of a solid boundary",
"kg/s",
"D",
"Wall",
),
("MASS FRACTION", "", "kg/kg", "D,I,P,S", "Gas"),
("MIXTURE FRACTION", "", "kg/kg", "D,I,P,S", "Gas"),
("NORMAL VELOCITY", "Wall normal velocity", "m/s", "D,B", "Wall"),
("OPTICAL DENSITY", "", "1/m", "D,I,P,S", "Visibility"),
("PATH OBSCURATION", "Beam detector path obscuration", "%", "D", "Det"),
("PRESSURE", "Perturbation pressure", "Pa", "D,I,P,S", "Pressure"),
("PRESSURE ZONE", "Pressure zone", "", "D,S", "Pressure"),
(
"RADIATIVE HEAT FLUX",
"Radiative component of NET HEAT FLUX",
"kW/m²",
"B,D",
"Heat",
),
(
"RADIATIVE HEAT FLUX GAS",
"This records the radiative heat flux away from a solid surface",
"kW/m²",
"D",
"Heat",
),
(
"RADIOMETER",
"Similar to a GAUGE HEAT FLUX, this quantity measures only the radiative heat flux",
"kW/m²",
"B,D",
"Det",
),
("RELATIVE HUMIDITY", "Relative humidity", "%", "D,I,P,S", "Gas"),
("SOLID CONDUCTIVITY", "Material component conductivity", "W/(m·K)", "D", "Wall"),
("SOLID DENSITY", "Material component density", "kg/m³", "D", "Wall"),
(
"SOLID SPECIFIC HEAT",
"Material component specific heat",
"kJ/(kg·K)",
"D",
"Wall",
),
(
"SPRINKLER LINK TEMPERATURE",
"Compute the activation of the device using the standard RTI (Response Time Index) algorithm",
"°C",
"D",
"Det",
),
("SURFACE DEPOSITION", "Surface deposition of SPEC_ID", "kg/m²", "B,D", "Wall"),
("TEMPERATURE", "", "°C", "D,I,P,S", "Gas"),
(
"THERMOCOUPLE",
"Temperature of a modeled thermocouple.\nThe thermocouple temperature lags the true gas temperature by an amount determined mainly by its bead size.",
"°C",
"D",
"Det",
),
("TIME", "Activation time", "s", "D", "Sim"),
("TIME STEP", "Duration of a simulation time step", "s", "D", "Sim"),
("U-VELOCITY", "Gas velocity component", "m/s", "D,I,P,S", "Gas"),
("V-VELOCITY", "Gas velocity component", "m/s", "D,I,P,S", "Gas"),
("W-VELOCITY", "Gas velocity component", "m/s", "D,I,P,S", "Gas"),
("UPPER TEMPERATURE", "Upper layer temperature", "°C", "D", "Zones"),
("VELOCITY", "Gas velocity", "m/s", "D,I,P,S", "Gas"),
("VISCOSITY", "Effective viscosity", "kg/(m·s)", "D,I,P,S", "Gas"),
("VISIBILITY", "Visibility through smoke", "m", "D,I,P,S", "Visibility"),
(
"VOLUME FLOW",
"Net flow of volume into or out of a planar surface",
"m³/s",
"D",
"Gas",
),
(
"VOLUME FLOW WALL",
"Net flow of volume into or out of a solid boundary",
"m³/s",
"D",
"Wall",
),
("VOLUME FRACTION", "", "mol/mol", "D,I,P,S", "Gas"),
(
"WALL CLOCK TIME",
"Elapsed wall clock time since the start of the simulation",
"s",
"D",
"Sim",
),
(
"WALL CLOCK TIME ITERATIONS",
"Elapsed wall clock time since the start of the time stepping loop",
"s",
"D",
"Sim",
),
("WALL TEMPERATURE", "Surface temperature", "°C", "B,D", "Wall"),
)
def get_quantity_items(qtype):
"""!
Prepare quantity items for menus.
"""
items = []
# Generated like this: (("NET HEAT FLUX", "Heat - NET HEAT FLUX [kW/m²]", "Description..."), ...)
for q in quantities:
name, desc, units, allowed_qtype, subject = q
if qtype in allowed_qtype:
items.append((name, f"{subject} - {name} [{units}]", desc))
items.sort(key=lambda k: k[1])
return items
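# Illustrative call, derived from the quantities table above:
# get_quantity_items("B") includes entries such as
# ("WALL TEMPERATURE", "Wall - WALL TEMPERATURE [°C]", "Surface temperature"),
# sorted by their menu label.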
## Color table from FDS source code (data.f90)
fds_colors = {
"INVISIBLE": (255, 255, 255),
"ALICE BLUE": (240, 248, 255),
"ANTIQUE WHITE": (250, 235, 215),
"ANTIQUE WHITE 1": (255, 239, 219),
"ANTIQUE WHITE 2": (238, 223, 204),
"ANTIQUE WHITE 3": (205, 192, 176),
"ANTIQUE WHITE 4": (139, 131, 120),
"AQUAMARINE": (127, 255, 212),
"AQUAMARINE 1": (118, 238, 198),
"AQUAMARINE 2": (102, 205, 170),
"AQUAMARINE 3": (69, 139, 116),
"AZURE": (240, 255, 255),
"AZURE 1": (224, 238, 238),
"AZURE 2": (193, 205, 205),
"AZURE 3": (131, 139, 139),
"BANANA": (227, 207, 87),
"BEIGE": (245, 245, 220),
"BISQUE": (255, 228, 196),
"BISQUE 1": (238, 213, 183),
"BISQUE 2": (205, 183, 158),
"BISQUE 3": (139, 125, 107),
"BLACK": (0, 0, 0),
"BLANCHED ALMOND": (255, 235, 205),
"BLUE": (0, 0, 255),
"BLUE 2": (0, 0, 238),
"BLUE 3": (0, 0, 205),
"BLUE 4": (0, 0, 139),
"BLUE VIOLET": (138, 43, 226),
"BRICK": (156, 102, 31),
"BROWN": (165, 42, 42),
"BROWN 1": (255, 64, 64),
"BROWN 2": (238, 59, 59),
"BROWN 3": (205, 51, 51),
"BROWN 4": (139, 35, 35),
"BURLY WOOD": (222, 184, 135),
"BURLY WOOD 1": (255, 211, 155),
"BURLY WOOD 2": (238, 197, 145),
"BURLY WOOD 3": (205, 170, 125),
"BURLY WOOD 4": (139, 115, 85),
"BURNT ORANGE": (204, 85, 0),
"BURNT SIENNA": (138, 54, 15),
"BURNT UMBER": (138, 51, 36),
"CADET BLUE": (95, 158, 160),
"CADET BLUE 1": (152, 245, 255),
"CADET BLUE 2": (142, 229, 238),
"CADET BLUE 3": (122, 197, 205),
"CADET BLUE 4": (83, 134, 139),
"CADMIUM ORANGE": (255, 97, 3),
"CADMIUM YELLOW": (255, 153, 18),
"CARROT": (237, 145, 33),
"CHARTREUSE": (127, 255, 0),
"CHARTREUSE 1": (118, 238, 0),
"CHARTREUSE 2": (102, 205, 0),
"CHARTREUSE 3": (69, 139, 0),
"CHOCOLATE": (210, 105, 30),
"CHOCOLATE 1": (255, 127, 36),
"CHOCOLATE 2": (238, 118, 33),
"CHOCOLATE 3": (205, 102, 29),
"CHOCOLATE 4": (139, 69, 19),
"COBALT": (61, 89, 171),
"COBALT GREEN": (61, 145, 64),
"COLD GREY": (128, 138, 135),
"CORAL": (255, 127, 80),
"CORAL 1": (255, 114, 86),
"CORAL 2": (238, 106, 80),
"CORAL 3": (205, 91, 69),
"CORAL 4": (139, 62, 47),
"CORNFLOWER BLUE": (100, 149, 237),
"CORNSILK": (255, 248, 220),
"CORNSILK 1": (238, 232, 205),
"CORNSILK 2": (205, 200, 177),
"CORNSILK 3": (139, 136, 120),
"CRIMSON": (220, 20, 60),
"CYAN": (0, 255, 255),
"CYAN 2": (0, 238, 238),
"CYAN 3": (0, 205, 205),
"CYAN 4": (0, 139, 139),
"DARK GOLDENROD": (184, 134, 11),
"DARK GOLDENROD 1": (255, 185, 15),
"DARK GOLDENROD 2": (238, 173, 14),
"DARK GOLDENROD 3": (205, 149, 12),
"DARK GOLDENROD 4": (139, 101, 8),
"DARK GRAY": (169, 169, 169),
"DARK GREEN": (0, 100, 0),
"DARK KHAKI": (189, 183, 107),
"DARK OLIVE GREEN": (85, 107, 47),
"DARK OLIVE GREEN 1": (202, 255, 112),
"DARK OLIVE GREEN 2": (188, 238, 104),
"DARK OLIVE GREEN 3": (162, 205, 90),
"DARK OLIVE GREEN 4": (110, 139, 61),
"DARK ORANGE": (255, 140, 0),
"DARK ORANGE 1": (255, 127, 0),
"DARK ORANGE 2": (238, 118, 0),
"DARK ORANGE 3": (205, 102, 0),
"DARK ORANGE 4": (139, 69, 0),
"DARK ORCHID": (153, 50, 204),
"DARK ORCHID 1": (191, 62, 255),
"DARK ORCHID 2": (178, 58, 238),
"DARK ORCHID 3": (154, 50, 205),
"DARK ORCHID 4": (104, 34, 139),
"DARK SALMON": (233, 150, 122),
"DARK SEA GREEN": (143, 188, 143),
"DARK SEA GREEN 1": (193, 255, 193),
"DARK SEA GREEN 2": (180, 238, 180),
"DARK SEA GREEN 3": (155, 205, 155),
"DARK SEA GREEN 4": (105, 139, 105),
"DARK SLATE BLUE": (72, 61, 139),
"DARK SLATE GRAY": (47, 79, 79),
"DARK SLATE GRAY 1": (151, 255, 255),
"DARK SLATE GRAY 2": (141, 238, 238),
"DARK SLATE GRAY 3": (121, 205, 205),
"DARK SLATE GRAY 4": (82, 139, 139),
"DARK TURQUOISE": (0, 206, 209),
"DARK VIOLET": (148, 0, 211),
"DEEP PINK": (255, 20, 147),
"DEEP PINK 1": (238, 18, 137),
"DEEP PINK 2": (205, 16, 118),
"DEEP PINK 3": (139, 10, 80),
"DEEP SKYBLUE": (0, 191, 255),
"DEEP SKYBLUE 1": (0, 178, 238),
"DEEP SKYBLUE 2": (0, 154, 205),
"DEEP SKYBLUE 3": (0, 104, 139),
"DIM GRAY": (105, 105, 105),
"DODGERBLUE": (30, 144, 255),
"DODGERBLUE 1": (28, 134, 238),
"DODGERBLUE 2": (24, 116, 205),
"DODGERBLUE 3": (16, 78, 139),
"EGGSHELL": (252, 230, 201),
"EMERALD GREEN": (0, 201, 87),
"FIREBRICK": (178, 34, 34),
"FIREBRICK 1": (255, 48, 48),
"FIREBRICK 2": (238, 44, 44),
"FIREBRICK 3": (205, 38, 38),
"FIREBRICK 4": (139, 26, 26),
"FLESH": (255, 125, 64),
"FLORAL WHITE": (255, 250, 240),
"FOREST GREEN": (34, 139, 34),
"GAINSBORO": (220, 220, 220),
"GHOST WHITE": (248, 248, 255),
"GOLD": (255, 215, 0),
"GOLD 1": (238, 201, 0),
"GOLD 2": (205, 173, 0),
"GOLD 3": (139, 117, 0),
"GOLDENROD": (218, 165, 32),
"GOLDENROD 1": (255, 193, 37),
"GOLDENROD 2": (238, 180, 34),
"GOLDENROD 3": (205, 155, 29),
"GOLDENROD 4": (139, 105, 20),
"GRAY": (128, 128, 128),
"GRAY 1": (3, 3, 3),
"GRAY 10": (26, 26, 26),
"GRAY 11": (28, 28, 28),
"GRAY 12": (31, 31, 31),
"GRAY 13": (33, 33, 33),
"GRAY 14": (36, 36, 36),
"GRAY 15": (38, 38, 38),
"GRAY 16": (41, 41, 41),
"GRAY 17": (43, 43, 43),
"GRAY 18": (46, 46, 46),
"GRAY 19": (48, 48, 48),
"GRAY 2": (5, 5, 5),
"GRAY 20": (51, 51, 51),
"GRAY 21": (54, 54, 54),
"GRAY 22": (56, 56, 56),
"GRAY 23": (59, 59, 59),
"GRAY 24": (61, 61, 61),
"GRAY 25": (64, 64, 64),
"GRAY 26": (66, 66, 66),
"GRAY 27": (69, 69, 69),
"GRAY 28": (71, 71, 71),
"GRAY 29": (74, 74, 74),
"GRAY 3": (8, 8, 8),
"GRAY 30": (77, 77, 77),
"GRAY 31": (79, 79, 79),
"GRAY 32": (82, 82, 82),
"GRAY 33": (84, 84, 84),
"GRAY 34": (87, 87, 87),
"GRAY 35": (89, 89, 89),
"GRAY 36": (92, 92, 92),
"GRAY 37": (94, 94, 94),
"GRAY 38": (97, 97, 97),
"GRAY 39": (99, 99, 99),
"GRAY 4": (10, 10, 10),
"GRAY 40": (102, 102, 102),
"GRAY 42": (107, 107, 107),
"GRAY 43": (110, 110, 110),
"GRAY 44": (112, 112, 112),
"GRAY 45": (115, 115, 115),
"GRAY 46": (117, 117, 117),
"GRAY 47": (120, 120, 120),
"GRAY 48": (122, 122, 122),
"GRAY 49": (125, 125, 125),
"GRAY 5": (13, 13, 13),
"GRAY 50": (127, 127, 127),
"GRAY 51": (130, 130, 130),
"GRAY 52": (133, 133, 133),
"GRAY 53": (135, 135, 135),
"GRAY 54": (138, 138, 138),
"GRAY 55": (140, 140, 140),
"GRAY 56": (143, 143, 143),
"GRAY 57": (145, 145, 145),
"GRAY 58": (148, 148, 148),
"GRAY 59": (150, 150, 150),
"GRAY 6": (15, 15, 15),
"GRAY 60": (153, 153, 153),
"GRAY 61": (156, 156, 156),
"GRAY 62": (158, 158, 158),
"GRAY 63": (161, 161, 161),
"GRAY 64": (163, 163, 163),
"GRAY 65": (166, 166, 166),
"GRAY 66": (168, 168, 168),
"GRAY 67": (171, 171, 171),
"GRAY 68": (173, 173, 173),
"GRAY 69": (176, 176, 176),
"GRAY 7": (18, 18, 18),
"GRAY 70": (179, 179, 179),
"GRAY 71": (181, 181, 181),
"GRAY 72": (184, 184, 184),
"GRAY 73": (186, 186, 186),
"GRAY 74": (189, 189, 189),
"GRAY 75": (191, 191, 191),
"GRAY 76": (194, 194, 194),
"GRAY 77": (196, 196, 196),
"GRAY 78": (199, 199, 199),
"GRAY 79": (201, 201, 201),
"GRAY 8": (20, 20, 20),
"GRAY 80": (204, 204, 204),
"GRAY 81": (207, 207, 207),
"GRAY 82": (209, 209, 209),
"GRAY 83": (212, 212, 212),
"GRAY 84": (214, 214, 214),
"GRAY 85": (217, 217, 217),
"GRAY 86": (219, 219, 219),
"GRAY 87": (222, 222, 222),
"GRAY 88": (224, 224, 224),
"GRAY 89": (227, 227, 227),
"GRAY 9": (23, 23, 23),
"GRAY 90": (229, 229, 229),
"GRAY 91": (232, 232, 232),
"GRAY 92": (235, 235, 235),
"GRAY 93": (237, 237, 237),
"GRAY 94": (240, 240, 240),
"GRAY 95": (242, 242, 242),
"GRAY 97": (247, 247, 247),
"GRAY 98": (250, 250, 250),
"GRAY 99": (252, 252, 252),
"GREEN": (0, 255, 0),
"GREEN 2": (0, 238, 0),
"GREEN 3": (0, 205, 0),
"GREEN 4": (0, 139, 0),
"GREEN YELLOW": (173, 255, 47),
"HONEYDEW": (240, 255, 240),
"HONEYDEW 1": (224, 238, 224),
"HONEYDEW 2": (193, 205, 193),
"HONEYDEW 3": (131, 139, 131),
"HOT PINK": (255, 105, 180),
"HOT PINK 1": (255, 110, 180),
"HOT PINK 2": (238, 106, 167),
"HOT PINK 3": (205, 96, 144),
"HOT PINK 4": (139, 58, 98),
"INDIAN RED": (205, 92, 92),
"INDIAN RED 1": (255, 106, 106),
"INDIAN RED 2": (238, 99, 99),
"INDIAN RED 3": (205, 85, 85),
"INDIAN RED 4": (139, 58, 58),
"INDIGO": (75, 0, 130),
"IVORY": (255, 255, 240),
"IVORY 1": (238, 238, 224),
"IVORY 2": (205, 205, 193),
"IVORY 3": (139, 139, 131),
"IVORY BLACK": (41, 36, 33),
"KELLY GREEN": (0, 128, 0),
"KHAKI": (240, 230, 140),
"KHAKI 1": (255, 246, 143),
"KHAKI 2": (238, 230, 133),
"KHAKI 3": (205, 198, 115),
"KHAKI 4": (139, 134, 78),
"LAVENDER": (230, 230, 250),
"LAVENDER BLUSH": (255, 240, 245),
"LAVENDER BLUSH 1": (238, 224, 229),
"LAVENDER BLUSH 2": (205, 193, 197),
"LAVENDER BLUSH 3": (139, 131, 134),
"LAWN GREEN": (124, 252, 0),
"LEMON CHIFFON": (255, 250, 205),
"LEMON CHIFFON 1": (238, 233, 191),
"LEMON CHIFFON 2": (205, 201, 165),
"LEMON CHIFFON 3": (139, 137, 112),
"LIGHT BLUE": (173, 216, 230),
"LIGHT BLUE 1": (191, 239, 255),
"LIGHT BLUE 2": (178, 223, 238),
"LIGHT BLUE 3": (154, 192, 205),
"LIGHT BLUE 4": (104, 131, 139),
"LIGHT CORAL": (240, 128, 128),
"LIGHT CYAN": (224, 255, 255),
"LIGHT CYAN 1": (209, 238, 238),
"LIGHT CYAN 2": (180, 205, 205),
"LIGHT CYAN 3": (122, 139, 139),
"LIGHT GOLDENROD": (255, 236, 139),
"LIGHT GOLDENROD 1": (238, 220, 130),
"LIGHT GOLDENROD 2": (205, 190, 112),
"LIGHT GOLDENROD 3": (139, 129, 76),
"LIGHT GOLDENROD YELLOW": (250, 250, 210),
"LIGHT GREY": (211, 211, 211),
"LIGHT PINK": (255, 182, 193),
"LIGHT PINK 1": (255, 174, 185),
"LIGHT PINK 2": (238, 162, 173),
"LIGHT PINK 3": (205, 140, 149),
"LIGHT PINK 4": (139, 95, 101),
"LIGHT SALMON": (255, 160, 122),
"LIGHT SALMON 1": (238, 149, 114),
"LIGHT SALMON 2": (205, 129, 98),
"LIGHT SALMON 3": (139, 87, 66),
"LIGHT SEA GREEN": (32, 178, 170),
"LIGHT SKY BLUE": (135, 206, 250),
"LIGHT SKY BLUE 1": (176, 226, 255),
"LIGHT SKY BLUE 2": (164, 211, 238),
"LIGHT SKY BLUE 3": (141, 182, 205),
"LIGHT SKY BLUE 4": (96, 123, 139),
"LIGHT SLATE BLUE": (132, 112, 255),
"LIGHT SLATE GRAY": (119, 136, 153),
"LIGHT STEEL BLUE": (176, 196, 222),
"LIGHT STEEL BLUE 1": (202, 225, 255),
"LIGHT STEEL BLUE 2": (188, 210, 238),
"LIGHT STEEL BLUE 3": (162, 181, 205),
"LIGHT STEEL BLUE 4": (110, 123, 139),
"LIGHT YELLOW 1": (255, 255, 224),
"LIGHT YELLOW 2": (238, 238, 209),
"LIGHT YELLOW 3": (205, 205, 180),
"LIGHT YELLOW 4": (139, 139, 122),
"LIME GREEN": (50, 205, 50),
"LINEN": (250, 240, 230),
"MAGENTA": (255, 0, 255),
"MAGENTA 2": (238, 0, 238),
"MAGENTA 3": (205, 0, 205),
"MAGENTA 4": (139, 0, 139),
"MANGANESE BLUE": (3, 168, 158),
"MAROON": (128, 0, 0),
"MAROON 1": (255, 52, 179),
"MAROON 2": (238, 48, 167),
"MAROON 3": (205, 41, 144),
"MAROON 4": (139, 28, 98),
"MEDIUM ORCHID": (186, 85, 211),
"MEDIUM ORCHID 1": (224, 102, 255),
"MEDIUM ORCHID 2": (209, 95, 238),
"MEDIUM ORCHID 3": (180, 82, 205),
"MEDIUM ORCHID 4": (122, 55, 139),
"MEDIUM PURPLE": (147, 112, 219),
"MEDIUM PURPLE 1": (171, 130, 255),
"MEDIUM PURPLE 2": (159, 121, 238),
"MEDIUM PURPLE 3": (137, 104, 205),
"MEDIUM PURPLE 4": (93, 71, 139),
"MEDIUM SEA GREEN": (60, 179, 113),
"MEDIUM SLATE BLUE": (123, 104, 238),
"MEDIUM SPRING GREEN": (0, 250, 154),
"MEDIUM TURQUOISE": (72, 209, 204),
"MEDIUM VIOLET RED": (199, 21, 133),
"MELON": (227, 168, 105),
"MIDNIGHT BLUE": (25, 25, 112),
"MINT": (189, 252, 201),
"MINT CREAM": (245, 255, 250),
"MISTY ROSE": (255, 228, 225),
"MISTY ROSE 1": (238, 213, 210),
"MISTY ROSE 2": (205, 183, 181),
"MISTY ROSE 3": (139, 125, 123),
"MOCCASIN": (255, 228, 181),
"NAVAJO WHITE": (255, 222, 173),
"NAVAJO WHITE 1": (238, 207, 161),
"NAVAJO WHITE 2": (205, 179, 139),
"NAVAJO WHITE 3": (139, 121, 94),
"NAVY": (0, 0, 128),
"OLD LACE": (253, 245, 230),
"OLIVE": (128, 128, 0),
"OLIVE DRAB": (192, 255, 62),
"OLIVE DRAB 1": (179, 238, 58),
"OLIVE DRAB 2": (154, 205, 50),
"OLIVE DRAB 3": (105, 139, 34),
"ORANGE": (255, 128, 0),
"ORANGE 1": (255, 165, 0),
"ORANGE 2": (238, 154, 0),
"ORANGE 3": (205, 133, 0),
"ORANGE 4": (139, 90, 0),
"ORANGE 5": (245, 102, 0),
"ORANGE RED": (255, 69, 0),
"ORANGE RED 1": (238, 64, 0),
"ORANGE RED 2": (205, 55, 0),
"ORANGE RED 3": (139, 37, 0),
"ORCHID": (218, 112, 214),
"ORCHID 1": (255, 131, 250),
"ORCHID 2": (238, 122, 233),
"ORCHID 3": (205, 105, 201),
"ORCHID 4": (139, 71, 137),
"PALE GOLDENROD": (238, 232, 170),
"PALE GREEN": (152, 251, 152),
"PALE GREEN 1": (154, 255, 154),
"PALE GREEN 2": (144, 238, 144),
"PALE GREEN 3": (124, 205, 124),
"PALE GREEN 4": (84, 139, 84),
"PALE TURQUOISE": (187, 255, 255),
"PALE TURQUOISE 1": (174, 238, 238),
"PALE TURQUOISE 2": (150, 205, 205),
"PALE TURQUOISE 3": (102, 139, 139),
"PALE VIOLET RED": (219, 112, 147),
"PALE VIOLET RED 1": (255, 130, 171),
"PALE VIOLET RED 2": (238, 121, 159),
"PALE VIOLET RED 3": (205, 104, 137),
"PALE VIOLET RED 4": (139, 71, 93),
"PAPAYA WHIP": (255, 239, 213),
"PEACH PUFF": (255, 218, 185),
"PEACH PUFF 1": (238, 203, 173),
"PEACH PUFF 2": (205, 175, 149),
"PEACH PUFF 3": (139, 119, 101),
"PEACOCK": (51, 161, 201),
"PINK": (255, 192, 203),
"PINK 1": (255, 181, 197),
"PINK 2": (238, 169, 184),
"PINK 3": (205, 145, 158),
"PINK 4": (139, 99, 108),
"PLUM": (221, 160, 221),
"PLUM 1": (255, 187, 255),
"PLUM 2": (238, 174, 238),
"PLUM 3": (205, 150, 205),
"PLUM 4": (139, 102, 139),
"POWDER BLUE": (176, 224, 230),
"PURPLE": (128, 0, 128),
"PURPLE 1": (155, 48, 255),
"PURPLE 2": (145, 44, 238),
"PURPLE 3": (125, 38, 205),
"PURPLE 4": (85, 26, 139),
"RASPBERRY": (135, 38, 87),
"RAW SIENNA": (199, 97, 20),
"RED": (255, 0, 0),
"RED 1": (238, 0, 0),
"RED 2": (205, 0, 0),
"RED 3": (139, 0, 0),
"ROSY BROWN": (188, 143, 143),
"ROSY BROWN 1": (255, 193, 193),
"ROSY BROWN 2": (238, 180, 180),
"ROSY BROWN 3": (205, 155, 155),
"ROSY BROWN 4": (139, 105, 105),
"ROYAL BLUE": (65, 105, 225),
"ROYAL BLUE 1": (72, 118, 255),
"ROYAL BLUE 2": (67, 110, 238),
"ROYAL BLUE 3": (58, 95, 205),
"ROYAL BLUE 4": (39, 64, 139),
"SALMON": (250, 128, 114),
"SALMON 1": (255, 140, 105),
"SALMON 2": (238, 130, 98),
"SALMON 3": (205, 112, 84),
"SALMON 4": (139, 76, 57),
"SANDY BROWN": (244, 164, 96),
"SAP GREEN": (48, 128, 20),
"SEA GREEN": (84, 255, 159),
"SEA GREEN 1": (78, 238, 148),
"SEA GREEN 2": (67, 205, 128),
"SEA GREEN 3": (46, 139, 87),
"SEASHELL": (255, 245, 238),
"SEASHELL 1": (238, 229, 222),
"SEASHELL 2": (205, 197, 191),
"SEASHELL 3": (139, 134, 130),
"SEPIA": (94, 38, 18),
"SIENNA": (160, 82, 45),
"SIENNA 1": (255, 130, 71),
"SIENNA 2": (238, 121, 66),
"SIENNA 3": (205, 104, 57),
"SIENNA 4": (139, 71, 38),
"SILVER": (192, 192, 192),
"SKY BLUE": (135, 206, 235),
"SKY BLUE 1": (135, 206, 255),
"SKY BLUE 2": (126, 192, 238),
"SKY BLUE 3": (108, 166, 205),
"SKY BLUE 4": (74, 112, 139),
"SKY BLUE 5": (185, 217, 235),
"SLATE BLUE": (106, 90, 205),
"SLATE BLUE 1": (131, 111, 255),
"SLATE BLUE 2": (122, 103, 238),
"SLATE BLUE 3": (105, 89, 205),
"SLATE BLUE 4": (71, 60, 139),
"SLATE GRAY": (112, 128, 144),
"SLATE GRAY 1": (198, 226, 255),
"SLATE GRAY 2": (185, 211, 238),
"SLATE GRAY 3": (159, 182, 205),
"SLATE GRAY 4": (108, 123, 139),
"SNOW": (255, 250, 250),
"SNOW 1": (238, 233, 233),
"SNOW 2": (205, 201, 201),
"SNOW 3": (139, 137, 137),
"SPRING GREEN": (0, 255, 127),
"SPRING GREEN 1": (0, 238, 118),
"SPRING GREEN 2": (0, 205, 102),
"SPRING GREEN 3": (0, 139, 69),
"STEEL BLUE": (70, 130, 180),
"STEEL BLUE 1": (99, 184, 255),
"STEEL BLUE 2": (92, 172, 238),
"STEEL BLUE 3": (79, 148, 205),
"STEEL BLUE 4": (54, 100, 139),
"TAN": (210, 180, 140),
"TAN 1": (255, 165, 79),
"TAN 2": (238, 154, 73),
"TAN 3": (205, 133, 63),
"TAN 4": (139, 90, 43),
"TEAL": (0, 128, 128),
"THISTLE": (216, 191, 216),
"THISTLE 1": (255, 225, 255),
"THISTLE 2": (238, 210, 238),
"THISTLE 3": (205, 181, 205),
"THISTLE 4": (139, 123, 139),
"TOMATO": (255, 99, 71),
"TOMATO 1": (238, 92, 66),
"TOMATO 2": (205, 79, 57),
"TOMATO 3": (139, 54, 38),
"TURQUOISE": (64, 224, 208),
"TURQUOISE 1": (0, 245, 255),
"TURQUOISE 2": (0, 229, 238),
"TURQUOISE 3": (0, 197, 205),
"TURQUOISE 4": (0, 134, 139),
"TURQUOISE BLUE": (0, 199, 140),
"VIOLET": (238, 130, 238),
"VIOLET RED": (208, 32, 144),
"VIOLET RED 1": (255, 62, 150),
"VIOLET RED 2": (238, 58, 140),
"VIOLET RED 3": (205, 50, 120),
"VIOLET RED 4": (139, 34, 82),
"WARM GREY": (128, 128, 105),
"WHEAT": (245, 222, 179),
"WHEAT 1": (255, 231, 186),
"WHEAT 2": (238, 216, 174),
"WHEAT 3": (205, 186, 150),
"WHEAT 4": (139, 126, 102),
"WHITE": (255, 255, 255),
"WHITE SMOKE": (245, 245, 245),
"YELLOW": (255, 255, 0),
"YELLOW 1": (238, 238, 0),
"YELLOW 2": (205, 205, 0),
"YELLOW 3": (139, 139, 0),
}
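# Illustrative helper (not part of the original module): convert one of the
# FDS color names above to a Blender-style RGBA tuple in the 0..1 range.
def fds_color_to_rgba(name, alpha=1.0):
    r, g, b = fds_colors[name]
    return (r / 255.0, g / 255.0, b / 255.0, alpha)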
|
firetools/blenderfds
|
config.py
|
Python
|
gpl-3.0
| 27,453
|
import asyncio
import websockets
@asyncio.coroutine
def hello(websocket, path):
yield from websocket.send("HTTP/1.0 200 \n\n")  # send() is a coroutine in this API; without "yield from" the call is a no-op
name = yield from websocket.recv()
print("< {}".format(name))
greeting = "Hello {}!".format(name)
yield from websocket.send(greeting)
print("> {}".format(greeting))
start_server = websockets.serve(hello, '0.0.0.0', 8080)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
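# Illustrative client sketch for manual testing, left commented out so the
# server above still runs standalone (assumes the same legacy coroutine API):
#
# @asyncio.coroutine
# def client():
#     ws = yield from websockets.connect('ws://127.0.0.1:8080/')
#     yield from ws.send("World")
#     print((yield from ws.recv()))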
|
belimawr/experiments
|
ESP8266_WebSocket/test.py
|
Python
|
gpl-3.0
| 463
|
from tkinter import *
from tkinter import ttk
root = Tk()
Label(root, text='Hello, Om!').pack()
button = ttk.Button(root, text='Click Me')
button.pack()
button['text'] = 'Press Me'
button.config(text='Push Me')
print(button)
print(root)
root.mainloop()
|
comprakash/learning-python
|
learning-tkinter/src/getting_started/hello.py
|
Python
|
gpl-3.0
| 259
|
#!/usr/bin/env python
import os
import atpy
from pyraf import iraf
from pylab import *
from LCScommon import *
mypath=os.getcwd()
if mypath.find('Users') > -1:
print "Running on Rose's mac pro"
homedir='/Users/rfinn/'
elif mypath.find('home') > -1:
print "Running on coma"
homedir='/home/rfinn/'
# from individual bcd image
GAIN=5 # e-/DN conversion
FLUXCONV=.0447 # DN/s to BUNIT
EXPT=2.62 # expt per scan
iraf.imutil()
iraf.images()
iraf.imfilter()
class cluster:
def __init__(self,clustername):
self.prefix=clustername
self.imagepath24=homedir+'research/LocalClusters/Images/'+self.prefix+'/24umWCS/'
self.sdssimagepath=homedir+'research/LocalClusters/Images/'+self.prefix+'/SDSS/'
self.sex_image=homedir+'research/LocalClusters/Images/'+self.prefix+'/24umWCS/'+self.prefix+'-WCS-mosaic_minus_median_extract.fits'
self.unc_image=homedir+'research/LocalClusters/Images/'+self.prefix+'/24umWCS/'+self.prefix+'-WCS-mosaic_unc.fits'
self.cov_image=homedir+'research/LocalClusters/Images/'+self.prefix+'/24umWCS/'+self.prefix+'-WCS-mosaic_cov.fits'
#infile=homedir+'research/LocalClusters/NSAmastertables/NSAwithAGC/'+clustername+'_NSAmastertable_topcat.fits'
#self.n=atpy.Table(infile)
def mk24noiseimage(self):
os.chdir(self.imagepath24)
# remove temp images if they exist
os.system('rm temp*.fits')
# multiply image by exptime x gain x coverage map
scale=FLUXCONV*GAIN*EXPT
iraf.imarith(operand1=self.sex_image,op='*',operand2=scale,result='temp1.fits')
iraf.imarith(operand1='temp1.fits',op='*',operand2=self.cov_image,result='temp2.fits')
# smooth image using iraf.images.imfilter.gauss
iraf.gauss(input='temp2.fits',output='temp3.fits',sigma=2,nsigma=6)
# take sqrt
iraf.imfunction(input='temp3.fits',output='temp4.fits',function='sqrt')
# divide by exptime x gain x coverage map
iraf.imarith(operand1='temp4.fits',op='/',operand2=scale,result='temp5.fits')
# multiply image and sigma image by 100
s=self.prefix+'-scalednoise.fits'
iraf.imarith(operand1='temp5.fits',op='*',operand2=100,result=s)
s=self.prefix+'-scaled24.fits'
iraf.imarith(operand1=self.sex_image,op='*',operand2=100,result=s)
# adjust zp - make it fainter by 5 mag
mkw11=cluster('MKW11')
#for cname in clusternames:
# cl=cluster(cname)
# cl.mk24noiseimage()
# 1 NUMBER Running object number
# 2 X_IMAGE Object position along x [pixel]
# 3 Y_IMAGE Object position along y [pixel]
# 4 XMIN_IMAGE Minimum x-coordinate among detected pixels [pixel]
# 5 YMIN_IMAGE Minimum y-coordinate among detected pixels [pixel]
# 6 XMAX_IMAGE Maximum x-coordinate among detected pixels [pixel]
# 7 YMAX_IMAGE Maximum y-coordinate among detected pixels [pixel]
# 8 ALPHA_J2000 Right ascension of barycenter (J2000) [deg]
# 9 DELTA_J2000 Declination of barycenter (J2000) [deg]
# 10 FLUX_ISO Isophotal flux [count]
# 11 FLUXERR_ISO RMS error for isophotal flux [count]
# 12 MAG_ISO Isophotal magnitude [mag]
# 13 MAGERR_ISO RMS error for isophotal magnitude [mag]
# 14 FLUX_ISOCOR Corrected isophotal flux [count]
# 15 FLUXERR_ISOCOR RMS error for corrected isophotal flux [count]
# 16 MAG_ISOCOR Corrected isophotal magnitude [mag]
# 17 MAGERR_ISOCOR RMS error for corrected isophotal magnitude [mag]
# 18 FLUX_APER Flux vector within fixed circular aperture(s) [count]
# 21 FLUXERR_APER RMS error vector for aperture flux(es) [count]
# 24 MAG_APER Fixed aperture magnitude vector [mag]
# 27 MAGERR_APER RMS error vector for fixed aperture mag. [mag]
# 30 FLUX_AUTO Flux within a Kron-like elliptical aperture [count]
# 31 FLUXERR_AUTO RMS error for AUTO flux [count]
# 32 MAG_AUTO Kron-like elliptical aperture magnitude [mag]
# 33 MAGERR_AUTO RMS error for AUTO magnitude [mag]
# 34 FLUX_BEST Best of FLUX_AUTO and FLUX_ISOCOR [count]
# 35 FLUXERR_BEST RMS error for BEST flux [count]
# 36 MAG_BEST Best of MAG_AUTO and MAG_ISOCOR [mag]
# 37 MAGERR_BEST RMS error for MAG_BEST [mag]
# 38 KRON_RADIUS Kron apertures in units of A or B
# 39 PETRO_RADIUS Petrosian apertures in units of A or B
# 40 FLUX_PETRO Flux within a Petrosian-like elliptical apertur [count]
# 41 FLUXERR_PETRO RMS error for PETROsian flux [count]
# 42 MAG_PETRO Petrosian-like elliptical aperture magnitude [mag]
# 43 MAGERR_PETRO RMS error for PETROsian magnitude [mag]
# 44 FLUX_RADIUS Fraction-of-light radii [pixel]
# 47 BACKGROUND Background at centroid position [count]
# 48 THRESHOLD Detection threshold above background [count]
# 49 MU_THRESHOLD Detection threshold above background [mag * arcsec**(-2)]
# 50 FLUX_MAX Peak flux above background [count]
# 51 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)]
# 52 ISOAREA_IMAGE Isophotal area above Analysis threshold [pixel**2]
# 53 ISOAREA_WORLD Isophotal area above Analysis threshold [deg**2]
# 54 A_IMAGE Profile RMS along major axis [pixel]
# 55 B_IMAGE Profile RMS along minor axis [pixel]
# 56 A_WORLD Profile RMS along major axis (world units) [deg]
# 57 B_WORLD Profile RMS along minor axis (world units) [deg]
# 58 THETA_IMAGE Position angle (CCW/x) [deg]
# 59 ERRTHETA_IMAGE Error ellipse position angle (CCW/x) [deg]
# 60 THETA_WORLD Position angle (CCW/world-x) [deg]
# 61 ERRTHETA_WORLD Error ellipse pos. angle (CCW/world-x) [deg]
# 62 THETA_J2000 Position angle (east of north) (J2000) [deg]
# 63 ERRTHETA_J2000 J2000 error ellipse pos. angle (east of north) [deg]
# 64 ELONGATION A_IMAGE/B_IMAGE
# 65 ELLIPTICITY 1 - B_IMAGE/A_IMAGE
# 66 FWHM_IMAGE FWHM assuming a gaussian core [pixel]
# 67 FWHM_WORLD FWHM assuming a gaussian core [deg]
# 68 FLAGS Extraction flags
# 69 CLASS_STAR S/G classifier output
|
rfinn/LCS
|
paper1code/LCSmakemipsnoiseimage.py
|
Python
|
gpl-3.0
| 6,850
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.desk.reportview import get_match_cond, get_filters_cond
from frappe.utils import nowdate
from collections import defaultdict
# searches for active employees
def employee_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
return frappe.db.sql("""select name, employee_name from `tabEmployee`
where status = 'Active'
and docstatus < 2
and ({key} like %(txt)s
or employee_name like %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, employee_name), locate(%(_txt)s, employee_name), 99999),
idx desc,
name, employee_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'fcond': get_filters_cond(doctype, filters, conditions),
'mcond': get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for leads which are not converted
def lead_query(doctype, txt, searchfield, start, page_len, filters):
return frappe.db.sql("""select name, lead_name, company_name from `tabLead`
where docstatus < 2
and ifnull(status, '') != 'Converted'
and ({key} like %(txt)s
or lead_name like %(txt)s
or company_name like %(txt)s)
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, lead_name), locate(%(_txt)s, lead_name), 99999),
if(locate(%(_txt)s, company_name), locate(%(_txt)s, company_name), 99999),
idx desc,
name, lead_name
limit %(start)s, %(page_len)s""".format(**{
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for customers
def customer_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
if cust_master_name == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "territory"]
meta = frappe.get_meta("Customer")
searchfields = meta.get_search_fields()
searchfields = searchfields + [f for f in [searchfield or "name", "customer_name"]
if f not in searchfields]
fields = fields + [f for f in searchfields if f not in fields]
fields = ", ".join(fields)
searchfields = " or ".join([field + " like %(txt)s" for field in searchfields])
return frappe.db.sql("""select {fields} from `tabCustomer`
where docstatus < 2
and ({scond}) and disabled=0
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
idx desc,
name, customer_name
limit %(start)s, %(page_len)s""".format(**{
"fields": fields,
"scond": searchfields,
"mcond": get_match_cond(doctype),
"fcond": get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
# searches for suppliers
def supplier_query(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_type"]
else:
fields = ["name", "supplier_name", "supplier_type"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s) and disabled=0
{mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
idx desc,
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': searchfield,
'mcond':get_match_cond(doctype)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2
and account_type in (%s)
and is_group = 0
and company = %s
and `%s` LIKE %s
order by idx desc, name
limit %s, %s""" %
(", ".join(['%s']*len(filters.get("account_type"))), "%s", searchfield, "%s", "%s", "%s"),
tuple(filters.get("account_type") + [filters.get("company"), "%%%s%%" % txt,
start, page_len]))
if not tax_accounts:
tax_accounts = frappe.db.sql("""select name, parent_account from tabAccount
where tabAccount.docstatus!=2 and is_group = 0
and company = %s and `%s` LIKE %s limit %s, %s"""
% ("%s", searchfield, "%s", "%s", "%s"),
(filters.get("company"), "%%%s%%" % txt, start, page_len))
return tax_accounts
def item_query(doctype, txt, searchfield, start, page_len, filters, as_dict=False):
conditions = []
return frappe.db.sql("""select tabItem.name, tabItem.item_group, tabItem.image,
if(length(tabItem.item_name) > 40,
concat(substr(tabItem.item_name, 1, 40), "..."), item_name) as item_name,
if(length(tabItem.description) > 40, \
concat(substr(tabItem.description, 1, 40), "..."), description) as description
from tabItem
where tabItem.docstatus < 2
and tabItem.has_variants=0
and tabItem.disabled=0
and (tabItem.end_of_life > %(today)s or ifnull(tabItem.end_of_life, '0000-00-00')='0000-00-00')
and (tabItem.`{key}` LIKE %(txt)s
or tabItem.item_group LIKE %(txt)s
or tabItem.item_name LIKE %(txt)s
or tabItem.description LIKE %(txt)s)
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, item_name), locate(%(_txt)s, item_name), 99999),
idx desc,
name, item_name
limit %(start)s, %(page_len)s """.format(key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions).replace('%', '%%'),
mcond=get_match_cond(doctype).replace('%', '%%')),
{
"today": nowdate(),
"txt": "%%%s%%" % txt,
"_txt": txt.replace("%", ""),
"start": start,
"page_len": page_len
}, as_dict=as_dict)
def bom(doctype, txt, searchfield, start, page_len, filters):
conditions = []
return frappe.db.sql("""select tabBOM.name, tabBOM.item
from tabBOM
where tabBOM.docstatus=1
and tabBOM.is_active=1
and tabBOM.`{key}` like %(txt)s
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
idx desc, name
limit %(start)s, %(page_len)s """.format(
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype),
key=frappe.db.escape(searchfield)),
{
'txt': "%%%s%%" % frappe.db.escape(txt),
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len
})
def get_project_name(doctype, txt, searchfield, start, page_len, filters):
cond = ''
if filters.get('customer'):
cond = '(`tabProject`.customer = "' + filters['customer'] + '" or ifnull(`tabProject`.customer,"")="") and'
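# Note: interpolating filters['customer'] directly into the SQL string is an
# injection risk; escaping it via frappe.db.escape() would be safer here.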
return frappe.db.sql("""select `tabProject`.name from `tabProject`
where `tabProject`.status not in ("Completed", "Cancelled")
and {cond} `tabProject`.name like %(txt)s {match_cond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
idx desc,
`tabProject`.name asc
limit {start}, {page_len}""".format(
cond=cond,
match_cond=get_match_cond(doctype),
start=start,
page_len=page_len), {
"txt": "%{0}%".format(txt),
"_txt": txt.replace('%', '')
})
def get_delivery_notes_to_be_billed(doctype, txt, searchfield, start, page_len, filters, as_dict):
return frappe.db.sql("""
select `tabDelivery Note`.name, `tabDelivery Note`.customer, `tabDelivery Note`.posting_date
from `tabDelivery Note`
where `tabDelivery Note`.`%(key)s` like %(txt)s and
`tabDelivery Note`.docstatus = 1 and `tabDelivery Note`.is_return = 0
and status not in ("Stopped", "Closed") %(fcond)s
and (`tabDelivery Note`.per_billed < 100 or `tabDelivery Note`.grand_total = 0)
%(mcond)s order by `tabDelivery Note`.`%(key)s` asc
""" % {
"key": searchfield,
"fcond": get_filters_cond(doctype, filters, []),
"mcond": get_match_cond(doctype),
"txt": "%(txt)s"
}, { "txt": ("%%%s%%" % txt) }, as_dict=as_dict)
def get_batch_no(doctype, txt, searchfield, start, page_len, filters):
cond = ""
if filters.get("posting_date"):
cond = "and (ifnull(batch.expiry_date, '')='' or batch.expiry_date >= %(posting_date)s)"
batch_nos = None
args = {
'item_code': filters.get("item_code"),
'warehouse': filters.get("warehouse"),
'posting_date': filters.get('posting_date'),
'txt': "%{0}%".format(txt),
"start": start,
"page_len": page_len
}
if args.get('warehouse'):
batch_nos = frappe.db.sql("""select sle.batch_no, round(sum(sle.actual_qty),2), sle.stock_uom, batch.expiry_date
from `tabStock Ledger Entry` sle
INNER JOIN `tabBatch` batch on sle.batch_no = batch.name
where
sle.item_code = %(item_code)s
and sle.warehouse = %(warehouse)s
and sle.batch_no like %(txt)s
and batch.docstatus < 2
{0}
{match_conditions}
group by batch_no having sum(sle.actual_qty) > 0
order by batch.expiry_date, sle.batch_no desc
limit %(start)s, %(page_len)s""".format(cond, match_conditions=get_match_cond(doctype)), args)
if batch_nos:
return batch_nos
else:
return frappe.db.sql("""select name, expiry_date from `tabBatch` batch
where item = %(item_code)s
and name like %(txt)s
and docstatus < 2
{0}
{match_conditions}
order by expiry_date, name desc
limit %(start)s, %(page_len)s""".format(cond, match_conditions=get_match_cond(doctype)), args)
def get_account_list(doctype, txt, searchfield, start, page_len, filters):
filter_list = []
if isinstance(filters, dict):
for key, val in filters.items():
if isinstance(val, (list, tuple)):
filter_list.append([doctype, key, val[0], val[1]])
else:
filter_list.append([doctype, key, "=", val])
elif isinstance(filters, list):
filter_list.extend(filters)
if "is_group" not in [d[1] for d in filter_list]:
filter_list.append(["Account", "is_group", "=", "0"])
if searchfield and txt:
filter_list.append([doctype, searchfield, "like", "%%%s%%" % txt])
return frappe.desk.reportview.execute("Account", filters = filter_list,
fields = ["name", "parent_account"],
limit_start=start, limit_page_length=page_len, as_list=True)
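# Illustrative filter shapes accepted above (hypothetical values):
#   dict form: {"company": "Acme", "account_type": ("in", ["Bank", "Cash"])}
#   list form: [["Account", "company", "=", "Acme"]]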
@frappe.whitelist()
def get_income_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
# income account can be any Credit account,
# but can also be an Asset account with account_type='Income Account' in special circumstances.
# Hence the first condition is an "OR"
if not filters: filters = {}
condition = ""
if filters.get("company"):
condition += "and tabAccount.company = %(company)s"
return frappe.db.sql("""select tabAccount.name from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type in ("Income Account", "Temporary"))
and tabAccount.is_group=0
and tabAccount.`{key}` LIKE %(txt)s
{condition} {match_condition}
order by idx desc, name"""
.format(condition=condition, match_condition=get_match_cond(doctype), key=searchfield), {
'txt': "%%%s%%" % frappe.db.escape(txt),
'company': filters.get("company", "")
})
@frappe.whitelist()
def get_expense_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
if not filters: filters = {}
condition = ""
if filters.get("company"):
condition += "and tabAccount.company = %(company)s"
return frappe.db.sql("""select tabAccount.name from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type in ("Expense Account", "Fixed Asset", "Temporary"))
and tabAccount.is_group=0
and tabAccount.docstatus!=2
and tabAccount.{key} LIKE %(txt)s
{condition} {match_condition}"""
.format(condition=condition, key=frappe.db.escape(searchfield),
match_condition=get_match_cond(doctype)), {
'company': filters.get("company", ""),
'txt': "%%%s%%" % frappe.db.escape(txt)
})
@frappe.whitelist()
def warehouse_query(doctype, txt, searchfield, start, page_len, filters):
# Should be used when item code is passed in filters.
conditions, bin_conditions = [], []
filter_dict = get_doctype_wise_filters(filters)
sub_query = """ select round(`tabBin`.actual_qty, 2) from `tabBin`
where `tabBin`.warehouse = `tabWarehouse`.name
{bin_conditions} """.format(
bin_conditions=get_filters_cond(doctype, filter_dict.get("Bin"),
bin_conditions, ignore_permissions=True))
query = """select `tabWarehouse`.name,
CONCAT_WS(" : ", "Actual Qty", ifnull( ({sub_query}), 0) ) as actual_qty
from `tabWarehouse`
where
`tabWarehouse`.`{key}` like '{txt}'
{fcond} {mcond}
order by
`tabWarehouse`.name desc
limit
{start}, {page_len}
""".format(
sub_query=sub_query,
key=frappe.db.escape(searchfield),
fcond=get_filters_cond(doctype, filter_dict.get("Warehouse"), conditions),
mcond=get_match_cond(doctype),
start=start,
page_len=page_len,
txt=frappe.db.escape('%{0}%'.format(txt))
)
return frappe.db.sql(query)
def get_doctype_wise_filters(filters):
# Helper function to separate filters doctype-wise
filter_dict = defaultdict(list)
for row in filters:
filter_dict[row[0]].append(row)
return filter_dict
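# Illustrative input/output (hypothetical rows):
#   get_doctype_wise_filters([["Bin", "item_code", "=", "ITEM-001"],
#                             ["Warehouse", "company", "=", "Acme"]])
#   => {"Bin": [["Bin", ...]], "Warehouse": [["Warehouse", ...]]}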
|
mbauskar/erpnext
|
erpnext/controllers/queries.py
|
Python
|
gpl-3.0
| 13,804
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Dec 22 2017)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.dataview
###########################################################################
## Class PanelSelectSnapeda
###########################################################################
class PanelSelectSnapeda ( wx.Panel ):
def __init__( self, parent ):
wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 1050,407 ), style = wx.TAB_TRAVERSAL )
bSizer16 = wx.BoxSizer( wx.VERTICAL )
bSizer3 = wx.BoxSizer( wx.HORIZONTAL )
self.m_splitter1 = wx.SplitterWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SP_3D|wx.SP_LIVE_UPDATE )
self.m_splitter1.Bind( wx.EVT_IDLE, self.m_splitter1OnIdle )
self.m_panel2 = wx.Panel( self.m_splitter1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer2 = wx.BoxSizer( wx.VERTICAL )
self.search_snapeda = wx.SearchCtrl( self.m_panel2, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_NOHIDESEL|wx.TE_PROCESS_ENTER )
self.search_snapeda.ShowSearchButton( True )
self.search_snapeda.ShowCancelButton( True )
self.search_snapeda.SetMinSize( wx.Size( 150,-1 ) )
bSizer2.Add( self.search_snapeda, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.EXPAND, 5 )
self.tree_snapedas = wx.dataview.DataViewCtrl( self.m_panel2, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer2.Add( self.tree_snapedas, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel2.SetSizer( bSizer2 )
self.m_panel2.Layout()
bSizer2.Fit( self.m_panel2 )
self.m_panel1 = wx.Panel( self.m_splitter1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer6 = wx.BoxSizer( wx.VERTICAL )
self.bitmap_preview = wx.StaticBitmap( self.m_panel1, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer6.Add( self.bitmap_preview, 1, wx.ALL|wx.EXPAND, 5 )
self.m_panel1.SetSizer( bSizer6 )
self.m_panel1.Layout()
bSizer6.Fit( self.m_panel1 )
self.m_splitter1.SplitVertically( self.m_panel2, self.m_panel1, 750 )
bSizer3.Add( self.m_splitter1, 1, wx.EXPAND, 5 )
bSizer16.Add( bSizer3, 1, wx.EXPAND, 5 )
m_sdbSizer2 = wx.StdDialogButtonSizer()
self.m_sdbSizer2OK = wx.Button( self, wx.ID_OK )
m_sdbSizer2.AddButton( self.m_sdbSizer2OK )
self.m_sdbSizer2Cancel = wx.Button( self, wx.ID_CANCEL )
m_sdbSizer2.AddButton( self.m_sdbSizer2Cancel )
m_sdbSizer2.Realize()
bSizer16.Add( m_sdbSizer2, 0, wx.EXPAND, 5 )
self.SetSizer( bSizer16 )
self.Layout()
# Connect Events
self.search_snapeda.Bind( wx.EVT_SEARCHCTRL_SEARCH_BTN, self.onSearchSnapedaButton )
self.search_snapeda.Bind( wx.EVT_TEXT_ENTER, self.onSearchSnapedaEnter )
self.m_sdbSizer2Cancel.Bind( wx.EVT_BUTTON, self.onButtonCancelClick )
self.m_sdbSizer2OK.Bind( wx.EVT_BUTTON, self.onButtonOkClick )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def onSearchSnapedaButton( self, event ):
event.Skip()
def onSearchSnapedaEnter( self, event ):
event.Skip()
def onButtonCancelClick( self, event ):
event.Skip()
def onButtonOkClick( self, event ):
event.Skip()
def m_splitter1OnIdle( self, event ):
self.m_splitter1.SetSashPosition( 750 )
self.m_splitter1.Unbind( wx.EVT_IDLE )
|
turdusmerula/kipartman
|
kipartman/dialogs/panel_select_snapeda.py
|
Python
|
gpl-3.0
| 3,604
|
import math
def sieve(n):
t = list(range(3, n, 2))  # list() so pop() works under Python 3
sqrtn = int(math.sqrt(n))
i = 0
while t[i] <= sqrtn:
# remove all multiples of t[i]
p = t[i]
for j in range(len(t)-1, i, -1):
if t[j] % p == 0:
t.pop(j)
i += 1
return [2] + t  # 2 is prime but never an odd candidate, so add it explicitly
print(sieve(1000))
|
otfried/cs101
|
code/lists/sieve.py
|
Python
|
gpl-3.0
| 274
|
import re
has_two_same_adjacent_digits_re = re.compile(r'(\d)\1')
has_a_discrete_pair_of_digits_re = re.compile(
'|'.join('((?<!{}){}{}(?!{}))'.format(n, n, n, n) for n in range(10)))
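# The joined pattern above matches a digit repeated exactly twice: for each
# digit n it tries "nn", with a negative lookbehind and lookahead rejecting a
# third n on either side (so "122" matches on "22" while "1222" does not).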
def has_two_same_adjacent_digits(n):
return has_two_same_adjacent_digits_re.search(str(n))
def has_a_discrete_pair_of_digits(n):
return has_a_discrete_pair_of_digits_re.search(str(n))
def digits_do_not_decrease(n):
prev_digit = 10
while n > 0:
next_digit = n % 10
if next_digit > prev_digit:
return False
prev_digit = next_digit
n //= 10
return True
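# For example, digits_do_not_decrease(122345) is True, while
# digits_do_not_decrease(223450) is False (the trailing 0 decreases).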
def is_good_password(pw):
return has_a_discrete_pair_of_digits(pw) and digits_do_not_decrease(pw)
if __name__ == '__main__':
print(len([pw for pw in range(240920, 789858) if is_good_password(pw)]))
|
aarestad/advent-of-code-2015
|
aoc_2019/day4.py
|
Python
|
gpl-3.0
| 823
|
# -*- coding:utf-8 -*-
__author__ = 'Leo'
import tornado.options
import tornado.web
from tornado.options import options
from game.handler.basehandler import BaseHandler
from utils.db import GameBaseDB
from game.server_manager import ServerManager
CREATE_TICKETS_TABLE_QUERY = '''
CREATE TABLE IF NOT EXISTS `Tickets64_%s` (
`id` bigint(20) unsigned NOT NULL auto_increment,
`stub` char(1) NOT NULL default '',
PRIMARY KEY (`id`),
UNIQUE KEY `stub` (`stub`)
) ENGINE=MyISAM AUTO_INCREMENT=%s
'''
class MainHandler(BaseHandler):
def get(self, *args, **kwargs):
print "GET MainHandler"
#REPLACE INTO Tickets64 (stub) VALUES ('a');
print "execute =", self.db.execute(CREATE_TICKETS_TABLE_QUERY % ('002', '999999'))
entries = self.db.query("SELECT LAST_INSERT_ID() as ID;")
print "entries =", entries
self.write(str(entries))
#self.finish()
def post(self, *args, **kwargs):
#print "POST MainHandler"
raise tornado.web.HTTPError(404)
class GameApplication(tornado.web.Application):
db = None
server = ServerManager()
def __init__(self):
self._parse_config()
handlers = [
(r"/*", MainHandler),
]
settings = dict(
debug=True,
)
tornado.web.Application.__init__(self, handlers, **settings)
def _parse_config(self):
tornado.options.parse_command_line()
#print options.config, options.port, options.log_file_prefix, options.data
tornado.options.parse_config_file(options.config)
#print options.config, options.port, options.log_file_prefix, options.data
def _load_data(self):
#TODO:: load data
pass
def _init_db_pool(self):
self.db = GameBaseDB(database=options.database,
host=options.db_host,
user=options.db_user,
password=options.db_password,
maxsize=options.db_connect_num)
self.server.group_id = options.user_group
pass
def prepare_application(self):
#
self._load_data()
#
self._init_db_pool()
|
eJon/enjoy
|
game/game_server.py
|
Python
|
gpl-3.0
| 2,220
|
import logging
import newrelic.agent
from django.conf import settings
from treeherder.autoclassify.tasks import autoclassify
from treeherder.log_parser.crossreference import crossreference_job
from treeherder.log_parser.utils import post_log_artifacts
from treeherder.model.models import (Job,
JobLog)
from treeherder.workers.task import retryable_task
from . import failureline
logger = logging.getLogger(__name__)
def parser_task(f):
"""Decorator that ensures that log parsing task has not already run,
and also adds New Relic annotations.
"""
def inner(job_log_id, priority):
newrelic.agent.add_custom_parameter("job_log_id", job_log_id)
job_log = JobLog.objects.select_related("job").get(id=job_log_id)
newrelic.agent.add_custom_parameter("job_log_name", job_log.name)
newrelic.agent.add_custom_parameter("job_log_url", job_log.url)
newrelic.agent.add_custom_parameter("job_log_status_prior",
job_log.get_status_display())
if job_log.status == JobLog.PARSED:
logger.info("log already parsed")
return True
return f(job_log, priority)
inner.__name__ = f.__name__
inner.__doc__ = f.__doc__
return inner
def parse_job_log(func_name, routing_key, job_log):
"""
Schedule the log-related tasks to parse an individual log
"""
task_funcs = {
"store_failure_lines": store_failure_lines,
"parse_log": parse_log,
}
logger.debug("parse_job_log for job log %s (%s, %s)",
job_log.id, func_name, routing_key)
priority = routing_key.rsplit(".", 1)[1]
signature = task_funcs[func_name].si(job_log.id, priority)
signature.set(routing_key=routing_key)
signature.apply_async()
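# Illustrative call (hypothetical values): a routing_key such as
# "parse_log.normal" yields priority "normal" for the scheduled task.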
@retryable_task(name='log-parser', max_retries=10)
@parser_task
def parse_log(job_log, priority):
"""
Call ArtifactBuilderCollection on the given job.
"""
post_log_artifacts(job_log)
logger.debug("Scheduling crossreference for job %i from parse_log" % job_log.job.id)
crossreference_error_lines.apply_async(
args=[job_log.job.id, priority],
routing_key="crossreference_error_lines.%s" % priority)
@retryable_task(name='store-failure-lines', max_retries=10)
@parser_task
def store_failure_lines(job_log, priority):
"""Store the failure lines from a log corresponding to the structured
errorsummary file."""
logger.debug('Running store_failure_lines for job %s' % job_log.job.id)
failureline.store_failure_lines(job_log)
logger.debug("Scheduling crossreference for job %i from store_failure_lines" % job_log.job.id)
crossreference_error_lines.apply_async(
args=[job_log.job.id, priority],
routing_key="crossreference_error_lines.%s" % priority)
@retryable_task(name='crossreference-error-lines', max_retries=10)
def crossreference_error_lines(job_id, priority):
"""Match structured (FailureLine) and unstructured (TextLogError) lines
for a job."""
newrelic.agent.add_custom_parameter("job_id", job_id)
logger.debug("Running crossreference-error-lines for job %s" % job_id)
job = Job.objects.get(id=job_id)
has_lines = crossreference_job(job)
if has_lines and settings.AUTOCLASSIFY_JOBS:
logger.debug("Scheduling autoclassify for job %i" % job_id)
autoclassify.apply_async(
args=[job_id],
routing_key="autoclassify.%s" % priority)
elif not settings.AUTOCLASSIFY_JOBS:
job.autoclassify_status = Job.SKIPPED
job.save(update_fields=['autoclassify_status'])
else:
logger.debug("Job %i didn't have any crossreferenced lines, skipping autoclassify " % job_id)
|
kapy2010/treeherder
|
treeherder/log_parser/tasks.py
|
Python
|
mpl-2.0
| 3,773
|
import time
from tornado.httpclient import HTTPRequest
from tornado.escape import url_escape
import test.base
from models import User, Sharedfile, Sourcefile, Conversation, Comment
class ConversationTests(test.base.BaseAsyncTestCase):
def setUp(self):
super(ConversationTests, self).setUp()
self.admin = User(name='admin', email='admin@mltshp.com', email_confirmed=1, is_paid=1)
self.admin.set_password('asdfasdf')
self.admin.save()
self.user2 = User(name='user2', email='user2@example.com', email_confirmed=1, is_paid=1)
self.user2.set_password('asdfasdf')
self.user2.save()
self.sid = self.sign_in('user2', 'asdfasdf')
self.xsrf = self.get_xsrf()
self.src = Sourcefile(width=1, height=1, file_key='asdf', thumb_key='qwer')
self.src.save()
self.shf = Sharedfile(source_id=self.src.id, user_id=self.admin.id, name='shared.jpg', title='shared', share_key='1', content_type='image/jpg')
self.shf.save()
def test_creating_a_new_comment_creates_a_conversation(self):
request = HTTPRequest(self.get_url('/p/%s/comment' % self.shf.share_key), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "body=%s&_xsrf=%s" % (url_escape("a comment"), self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
conversations = Conversation.all()
self.assertEqual(len(conversations), 2)
def test_creating_a_new_comment_does_not_create_a_duplicate_conversation(self):
request = HTTPRequest(self.get_url('/p/%s/comment' % self.shf.share_key), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "body=%s&_xsrf=%s" % (url_escape("a comment"), self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
request = HTTPRequest(self.get_url('/p/%s/comment' % self.shf.share_key), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "body=%s&_xsrf=%s" % (url_escape("a second comment"), self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
conversations = Conversation.all()
self.assertEqual(len(conversations), 2)
def test_another_user_commenting_will_update_the_files_activity_at(self):
request = HTTPRequest(self.get_url('/p/%s/comment' % self.shf.share_key), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "body=%s&_xsrf=%s" % (url_escape("a comment"), self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
time.sleep(1)
sf = Sharedfile.get('id=%s', self.shf.id)
activity_one = sf.activity_at
request = HTTPRequest(self.get_url('/p/%s/comment' % self.shf.share_key), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "body=%s&_xsrf=%s" % (url_escape("a second comment"), self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
sf = Sharedfile.get('id=%s', self.shf.id)
activity_two = sf.activity_at
self.assertTrue(activity_two > activity_one)
def test_deleting_a_file_will_set_conversation_to_muted(self):
request = HTTPRequest(self.get_url('/p/%s/comment' % self.shf.share_key), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "body=%s&_xsrf=%s" % (url_escape("a comment"), self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
request = HTTPRequest(self.get_url('/p/%s/comment' % self.shf.share_key), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "body=%s&_xsrf=%s" % (url_escape("a second comment"), self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
self.shf.delete()
conversations = Conversation.all()
self.assertEqual(conversations[0].muted, 1)
self.assertEqual(conversations[1].muted, 1)
def test_muting_conversation(self):
"""
Add a comment, which will create a conversation for the commenter (user2) and sharedfile owner (admin).
When user2 tries to mute admin's conversation, it should fail and admin's conversation state will remain
unchanged. When user2 mutes their own conversation, the "muted" flag should change to true.
Contingent on user2 being signed in. (see setUp)
"""
comment = Comment(sharedfile_id=self.shf.id, user_id=self.user2.id, body='test')
comment.save()
admin_conversation = Conversation.get('user_id = %s', self.admin.id)
user2_conversation = Conversation.get('user_id = %s', self.user2.id)
self.assertEqual(admin_conversation.muted, 0)
self.assertEqual(user2_conversation.muted, 0)
request = HTTPRequest(self.get_url('/conversations/%s/mute' % admin_conversation.id), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "_xsrf=%s" % (self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
request = HTTPRequest(self.get_url('/conversations/%s/mute' % user2_conversation.id), 'POST', {'Cookie':'_xsrf=%s;sid=%s' % (self.xsrf, self.sid)}, "_xsrf=%s" % (self.xsrf))
self.http_client.fetch(request, self.stop)
response = self.wait()
# refetch from DB, and verify mute flags remain 0.
admin_conversation = Conversation.get('user_id = %s', self.admin.id)
user2_conversation = Conversation.get('user_id = %s', self.user2.id)
self.assertEqual(admin_conversation.muted, 0)
self.assertEqual(user2_conversation.muted, 1)
def test_order_of_conversations_changes_when_new_comment_is_created(self):
pass
|
MLTSHP/mltshp
|
test/functional/conversations_tests.py
|
Python
|
mpl-2.0
| 5,863
|
# Generated by Django 2.2.13 on 2020-06-06 18:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0006_auto_20200603_0522'),
]
operations = [
migrations.AddField(
model_name='account',
name='protected',
field=models.BooleanField(default=False, help_text='Is this Twitter account protected (locked)?'),
),
]
|
jeromecc/doctoctocbot
|
src/bot/migrations/0007_account_protected.py
|
Python
|
mpl-2.0
| 445
|
# -*- coding: utf-8 -*-
import asyncio
import requests
from cli_common.log import get_logger
from cli_common.phabricator import PhabricatorAPI
from cli_common.pulse import run_consumer
from cli_common.utils import retry
from pulselistener import task_monitoring
from pulselistener.hook import Hook
from pulselistener.hook import PulseHook
logger = get_logger(__name__)
class HookPhabricator(Hook):
'''
Taskcluster hook handling the static analysis
for Phabricator differentials
'''
latest_id = None
def __init__(self, configuration):
assert 'hookId' in configuration
super().__init__(
'project-releng',
configuration['hookId'],
)
# Connect to Phabricator API
assert 'phabricator_url' in configuration
assert 'phabricator_token' in configuration
self.api = PhabricatorAPI(
api_key=configuration['phabricator_token'],
url=configuration['phabricator_url'],
)
# List enabled repositories
enabled = configuration.get('repositories', ['mozilla-central', ])
self.repos = {
r['phid']: r
for r in self.api.list_repositories()
if r['fields']['name'] in enabled
}
assert len(self.repos) > 0, 'No repositories enabled'
logger.info('Enabled Phabricator repositories', repos=[r['fields']['name'] for r in self.repos.values()])
# Start by getting top id
diffs = self.api.search_diffs(limit=1)
assert len(diffs) == 1
self.latest_id = diffs[0]['id']
def list_differential(self):
'''
List new differential items using pagination
using an iterator
'''
cursor = self.latest_id
while cursor is not None:
diffs, cursor = self.api.search_diffs(
order='oldest',
limit=20,
after=self.latest_id,
output_cursor=True,
)
if not diffs:
break
for diff in diffs:
yield diff
# Update the latest id
if cursor and cursor['after']:
self.latest_id = cursor['after']
elif len(diffs) > 0:
self.latest_id = diffs[-1]['id']
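    # Illustrative consumption (ids hypothetical): iterating over
    # list_differential() yields raw diff dicts such as
    # {'id': 4242, 'phid': 'PHID-DIFF-...'}, and self.latest_id advances
    # as pages are consumed.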
async def build_consumer(self, *args, **kwargs):
'''
Query phabricator differentials regularly
'''
while True:
# Get new differential ids
for diff in self.list_differential():
if diff['type'] != 'DIFF':
logger.info('Skipping differential, not a diff', id=diff['id'], type=diff['type'])
continue
# Load revision to check the repository is authorized
rev = self.api.load_revision(diff['revisionPHID'])
repo_phid = rev['fields']['repositoryPHID']
if repo_phid not in self.repos:
logger.info('Skipping differential, repo not enabled', id=diff['id'], repo=repo_phid)
continue
# Create new task
await self.create_task({
'ANALYSIS_SOURCE': 'phabricator',
'ANALYSIS_ID': diff['phid']
})
# Sleep a bit before trying new diffs
await asyncio.sleep(60)
class HookCodeCoverage(PulseHook):
'''
Taskcluster hook handling the code coverage
'''
def __init__(self, configuration):
assert 'hookId' in configuration
self.triggered_groups = set()
super().__init__(
'project-releng',
configuration['hookId'],
'exchange/taskcluster-queue/v1/task-group-resolved',
'*.*.gecko-level-3._'
)
def is_coverage_task(self, task):
return any(task['task']['metadata']['name'].startswith(s) for s in ['build-linux64-ccov', 'build-win64-ccov'])
def is_mozilla_central_task(self, task):
if 'GECKO_HEAD_REPOSITORY' not in task['task']['payload']['env']:
logger.warn('Received groupResolved notification for a task without GECKO_HEAD_REPOSITORY', task_id=task['status']['taskId'])
return False
repo = task['task']['payload']['env']['GECKO_HEAD_REPOSITORY']
if repo != 'https://hg.mozilla.org/mozilla-central':
logger.warn('Received groupResolved notification for a non-mozilla-central coverage task', repo=repo)
return False
return True
def get_build_task_in_group(self, group_id):
if group_id in self.triggered_groups:
logger.info('Received duplicated groupResolved notification', group=group_id)
return None
def maybe_trigger(tasks):
for task in tasks:
if self.is_coverage_task(task):
self.triggered_groups.add(group_id)
return task
return None
list_url = 'https://queue.taskcluster.net/v1/task-group/{}/list'.format(group_id)
def retrieve_coverage_task():
r = requests.get(list_url, params={
'limit': 200
})
r.raise_for_status()
reply = r.json()
task = maybe_trigger(reply['tasks'])
while task is None and 'continuationToken' in reply:
r = requests.get(list_url, params={
'limit': 200,
'continuationToken': reply['continuationToken']
})
r.raise_for_status()
reply = r.json()
task = maybe_trigger(reply['tasks'])
return task
try:
return retry(retrieve_coverage_task)
except requests.exceptions.HTTPError:
return None
def parse(self, body):
'''
Extract revisions from payload
'''
taskGroupId = body['taskGroupId']
build_task = self.get_build_task_in_group(taskGroupId)
if build_task is None:
return None
if not self.is_mozilla_central_task(build_task):
return None
logger.info('Received groupResolved notification for coverage builds', revision=build_task['task']['payload']['env']['GECKO_HEAD_REV'], group=taskGroupId) # noqa
return [{
'REVISION': build_task['task']['payload']['env']['GECKO_HEAD_REV'],
}]
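# Illustrative contract of HookCodeCoverage.parse (shape inferred from the
# code above; values hypothetical):
#   parse({'taskGroupId': 'abc123'}) -> [{'REVISION': '<hg revision>'}]
# when the group holds a mozilla-central coverage build, else None.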
class PulseListener(object):
'''
Listen to pulse messages and trigger new tasks
'''
def __init__(self,
pulse_user,
pulse_password,
hooks_configuration,
taskcluster_client_id=None,
taskcluster_access_token=None,
):
self.pulse_user = pulse_user
self.pulse_password = pulse_password
self.hooks_configuration = hooks_configuration
self.taskcluster_client_id = taskcluster_client_id
self.taskcluster_access_token = taskcluster_access_token
task_monitoring.connect_taskcluster(
self.taskcluster_client_id,
self.taskcluster_access_token,
)
def run(self):
# Build hooks for each conf
hooks = [
self.build_hook(conf)
for conf in self.hooks_configuration
]
if not hooks:
raise Exception('No hooks created')
# Run hooks pulse listeners together
# but only use hooks with active definitions
consumers = [
hook.build_consumer(self.pulse_user, self.pulse_password)
for hook in hooks
if hook.connect_taskcluster(
self.taskcluster_client_id,
self.taskcluster_access_token,
)
]
# Add monitoring process
consumers.append(task_monitoring.run())
# Run all consumers together
run_consumer(asyncio.gather(*consumers))
def build_hook(self, conf):
'''
Build a new hook instance according to configuration
'''
assert isinstance(conf, dict)
assert 'type' in conf
classes = {
'static-analysis-phabricator': HookPhabricator,
'code-coverage': HookCodeCoverage,
}
hook_class = classes.get(conf['type'])
if hook_class is None:
raise Exception('Unsupported hook {}'.format(conf['type']))
return hook_class(conf)
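# Illustrative wiring only (not part of the original module); the credential
# values and hook id below are hypothetical placeholders:
#
#   listener = PulseListener(
#       pulse_user='example-user',
#       pulse_password='example-password',
#       hooks_configuration=[
#           {'type': 'code-coverage', 'hookId': 'example-hook-id'},
#       ],
#   )
#   listener.run()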
|
lundjordan/services
|
src/pulselistener/pulselistener/listener.py
|
Python
|
mpl-2.0
| 8,476
|
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright 2017-2018 - Edoardo Morassutto <edoardo.morassutto@gmail.com>
# Copyright 2017 - Luca Versari <veluca93@gmail.com>
# Copyright 2017 - Massimo Cairo <cairomassimo@gmail.com>
from werkzeug.exceptions import InternalServerError, BadRequest
from terry.handlers.base_handler import BaseHandler
from terry.handlers.info_handler import InfoHandler
from terry.contest_manager import ContestManager
from terry.database import Database
from terry.detect_exe import get_exeflags
from terry.logger import Logger
from terry.storage_manager import StorageManager
from terry.validators import Validators
class UploadHandler(BaseHandler):
@Validators.during_contest
@Validators.register_user_ip
@Validators.validate_input_id
@Validators.validate_file
def upload_output(self, input, file):
"""
POST /upload_output
"""
output_id = Database.gen_id()
try:
path = StorageManager.new_output_file(output_id, file["name"])
except ValueError:
BaseHandler.raise_exc(BadRequest, "INVALID_FILENAME",
"The provided file has an invalid name")
StorageManager.save_file(path, file["content"])
file_size = StorageManager.get_file_size(path)
try:
result = ContestManager.evaluate_output(input["task"],
input["path"], path)
        except Exception:
BaseHandler.raise_exc(InternalServerError, "INTERNAL_ERROR",
"Failed to evaluate the output")
Database.add_output(output_id, input["id"], path, file_size, result)
Logger.info("UPLOAD", "User %s has uploaded the output %s" % (
input["token"], output_id))
return InfoHandler.patch_output(Database.get_output(output_id))
@Validators.during_contest
@Validators.register_user_ip
@Validators.validate_input_id
@Validators.validate_file
def upload_source(self, input, file):
"""
POST /upload_source
"""
alerts = []
if get_exeflags(file["content"]):
alerts.append({
"severity": "warning",
"message": "You have submitted an executable! Please send the "
"source code."
})
Logger.info("UPLOAD",
"User %s has uploaded an executable" % input["token"])
if not alerts:
alerts.append({
"severity": "success",
"message": "Source file uploaded correctly."
})
source_id = Database.gen_id()
try:
path = StorageManager.new_source_file(source_id, file["name"])
except ValueError:
BaseHandler.raise_exc(BadRequest, "INVALID_FILENAME",
"The provided file has an invalid name")
StorageManager.save_file(path, file["content"])
file_size = StorageManager.get_file_size(path)
Database.add_source(source_id, input["id"], path, file_size)
Logger.info("UPLOAD", "User %s has uploaded the source %s" % (
input["token"], source_id))
output = BaseHandler.format_dates(Database.get_source(source_id))
output["validation"] = {"alerts": alerts}
return output
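# Illustrative response shape for upload_source (field names besides
# "validation" are hypothetical; the exact keys come from Database.get_source):
#
#   {"id": "<source id>",
#    "input": "<input id>",
#    "validation": {"alerts": [{"severity": "success",
#                               "message": "Source file uploaded correctly."}]}}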
|
algorithm-ninja/territoriali-backend
|
terry/handlers/upload_handler.py
|
Python
|
mpl-2.0
| 3,564
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette_driver.by import By
from marionette_harness import MarionetteTestCase, WindowManagerMixin
class TestClickChrome(WindowManagerMixin, MarionetteTestCase):
def setUp(self):
super(TestClickChrome, self).setUp()
self.marionette.set_context("chrome")
def tearDown(self):
self.close_all_windows()
super(TestClickChrome, self).tearDown()
def test_click(self):
def open_with_js():
self.marionette.execute_script("""
window.open('chrome://marionette/content/test.xul',
'foo', 'chrome,centerscreen'); """)
win = self.open_window(open_with_js)
self.marionette.switch_to_window(win)
def checked():
return self.marionette.execute_script(
"return arguments[0].checked",
script_args=[box])
box = self.marionette.find_element(By.ID, "testBox")
self.assertFalse(checked())
box.click()
self.assertTrue(checked())
|
Yukarumya/Yukarum-Redfoxes
|
testing/marionette/harness/marionette_harness/tests/unit/test_click_chrome.py
|
Python
|
mpl-2.0
| 1,226
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
####################################################################################
##
## File: concant_properties.py
## Authors: Clinton De Young, Tyler Peterson and Dave Mamanakis
## Date: November 2, 2010
##
## -------------------------------------------------------------------------------
## Description: Pseudo Localizes all strings in the code bases
##
####################################################################################
import sys
import getopt
import os
import io
import codecs
from subprocess import Popen, PIPE
import re
import shutil
from exceptions import IOError
from random import randint
from Properties import Properties
from pprint import pformat
from HTMLParser import HTMLParser
class PseudoTranslate(object):
'''
The PseudoTranslate class takes a string, converts it to a pseudo
localized representation, and then regurgitates it as a flowing mess to
the caller via a get method.
'''
# This is a list of the supported targets. Whenever you accept user
# input, you should validate against this list to make sure that they
# have selected a valid target before proceeding. The "all" target
# will generate a mixed language build.
__supported_targets = [
'ja_JP',
'ko_KR',
'zh_TW',
'de_DE',
'fr_FR',
'it_IT',
'es_MX',
'pt_BR',
'ru_RU',
'all'
]
    # Any supported language may be displayed regardless of the language
    # the UI is set to, so we should test pseudo-localized builds using
    # strings from all supported languages.
__mixed_language_strings = [
# Japanese Chars
'鼻毛:',
'指先:',
'眉毛:',
'ひれ:',
'ヘビ:',
'カブ:',
'子供:',
'日本:',
'言語:',
'馬鹿:',
# Korean Chars
'영어:',
'소금:',
'트럭:',
'히피:',
'포크:',
'토성:',
'아픈:',
'오리:',
'얼음:',
'극지:',
# DO NOT FORGET: There are several TYPES of Chinese Characters:
# CJK-Ext.A, CJK-Ext.B, CJK-Ext.C, CJK-Ext.D (They may require different fonts for support).
# Chinese Chars
'孩子:',
'嬉皮:',
'雲彩:',
'占星:',
'胡說:',
'膀胱:',
'沙拉:',
'蠢貨:',
'烘烤:',
'蝸牛:',
# Russian Chars
'да:',
'ща:',
'по:',
'не:',
'из:',
'за:',
'Ий:',
'дя:',
'ИФ:',
'ья:',
# Latin Chars
'Ãé:',
'Ûç:',
'Çó:',
'Ñá:',
'Ýň:',
'Èç:',
'Ìë:',
'Îú:',
'Öà:',
'Ūê:',
]
# In a pseudo localized build, all vowels and some consonants will be
# replaced with extended characters that look similar. This helps test
# whether or not a string has been properly placed in resource files.
# Regardless of the type of pseudo build you do (all languages vs. only
# the target UI language), this hash will be used.
__replacement_characters = {
'A':['À','Á','Â','Ã','Ä','Å','Ā','Ą','Ă','Ѧ'],
'C':['Ç','Ć','Č','Ĉ','Ċ'],
'D':['Ď','Đ'],
'E':['È','É','Ê','Ë','Ē','Ę','Ě','Ĕ','Ė','Э','Ѯ'],
'G':['Ĝ','Ğ','Ġ','Ģ'],
'H':['Ĥ','Ħ'],
'I':['Ì','Í','Î','Ï','Ī','Ĩ','Ĭ','Į','İ'],
'J':['Ĵ'],
'K':['Ķ'],
'L':['Ł','Ľ','Ĺ','Ļ','Ŀ'],
'N':['Ñ','Ń','Ň','Ņ','Ŋ','П','И'],
'O':['Ò','Ó','Ô','Õ','Ö','Ø','Ō','Ő','Ŏ'],
'R':['Ŕ','Ř','Ŗ','Я'],
'S':['Ś','Š','Ş','Ŝ','Ș'],
'T':['Ť','Ţ','Ŧ','Ț'],
'U':['Ù','Ú','Û','Ü','Ū','Ů','Ű','Ŭ','Ũ','Ų','Ц'],
'V':['Ѵ'],
'W':['Ŵ','Ш','Щ','Ѡ'],
'X':['Ж'],
'Y':['Ý','Ŷ','Ÿ'],
'Z':['Ź','Ž','Ż'],
        'a':['à','á','â','ã','ä','å','ā','ą','ă'],
'b':['Б','Ъ','Ь','Ѣ'],
'c':['ç','ć','č','ĉ','ċ'],
'd':['ď','đ'],
'e':['è','é','ê','ë','ē','ę','ě','ĕ','ė'],
'f':['ƒ'],
'g':['ĝ','ğ','ġ','ģ'],
'h':['ĥ','ħ'],
'i':['ì','í','î','ï','ī','ĩ','ĭ','į','ı'],
'j':['ĵ'],
'k':['ķ','ĸ'],
'l':['ł','ľ','ĺ','ļ','ŀ'],
'n':['ñ','ń','ň','ņ','ʼn','ŋ'],
'o':['ò','ó','ô','õ','ö','ø','ō','ő','ŏ','Ф'],
'r':['ŕ','ř','ŗ','я'],
's':['ś','š','ş','ŝ','ș'],
't':['ť','ţ','ŧ','ț'],
'u':['ù','ú','û','ü','ū','ů','ű','ŭ','ũ','ų'],
'v':['ѵ'],
'w':['ŵ'],
'y':['ý','ÿ','ŷ','Ч','Ѱ'],
'z':['ž','ż','ź'],
}
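    # Illustrative effect of the table above (character picks are random, so
    # output varies): "Window" might become "Ŵïñđóŵ" before padding and the
    # mixed-language suffix are applied.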
__pseudo_string = ""
__string_package = ""
__target = ""
def __init__(self, str):
        # Store the input string, select the mixed-language end-text
        # package, and pseudo-localize it immediately.
self.__str = str
self.__string_package = self.__mixed_language_strings
self.__storage = {}
self.__pseudo_localize()
def __pseudo_localize(self):
'''
__pseudo_localize does the work of making the Pseudo Strings
'''
temp = ''
s = 0
grab_em = ''
# Replace any characters that exist as keys in the
# self.__replacement_characters hash with a random character
# from the appropriate value list.
set_me = False
num = 0
        # We had to account for some of the escaped characters, "&lt;"
for char in self.__str:
if char == "&":
num += 1
set_me = True
if char == ";":
set_me = False
if set_me is True:
temp += char
continue
else:
if self.__replacement_characters.has_key(char):
temp += self.__replacement_characters[char][randint(0, len(self.__replacement_characters[char]) - 1)]
else:
temp += char
# Expand the string
self.__pseudo_string = self.__expand_string(temp, num)
def __expand_string(self, str, num):
'''
expand_string(str) - Take a string as an argument and adds
padding onto the end according to the following rules:
English String | Expansion Factor
1 - 5 | of about 15 characters
6 - 25 | of about 2.2 times
26 - 40 | of about 1.9 times
41 - 70 | of about 1.7 times
71 + | of about 1.5 times
'''
end_text = self.__string_package[randint(0, len(self.__string_package) - 1)]
gorgon = ''
alpha = ''
beta = ''
gamma = ''
# You have to convert the string to unicode in order to count
# the number of characters since Python only counts bytes in
# UTF-8 strings.
length = len(unicode(str, 'utf8'))
existing_text = length + len(unicode(end_text, 'utf8'))
if num > 0:
times = num * 6
minus = length - times
length = minus
if length == 0:
end_text = ''
################################################################
### Use this if you want to add extensions to the end of ###
### each of the words in a paragraph, but the end text at ###
### the end of the paragraph, not the word. ###
################################################################
alterations = str.split()
for alpha in alterations:
length = len(unicode(alpha, 'utf8'))
existing_text = length + len(unicode(end_text, 'utf8'))
if length <= 0:
expansion = ''
elif length > 0 and length <= 5:
expansion = '_' * (9 - existing_text)
elif length >= 6 and length <= 25:
expansion = '_' * (int(length * 1.9) - existing_text)
elif length >= 26 and length <= 40:
expansion = '_' * (int(length * 1.6) - existing_text)
elif length >= 41 and length <= 70:
expansion = '_' * (int(length * 1.3) - existing_text)
else:
expansion = '_' * (int(length * 1.0) - existing_text)
beta = alpha + expansion + ' '
gamma += beta
gorgon = gamma + ":" + end_text
################################################################
### Use this if you want to add extensions to the end of the ###
### paragraph. ###
################################################################
#if length <= 0:
#expansion = ''
#elif length > 0 and length <= 5:
#expansion = '_' * (15 - existing_text)
#elif length >= 6 and length <= 25:
#expansion = '_' * (int(length * 2.2) - existing_text)
#elif length >= 26 and length <= 40:
#expansion = '_' * (int(length * 1.9) - existing_text)
#elif length >= 41 and length <= 70:
#expansion = '_' * (int(length * 1.7) - existing_text)
#else:
#expansion = '_' * (int(length * 1.5) - existing_text)
#gorgon = str + expansion + end_text
return gorgon
def get_pseudo_str(self):
return self.__pseudo_string
def set_pseudo_str(self, str):
self.__str = str
self.__pseudo_localize()
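# Illustrative usage (output is randomized, so the exact characters vary):
#
#   p = PseudoTranslate("Save File")
#   p.get_pseudo_str()   # e.g. 'Šàѵê__ Fïłé__ :극지:'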
class FilesAndStrings(object):
'''
The FilesAndStrings class recursively parses the specfied path and
returns all localizable properties files code base.
'''
def __init__(self, path):
self.__path = path
self.__files = []
self.__complete_files = []
self.__translations = []
self.__filter_1_files = [] # Only the .properties files
self.__filter_2_files = []
self.__filter_3_files = []
self.__filter_4_files = []
self.__temporary_debug_1 = []
self.__temporary_debug_2 = []
# We set this list for all the current languages we use in the
# product we are only interested in the ENGLISH file for this
# task
self.__ignore = [
'_fr',
'_de',
'_es',
'_pt',
'_ja',
'_zh',
'_ko',
'_it',
'_ru',
'_th',
'_nl',
'_BR',
'_CN',
'_JP',
'_KR',
'_TW',
'_eo',
'_EO',
]
self.__generate_file_list()
self.__generate_string_list()
def __generate_file_list(self):
'''
        __generate_file_list gets a recursive list of properties files
        from the directory specified in --path.
'''
cmd = "/usr/bin/find '%s' -iname '*.properties' -print" % self.__path
findProcess = Popen(cmd, shell=True, stdout=PIPE)
htmfiles = findProcess.communicate()[0]
self.__files = htmfiles.split()
for file in self.__files:
            # Run the first bit of filtering to drop any foreign-language
            # or extra (non-language) properties files; we only need the
            # English, localizable files.
if ".properties" in file:
left, right = file.split(".", 1)
check = left[-3:]
double_check = left[-13:]
# if the file name contains the last 3 chars of
# some foreign language indicator, ignore it.
if check in self.__ignore:
continue
# if this is the last bit on the file name,
# ignore it.
elif double_check == '_unicodeasian':
continue
# anything in a snapshot directory, ignore it.
elif 'SNAPSHOT' in file or 'target' in file:
continue
                # This should leave us with only the English,
                # localizable properties files, listed.
else:
self.__filter_1_files.append(file)
# For (Frankie) only:
# We filter anything except what is in the following directories
# if they exist
for frankie in self.__filter_1_files:
if 'www-catalogapi/trunk/www-catalogapi-domain/src/main/resources/bundles/' in frankie:
self.__filter_2_files.append(frankie)
elif 'www-searchapi/trunk/www-searchapi/src/main/resources/bundles/' in frankie:
self.__filter_2_files.append(frankie)
elif 'www-web/trunk/www-web-app/grails-app/i18n/' in frankie:
self.__filter_2_files.append(frankie)
else:
continue
# if we don't have anything in the second filter, fill it
if not self.__filter_2_files:
self.__filter_2_files = self.__filter_1_files
# For SISU or other related structures:
# Removing any of the directories for foreign languages.
for sisu in self.__filter_2_files:
if 'de_DE' in sisu:
continue
elif 'es_ES' in sisu:
continue
elif 'fr_FR' in sisu:
continue
elif 'it_IT' in sisu:
continue
elif 'ja_JP' in sisu:
continue
elif 'ko_KR' in sisu:
continue
elif 'pt_BR' in sisu:
continue
elif 'ru_RU' in sisu:
continue
elif 'zh_TW' in sisu:
continue
elif 'eo_EO' in sisu:
continue
elif 'java' in sisu:
continue
elif 'en_US' in sisu:
self.__filter_3_files.append(sisu)
else:
self.__complete_files.append(sisu)
# put the filtered results into the final list
if self.__filter_3_files:
self.__complete_files = self.__filter_3_files
# For Gadgets or other related structures:
# Removing any of the non-language properties.
for gadget in self.__complete_files:
if "langs" in gadget:
self.__filter_4_files.append(gadget)
else:
continue
if self.__filter_4_files:
self.__complete_files = self.__filter_4_files
def __generate_string_list(self):
'''
__generate_string_list gets a list of all the strings and IDs
contained in the list of properties files.
'''
j = 0
strings = {}
faux = {}
# Initialize the HTML Parser to read the HTML
MHP = My_Html_Parser();
# iterate the items in the files list to work on them
for x_file in self.__complete_files:
# Create the OutPut file name (the PSEUDO file)
# From FLEX
if "en_US" in x_file:
new_file = x_file.replace("en_US", "eo_EO")
#From Frankie
elif "www-searchapi" in x_file or "www-web" in x_file or "www-catalogapi" in x_file:
new_file = x_file.replace(".properties", "_eo.properties")
#From Gadgets
elif "langs" in x_file and ".properties" in x_file:
new_file = x_file.replace(".properties", "_eo.properties")
#From Classic
elif "localization" in x_file and ".properties" in x_file:
new_file = x_file.replace(".properties", "_unicodeasian.properties")
self.__temporary_debug_1.append(x_file)
else:
self.__temporary_debug_2.append(x_file)
continue
# Initialize the English Properties Files via this
# program (creates a Dictionary of the Properties)
eng_props = Properties()
eng_props.load(open(x_file))
# We get the Dictionary for the Default Properties File
engProps = eng_props.getPropertyDict()
# Now we iterate the properties to make the list for
# PseudoLocalization
for q in engProps:
value = engProps[q]
MHP.reset()
MHP.my_init()
                # We sometimes use "&lt;" or "&gt;" in our strings.
                # Such values bypass the HTML parser below, to
                # prevent parsing errors.
                # If errors happen, modify this section
                if "&lt;" in value or "&gt;" in value:
                    if value == "&lt;&lt;First Page":
value = "<<First Page"
# Make note of the pseudo
# localized string in the "faux"
# dictionary
p = PseudoTranslate(value)
# Continue Building the Snippet
# with either the Localized
# Version or the English Version
tran_tmp = p.get_pseudo_str()
tmp_tran = tran_tmp.replace ("<", "<")
elif value == "Last Page>>":
value = "Last Page>>"
# Make note of the pseudo
# localized string in the "faux"
# dictionary
p = PseudoTranslate(value)
# Continue Building the Snippet
# with either the Localized
# Version or the English Version
tran_tmp = p.get_pseudo_str()
tmp_tran = tran_tmp.replace (">", ">")
elif value == "<Previous":
value = "<Previous"
# Make note of the pseudo
# localized string in the "faux"
# dictionary
p = PseudoTranslate(value)
# Continue Building the Snippet
# with either the Localized
# Version or the English Version
tran_tmp = p.get_pseudo_str()
tmp_tran = tran_tmp.replace ("<", "<")
elif value == "Next>":
value = "Next>"
# Make note of the pseudo
# localized string in the "faux"
# dictionary
p = PseudoTranslate(value)
# Continue Building the Snippet
# with either the Localized
# Version or the English Version
tran_tmp = p.get_pseudo_str()
tmp_tran = tran_tmp.replace (">", ">")
elif value == "<Back":
value = "<Back"
# Make note of the pseudo
# localized string in the "faux"
# dictionary
p = PseudoTranslate(value)
# Continue Building the Snippet
# with either the Localized
# Version or the English Version
tran_tmp = p.get_pseudo_str()
tmp_tran = tran_tmp.replace ("<", "<")
else:
# Read the string into the HTML
# Parser
MHP.feed(value)
tmp_tran = MHP.get_the_answer()
if tmp_tran == '':
MHP.my_init()
MHP.reset()
try_me = "<body>" + value + "</body>"
MHP.feed(try_me)
step_1 = MHP.get_the_answer()
step_2 = step_1.replace("<body>","")
step_3 = step_2.replace("</body>","")
tmp_tran = step_3
if tmp_tran == '':
print x_file + "\n" + q + "=" + value
else:
# Make note of the pseudo localized
# string in the "faux" dictionary
p = PseudoTranslate(value)
# Continue Building the Snippet with
# either the Localized Version or the
# English Version
tmp_tran = p.get_pseudo_str()
# Rebuild the format PROPERTY_ID = PROPERTY
self.__translations.append(q + "=" + tmp_tran)
            # Open the new output file and write its new content
N = codecs.open(new_file, 'w')
print "this is #%s of %s files to do" % (j+1, len(self.__complete_files))
j += 1
for write_me in self.__translations:
N.write(write_me + "\n")
N.close()
self.__translations = []
class GetUserInput(object):
'''
The GetUserInput class grabs command line arguments from the user and
makes them available
'''
__path = ""
def __init__(self):
try:
options, arguments = getopt.getopt(sys.argv[1:], 'hp:', ['help', 'path='])
except getopt.GetoptError, e:
print "A dark buzzard has fouled our breakfast bowl with the following droppings:\n"
print e
sys.exit(1)
if len(options) == 0:
print "You have not specified any command line arguments and I cannot read\nyour mind. You may try using one or more of these:\n"
self.__usage()
sys.exit(1)
for o,a in options:
if o in ('-h', '--help'):
self.__usage()
sys.exit(0)
if o in ('-p', '--path'):
self.__path = a
if not os.path.exists(self.__path):
print "\nYou have selected an invalid path François. Please try again.\n"
self.__usage()
sys.exit(1)
def __usage(self):
print '----------------------------------------------------------------------'
print 'USAGE:\n'
print '\n'
print '-h, --help\tPrint this help message.'
print '\n'
print '-p, --path\tSpecify the path to pull the strings from.\n'
print '\n'
print 'Example: pseudo.py -p /home/billy/sandbox/code/\n'
print '----------------------------------------------------------------------'
def get_path(self):
return self.__path
class InvalidTargetException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class My_Html_Parser(HTMLParser):
'''
This is the HTML Parsing Routine
'''
def my_init(self):
'''
my_init initializes for the HTML Parsing
'''
self.__property_file_whole = ''
self.__the_return_value = ''
self.__translation = ''
self.snippet = ''
self.textBuffer = ''
self.__findTags = ['alt', 'title']
def handle_starttag(self, tag, attrs):
'''
handle_starttag will identify and handle any Start Tags in the
HTML
'''
# If an end_text is encountered
self.__handle_end_text()
# Get the Start Tag
starttag = self.get_starttag_text()
if starttag == '<Select>':
            a = starttag.replace("<", "&lt;")
            b = a.replace(">", "&gt;")
starttag = b
# Make note of the pseudo localized string in the "faux"
# dictionary
p = PseudoTranslate(starttag)
# Continue Building the Snippet with either the Localized
# Version or the English Version
trantag = p.get_pseudo_str()
#starttag = trantag + ">"
starttag = trantag
# We had to deal with "input" in the HTML
for d in range(0, len(attrs)):
spork, foon = attrs[d]
if "input" in starttag:
if spork == "type":
if foon != "checkbox" and foon != "RADIO" and foon != "radio":
self.__findTags.append("value")
if spork in self.__findTags:
need_tran = foon
# Make note of the pseudo localized string in
# the "faux" dictionary
p = PseudoTranslate(need_tran)
# Continue Building the Snippet with either the
# Localized Version or the English Version
tranattr = p.get_pseudo_str()
#attrs[d] = spork, tranattr
after = spork + '="' + tranattr + '"'
attrs[d] = spork, tranattr
jumbo = []
jumbo = attrs
# Other issues to overcome with blank properties
# or properties that had odd names (NULL, etc)
for y in range(0, len(jumbo)+1):
if y == 0:
elmer, glue = attrs[y]
try:
temp = elmer + '="' + glue + '" '
except:
temp = elmer + '="None" '
new_starttag = "<" + temp
elif y > 0 and y < len(jumbo)-1:
elmer, glue = attrs[y]
try:
if glue == '':
temp = elmer + glue
else:
temp = elmer + '="' + glue + '" '
except:
temp = elmer + '="None" '
new_starttag += temp
elif y == len(jumbo)-1:
elmer, glue = attrs[y]
try:
temp = elmer + '="' + glue + '"'
except:
temp = elmer + '="None" '
new_starttag += temp
elif y == len(jumbo):
new_starttag += ">"
starttag = new_starttag
        # We had other issues with "value"
if "value" in self.__findTags:
self.__findTags.remove("value")
# Continue with the Snippet Creation
self.snippet += starttag
def handle_endtag(self, tag):
'''
handle_endtag will handle any of the end tags in the HTML file
'''
# If an end_text is encountered
self.__handle_end_text()
# we are still formulating the snippet
self.snippet += "</%s>" % tag
#make the big picture
self.__property_file_whole += (self.snippet)
self.snippet = ''
def __handle_end_text(self):
'''
__handle_end_text will handle any of the end text items
encountered to build the html snippet
'''
# Initialize Variables
text = self.textBuffer
self.textBuffer = ''
# While making a snippet
if text == '' or text == "?" or text == ":" or "|" in text:
self.snippet += text
else:
x = text.strip()
# Make note of the pseudo localized string in the "faux"
# dictionary
p = PseudoTranslate(x)
# Continue Building the Snippet with either the
# Localized Version or the English Version
self.__translation = p.get_pseudo_str()
# Continue Building the Snippet with either the
# Localized Version or the English Version
self.snippet += self.__translation
def handle_data(self, data):
'''
handle_data handles the data
'''
self.textBuffer += data
def handle_charref(self, name):
'''
handle_charref handles the character references
'''
self.textBuffer += name
def handle_entityref(self, name):
'''
handle_entityref handles the entity references
'''
self.textBuffer += "&" + name + ";"
def get_the_answer(self):
'''
return the prop_translation
'''
return self.__property_file_whole
if __name__ == '__main__':
# Get the user's input, i.e. the PATH to the files
gui = GetUserInput()
try:
# Execute the routine to do all the work, passing the PATH
fas = FilesAndStrings(gui.get_path())
except Exception, e:
print "Agghhh! You've been attacked by a rabid snail, who's snarling: "
print e
sys.exit(1)
|
IntelligentSigma/PseudoLocCodeExamples
|
python/pseudo.py
|
Python
|
mpl-2.0
| 23,834
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# py-bson-rpc documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 7 23:52:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# NOTE: The following line does not really work (python3 here!) !!!! :(
# Reason is a BUG: https://github.com/sphinx-doc/sphinx/issues/2046
# Workaround: symlink doc/bsonrpc pointing to ../bsonrpc
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'py-bson-rpc'
copyright = u'2016, Jussi Seppälä'
author = u'Jussi Seppälä'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
#html_theme = 'classic'
html_theme = 'sphinxdoc'
#html_theme = 'sphinx_rtd_theme'
html_theme_options = {
#'rightsidebar': 'true',
#'relbarbgcolor': 'black'
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'py-bson-rpcdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'py-bson-rpc.tex', 'py-bson-rpc Documentation',
'Jussi Seppälä', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'py-bson-rpc', 'py-bson-rpc Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'py-bson-rpc', 'py-bson-rpc Documentation',
author, 'py-bson-rpc', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
seprich/py-bson-rpc
|
doc/source/conf.py
|
Python
|
mpl-2.0
| 9,654
|
"""Example using the ray transform with fan beam geometry."""
import numpy as np
import odl
# Reconstruction space: discretized functions on the rectangle
# [-20, 20]^2 with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300], dtype='float32')
# Make a fan beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = 2 * pi
angle_partition = odl.uniform_partition(0, 2 * np.pi, 360)
# Detector: uniformly sampled, n = 512, min = -30, max = 30
detector_partition = odl.uniform_partition(-30, 30, 512)
geometry = odl.tomo.FanFlatGeometry(angle_partition, detector_partition,
src_radius=1000, det_radius=100)
# Ray transform (= forward projection).
ray_trafo = odl.tomo.RayTransform(reco_space, geometry)
# Create a discrete Shepp-Logan phantom (modified version)
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
# Create projection data by calling the ray transform on the phantom
proj_data = ray_trafo(phantom)
# Back-projection can be done by simply calling the adjoint operator on the
# projection data (or any element in the projection space).
backproj = ray_trafo.adjoint(proj_data)
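# Note: the adjoint (back-projection) is not the inverse of the ray
# transform; an approximate reconstruction could instead be computed with
# filtered back-projection, e.g. odl.tomo.fbp_op(ray_trafo)(proj_data).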
# Shows a slice of the phantom, projections, and reconstruction
phantom.show(title='Phantom')
proj_data.show(title='Projection data (sinogram)')
backproj.show(title='Back-projected data', force_show=True)
|
aringh/odl
|
examples/tomo/ray_trafo_cone_2d.py
|
Python
|
mpl-2.0
| 1,436
|
from rest_framework.renderers import JSONRenderer
class NamespaceJSONRenderer(JSONRenderer):
"A JSON renderer that wraps the result data in a namespace"
namespace = 'objects'
def render(self, data, *args, **kwargs):
data = {self.namespace: data}
return super(NamespaceJSONRenderer, self).render(data, *args, **kwargs)
class MetricsJSONRenderer(NamespaceJSONRenderer):
namespace = 'metrics'
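# Illustrative behaviour (assuming DRF's default compact JSON settings):
#   MetricsJSONRenderer().render([1, 2]) -> b'{"metrics":[1,2]}'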
|
openjck/distribution-viewer
|
viewer/api/renderers.py
|
Python
|
mpl-2.0
| 427
|
# Copyright 2012 Ben Cordero.
#
# This file is part of udpgen.
#
# udpgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# udpgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with udpgen. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'udpgen.views.home', name='home'),
# url(r'^udpgen/', include('udpgen.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += staticfiles_urlpatterns()
|
bencord0/udpgen
|
udpgen/urls.py
|
Python
|
agpl-3.0
| 1,370
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest import mock
from django.urls import reverse
from .. import factories as f
from taiga.base.utils import json
pytestmark = pytest.mark.django_db
def test_invalid_project_export(client):
user = f.UserFactory.create()
client.login(user)
url = reverse("exporter-detail", args=[1000000])
response = client.get(url, content_type="application/json")
assert response.status_code == 404
def test_valid_project_export_with_celery_disabled(client, settings):
user = f.UserFactory.create()
project = f.ProjectFactory.create(owner=user)
f.MembershipFactory(project=project, user=user, is_admin=True)
client.login(user)
url = reverse("exporter-detail", args=[project.pk])
response = client.get(url, content_type="application/json")
assert response.status_code == 200
response_data = response.data
assert "url" in response_data
assert response_data["url"].endswith(".json")
def test_valid_project_export_with_celery_disabled_and_gzip(client, settings):
user = f.UserFactory.create()
project = f.ProjectFactory.create(owner=user)
f.MembershipFactory(project=project, user=user, is_admin=True)
client.login(user)
url = reverse("exporter-detail", args=[project.pk])
response = client.get(url+"?dump_format=gzip", content_type="application/json")
assert response.status_code == 200
response_data = response.data
assert "url" in response_data
assert response_data["url"].endswith(".gz")
def test_valid_project_export_with_celery_enabled(client, settings):
settings.CELERY_ENABLED = True
user = f.UserFactory.create()
project = f.ProjectFactory.create(owner=user)
f.MembershipFactory(project=project, user=user, is_admin=True)
client.login(user)
url = reverse("exporter-detail", args=[project.pk])
#delete_project_dump task should have been launched
with mock.patch('taiga.export_import.tasks.delete_project_dump') as delete_project_dump_mock:
response = client.get(url, content_type="application/json")
assert response.status_code == 202
response_data = response.data
assert "export_id" in response_data
args = (project.id, project.slug, response_data["export_id"], "plain")
kwargs = {"countdown": settings.EXPORTS_TTL}
delete_project_dump_mock.apply_async.assert_called_once_with(args, **kwargs)
settings.CELERY_ENABLED = False
def test_valid_project_export_with_celery_enabled_and_gzip(client, settings):
settings.CELERY_ENABLED = True
user = f.UserFactory.create()
project = f.ProjectFactory.create(owner=user)
f.MembershipFactory(project=project, user=user, is_admin=True)
client.login(user)
url = reverse("exporter-detail", args=[project.pk])
#delete_project_dump task should have been launched
with mock.patch('taiga.export_import.tasks.delete_project_dump') as delete_project_dump_mock:
response = client.get(url+"?dump_format=gzip", content_type="application/json")
assert response.status_code == 202
response_data = response.data
assert "export_id" in response_data
args = (project.id, project.slug, response_data["export_id"], "gzip")
kwargs = {"countdown": settings.EXPORTS_TTL}
delete_project_dump_mock.apply_async.assert_called_once_with(args, **kwargs)
settings.CELERY_ENABLED = False
def test_valid_project_with_throttling(client, settings):
settings.REST_FRAMEWORK["DEFAULT_THROTTLE_RATES"]["import-dump-mode"] = "1/minute"
user = f.UserFactory.create()
project = f.ProjectFactory.create(owner=user)
f.MembershipFactory(project=project, user=user, is_admin=True)
client.login(user)
url = reverse("exporter-detail", args=[project.pk])
response = client.get(url, content_type="application/json")
assert response.status_code == 200
response = client.get(url, content_type="application/json")
assert response.status_code == 429
|
taigaio/taiga-back
|
tests/integration/test_exporter_api.py
|
Python
|
agpl-3.0
| 4,732
|
''' -- imports from python libraries -- '''
import os
import ast
# from datetime import datetime
import datetime
''' -- imports from installed packages -- '''
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.shortcuts import render_to_response  # , render (uncomment when needed)
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' -- imports from application folders/files -- '''
from gnowsys_ndf.ndf.org2any import org2html
from gnowsys_ndf.ndf.models import node_collection, triple_collection
from gnowsys_ndf.ndf.views.organization import *
from gnowsys_ndf.ndf.views.course import *
from gnowsys_ndf.ndf.views.person import *
from gnowsys_ndf.ndf.views.enrollment import *
from gnowsys_ndf.ndf.views.methods import get_execution_time
@get_execution_time
def mis_detail(request, group_id, app_id=None, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
    Custom view for custom GAPPS.
"""
auth = None
if ObjectId.is_valid(group_id) is False :
group_ins = node_collection.one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
app_name = app.name
app_collection_set = []
atlist = []
rtlist = []
app_set = ""
nodes = ""
# nodes_dict = ""
nodes_keys = []
app_menu = ""
app_set_template = ""
app_set_instance_template = ""
app_set_instance_name = ""
app_set_name = ""
title = ""
tags = ""
content = ""
location = ""
system = None
system_id = ""
system_type = ""
system_mime_type = ""
template = ""
property_display_order = []
events_arr = []
university_wise_students_count = []
template_prefix = "mis"
if request.user.id:
if auth is None:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
# for eachset in app.collection_set:
# app_collection_set.append(node_collection.one({"_id":eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
# app_set = node_collection.find_one({"_id":eachset})
# app_collection_set.append({"id": str(app_set._id), "name": app_set.name, 'type_of'})
if app_set_id:
app_set = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)}, {'name': 1, 'type_of': 1})
view_file_extension = ".py"
app_set_view_file_name = ""
app_set_view_file_path = ""
if app_set.type_of:
app_set_type_of = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set.type_of[0])}, {'name': 1})
app_set_view_file_name = app_set_type_of.name.lower().replace(" ", "_")
# print "\n app_set_view_file_name (type_of): ", app_set_view_file_name, "\n"
else:
app_set_view_file_name = app_set.name.lower().replace(" ", "_")
# print "\n app_set_view_file_name: ", app_set_view_file_name, "\n"
app_set_view_file_path = os.path.join(os.path.dirname(__file__), app_set_view_file_name + view_file_extension)
# print "\n app_set_view_file_path: ", app_set_view_file_path, "\n"
if os.path.exists(app_set_view_file_path):
# print "\n Call this function...\n"
if app_set_view_file_name == "course":
app_set_view_file_name = "mis_course"
return eval(app_set_view_file_name + "_detail")(request, group_id, app_id, app_set_id, app_set_instance_id, app_name)
# print "\n Perform fallback code...\n"
classtype = ""
app_set_template = "yes"
template = "ndf/"+template_prefix+"_list.html"
systemtype = node_collection.find_one({"_id":ObjectId(app_set_id)})
systemtype_name = systemtype.name
title = systemtype_name
if request.method=="POST":
search = request.POST.get("search","")
classtype = request.POST.get("class","")
nodes = list(node_collection.find({'name':{'$regex':search, '$options': 'i'},'member_of': {'$all': [systemtype._id]}}, {'name': 1}).sort('name', 1))
else :
nodes = list(node_collection.find({'member_of': {'$all': [systemtype._id]},'group_set':{'$all': [ObjectId(group_id)]}}, {'name': 1}).sort('name', 1))
nodes_keys = [('name', "Name")]
# nodes_dict = []
# for each in nodes:
# nodes_dict.append({"p_id":str(each._id), "name":each.name, "created_by":User.objects.get(id=each.created_by).username, "created_at":each.created_at})
else :
app_menu = "yes"
template = "ndf/"+template_prefix+"_list.html"
title = app_name
university_gst = node_collection.one({'_type': "GSystemType", 'name': "University"})
student_gst = node_collection.one({'_type': "GSystemType", 'name': "Student"})
mis_admin = node_collection.one(
{'_type': "Group", 'name': "MIS_admin"},
{'_id': 1}
)
university_cur = node_collection.find(
{'member_of': university_gst._id, 'group_set': mis_admin._id},
{'name': 1, 'relation_set.affiliated_college': 1}
).sort('name', 1)
for each_university in university_cur:
affiliated_college_ids_list = []
for rel in each_university.relation_set:
if rel and "affiliated_college" in rel:
affiliated_college_ids_list = rel["affiliated_college"]
break
students_cur = node_collection.find(
{
'member_of': student_gst._id,
'relation_set.student_belongs_to_college': {'$in': affiliated_college_ids_list}
}
)
# university_wise_students_count[each_university.name] = students_cur.count()
university_wise_students_count.append((each_university.name, students_cur.count()))
if app_set_instance_id :
app_set_instance_template = "yes"
template = "ndf/"+template_prefix+"_details.html"
app_set_template = ""
systemtype_attributetype_set = []
systemtype_relationtype_set = []
system = node_collection.find_one({"_id":ObjectId(app_set_instance_id)})
systemtype = node_collection.find_one({"_id":ObjectId(app_set_id)})
for each in systemtype.attribute_type_set:
systemtype_attributetype_set.append({"type":each.name,"type_id":str(each._id),"value":each.data_type})
for each in systemtype.relation_type_set:
systemtype_relationtype_set.append({"rt_name":each.name,"type_id":str(each._id)})
for eachatset in systemtype_attributetype_set :
for eachattribute in triple_collection.find({"_type":"GAttribute", "subject":system._id, "attribute_type.$id":ObjectId(eachatset["type_id"])}):
atlist.append({"type":eachatset["type"],"type_id":eachatset["type_id"],"value":eachattribute.object_value})
for eachrtset in systemtype_relationtype_set :
for eachrelation in triple_collection.find({"_type":"GRelation", "subject":system._id, "relation_type.$id":ObjectId(eachrtset["type_id"])}):
right_subject = node_collection.find_one({"_id":ObjectId(eachrelation.right_subject)})
rtlist.append({"type":eachrtset["rt_name"],"type_id":eachrtset["type_id"],"value_name": right_subject.name,"value_id":str(right_subject._id)})
# To support consistent view
property_order = system.property_order
system.get_neighbourhood(systemtype._id)
# array of dict for events ---------------------
if system.has_key('organiser_of_event') and len(system.organiser_of_event): # gives list of events
for event in system.organiser_of_event:
event.get_neighbourhood(event.member_of)
tempdict = {}
tempdict['title'] = event.name
if event.start_time:# and len(event.start_time) == 16:
# print "\n start_time: ", event.start_time, " -- ", event.start_time.strftime('%m/%d/%Y %H:%M')
# dt = datetime.datetime.strptime(event.start_time , '%m/%d/%Y %H:%M')
dt = event.start_time.strftime('%m/%d/%Y %H:%M')
tempdict['start'] = dt
if event.end_time:# and len(event.end_time) == 16:
# print "\n end_time: ", event.end_time, " -- ", event.end_time.strftime('%m/%d/%Y %H:%M')
# dt = datetime.datetime.strptime(event.end_time , '%m/%d/%Y %H:%M')
dt = event.end_time.strftime('%m/%d/%Y %H:%M')
tempdict['end'] = dt
tempdict['id'] = str(event._id)
events_arr.append(tempdict)
elif system.has_key('event_organised_by'): # gives list of colleges/host of events
for host in system.event_organised_by:
host.get_neighbourhood(host.member_of)
tempdict = {}
tempdict['title'] = host.name
if system.start_time:# and len(system.start_time) == 16:
# dt = datetime.datetime.strptime(system.start_time , '%m/%d/%Y %H:%M')
                    dt = system.start_time.strftime('%m/%d/%Y %H:%M')
tempdict['start'] = dt
                if system.end_time:# and len(system.end_time) == 16:
                    # dt = datetime.datetime.strptime(system.end_time , '%m/%d/%Y %H:%M')
                    dt = system.end_time.strftime('%m/%d/%Y %H:%M')
tempdict['end'] = dt
tempdict['id'] = str(host._id)
events_arr.append(tempdict)
# print json.dumps(events_arr)
# END --- array of dict for events ---------------------
for tab_name, fields_order in property_order:
display_fields = []
for field, altname in fields_order:
if system.structure[field] == bool:
display_fields.append((altname, ("Yes" if system[field] else "No")))
elif not system[field]:
display_fields.append((altname, system[field]))
continue
elif system.structure[field] == datetime.datetime:
display_fields.append((altname, system[field].date()))
elif type(system.structure[field]) == list:
if system[field]:
if type(system.structure[field][0]) == ObjectId:
name_list = []
for right_sub_dict in system[field]:
name_list.append(right_sub_dict.name)
display_fields.append((altname, ", ".join(name_list)))
elif system.structure[field][0] == datetime.datetime:
date_list = []
for dt in system[field]:
date_list.append(dt.strftime("%d/%m/%Y"))
display_fields.append((altname, ", ".join(date_list)))
else:
display_fields.append((altname, ", ".join(system[field])))
else:
display_fields.append((altname, system[field]))
property_display_order.append((tab_name, display_fields))
# End of code
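# (Shape note, illustrative: property_order is a list of
# (tab_name, [(field, altname), ...]) pairs, and property_display_order
# mirrors it as (tab_name, [(altname, display_value), ...]) pairs.)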
tags = ",".join(system.tags)
content = system.content
location = system.location
app_set_name = systemtype.name
system_id = system._id
system_type = system._type
# print "\n app_set_instance_name: ", app_set_instance_name
# print "\n app_set_name: ", app_set_name
if system_type == 'File':
system_mime_type = system.mime_type
app_set_instance_name = system.name
title = systemtype.name + "-" + system.name
variable = RequestContext(request, {
'group_id':group_id, 'groupid':group_id, 'app_name':app_name, 'app_id':app_id,
"app_collection_set":app_collection_set, "app_set_id":app_set_id,
"nodes":nodes, "nodes_keys": nodes_keys, "app_menu":app_menu, "app_set_template":app_set_template,
"app_set_instance_template":app_set_instance_template, "app_set_name":app_set_name,
"app_set_instance_name":app_set_instance_name, "title":title,
"app_set_instance_atlist":atlist, "app_set_instance_rtlist":rtlist,
'tags':tags, 'location':location, "content":content, "system_id":system_id,
"system_type":system_type,"mime_type":system_mime_type, "app_set_instance_id":app_set_instance_id,
"node":system, 'group_id':group_id, "property_display_order": property_display_order,
"events_arr":events_arr, 'university_wise_students_count': university_wise_students_count
})
return render_to_response(template, variable)
@login_required
@get_execution_time
def mis_create_edit(request, group_id, app_id, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Create a new instance of an app_set (apps view) for custom GAPPS.
"""
auth = None
if ObjectId.is_valid(group_id) is False :
group_ins = node_collection.one({'_type': "Group","name": group_id})
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if group_ins:
group_id = str(group_ins._id)
else :
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username) })
if auth :
group_id = str(auth._id)
else :
pass
app = None
if app_id is None:
app = node_collection.one({'_type': "GSystemType", 'name': app_name})
if app:
app_id = str(app._id)
else:
app = node_collection.one({'_id': ObjectId(app_id)})
app_name = app.name
# app_name = "mis"
app_collection_set = []
# app = node_collection.find_one({"_id":ObjectId(app_id)})
app_set = ""
app_set_instance_name = ""
nodes = ""
systemtype = ""
title = ""
tags = ""
location=""
content_org = ""
system_id = ""
system_type = ""
system_mime_type = ""
systemtype_name = ""
systemtype_attributetype_set = []
systemtype_relationtype_set = []
title = ""
file_st_ids = []
app_type_of_id = ""
File = 'False'
template_prefix = "mis"
user_id = int(request.user.id) # getting django user id
user_name = unicode(request.user.username) # getting django user name
if request.user.id:
if auth is None:
auth = node_collection.one({'_type': 'Author', 'name': unicode(request.user.username)})
agency_type = auth.agency_type
agency_type_node = node_collection.one({'_type': "GSystemType", 'name': agency_type}, {'collection_set': 1})
if agency_type_node:
for eachset in agency_type_node.collection_set:
app_collection_set.append(node_collection.one({"_id": eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
# for eachset in app.collection_set:
# app_collection_set.append(node_collection.one({"_id":eachset}, {'_id': 1, 'name': 1, 'type_of': 1}))
# app_set = node_collection.find_one({"_id":eachset})
# app_collection_set.append({"id": str(app_set._id), "name": app_set.name, 'type_of'})
if app_set_id:
app_set = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)}, {'name': 1, 'type_of': 1})
view_file_extension = ".py"
app_set_view_file_name = ""
app_set_view_file_path = ""
if app_set.type_of:
app_set_type_of = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set.type_of[0])}, {'name': 1})
app_set_view_file_name = app_set_type_of.name.lower().replace(" ", "_")
# print "\n app_set_view_file_name (type_of): ", app_set_view_file_name, "\n"
else:
app_set_view_file_name = app_set.name.lower().replace(" ", "_")
# print "\n app_set_view_file_name: ", app_set_view_file_name, "\n"
app_set_view_file_path = os.path.join(os.path.dirname(__file__), app_set_view_file_name + view_file_extension)
# print "\n app_set_view_file_path: ", app_set_view_file_path, "\n"
if os.path.exists(app_set_view_file_path):
# print "\n Call this function...\n"
return eval(app_set_view_file_name + "_create_edit")(request, group_id, app_id, app_set_id, app_set_instance_id, app_name)
# print "\n Perform fallback code...\n"
systemtype = node_collection.find_one({"_id":ObjectId(app_set_id)})
systemtype_name = systemtype.name
title = systemtype_name + " - new"
for each in systemtype.attribute_type_set:
systemtype_attributetype_set.append({"type":each.name,"type_id":str(each._id),"value":each.data_type, 'sub_values': each.complex_data_type, 'altnames': each.altnames})
for eachrt in systemtype.relation_type_set:
# object_type = [ {"name":rtot.name, "id":str(rtot._id)} for rtot in node_collection.find({'member_of': {'$all': [ node_collection.find_one({"_id":eachrt.object_type[0]})._id]}}) ]
object_type_cur = node_collection.find({'member_of': {'$in': eachrt.object_type}})
object_type = []
for each in object_type_cur:
object_type.append({"name":each.name, "id":str(each._id)})
systemtype_relationtype_set.append({"rt_name": eachrt.name, "type_id": str(eachrt._id), "object_type": object_type})
request_at_dict = {}
request_rt_dict = {}
files_sts = ['File','Image','Video']
if app_set_id:
app = node_collection.one({'_id':ObjectId(app_set_id)})
for each in files_sts:
node_id = node_collection.one({'name':each,'_type':'GSystemType'})._id
if node_id in app.type_of:
File = 'True'
if app_set_instance_id : # at and rt set editing instance
system = node_collection.find_one({"_id":ObjectId(app_set_instance_id)})
for eachatset in systemtype_attributetype_set :
eachattribute = triple_collection.find_one({"_type":"GAttribute", "subject":system._id, "attribute_type.$id":ObjectId(eachatset["type_id"])})
if eachattribute :
eachatset['database_value'] = eachattribute.object_value
eachatset['database_id'] = str(eachattribute._id)
else :
eachatset['database_value'] = ""
eachatset['database_id'] = ""
for eachrtset in systemtype_relationtype_set :
eachrelation = triple_collection.find_one({"_type":"GRelation", "subject":system._id, "relation_type.$id":ObjectId(eachrtset["type_id"])})
if eachrelation:
right_subject = node_collection.find_one({"_id":ObjectId(eachrelation.right_subject)})
eachrtset['database_id'] = str(eachrelation._id)
eachrtset["database_value"] = right_subject.name
eachrtset["database_value_id"] = str(right_subject._id)
else :
eachrtset['database_id'] = ""
eachrtset["database_value"] = ""
eachrtset["database_value_id"] = ""
tags = ",".join(system.tags)
content_org = system.content_org
location = system.location
system_id = system._id
system_type = system._type
if system_type == 'File':
system_mime_type = system.mime_type
app_set_instance_name = system.name
title = system.name + "-edit"
if request.method=="POST": # post methods
tags = request.POST.get("tags","")
content_org = unicode(request.POST.get("content_org",""))
name = request.POST.get("name","")
map_geojson_data = request.POST.get('map-geojson-data') # getting markers
user_last_visited_location = request.POST.get('last_visited_location') # getting last visited location by user
file1 = request.FILES.get('file', '')
for each in systemtype_attributetype_set:
if request.POST.get(each["type_id"],"") :
request_at_dict[each["type_id"]] = request.POST.get(each["type_id"],"")
for eachrtset in systemtype_relationtype_set:
if request.POST.get(eachrtset["type_id"],""):
request_rt_dict[eachrtset["type_id"]] = request.POST.get(eachrtset["type_id"],"")
if File == 'True':
if file1:
f = save_file(file1, name, request.user.id, group_id, content_org, tags)
if ObjectId.is_valid(f):
newgsystem = node_collection.one({'_id':f})
else:
template = "ndf/mis_list.html"
variable = RequestContext(request, {
'group_id':group_id, 'groupid':group_id, 'app_name':app_name, 'app_id':app_id,
"app_collection_set":app_collection_set, "app_set_id":app_set_id, "nodes":nodes,
"systemtype_attributetype_set":systemtype_attributetype_set,
"systemtype_relationtype_set":systemtype_relationtype_set,
"create_new":"yes", "app_set_name":systemtype_name, 'title':title,
'File':File, 'already_uploaded_file':f
})
return render_to_response(template, variable)
else:
newgsystem = node_collection.collection.File()
else:
newgsystem = node_collection.collection.GSystem()
if app_set_instance_id :
newgsystem = node_collection.find_one({"_id": ObjectId(app_set_instance_id)})
newgsystem.name = name
newgsystem.member_of=[ObjectId(app_set_id)]
if not app_set_instance_id :
newgsystem.created_by = request.user.id
newgsystem.modified_by = request.user.id
newgsystem.status = u"PUBLISHED"
newgsystem.group_set.append(ObjectId(group_id))
if tags:
newgsystem.tags = tags.split(",")
if content_org:
usrname = request.user.username
filename = slugify(newgsystem.name) + "-" + usrname
newgsystem.content = org2html(content_org, file_prefix=filename)
newgsystem.content_org = content_org
# check if map markers data exist in proper format then add it into newgsystem
if map_geojson_data:
map_geojson_data = map_geojson_data + ","
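# The trailing comma makes ast.literal_eval always return a tuple,
# even when only a single marker dict was posted.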
map_geojson_data = list(ast.literal_eval(map_geojson_data))
newgsystem.location = map_geojson_data
location = map_geojson_data
else:
map_geojson_data = []
location = []
newgsystem.location = map_geojson_data
# check if user_group_location exist in proper format then add it into newgsystem
if user_last_visited_location:
user_last_visited_location = list(ast.literal_eval(user_last_visited_location))
author = node_collection.one({'_type': "GSystemType", 'name': "Author"})
user_group_location = node_collection.one({'_type': "Author", 'member_of': author._id, 'created_by': user_id, 'name': user_name})
if user_group_location:
user_group_location['visited_location'] = user_last_visited_location
user_group_location.save()
newgsystem.save()
if not app_set_instance_id :
for key,value in request_at_dict.items():
attributetype_key = node_collection.find_one({"_id":ObjectId(key)})
ga_node = create_gattribute(newgsystem._id, attributetype_key, value)
# newattribute = triple_collection.collection.GAttribute()
# newattribute.subject = newgsystem._id
# newattribute.attribute_type = attributetype_key
# newattribute.object_value = value
# newattribute.save()
for key,value in request_rt_dict.items():
if key:
relationtype_key = node_collection.find_one({"_id": ObjectId(key)})
if value:
right_subject = node_collection.find_one({"_id": ObjectId(value)})
gr_node = create_grelation(newgsystem._id, relationtype_key, right_subject._id)
# newrelation = triple_collection.collection.GRelation()
# newrelation.subject = newgsystem._id
# newrelation.relation_type = relationtype_key
# newrelation.right_subject = right_subject._id
# newrelation.save()
if app_set_instance_id:
# editing instance
for each in systemtype_attributetype_set:
if each["database_id"]:
attribute_instance = triple_collection.find_one({"_id": ObjectId(each['database_id'])})
attribute_instance.object_value = request.POST.get(each["database_id"],"")
# attribute_instance.save()
ga_node = create_gattribute(attribute_instance.subject, attribute_instance.attribute_type, attribute_instance.object_value)
else :
if request.POST.get(each["type_id"],""):
attributetype_key = node_collection.find_one({"_id":ObjectId(each["type_id"])})
# newattribute = triple_collection.collection.GAttribute()
# newattribute.subject = newgsystem._id
# newattribute.attribute_type = attributetype_key
# newattribute.object_value = request.POST.get(each["type_id"],"")
# newattribute.save()
ga_node = create_gattribute(newgsystem._id, attributetype_key, request.POST.get(each["type_id"],""))
for eachrt in systemtype_relationtype_set:
if eachrt["database_id"]:
relation_instance = triple_collection.find_one({"_id":ObjectId(eachrt['database_id'])})
relation_instance.right_subject = ObjectId(request.POST.get(eachrt["database_id"],""))
# relation_instance.save()
gr_node = create_grelation(relation_instance.subject, relation_instance.relation_type, relation_instance.right_subject)
else :
if request.POST.get(eachrt["type_id"],""):
relationtype_key = node_collection.find_one({"_id":ObjectId(eachrt["type_id"])})
right_subject = node_collection.find_one({"_id":ObjectId(request.POST.get(eachrt["type_id"],""))})
gr_node = create_grelation(newgsystem._id, relationtype_key, right_subject._id)
# newrelation = triple_collection.collection.GRelation()
# newrelation.subject = newgsystem._id
# newrelation.relation_type = relationtype_key
# newrelation.right_subject = right_subject._id
# newrelation.save()
return HttpResponseRedirect(reverse(app_name.lower()+":"+template_prefix+'_app_detail', kwargs={'group_id': group_id, "app_id":app_id, "app_set_id":app_set_id}))
template = "ndf/"+template_prefix+"_create_edit.html"
variable = RequestContext(request, {
'group_id':group_id, 'groupid':group_id, 'app_name':app_name, 'app_id':app_id,
"app_collection_set":app_collection_set, "app_set_id":app_set_id, "nodes":nodes,
"systemtype_attributetype_set":systemtype_attributetype_set,
"systemtype_relationtype_set":systemtype_relationtype_set,
"create_new":"yes", "app_set_name":systemtype_name, 'title':title, 'File':File,
'tags':tags, "content_org":content_org, "system_id":system_id,
"system_type":system_type, "mime_type":system_mime_type,
"app_set_instance_name":app_set_instance_name,
"app_set_instance_id":app_set_instance_id, 'location':location
})
return render_to_response(template, variable)
@login_required
@get_execution_time
def mis_enroll(request, group_id, app_id, app_set_id=None, app_set_instance_id=None, app_name=None):
"""
Redirects to student_enroll function of person-view.
"""
if app_set_id:
app_set = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set_id)}, {'name': 1, 'type_of': 1})
view_file_extension = ".py"
app_set_view_file_name = ""
app_set_view_file_path = ""
if app_set.type_of:
app_set_type_of = node_collection.one({'_type': "GSystemType", '_id': ObjectId(app_set.type_of[0])}, {'name': 1})
app_set_view_file_name = app_set_type_of.name.lower().replace(" ", "_")
else:
app_set_view_file_name = app_set.name.lower().replace(" ", "_")
app_set_view_file_path = os.path.join(os.path.dirname(__file__), app_set_view_file_name + view_file_extension)
if os.path.exists(app_set_view_file_path):
return eval(app_set_view_file_name + "_enroll")(request, group_id, app_id, app_set_id, app_set_instance_id, app_name)
template = "ndf/student_enroll.html"
variable = RequestContext(request, {'groupid': group_id,
'title':title,
'app_id':app_id, 'app_name': app_name,
'app_collection_set': app_collection_set, 'app_set_id': app_set_id
# 'nodes':nodes,
})
return render_to_response(template, variable)
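# Note on the dispatch convention used above (a sketch of the pattern,
# not authoritative): both mis_create_edit and mis_enroll look for a
# sibling module named after the app_set's GSystemType (e.g. a
# "Student" type maps to student.py) and, when that file exists,
# delegate to "<module>_create_edit" / "<module>_enroll" via eval().
# A rough eval-free equivalent would be:
#
#   import importlib
#   mod = importlib.import_module(
#       'gnowsys_ndf.ndf.views.' + app_set_view_file_name)
#   return getattr(mod, app_set_view_file_name + '_enroll')(
#       request, group_id, app_id, app_set_id, app_set_instance_id, app_name)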
|
sunnychaudhari/gstudio
|
gnowsys-ndf/gnowsys_ndf/ndf/views/mis.py
|
Python
|
agpl-3.0
| 31,071
|
"""
:mod:`Tor2Web`
=====================================================
.. automodule:: Tor2Web
:synopsis: Stats routines
.. moduleauthor:: Arturo Filasto' <art@globaleaks.org>
.. moduleauthor:: Giovanni Pellerano <evilaliv3@globaleaks.org>
"""
# -*- coding: utf-8 -*-
import json
from datetime import date, datetime, timedelta
from twisted.internet import reactor
from twisted.internet.task import deferLater
class T2WStats(dict):
def __init__(self):
dict.__init__(self)
self.yesterday_stats = ''
self.update_stats()
def update(self, key):
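# NOTE: intentionally overrides dict.update() with per-key counter
# semantics: one call increments the access count for a single key.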
if key not in self:
self[key] = 0
self[key] += 1
def update_stats(self, run_again=True):
yesterday = date.today() - timedelta(1)
hidden_services = list()
for k in self:
hidden_services.append({'id': k, 'access_count': self[k]})
self.yesterday_stats = json.dumps({'date': yesterday.strftime('%Y-%m-%d'),
'hidden_services': hidden_services})
self.clear()
# Reschedule for the next midnight, unless explicitly disabled.
if run_again:
next_time = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) + \
timedelta(days=1)
next_delta = (next_time - datetime.now()).total_seconds()
deferLater(reactor, next_delta, self.update_stats)
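# --- Usage sketch (illustrative only, not part of Tor2web) ---
# One update() call per hidden-service access; update_stats() rolls the
# counters into yesterday_stats as a JSON string and, by default,
# reschedules itself for the next midnight.
if __name__ == '__main__':
stats = T2WStats()
for onion in ('abc.onion', 'abc.onion', 'xyz.onion'):
stats.update(onion)
stats.update_stats(run_again=False)
print(stats.yesterday_stats)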
|
globaleaks/Tor2web
|
tor2web/utils/stats.py
|
Python
|
agpl-3.0
| 1,327
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
('documents', '0002_page_refer_document'),
]
operations = [
migrations.AddField(
model_name='document',
name='refer_category',
field=models.ForeignKey(related_name='back_category', to='categories.Category', null=True, on_delete=models.CASCADE),
),
]
|
Foxugly/MyTaxAccountant
|
documents/migrations/0003_document_refer_category.py
|
Python
|
agpl-3.0
| 532
|
# Copyright 2018 Eficent Business and IT Consulting Services, S.L.
# <http://www.eficent.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
class Message(models.Model):
_inherit = "mail.message"
@api.multi
def unlink(self):
for rec in self:
self.env['mail.message.trash'].create({
'message_id': rec.message_id,
})
return super(Message, self).unlink()
|
Eficent/mailbox_replica
|
fetchmail_fetch_missing/models/mail_message.py
|
Python
|
agpl-3.0
| 481
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api, exceptions
class Project(models.Model):
_name = 'bestja.project'
_inherit = ['message_template.mixin']
_order = 'id desc'
def _current_members(self):
"""
Limit to members of the current organization only.
"""
return """[
'|',
'&',
('organizations', '!=', False),
('organizations', '=', organization),
'&',
('coordinated_org', '!=', False),
('coordinated_org', '=', organization),
]"""
name = fields.Char(required=True, string=u"Nazwa")
organization = fields.Many2one(
'organization',
default=lambda self: self.env.user.coordinated_org,
required=True,
string=u"Organizacja",
domain=lambda self: [('coordinator', '=', self.env.uid)],
)
manager = fields.Many2one(
'res.users',
domain=_current_members,
string=u"Menadżer projektu",
)
responsible_user = fields.Many2one(
'res.users',
string=u"Osoba odpowiedzialna",
compute='_responsible_user'
)
date_start = fields.Date(
required=True,
string=u"od dnia",
)
date_stop = fields.Date(
required=True,
string=u"do dnia",
)
members = fields.Many2many(
'res.users',
relation='project_members_rel',
column1='project',
column2='member',
domain=_current_members,
string=u"Zespół"
)
tasks = fields.One2many('bestja.task', 'project', string=u"Zadania")
tasks_count = fields.Integer(compute='_tasks_count', string=u"Liczba zadań")
done_tasks_count = fields.Integer(compute='_tasks_count', string=u"Liczba skończonych zadań")
@api.one
@api.depends('manager', 'organization.coordinator')
def _responsible_user(self):
if self.manager:
self.responsible_user = self.manager
else:
self.responsible_user = self.organization.coordinator
@api.one
@api.depends('tasks')
def _tasks_count(self):
self.tasks_count = len(self.tasks)
self.done_tasks_count = self.tasks.search_count([
('project', '=', self.id),
('state', '=', 'done')
])
@api.multi
def unlink(self):
manager = self.manager
val = super(Project, self).unlink()
if manager:
manager._sync_manager_groups()
return val
@api.model
def create(self, vals):
record = super(Project, self).create(vals)
if record.manager:
record.send(
template='bestja_project.msg_manager',
recipients=record.manager,
)
record.manager._sync_manager_groups()
return record
@api.multi
def write(self, vals):
old_manager = self.manager
val = super(Project, self).write(vals)
if 'manager' in vals: # Manager changed
if old_manager:
old_manager._sync_manager_groups()
self.send(
template='bestja_project.msg_manager_changed',
recipients=old_manager,
)
if self.manager:
self.manager._sync_manager_groups()
self.send(
template='bestja_project.msg_manager',
recipients=self.manager,
)
return val
@api.one
@api.constrains('date_start', 'date_stop')
def _check_project_dates(self):
"""
Date of the beginning of the project needs to be
before the end
"""
if (self.date_start > self.date_stop):
raise exceptions.ValidationError("Data rozpoczęcia projektu musi być przed datą zakończenia.")
class Task(models.Model):
_name = 'bestja.task'
_inherit = ['message_template.mixin']
_order = 'state desc'
STATES = [
('new', "nowe"),
('in_progress', "w trakcie realizacji"),
('done', "zrealizowane"),
]
name = fields.Char(required=True, string=u"Nazwa zadania")
state = fields.Selection(STATES, default='new', string=u"Status")
user = fields.Many2one(
'res.users',
domain="[('projects', '=', project)]",
string=u"Wykonawca zadania",
)
user_assigned_task = fields.Boolean(
compute='_user_assigned_task'
)
date_start = fields.Datetime(required=True, string=u"od dnia")
date_stop = fields.Datetime(required=True, string=u"do dnia")
date_button_click_start = fields.Datetime(string=u"data rozpoczęcia")
date_button_click_stop = fields.Datetime(string=u"data zakończenia")
description = fields.Text(string=u"Opis zadania")
project = fields.Many2one(
'bestja.project',
required=True,
ondelete='cascade',
string=u"Projekt",
)
@api.one
def _user_assigned_task(self):
"""
Checks if current user == user responsible for task,
for hiding and unhiding button "rozpocznij"
"""
self.user_assigned_task = (self.env.uid == self.user.id)
@api.one
def set_in_progress(self):
self.state = 'in_progress'
self.date_button_click_start = fields.Datetime.now()
@api.one
def set_done(self):
self.state = 'done'
self.date_button_click_stop = fields.Datetime.now()
self.send(
template='bestja_project.msg_task_done_user',
recipients=self.user,
)
self.send(
template='bestja_project.msg_task_done_manager',
recipients=self.project.responsible_user,
)
@api.model
def create(self, vals):
record = super(Task, self).create(vals)
record.send(
template='bestja_project.msg_task',
recipients=record.user,
)
return record
@api.multi
def write(self, vals):
old_user = None
if 'user' in vals:
old_user = self.user
val = super(Task, self).write(vals)
if old_user is not None:
self.send(
template='bestja_project.msg_task',
recipients=self.user,
)
self.send(
template='bestja_project.msg_task_changed',
recipients=old_user,
sender=self.env.user,
)
return val
@api.one
@api.constrains('date_start', 'date_stop')
def _check_task_dates(self):
"""
Date of the beginning of the task needs to be
before the end and should be within project dates.
"""
if (self.date_start > self.date_stop):
raise exceptions.ValidationError("Data rozpoczęcia zadania musi być przed datą zakończenia.")
class UserWithProjects(models.Model):
_inherit = 'res.users'
projects = fields.Many2many(
'bestja.project',
relation='project_members_rel',
column1='member',
column2='project',
string=u"Projekty"
)
managed_projects = fields.One2many(
'bestja.project',
inverse_name='manager'
)
def __init__(self, pool, cr):
super(UserWithProjects, self).__init__(pool, cr)
self._add_permitted_fields(level='owner', fields={'projects', 'managed_projects'})
self._add_permitted_fields(level='privileged', fields={'projects', 'managed_projects'})
@api.one
def _sync_manager_groups(self):
"""
Add / remove user from the managers group, based on whether
she manages a project.
"""
self._sync_group(
group=self.env.ref('bestja_project.managers'),
domain=[('managed_projects', '!=', False)],
)
@api.one
@api.depends('projects')
def _compute_user_access_level(self):
"""
Access level that the current (logged in) user has for the object.
Either "owner", "admin", "privileged" or None.
"""
super(UserWithProjects, self)._compute_user_access_level()
if not self.user_access_level and self.user_has_groups('bestja_project.managers') \
and (self.env.user.managed_projects & self.sudo().projects):
self.user_access_level = 'privileged'
class OrganizationWithProjects(models.Model):
_inherit = 'organization'
projects = fields.One2many(
'bestja.project',
inverse_name='organization'
)
|
KrzysiekJ/bestja
|
addons/bestja_project/models.py
|
Python
|
agpl-3.0
| 8,591
|
# -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .base_codelist import BaseCodelist
class BudgetIdentifier(BaseCodelist):
category = models.CharField(_('category'), max_length=100, blank=True, null=False)
name = models.CharField(_('name'), max_length=300, blank=True, null=False)
sector = models.CharField(_('sector'), max_length=100, blank=True, null=False)
def __str__(self):
return self.code + ' - ' + self.name
class Meta:
app_label = 'codelists'
ordering = ('-version', 'code')
verbose_name = _('budget identifier')
verbose_name_plural = _('budget identifiers')
|
akvo/akvo-rsr
|
akvo/codelists/models/budget_identifier.py
|
Python
|
agpl-3.0
| 956
|
# Copyright (c) since 2001, Kisio Digital and/or its affiliates. All rights reserved.
import json
from chaos import models, exceptions, mapper, db
from utils import get_application_periods
def fill_and_get_pt_object(navitia, all_objects, json, add_to_db=True):
"""
:param navitia: Navitia Class
:param all_objects: dictionary of objects to be added in this session
:param json: Flux which contains json information of pt_object
:param add_to_db: ptobject insert into database
:return: a pt_object and modify all_objects param
"""
if json["id"] in all_objects:
return all_objects[json["id"]]
if not navitia.get_pt_object(json['id'], json['type']):
raise exceptions.ObjectUnknown(
"{} '{}' doesn't exist".format(
json['type'],
json['id']))
pt_object = models.PTobject.get_pt_object_by_uri(json["id"])
if pt_object:
all_objects[json["id"]] = pt_object
return pt_object
pt_object = models.PTobject()
mapper.fill_from_json(pt_object, json, mapper.object_mapping)
if add_to_db:
db.session.add(pt_object)
all_objects[json["id"]] = pt_object
return pt_object
def is_composed_pt_object(pt_object_json):
return type(pt_object_json) is dict and \
pt_object_json.get('type', '') in ['line_section', 'rail_section']
def manage_simple_pt_object(navitia, db_objects, json_attribute, json_data):
"""
:param navitia: Navitia Class
:param db_objects: pt_object in database models : localisations, objects
:param json_attribute: attribute in json
:param json_data: data
:return:
"""
pt_object_db = dict()
for ptobject in db_objects:
pt_object_db[ptobject.uri] = ptobject
pt_object_dict = dict()
if json_attribute in json_data:
for pt_object_json in json_data[json_attribute]:
if is_composed_pt_object(pt_object_json):
continue
ptobject = fill_and_get_pt_object(navitia, pt_object_dict, pt_object_json, False)
if ptobject.uri not in pt_object_db:
db_objects.append(ptobject)
for ptobject_uri in pt_object_db:
if ptobject_uri not in pt_object_dict:
db_objects.remove(pt_object_db[ptobject_uri])
def manage_wordings(db_object, json):
db_object.delete_wordings()
# handle wordings
wordings = json['wordings']
for json_wording in wordings:
db_wording = models.Wording()
key = json_wording["key"].strip()
if key == '':
raise exceptions.InvalidJson('Json invalid: key is empty, you give : {}'.format(wordings))
db_wording.key = json_wording["key"]
db_wording.value = json_wording["value"]
db_object.wordings.append(db_wording)
# handle wording
wording = db_object.wordings[0].value
if 'wording' in json:
wording = json['wording']
db_object.wording = wording
def manage_tags(disruption, json):
tags_db = dict((tag.id, tag) for tag in disruption.tags)
tags_json = {}
if 'tags' in json:
tags_json = dict((tag["id"], tag) for tag in json['tags'])
for tag_json in json['tags']:
if tag_json["id"] not in tags_db:
tag = models.Tag.get(tag_json['id'], disruption.client.id)
disruption.tags.append(tag)
tags_db[tag_json['id']] = tag
difference = set(tags_db) - set(tags_json)
for diff in difference:
tag = tags_db[diff]
disruption.tags.remove(tag)
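# The add/remove logic above (and in manage_message_meta and
# manage_properties below) is one recurring pattern: index both sides
# by key, add what exists only in the payload, delete what exists only
# in the database. A minimal standalone sketch of the idea (illustrative,
# not used by the code in this module):
def _sync_keys(db_items, json_items):
"""Return (keys_to_add, keys_to_remove) for two {key: obj} mappings."""
return set(json_items) - set(db_items), set(db_items) - set(json_items)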
def fill_and_add_line_section(navitia, all_objects, pt_object_json):
"""
:param navitia: Navitia Class
:param all_objects: dictionary of objects to be added in this session
:param pt_object_json: Flux which contains json information of pt_object
:return: pt_object and modify all_objects param
"""
ptobject = models.PTobject()
if 'line_section' in pt_object_json:
mapper.fill_from_json(ptobject, pt_object_json, mapper.line_section_object_mapping)
else:
mapper.fill_from_json(ptobject, pt_object_json, mapper.object_mapping)
# Here we treat all the objects in line_section like line, start_point, end_point
if 'line_section' not in pt_object_json:
raise exceptions.InvalidJson('Object of type line_section must have a line_section entry')
line_section_json = pt_object_json['line_section']
ptobject.uri = ":".join((line_section_json['line']['id'], ptobject.id))
line_section = models.LineSection(ptobject.id)
line_section.line = fill_and_get_pt_object(navitia, all_objects, line_section_json['line'])
line_section.start_point = fill_and_get_pt_object(navitia, all_objects, line_section_json['start_point'])
line_section.end_point = fill_and_get_pt_object(navitia, all_objects, line_section_json['end_point'])
# routes in line_section management
# "routes":[{"id":"route:MTD:9", "type": "route"}, {"id":"route:MTD:Nav23", "type": "route"}]
if 'routes' in line_section_json:
for route in line_section_json["routes"]:
route_object = fill_and_get_pt_object(navitia, all_objects, route, True)
line_section.routes.append(route_object)
# Fill wordings from json
# "meta":[{"key":"direction", "value": "1234"}, {"key":"direction", "value": "5678"}]
if 'metas' in line_section_json:
metas = {'wordings': line_section_json['metas']}
manage_wordings(line_section, metas)
ptobject.insert_line_section(line_section)
return ptobject
def fill_and_add_rail_section(navitia, all_objects, pt_object_json):
"""
:param navitia: Class Navitia
:param all_objects: dictionary of objects to be added in this session
:param pt_object_json: Flux which contains json information of ordered pt_object
:return: pt_object and modify all_objects param
"""
ptobject = models.PTobject()
if 'rail_section' in pt_object_json:
mapper.fill_from_json(ptobject, pt_object_json, mapper.rail_section_object_mapping)
else:
mapper.fill_from_json(ptobject, pt_object_json, mapper.object_mapping)
# rail_section : ptobjects management like line, start_point, end_point
if 'rail_section' not in pt_object_json:
raise exceptions.InvalidJson('Object of type rail_section must have a rail_section entry')
rail_section_json = pt_object_json['rail_section']
ptobject.uri = ":".join((rail_section_json['start_point']['id'], rail_section_json['end_point']['id'], ptobject.id))
rail_section = models.RailSection(ptobject.id)
if 'line' in rail_section_json:
rail_section.line = fill_and_get_pt_object(navitia, all_objects, rail_section_json['line'])
rail_section.start_point = fill_and_get_pt_object(navitia, all_objects, rail_section_json['start_point'])
rail_section.end_point = fill_and_get_pt_object(navitia, all_objects, rail_section_json['end_point'])
# rail_section : blocked_stop_areas management
if 'blocked_stop_areas' in rail_section_json:
rail_section.blocked_stop_areas = json.dumps(rail_section_json["blocked_stop_areas"])
if 'routes' in rail_section_json:
for route in rail_section_json["routes"]:
route_object = fill_and_get_pt_object(navitia, all_objects, route, True)
rail_section.routes.append(route_object)
ptobject.insert_rail_section(rail_section)
return ptobject
def clean_message(msg):
if msg.channel.content_type == 'text/html':
msg.text = msg.text.replace('\r\n', ' ')
def manage_messages(impact, impact_json, client_id):
# Same management for every cases as 'messages' is not required
if 'messages' not in impact_json:
impact_json['messages'] = []
messages_json = dict((msg["channel"]["id"], msg) for msg in impact_json['messages'])
manage_channels_required(messages_json, client_id)
manage_messages_db(impact, impact_json['messages'], client_id)
def manage_delete_message_db(impact, messages_json, messages_db):
difference = set(messages_db) - set(messages_json)
for diff in difference:
impact.delete_message(messages_db[diff])
def manage_messages_db(impact, messages_json, client_id):
impact.delete_all_messages()
for message_json in messages_json:
message = create_message_from_json(message_json, client_id)
message.impact_id = impact.id
impact.insert_message(message)
def create_message_from_json(message_json, client_id):
channel_id = message_json["channel"]["id"]
channel = models.Channel.get(channel_id, client_id)
message = models.Message()
message.channel = channel
mapper.fill_from_json(message, message_json, mapper.message_mapping)
clean_message(message)
manage_message_meta(message, message_json)
return message
def manage_channels_required(messages_json, client_id):
channels_required = models.Channel.get_channels_required(client_id)
if channels_required:
for channel_required in channels_required:
if channel_required.id not in messages_json or messages_json[channel_required.id] is None:
raise exceptions.InvalidJson('Channel {} is required.'.format(channel_required.id))
elif not messages_json[channel_required.id]['text']:
raise exceptions.InvalidJson('Empty property \'text\' is not allowed for channel {}.'.format(channel_required.id))
def manage_message_meta(message, json):
meta_db = dict((meta.key, meta) for meta in message.meta)
metas_json = dict()
if 'meta' in json:
metas_json = dict((meta['key'], meta) for meta in json['meta'])
for meta_json in json['meta']:
if meta_json['key'] not in meta_db:
meta = models.Meta()
mapper.fill_from_json(meta, meta_json, mapper.meta_mapping)
message.insert_meta(meta)
meta_db[meta.key] = meta
else:
meta = meta_db[meta_json['key']]
mapper.fill_from_json(meta, meta_json, mapper.meta_mapping)
difference = set(meta_db) - set(metas_json)
for diff in difference:
message.delete_meta(meta_db[diff])
def manage_application_periods(impact, application_periods):
impact.delete_app_periods()
for app_period in application_periods:
db_application_period = models.ApplicationPeriods(impact.id)
db_application_period.start_date = app_period[0]
db_application_period.end_date = app_period[1]
impact.insert_app_period(db_application_period)
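# Shape expected by manage_application_periods (illustrative):
#   application_periods = [
#       (datetime(2020, 1, 1, 8, 0), datetime(2020, 1, 1, 18, 0)),
#       (datetime(2020, 1, 2, 8, 0), datetime(2020, 1, 2, 18, 0)),
#   ]
# i.e. an iterable of (start, end) pairs, as produced by
# get_application_periods() from the utils module.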
def manage_patterns(impact, json):
impact.delete_patterns()
if 'application_period_patterns' in json:
for json_pattern in json['application_period_patterns']:
pattern = models.Pattern(impact.id)
mapper.fill_from_json(pattern, json_pattern, mapper.pattern_mapping)
impact.insert_pattern(pattern)
manage_time_slot(pattern, json_pattern)
def manage_time_slot(pattern, json):
if 'time_slots' in json:
for json_time_slot in json['time_slots']:
time_slot = models.TimeSlot(pattern.id)
mapper.fill_from_json(time_slot, json_time_slot, mapper.time_slot_mapping)
pattern.insert_time_slot(time_slot)
def create_or_update_impact(disruption, json_impact, navitia, impact_id=None):
if impact_id:
# impact exists in database
impact_bd = models.Impact.get(impact_id, disruption.contributor.id)
impact_bd.upgrade_version()
else:
impact_bd = models.Impact()
impact_bd.severity = models.Severity.get(json_impact['severity']['id'], disruption.client.id)
impact_bd.disruption_id = disruption.id
if 'send_notifications' in json_impact:
impact_bd.send_notifications = json_impact['send_notifications']
if 'notification_date' in json_impact:
impact_bd.notification_date = json_impact['notification_date']
db.session.add(impact_bd)
# Ptobject is not added in the database before commit. If we have ptobject duplication
# in the json we have to handle it by using a dictionary. Each time we add a ptobject, we also
# add it in the dictionary
# ObjectUnknown from manage_simple_pt_object propagates to the caller.
manage_simple_pt_object(navitia, impact_bd.objects, 'objects', json_impact)
all_objects = dict()
if 'objects' in json_impact:
for pt_object_json in json_impact['objects']:
# For an pt_objects of the type 'line_section' we format uri : uri:impact_id
# we insert this object in the table pt_object
if pt_object_json["type"] == 'line_section':
try:
ptobject = fill_and_add_line_section(navitia, all_objects, pt_object_json)
except exceptions.ObjectUnknown:
# (The original assigned the caught exception to exceptions.InvalidJson,
# clobbering that exception class; a plain re-raise is intended here.)
raise
impact_bd.objects.append(ptobject)
if pt_object_json["type"] == 'rail_section':
try:
ptobject = fill_and_add_rail_section(navitia, all_objects, pt_object_json)
except exceptions.ObjectUnknown:
# (Same as above: re-raise without clobbering exceptions.InvalidJson.)
raise
impact_bd.objects.append(ptobject)
# Severity
severity_json = json_impact['severity']
if (not impact_bd.severity_id) or (impact_bd.severity_id and (severity_json['id'] != impact_bd.severity_id)):
impact_bd.severity_id = severity_json['id']
impact_bd.severity = models.Severity.get(impact_bd.severity_id, disruption.client.id)
# For each object application_period_patterns create and fill a pattern and time_slots
manage_patterns(impact_bd, json_impact)
# Create a list of application periods either from application_period_patterns
# or from application_periods in the data json
app_periods_by_pattern = get_application_periods(json_impact)
manage_application_periods(impact_bd, app_periods_by_pattern)
manage_messages(impact_bd, json_impact, disruption.client.id)
return impact_bd
def manage_impacts(disruption, json, navitia):
if 'impacts' in json:
impacts_db = dict((impact.id, impact) for impact in disruption.impacts)
impacts_json = dict()
for json_impact in json['impacts']:
if 'id' in json_impact:
impact_id = json_impact['id']
else:
impact_id = None
impact_bd = create_or_update_impact(disruption, json_impact, navitia, impact_id)
impacts_json[impact_bd.id] = impact_bd
difference = set(impacts_db) - set(impacts_json)
for diff in difference:
impacts_db[diff].archive()
def manage_properties(disruption, json):
""" Add properties linked to a post|put disruption by creating
associate_disruption_property objects.
The property has to be present in database or the function
will end in error.
Json format expected:
"properties": [
{
"property_id": "",
"value": ""
}, ...
]
"""
if 'properties' in json:
properties_json = list()
properties_db = list(
(adp.property_id, adp.disruption_id, adp.value,)
for adp in disruption.properties
)
for json_property in json['properties']:
property_db = models.Property.get(
disruption.client.id,
json_property['property_id']
)
if property_db is None:
raise exceptions.ObjectUnknown(
'property {} not found'.format(json_property['property_id'])
)
adp_db = create_adp(
disruption,
property_db.id,
json_property['value']
)
properties_json.append(
(adp_db.property_id, adp_db.disruption_id, adp_db.value,)
)
difference = set(properties_db) - set(properties_json)
for diff in difference:
adp = models.AssociateDisruptionProperty.get(*diff)
db.session.delete(adp)
def create_adp(disruption, property_id, value):
""" Create or update an associate_disruption_property object in database
"""
adp = models.AssociateDisruptionProperty.get(
property_id,
disruption.id,
value
)
if adp is None:
adp = models.AssociateDisruptionProperty()
adp.value = value
adp.disruption_id = disruption.id
adp.property_id = property_id
db.session.add(adp)
return adp
def manage_channel_types(db_object, json_types):
db_object.delete_channel_types()
for json_type in json_types:
db_channel_type = models.ChannelType()
db_channel_type.name = json_type
db_object.insert_channel_type(db_channel_type)
|
CanalTP/Chaos
|
chaos/db_helper.py
|
Python
|
agpl-3.0
| 17,166
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-07-20 08:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('organization_projects', '0092_auto_20200703_1644'),
('organization_network', '0145_teamprojectordering'),
]
operations = [
migrations.AlterUniqueTogether(
name='teamprojectordering',
unique_together=set([('project_page', 'team_page')]),
),
]
|
Ircam-Web/mezzanine-organization
|
organization/network/migrations/0146_auto_20200720_1030.py
|
Python
|
agpl-3.0
| 523
|
"""
WSGI config for timtec project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "timtec.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timtec.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
mupi/escolamupi
|
timtec/wsgi.py
|
Python
|
agpl-3.0
| 1,419
|
import datetime
def add_gigasecond(dt_obj):
return dt_obj + datetime.timedelta(seconds=10**9)
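# Example (illustrative): one gigasecond is 10**9 seconds, i.e.
# 11574 days, 1 hour, 46 minutes and 40 seconds -- roughly 31.7 years.
if __name__ == '__main__':
print(add_gigasecond(datetime.datetime(2011, 4, 25)))
# -> 2043-01-01 01:46:40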
|
CubicComet/exercism-python-solutions
|
gigasecond/gigasecond.py
|
Python
|
agpl-3.0
| 101
|
#-*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Camptocamp SA (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Nicolas Bessi (Camptocamp)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import intervent
import absence
import not_work
import contract
import contract_list
|
Micronaet/micronaet-contract
|
contract_manage_report/report/__init__.py
|
Python
|
agpl-3.0
| 1,479
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# Since publishconf.py is loaded after pelicanconf.py, any variable set in
# both files takes the value from the later-read publishconf.py.
# Note: SITEURL must be './' so that the result links returned by Tipue
# search resolve correctly on the local site.
SITEURL = './'
# This setting is used for checking the static pages locally, hence the relative URLs.
RELATIVE_URLS = True
# A different theme may be needed so that Tipue search works both locally and on gh-pages.
THEME = 'theme/pelican-bootstrap3_local'
#BOOTSTRAP_THEME = 'readable'
#BOOTSTRAP_THEME = 'readable-old'
BOOTSTRAP_THEME = 'united'
#PYGMENTS_STYLE = 'paraiso-drak'
#PYGMENTS_STYLE = 'fruity'
# fruity had to be dropped for compatibility with render_math.
PYGMENTS_STYLE = 'monokai'
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = "kmolab"
#GOOGLE_ANALYTICS = ""
# Default article dates come from each md file's file-system date, so no manual setting is needed.
DEFAULT_DATE = 'fs'
# Local code highlighting
# MD_EXTENSIONS is deprecated
#MD_EXTENSIONS = ['fenced_code', 'extra', 'codehilite(linenums=True)']
MARKDOWN = {
'extension_configs': {
'markdown.extensions.fenced_code': {},
'markdown.extensions.codehilite(linenums=True)': {'css_class': 'highlight'},
'markdown.extensions.extra': {},
'markdown.extensions.meta': {},
},
'output_format': 'html5',
}
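# Aside (an assumption about newer versions, not from the original
# config): recent Python-Markdown releases expect extension options in
# the value dict rather than embedded in the key, e.g. roughly:
#   'markdown.extensions.codehilite': {'css_class': 'highlight',
#                                      'linenums': True},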
# Use the following to save articles under date-based paths:
#ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
#ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
PAGE_URL = 'pages/{slug}/'
PAGE_SAVE_AS = 'pages/{slug}/index.html'
SHOW_ARTICLE_AUTHOR = True
DISPLAY_PAGES_ON_MENU = True
|
mdecourse/2017springcd
|
local_publishconf.py
|
Python
|
agpl-3.0
| 2,026
|
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Interests Management',
'version': "15.0.1.0.0",
'category': 'Accounting',
'sequence': 14,
'summary': 'Calculate interests for selected partners',
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'depends': [
'account',
],
'data': [
'views/res_company_views.xml',
'data/ir_cron_data.xml',
'security/ir.model.access.csv',
],
'installable': True,
'application': False,
}
|
ingadhoc/account-financial-tools
|
account_interests/__manifest__.py
|
Python
|
agpl-3.0
| 1,428
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest import mock
from django.urls import reverse
from .. import factories as f
from taiga.importers import exceptions
from taiga.base.utils import json
from taiga.base import exceptions as exc
pytestmark = pytest.mark.django_db
def test_auth_url(client):
user = f.UserFactory.create()
client.login(user)
url = reverse("importers-github-auth-url")+"?uri=http://localhost:9001/project/new?from=github"
response = client.get(url, content_type="application/json")
assert response.status_code == 200
assert 'url' in response.data
assert response.data['url'] == "https://github.com/login/oauth/authorize?client_id=&scope=user,repo&redirect_uri=http://localhost:9001/project/new?from=github"
def test_authorize(client, settings):
user = f.UserFactory.create()
client.login(user)
authorize_url = reverse("importers-github-authorize")
with mock.patch('taiga.importers.github.api.GithubImporter') as GithubImporterMock:
GithubImporterMock.get_access_token.return_value = "token"
response = client.post(authorize_url, content_type="application/json", data=json.dumps({"code": "code"}))
# Mock has no calledWith(); the original assert was always truthy.
GithubImporterMock.get_access_token.assert_called_with(
settings.IMPORTERS['github']['client_id'],
settings.IMPORTERS['github']['client_secret'],
"code"
)
assert response.status_code == 200
assert 'token' in response.data
assert response.data['token'] == "token"
def test_authorize_without_code(client):
user = f.UserFactory.create()
client.login(user)
authorize_url = reverse("importers-github-authorize")
response = client.post(authorize_url, content_type="application/json", data=json.dumps({}))
assert response.status_code == 400
assert 'token' not in response.data
assert '_error_message' in response.data
assert response.data['_error_message'] == "Code param needed"
def test_authorize_with_bad_verify(client, settings):
user = f.UserFactory.create()
client.login(user)
authorize_url = reverse("importers-github-authorize")
with mock.patch('taiga.importers.github.api.GithubImporter') as GithubImporterMock:
GithubImporterMock.get_access_token.side_effect = exceptions.InvalidAuthResult()
response = client.post(authorize_url, content_type="application/json", data=json.dumps({"code": "bad"}))
# As above: use a real assertion instead of the truthy calledWith().
GithubImporterMock.get_access_token.assert_called_with(
settings.IMPORTERS['github']['client_id'],
settings.IMPORTERS['github']['client_secret'],
"bad"
)
assert response.status_code == 400
assert 'token' not in response.data
assert '_error_message' in response.data
assert response.data['_error_message'] == "Invalid auth data"
def test_import_github_list_users(client):
user = f.UserFactory.create()
client.login(user)
url = reverse("importers-github-list-users")
with mock.patch('taiga.importers.github.api.GithubImporter') as GithubImporterMock:
instance = mock.Mock()
instance.list_users.return_value = [
{"id": 1, "username": "user1", "full_name": "user1", "detected_user": None},
{"id": 2, "username": "user2", "full_name": "user2", "detected_user": None}
]
GithubImporterMock.return_value = instance
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token", "project": 1}))
assert response.status_code == 200
assert response.data[0]["id"] == 1
assert response.data[1]["id"] == 2
def test_import_github_list_users_without_project(client):
user = f.UserFactory.create()
client.login(user)
url = reverse("importers-github-list-users")
with mock.patch('taiga.importers.github.api.GithubImporter') as GithubImporterMock:
instance = mock.Mock()
instance.list_users.return_value = [
{"id": 1, "username": "user1", "full_name": "user1", "detected_user": None},
{"id": 2, "username": "user2", "full_name": "user2", "detected_user": None}
]
GithubImporterMock.return_value = instance
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token"}))
assert response.status_code == 400
def test_import_github_list_users_with_problem_on_request(client):
user = f.UserFactory.create()
client.login(user)
url = reverse("importers-github-list-users")
with mock.patch('taiga.importers.github.importer.GithubClient') as GithubClientMock:
instance = mock.Mock()
instance.get.side_effect = exc.WrongArguments("Invalid Request")
GithubClientMock.return_value = instance
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token", "project": 1}))
assert response.status_code == 400
def test_import_github_list_projects(client):
user = f.UserFactory.create()
client.login(user)
url = reverse("importers-github-list-projects")
with mock.patch('taiga.importers.github.api.GithubImporter') as GithubImporterMock:
instance = mock.Mock()
instance.list_projects.return_value = ["project1", "project2"]
GithubImporterMock.return_value = instance
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token"}))
assert response.status_code == 200
assert response.data[0] == "project1"
assert response.data[1] == "project2"
def test_import_github_list_projects_with_problem_on_request(client):
user = f.UserFactory.create()
client.login(user)
url = reverse("importers-github-list-projects")
with mock.patch('taiga.importers.github.importer.GithubClient') as GithubClientMock:
instance = mock.Mock()
instance.get.side_effect = exc.WrongArguments("Invalid Request")
GithubClientMock.return_value = instance
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token"}))
assert response.status_code == 400
def test_import_github_project_without_project_id(client, settings):
settings.CELERY_ENABLED = True
user = f.UserFactory.create()
client.login(user)
url = reverse("importers-github-import-project")
with mock.patch('taiga.importers.github.tasks.GithubImporter') as GithubImporterMock:
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token"}))
assert response.status_code == 400
settings.CELERY_ENABLED = False
def test_import_github_project_with_celery_enabled(client, settings):
settings.CELERY_ENABLED = True
user = f.UserFactory.create()
project = f.ProjectFactory.create(slug="async-imported-project")
client.login(user)
url = reverse("importers-github-import-project")
with mock.patch('taiga.importers.github.tasks.GithubImporter') as GithubImporterMock:
instance = mock.Mock()
instance.import_project.return_value = project
GithubImporterMock.return_value = instance
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token", "project": 1}))
assert response.status_code == 202
assert "task_id" in response.data
settings.CELERY_ENABLED = False
def test_import_github_project_with_celery_disabled(client, settings):
user = f.UserFactory.create()
project = f.ProjectFactory.create(slug="imported-project")
client.login(user)
url = reverse("importers-github-import-project")
with mock.patch('taiga.importers.github.api.GithubImporter') as GithubImporterMock:
instance = mock.Mock()
instance.import_project.return_value = project
GithubImporterMock.return_value = instance
response = client.post(url, content_type="application/json", data=json.dumps({"token": "token", "project": 1}))
assert response.status_code == 200
assert "slug" in response.data
assert response.data['slug'] == "imported-project"
|
taigaio/taiga-back
|
tests/integration/test_importers_github_api.py
|
Python
|
agpl-3.0
| 8,796
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import subprocess
import time
import logging
from binary_utils import Binary
# Create logger
logger = logging.getLogger(__name__)
# Get program filename
program_filename = os.path.basename(sys.argv[0])
# Get MATAM root dir absolute path
matam_db_prepro_bin = os.path.realpath(sys.argv[0])
matam_bin_dir = os.path.dirname(matam_db_prepro_bin)
matam_root_dir = os.path.dirname(matam_bin_dir)
# Get all dependencies bin
matam_script_dir = os.path.join(matam_root_dir, 'scripts')
extract_taxo_bin = os.path.join(matam_script_dir, 'extract_taxo_from_fasta.py')
replace_Ns_bin = os.path.join(matam_script_dir, 'replace_Ns_by_As.py')
sort_fasta_bin = os.path.join(matam_script_dir, 'sort_fasta_by_length.py')
fasta_length_filter_bin = os.path.join(matam_script_dir, 'fasta_length_filter.py')
fasta_name_filter_bin = os.path.join(matam_script_dir, 'fasta_name_filter.py')
clean_name_bin = os.path.join(matam_script_dir, 'fasta_clean_name.py')
indexdb_bin = Binary.assert_which('indexdb_rna')
vsearch_bin = Binary.assert_which('vsearch')
# Define a null file handle
FNULL = open(os.devnull, 'w')
class DefaultHelpParser(argparse.ArgumentParser):
"""
This is a slightly modified argparse parser to display the full help
on parser error instead of only usage
"""
def error(self, message):
sys.stderr.write('\nError: %s\n\n' % message)
self.print_help()
sys.exit(2)
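# Illustrative sketch (defined but never called): what DefaultHelpParser
# changes compared to a stock ArgumentParser.
def _demo_default_help_parser():
parser = DefaultHelpParser(description='demo')
parser.add_argument('--cpu', type=int, required=True)
# An invalid command line prints the error message followed by the
# full help text, then exits with status 2 (raises SystemExit).
parser.parse_args([])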
def parse_arguments():
"""
Parse the command line, and check if arguments are correct
"""
# Initiate argument parser
parser = DefaultHelpParser(description='MATAM db preprocessing',
# to precisely format help display
formatter_class=lambda prog: argparse.HelpFormatter(prog, width=120, max_help_position=80))
# Main parameters
group_main = parser.add_argument_group('Main parameters')
# -i / --input_ref
group_main.add_argument('-i', '--input_ref',
action = 'store',
metavar = 'INREF',
type = str,
required = True,
help = 'Input reference file (fasta format). '
                                   'Silva-formatted taxonomies will be used if available')
# -d / --db_dir
group_main.add_argument('-d', '--db_dir',
action = 'store',
metavar = 'DBDIR',
type = str,
default = os.getcwd(),
help = 'Database output directory. '
'Default is cwd')
# -v / --verbose
group_main.add_argument('-v', '--verbose',
action = 'store_true',
help = 'Increase verbosity')
# Performance parameters
group_perf = parser.add_argument_group('Performance')
# --cpu
group_perf.add_argument('--cpu',
action = 'store',
metavar = 'CPU',
type = int,
default = 1,
                            help = 'Max number of CPUs to use. '
'Default is %(default)s cpu')
# --max_memory
group_perf.add_argument('--max_memory',
action = 'store',
metavar = 'MAXMEM',
type = int,
default = 10000,
                            help = 'Maximum memory to use (in MiB). '
                                   'Default is %(default)s MiB')
# Advanced parameters
group_adv = parser.add_argument_group('Advanced parameters')
# -m / --min_length
group_adv.add_argument('-m', '--min_length',
action = 'store',
metavar = 'MINLGTH',
type = int,
default = None,
help = 'Ref sequences minimum length')
# -M / --max_length
group_adv.add_argument('-M', '--max_length',
action = 'store',
metavar = 'MAXLGTH',
type = int,
default = None,
help = 'Ref sequences maximum length')
# -n / --max_consecutive_n
group_adv.add_argument('-n', '--max_consecutive_n',
action = 'store',
metavar = 'MAXN',
type = int,
default = 5,
                           help = 'Maximum number of consecutive Ns a sequence is allowed to have. '
'Default is %(default)s. Setting it to 0 will remove all '
'sequences with Ns. Ns in accepted sequences will be replaced '
'by As')
# --clustering_id_threshold
group_adv.add_argument('--clustering_id_threshold',
action = 'store',
metavar = 'REAL',
type = float,
default = 0.95,
help = 'Identity threshold for clustering. '
'Default is %(default)s')
    # --by_kingdom
group_adv.add_argument('--by_kingdom',
action = 'store_true',
help = 'Perform clustering by kingdom')
# --kingdoms
group_adv.add_argument('--kingdoms',
action = 'store',
metavar = 'STR',
type = str,
nargs = '+',
default = ['archaea', 'bacteria', 'eukaryota'],
                           help = 'Kingdoms to cluster the DB by. '
'Default is %(default)s')
    # --out_db_name
group_adv.add_argument('--out_db_name',
action = 'store',
metavar = 'OUTNAME',
type = str,
help = 'Output MATAM db name. '
'Default is composed from parameters')
# --keep_tmp
group_adv.add_argument('--keep_tmp',
action = 'store_true',
help = 'Do not remove tmp files')
# --debug
group_adv.add_argument('--debug',
action = 'store_true',
                           help = 'Output debug info')
#
args = parser.parse_args()
# Arguments checking
if args.clustering_id_threshold < 0 or args.clustering_id_threshold > 1:
parser.print_help()
raise Exception("clustering id threshold not in range [0,1]")
# Set debug parameters
if args.debug:
args.verbose = True
args.keep_tmp = True
# Get absolute path for all arguments
args.input_ref = os.path.abspath(args.input_ref)
args.db_dir = os.path.abspath(args.db_dir)
#
return args
def print_intro(args):
"""
Print the introduction
"""
sys.stdout.write("""
#################################
MATAM db pre-processing
#################################\n\n""")
# Retrieve complete cmd line
cmd_line = '{binpath} '.format(binpath=matam_db_prepro_bin)
# Verbose
if args.verbose:
cmd_line += '--verbose '
# Debug
if args.debug:
cmd_line += '--debug '
# Performance
cmd_line += """--cpu {cpu} --max_memory {memory} \
""".format(cpu=args.cpu,
memory=args.max_memory)
# Advanced parameters
if args.min_length:
cmd_line += '--min_length {} '.format(args.min_length)
if args.max_length:
cmd_line += '--max_length {} '.format(args.max_length)
cmd_line += '--max_consecutive_n {0} '.format(args.max_consecutive_n)
cmd_line += '--clustering_id_threshold {0} '.format(args.clustering_id_threshold)
if args.by_kingdom:
cmd_line += '--by_kingdom --kingdoms '
for kingdom in args.kingdoms:
cmd_line += '{0} '.format(kingdom)
if args.keep_tmp:
cmd_line += '--keep_tmp '
if args.out_db_name:
cmd_line += '--out_db_name {0} '.format(args.out_db_name)
# Main parameters
cmd_line += '--db_dir {0} '.format(args.db_dir)
cmd_line += '--input_ref {0}'.format(args.input_ref)
# Print cmd line
sys.stdout.write('CMD: {0}\n\n'.format(cmd_line))
return 0
def rm_files(filepath_list):
"""
Try to delete all files in filepath_list
"""
for filepath in filepath_list:
try:
logger.debug('rm {0}'.format(filepath))
os.remove(filepath)
        except OSError:
            # Best effort: ignore files that no longer exist or cannot be removed
            pass
if __name__ == '__main__':
# Arguments parsing
args = parse_arguments()
# Print intro infos
print_intro(args)
# Init error code
error_code = 0
# Set logging
# create console handler
ch = logging.StreamHandler()
#
if args.debug:
logger.setLevel(logging.DEBUG)
# create formatter for debug level
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
else:
if args.verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
# create default formatter
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add the formatter to the console handler
ch.setFormatter(formatter)
# add the handler to logger
logger.addHandler(ch)
# Init list of tmp files to delete at the end
to_rm_filepath_list = list()
##############################################
# Set all files and directories names + paths
complete_ref_db_filepath = args.input_ref
complete_ref_db_filename = os.path.basename(complete_ref_db_filepath)
complete_ref_db_basename = os.path.splitext(complete_ref_db_filename)[0]
try:
if not os.path.exists(args.db_dir):
logger.debug('mkdir {0}'.format(args.db_dir))
os.makedirs(args.db_dir)
except OSError:
logger.exception('Could not create output directory {0}'.format(args.db_dir))
raise
complete_ref_db_taxo_filename = complete_ref_db_basename + '.taxo.tab'
complete_ref_db_taxo_filepath = os.path.join(args.db_dir, complete_ref_db_taxo_filename)
cleaned_complete_ref_db_basename = complete_ref_db_basename
cleaned_complete_ref_db_filename = cleaned_complete_ref_db_basename + '.cleaned.fasta'
cleaned_complete_ref_db_filepath = os.path.join(args.db_dir, cleaned_complete_ref_db_filename)
clustering_id_threshold_int = int(args.clustering_id_threshold * 100)
vsearch_output_basename = complete_ref_db_basename + '.vsearch_'
vsearch_output_basename += '{0}'.format(clustering_id_threshold_int)
if args.by_kingdom:
vsearch_output_basename += '_by_kingdom'
vsearch_output_filename = vsearch_output_basename + '.fasta'
vsearch_output_filepath = os.path.join(args.db_dir, vsearch_output_filename)
vsearch_centroids_basename = vsearch_output_basename + '.centroids'
vsearch_centroids_basepath = os.path.join(args.db_dir, vsearch_output_basename)
vsearch_centroids_filename = vsearch_centroids_basename + '.fasta'
vsearch_centroids_filepath = os.path.join(args.db_dir, vsearch_centroids_filename)
clustered_ref_db_basename = cleaned_complete_ref_db_basename + '_NR{0}'.format(clustering_id_threshold_int)
if args.by_kingdom:
clustered_ref_db_basename += '_bk'
clustered_ref_db_basepath = os.path.join(args.db_dir, clustered_ref_db_basename)
clustered_ref_db_filename = clustered_ref_db_basename + '.fasta'
clustered_ref_db_filepath = os.path.join(args.db_dir, clustered_ref_db_filename)
# This is the output MATAM db basepath to pass to matam_assembly.py
output_ref_db_basename = clustered_ref_db_basename
if args.out_db_name:
output_ref_db_basename = os.path.basename(args.out_db_name)
output_ref_db_basepath = os.path.join(args.db_dir, output_ref_db_basename)
# This is the output MATAM db file names
# For the complete db fasta file
output_complete_ref_db_basename = output_ref_db_basename + '.complete'
output_complete_ref_db_basepath = os.path.join(args.db_dir, output_complete_ref_db_basename)
output_complete_ref_db_filename = output_complete_ref_db_basename + '.fasta'
output_complete_ref_db_filepath = os.path.join(args.db_dir, output_complete_ref_db_filename)
# For the complete db taxo file
output_complete_ref_db_taxo_filename = output_complete_ref_db_basename + '.taxo.tab'
output_complete_ref_db_taxo_filepath = os.path.join(args.db_dir, output_complete_ref_db_taxo_filename)
# For the clustered db fasta file
output_clustered_ref_db_basename = output_ref_db_basename + '.clustered'
output_clustered_ref_db_basepath = os.path.join(args.db_dir, output_clustered_ref_db_basename)
output_clustered_ref_db_filename = output_clustered_ref_db_basename + '.fasta'
output_clustered_ref_db_filepath = os.path.join(args.db_dir, output_clustered_ref_db_filename)
##############################################################
# Ref DB pre-processing (cleaning, extracting taxo, indexing)
logger.info('Starting ref db pre-processing')
# Extract taxo from ref DB and sort by ref id
logger.info('Extracting taxonomies from reference DB')
cmd_line = extract_taxo_bin + ' -i ' + complete_ref_db_filepath + ' | sort -k1,1 > '
cmd_line += complete_ref_db_taxo_filepath
logger.debug('CMD: {0}'.format(cmd_line))
error_code += subprocess.call(cmd_line, shell=True)
# TO DO, maybe, one day:
# Trim Ns from both sides
# Convert Us in Ts
# Option: Either filter out seq with Ns or replace Ns with random nucl
# Option: Filter too small or too long sequences
# Sort sequences by decreasing length
logger.info('Cleaning reference db')
cmd_line = 'cat ' + complete_ref_db_filepath
cmd_line += ' | sed "/^>/!s/U/T/g" | sed "/^>/!s/u/t/g" | sed "/^>/!s/ //g"'
cmd_line += ' | ' + replace_Ns_bin + ' -n {0} '.format(args.max_consecutive_n)
if args.min_length or args.max_length:
cmd_line += ' | ' + fasta_length_filter_bin
if args.min_length:
cmd_line += ' -m ' + str(args.min_length)
if args.max_length:
cmd_line += ' -M ' + str(args.max_length)
cmd_line += ' | ' + sort_fasta_bin + ' --reverse > '
cmd_line += cleaned_complete_ref_db_filepath
logger.debug('CMD: {0}'.format(cmd_line))
error_code += subprocess.call(cmd_line, shell=True)
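    # Illustrative assembled pipeline (hypothetical file names; the length
    # filter stage appears only when -m/-M is given):
    #   cat ref.fasta | sed "/^>/!s/U/T/g" | sed "/^>/!s/u/t/g" | sed "/^>/!s/ //g" \
    #     | replace_Ns_by_As.py -n 5 | sort_fasta_by_length.py --reverse > ref.cleaned.fasta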
####################
# Ref DB clustering
logger.info('Starting ref db clustering')
# Perform clustering, either by kingdom or at once
if args.by_kingdom:
# Perform clustering by kingdom
for kingdom in args.kingdoms:
# Define by-kingdom files names + paths
cleaned_complete_ref_db_kingdom_basename = cleaned_complete_ref_db_basename + '.' + kingdom
cleaned_complete_ref_db_kingdom_filename = cleaned_complete_ref_db_kingdom_basename + '.fasta'
cleaned_complete_ref_db_kingdom_filepath = os.path.join(args.db_dir, cleaned_complete_ref_db_kingdom_filename)
vsearch_output_kingdom_basename = cleaned_complete_ref_db_kingdom_basename + '.vsearch_'
vsearch_output_kingdom_basename += '{0}'.format(clustering_id_threshold_int)
vsearch_output_kingdom_filename = vsearch_output_kingdom_basename + '.fasta'
vsearch_output_kingdom_filepath = os.path.join(args.db_dir, vsearch_output_kingdom_filename)
# Extracting kingdoms fasta files
logger.info('Extracting sequences from {0} kingdom'.format(kingdom))
cmd_line = fasta_name_filter_bin + ' -i ' + cleaned_complete_ref_db_filepath
            cmd_line += ' -s \' ' + kingdom + '\' > ' # note: the space before the kingdom is required
cmd_line += cleaned_complete_ref_db_kingdom_filepath
logger.debug('CMD: {0}'.format(cmd_line))
error_code += subprocess.call(cmd_line, shell=True)
            # Clustering with vsearch
            # Apply a null penalty to left and right gaps (--gapopen "20I/0E") to use vsearch as a semi-global aligner
logger.info('Clustering {0} sequences @ {1} pct id'.format(kingdom, clustering_id_threshold_int))
cmd_line = vsearch_bin + ' --cluster_fast'
cmd_line += ' ' + cleaned_complete_ref_db_kingdom_filepath
cmd_line += ' --centroids ' + vsearch_output_kingdom_filepath
cmd_line += ' --id {0:.2f}'.format(args.clustering_id_threshold)
cmd_line += ' --gapopen "20I/0E"'
cmd_line += ' --threads ' + str(args.cpu)
logger.debug('CMD: {0}'.format(cmd_line))
if args.verbose:
error_code += subprocess.call(cmd_line, shell=True)
else:
                # Needed because vsearch doesn't have a verbose option
                # and outputs everything to stderr
error_code += subprocess.call(cmd_line, shell=True, stdout=FNULL, stderr=FNULL)
# Concatenate vsearch outputs
cmd_line = 'cat ' + vsearch_output_kingdom_filepath + ' >> '
cmd_line += vsearch_output_filepath
logger.debug('CMD: {0}'.format(cmd_line))
error_code += subprocess.call(cmd_line, shell=True)
# Tag tmp files for removal
to_rm_filepath_list.append(cleaned_complete_ref_db_kingdom_filepath)
to_rm_filepath_list.append(vsearch_output_kingdom_filepath)
else:
        # Clustering with vsearch
        # Apply a null penalty to left and right gaps (--gapopen "20I/0E") to use vsearch as a semi-global aligner
logger.info('Clustering sequences @ {0} pct id'.format(clustering_id_threshold_int))
cmd_line = vsearch_bin + ' --cluster_fast'
cmd_line += ' ' + cleaned_complete_ref_db_filepath
cmd_line += ' --centroids ' + vsearch_output_filepath
cmd_line += ' --id {0:.2f}'.format(args.clustering_id_threshold)
cmd_line += ' --gapopen "20I/0E"'
cmd_line += ' --threads ' + str(args.cpu)
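        # Illustrative assembled command (file names and thread count are
        # placeholders, not output from a real run):
        #   vsearch --cluster_fast ref.cleaned.fasta --centroids ref.vsearch_95.fasta \
        #           --id 0.95 --gapopen "20I/0E" --threads 4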
logger.debug('CMD: {0}'.format(cmd_line))
if args.verbose:
error_code += subprocess.call(cmd_line, shell=True)
else:
            # Needed because vsearch doesn't have a verbose option
            # and outputs everything to stderr
error_code += subprocess.call(cmd_line, shell=True, stdout=FNULL, stderr=FNULL)
vsearch_centroids_filepath = vsearch_output_filepath
# Clean fasta headers
cmd_line = clean_name_bin + ' -i ' + vsearch_centroids_filepath
cmd_line += ' -o ' + clustered_ref_db_filepath
logger.debug('CMD: {0}'.format(cmd_line))
error_code += subprocess.call(cmd_line, shell=True)
# Tag tmp files for removal
to_rm_filepath_list.append(vsearch_output_filepath)
to_rm_filepath_list.append(vsearch_centroids_filepath)
##########################################
# Renaming output files as MATAM db files
logger.info('Renaming output files as MATAM db files')
try:
os.rename(cleaned_complete_ref_db_filepath, output_complete_ref_db_filepath)
os.rename(complete_ref_db_taxo_filepath, output_complete_ref_db_taxo_filepath)
os.rename(clustered_ref_db_filepath, output_clustered_ref_db_filepath)
except OSError:
logger.exception('Could not rename tmp files into MATAM db files')
raise
######################################################
# SortMeRNA indexing of complete and clustered ref db
# SortMeRNA complete ref db indexing
logger.info('Indexing complete ref db')
cmd_line = indexdb_bin + ' --ref ' + output_complete_ref_db_filepath
cmd_line += ',' + output_complete_ref_db_basepath
cmd_line += ' -m {0}'.format(args.max_memory)
if args.verbose:
cmd_line += ' -v '
logger.debug('CMD: {0}'.format(cmd_line))
error_code += subprocess.call(cmd_line, shell=True)
if args.verbose:
sys.stdout.write('\n')
# SortMeRNA clustered ref db indexing
logger.info('Indexing clustered ref db')
cmd_line = indexdb_bin + ' --ref ' + output_clustered_ref_db_filepath
cmd_line += ',' + output_clustered_ref_db_basepath
cmd_line += ' -m {0}'.format(args.max_memory)
if args.verbose:
cmd_line += ' -v '
logger.debug('CMD: {0}'.format(cmd_line))
error_code += subprocess.call(cmd_line, shell=True)
if args.verbose:
sys.stdout.write('\n')
###############
# Exit program
sys.stdout.write('Output MATAM db: {0}\n'.format(output_ref_db_basepath))
# Exit if everything went ok
if not error_code:
# Try to remove all tmp files
# won't crash if it cannot
if not args.keep_tmp:
sys.stdout.write('\n')
logger.info('Removing tmp files')
rm_files(to_rm_filepath_list)
#
sys.stdout.write('\n{0} terminated with no error\n'.format(program_filename))
exit(0)
# Deal with errors
else:
sys.stdout.write('\n{0} terminated with some errors. '.format(program_filename))
if args.verbose:
            sys.stdout.write('Check the log for additional info\n')
else:
            sys.stdout.write('Rerun the program with the --verbose or --debug option\n')
exit(1)
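# Illustrative invocation (hypothetical paths, default 0.95 clustering threshold):
#   ./matam_db_preprocessing.py -i SILVA_ref.fasta -d ./matam_db --cpu 4 --max_memory 8000
# On success the db_dir should contain <db>.complete.fasta, <db>.complete.taxo.tab,
# <db>.clustered.fasta and the SortMeRNA index files for both databases, where <db>
# defaults to a name derived from the input file and the clustering parameters.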
|
bonsai-team/matam
|
scripts/matam_db_preprocessing.py
|
Python
|
agpl-3.0
| 21,753
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
#!/usr/bin/python
# This script cleans up all data from the system, including
# all transactions and masters (excluding default masters).
# After running this file, the system will reset to its
# initial state.
# This script can be executed from lib/wnf.py using
# lib/wnf.py --cleanup-data
from __future__ import unicode_literals
import sys
sys.path.append("lib/py")
sys.path.append(".")
sys.path.append("owrang")
import webnotes
#--------------------------------
def delete_transactions():
print "Deleting transactions..."
trans = ['Task', 'Support Ticket', 'Stock Reconciliation', 'Stock Ledger Entry',
'Stock Entry', 'Sales Order', 'Salary Slip','Sales Invoice', 'Quotation',
'Quality Inspection', 'Purchase Receipt', 'Purchase Order', 'Production Order',
'POS Setting', 'Period Closing Voucher', 'Purchase Invoice', 'Maintenance Visit',
'Maintenance Schedule', 'Leave Application', 'Leave Allocation', 'Lead', 'Journal Voucher',
'Installation Note', 'Material Request', 'GL Entry', 'Expense Claim', 'Opportunity',
'Delivery Note', 'Customer Issue', 'Bin', 'Authorization Rule', 'Attendance', 'C-Form',
'Appraisal', 'Installation Note', 'Communication', "Supplier Quotation", "Newsletter",
"Job Applicant", "Web Page", "Website Slideshow", "Blog Post", "Blog Category", "Blogger",
"Time Log", "Time Log Batch", "Workflow"]
for d in trans:
for t in webnotes.conn.sql("select options from tabDocField where parent='%s' and fieldtype='Table'" % d):
webnotes.conn.sql("delete from `tab%s`" % (t))
webnotes.conn.sql("delete from `tab%s`" % (d))
print "Deleted " + d
def delete_masters():
print "Deleting masters...."
masters = {
'Workstation': ['Default Workstation'],
'Warehouse': ['Default Warehouse'],
'UOM': ['Kg', 'Mtr', 'Box', 'Ltr', 'Nos', 'Ft', 'Pair', 'Set'],
'Territory': ['All Territories', 'Default Territory'],
'Terms and Conditions': '',
'Tag': '',
'Supplier Type': ['Default Supplier Type'],
'Supplier': '',
'Serial No': '',
'Sales Person': ['Sales Team'],
'Sales Partner': '',
'Sales BOM': '',
'Salary Structure': '',
'Purchase Taxes and Charges Master': '',
'Project': '',
'Print Heading': '',
'Price List': ['Default Price List'],
'Sales Taxes and Charges Master': '',
'Letter Head': '',
'Leave Type': ['Leave Without Pay', 'Privilege Leave', 'Casual Leave', 'PL', 'CL', 'LWP',
'Compensatory Off', 'Sick Leave'],
'Appraisal Template': '',
'Item Group': ['All Item Groups', 'Default'],
'Item': '',
'Holiday List': '',
'Activity Type': '',
'Grade': '',
'Feed': '',
'Expense Claim Type': ['Travel', 'Medical', 'Calls', 'Food', 'Others'],
'Event': '',
'Employment Type': '',
'Employee': '',
'Earning Type': ['Basic', 'Conveyance', 'House Rent Allowance', 'Dearness Allowance',
'Medical Allowance', 'Telephone'],
'Designation': '',
'Department': '',
'Deduction Type': ['Income Tax', 'Professional Tax', 'Provident Fund', 'Leave Deduction'],
'Customer Group': ['All Customer Groups', 'Default Customer Group'],
'Customer': '',
'Cost Center': '',
'Contact': '',
'Campaign': '',
'Budget Distribution': '',
'Brand': '',
'Branch': '',
'Batch': '',
'Appraisal': '',
'Account': '',
'BOM': ''
}
for d in masters.keys():
for t in webnotes.conn.sql("select options from tabDocField where parent='%s' \
and fieldtype='Table'" % d):
webnotes.conn.sql("delete from `tab%s`" % (t))
lst = '"'+'","'.join(masters[d])+ '"'
webnotes.conn.sql("delete from `tab%s` where name not in (%s)" % (d, lst))
print "Deleted " + d
def reset_all_series():
# Reset master series
webnotes.conn.sql("""update tabSeries set current = 0 where name not in
('Ann/', 'BSD', 'DEF', 'DF', 'EV', 'Event Updates/', 'FileData-',
'FL', 'FMD/', 'GLM Detail', 'Login Page/', 'MDI', 'MDR', 'MI', 'MIR',
'PERM', 'PR', 'SRCH/C/', 'TD', 'TIC/', 'TMD/', 'TW', 'UR', '_FEED',
'_SRCH', '_TRIGGER', '__NSO', 'CustomField', 'Letter')
""")
print "Series updated"
def reset_transaction_series():
webnotes.conn.sql("""update tabSeries set current = 0 where name in
('JV', 'INV', 'BILL', 'SO', 'DN', 'PO', 'LEAD', 'ENQUIRY', 'ENQ', 'CI',
'IN', 'PS', 'IDT', 'QAI', 'QTN', 'STE', 'SQTN', 'SUP', 'SR',
'POS', 'LAP', 'LAL', 'EXP')""")
print "Series updated"
def delete_main_masters():
main_masters = ['Fiscal Year', 'Company', 'DefaultValue']
for d in main_masters:
for t in webnotes.conn.sql("select options from tabDocField where parent='%s' and fieldtype='Table'" % d):
webnotes.conn.sql("delete from `tab%s`" % (t))
webnotes.conn.sql("delete from `tab%s`" % (d))
print "Deleted " + d
def reset_global_defaults():
flds = {
'default_company': None,
'default_currency': None,
'current_fiscal_year': None,
'date_format': 'dd-mm-yyyy',
'sms_sender_name': None,
'default_item_group': 'Default',
'default_stock_uom': 'Nos',
'default_valuation_method': 'FIFO',
'tolerance': None,
'acc_frozen_upto': None,
'bde_auth_role': None,
'credit_controller': None,
'default_customer_group': 'Default Customer Group',
'default_territory': 'Default',
'default_price_list': 'Standard',
'default_supplier_type': 'Default Supplier Type',
'hide_currency_symbol': None,
'default_price_list_currency': None,
}
from webnotes.model.code import get_obj
gd = get_obj('Global Defaults', 'Global Defaults')
for d in flds:
gd.doc.fields[d] = flds[d]
gd.doc.save()
webnotes.clear_cache()
def run():
webnotes.connect()
webnotes.conn.begin()
# Confirmation from user
confirm = ''
while not confirm:
confirm = raw_input("Are you sure you want to delete the data from the system (N/Y)?")
if confirm.lower() != 'y':
raise Exception
cleanup_type = ''
while cleanup_type not in ['1', '2']:
		cleanup_type = raw_input("""\nWhat type of cleanup do you want to perform?
1. Only Transactions
2. Both Masters and Transactions
Please enter your choice (1/2):
""")
# delete
delete_transactions()
if cleanup_type == '1':
print "Reset Transaction Series"
reset_transaction_series()
else:
delete_masters()
print "Reset All Series"
reset_all_series()
delete_main_masters()
reset_global_defaults()
print "System cleaned up succesfully"
webnotes.conn.commit()
webnotes.conn.close()
if __name__ == '__main__':
run()
|
Yellowen/Owrang
|
utilities/cleanup_data.py
|
Python
|
agpl-3.0
| 6,485
|
import time
from C4CApplication.page_objects.CreateBranchPage import CreateBranchPage
from C4CApplication.simulation.Actor import Actor
from C4CApplication.page_objects.HomePage import HomePage
from C4CApplication.page_objects.MyCare4Care import MyCare4Care
class BPAdminActor(Actor):
"""
This class represents a bp administrator that will do some actions
"""
action_list = None
def get_action_list(self):
if self.action_list is None:
self.action_list = [
[self.login_action, self.test_create_branch, self.logout_action]
]
return self.action_list
def remove_action_from_action_list(self, action):
self.action_list.remove(action)
@staticmethod
def login_action(selenium, live_server_url):
"""
Login the actor
:param selenium: The instance of selenium
:param live_server_url:
        :return: True if the action was successful,
        False if the action is impossible now but perhaps could be performed in the future
        None if the action failed unexpectedly
"""
selenium.get('%s%s' % (live_server_url, '')) # Go to the home page
time.sleep(2)
page = HomePage(selenium)
page.login_successful("mathieu.jadin@student.uclouvain.be", "azertyuiop") # Kim is branch officer of LLN
time.sleep(2)
return True
@staticmethod
def test_create_branch(selenium, live_server_url):
"""
        Creates a new branch and puts himself as a branch officer
        :return: True if the action was successful,
        False if the action is impossible now but perhaps could be performed in the future
        None if the action failed unexpectedly
"""
page = MyCare4Care(selenium)
page.BP_click_on_new_branch()
page = CreateBranchPage(selenium)
time.sleep(2)
page = page.fill_in_info('Bxl', 'Bruxelles-Molenbeek', 'mathieu.jadin@student.uclouvain.be', "Rue de la Reussite 42", "7652", "Bruxelles")
time.sleep(3)
page = page.click_on_submit()
time.sleep(2)
return True
@staticmethod
def logout_action(selenium, live_server_url):
"""
Logout the actor
:param selenium: The instance of selenium
:param live_server_url:
        :return: True if the action was successful,
        False if the action is impossible now but perhaps could be performed in the future
        None if the action failed unexpectedly
"""
# Go to home page
page = MyCare4Care(selenium)
time.sleep(2)
page.click_home()
time.sleep(2)
# Logout
page = HomePage(selenium)
page.click_on_logout()
time.sleep(2)
return True
|
dsarkozi/care4care-sdp-grp4
|
Care4Care/C4CApplication/simulation/BPAdminActor.py
|
Python
|
agpl-3.0
| 2,859
|
# Generated by Django 2.2.17 on 2020-11-18 22:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import simple_history.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("credentials", "0016_credential_content_type"),
]
operations = [
migrations.CreateModel(
name="ProgramCompletionEmailConfiguration",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
(
"created",
django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name="created"),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name="modified"),
),
(
"identifier",
models.CharField(
help_text='Should be either "default" to affect all programs, the program type slug, or the UUID of the program. Values are unique.',
max_length=50,
unique=True,
),
),
(
"html_template",
models.TextField(
help_text="For HTML emails.Allows tags include (a, b, blockquote, div, em, i, li, ol, span, strong, ul)"
),
),
(
"plaintext_template",
models.TextField(help_text="For plaintext emails. No formatting tags. Text will send as is."),
),
("enabled", models.BooleanField(default=False)),
],
options={
"get_latest_by": "modified",
"abstract": False,
},
),
migrations.CreateModel(
name="HistoricalProgramCompletionEmailConfiguration",
fields=[
("id", models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name="ID")),
(
"created",
django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name="created"),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name="modified"),
),
(
"identifier",
models.CharField(
db_index=True,
help_text='Should be either "default" to affect all programs, the program type slug, or the UUID of the program. Values are unique.',
max_length=50,
),
),
(
"html_template",
models.TextField(
help_text="For HTML emails.Allows tags include (a, b, blockquote, div, em, i, li, ol, span, strong, ul)"
),
),
(
"plaintext_template",
models.TextField(help_text="For plaintext emails. No formatting tags. Text will send as is."),
),
("enabled", models.BooleanField(default=False)),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")], max_length=1),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "historical program completion email configuration",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
|
edx/credentials
|
credentials/apps/credentials/migrations/0017_historicalprogramcompletionemailconfiguration_programcompletionemailconfiguration.py
|
Python
|
agpl-3.0
| 4,637
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class ExTimePeriod(models.Model):
_name = 'mqo.extimeperiod'
note = fields.Char(string="Note")
exercise_id = fields.Many2one('mqo.exercise',
ondelete='cascade', string="Exercise", required=True)
start_date = fields.Datetime(string="Start date", required=True, default=lambda self: fields.Datetime.now())
end_date = fields.Datetime(string="End date", required=True, default=lambda self: fields.Datetime.now())
mag = fields.Float(string="mag", default=0.0, required=True)
annual = fields.Boolean(string="Annual?", default=False)
class ExBoosted(models.Model):
_name = 'mqo.exboosted'
exercise_id = fields.Many2one('mqo.exercise',
ondelete='cascade', string="Exercise", required=True)
boost_exercise_id = fields.Many2one('mqo.exercise',
ondelete='cascade', string="Exercise", required=True)
sig_m = fields.Float(string="sig_m", default=0.0)
sig_c = fields.Float(string="sig_c", default=0.0)
sig_r = fields.Float(string="sig_r", default=1.0)
sig_e = fields.Float(string="sig_e", default=1.0)
class ExPre(models.Model):
_name = 'mqo.expre'
exercise_id = fields.Many2one('mqo.exercise',
ondelete='cascade', string="Exercise", required=True)
pre_exercise_id = fields.Many2one('mqo.exercise',
ondelete='cascade', string="Exercise", required=True)
pow_m = fields.Float(string="pow_m", default=10.0)
pow_c = fields.Float(string="pow_c", default=0.0)
pow_r = fields.Float(string="pow_r", default=1.0)
pow_e = fields.Float(string="pow_e", default=1.0)
class Exercise(models.Model):
_name = 'mqo.exercise'
name = fields.Char(string="Title", required=True)
instructions = fields.Text(string="Instructions")
allocation_ids = fields.One2many('mqo.allocation', 'exercise_id', string="Allocated Exercises")
assignment_ids = fields.One2many('mqo.assignment', 'exercise_id', string="Assigned exercises")
# Exercise params
sig_m = fields.Float(string="sig_m", default=0.0)
sig_c = fields.Float(string="sig_c", default=0.0)
sig_r = fields.Float(string="sig_r", default=1.0)
sig_e = fields.Float(string="sig_e", default=1.0)
exp_m = fields.Float(string="exp_m", default=0.0)
exp_c = fields.Float(string="exp_c", default=0.0)
exp_r = fields.Float(string="exp_r", default=1.0)
exp_e = fields.Float(string="exp_e", default=1.0)
bur_m = fields.Float(string="bur_m", default=0.0)
bur_c = fields.Float(string="bur_c", default=0.0)
bur_r = fields.Float(string="bur_r", default=1.0)
bur_e = fields.Float(string="bur_e", default=1.0)
bur_c2 = fields.Float(string="bur_c2", default=0.0)
bst_m = fields.Float(string="bst_m", default=0.0)
dur = fields.Float(string="dur", default=1.0)
tper_ids = fields.One2many('mqo.extimeperiod', 'exercise_id', string="Time periods")
bstex_ids = fields.One2many('mqo.exboosted', 'exercise_id', string="Boosted exercises")
pre_ids = fields.One2many('mqo.expre', 'pre_exercise_id', string="Prerequisite for")
surveyq_dat = fields.One2many('mqo.exsurveyqcoef', 'exercise_id', string="Survey question data")
default_response_survey = fields.Many2one('survey.survey', string="Default response survey")
class ExSurveyQCoef(models.Model):
_name = 'mqo.exsurveyqcoef'
exercise_id = fields.Many2one('mqo.exercise',
ondelete='cascade', string="Exercise", required=True)
survey_question = fields.Many2one('survey.question',
ondelete='cascade', string="Survey question", required=True)
coef = fields.Float(string="Coefficient", default=0.0)
|
drummingbird/ppe
|
mqo_exercises/models/exercise.py
|
Python
|
agpl-3.0
| 3,786
|
# This file is part of Fedora Community.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from itertools import product
from paste.deploy.converters import asbool
from tg import config
from bodhi.client.bindings import BodhiClient
from datetime import datetime
from webhelpers.html import HTML
import markdown
from fedoracommunity.connectors.api import get_connector
from fedoracommunity.connectors.api import \
IConnector, ICall, IQuery, ParamFilter
from moksha.common.lib.dates import DateTimeDisplay
from fedoracommunity.lib.utils import parse_build
log = logging.getLogger(__name__)
koji_build_url = (
'http://koji.fedoraproject.org/koji/search?'
'terms=%(name)s-%(version)s-%(release)s&type=build&match=glob')
class BodhiConnector(IConnector, ICall, IQuery):
_method_paths = dict()
_query_paths = dict()
_cache_prompts = dict()
def __init__(self, environ, request):
super(BodhiConnector, self).__init__(environ, request)
self._prod_url = config.get(
'fedoracommunity.connector.bodhi.produrl',
'https://bodhi.fedoraproject.org')
self._bodhi_client = BodhiClient(self._base_url,
insecure=self._insecure)
@classmethod
def query_updates_cache_prompt(cls, msg):
if '.bodhi.' not in msg['topic']:
return
msg = msg['msg']
if 'update' in msg:
update = msg['update']
release = update['release']['name']
status = update['status']
nvrs = [build['nvr'] for build in update['builds']]
names = ['-'.join(nvr.split('-')[:-2]) for nvr in nvrs]
releases = [release, '']
statuses = [status, '']
groupings = [False]
headers = ['package', 'release', 'status', 'group_updates']
combinations = product(names, releases, statuses, groupings)
for values in combinations:
yield dict(zip(headers, values))
@classmethod
def query_active_releases_cache_prompt(cls, msg):
if '.bodhi.' not in msg['topic']:
return
msg = msg['msg']
if 'update' in msg:
nvrs = [build['nvr'] for build in msg['update']['builds']]
names = ['-'.join(nvr.split('-')[:-2]) for nvr in nvrs]
for name in names:
yield {'package': name}
# IConnector
@classmethod
def register(cls):
cls._base_url = config.get('fedoracommunity.connector.bodhi.baseurl',
'https://bodhi.fedoraproject.org/')
check_certs = asbool(config.get('fedora.clients.check_certs', True))
cls._insecure = not check_certs
cls.register_query_updates()
cls.register_query_active_releases()
def request_data(self, path, params):
return self._bodhi_client.send_request(path, auth=False, params=params)
def introspect(self):
# FIXME: return introspection data
return None
#ICall
def call(self, resource_path, params):
log.debug('BodhiConnector.call(%s)' % locals())
# proxy client only returns structured data so we can pass
# this off to request_data but we should fix that in ProxyClient
return self.request_data(resource_path, params)
#IQuery
@classmethod
def register_query_updates(cls):
path = cls.register_query(
'query_updates',
cls.query_updates,
cls.query_updates_cache_prompt,
primary_key_col='request_id',
default_sort_col='request_id',
default_sort_order=-1,
can_paginate=True)
path.register_column('request_id',
default_visible=False,
can_sort=False,
can_filter_wildcards=False)
path.register_column('updateid',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('nvr',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('submitter',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('status',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('request',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('karma',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('nagged',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('type',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('approved',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('date_submitted',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('date_pushed',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('date_modified',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('comments',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('bugs',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('builds',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('releases',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('release',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('karma_level',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
f = ParamFilter()
f.add_filter('package', ['nvr'], allow_none=False)
f.add_filter('status', ['status'], allow_none=True)
f.add_filter('group_updates', allow_none=True, cast=bool)
f.add_filter('granularity', allow_none=True)
f.add_filter('release', allow_none=False)
cls._query_updates_filter = f
def query_updates(self, start_row=None,
rows_per_page=None,
order=-1,
sort_col=None,
filters=None,
**params):
if not filters:
filters = {}
filters = self._query_updates_filter.filter(filters, conn=self)
group_updates = filters.get('group_updates', True)
params.update(filters)
params['page'] = int(start_row/rows_per_page) + 1
# If we're grouping updates, ask for twice as much. This is so we can
# handle the case where there are two updates for each package, one for
# each release. Yes, worst case we get twice as much data as we ask
# for, but this allows us to do *much* more efficient database calls on
# the server.
if group_updates:
params['rows_per_page'] = rows_per_page * 2
else:
params['rows_per_page'] = rows_per_page
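        # Worked example (illustrative): with rows_per_page=25 and grouping on,
        # we ask bodhi for 50 updates; even in the worst case of one update per
        # release for two releases per package, that still yields the 25
        # packages we need for the page.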
# Convert bodhi1 query format to bodhi2.
if 'package' in params:
params['packages'] = params.pop('package')
if 'release' in params:
params['releases'] = params.pop('release')
results = self._bodhi_client.send_request('updates', auth=False, params=params)
total_count = results['total']
if group_updates:
updates_list = self._group_updates(results['updates'],
num_packages=rows_per_page)
else:
updates_list = results['updates']
for up in updates_list:
versions = []
releases = []
if group_updates:
up['title'] = up['dist_updates'][0]['title']
for dist_update in up['dist_updates']:
versions.append(dist_update['version'])
releases.append(dist_update['release_name'])
up['name'] = up['package_name']
up['versions'] = versions
up['releases'] = releases
up['status'] = up['dist_updates'][0]['status']
up['nvr'] = up['dist_updates'][0]['title']
up['request_id'] = up['package_name'] + \
dist_update['version'].replace('.', '')
else:
chunks = up['title'].split('-')
up['name'] = '-'.join(chunks[:-2])
up['version'] = '-'.join(chunks[-2:])
up['versions'] = chunks[-2]
up['releases'] = up['release']['long_name']
up['nvr'] = up['title']
up['request_id'] = up.get('updateid') or \
up['nvr'].replace('.', '').replace(',', '')
up['id'] = up['nvr'].split(',')[0]
# A unique id that we can use in HTML class fields.
#up['request_id'] = up.get('updateid') or \
# up['nvr'].replace('.', '').replace(',', '')
actions = []
up['actions'] = ''
for action in actions:
reqs = ''
if group_updates:
for u in up['dist_updates']:
reqs += "update_action('%s', '%s');" % (u['title'],
action[0])
title = up['dist_updates'][0]['title']
else:
reqs += "update_action('%s', '%s');" % (up['title'],
action[0])
title = up['title']
# FIXME: Don't embed HTML
up['actions'] += """
<button id="%s_%s" onclick="%s return false;">%s</button><br/>
""" % (title.replace('.', ''), action[0], reqs, action[1])
# Dates
if group_updates:
date_submitted = up['dist_updates'][0]['date_submitted']
date_pushed = up['dist_updates'][0]['date_pushed']
else:
date_submitted = up['date_submitted']
date_pushed = up['date_pushed']
granularity = filters.get('granularity', 'day')
ds = DateTimeDisplay(date_submitted)
up['date_submitted_display'] = ds.age(granularity=granularity,
general=True) + ' ago'
if date_pushed:
dp = DateTimeDisplay(date_pushed)
up['date_pushed'] = dp.datetime.strftime('%d %b %Y')
up['date_pushed_display'] = dp.age(granularity=granularity,
general=True) + ' ago'
# karma
# FIXME: take into account karma from both updates
if group_updates:
k = up['dist_updates'][0]['karma']
else:
k = up['karma']
if k:
up['karma_str'] = "%+d" % k
else:
up['karma_str'] = " %d" % k
up['karma_level'] = 'meh'
if k > 0:
up['karma_level'] = 'good'
if k < 0:
up['karma_level'] = 'bad'
up['details'] = self._get_update_details(up)
return (total_count, updates_list)
def _get_update_details(self, update):
details = ''
if update['status'] == 'stable':
if update.get('updateid'):
details += HTML.tag('a', c=update['updateid'], href='%s/updates/%s' % (
self._prod_url, update['alias']))
if update.get('date_pushed'):
details += HTML.tag('br') + update['date_pushed']
else:
details += 'In process...'
elif update['status'] == 'pending' and update.get('request'):
details += 'Pending push to %s' % update['request']
details += HTML.tag('br')
details += HTML.tag('a', c="View update details >",
href="%s/updates/%s" % (self._prod_url,
update['alias']))
elif update['status'] == 'obsolete':
for comment in update['comments']:
if comment['user']['name'] == 'bodhi':
if comment['text'].startswith('This update has been '
'obsoleted by '):
details += markdown.markdown(
comment['text'], safe_mode="replace")
return details
def _get_update_actions(self, update):
actions = []
if update['request']:
actions.append(('revoke', 'Cancel push'))
else:
if update['status'] == 'testing':
actions.append(('unpush', 'Unpush'))
actions.append(('stable', 'Push to stable'))
if update['status'] == 'pending':
actions.append(('testing', 'Push to testing'))
actions.append(('stable', 'Push to stable'))
return actions
def _group_updates(self, updates, num_packages=None):
"""
Group a list of updates by release.
This method allows allows you to limit the number of packages,
for when we want to display 1 package per row, regardless of how
many updates there are for it.
"""
packages = {}
done = False
i = 0
if not updates:
return []
for update in updates:
for build in update['builds']:
pkg = build['nvr'].rsplit('-', 2)[0]
if pkg not in packages:
if num_packages and i >= num_packages:
done = True
break
packages[pkg] = {
'package_name': pkg,
'dist_updates': list()
}
i += 1
else:
skip = False
for up in packages[pkg]['dist_updates']:
if up['release_name'] == \
update['release']['long_name']:
skip = True
break
if skip:
break
packages[pkg]['dist_updates'].append({
'release_name': update['release']['long_name'],
'version': '-'.join(build['nvr'].split('-')[-2:])
})
packages[pkg]['dist_updates'][-1].update(update)
if done:
break
result = [packages[p] for p in packages]
sort_col = 'date_submitted'
if result[0]['dist_updates'][0]['status'] == 'stable':
sort_col = 'date_pushed'
result = sorted(result, reverse=True,
cmp=lambda x, y: cmp(
x['dist_updates'][0][sort_col],
y['dist_updates'][0][sort_col])
)
return result
def add_updates_to_builds(self, builds):
"""Update a list of koji builds with the corresponding bodhi updates.
This method makes a single query to bodhi, asking if it knows about
any updates for a given list of koji builds. For builds with existing
updates, the `update` will be added to it's dictionary.
Currently it also adds `update_details`, which is HTML for rendering
the builds update options. Ideally, this should be done client-side
in the template (builds/templates/table_widget.mak).
"""
start = datetime.now()
updates = self.call('get_updates_from_builds', {
'builds': ' '.join([b['nvr'] for b in builds])})
if updates:
# FIXME: Lets stop changing the upstream APIs by putting the
# session id as the first element, and the results in the second.
updates = updates[1]
for build in builds:
if build['nvr'] in updates:
build['update'] = updates[build['nvr']]
status = build['update']['status']
details = ''
# FIXME: ideally, we should just return the update JSON and do
# this logic client-side in the template when the grid data
# comes in.
if status == 'stable':
details = 'Pushed to updates'
elif status == 'testing':
details = 'Pushed to updates-testing'
elif status == 'pending':
details = 'Pending push to %s' % build['update']['request']
details += HTML.tag('br')
details += HTML.tag('a', c="View update details >",
href="%s/updates/%s" % (self._prod_url,
build['update']['alias']))
else:
details = HTML.tag('a', c='Push to updates >',
href='%s/new?builds.text=%s' % (
self._prod_url, build['nvr']))
build['update_details'] = details
log.debug(
"Queried bodhi for builds in: %s" % (datetime.now() - start))
@classmethod
def register_query_active_releases(cls):
path = cls.register_query('query_active_releases',
cls.query_active_releases,
cls.query_active_releases_cache_prompt,
primary_key_col='release',
default_sort_col='release',
default_sort_order=-1,
can_paginate=True)
path.register_column('release',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('stable_version',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
path.register_column('testing_version',
default_visible=True,
can_sort=False,
can_filter_wildcards=False)
f = ParamFilter()
f.add_filter('package', ['nvr'], allow_none=False)
cls._query_active_releases = f
def get_all_releases(self):
releases_obj = self.call('releases', {})
releases_all = None
for i in range(1, releases_obj['pages'] + 1):
            if releases_all is None:
releases_all = releases_obj['releases']
continue
temp = self.call('releases?page=' + str(i), {})['releases']
releases_all.extend(temp)
        if releases_all is None:
raise TypeError
return releases_all
def query_active_releases(self, filters=None, **params):
releases = list()
queries = list()
# Mapping of tag -> release
release_tag = dict()
# List of testing builds to query bodhi for
testing_builds = list()
# nvr -> release lookup table
testing_builds_row = dict()
if not filters:
filters = dict()
filters = self._query_updates_filter.filter(filters, conn=self)
package = filters.get('package')
koji = get_connector('koji')._koji_client
koji.multicall = True
releases_all = self.get_all_releases()
releases_all.append({'dist_tag': 'rawhide',
'long_name':'Rawhide',
'stable_tag': 'rawhide',
'testing_tag': 'no_testing_tag_found',
'state': 'current'})
releases_all = sorted(releases_all, key=lambda k: k['dist_tag'], reverse=True)
for release in releases_all:
if release['state'] not in ['current', 'pending']\
or 'Modular' in release['long_name']:
continue
tag = release['dist_tag']
name = release['long_name']
r = {'release': name, 'stable_version': 'None',
'testing_version': 'None'}
if tag == 'rawhide':
koji.listTagged(
tag, package=package, latest=True, inherit=True)
queries.append(tag)
release_tag[tag] = r
else:
stable_tag = release['stable_tag']
testing_tag = release['testing_tag']
koji.listTagged(stable_tag, package=package,
latest=True, inherit=True)
queries.append(stable_tag)
release_tag[stable_tag] = r
koji.listTagged(testing_tag, package=package, latest=True)
queries.append(testing_tag)
release_tag[testing_tag] = r
releases.append(r)
results = koji.multiCall()
for i, result in enumerate(results):
if isinstance(result, dict):
if 'faultString' in result:
log.error("FAULT: %s" % result['faultString'])
else:
log.error("Can't find fault string in result: %s" % result)
else:
query = queries[i]
row = release_tag[query]
release = result[0]
                if query == 'rawhide':
if release:
nvr = parse_build(release[0]['nvr'])
row['stable_version'] = HTML.tag(
'a',
c='%(version)s-%(release)s' % nvr,
href=koji_build_url % nvr)
else:
                    row['stable_version'] = \
                        'No builds tagged with %s' % query
row['testing_version'] = HTML.tag('i', c='Not Applicable')
continue
if release:
release = release[0]
if query.endswith('-testing'):
nvr = parse_build(release['nvr'])
row['testing_version'] = HTML.tag(
'a',
c='%(version)s-%(release)s' % nvr,
href=koji_build_url % nvr)
testing_builds.append(release['nvr'])
testing_builds_row[release['nvr']] = row
else:
# stable
nvr = parse_build(release['nvr'])
row['stable_version'] = HTML.tag(
'a',
c='%(version)s-%(release)s' % nvr,
href=koji_build_url % nvr)
if release['tag_name'].endswith('-updates'):
row['stable_version'] += ' (' + HTML.tag(
'a', c='update',
href='%s/updates/?builds=%s' % (
self._prod_url, nvr['nvr']
)
) + ')'
# If there are updates in testing, then query bodhi with a single call
if testing_builds:
data = self.call('updates', {
'builds': ' '.join(testing_builds)
})
updates = data['updates']
for up in updates:
for build in up['builds']:
if build['nvr'] in testing_builds:
break
else:
continue
build = build['nvr']
if up.karma > 1:
up.karma_icon = 'good'
elif up.karma < 0:
up.karma_icon = 'bad'
else:
up.karma_icon = 'meh'
karma_ico_16 = '/images/16_karma-%s.png' % up.karma_icon
karma_icon_url = \
self._request.environ.get('SCRIPT_NAME', '') + \
karma_ico_16
karma = 'karma_%s' % up.karma_icon
row = testing_builds_row[build]
row['testing_version'] += " " + HTML.tag(
'div',
c=HTML.tag(
'a', href="%s/updates/%s" % (
self._prod_url, up.alias),
c=HTML.tag(
'img', src=karma_icon_url) + HTML.tag(
'span',
c='%s karma' % up.karma)),
**{'class': '%s' % karma})
return (len(releases), releases)
|
fedora-infra/fedora-packages
|
fedoracommunity/connectors/bodhiconnector.py
|
Python
|
agpl-3.0
| 27,347
|
from collatex import *
from xml.dom import pulldom
import re
import glob
from datetime import datetime, date
# import pytz
# from tzlocal import get_localzone
# today = date.today()
# utc_dt = datetime(today, tzinfo=pytz.utc)
# dateTime = utc_dt.astimezone(get_localzone())
# strDateTime = str(dateTime)
now = datetime.utcnow()
nowStr = str(now)
regexWhitespace = re.compile(r'\s+')
regexNonWhitespace = re.compile(r'\S+')
regexEmptyTag = re.compile(r'/>$')
regexBlankLine = re.compile(r'\n{2,}')
regexLeadingBlankLine = re.compile(r'^\n')
regexPageBreak = re.compile(r'<pb.+?/>')
# Element types: xml, div, head, p, hi, pb, note, lg, l; comment()
# Tags to ignore, with content to keep: xml, comment, anchor
# Structural elements: div, p, lg, l
# Inline elements (empty) retained in normalization: pb, milestone, xi:include
# Inline and block elements (with content) retained in normalization: note, hi, head, ab
# GIs fall into one three classes
# 2017-05-21 ebb: Due to trouble with pulldom parsing XML comments, I have converted these to comment elements,
# 2017-05-21 ebb: to be ignored during collation.
# 2017-05-30 ebb: Determined that comment elements cannot really be ignored when they have text nodes (the text is
# 2017-05-30 ebb: collated but the tags are not). Decision to make the comments into self-closing elements with text
# 2017-05-30 ebb: contents as attribute values, and content such as tags simplified to be legal attribute values.
# 2017-05-22 ebb: I've set anchor elements with @xml:ids to be the indicators of collation "chunks" to process together
ignore = ['xml', 'pb', 'comment']
inlineEmpty = ['milestone', 'anchor', 'include']
inlineContent = ['hi']
blockElement = ['p', 'div', 'lg', 'l', 'head', 'note', 'ab', 'cit', 'quote', 'bibl', 'header']
# ebb: Tried removing 'comment', from blockElement list above, because we don't want these to be collated.
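# Illustrative handling (derived from the functions below, not from a real
# run): for the fragment
#   <p>Some <hi>text</hi> here</p>
# extract() keeps inline tags in the stream, puts block tags on their own
# lines, and turns every whitespace run into a newline (the later token
# boundaries), yielding:
#   \n<p>\nSome\n<hi>text</hi>\nhere\n</p>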
def normalizeSpace(inText):
"""Replaces all whitespace spans with single space characters"""
if regexNonWhitespace.search(inText):
return regexWhitespace.sub('\n', inText)
else:
return ''
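# e.g. normalizeSpace('Some  text\n here') == 'Some\ntext\nhere'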
def extract(input_xml):
"""Process entire input XML document, firing on events"""
# Start pulling; it continues automatically
doc = pulldom.parse(input_xml)
output = ''
for event, node in doc:
# elements to ignore: xml
if event == pulldom.START_ELEMENT and node.localName in ignore:
continue
# copy comments intact
elif event == pulldom.COMMENT:
doc.expandNode(node)
output += node.toxml()
# empty inline elements: pb, milestone
elif event == pulldom.START_ELEMENT and node.localName in inlineEmpty:
output += node.toxml()
# non-empty inline elements: note, hi, head, l, lg, div, p, ab,
elif event == pulldom.START_ELEMENT and node.localName in inlineContent:
output += regexEmptyTag.sub('>', node.toxml())
elif event == pulldom.END_ELEMENT and node.localName in inlineContent:
output += '</' + node.localName + '>'
elif event == pulldom.START_ELEMENT and node.localName in blockElement:
output += '\n<' + node.localName + '>\n'
elif event == pulldom.END_ELEMENT and node.localName in blockElement:
output += '\n</' + node.localName + '>'
elif event == pulldom.CHARACTERS:
output += normalizeSpace(node.data)
else:
continue
return output
# def normalize(inputText):
#     return regexPageBreak.sub('', inputText)
def processToken(inputText):
return {"t": inputText + ' ', "n": inputText}
def processWitness(inputWitness, id):
return {'id': id, 'tokens': [processToken(token) for token in inputWitness]}
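# Shape check (direct consequence of the two helpers above):
#   processToken('word') == {'t': 'word ', 'n': 'word'}
#   processWitness(['a', 'b'], 'f1818') ==
#       {'id': 'f1818', 'tokens': [{'t': 'a ', 'n': 'a'}, {'t': 'b ', 'n': 'b'}]}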
for name in glob.glob('collationChunks/1818_fullFlat_*'):
matchString = name.split("fullFlat_", 1)[1]
# ebb: above gets C30.xml for example
matchStr = matchString.split(".", 1)[0]
# ebb: above strips off the file extension
with open(name, 'rb') as f1818file, \
        open('collationChunks/Thomas_fullFlat_' + matchString, 'rb') as f1823file, \
open('collationChunks/1831_fullFlat_' + matchString, 'rb') as f1831file, \
open('textTableOutputTEST/collation_' + matchStr + '.txt', 'w') as outputFile:
f1818_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1818file))).split('\n')
f1823_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1823file))).split('\n')
f1831_tokens = regexLeadingBlankLine.sub('', regexBlankLine.sub('\n', extract(f1831file))).split('\n')
f1818_tokenlist = processWitness(f1818_tokens, 'f1818')
f1823_tokenlist = processWitness(f1823_tokens, 'f1823')
f1831_tokenlist = processWitness(f1831_tokens, 'f1831')
collation_input = {"witnesses": [f1818_tokenlist, f1823_tokenlist, f1831_tokenlist]}
# table = collate(collation_input, output='tei', segmentation=True)
table = collate(collation_input, segmentation=True, layout='vertical')
# print(nowStr + '\n' + table, file=outputFile)
# This yields a TypeError: "Can't convert 'AlignmentTable' object to str implicitly
print(table, file=outputFile)
|
ebeshero/Pittsburgh_Frankenstein
|
collateXPrep/old/collation_to_textTable.py
|
Python
|
agpl-3.0
| 5,280
|
# Copyright 2014-2016 Therp BV <http://therp.nl>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
# pylint: disable=consider-merging-classes-inherited
import logging
from odoo import _, api, fields, models
from odoo.exceptions import AccessDenied
class CleanupPurgeLine(models.AbstractModel):
""" Abstract base class for the purge wizard lines """
_name = 'cleanup.purge.line'
_order = 'name'
_description = 'Purge Column Abstract Wizard'
name = fields.Char('Name', readonly=True)
purged = fields.Boolean('Purged', readonly=True)
wizard_id = fields.Many2one('cleanup.purge.wizard')
logger = logging.getLogger('odoo.addons.database_cleanup')
@api.multi
def purge(self):
raise NotImplementedError
@api.model
def create(self, values):
# make sure the user trying this is actually supposed to do it
if self.env.ref(
'base.group_erp_manager') not in self.env.user.groups_id:
raise AccessDenied
return super(CleanupPurgeLine, self).create(values)
class PurgeWizard(models.AbstractModel):
""" Abstract base class for the purge wizards """
_name = 'cleanup.purge.wizard'
_description = 'Purge stuff'
@api.model
def default_get(self, fields_list):
res = super(PurgeWizard, self).default_get(fields_list)
if 'purge_line_ids' in fields_list:
res['purge_line_ids'] = self.find()
return res
@api.multi
def find(self):
raise NotImplementedError
@api.multi
def purge_all(self):
self.mapped('purge_line_ids').purge()
return True
@api.model
def get_wizard_action(self):
wizard = self.create({})
return {
'type': 'ir.actions.act_window',
'name': wizard.display_name,
'views': [(False, 'form')],
'res_model': self._name,
'res_id': wizard.id,
'flags': {
'action_buttons': False,
'sidebar': False,
},
}
@api.multi
def select_lines(self):
return {
'type': 'ir.actions.act_window',
'name': _('Select lines to purge'),
'views': [(False, 'tree'), (False, 'form')],
'res_model': self._fields['purge_line_ids'].comodel_name,
'domain': [('wizard_id', 'in', self.ids)],
}
@api.multi
def name_get(self):
return [
(this.id, self._description)
for this in self
]
@api.model
def create(self, values):
# make sure the user trying this is actually supposed to do it
if self.env.ref(
'base.group_erp_manager') not in self.env.user.groups_id:
raise AccessDenied
return super(PurgeWizard, self).create(values)
purge_line_ids = fields.One2many('cleanup.purge.line', 'wizard_id')
|
Vauxoo/server-tools
|
database_cleanup/models/purge_wizard.py
|
Python
|
agpl-3.0
| 2,925
|
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors:
# Caner Candan <caner@candan.fr>, http://caner.candan.fr
# Geraldine Starke <geraldine@starke.fr>, http://www.vegeclic.fr
#
from django import forms
from django.contrib.contenttypes.models import ContentType
from . import models
class ModelFormWithImage(forms.ModelForm):
main_image = 'main_image'
def __init__(self, *args, **kwargs):
super(ModelFormWithImage, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
self.fields[self.main_image].queryset = models.Image.objects.filter(object_id=kwargs['instance'].id)
class ModelFormWithCurrency(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ModelFormWithCurrency, self).__init__(*args, **kwargs)
if 'instance' not in kwargs:
self.initial['currency'] = models.Parameter.objects.get(name='default currency').object_id
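# Hypothetical usage sketch (not in the original module): a form combining the
# two mixins above for a model that has a 'main_image' relation and a
# 'currency' field. 'Product' is an assumed model name.
#
# class ProductForm(ModelFormWithImage, ModelFormWithCurrency):
#     class Meta:
#         model = models.Product
#         fields = ('name', 'main_image', 'currency',)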
class ParameterForm(forms.ModelForm):
class Meta:
model = models.Parameter
fields = ('site', 'name', 'content_type', 'object_id',)
object_id = forms.ChoiceField()
def __init__(self, *args, **kwargs):
super(ParameterForm, self).__init__(*args, **kwargs)
content_type = self.initial.get('content_type')
object_id = self.initial.get('object_id')
object_id_field = self.fields.get('object_id')
object_id_field.choices = [(o.id, o) for o in ContentType.objects.get(pk=content_type).get_all_objects_for_this_type()]
object_id_field.initial = object_id
class ParameterCreationForm(forms.ModelForm):
class Meta:
model = models.Parameter
fields = ('site', 'name', 'content_type',)
| vegeclic/django-regularcom | common/forms.py | Python | agpl-3.0 | 2,325 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import pytz
from openerp import SUPERUSER_ID
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp import netsvc
from openerp import pooler
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
cr.execute("""update purchase_order_line set
date_planned=%s
where
order_id=%s and
(date_planned=%s or date_planned<%s)""", (value,po.id,po.minimum_planned_date,value))
cr.execute("""update purchase_order set
minimum_planned_date=%s where id=%s""", (value, po.id))
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.purchase_id,sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
stock_picking p on (p.id=m.picking_id)
WHERE
p.purchase_id IN %s GROUP BY m.state, p.purchase_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
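    # Worked example with illustrative figures: a purchase order whose moves
    # aggregate to 4 units in state 'done' and 6 units in other non-cancelled
    # states gets res[id] = [4.0, 10.0], i.e. a received ratio of
    # 100.0 * 4 / 10 = 40.0.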
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
invoiced = False
if purchase.invoiced_rate == 100.00:
invoiced = True
res[purchase.id] = invoiced
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ Sent'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Order'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'approved',
},
}
_columns = {
'name': fields.char('Order Reference', size=64, required=True, select=True, help="Unique number of the purchase order, computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that generated this purchase order request; a sales order or an internal procurement request."
),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, size=64,
help="Reference of the sales order or quotation sent by your supplier. It's mainly used to do the matching when you receive the products as this reference is usually written on the delivery order sent by your supplier."),
'date_order':fields.date('Order Date', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}, select=True, help="Date on which this document has been created."),
'date_approve':fields.date('Date Approved', readonly=1, select=True, help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]},
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'warehouse_id': fields.many2one('stock.warehouse', 'Destination Warehouse'),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]} ),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)],'done':[('readonly',True)]}, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency",readonly=True, required=True),
        'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="The status of the purchase order or the quotation request. A quotation is a purchase order in a 'Draft' status. Then the order has to be confirmed by the user and the status switches to 'Confirmed'. Then the supplier must confirm the order to change the status to 'Approved'. When the purchase order is paid and received, the status becomes 'Done'. If a cancel action occurs in the invoice or in the reception of goods, the status goes into exception.", select=True),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines', states={'approved':[('readonly',True)],'done':[('readonly',True)]}),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id', 'invoice_id', 'Invoices', help="Invoices generated for a purchase order"),
'picking_ids': fields.one2many('stock.picking.in', 'purchase_id', 'Picking List', readonly=True, help="This is the list of incoming shipments that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', help="It indicates that an invoice has been paid"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control > Based on P.O. lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Bases on incoming shipments: let you create an invoice when receptions are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute= dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums",help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'product_id': fields.related('order_line','product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company','Company',required=True,select=1, states={'confirmed':[('readonly',True)], 'approved':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
}
_defaults = {
'date_order': fields.date.context_today,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = "name desc"
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
order = super(purchase_order, self).create(cr, uid, vals, context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
wf_service = netsvc.LocalService("workflow")
for id in unlink_ids:
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
def onchange_dest_address_id(self, cr, uid, ids, address_id):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {'warehouse_id': False}
supplier = address.browse(cr, uid, address_id)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id):
if not warehouse_id:
return {}
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id)
return {'value':{'location_id': warehouse.lot_input_id.id, 'dest_address_id': False}}
def onchange_partner_id(self, cr, uid, ids, partner_id):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'])
supplier = partner.browse(cr, uid, partner_id)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': supplier.property_account_position and supplier.property_account_position.id or False,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given purchase order ids, either in a list view or in a form view if there is only one invoice to show.
'''
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line]})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing picking orders of the given purchase order ids.
'''
mod_obj = self.pool.get('ir.model.data')
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
action_model, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree4'))
action = self.pool.get(action_model).read(cr, uid, action_id, context=context)
ctx = eval(action['context'])
ctx.update({
'search_default_purchase_id': ids[0]
})
if pick_ids and len(pick_ids) == 1:
form_view_ids = [view_id for view_id, view in action['views'] if view == 'form']
view_id = form_view_ids and form_view_ids[0] or False
action.update({
'views': [],
'view_mode': 'form',
'view_id': view_id,
'res_id': pick_ids[0]
})
action.update({
'context': ctx,
})
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def print_confirm(self,cr,uid,ids,context=None):
print "Confirmed"
def print_double(self,cr,uid,ids,context=None):
print "double Approval"
def print_router(self,cr,uid,ids,context=None):
print "Routed"
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the request for quotation and marks it as sent, so that the next step of the workflow is easier to follow.
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', ids[0], 'send_rfq', cr)
datas = {
'model': 'purchase.order',
'ids': ids,
'form': self.read(cr, uid, ids[0], context=context),
}
return {'type': 'ir.actions.report.xml', 'report_name': 'purchase.quotation', 'datas': datas, 'nodestroy': True}
#TODO: implement messages system
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not po.order_line:
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
}
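    # Illustrative result (hypothetical line): a PO line named 'Widget' with
    # qty 5, unit price 3.0 and no taxes yields
    # {'name': 'Widget', 'quantity': 5, 'price_unit': 3.0,
    #  'invoice_line_tax_id': [(6, 0, [])], ...} with the account_id passed in.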
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
wf_service = netsvc.LocalService("workflow")
for p_id in ids:
# Deleting the existing instance of workflow for PO
wf_service.trg_delete(uid, 'purchase.order', p_id, cr)
wf_service.trg_create(uid, 'purchase.order', p_id, cr)
return True
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
res = False
journal_obj = self.pool.get('account.journal')
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
for order in self.browse(cr, uid, ids, context=context):
pay_acc_id = order.partner_id.property_account_payable.id
journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
            # generate invoice lines corresponding to the PO lines and link them to the created invoice (inv_id) and to the PO lines
inv_lines = []
for po_line in order.order_line:
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define expense account for this company: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id
fpos = order.fiscal_position or False
acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)
# get invoice data and create invoice
inv_data = {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': pay_acc_id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.pricelist_id.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, inv_lines)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]}, context=context)
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def action_cancel(self, cr, uid, ids, context=None):
wf_service = netsvc.LocalService("workflow")
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
if pick.state not in ('draft','cancel'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('First cancel all receptions related to this purchase order.'))
for pick in purchase.picking_ids:
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_cancel', cr)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel','draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all receptions related to this purchase order.'))
if inv:
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'invoice_cancel', cr)
self.write(cr,uid,ids,{'state':'cancel'})
for (id, name) in self.name_get(cr, uid, ids):
wf_service.trg_validate(uid, 'purchase.order', id, 'purchase_cancel', cr)
return True
def date_to_datetime(self, cr, uid, userdate, context=None):
""" Convert date values expressed in user's timezone to
        server-side UTC timestamp, assuming a default arbitrary
        time of noon (12 hours are added) - because a time is needed.
        :param str userdate: date string in the user's time zone
:return: UTC datetime string for server-side use
"""
# TODO: move to fields.datetime in server after 7.0
user_date = datetime.strptime(userdate, DEFAULT_SERVER_DATE_FORMAT)
if context and context.get('tz'):
tz_name = context['tz']
else:
tz_name = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
if tz_name:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
user_datetime = user_date + relativedelta(hours=12.0)
local_timestamp = context_tz.localize(user_datetime, is_dst=False)
user_datetime = local_timestamp.astimezone(utc)
return user_datetime.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return user_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
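    # Worked example (illustrative): with context {'tz': 'Europe/Brussels'}
    # (UTC+2 in summer), userdate '2013-05-01' becomes local noon
    # 2013-05-01 12:00, which is returned as the UTC string
    # '2013-05-01 10:00:00'.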
def _prepare_order_picking(self, cr, uid, order, context=None):
return {
'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.picking.in'),
'origin': order.name + ((order.origin and (':' + order.origin)) or ''),
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'partner_id': order.dest_address_id.id or order.partner_id.id,
'invoice_state': '2binvoiced' if order.invoice_method == 'picking' else 'none',
'type': 'in',
'purchase_id': order.id,
'company_id': order.company_id.id,
'move_lines' : [],
}
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, context=None):
return {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_qty': order_line.product_qty,
'product_uos_qty': order_line.product_qty,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': self.date_to_datetime(cr, uid, order.date_order, context),
'date_expected': self.date_to_datetime(cr, uid, order.date_order, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id or order.partner_id.id,
'move_dest_id': order_line.move_dest_id.id,
'state': 'draft',
'type':'in',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': order_line.price_unit
}
def _create_pickings(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates pickings and appropriate stock moves for given order lines, then
confirms the moves, makes them available, and confirms the picking.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise
        a standard incoming picking will be created to wrap the stock moves, as returned
by :meth:`~._prepare_order_picking`.
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: list of IDs of pickings used/created for the given order lines (usually just one)
"""
if not picking_id:
picking_id = self.pool.get('stock.picking').create(cr, uid, self._prepare_order_picking(cr, uid, order, context=context))
todo_moves = []
stock_move = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
for order_line in order_lines:
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
move = stock_move.create(cr, uid, self._prepare_order_line_move(cr, uid, order, order_line, picking_id, context=context))
if order_line.move_dest_id:
order_line.move_dest_id.write({'location_id': order.location_id.id})
todo_moves.append(move)
stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
return [picking_id]
def action_picking_create(self, cr, uid, ids, context=None):
picking_ids = []
for order in self.browse(cr, uid, ids):
picking_ids.extend(self._create_pickings(cr, uid, order, order.order_line, None, context=context))
# Must return one unique picking ID: the one to connect in the subflow of the purchase order.
# In case of multiple (split) pickings, we should return the ID of the critical one, i.e. the
# one that should trigger the advancement of the purchase workflow.
# By default we will consider the first one as most important, but this behavior can be overridden.
return picking_ids[0] if picking_ids else False
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({
'state':'draft',
'shipped':False,
'invoiced':False,
'invoice_ids': [],
'picking_ids': [],
'name': self.pool.get('ir.sequence').get(cr, uid, 'purchase.order'),
})
return super(purchase_order, self).copy(cr, uid, id, default, context)
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location and the same pricelist
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
        #TOFIX: merged order lines should be unlinked
wf_service = netsvc.LocalService("workflow")
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'move_dest_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
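        # Illustrative key (made-up ids): two draft orders sharing partner 7,
        # location 12 and pricelist 3 both produce
        # (('location_id', 12), ('partner_id', 7), ('pricelist_id', 3)),
        # so they land in the same merge bucket below.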
# Compute what the new orders should contain
new_orders = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'warehouse_id': porder.warehouse_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
for order_line in porder.order_line:
line_key = make_key(order_line, ('name', 'date_planned', 'taxes_id', 'price_unit', 'product_id', 'move_dest_id', 'account_analytic_id'))
o_line = order_infos['order_line'].setdefault(line_key, {})
if o_line:
# merge the line with an existing line
o_line['product_qty'] += order_line.product_qty * order_line.product_uom.factor / o_line['uom_factor']
else:
# append a new "standalone" line
for field in ('product_qty', 'product_uom'):
field_val = getattr(order_line, field)
if isinstance(field_val, browse_record):
field_val = field_val.id
o_line[field] = field_val
o_line['uom_factor'] = order_line.product_uom and order_line.product_uom.factor or 1.0
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(0, 0, value) for value in order_data['order_line'].itervalues()]
# create the new order
neworder_id = self.create(cr, uid, order_data)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
wf_service.trg_redirect(uid, 'purchase.order', old_id, neworder_id, cr)
wf_service.trg_validate(uid, 'purchase.order', old_id, 'purchase_cancel', cr)
return orders_info
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'move_dest_id': fields.many2one('stock.move', 'Reservation Destination', ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', required=True, readonly=True,
            help=' * The \'Draft\' status is set automatically when the purchase order is in the draft state. \
                \n* The \'Confirmed\' status is set automatically when the purchase order is confirmed. \
                \n* The \'Done\' status is set automatically when the purchase order is set as done. \
                \n* The \'Cancelled\' status is set automatically when the user cancels the purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True),
'invoiced': fields.boolean('Invoiced', readonly=True),
'partner_id': fields.related('order_id','partner_id',string='Partner',readonly=True,type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id','date_order',string='Order Date',readonly=True,type="date")
}
_defaults = {
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def copy_data(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
default.update({'state':'draft', 'move_ids':[],'invoiced':0,'invoice_lines':[]})
return super(purchase_order_line, self).copy_data(cr, uid, id, default, context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_uom.
"""
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order, as a string in
DEFAULT_SERVER_DATE_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATE_FORMAT) + relativedelta(days=supplier_delay)
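    # Worked example (illustrative): a supplierinfo with delay = 5 and a
    # date_order_str of '2013-05-01' yields datetime(2013, 5, 6, 0, 0).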
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_supplierinfo = self.pool.get('product.supplierinfo')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
name = product.name
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.date.context_today(self,cr,uid,context=context)
supplierinfo = False
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                    if (qty or 0.0) < min_qty: # If the supplier's minimal quantity is greater than the quantity entered by the user, fall back to the minimal.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
# - determine price_unit and taxes_id
if pricelist_id:
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
purchase_order_line()
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_id': fields.many2one('purchase.order', 'Purchase Order'),
}
def check_buy(self, cr, uid, ids, context=None):
        ''' Return True if the supply method of the MTO product is 'buy'
'''
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
            if procurement.product_id.supply_method != 'buy':
return False
return True
def check_supplier_info(self, cr, uid, ids, context=None):
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
self.message_post(cr, uid, [procurement.id], body=message)
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
            partner = procurement.product_id.seller_id # Take the main supplier of the procurement's product.
if not partner:
message = _('No default supplier defined for this product')
self.message_post(cr, uid, [procurement.id], body=message)
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                    raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
address_id = partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']
if not address_id:
message = _('No address defined for the supplier')
self.message_post(cr, uid, [procurement.id], body=message)
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
return True
def action_po_assign(self, cr, uid, ids, context=None):
""" This is action which call from workflow to assign purchase order to procurements
@return: True
"""
res = self.make_po(cr, uid, ids, context=context)
res = res.values()
        return len(res) and res[0] or 0 #TO CHECK: why the workflow raises an error if the return value is not an integer
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
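    # Illustrative call shape (hypothetical values): po_vals comes from make_po
    # below and line_vals describes the single order line, e.g.
    # self.create_procurement_purchase_order(cr, uid, procurement,
    #     {'partner_id': 7, 'pricelist_id': 3, ...},
    #     {'product_id': 42, 'product_qty': 10.0, ...})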
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
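    # Worked example (illustrative): with company.po_lead = 3 and a procurement
    # planned for 2013-05-10 12:00, _get_purchase_schedule_date returns
    # 2013-05-07 12:00; a seller_delay of 2 then puts the order date at
    # 2013-05-05 12:00.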
def make_po(self, cr, uid, ids, context=None):
""" Make purchase order from procurement
@return: New created Purchase Orders procurement wise
"""
res = {}
if context is None:
context = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
partner_obj = self.pool.get('res.partner')
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
seq_obj = self.pool.get('ir.sequence')
warehouse_obj = self.pool.get('stock.warehouse')
for procurement in self.browse(cr, uid, ids, context=context):
res_id = procurement.move_id.id
            partner = procurement.product_id.seller_id # Take the main supplier of the procurement's product.
seller_qty = procurement.product_id.seller_qty
partner_id = partner.id
address_id = partner_obj.address_get(cr, uid, [partner_id], ['delivery'])['delivery']
pricelist_id = partner.property_product_pricelist_purchase.id
warehouse_id = warehouse_obj.search(cr, uid, [('company_id', '=', procurement.company_id.id or company.id)], context=context)
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty,seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner_id, {'uom': uom_id})[pricelist_id]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
            #Pass partner_id in the context so the purchase order line name is rendered in the supplier's language
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner_id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
taxes = acc_pos_obj.map_tax(cr, uid, partner.property_account_position, taxes_ids)
name = product.partner_ref
if product.description_purchase:
name += '\n'+ product.description_purchase
line_vals = {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'move_dest_id': res_id,
'taxes_id': [(6,0,taxes)],
}
name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner_id,
'location_id': procurement.location_id.id,
'warehouse_id': warehouse_id and warehouse_id[0] or False,
'pricelist_id': pricelist_id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': partner.property_account_position and partner.property_account_position.id or False,
'payment_term_id': partner.property_supplier_payment_term.id or False,
}
res[procurement.id] = self.create_procurement_purchase_order(cr, uid, procurement, po_vals, line_vals, context=new_context)
self.write(cr, uid, [procurement.id], {'state': 'running', 'purchase_id': res[procurement.id]})
self.message_post(cr, uid, ids, body=_("Draft Purchase Order created"), context=context)
return res
def _product_virtual_get(self, cr, uid, order_point):
procurement = order_point.procurement_id
if procurement and procurement.state != 'exception' and procurement.purchase_id and procurement.purchase_id.state in ('draft', 'confirmed'):
return None
return super(procurement_order, self)._product_virtual_get(cr, uid, order_point)
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None):
if mail.model == 'purchase.order':
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', mail.res_id, 'send_rfq', cr)
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
}
_defaults = {
'purchase_ok': 1,
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'purchase.order', context['default_res_id'], 'send_rfq', cr)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Johnzero/OE7 | openerp/addons-modules/purchase/purchase.py | Python | agpl-3.0 | 64,117 |
"""
Unittests for the opal.core.discoverable module
"""
from mock import patch
from django.test import override_settings
from opal.core import exceptions
from opal.core.test import OpalTestCase
from opal.utils import AbstractBase
from opal.core import discoverable
class NewOpalObjectType(object):
pass
class NewOpalObjectClass(NewOpalObjectType):
pass
class MyPassingFeature(discoverable.DiscoverableFeature):
pass
class WatFeature(discoverable.DiscoverableFeature):
display_name = 'wat'
module_name = 'wat'
class SlugFeature(discoverable.DiscoverableFeature):
module_name = 'sluggy'
class MySlugFeature(SlugFeature):
slug = 'my-slug'
display_name = 'My Slug Defined Slug'
class ColourFeature(discoverable.DiscoverableFeature):
module_name = 'colours'
class BlueColour(ColourFeature):
display_name = 'Blue'
class RedColour(ColourFeature):
display_name = 'Red'
class SeaGreenColour(ColourFeature):
display_name = 'Sea Green'
class BombFeature(discoverable.DiscoverableFeature):
module_name = 'bombs'
blow_up = False
@classmethod
def is_valid(klass):
        if klass.blow_up:
from opal.core.exceptions import InvalidDiscoverableFeatureError
raise InvalidDiscoverableFeatureError('BLOWING UP')
class Threat(BombFeature): pass
class AppImporterTestCase(OpalTestCase):
@override_settings(INSTALLED_APPS=("opal",))
@patch("opal.core.discoverable.stringport")
def test_class_import(self, stringport_mock):
classes_1 = discoverable.get_subclass("someModule", NewOpalObjectType)
classes_2 = discoverable.get_subclass("someModule", NewOpalObjectType)
self.assertEqual([i for i in classes_1], [NewOpalObjectClass])
self.assertEqual([i for i in classes_2], [NewOpalObjectClass])
# should only be called once because we should only import once
self.assertEqual(stringport_mock.call_count, 1)
@override_settings(INSTALLED_APPS=("opal",))
@patch("opal.core.discoverable.stringport")
def test_importerror_no_module(self, stringport_mock):
discoverable.import_from_apps('notarealmodule')
        # stringport should be called, but the ImportError it raises is suppressed
self.assertEqual(stringport_mock.call_count, 1)
@override_settings(INSTALLED_APPS=("opal",))
@patch("opal.core.discoverable.stringport")
def test_importerror_error_in_target_module(self, stringport_mock):
with self.assertRaises(ImportError):
stringport_mock.side_effect = ImportError('cannot import thing inside your target module')
discoverable.import_from_apps('blowingupmodule')
self.assertEqual(stringport_mock.call_count, 1)
class DiscoverableFeatureTestCase(OpalTestCase):
    def test_valid_subclass_does_not_blow_up(self):
        # We only care that the Threat class defined above did not raise an
        # exception when it was declared.
        self.assertTrue(True)
    def test_is_valid_will_blow_up(self):
        with self.assertRaises(exceptions.InvalidDiscoverableFeatureError):
            class Detonate(BombFeature):
                blow_up = True
def test_slug_for_no_implementation(self):
with self.assertRaises(ValueError):
MyPassingFeature.get_slug()
def test_slug_for_implementation(self):
self.assertEqual('wat', WatFeature.get_slug())
def test_slug_for_subclass(self):
self.assertEqual('red', RedColour.get_slug())
def test_slug_for_multi_word_name(self):
self.assertEqual('sea_green', SeaGreenColour.get_slug())
def test_slug_for_overriden_slug_property(self):
self.assertEqual('my-slug', MySlugFeature.get_slug())
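    # A summary sketch (not an original test) of the slug rules exercised
    # above, assuming opal derives slugs from display_name unless an explicit
    # ``slug`` attribute is set:
    #
    #     WatFeature.get_slug()      # 'wat'       (single word)
    #     SeaGreenColour.get_slug()  # 'sea_green' (words joined with '_')
    #     MySlugFeature.get_slug()   # 'my-slug'   (explicit slug wins)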
def test_list_for_no_implementation(self):
with self.assertRaises(ValueError):
MyPassingFeature.list()
def test_list_no_subclasses(self):
self.assertEqual([], list(WatFeature.list()))
def test_list_subclasses(self):
subs = list(ColourFeature.list())
self.assertEqual(3, len(subs))
for s in [BlueColour, RedColour, SeaGreenColour]:
self.assertIn(s, subs)
def test_list_invalid_subclasses(self):
self.assertEqual([Threat], list(BombFeature.list()))
def test_get_not_a_thing(self):
with self.assertRaises(ValueError):
ColourFeature.get('border_collie')
def test_get_exists(self):
self.assertEqual(RedColour, ColourFeature.get('red'))
def test_abstract_discoverable(self):
class A(discoverable.DiscoverableFeature):
module_name = 'a'
class AA(A, AbstractBase):
pass
class B(A):
pass
class C(B, AbstractBase):
pass
class D(C):
pass
class E(AA):
pass
        results = set(A.list())
self.assertEqual(results, set([B, D, E]))
class SortedFeature(discoverable.SortableFeature,
discoverable.DiscoverableFeature):
module_name = 'sorted'
class Sorted2(SortedFeature):
order = 2
class Sorted3(SortedFeature):
order = 3
class Sorted1(SortedFeature):
order = 1
class SortableFeatureTestCase(OpalTestCase):
def test_list_respects_order(self):
expected = [Sorted1, Sorted2, Sorted3]
self.assertEqual(expected, list(SortedFeature.list()))
def test_sortable_without_module_name(self):
class Nope(discoverable.SortableFeature): pass
with self.assertRaises(ValueError):
Nope.list()
class SometimesFeature(discoverable.DiscoverableFeature, discoverable.RestrictableFeature):
module_name = 'sometimes'
class Available(SometimesFeature): pass
class Unavailable(SometimesFeature):
    @classmethod
    def visible_to(cls, user):
        return False
class RestrictableFeatureTestCase(OpalTestCase):
def test_restricted(self):
expected = [Available]
self.assertEqual(expected, list(SometimesFeature.for_user(self.user)))
|
khchine5/opal
|
opal/tests/test_core_discoverable.py
|
Python
|
agpl-3.0
| 5,937
|
# -*- coding: utf-8 -*-
from distutils.core import setup
import py2exe
import glob
import sys
data_files = [
    (r'graphics', glob.glob('../graphics/*.glade')),
    (r'graphics\images', glob.glob('../graphics/images/*.png')),
    (r'graphics\images', glob.glob('../graphics/images/*.gif')),
    (r'graphics\images', glob.glob('../graphics/images/*.jpg')),
(r'graphics\fullcalendar', glob.glob('../graphics/fullcalendar/*.*')),
(r'graphics\fullcalendar\jquery', glob.glob('../graphics/fullcalendar/jquery/*.*')),
(r'graphics\fullcalendar\theme\default', glob.glob('../graphics/fullcalendar/theme/default/*.*')),
(r'graphics\fullcalendar\theme\blue', glob.glob('../graphics/fullcalendar/theme/blue/*.*')),
(r'graphics\fullcalendar\theme\lightness', glob.glob('../graphics/fullcalendar/theme/lightness/*.*')),
(r'graphics\fullcalendar\theme\pepper', glob.glob('../graphics/fullcalendar/theme/pepper/*.*')),
(r'graphics\fullcalendar\theme\eggplant', glob.glob('../graphics/fullcalendar/theme/eggplant/*.*')),
(r'graphics\fullcalendar\theme\mint', glob.glob('../graphics/fullcalendar/theme/mint/*.*')),
(r'graphics\fullcalendar\theme\default\images', glob.glob('../graphics/fullcalendar/theme/default/images/*.*')),
(r'graphics\fullcalendar\theme\blue\images', glob.glob('../graphics/fullcalendar/theme/blue/images/*.*')),
    (r'graphics\fullcalendar\theme\lightness\images', glob.glob('../graphics/fullcalendar/theme/lightness/images/*.*')),
(r'graphics\fullcalendar\theme\pepper\images', glob.glob('../graphics/fullcalendar/theme/pepper/images/*.*')),
(r'graphics\fullcalendar\theme\eggplant\images', glob.glob('../graphics/fullcalendar/theme/eggplant/images/*.*')),
(r'graphics\fullcalendar\theme\mint\images', glob.glob('../graphics/fullcalendar/theme/mint/images/*.*'))]
ico_file = '../windows/icon_planning.ico'
setup(name='Timetableasy',
version='1.0',
author='Fabien ROMANO, Léo STEVENIN, Denis DESCHAUX-BLANC, Sébastien BILLION',
data_files=data_files,
console = [{
"script" :'main.py',
"icon_resources" : [(0, ico_file)]
}]
)
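# Assumed usage (the standard py2exe invocation, not stated in this file):
#
#     python setup.py py2exe
#
# which builds the console executable from main.py into .\dist and copies
# each data_files entry alongside it.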
|
SBillion/timetableasy
|
build/windows/setup.py
|
Python
|
agpl-3.0
| 2,051
|
# coding: UTF-8
from django.contrib.auth.models import User
from const.models import UserIdentity
from registration.models import RegistrationManager
def sendemail(request, username, password, email, identity, person_firstname, send_email=True, **kwargs):
    # If no matching account exists yet, create an inactive user (and send
    # the activation email); otherwise return False immediately.
    if not AuthUserExist(username, identity, person_firstname):
        RegistrationManager().create_inactive_user(request, username, password, email, identity, person_firstname, send_email, **kwargs)
        return True
    else:
        return False
def AuthUserExist(username, identity, person_firstname):
    if User.objects.filter(username=username).count():
        user_obj = User.objects.get(username=username)
        ui_obj = UserIdentity.objects.get(identity=identity)
        if ui_obj.auth_groups.filter(id=user_obj.id).count():
            return True
        elif user_obj.first_name != person_firstname:
            return True
        else:
            return False
    else:
        return False
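# A hedged usage sketch (all values illustrative, not from this module):
#
#     created = sendemail(request, 'jdoe', 's3cret', 'jdoe@example.com',
#                         'teacher', 'Jane')
#     # created is True when no matching account existed and an inactive
#     # user (pending activation by email) was created, False otherwise.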
|
DutBright/scientificResearch
|
common/sendEmail.py
|
Python
|
agpl-3.0
| 1,006
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2016 Noviat nv/sa (www.noviat.com)
# Copyright© 2016 ICTSTUDIO <http://www.ictstudio.eu>
# License: AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from openerp import api, fields, models
class AccountBankStatementLine(models.Model):
_inherit = 'account.bank.statement.line'
@api.model
def _default_cost_center(self):
return self.env['account.cost.center'].browse(
self._context.get('cost_center_id', None))
cost_center_id = fields.Many2one(
comodel_name='account.cost.center',
string='Cost Center',
default=_default_cost_center
)
@api.model
def get_statement_line_for_reconciliation(self, st_line):
data = super(AccountBankStatementLine, self).\
get_statement_line_for_reconciliation(st_line)
data['cost_center_id'] = st_line.cost_center_id.id
return data
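# A hedged usage sketch (cc and vals are assumed to exist): the default cost
# centre is read from the context, so a line created this way picks up
# cost_center_id without passing it explicitly.
#
#     Line = env['account.bank.statement.line']
#     line = Line.with_context(cost_center_id=cc.id).create(vals)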
|
ICTSTUDIO/accounting-addons
|
account_bank_statement_cost_center/models/account_bank_statement_line.py
|
Python
|
agpl-3.0
| 938
|
import json
from typing import List
from baton._baton.json import DataObjectJSONDecoder
from baton.models import DataObject
from mcheck.metadata.irods_metadata.file_metadata import IrodsSeqFileMetadata
IRODS_METADATA_LEGACY_LIBRARY_ID_PROPERTY = "library"
IRODS_METADATA_LIBRARY_ID_PROPERTY = "library_id"
IRODS_METADATA_REFERENCE_PROPERTY = "reference"
IRODS_METADATA_TARGET_PROPERTY = "target"
IRODS_ORIGINAL_REPLICA_NUMBER = 0
def convert_json_to_baton_objs(data_objects_as_json_string: str) -> List[DataObject]:
    """Decodes baton's JSON serialisation into a list of baton ``DataObject``s."""
    decoded = json.loads(data_objects_as_json_string, cls=DataObjectJSONDecoder)
    if isinstance(decoded, DataObject):
        decoded = [decoded]
    return decoded
def parse_data_objects(data_objects_as_json_string: str) -> List[IrodsSeqFileMetadata]:
"""
Parses the given data object(s) in the JSON serialised form, defined by baton, into representations that are used
internally.
:param data_objects_as_json_string: the data object(s) as a JSON string
:return: the internal representation of the data objects
"""
decoded = json.loads(data_objects_as_json_string, cls=DataObjectJSONDecoder)
if isinstance(decoded, DataObject):
decoded = [decoded]
return [convert_data_object(data_object) for data_object in decoded]
def convert_data_object(data_object: DataObject) -> IrodsSeqFileMetadata:
"""
Parses the given data object from iRODS into the representation used internally.
:param data_object: data object from iRODS, retrieved via baton wrapper
:return: internal representation of iRODS metadata
"""
path = data_object.path
if data_object.replicas is not None:
        # Assuming that replica number `IRODS_ORIGINAL_REPLICA_NUMBER` is the first replica that is created
original_replica = data_object.replicas.get_by_number(IRODS_ORIGINAL_REPLICA_NUMBER)
checksum_at_upload = original_replica.checksum if original_replica is not None else None
else:
checksum_at_upload = None
metadata = data_object.metadata
if metadata is None:
return IrodsSeqFileMetadata(path, checksum_at_upload=checksum_at_upload)
references = metadata.get(IRODS_METADATA_REFERENCE_PROPERTY)
target = list(metadata.get(IRODS_METADATA_TARGET_PROPERTY, default={None}))[0]
# TODO: Add other conversions
if IRODS_METADATA_LIBRARY_ID_PROPERTY in metadata:
libraries = metadata[IRODS_METADATA_LIBRARY_ID_PROPERTY]
elif IRODS_METADATA_LEGACY_LIBRARY_ID_PROPERTY in metadata:
libraries = metadata[IRODS_METADATA_LEGACY_LIBRARY_ID_PROPERTY]
else:
libraries = None
return IrodsSeqFileMetadata(path, references=references, libraries=libraries, checksum_at_upload=checksum_at_upload,
target=target)
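# A hedged usage sketch (read_baton_output is an assumed helper returning
# baton's JSON serialisation of one or more data objects):
#
#     baton_json = read_baton_output()
#     for meta in parse_data_objects(baton_json):
#         print(meta.path, meta.checksum_at_upload)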
|
wtsi-hgi/metadata-check
|
mcheck/main/input_parser.py
|
Python
|
agpl-3.0
| 2,802
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum, EnumIntegerField
from filer.fields.image import FilerImageField
from jsonfield import JSONField
from parler.models import TranslatedFields
from shuup.core.fields import CurrencyField, InternalIdentifierField
from shuup.core.pricing import TaxfulPrice, TaxlessPrice
from shuup.utils.analog import define_log_model
from ._base import ChangeProtected, TranslatableShuupModel
from ._orders import Order
def _get_default_currency():
return settings.SHUUP_HOME_CURRENCY
class ShopStatus(Enum):
DISABLED = 0
ENABLED = 1
class Labels:
DISABLED = _('disabled')
ENABLED = _('enabled')
@python_2_unicode_compatible
class Shop(ChangeProtected, TranslatableShuupModel):
protected_fields = ["currency", "prices_include_tax"]
change_protect_message = _("The following fields cannot be changed since there are existing orders for this shop")
identifier = InternalIdentifierField(unique=True)
domain = models.CharField(max_length=128, blank=True, null=True, unique=True, verbose_name=_("domain"), help_text=_(
"Your shop domain name. Use this field to configure the URL that is used to visit your site. "
"Note: this requires additional configuration through your internet domain registrar."
))
status = EnumIntegerField(ShopStatus, default=ShopStatus.DISABLED, verbose_name=_("status"), help_text=_(
"Your shop status. Disable your shop if it is no longer in use."
))
owner = models.ForeignKey("Contact", blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_("contact"))
options = JSONField(blank=True, null=True, verbose_name=_("options"))
currency = CurrencyField(default=_get_default_currency, verbose_name=_("currency"), help_text=_(
"The primary shop currency. This is the currency used when selling your products."
))
prices_include_tax = models.BooleanField(default=True, verbose_name=_("prices include tax"), help_text=_(
"This option defines whether product prices entered in admin include taxes. "
"Note this behavior can be overridden with contact group pricing."
))
logo = FilerImageField(verbose_name=_("logo"), blank=True, null=True, on_delete=models.SET_NULL)
maintenance_mode = models.BooleanField(verbose_name=_("maintenance mode"), default=False, help_text=_(
"Check this if you would like to make your shop temporarily unavailable while you do some shop maintenance."
))
contact_address = models.ForeignKey(
"MutableAddress", verbose_name=_("contact address"), blank=True, null=True, on_delete=models.SET_NULL)
translations = TranslatedFields(
name=models.CharField(max_length=64, verbose_name=_("name"), help_text=_(
"The shop name. This name is displayed throughout admin."
)),
public_name=models.CharField(max_length=64, verbose_name=_("public name"), help_text=_(
"The public shop name. This name is displayed in the store front and in any customer email correspondence."
)),
maintenance_message=models.CharField(
max_length=300, blank=True, verbose_name=_("maintenance message"), help_text=_(
"The message to display to customers while your shop is in maintenance mode."
)
)
)
def __str__(self):
return self.safe_translation_getter("name", default="Shop %d" % self.pk)
def create_price(self, value):
"""
Create a price with given value and settings of this shop.
Takes the ``prices_include_tax`` and ``currency`` settings of
this Shop into account.
:type value: decimal.Decimal|int|str
:rtype: shuup.core.pricing.Price
"""
if self.prices_include_tax:
return TaxfulPrice(value, self.currency)
else:
return TaxlessPrice(value, self.currency)
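    # A hedged example (not in the original module): for a shop with
    # prices_include_tax=True and currency='EUR',
    #
    #     shop.create_price('10.00')  # -> TaxfulPrice('10.00', 'EUR')
    #
    # while a tax-exclusive shop would get a TaxlessPrice instead.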
def _are_changes_protected(self):
return Order.objects.filter(shop=self).exists()
ShopLogEntry = define_log_model(Shop)
|
suutari/shoop
|
shuup/core/models/_shops.py
|
Python
|
agpl-3.0
| 4,508
|
# Copyright 2015-2019 Cedric RICARD
#
# This file is part of CloudMailing.
#
# CloudMailing is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CloudMailing is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with CloudMailing. If not, see <http://www.gnu.org/licenses/>.
import os
import smtplib
import dns.resolver
from dns.exception import DNSException
from datetime import datetime
import logging
#import wingdbstub
#------------------------------------------------------------------------
# CM independent code
#------------------------------------------------------------------------
def mx_resolver(recipientOrDomain):
"""Helper function that do MX resolution and returning a sorted list of IPs.
@param recipientOrDomain: can be a recipient email or only the right part (domain)
of an email.
"""
try:
domain = recipientOrDomain.split('@')[1]
except IndexError:
domain = recipientOrDomain
try:
answers = [r for r in dns.resolver.query(domain, 'MX')]
answers.sort()
ips = []
for name in answers:
for ip in dns.resolver.query(name.exchange):
ips.append(ip.address)
return ips
except DNSException as ex:
logging.getLogger('sendmail').warning("Can't get MX record for domain '%s': %s" % (domain, str(ex)))
raise
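# A hedged usage sketch (the address is illustrative):
#
#     try:
#         ips = mx_resolver('someone@example.com')  # also accepts 'example.com'
#     except DNSException:
#         ips = []  # no MX record could be resolved for the domain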
class EmailSender(smtplib.SMTP):
    def __init__(self, host=None, port=None, local_hostname=None):
        smtplib.SMTP.__init__(self, host, port, local_hostname)
        #self.set_debuglevel(100)
    def rset(self):
        """SMTP 'rset' command -- resets the session."""
        try:
            return self.docmd("rset")
        except Exception:
            # Some servers drop the connection on RSET; ignore any error here.
            pass
def __sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if self.does_esmtp:
# Hmmm? what's this? -ddm
# self.esmtp_features['7bit']=""
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code,resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
self.rset()
raise smtplib.SMTPSenderRefused(code, resp, from_addr)
senderrs={}
if isinstance(to_addrs, str):
to_addrs = [to_addrs]
for each in to_addrs:
(code,resp)=self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each]=(code,resp)
if len(senderrs)==len(to_addrs):
# the server refused all our recipients
self.rset()
raise smtplib.SMTPRecipientsRefused(senderrs)
(code,resp) = self.data(msg)
if code != 250:
self.rset()
raise smtplib.SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
#------------------------------------------------------------------------
# Using Twisted
from OpenSSL.SSL import SSLv3_METHOD
from twisted.mail.smtp import ESMTPClient, ESMTPSenderFactory, DNSNAME, Address
from twisted.internet.ssl import ClientContextFactory
from twisted.internet import defer
from twisted.internet import reactor, protocol, error
from twisted.mail import smtp
from twisted.python.failure import Failure
def sendmail_async(
authenticationUsername, authenticationSecret,
fromAddress, toAddress,
messageFile,
smtpHost, smtpPort=25
):
"""
@param authenticationUsername: The username with which to authenticate.
@param authenticationSecret: The password with which to authenticate.
@param fromAddress: The SMTP reverse path (ie, MAIL FROM)
@param toAddress: The SMTP forward path (ie, RCPT TO)
@param messageFile: A file-like object containing the headers and body of
the message to send.
@param smtpHost: The MX host to which to connect.
@param smtpPort: The port number to which to connect.
@return: A Deferred which will be called back when the message has been
sent or which will errback if it cannot be sent.
"""
# Create a context factory which only allows SSLv3 and does not verify
# the peer's certificate.
contextFactory = ClientContextFactory()
contextFactory.method = SSLv3_METHOD
resultDeferred = defer.Deferred()
senderFactory = ESMTPSenderFactory(
authenticationUsername,
authenticationSecret,
fromAddress,
toAddress,
messageFile,
resultDeferred,
contextFactory=contextFactory,
heloFallback=True,
requireTransportSecurity=False,
requireAuthentication=False)
#pylint: disable-msg=E1101
s = reactor.connectTCP(smtpHost, smtpPort, senderFactory)
#pylint: enable-msg=E1101
def close_socket(d):
s.disconnect()
return d
return resultDeferred.addBoth(close_socket)
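# A hedged usage sketch (host, credentials and addresses are illustrative;
# on_sent/on_error are assumed callbacks). The file must stay open until the
# Deferred fires, so no ``with`` block is used here:
#
#     message_file = open('message.eml', 'rb')
#     d = sendmail_async('user', 'secret', 'from@example.com',
#                        ['to@example.com'], message_file, 'mx.example.com')
#     d.addCallbacks(on_sent, on_error)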
class RelayerMixin:
"""
Add relayer capability to an SMTPClient taking emails from its factory.
"""
#def _removeDeferred(self, argh):
#del self.result
#return argh
def getMailFrom(self):
"""Return the email address the mail is from."""
logging.getLogger("sendmail").debug("[%s] Calling getMailFrom for '%s'",
self.factory.targetDomain, self.transport.getPeer())
n = self.factory.getNextEmail()
if n:
fromEmail, toEmails, filename, deferred = n
if not os.path.exists(filename):
# content is removed from disk as soon as the mailing is closed
raise smtp.SMTPClientError(471, "Sending aborted. Mailing stopped.")
self.fromEmail = fromEmail
self.toEmails = toEmails
self.mailFile = open(filename, 'rb')
self.result = deferred
#WHY? self.result.addBoth(self._removeDeferred)
return str(self.fromEmail)
return None
def getMailTo(self):
"""Return a list of emails to send to."""
return self.toEmails
def getMailData(self):
"""Return file-like object containing data of message to be sent.
Lines in the file should be delimited by '\\n'.
"""
# Rewind the file in case part of it was read while attempting to
# send the message.
if not os.path.exists(self.mailFile.name):
# content is removed from disk as soon as the mailing is closed
raise smtp.SMTPClientError(471, "Sending aborted. Mailing stopped.")
self.mailFile.seek(0, 0)
return self.mailFile
def sendError(self, exc):
"""
If an error occurs before a mail message is sent sendError will be
called. This base class method sends a QUIT if the error is
non-fatal and disconnects the connection.
@param exc: The SMTPClientError (or child class) raised
@type exc: C{SMTPClientError}
"""
logging.getLogger("sendmail").error("sendError: %s", exc)
if isinstance(exc, smtp.SMTPClientError) and not exc.isFatal:
self._disconnectFromServer()
else:
# If the error was fatal then the communication channel with the
# SMTP Server is broken so just close the transport connection
self.smtpState_disconnect(-1, None)
if hasattr(self, 'mailFile') and self.mailFile:
self.mailFile.close()
self.mailFile = None
if hasattr(self, 'result'):
self.result.errback(exc)
def sentMail(self, code, resp, numOk, addresses, log):
"""Called when an attempt to send an email is completed.
        If some addresses were accepted, code and resp are the response
        to the DATA command. If no addresses were accepted, code and resp
        are the last code and response returned by the server.
        @param code: the code returned by the SMTP Server
        @param resp: The string response returned from the SMTP Server
        @param numOk: the number of addresses accepted by the remote host.
@param addresses: is a list of tuples (address, code, resp) listing
the response to each RCPT command.
@param log: is the SMTP session log
"""
if hasattr(self, 'mailFile') and self.mailFile:
self.mailFile.close()
self.mailFile = None
# Do not retry, the SMTP server acknowledged the request
if code not in smtp.SUCCESS:
errlog = []
for addr, acode, aresp in addresses:
if acode not in smtp.SUCCESS:
errlog.append(b"%s: %03d %s" % (bytes(addr), acode, aresp))
errlog.append(bytes(log))
#print '\n'.join(errlog)
log.clear()
exc = smtp.SMTPDeliveryError(code, resp, b'\n'.join(errlog), addresses)
self.result.errback(Failure(exc))
else:
log.clear()
self.result.callback((numOk, addresses))
def connectionLost(self, reason=protocol.connectionDone):
"""We are no longer connected"""
## Taken from SMTPClient
self.setTimeout(None)
if hasattr(self, 'mailFile') and self.mailFile:
self.mailFile.close()
self.mailFile = None
## end of SMTPClient
# Disconnected after a QUIT command -> normal case
logging.getLogger("sendmail").debug("[%s] Disconnected from '%s'",
self.factory.targetDomain, self.transport.getPeer())
self.factory._lastLogOnConnectionLost = self.log.str()
class SMTPRelayer(RelayerMixin, ESMTPClient):
"""
SMTP protocol that sends a set of emails based on information it
gets from its factory, a L{SMTPSenderFactory}.
"""
class SMTPRelayerFactory(protocol.ClientFactory):
"""
Utility factory for sending mailings easily.
Will try to send all emails using the same connection.
"""
domain = DNSNAME
protocol = SMTPRelayer
def __init__(self, targetDomain, retries=5, timeout=60,
contextFactory=None, heloFallback=True,
requireAuthentication=False,
requireTransportSecurity=False,
logger=None,
username=None, secret=None,
connectionClosedCallback=None,
connectionFailureErrback=None):
"""
@param targetDomain: All emails handled by this factory will be
handled by a simple SMTP server: the one specified as MX record
for this domain name.
@param retries: The number of times to retry delivery of this
message.
@param timeout: Period, in seconds, for which to wait for
server responses, or None to wait forever.
"""
assert isinstance(retries, int)
self.targetDomain = targetDomain
self._contextFactory = contextFactory
self._heloFallback = heloFallback
self._requireAuthentication = requireAuthentication
self._requireTransportSecurity = requireTransportSecurity
        self._username = username
        self._secret = secret
self._connectionFailureErrback = connectionFailureErrback
self._connectionClosedCallback = connectionClosedCallback
self._dateStarted = datetime.now()
self._lastLogOnConnectionLost = "" # Used to track message returned by server in case of early rejection (before EHLO)
self.retries = -retries
self.timeout = timeout
self.mails = []
self.last_email = None
self.deferred = defer.Deferred()
self.log = logger or logging.getLogger("sendmail")
def __repr__(self):
return "<%s.%s instance for '%s' at 0x%x>" % (self.__module__, self.__class__.__name__,
self.targetDomain, id(self))
def __unicode__(self):
return "<%s.%s instance for '%s' at 0x%x>" % (self.__module__, self.__class__.__name__,
self.targetDomain, id(self))
@property
def startDate(self):
return self._dateStarted
def startedConnecting(self, connector):
"""Called when a connection has been started.
You can call connector.stopConnecting() to stop the connection attempt.
@param connector: a Connector object.
"""
self.log.debug("[%s] SMTP Connection started on '%s'...", self.targetDomain, connector.getDestination())
def clientConnectionFailed(self, connector, err):
"""Called when a connection has failed to connect.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
self.log.warn("[%s] SMTP Connection failed for '%s': %s", self.targetDomain, connector.getDestination(), str(err.value))
self._processConnectionError(connector, err)
def clientConnectionLost(self, connector, err):
"""Called when an established connection is lost.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
        if self.last_email is None and not self.mails and err.check(error.ConnectionDone):
self.log.debug("[%s] SMTP Connection done for '%s'.", self.targetDomain, connector.getDestination())
if self._connectionClosedCallback:
self._connectionClosedCallback(connector)
return
self.log.warn("[%s] SMTP Connection lost for '%s': %s", self.targetDomain, connector.getDestination(), err.value)
self._processConnectionError(connector, err)
def _processConnectionError(self, connector, err):
if self.retries < 0:
self.log.info("[%s] SMTP Client retrying server '%s'. Retry: %s", self.targetDomain, connector.getDestination(), -self.retries)
connector.connect()
self.retries += 1
else:
if self._connectionFailureErrback:
self._connectionFailureErrback(connector, err)
def stopFactory(self):
"""This will be called before I stop listening on all Ports/Connectors.
This can be overridden to perform 'shutdown' tasks such as disconnecting
database connections, closing files, etc.
It will be called, for example, before an application shuts down,
if it was connected to a port. User code should not call this function
directly.
"""
self.log.debug("[%s] Stopping relay factory.", self.targetDomain)
if self.deferred:
if len(self.mails) > 0 or self.last_email:
self.deferred.errback(Failure(smtp.SMTPConnectError(-1, self._lastLogOnConnectionLost or "Connection closed prematurely.")))
else:
self.deferred.callback(self.targetDomain)
self.deferred = None # to avoid another call
def buildProtocol(self, addr):
self.log.debug("[%s] BuildProtocol for ip '%s'.", self.targetDomain, addr)
p = self.protocol(secret=self._secret, contextFactory=None, identity=self.domain, logsize=len(self.mails)*2+2)
p.debug = True # to enable SMTP log
p.heloFallback = self._heloFallback
p.requireAuthentication = self._requireAuthentication
p.requireTransportSecurity = self._requireTransportSecurity
p.factory = self
p.timeout = self.timeout
if self._username:
from twisted.mail.imap4 import CramMD5ClientAuthenticator, LOGINAuthenticator
p.registerAuthenticator(CramMD5ClientAuthenticator(self._username))
p.registerAuthenticator(LOGINAuthenticator(self._username))
p.registerAuthenticator(smtp.PLAINAuthenticator(self._username))
return p
def send_email(self, fromEmail, toEmails, fileName):
"""
@param fromEmail: The RFC 2821 address from which to send this
message.
@param toEmails: A sequence of RFC 2821 addresses to which to
send this message.
@param fileName: A full path to the file containing the message to send.
@param deferred: A Deferred to callback or errback when sending
of this message completes.
"""
deferred = defer.Deferred()
self.log.debug("Add %s into factory (%s)", ', '.join(toEmails), self.targetDomain)
self.mails.insert(0, (Address(fromEmail), list(map(Address, toEmails)), fileName, deferred))
return deferred
def getNextEmail(self):
try:
self.last_email = self.mails.pop()
self.log.debug("Factory (%s) return next email: %s", self.targetDomain, self.last_email[1])
return self.last_email
except IndexError:
self.log.debug("Factory (%s) return next email: EMPTY", self.targetDomain)
self.last_email = None
#self.deferred.callback(self.targetDomain)
return None
def get_recipients_count(self):
return len(self.mails)
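# A hedged usage sketch (domain, addresses and file path are illustrative):
# one factory per target domain, queueing several messages over a single
# SMTP connection to that domain's MX host.
#
#     factory = SMTPRelayerFactory('example.com')
#     d = factory.send_email('noreply@sender.org', ['alice@example.com'],
#                            '/var/spool/cm/msg1.eml')
#     reactor.connectTCP(mx_resolver('example.com')[0], 25, factory)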
|
ricard33/cloud-mailing
|
cloud_mailing/satellite/sendmail.py
|
Python
|
agpl-3.0
| 20,395
|
# -*- coding: utf-8 -*-
"""
Unit tests for the localization of emails sent by instructor.api methods.
"""
from django.core import mail
from django.urls import reverse
from django.test.utils import override_settings
from nose.plugins.attrib import attr
from six import text_type
from courseware.tests.factories import InstructorFactory
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.preferences.api import delete_user_preference, set_user_preference
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr(shard=1)
class TestInstructorAPIEnrollmentEmailLocalization(SharedModuleStoreTestCase):
"""
Test whether the enroll, unenroll and beta role emails are sent in the
proper language, i.e: the student's language.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIEnrollmentEmailLocalization, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPIEnrollmentEmailLocalization, self).setUp()
# Platform language is English, instructor's language is Chinese,
# student's language is Esperanto, so the emails should all be sent in
# Esperanto.
self.instructor = InstructorFactory(course_key=self.course.id)
set_user_preference(self.instructor, LANGUAGE_KEY, 'zh-cn')
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory.create()
set_user_preference(self.student, LANGUAGE_KEY, 'eo')
    def update_enrollment(self, action, student_email):
"""
Update the current student enrollment status.
"""
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
args = {'identifiers': student_email, 'email_students': 'true', 'action': action, 'reason': 'testing'}
response = self.client.post(url, args)
return response
def check_outbox_is_esperanto(self):
"""
Check that the email outbox contains exactly one message for which both
the message subject and body contain a certain Esperanto string.
"""
return self.check_outbox(u"Ýöü hävé ßéén")
def check_outbox(self, expected_message):
"""
Check that the email outbox contains exactly one message for which both
the message subject and body contain a certain string.
"""
self.assertEqual(1, len(mail.outbox))
self.assertIn(expected_message, mail.outbox[0].subject)
self.assertIn(expected_message, mail.outbox[0].body)
def test_enroll(self):
self.update_enrollement("enroll", self.student.email)
self.check_outbox_is_esperanto()
def test_unenroll(self):
CourseEnrollment.enroll(
self.student,
self.course.id
)
self.update_enrollement("unenroll", self.student.email)
self.check_outbox_is_esperanto()
def test_set_beta_role(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
self.client.post(url, {'identifiers': self.student.email, 'action': 'add', 'email_students': 'true'})
self.check_outbox_is_esperanto()
def test_enroll_unsubscribed_student(self):
# Student is unknown, so the platform language should be used
self.update_enrollement("enroll", "newuser@hotmail.com")
self.check_outbox("You have been")
@override_settings(LANGUAGE_CODE="eo")
def test_user_without_preference_receives_email_in_esperanto(self):
delete_user_preference(self.student, LANGUAGE_KEY)
self.update_enrollement("enroll", self.student.email)
self.check_outbox_is_esperanto()
|
Edraak/edraak-platform
|
lms/djangoapps/instructor/tests/test_api_email_localization.py
|
Python
|
agpl-3.0
| 3,965
|