| code (string, 2 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2 to 1.05M) |
|---|---|---|---|---|---|
#!BPY
# -*- coding: UTF-8 -*-
# import pose bone constraints
#
# 2017.10.22 Natukikazemizo
import bpy
import os
import utils_log
import utils_io_csv
# Column indexes of the exported pose-constraint CSV
WORK_FILE_NAME = "pose_constraints.csv"
BONE_NAME = 0
CONSTRAINT_NAME = 1
MUTE = 2
TARGET = 3
SUBTARGET_BONE_NAME = 4
EXTRAPOLATE = 5
FROM_MIN_X = 6
FROM_MAX_X = 7
FROM_MIN_Y = 8
FROM_MAX_Y = 9
FROM_MIN_Z = 10
FROM_MAX_Z = 11
MAP_TO_X_FROM = 12
MAP_TO_Y_FROM = 13
MAP_TO_Z_FROM = 14
MAP_TO = 15
TO_MIN_X = 16
TO_MAX_X = 17
TO_MIN_Y = 18
TO_MAX_Y = 19
TO_MIN_Z = 20
TO_MAX_Z = 21
TARGET_SPACE = 22
OWNER_SPACE = 23
# logger is created at module scope below, after the function definitions
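# Read the exported constraint rows from the CSV work file and apply them to
# the pose bones of the armature object named by `target`.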
def imp(target):
header, data = utils_io_csv.read(WORK_FILE_NAME)
for row in data:
if bpy.data.objects.find(target) == -1:
logger.warn("Object not found. Object name is " + target)
break
        if bpy.data.objects[target].pose.bones.find(row[BONE_NAME]) == -1:
            logger.warn("Bone not found. Bone name is " + row[BONE_NAME])
            continue  # skip this row and keep processing the rest
bone = bpy.data.objects[target].pose.bones[row[BONE_NAME]]
if bone.constraints.find(row[CONSTRAINT_NAME]) == -1:
constraint = bone.constraints.new(type="TRANSFORM")
constraint.name = row[CONSTRAINT_NAME]
constraint = bone.constraints[row[CONSTRAINT_NAME]]
print(bone.name + constraint.name)
constraint.mute = row[MUTE] == "True"
constraint.target = bpy.data.objects[target]
constraint.subtarget = row[SUBTARGET_BONE_NAME]
constraint.use_motion_extrapolate = row[EXTRAPOLATE] == "True"
constraint.from_min_x = float(row[FROM_MIN_X])
constraint.from_max_x = float(row[FROM_MAX_X])
constraint.from_min_y = float(row[FROM_MIN_Y])
constraint.from_max_y = float(row[FROM_MAX_Y])
constraint.from_min_z = float(row[FROM_MIN_Z])
constraint.from_max_z = float(row[FROM_MAX_Z])
constraint.map_to_x_from = row[MAP_TO_X_FROM]
constraint.map_to_y_from = row[MAP_TO_Y_FROM]
constraint.map_to_z_from = row[MAP_TO_Z_FROM]
constraint.map_to = row[MAP_TO]
if constraint.map_to == "LOCATION":
constraint.to_min_x = float(row[TO_MIN_X])
constraint.to_max_x = float(row[TO_MAX_X])
constraint.to_min_y = float(row[TO_MIN_Y])
constraint.to_max_y = float(row[TO_MAX_Y])
constraint.to_min_z = float(row[TO_MIN_Z])
constraint.to_max_z = float(row[TO_MAX_Z])
elif constraint.map_to == "ROTATION":
constraint.to_min_x_rot = float(row[TO_MIN_X])
constraint.to_max_x_rot = float(row[TO_MAX_X])
constraint.to_min_y_rot = float(row[TO_MIN_Y])
constraint.to_max_y_rot = float(row[TO_MAX_Y])
constraint.to_min_z_rot = float(row[TO_MIN_Z])
constraint.to_max_z_rot = float(row[TO_MAX_Z])
else:
# map_to:SCALE
constraint.to_min_x_scale = float(row[TO_MIN_X])
constraint.to_max_x_scale = float(row[TO_MAX_X])
constraint.to_min_y_scale = float(row[TO_MIN_Y])
constraint.to_max_y_scale = float(row[TO_MAX_Y])
constraint.to_min_z_scale = float(row[TO_MIN_Z])
constraint.to_max_z_scale = float(row[TO_MAX_Z])
constraint.target_space = row[TARGET_SPACE]
constraint.owner_space = row[OWNER_SPACE]
logger = utils_log.Util_Log(os.path.basename(__file__))
logger.start()
imp("Dorothy.Armature")
imp("Loris.Armature")
logger.end()
| natukikazemizo/sedna | Sedna/src/python/pose_constraints_imp.py | Python | mit | 3,583 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from PySide.QtGui import *
from emokit.emotiv import Emotiv
from PlottingWidget import PlottingWidget
from HeadStatusWidget import HeadStatusWidget
import pyqtgraph as pg
import datetime
import time
sys.path.append('../util')
from PacketParser import PacketParser
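# Recording tab of the UI: live plots of incoming Emotiv packets, a sensor
# status display, and controls for saving the packet stream to a CSV file.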
class Record:
def __init__(self, parent=None):
self.headset = Emotiv()
        self.electrodePairing = {
"AF3": {'pair': "AF4", 'order': 1},
"AF4": {'pair': "AF3", 'order': 0},
"F3": {'pair': "F4", 'order': 1},
"F4": {'pair': "F3", 'order': 0},
"F7": {'pair': "F8", 'order': 1},
"F8": {'pair': "F7", 'order': 0},
"FC5": {'pair': "FC6", 'order': 1},
"FC6": {'pair': "FC5", 'order': 0},
"T7": {'pair': "T8", 'order': 1},
"T8": {'pair': "T7", 'order': 0},
"P7": {'pair': "P8", 'order': 1},
"P8": {'pair': "P7", 'order': 0},
"O1": {'pair': "O2", 'order': 1},
"O2": {'pair': "O1", 'order': 0},
}
self.timer = pg.QtCore.QTimer()
self.recording = False
self.parser = PacketParser()
self.header_text = "Timestamp,F3 Value,F3 Quality,FC5 Value,FC5 Quality,F7 Value,F7 Quality,T7 Value,T7 Quality,P7 Value,P7 Quality,O1 Value,O1 Quality,O2 Value,O2 Quality,P8 Value,P8 Quality,T8 Value,T8 Quality,F8 Value,F8 Quality,AF4 Value,AF4 Quality,FC6 Value,FC6 Quality,F4 Value,F4 Quality,AF3 Value,AF3 Quality,X Value,Y Value,Z Value"
def setPlotGraphBySensor(self, sensor):
self.plots.setVisible(False)
self.altPlots.setVisible(True)
if self.electrodePairing[sensor]["order"]:
self.altPlots.restartSensors( [ sensor, self.electrodePairing[sensor]["pair"]] )
else:
self.altPlots.restartSensors([ self.electrodePairing[sensor]["pair"], sensor ])
self.returnToGraphs.setVisible(True)
def update(self):
packet = self.headset.dequeue()
        if packet is not None:
self.plots.updater(packet)
self.headsetState.updateHeadsetStatus(packet)
if self.recording:
row = self.parser.fromPacketToCSV( packet )
self.output_file.write(row + "\n")
def setRecordTab(self):
self.setLeftSidedBox()
self.setCenterBox()
# Main grid layout
self.gridLayout = QGridLayout()
self.gridLayout.addLayout( self.leftBox, 0, 0)
self.gridLayout.addLayout( self.centerBox, 0, 1)
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 3)
return self.gridLayout
def setLeftSidedBox(self):
# Left sided box for controls
self.leftBox = QFormLayout()
self.recordButton = QPushButton("Grabar")
self.recordButton.setEnabled(False)
self.recordButton.clicked.connect(self.startRecord)
self.stopButton = QPushButton("Detener")
self.stopButton.setEnabled(False)
self.stopButton.clicked.connect(self.stopRecord)
self.recordButtons = QGridLayout()
self.recordButtons.addWidget( self.recordButton, 0, 0)
self.recordButtons.addWidget( self.stopButton, 0, 1)
self.leftBox.addRow(QLabel("Controles de grabacion"))
self.leftBox.addRow(self.recordButtons)
self.route = QLineEdit()
self.route.setReadOnly(True)
self.examine = QPushButton("Examinar")
self.examine.clicked.connect(self.getFilename)
folderButtons = QGridLayout()
folderButtons.addWidget(self.route, 0, 0)
folderButtons.addWidget(self.examine, 0, 1)
self.leftBox.addRow(QLabel("Carpeta de guardado"))
self.leftBox.addRow(folderButtons)
# Sensors status
self.leftBox.addRow(QLabel("Estado de los sensores"))
self.headsetState = HeadStatusWidget(self.setPlotGraphBySensor)
self.leftBox.addRow(self.headsetState)
self.headsetState.updateHeadsetStatus(None)
def setCenterBox(self):
# Center sided box for signals
self.centerBox = QFormLayout()
self.centerBox.addRow(QLabel("Estado de las senales"))
self.returnToGraphs = QPushButton("Regresar")
self.returnToGraphs.setVisible(False)
self.returnToGraphs.clicked.connect(self.returnToGraphics)
self.centerBox.addRow(self.returnToGraphs)
self.plots = PlottingWidget()
self.centerBox.addRow(self.plots)
self.altPlots = PlottingWidget([])
self.centerBox.addRow(self.altPlots)
self.altPlots.setVisible(False)
def returnToGraphics(self):
self.altPlots.setVisible(False)
self.returnToGraphs.setVisible(False)
self.plots.setVisible(True)
def getFilename(self):
        filename = QFileDialog.getExistingDirectory(
            self.examine, "Open Directory", "/home",
            QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
self.fileRoute = filename
self.route.setText(self.fileRoute)
self.recordButton.setEnabled(True)
def startRecord(self):
self.stopButton.setEnabled(True)
self.recordButton.setEnabled(False)
self.recording = True
        # Build the output file path from the folder chosen in getFilename()
        # plus a timestamp.
        self.file_name = self.fileRoute + "/" + \
            datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H-%M-%S') + ".csv"
        self.output_file = open(self.file_name, 'w')
self.output_file.write(self.header_text + "\n")
    def stopRecord(self):
        # Clear the flag first so update() stops writing to a closed file.
        self.recording = False
        self.output_file.close()
| EmokitAlife/EmokitVisualizer | UI/Record.py | Python | mit | 5,654 |
#!/usr/bin/env python
import csv
import sys
from datetime import datetime, timedelta
import itertools
import operator
import os
use_colors = sys.stdout.isatty()
if use_colors:
try:
import colorama
if os.name == 'nt':
colorama.init(strip=True, convert=True)
else:
colorama.init()
except ImportError:
print 'For colors install colorama ($ pip install colorama)'
use_colors = False
tformat = "%H:%M"
def parsetime(s):
    try:
        return datetime.strptime(s, tformat)
    except ValueError:
        return None
def abshourminute(td):
hours, rem = divmod(td.seconds, 60**2)
minutes, sec = divmod(rem, 60)
hours += td.days * 24
return hours, minutes
def hourminute(td):
if td < timedelta():
return abshourminute(-td), True
else:
return abshourminute(td), False
def formatahm(hm):
return "%02d:%02d" % hm
def formathm(hm, pos=' ', neg='-'):
return "%s%02d:%02d" % ((pos, neg)[hm[1]], hm[0][0], hm[0][1])
def formatd(td, *args):
return formathm(hourminute(td), *args)
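# Chunk an iterable into tuples of length n, padding the final chunk with
# fillvalue (the classic itertools "grouper" recipe).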
def grouped(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
workday = timedelta(hours=7, minutes=45)
lunchbonus = timedelta(minutes=30)
mapping = { }
total_time = timedelta()
expected_time = timedelta()
days = []
cur_time_raw = datetime.now()
cur_time = datetime(1900,1,1, hour=cur_time_raw.hour, minute=cur_time_raw.minute)
nulltime = (timedelta(), timedelta())
addtime = lambda x,y: tuple(map(operator.add, x, y))
zerotime = parsetime('0:00')
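# The input CSV needs a header row naming at least the columns
# Start, End, Lunch? and Adjust; times use the HH:MM format above.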
with open(sys.argv[1], 'rb') as csvfile:
reader = csv.reader(csvfile)
mapping = { n: i for (i, n) in enumerate(next(reader)) }
for row in reader:
getcol = lambda n: row[mapping[n]]
gettimecol = lambda n: parsetime(getcol(n))
start = gettimecol('Start')
end = gettimecol('End')
lunch = getcol('Lunch?') == 'Yes'
adjust = gettimecol('Adjust')
adjust_delta = adjust - zerotime if adjust else None
if start and not end:
end = cur_time
if None in (start, end, lunch):
days.append(nulltime)
continue
duration = end - start
if not lunch:
duration += lunchbonus
if adjust_delta:
duration -= adjust_delta
total_time += duration
expected_time += workday
delta = duration - workday
days.append((duration, delta))
weeks = list(grouped(days, 5, nulltime))
months = list(grouped(weeks, 4, []))
def isnull(td):
return td.seconds == 0 and td.days == 0
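# Render a (worked, delta) pair; with colors enabled the delta is green when
# non-negative, red otherwise, and bright once it reaches half an hour.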
def formattad(t, td):
if use_colors:
ts = ''
ds = ((colorama.Fore.RED, colorama.Fore.GREEN)[td >= timedelta()] +
(colorama.Style.BRIGHT if abs(td) >= timedelta(minutes=30) else ''))
ns = ''
rs = colorama.Fore.RESET + colorama.Style.RESET_ALL
else:
ts = ds = ns = rs = ''
if isnull(t) and isnull(td):
return ns + ' ' + '.' * 12 + rs
return "%s %s" % (ts + formatd(t), ds + formatd(td, '+')) + rs
total_sum = nulltime
print ''
for month in months:
weeklist = []
sumlist = []
for week in month:
weeklist.append([x if x else nulltime for x in week])
sumlist.append(reduce(addtime, week, nulltime))
weeklist_transposed = itertools.izip_longest(*weeklist, fillvalue=nulltime)
msum = reduce(addtime, sumlist, nulltime)
total_sum = addtime(total_sum, msum)
ind = ' ' * 2
sep = ' ' * 3
print '\n'.join(ind + sep.join(formattad(*day) for day in week) for week in weeklist_transposed)
print ''
print ind + sep.join(formattad(*x) for x in sumlist)
print ''
print 'Month: %s' % formattad(*msum)
print ''
print 'Total: %s' % formattad(*total_sum)
if use_colors:
colorama.deinit()
| bqqbarbhg/workcalc | workcalc.py | Python | mit | 3,917 |
from bottle import route, run, template, static_file, response, request
from json import dumps
#import serial
import time
#ser = serial.Serial('COM3', 9600, timeout=0)
#ser.readlines()
@route('/assets/:path#.+#', name='assets')
def static(path):
return static_file(path, root='assets')
@route('/')
def index():
return template('hello')
@route('/medidas')
def medidas():
#ser.write('M')
#time.sleep(0.5)
#medidas = ser.readline()
#medidas = medidas.split()
    # Placeholder readings while the serial reads above are commented out;
    # with the device attached, `medidas` would be ser.readline().split().
    medidas = ['', '', '0.0', '', '', '0.0']
    rv = [{"temp": medidas[2], "lux": medidas[5]}]
response.content_type = 'application/json'
return dumps(rv)
@route('/comandos', method='POST')
def comandos():
comando = request.forms.get('comando')
#ser.write(comando)
return "Success"
run(host='localhost', port=8080, debug=True)
| brunoliveira8/nasa-challenge | rover-system.py | Python | mit | 795 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from keras.models import model_from_yaml
def generate_nnet(feats):
"""Generate a neural network.
Parameters
----------
feats : list with at least one feature vector
Returns
-------
Neural network object
"""
# Load it here to prevent crash of --help when it's not present
from keras.models import Sequential
from keras.layers import Dense, Convolution2D, Flatten
model = Sequential()
input_shape = (feats[0].shape[0],
feats[0].shape[1],
feats[0].shape[2])
logging.info("input shape: %s", input_shape)
model.add(Convolution2D(10, 3, 3,
border_mode='same',
input_shape=input_shape))
model.add(Convolution2D(10, 3, 3, activation='relu', border_mode='same'))
# model.add(Convolution2D(10, 3, 3, activation='relu', border_mode='same'))
model.add(Flatten())
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
return model
def serialize_model(hypes, model):
"""Serialize a model."""
yaml_path = hypes["segmenter"]["serialized_model_path"] + ".yaml"
hdf5_path = hypes["segmenter"]["serialized_model_path"] + ".hdf5"
model.save_weights(hdf5_path)
with open(yaml_path, 'w') as f:
f.write(model.to_yaml())
def load_model(hypes):
"""Load a serialized model."""
yaml_path = hypes["segmenter"]["serialized_model_path"] + ".yaml"
hdf5_path = hypes["segmenter"]["serialized_model_path"] + ".hdf5"
with open(yaml_path) as f:
yaml_string = f.read()
model = model_from_yaml(yaml_string)
model.load_weights(hdf5_path)
model.compile(optimizer='adadelta', loss='categorical_crossentropy')
return model
| TensorVision/MediSeg | AP4/model-401-sst/sliding_window_keras.py | Python | mit | 1,915 |
#!/usr/bin/env python
from distutils.core import setup
from pip.req import parse_requirements
install_reqs = parse_requirements('requirements.txt')
setup(
name='hadoop-parallel',
version='0.0.1',
author='Alex Pirozhenko',
author_email='alex.pirozhenko@gmail.com',
packages=['hadoop_parallel'],
package_dir={'hadoop_parallel': 'src/hadoop_parallel'},
requires=[str(ir.req) for ir in install_reqs]
)
| alex-pirozhenko/hadoop-parallel | setup.py | Python | mit | 442 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
import tempfile
import sys
import subprocess
import logging
try:
from unittest import mock # py3
except ImportError:
import mock # NOQA - requires "pip install mock"
from os.path import join as pathjoin
import codecs
import difflib
import textwrap
import warnings
import fulltext
from fulltext.util import is_windows
from fulltext.util import magic
from fulltext.util import exiftool
from fulltext.compat import which
from six import PY3
from six import BytesIO
TRAVIS = bool(os.environ.get('TRAVIS'))
TEXT_WITH_NEWLINES = u"Lorem ipsum\ndolor sit amet, consectetur adipiscing e" \
u"lit. Nunc ipsum augue, iaculis quis\nauctor eu, adipi" \
u"scing non est. Nullam id sem diam, eget varius dui. E" \
u"tiam\nsollicitudin sapien nec odio elementum sit amet" \
u" luctus magna volutpat. Ut\ncommodo nulla neque. Aliq" \
u"uam erat volutpat. Integer et nunc augue.\nPellentesq" \
u"ue habitant morbi tristique senectus et netus et male" \
u"suada fames\nac turpis egestas. Quisque at enim nulla" \
u", vel tincidunt urna. Nam leo\naugue, elementum ut vi" \
u"verra eget, scelerisque in purus. In arcu orci, porta" \
u"\nnec aliquet quis, pretium a sem. In fermentum nisl " \
u"id diam luctus viverra.\nNullam semper, metus at euis" \
u"mod vulputate, orci odio dignissim urna, quis\niaculi" \
u"s neque lacus ut tortor. Ut a justo non dolor venenat" \
u"is accumsan.\nProin dolor eros, aliquam id condimentu" \
u"m et, aliquam quis metus. Vivamus\neget purus diam."
TEXT = TEXT_WITH_NEWLINES.replace('\n', ' ')
WINDOWS = is_windows()
APPVEYOR = bool(os.environ.get('APPVEYOR'))
HERE = os.path.abspath(os.path.dirname(__file__))
logging.basicConfig(level=logging.WARNING)
# ===================================================================
# --- Utils
# ===================================================================
class BaseTestCase(unittest.TestCase):
"""Base TestCase Class."""
# --- override
def __str__(self):
# Print fully qualified test name.
return "%s.%s.%s" % (
os.path.splitext(__file__)[0], self.__class__.__name__,
self._testMethodName)
def shortDescription(self):
# Avoid printing docstrings.
return ""
# --- utils
def touch(self, fname, content=None):
if isinstance(content, bytes):
f = open(fname, "wb")
else:
if PY3:
f = open(fname, "wt")
else:
f = codecs.open(fname, "wt", encoding="utf8")
self.addCleanup(os.remove, fname)
with f:
if content:
f.write(content)
return fname
def touch_fobj(self, content=b""):
f = BytesIO()
self.addCleanup(f.close)
if content:
f.write(content)
f.seek(0)
return f
def assertMultiLineEqual(self, a, b, msg=None):
"""A useful assertion for troubleshooting."""
# Normalize spacing/formatting.
a = textwrap.wrap(a)
b = textwrap.wrap(b)
# Check if two blocks of text are equal.
if a == b:
return
if msg is None:
# If not the same, and no msg provided, create a user-friendly
# diff message.
a = [l + '\n' for l in a]
b = [l + '\n' for l in b]
msg = '\n' + ''.join(difflib.unified_diff(
a, b, 'A (first argument)', 'B (second argument)'))
raise AssertionError(msg)
def assertStartsWith(self, prefix, body):
"""Shortcut."""
if not body.startswith(prefix):
msg = '"%s" != "%s"' % (body[:len(prefix)], prefix)
raise AssertionError(msg)
def assertEndsWith(self, postfix, body):
"""Shortcut."""
if not body.endswith(postfix):
msg = '"%s" != "%s"' % (body[0 - len(postfix):], postfix)
raise AssertionError(msg)
unittest.TestCase = BaseTestCase
# ===================================================================
# --- Tests
# ===================================================================
class FullTextTestCase(BaseTestCase):
def test_missing_default(self):
"Ensure a missing file returns default instead of exception."
self.assertEqual(fulltext.get('non-existent-file.pdf', 'sentinal'),
'sentinal')
def test_missing(self):
"Ensure a missing file without a default raises an exception."
# In this case we hit the pdf backend which runs a command, the
# command fails because the file does not exist resulting in
# ShellError.
self.assertRaises(IOError, fulltext.get, 'non-existent-file.txt')
def test_unknown_default(self):
"Ensure unknown file type returns default instead of exception."
self.assertEqual(fulltext.get('unknown-file.foobar', 'sentinal'),
'sentinal')
def test_unknown(self):
"Ensure unknown file type without a default raises an exception."
# This is nearly a duplicate of test_missing, but in this case we hit
# the default text backend, which attempts to open the file resulting
# in an IOError.
self.assertRaises(IOError, fulltext.get, 'unknown-file.foobar')
def test_default_none(self):
"Ensure None is a valid value to pass as default."
self.assertEqual(fulltext.get('unknown-file.foobar', None), None)
def test_text_strip(self):
"""Ensure that stripping works as expected."""
file = BytesIO()
file.write(b' Test leading and trailing spaces removal. ')
file.write(b'Test @$%* punctuation removal! ')
file.write(b'Test spaces removal! ')
file.seek(0)
stripped = fulltext.get(file, backend='bin')
self.assertMultiLineEqual('Test leading and trailing spaces removal. '
'Test punctuation removal! Test spaces '
'removal!', stripped)
def test_register_backend_ext(self):
fulltext.register_backend(
'application/ijustmadethisup',
'fulltext.backends.__html',
extensions=['.ijustmadethisup'])
fname = self.touch("document.ijustmadethisup")
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__html')
def test_text_ext(self):
for ext in (".py", ".cpp", ".h", ".pl"):
fname = self.touch("document%s" % ext)
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__text')
class TestCLI(BaseTestCase):
def test_extract(self):
subprocess.check_output(
"%s -m fulltext extract %s" % (
sys.executable, pathjoin(HERE, "files/test.txt")),
shell=True)
def test_check(self):
p = subprocess.Popen(
"%s -m fulltext -t check" % sys.executable, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
if not WINDOWS:
# Everything is supposed to work on Linux.
self.fail(err.decode())
else:
# On Windows we expect a bunch of backends not to work.
# XXX maybe this is too strict.
lines = [x.split(':')[0] for x in
sorted(err.decode().splitlines())]
self.assertEqual(
lines,
['fulltext.backends.__doc',
'fulltext.backends.__hwp',
'fulltext.backends.__ocr',
'fulltext.backends.__ps'])
@unittest.skipIf(not WINDOWS, "windows only")
def test_which(self):
self.assertIsNotNone(which("pdftotext"))
self.assertIsNotNone(which("unrtf"))
self.assertIsNotNone(which("unrar"))
self.assertIsNotNone(which("exiftool"))
class TestBackendInterface(BaseTestCase):
def test_params(self):
# Make sure Backend class receives the right params.
fname = self.touch('testfn.doc')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname, encoding='foo', encoding_errors='bar')
klass = m.call_args[0][0]
self.assertEqual(klass.encoding, 'foo')
self.assertEqual(klass.encoding_errors, 'bar')
def test_callbacks(self):
# Make sure callback methods are called (also in the right order).
flags = []
class Backend:
def setup(self):
flags.append("setup")
def teardown(self):
flags.append("teardown")
def handle_fobj(self, path):
flags.append("handle_fobj")
return "text"
fname = self.touch('testfn.doc')
with mock.patch('fulltext.backend_inst_from_mod',
return_value=Backend()):
fulltext.get(fname, encoding='foo', encoding_errors='bar')
self.assertEqual(flags, ['setup', 'handle_fobj', 'teardown'])
def test_teardown_on_err(self):
# Make sure teardown methods is called also on error.
flags = []
class Backend:
def setup(self):
flags.append("setup")
def teardown(self):
flags.append("teardown")
def handle_fobj(self, path):
1 / 0
fname = self.touch('testfn.doc')
with mock.patch('fulltext.backend_inst_from_mod',
return_value=Backend()):
with self.assertRaises(ZeroDivisionError):
fulltext.get(fname, encoding='foo', encoding_errors='bar')
self.assertEqual(flags, ['setup', 'teardown'])
class TestInstalledDeps(BaseTestCase):
"""Make sure certain deps are installed."""
@unittest.skipIf(APPVEYOR, "AppVeyor can't detect magic")
def test_magic(self):
self.assertIsNotNone(magic)
@unittest.skipIf(WINDOWS and magic is None, "magic lib not installed")
def test_no_magic(self):
# Emulate a case where magic lib is not installed.
f = self.touch_fobj(content=b"hello world")
f.seek(0)
with mock.patch("fulltext.magic", None):
with warnings.catch_warnings(record=True) as ws:
mod = fulltext.backend_from_fobj(f)
self.assertIn("magic lib is not installed", str(ws[0].message))
self.assertEqual(mod.__name__, 'fulltext.backends.__bin')
def test_exiftool(self):
self.assertIsNotNone(exiftool)
# ===================================================================
# --- Mixin tests
# ===================================================================
class PathAndFileTests(object):
text = TEXT
mime = None
def test_file(self):
path = pathjoin(HERE, 'files/test.%s' % self.ext)
with open(path, 'rb') as f:
text = fulltext.get(f, mime=self.mime)
self.assertMultiLineEqual(self.text, text)
def _handle_text(self, f):
"""Main body of both 'text mode' tests."""
try:
text = fulltext.get(f, mime=self.mime)
self.assertMultiLineEqual(self.text, text)
finally:
f.close()
def test_file_text(self):
path = pathjoin(HERE, 'files/test.%s' % self.ext)
if PY3:
with self.assertRaises(AssertionError):
self._handle_text(open(path, 'rt'))
else:
self._handle_text(open(path, 'rt'))
def test_file_codecs(self):
path = pathjoin(HERE, 'files/test.%s' % self.ext)
with self.assertRaises(AssertionError):
self._handle_text(codecs.open(path, encoding='utf8'))
def test_path(self):
path = pathjoin(HERE, 'files/test.%s' % self.ext)
text = fulltext.get(path, mime=self.mime)
self.assertMultiLineEqual(self.text, text)
class TxtTestCase(BaseTestCase, PathAndFileTests):
ext = 'txt'
class OdtTestCase(BaseTestCase, PathAndFileTests):
ext = 'odt'
@unittest.skipIf(WINDOWS, "not supported on Windows")
class DocTestCase(BaseTestCase, PathAndFileTests):
ext = "doc"
class DocxTestCase(BaseTestCase, PathAndFileTests):
ext = "docx"
class OdsTestCase(BaseTestCase, PathAndFileTests):
ext = "ods"
class XlsTestCase(BaseTestCase, PathAndFileTests):
ext = "xls"
class XlsxTestCase(BaseTestCase, PathAndFileTests):
ext = "xlsx"
class HtmlTestCase(BaseTestCase, PathAndFileTests):
ext = "html"
class XmlTestCase(BaseTestCase, PathAndFileTests):
ext = "xml"
class ZipTestCase(BaseTestCase, PathAndFileTests):
ext = "zip"
class PdfTestCase(BaseTestCase, PathAndFileTests):
ext = "pdf"
class RarTestCase(BaseTestCase, PathAndFileTests):
ext = "rar"
class RtfTestCase(BaseTestCase, PathAndFileTests):
ext = "rtf"
class CsvTestCase(BaseTestCase, PathAndFileTests):
ext = "csv"
mime = 'text/csv'
text = TEXT.replace(',', '')
def test_newlines(self):
# See: https://github.com/btimby/fulltext/issues/68
fname = self.touch('testfn.csv', content="foo\n\rbar")
self.assertEqual(fulltext.get(fname), "foo bar")
class TsvTestCase(BaseTestCase, PathAndFileTests):
ext = "tsv"
mime = 'text/tsv'
class PsvTestCase(BaseTestCase, PathAndFileTests):
ext = "psv"
mime = 'text/psv'
@unittest.skipIf(WINDOWS, "not supported on Windows")
class PngTestCase(BaseTestCase, PathAndFileTests):
ext = "png"
class EpubTestCase(BaseTestCase, PathAndFileTests):
ext = "epub"
@unittest.skipIf(WINDOWS, "not supported on Windows")
class PsTestCase(BaseTestCase, PathAndFileTests):
ext = "ps"
class EmlTestCase(BaseTestCase, PathAndFileTests):
ext = "eml"
class MboxTestCase(BaseTestCase, PathAndFileTests):
ext = "mbox"
class MsgTestCase(BaseTestCase, PathAndFileTests):
ext = "msg"
class JsonTestCase(BaseTestCase, PathAndFileTests):
ext = "json"
@unittest.skipIf(not which('pyhwp'), "pyhwp not installed")
class HwpTestCase(BaseTestCase, PathAndFileTests):
ext = "hwp"
class GzTestCase(BaseTestCase, PathAndFileTests):
ext = "gz"
def test_pdf(self):
# See: https://github.com/btimby/fulltext/issues/56
text = fulltext.get(pathjoin(HERE, "files/gz/test.pdf.gz"))
self.assertMultiLineEqual(self.text, text)
def test_csv(self):
text = fulltext.get(pathjoin(HERE, "files/gz/test.csv.gz"))
self.assertMultiLineEqual(self.text.replace(',', ''), text)
def test_txt(self):
text = fulltext.get(pathjoin(HERE, "files/gz/test.txt.gz"))
self.assertMultiLineEqual(self.text, text)
# ---
class FilesTestCase(BaseTestCase):
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_old_doc_file(self):
"Antiword does not support older Word documents."
with open(pathjoin(HERE, 'files/test.old.doc'), 'rb') as f:
text = fulltext.get(f, backend='doc')
self.assertStartsWith('eZ-Audit', text)
self.assertIsInstance(text, u"".__class__)
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_old_doc_path(self):
"Antiword does not support older Word documents."
text = fulltext.get(pathjoin(HERE, 'files/test.old.doc'),
backend='doc')
self.assertStartsWith('eZ-Audit', text)
self.assertIsInstance(text, u"".__class__)
# ===================================================================
# --- Pickups
# ===================================================================
class TestPickups(BaseTestCase):
"""Make sure the right backend is called."""
# --- by extension
def test_by_ext(self):
fname = self.touch('testfn.html')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__html')
def test_no_ext(self):
# File with no extension == use bin backend.
fname = self.touch('testfn')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__bin')
def test_unknown_ext(self):
# File with unknown extension == use bin backend.
fname = self.touch('testfn.unknown')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__bin')
# --- by mime opt
def test_by_mime(self):
fname = self.touch('testfn.doc')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname, mime='application/vnd.ms-excel')
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__xlsx')
def test_by_unknown_mime(self):
fname = self.touch('testfn.doc')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname, mime='application/yo!')
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__bin')
# -- by name opt
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_by_name(self):
fname = self.touch('testfn')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname, name="woodstock.doc")
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__doc')
def test_by_name_with_no_ext(self):
# Assume bin backend is picked up.
fname = self.touch("woodstock-no-ext")
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname, name=fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__bin')
# --- by backend opt
def test_by_backend(self):
# Assert file ext is ignored if backend opt is used.
fname = self.touch('testfn.doc')
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname, backend='html')
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__html')
def test_by_invalid_backend(self):
# Assert file ext is ignored if backend opt is used.
fname = self.touch('testfn.doc')
with self.assertRaises(ValueError):
fulltext.get(fname, backend='yoo')
# --- by src code ext
def test_src_code_ext(self):
fname = "file.js"
self.touch(fname, content="foo bar")
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__text')
# ===================================================================
# --- File objects
# ===================================================================
class TestFileObj(BaseTestCase):
def test_returned_content(self):
f = self.touch_fobj(content=b"hello world")
ret = fulltext.get(f)
self.assertEqual(ret, "hello world")
def test_name_attr(self):
# Make sure that fulltext attempts to determine file name
# from "name" attr of the file obj.
f = tempfile.NamedTemporaryFile(suffix='.html')
with mock.patch('fulltext.handle_fobj', return_value="") as m:
fulltext.get(f)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__html')
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_fobj_offset(self):
# Make sure offset is unaltered after guessing mime type.
f = self.touch_fobj(content=b"hello world")
f.seek(0)
mod = fulltext.backend_from_fobj(f)
self.assertEqual(mod.__name__, 'fulltext.backends.__text')
class TestGuessingFromFileContent(BaseTestCase):
"""Make sure that when file has no extension its type is guessed
from its content.
"""
@unittest.skipIf(WINDOWS and magic is None, "magic is not installed")
def test_pdf(self):
fname = "file-noext"
with open(pathjoin(HERE, 'files/test.pdf'), 'rb') as f:
data = f.read()
self.touch(fname, content=data)
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__pdf')
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_html(self):
fname = "file-noext"
self.touch(fname, content=open(
pathjoin(HERE, 'files/test.html'), 'rb').read())
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.__module__, 'fulltext.backends.__html')
class TestUtils(BaseTestCase):
def test_is_file_path(self):
from fulltext.util import is_file_path
assert is_file_path('foo')
assert is_file_path(b'foo')
with open(__file__) as f:
assert not is_file_path(f)
# ===================================================================
# --- Encodings
# ===================================================================
class TestEncodingGeneric(BaseTestCase):
def test_global_vars(self):
# Make sure the globla vars are taken into consideration and
# passed to the underlying backends.
encoding, errors = fulltext.ENCODING, fulltext.ENCODING_ERRORS
fname = self.touch("file.txt", content=b"hello")
try:
fulltext.ENCODING = "foo"
fulltext.ENCODING_ERRORS = "bar"
with mock.patch('fulltext.handle_path', return_value="") as m:
fulltext.get(fname)
klass = m.call_args[0][0]
self.assertEqual(klass.encoding, 'foo')
self.assertEqual(klass.encoding_errors, 'bar')
finally:
fulltext.ENCODING = encoding
fulltext.ENCODING_ERRORS = errors
@unittest.skipIf(WINDOWS, "not supported on Windows")
class TestUnicodeBase(object):
ext = None
italian = u"ciao bella àèìòù "
japanese = u"かいおうせい海王星"
invalid = u"helloworld"
def compare(self, content_s, fulltext_s):
if PY3:
self.assertEqual(content_s, fulltext_s)
        else:
            # Don't test for equality on Python 2 because unicode
            # support is basically broken.
            pass
def doit(self, fname, expected_txt):
ret = fulltext.get(fname)
self.compare(ret, expected_txt)
def test_italian(self):
self.doit(pathjoin(HERE, "files/unicode/it.%s" % self.ext),
self.italian)
def test_japanese(self):
self.doit(pathjoin(HERE, "files/unicode/jp.%s" % self.ext),
self.japanese)
def test_invalid_char(self):
fname = pathjoin(HERE, "files/unicode/invalid.%s" % self.ext)
if os.path.exists(fname):
with self.assertRaises(UnicodeDecodeError):
fulltext.get(fname)
ret = fulltext.get(fname, encoding_errors="ignore")
self.assertEqual(ret, self.invalid)
#
fname = pathjoin(HERE, "files/unicode/it.%s" % self.ext)
with self.assertRaises(UnicodeDecodeError):
fulltext.get(fname, encoding='ascii')
ret = fulltext.get(
fname, encoding='ascii', encoding_errors="ignore")
against = self.italian.replace(
u"àèìòù", u"").replace(u" ", u" ").strip()
self.assertEqual(ret, against)
class TestUnicodeTxt(BaseTestCase, TestUnicodeBase):
ext = "txt"
class TestUnicodeCsv(BaseTestCase, TestUnicodeBase):
ext = "csv"
class TestUnicodeOdt(BaseTestCase, TestUnicodeBase):
ext = "odt"
# A binary file is passed and text is not de/encoded.
@unittest.skipIf(1, "no conversion happening")
def test_invalid_char(self):
pass
# ps backend uses `pstotext` CLI tool, which does not correctly
# handle unicode. Just make sure we don't crash if passed the
# error handler.
@unittest.skipIf(WINDOWS, "not supported on Windows")
class TestUnicodePs(BaseTestCase):
def test_italian(self):
fname = pathjoin(HERE, "files/unicode/it.ps")
with self.assertRaises(UnicodeDecodeError):
fulltext.get(fname)
ret = fulltext.get(fname, encoding_errors="ignore")
assert ret.startswith("ciao bella") # the rest is garbage
class TestUnicodeHtml(BaseTestCase, TestUnicodeBase):
ext = "html"
# backend uses `unrtf` CLI tool, which does not correctly
# handle unicode. Just make sure we don't crash if passed the
# error handler.
@unittest.skipIf(WINDOWS, "not supported on Windows")
class TestUnicodeRtf(BaseTestCase):
ext = "rtf"
def test_italian(self):
fname = pathjoin(HERE, "files/unicode/it.rtf")
with self.assertRaises(UnicodeDecodeError):
fulltext.get(fname)
ret = fulltext.get(fname, encoding_errors="ignore")
assert ret.startswith("ciao bella") # the rest is garbage
class TestUnicodeDoc(BaseTestCase, TestUnicodeBase):
ext = "doc"
italian = ' '.join([u"ciao bella àèìòù" for x in range(20)])
japanese = ' '.join([u"かいおうせい海王星" for x in range(30)])
class TestUnicodeXml(BaseTestCase, TestUnicodeBase):
ext = "xml"
class TestUnicodeXlsx(BaseTestCase, TestUnicodeBase):
ext = "xlsx"
# A binary file is passed and text is not de/encoded.
@unittest.skipIf(1, "no conversion happening")
def test_invalid_char(self):
pass
class TestUnicodePptx(BaseTestCase, TestUnicodeBase):
ext = "pptx"
# A binary file is passed and text is not de/encoded.
@unittest.skipIf(1, "no conversion happening")
def test_invalid_char(self):
pass
class TestUnicodePdf(BaseTestCase, TestUnicodeBase):
ext = "pdf"
class TestUnicodePng(BaseTestCase, TestUnicodeBase):
ext = "png"
def compare(self, content_s, fulltext_s):
pass
@unittest.skipIf(1, "not compatible")
def test_invalid_char(self):
pass
class TestUnicodeJson(BaseTestCase, TestUnicodeBase):
ext = "json"
class TestUnicodeDocx(BaseTestCase, TestUnicodeBase):
ext = "docx"
# Underlying lib doesn't allow to specify an encoding.
@unittest.skipIf(1, "not compatible")
def test_invalid_char(self):
pass
class TestUnicodeEml(BaseTestCase, TestUnicodeBase):
ext = "eml"
class TestUnicodeMbox(BaseTestCase, TestUnicodeBase):
ext = "mbox"
# ===================================================================
# --- Test titles
# ===================================================================
class TestTitle(BaseTestCase):
def test_html(self):
fname = pathjoin(HERE, "files/others/title.html")
self.assertEqual(
fulltext.get_with_title(fname)[1], "Lorem ipsum")
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_pdf(self):
fname = pathjoin(HERE, "files/others/test.pdf")
self.assertEqual(
fulltext.get_with_title(fname)[1], "This is a test PDF file")
def test_odt(self):
fname = pathjoin(HERE, "files/others/pretty-ones.odt")
self.assertEqual(
fulltext.get_with_title(fname)[1], "PRETTY ONES")
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_doc(self):
fname = pathjoin(HERE, "files/others/hello-world.doc")
fulltext.get_with_title(fname)
self.assertEqual(
fulltext.get_with_title(fname)[1], 'Lab 1: Hello World')
def test_docx(self):
fname = pathjoin(HERE, "files/others/hello-world.docx")
self.assertEqual(
fulltext.get_with_title(fname)[1], 'MPI example')
@unittest.skipIf(TRAVIS, "fails on travis")
def test_epub(self):
fname = pathjoin(HERE, "files/others/jquery.epub")
self.assertEqual(
fulltext.get_with_title(fname)[1], 'JQuery Hello World')
def test_pptx(self):
fname = pathjoin(HERE, "files/others/test.pptx")
self.assertEqual(
fulltext.get_with_title(fname)[1], 'lorem ipsum')
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_ps(self):
fname = pathjoin(HERE, "files/others/lecture.ps")
self.assertEqual(
fulltext.get_with_title(fname)[1], 'Hey there')
@unittest.skipIf(WINDOWS, "not supported on Windows")
def test_rtf(self):
fname = pathjoin(HERE, "files/others/test.rtf")
self.assertEqual(
fulltext.get_with_title(fname)[1], 'hello there')
def test_xls(self):
fname = pathjoin(HERE, "files/others/test.xls")
self.assertEqual(
fulltext.get_with_title(fname)[1], 'hey there')
def test_xlsx(self):
fname = pathjoin(HERE, "files/others/test.xlsx")
self.assertEqual(
fulltext.get_with_title(fname)[1], 'yo man!')
def main():
unittest.main(verbosity=2)
if __name__ == '__main__':
main()
| btimby/fulltext | fulltext/test/__init__.py | Python | mit | 30,303 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# RushHourGame documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 24 19:39:09 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# FOR OGP
sys.path.append(os.path.abspath('_ext'))
#import sphinx_rtd_theme
#import sphinx_bootstrap_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'ogtag',
'sphinxcontrib.blockdiag',
'sphinxcontrib.seqdiag',
'sphinxcontrib.actdiag',
'sphinxcontrib.nwdiag',
'sphinxcontrib.rackdiag',
'sphinxcontrib.packetdiag',
'sphinx.ext.todo',
'myasuda.sphinx.erdiagram',
]
og_site_url = 'http://docs.rushhourgame.net/spec/'
og_twitter_site = '@yasshi2525'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'RushHourGame'
copyright = '2017, yasshi2525'
author = 'yasshi2525'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ja'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
#html_theme = 'sphinx_rtd_theme'
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# html_theme_options = {'bootswatch_theme': "cosmo"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
html_show_sourcelink = False
html_favicon = 'favicon.ico'
blockdiag_html_image_format = 'SVG'
seqdiag_html_image_format = 'SVG'
actdiag_html_image_format = 'SVG'
nwdiag_html_image_format = 'SVG'
rackdiag_html_image_format = 'SVG'
packetdiag_html_image_format = 'SVG'
blockdiag_fontpath = [
    r'C:\WINDOWS\Fonts\MEIRYO.TTC',
'/usr/share/fonts/vlgothic/VL-Gothic-Regular.ttf',
]
actdiag_fontpath = [
    r'C:\WINDOWS\Fonts\MEIRYO.TTC',
'/usr/share/fonts/vlgothic/VL-Gothic-Regular.ttf',
]
seqdiag_fontpath = [
    r'C:\WINDOWS\Fonts\MEIRYO.TTC',
'/usr/share/fonts/vlgothic/VL-Gothic-Regular.ttf',
]
graphviz_output_format = "svg"
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RushHourGamedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'RushHourGame.tex', 'RushHourGame Documentation',
'yasshi2525', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rushhourgame', 'RushHourGame Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'RushHourGame', 'RushHourGame Documentation',
author, 'RushHourGame', 'One line description of project.',
'Miscellaneous'),
]
| yasshi2525/rushhourgame | docs/spec/conf.py | Python | mit | 6,528 |
# -*- coding: utf-8 -*-
from .feature_extractor import FeatureExtractor
class Char(FeatureExtractor):
"""Feature vectors for normalization as character-based tagging.
"""
def _get_sequenced(self, seq, pos, history=None):
joined = ''.join(history).replace("__EPS__", "")
features = {}
features[u'bias'] = 1.0
features[u'this_char ' + seq[pos]] = 1.0
full_word = seq[(self._left_context_size + 1):-self._right_context_size]
features[u'full_word ' + ''.join(full_word)] = 1.0
for i in range(1, self._left_context_size + 1):
features[u'left_{0}_char {1}'.format(i, seq[pos - i])] = 1.0
features[u'left_upto_{0}_tags {1}'\
.format(i, ' '.join(history[(pos - i):pos]))] = 1.0
features[u'left_joined_{0}_tags {1}'.format(i, joined[-i:])] = 1.0
for i in range(1, self._right_context_size + 1):
features[u'right_{0}_char {1}'.format(i, seq[pos + i])] = 1.0
return features
| mbollmann/perceptron | mmb_perceptron/feature_extractor/norm_char.py | Python | mit | 1,020 |
import sys
from setuptools import setup, find_packages
deps = ["jinja2", "distribute", "virtualenv", "beautifulsoup4"]
if sys.version_info[:2] == (2, 6):
deps.append('argparse')
setup(
name='pywizard',
version='0.7.1',
packages=find_packages(exclude=("test.*",)),
entry_points={
'console_scripts': [
'pywizard = pywizard.cli:pywizard_cmd',
],
},
url='',
license='MIT',
author='Aleksandr Rudakov',
author_email='ribozz@gmail.com',
description='Tool that implements chef/puppet -like approach for server management.',
long_description=open('README.md').read(),
install_requires=deps,
# cmdclass={'test': PyTest},
extras_require={
'dev': ['pytest', 'coverage', 'pytest-cov', 'mock'],
'travis': ['coveralls'],
'docs': ['sphinx==1.2b3', 'sphinx-argparse']
}
)
| pywizard/pywizard | setup.py | Python | mit | 882 |
import enchant
TWITTER = 'twitter'
INSTAGRAM = 'instagram'
FACEBOOK = 'facebook'
WECHAT = 'wechat'
WHATSAPP = 'whatsapp'
GOOGLE = 'google'
LINKEDIN = 'linkedin'
LINKDIN = 'linkdin'
SOCIAL_MEDIAS = [TWITTER, INSTAGRAM, FACEBOOK, WECHAT, WHATSAPP, GOOGLE, LINKEDIN, LINKDIN]
class SocialExtractor:
def __init__(self, high_recall = True, forward_limit=11):
self.is_high_recall = high_recall
self.dictionary_en = enchant.Dict("en_US")
self.dictionary_fr = enchant.Dict("fr_FR")
self.forward_limit = forward_limit
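    # Lower-case the tokens, then look for the words "twitter" and
    # "instagram" and collect any handle found in the tokens after them.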
def extract(self, tokens):
tokens = map(lambda x:x.lower(),tokens)
handles = dict()
is_extracted = False
handle = self.get_handle_after_social_media(tokens, TWITTER)
if(handle is not None):
handles['twitter'] = handle
is_extracted = True
handle = self.get_handle_after_social_media(tokens, INSTAGRAM)
if(handle is not None):
handles['instagram'] = handle
is_extracted = True
if(is_extracted):
return handles
return None
def get_handle_after_social_media(self, tokens, social_media=TWITTER):
handle = None
#print tokens
if(social_media in tokens):
social_media_index = tokens.index(social_media)
handle_index = social_media_index + 1
limit = min(social_media_index + self.forward_limit, len(tokens))
check_dictionary = True
while(handle_index < limit):
word_at_index = tokens[handle_index]
#print "Current Word:"+word_at_index
if(word_at_index == u"@"):
if(handle_index + 1 < limit):
return tokens[handle_index+1]
elif(check_dictionary and is_valid_handle(word_at_index, social_media)):
#Word is a potential handle
if(word_at_index.isalpha()):
check_dictionary = False
if(not word_at_index in SOCIAL_MEDIAS and not self.dictionary_en.check(word_at_index)
and not self.dictionary_fr.check(word_at_index)):
return word_at_index
handle_index += 1
return handle
def is_valid_handle(handle, social_media):
if(social_media == INSTAGRAM):
#Max 30 chars which are letters, numbers, underscores and periods
if(len(handle) <= 30):
handle = handle.replace("_","")
handle = handle.replace(".","")
if(handle.isalnum()):
return True
if(social_media == TWITTER):
#Max 15 chars which are letter, numbers, underscores
if(len(handle) <= 15):
handle = handle.replace("_","")
if(handle.isalnum()):
return True
return False
| r-kapoor/dig-socialmedia-id-extractor | digSocialMediaIdExtractor/social_extractor.py | Python | mit | 2,890 |
# -*- coding: utf-8 -*-
msg = {
'en': {
'add_text-adding': u'Robot: Adding %(adding)s',
},
# Author: Csisc
'qqq': {
'add_text-adding': u'Edit summary when the bot adds text to a given page. %(adding)s is the added text truncated to 200 characters.',
},
# Author: Csisc
'aeb': {
'add_text-adding': u'بوت: إضافة %(adding)s',
},
'af': {
'add_text-adding': u'robot Bygevoeg %(adding)s',
},
'als': {
'add_text-adding': u'Bötli: Ygfüegt %(adding)s',
},
'am': {
'add_text-adding': u'ሎሌ መጨመር %(adding)s',
},
'an': {
'add_text-adding': u'Robot Adhibito %(adding)s',
},
'ang': {
'add_text-adding': u'Robot ēcung %(adding)s',
},
'ar': {
'add_text-adding': u'بوت: إضافة %(adding)s',
},
'arc': {
'add_text-adding': u'ܪܘܒܘܛ ܬܘܣܦܬܐ %(adding)s',
},
'arz': {
'add_text-adding': u'تزويد روبوت %(adding)s',
},
'as': {
'add_text-adding': u'ৰবট যোগ দিছে %(adding)s',
},
# Author: Esbardu
'ast': {
'add_text-adding': u'Robó: Añadiendo %(adding)s',
},
# Author: AZISS
'az': {
'add_text-adding': u'Bot redaktəsi əlavə edilir: %(adding)s',
},
# Author: E THP
'azb': {
'add_text-adding': u'بوت رئداکتهسی علاوه ائدیلدی %(adding)s',
},
# Author: Haqmar
'ba': {
'add_text-adding': u'Робот өҫтәне: %(adding)s',
},
'bar': {
'add_text-adding': u'Boterl: Aini %(adding)s',
},
'bat-smg': {
'add_text-adding': u'robots Pridedama %(adding)s',
},
'bcl': {
'add_text-adding': u'robot minadugang %(adding)s',
},
'be': {
'add_text-adding': u'робат дадаў %(adding)s',
},
'be-x-old': {
'add_text-adding': u'робат дадаў %(adding)s',
},
'bg': {
'add_text-adding': u'Робот Добавяне %(adding)s',
},
'bjn': {
'add_text-adding': u'bot Manambah %(adding)s',
},
'bn': {
'add_text-adding': u'বট যোগ করছে %(adding)s',
},
'bo': {
'add_text-adding': u'འཕྲུལ་ཆས་ཀྱི་མི། ཁ་སྣོན་རྒྱག་པ། %(adding)s',
},
'bpy': {
'add_text-adding': u'রোবট তিলকরের %(adding)s',
},
'br': {
'add_text-adding': u'Robot ouzhpennet %(adding)s',
},
'bs': {
'add_text-adding': u'robot dodaje %(adding)s',
},
# Author: Anskar
'ca': {
'add_text-adding': u'Robot: Afegint %(adding)s',
},
'ce': {
'add_text-adding': u'робот тIетоьхна %(adding)s',
},
'ceb': {
'add_text-adding': u'robot Gidugang %(adding)s',
},
# Author: Asoxor
# Author: Calak
'ckb': {
'add_text-adding': u'ڕۆبۆت: زیادکردنی %(adding)s',
},
'crh': {
'add_text-adding': u'robot ekley %(adding)s',
},
'cs': {
'add_text-adding': u'Robot přidal %(adding)s',
},
'csb': {
'add_text-adding': u'robot dodôwô %(adding)s',
},
'cu': {
'add_text-adding': u'аѵтоматъ добавихъ %(adding)s',
},
'cv': {
'add_text-adding': u'робот хушрĕ %(adding)s',
},
'cy': {
'add_text-adding': u'robot yn ychwanegu %(adding)s',
},
# Author: Christian List
# Author: Kaare
'da': {
'add_text-adding': u'Robot: Tilføjer %(adding)s',
},
'de': {
'add_text-adding': u'Bot: %(adding)s hinzugefügt',
},
# Author: Eruedin
'de-ch': {
'add_text-adding': u'Bot: %(adding)s hinzugefügt',
},
# Author: Erdemaslancan
'diq': {
'add_text-adding': u'Boti %(adding)s dekerd de',
},
'dv': {
'add_text-adding': u'ރޮބޮޓ އިތުރު ކުރުނ %(adding)s',
},
'el': {
'add_text-adding': u'Ρομπότ: Προσθήκη %(adding)s',
},
'eml': {
'add_text-adding': u'Robot A Śònt %(adding)s',
},
# Author: Airon90
'eo': {
'add_text-adding': u'Roboto: Aldonado de %(adding)s',
},
# Author: TheBITLINK
# Author: Xqt
'es': {
'add_text-adding': u'Bot: Añadiendo %(adding)s',
},
# Author: Pikne
'et': {
'add_text-adding': u'Robot: lisatud %(adding)s',
},
'eu': {
'add_text-adding': u'robota Erantsia %(adding)s',
},
'ext': {
'add_text-adding': u'Robó Añiiu %(adding)s',
},
'fa': {
'add_text-adding': u'ربات: افزودن %(adding)s',
},
'fi': {
'add_text-adding': u'Botti lisäsi %(adding)s',
},
'fiu-vro': {
'add_text-adding': u'robot manopandminõ %(adding)s',
},
'fo': {
'add_text-adding': u'bottur leggur aftrat %(adding)s',
},
# Author: Od1n
'fr': {
'add_text-adding': u'Robot : Ajoute %(adding)s',
},
# Author: ChrisPtDe
'frp': {
'add_text-adding': u'Robot : apond %(adding)s',
},
# Author: Murma174
'frr': {
'add_text-adding': u'Bot:Du %(adding)s diartu',
},
'fur': {
'add_text-adding': u'Robot: o zonti %(adding)s',
},
'fy': {
'add_text-adding': u'Bot - derby %(adding)s',
},
'ga': {
'add_text-adding': u'róbat ag suimiú %(adding)s',
},
'gl': {
'add_text-adding': u'Bot: Engado %(adding)s',
},
'gn': {
'add_text-adding': u'bot ojoapy %(adding)s',
},
'gu': {
'add_text-adding': u'રોબોટ ઉમેરણ %(adding)s',
},
'gv': {
'add_text-adding': u'bot currit stiagh ec %(adding)s',
},
# Author: Jetlag
'hak': {
'add_text-adding': u'機械人:新增%(adding)s',
},
'he': {
'add_text-adding': u'בוט: מוסיף %(adding)s',
},
'hr': {
'add_text-adding': u'robot Dodaje %(adding)s',
},
'hsb': {
'add_text-adding': u'bot přidał %(adding)s',
},
'ht': {
'add_text-adding': u'wobo Ajoute %(adding)s',
},
'hu': {
'add_text-adding': u'Bot: következő hozzáadása %(adding)s',
},
'hy': {
'add_text-adding': u'Ռոբոտը ավելացնում է․ %(adding)s',
},
'ia': {
'add_text-adding': u'Robot: Addition de %(adding)s',
},
'id': {
'add_text-adding': u'bot Menambah %(adding)s',
},
'ie': {
'add_text-adding': u'Bot: Adjuntet %(adding)s',
},
# Author: Lam-ang
'ilo': {
'add_text-adding': u'Robot: Agnaynayon %(adding)s',
},
'io': {
'add_text-adding': u'roboto adjuntas %(adding)s',
},
# Author: Snævar
'is': {
'add_text-adding': u'Vélmenni: Bæti við %(adding)s',
},
'it': {
'add_text-adding': u'Bot: Aggiungo %(adding)s',
},
# Author: Fryed-peach
# Author: Shirayuki
'ja': {
'add_text-adding': u'ロボットによる: %(adding)s を追加',
},
# Author: NoiX180
'jv': {
'add_text-adding': u'Bot: Nambah %(adding)s',
},
'ka': {
'add_text-adding': u'ბოტის დამატება %(adding)s',
},
'kab': {
'add_text-adding': u'a rubut ti merniwt %(adding)s',
},
'kk': {
'add_text-adding': u'Боттың үстегені %(adding)s',
},
'kl': {
'add_text-adding': u'Robot Ilassut %(adding)s',
},
'km': {
'add_text-adding': u'រ៉ូបូ បន្ថែម %(adding)s',
},
# Author: 아라
'ko': {
'add_text-adding': u'로봇: %(adding)s 추가',
},
'koi': {
'add_text-adding': u'робот содтiс %(adding)s',
},
'krc': {
'add_text-adding': u'робот къошду %(adding)s',
},
'ksh': {
'add_text-adding': u'Bot: dobëijedonn %(adding)s',
},
'ku': {
'add_text-adding': u'robot serzêde kirin %(adding)s',
},
'kv': {
'add_text-adding': u'робот содтi %(adding)s',
},
# Author: Kernoweger
'kw': {
'add_text-adding': u'Robot: ow keworra %(adding)s',
},
'ky': {
'add_text-adding': u'робот кошту %(adding)s',
},
'la': {
'add_text-adding': u'automaton addit %(adding)s',
},
'lad': {
'add_text-adding': u'robot Adjustado %(adding)s',
},
# Author: Robby
'lb': {
'add_text-adding': u'Bot: Derbäi setzen %(adding)s',
},
# Author: Namik
'lez': {
'add_text-adding': u'Робот:авунаj кар %(adding)s',
},
'li': {
'add_text-adding': u'Robot: debie %(adding)s',
},
'lmo': {
'add_text-adding': u'Robot jontant %(adding)s',
},
'ln': {
'add_text-adding': u'bot ebakísí %(adding)s',
},
'lo': {
'add_text-adding': u'ໂຣບົດ ພວມເພີ່ມ %(adding)s',
},
'lt': {
'add_text-adding': u'robotas Pridedama %(adding)s',
},
# Author: RMizo
'lus': {
'add_text-adding': u'Khawlmi: %(adding)s telh mek a ni',
},
'lv': {
'add_text-adding': u'robots pievieno %(adding)s',
},
# Author: StefanusRA
'map-bms': {
'add_text-adding': u'Bot Nambah %(adding)s',
},
'mdf': {
'add_text-adding': u'бот поладозе %(adding)s',
},
'mg': {
'add_text-adding': u'Rôbô Nanampy %(adding)s',
},
'mhr': {
'add_text-adding': u'робот ешарен %(adding)s',
},
'mi': {
'add_text-adding': u'he karetao e tāpiri ana %(adding)s',
},
'min': {
'add_text-adding': u'bot Manambah %(adding)s',
},
# Author: Rancher
'mk': {
'add_text-adding': u'Робот: Додава %(adding)s',
},
'ml': {
'add_text-adding': u'യന്ത്രം ചേര്ക്കുന്നു %(adding)s',
},
'mn': {
'add_text-adding': u'робот Нэмж байна %(adding)s',
},
'mo': {
'add_text-adding': u'Робот: Адэугат %(adding)s',
},
'mr': {
'add_text-adding': u'सांगकाम्याने वाढविले %(adding)s',
},
'mrj': {
'add_text-adding': u'робот темен %(adding)s',
},
'ms': {
'add_text-adding': u'bot menambah %(adding)s',
},
'mt': {
'add_text-adding': u'Bot Żieda %(adding)s',
},
'mwl': {
'add_text-adding': u'Robó Adicionando %(adding)s',
},
'my': {
'add_text-adding': u'ရိုဘော့က ပေါင်းထည့်နေသည် - %(adding)s',
},
'myv': {
'add_text-adding': u'роботось путызеть %(adding)s',
},
'mzn': {
'add_text-adding': u'ربوت بیشتن %(adding)s',
},
'nah': {
'add_text-adding': u'TepozcuayollotlTlamahxiltilli %(adding)s',
},
'nds': {
'add_text-adding': u'Bot: tofoiegt: %(adding)s',
},
# Author: Servien
'nds-nl': {
'add_text-adding': u'Bot derbie %(adding)s',
},
'ne': {
'add_text-adding': u'रोबोट ले थप्दै %(adding)s',
},
'nl': {
'add_text-adding': u'Robot: toegevoegd %(adding)s',
},
# Author: Harald Khan
# Author: Njardarlogar
'nn': {
'add_text-adding': u'robot: la til %(adding)s',
},
'no': {
'add_text-adding': u'robot Legger til %(adding)s',
},
'nov': {
'add_text-adding': u'robote Adid %(adding)s',
},
'nrm': {
'add_text-adding': u'robot ajouôte %(adding)s',
},
'nv': {
'add_text-adding': u'botígíí díí naaltsoos tʼáá bíniʼ łahgo áyiilaa (+) %(adding)s',
},
'oc': {
'add_text-adding': u'Robòt Apondre %(adding)s',
},
# Author: Jnanaranjan Sahu
'or': {
'add_text-adding': u'Robot: Adding %(adding)s',
},
'os': {
'add_text-adding': u'Робот бавæрдта %(adding)s',
},
'pcd': {
'add_text-adding': u'Robot Rajoute %(adding)s',
},
'pdc': {
'add_text-adding': u'Waddefresser: %(adding)s dezu geduh',
},
'pfl': {
'add_text-adding': u'Bot: Aigfiecht %(adding)s',
},
# Author: Sp5uhe
'pl': {
'add_text-adding': u'Robot dopisał: %(adding)s',
},
'pms': {
'add_text-adding': u'ël trigomiro a gionta %(adding)s',
},
'pnb': {
'add_text-adding': u'رلانا روبوٹ %(adding)s',
},
'ps': {
'add_text-adding': u'روباټ زیاتول %(adding)s',
},
'pt': {
'add_text-adding': u'Robô: Adicionando %(adding)s',
},
# Author: 555
'pt-br': {
'add_text-adding': u'Bot: Adicionando %(adding)s',
},
'qu': {
'add_text-adding': u'Rurana antacha Yapasqa %(adding)s',
},
'rm': {
'add_text-adding': u'Bot Agiunt %(adding)s',
},
'ro': {
'add_text-adding': u'Robot interwiki: Adăugat %(adding)s',
},
'roa-tara': {
'add_text-adding': u'Robbot Stoche a mette %(adding)s',
},
# Author: Rubin
# Author: Volkov
'ru': {
'add_text-adding': u'бот добавил: %(adding)s',
},
'rue': {
'add_text-adding': u'робот додав %(adding)s',
},
'rw': {
'add_text-adding': u'Roboti Guterana %(adding)s',
},
# Author: Hemant wikikosh1
'sa': {
'add_text-adding': u'कारुयन्त्रम् : योजयति %(adding)s',
},
'sah': {
'add_text-adding': u'робот эптэ %(adding)s',
},
'scn': {
'add_text-adding': u'robot junciennu %(adding)s',
},
'sco': {
'add_text-adding': u'robot Addin %(adding)s',
},
'si': {
'add_text-adding': u'රොබෝ එකතු කරමින් %(adding)s',
},
# Author: Teslaton
'sk': {
'add_text-adding': u'Robot pridal %(adding)s',
},
'sl': {
'add_text-adding': u'robot Dodajanje %(adding)s',
},
# Author: Abshirdheere
'so': {
'add_text-adding': u'Bot: dheeraad ah %(adding)s',
},
# Author: FatosMorina
'sq': {
'add_text-adding': u'roboti duke shtuar %(adding)s',
},
# Author: Rancher
'sr': {
'add_text-adding': u'Робот: додато %(adding)s',
},
# Author: Rancher
'sr-el': {
'add_text-adding': u'Robot: dodato %(adding)s',
},
'su': {
'add_text-adding': u'bot Nambih %(adding)s',
},
'sv': {
'add_text-adding': u'Bot: Lägger till %(adding)s',
},
# Author: Muddyb Blast Producer
'sw': {
'add_text-adding': u'Roboti: Imeongeza %(adding)s',
},
'szl': {
'add_text-adding': u'Bot dodowo: %(adding)s',
},
'ta': {
'add_text-adding': u'தானியங்கிஇணைப்பு %(adding)s',
},
'te': {
'add_text-adding': u'యంత్రము కలుపుతున్నది %(adding)s',
},
'tet': {
'add_text-adding': u'bot tau tan %(adding)s',
},
'tg': {
'add_text-adding': u'робот илова карда истодааст %(adding)s',
},
'th': {
'add_text-adding': u'โรบอต เพิ่ม %(adding)s',
},
'tk': {
'add_text-adding': u'robot goşdy %(adding)s',
},
'tl': {
'add_text-adding': u'robot dinagdag %(adding)s',
},
# Author: Гусейн
'tly': {
'add_text-adding': u'Робот: Зијод кардә бе %(adding)s',
},
'to': {
'add_text-adding': u'mīsiniʻoku tānaki %(adding)s',
},
# Author: Khutuck
'tr': {
'add_text-adding': u'Bot: Ekleniyor %(adding)s',
},
'tt': {
'add_text-adding': u'робот кушты %(adding)s',
},
# Author: Tifinaghes
'tzm': {
'add_text-adding': u'ⴰⵕⵓⴱⵓⵜ ⵜⵔⵏⵓⵜ %(adding)s',
},
'udm': {
'add_text-adding': u'робот ватсаз %(adding)s',
},
# Author: Sahran
'ug': {
'add_text-adding': u'ماشىنا ئادەم: %(adding)s قوشۇۋاتىدۇ',
},
'uk': {
'add_text-adding': u'робот додав %(adding)s',
},
'ur': {
'add_text-adding': u'روبالہ جمع %(adding)s',
},
'uz': {
'add_text-adding': u'Bot Qoʻshdi %(adding)s',
},
# Author: Alunardon90
'vec': {
'add_text-adding': u'Robot: Xonto %(adding)s',
},
'vi': {
'add_text-adding': u'Bot: Thêm %(adding)s',
},
'vls': {
'add_text-adding': u'Robot Derby %(adding)s',
},
'vo': {
'add_text-adding': u'Bot: Läükon vödemi: %(adding)s',
},
'wa': {
'add_text-adding': u'robot radjouter %(adding)s',
},
# Author: Harvzsf
'war': {
'add_text-adding': u'Robot: Gindudugang %(adding)s',
},
'xal': {
'add_text-adding': u'көдлвр немв %(adding)s',
},
'yi': {
'add_text-adding': u'באט צוגעלייגט %(adding)s',
},
# Author: Demmy
'yo': {
'add_text-adding': u'Bot: Ìfikún %(adding)s',
},
'yue': {
'add_text-adding': u'機械人 加 %(adding)s',
},
# Author: Mys 721tx
# Author: Yfdyh000
'zh': {
'add_text-adding': u'机器人:添加 %(adding)s',
},
'zh-classical': {
'add_text-adding': u'僕 增 %(adding)s',
},
# Author: Simon Shek
'zh-hant': {
'add_text-adding': u'機械人:新增%(adding)s',
},
# Author: Justincheng12345
'zh-hk': {
'add_text-adding': u'機械人新增:%(adding)s',
},
'zh-min-nan': {
'add_text-adding': u'bot ka-thiam %(adding)s',
},
'zh-yue': {
'add_text-adding': u'機械人 加 %(adding)s',
},
};
|
legoktm/pywikipedia-rewrite
|
scripts/i18n/add_text.py
|
Python
|
mit
| 15,557
|
import unittest
import mock
import copy
import itertools
from biokbase.workspace.baseclient import ServerError
from biokbase.narrative.app_util import map_inputs_from_job, map_outputs_from_state
from biokbase.narrative.jobs.job import (
Job,
COMPLETED_STATUS,
EXCLUDED_JOB_STATE_FIELDS,
JOB_ATTRS,
JOB_ATTR_DEFAULTS,
)
from biokbase.narrative.jobs.jobmanager import JOB_INIT_EXCLUDED_JOB_STATE_FIELDS
from biokbase.narrative.jobs.specmanager import SpecManager
from .util import ConfigTests
from .narrative_mock.mockclients import (
get_mock_client,
get_failing_mock_client,
MockClients,
assert_obj_method_called,
)
from contextlib import contextmanager
from io import StringIO
import sys
@contextmanager
def capture_stdout():
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
config = ConfigTests()
sm = SpecManager()
TEST_JOBS = config.load_json_file(config.get("jobs", "ee2_job_info_file"))
with mock.patch("biokbase.narrative.jobs.jobmanager.clients.get", get_mock_client):
sm.reload()
TEST_SPECS = copy.deepcopy(sm.app_specs)
sm.reload() # get live data
LIVE_SPECS = copy.deepcopy(sm.app_specs)
def get_test_spec(tag, app_id, live=False):
specs = LIVE_SPECS if live else TEST_SPECS
return copy.deepcopy(specs[tag][app_id])
def get_test_job(job_id):
return copy.deepcopy(TEST_JOBS[job_id])
# test_jobs contains jobs in the following states
JOB_COMPLETED = "5d64935ab215ad4128de94d6"
JOB_CREATED = "5d64935cb215ad4128de94d7"
JOB_RUNNING = "5d64935cb215ad4128de94d8"
JOB_TERMINATED = "5d64935cb215ad4128de94d9"
JOB_ERROR = "5d64935cb215ad4128de94e0"
BATCH_PARENT = "60e7112887b7e512a899c8f1"
BATCH_COMPLETED = "60e7112887b7e512a899c8f2"
BATCH_TERMINATED = "60e7112887b7e512a899c8f3"
BATCH_TERMINATED_RETRIED = "60e7112887b7e512a899c8f4"
BATCH_ERROR_RETRIED = "60e7112887b7e512a899c8f5"
BATCH_RETRY_COMPLETED = "60e71159fce9347f2adeaac6"
BATCH_RETRY_RUNNING = "60e7165f3e91121969554d82"
BATCH_RETRY_ERROR = "60e717d78ac80701062efe63"
JOB_NOT_FOUND = "job_not_found"
BATCH_CHILDREN = [
BATCH_COMPLETED,
BATCH_TERMINATED,
BATCH_TERMINATED_RETRIED,
BATCH_ERROR_RETRIED,
BATCH_RETRY_COMPLETED,
BATCH_RETRY_RUNNING,
BATCH_RETRY_ERROR,
]
JOBS_TERMINALITY = {
JOB_COMPLETED: True,
JOB_CREATED: False,
JOB_RUNNING: False,
JOB_TERMINATED: True,
JOB_ERROR: True,
BATCH_PARENT: False,
BATCH_COMPLETED: True,
BATCH_TERMINATED: True,
BATCH_TERMINATED_RETRIED: True,
BATCH_ERROR_RETRIED: True,
BATCH_RETRY_COMPLETED: True,
BATCH_RETRY_RUNNING: False,
BATCH_RETRY_ERROR: True,
}
ALL_JOBS = list(JOBS_TERMINALITY.keys())
TERMINAL_JOBS = []
ACTIVE_JOBS = []
for key, value in JOBS_TERMINALITY.items():
if value:
TERMINAL_JOBS.append(key)
else:
ACTIVE_JOBS.append(key)
def create_job_from_ee2(job_id, extra_data=None, children=None):
state = get_test_job(job_id)
job = Job(state, extra_data=extra_data, children=children)
return job
def create_state_from_ee2(job_id, exclude_fields=JOB_INIT_EXCLUDED_JOB_STATE_FIELDS):
"""
create the output of job.state() from raw job data
"""
state = get_test_job(job_id)
for attr in exclude_fields:
if attr in state:
del state[attr]
return state
def create_attrs_from_ee2(job_id):
state = get_test_job(job_id)
job_input = state.get("job_input", {})
narr_cell_info = job_input.get("narrative_cell_info", {})
attrs = dict(
app_id=job_input.get("app_id", JOB_ATTR_DEFAULTS["app_id"]),
app_version=job_input.get("service_ver", JOB_ATTR_DEFAULTS["app_version"]),
batch_id=(
state.get("job_id")
if state.get("batch_job", JOB_ATTR_DEFAULTS["batch_job"])
else state.get("batch_id", JOB_ATTR_DEFAULTS["batch_id"])
),
batch_job=state.get("batch_job", JOB_ATTR_DEFAULTS["batch_job"]),
cell_id=narr_cell_info.get("cell_id", JOB_ATTR_DEFAULTS["cell_id"]),
child_jobs=state.get("child_jobs", JOB_ATTR_DEFAULTS["child_jobs"]),
job_id=state.get("job_id"),
params=job_input.get("params", JOB_ATTR_DEFAULTS["params"]),
retry_ids=state.get("retry_ids", JOB_ATTR_DEFAULTS["retry_ids"]),
retry_parent=state.get("retry_parent", JOB_ATTR_DEFAULTS["retry_parent"]),
run_id=narr_cell_info.get("run_id", JOB_ATTR_DEFAULTS["run_id"]),
tag=narr_cell_info.get("tag", JOB_ATTR_DEFAULTS["tag"]),
user=state.get("user", JOB_ATTR_DEFAULTS["user"]),
)
return attrs
def get_widget_info(job_id):
state = get_test_job(job_id)
if state.get("status") != COMPLETED_STATUS:
return None
job_input = state.get("job_input", {})
narr_cell_info = job_input.get("narrative_cell_info", {})
params = job_input.get("params", JOB_ATTR_DEFAULTS["params"])
tag = narr_cell_info.get("tag", JOB_ATTR_DEFAULTS["tag"])
app_id = job_input.get("app_id", JOB_ATTR_DEFAULTS["app_id"])
spec = get_test_spec(tag, app_id)
with mock.patch("biokbase.narrative.app_util.clients.get", get_mock_client):
output_widget, widget_params = map_outputs_from_state(
state,
map_inputs_from_job(params, spec),
spec,
)
return {
"name": output_widget,
"tag": narr_cell_info.get("tag", "release"),
"params": widget_params,
}
def get_test_job_state(job_id):
state = get_test_job(job_id)
job_input = state.get("job_input", {})
narr_cell_info = job_input.get("narrative_cell_info", {})
state.update(
{
"batch_id": state.get(
"batch_id", job_id if state.get("batch_job", False) else None
),
"cell_id": narr_cell_info.get("cell_id", None),
"run_id": narr_cell_info.get("run_id", None),
"job_output": state.get("job_output", {}),
"child_jobs": state.get("child_jobs", []),
}
)
for f in EXCLUDED_JOB_STATE_FIELDS:
if f in state:
del state[f]
output_state = {
"state": state,
"widget_info": get_widget_info(job_id),
"cell_id": state.get("cell_id"),
"user": state.get("user"),
}
return output_state
def get_test_job_states(job_ids=TEST_JOBS.keys()):
# generate full job state objects
return {job_id: get_test_job_state(job_id) for job_id in job_ids}
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def get_batch_family_jobs(return_list=False):
"""
As invoked in appmanager's run_app_bulk, i.e.,
with from_job_id(s)
"""
child_jobs = Job.from_job_ids(BATCH_CHILDREN, return_list=True)
batch_job = Job.from_job_id(BATCH_PARENT, children=child_jobs)
if return_list:
return [batch_job] + child_jobs
else:
return {
BATCH_PARENT: batch_job,
**{
child_id: child_job
for child_id, child_job in zip(BATCH_CHILDREN, child_jobs)
},
}
def get_all_jobs(return_list=False):
# do batch family because
# batch container job needs to be
# instantiated with child instances
jobs = get_batch_family_jobs()
for job_id in ALL_JOBS:
if job_id not in jobs:
jobs[job_id] = create_job_from_ee2(job_id)
if return_list:
jobs = list(jobs.values())
return jobs
def get_cell_2_jobs():
"""
Returns a dict with keys being all the cell IDs
in the test data, and the values being a list of
jobs with that cell ID.
Batch jobs technically don't have cell IDs,
but they will take on the cell IDs of their children.
If their children are in different
cells, all children's cell IDs will map to the batch job
"""
cell_and_jobs = []
for job_id, job in get_all_jobs().items():
if job.batch_job:
for child_job in job.children:
cell_and_jobs.append((child_job.cell_id, job_id))
else:
cell_and_jobs.append((job.cell_id, job_id))
cell_2_jobs = {}
for cell_id, job_id in cell_and_jobs:
if cell_id in cell_2_jobs and job_id not in cell_2_jobs[cell_id]:
cell_2_jobs[cell_id] += [job_id]
else:
cell_2_jobs[cell_id] = [job_id]
return cell_2_jobs
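# Illustrative sketch with hypothetical IDs (not taken from the test data):
# if a batch parent "batch_1" has children "child_a" (cell "cell_1") and
# "child_b" (cell "cell_2"), get_cell_2_jobs() yields
# {"cell_1": ["child_a", "batch_1"], "cell_2": ["child_b", "batch_1"]},
# i.e. the parent appears under every cell that holds one of its children.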
class JobTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
job_state = get_test_job(JOB_COMPLETED)
job_input = job_state["job_input"]
cls.job_id = job_state["job_id"]
cls.app_id = job_input["app_id"]
cls.app_version = job_input.get("service_ver", "0.0.1")
cls.batch_id = None
cls.batch_job = False
cls.child_jobs = []
cls.cell_id = job_input.get("narrative_cell_info", {}).get("cell_id")
cls.extra_data = None
cls.user = job_state["user"]
cls.params = job_input["params"]
cls.retry_ids = job_state.get("retry_ids", [])
cls.retry_parent = job_state.get("retry_parent")
cls.run_id = job_input.get("narrative_cell_info", {}).get("run_id")
cls.tag = job_input.get("narrative_cell_info", {}).get("tag", "dev")
# load mock specs
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
SpecManager().reload()
def check_jobs_equal(self, jobl, jobr):
self.assertEqual(jobl._acc_state, jobr._acc_state)
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
self.assertEqual(jobl.state(), jobr.state())
for attr in JOB_ATTRS:
self.assertEqual(getattr(jobl, attr), getattr(jobr, attr))
def check_job_attrs_custom(self, job, exp_attr={}):
attr = dict(JOB_ATTR_DEFAULTS)
attr.update(exp_attr)
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
for name, value in attr.items():
self.assertEqual(value, getattr(job, name))
def check_job_attrs(self, job, job_id, exp_attrs={}, skip_state=False):
# TODO check _acc_state full vs pruned, extra_data
# Check state() if no special values expected
if not exp_attrs and not skip_state:
state = create_state_from_ee2(job_id)
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
self.assertEqual(state, job.state())
attrs = create_attrs_from_ee2(job_id)
attrs.update(exp_attrs)
# Mock here because job.child_jobs and job.retry_ids can
# cause EE2 query
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
for name, value in attrs.items():
self.assertEqual(value, getattr(job, name))
def test_job_init__error_no_job_id(self):
with self.assertRaisesRegex(
ValueError, "Cannot create a job without a job ID!"
):
Job({"params": {}, "app_id": "this/that"})
def test_job_init__from_job_id(self):
"""
test job initialisation, as is done by run_app
"""
for job_id in ALL_JOBS:
if job_id == BATCH_PARENT:
continue
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
job = Job.from_job_id(job_id)
self.check_job_attrs(job, job_id)
def test_job_init__from_job_ids(self):
job_ids = ALL_JOBS.copy()
job_ids.remove(BATCH_PARENT)
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
jobs = Job.from_job_ids(job_ids, return_list=False)
for job_id, job in jobs.items():
self.check_job_attrs(job, job_id)
def test_job_init__extra_state(self):
"""
test job initialisation as is done by run_app_batch
"""
app_id = "kb_BatchApp/run_batch"
extra_data = {
"batch_app": app_id,
"batch_tag": None,
"batch_size": 300,
}
for job_id in ALL_JOBS:
if job_id == BATCH_PARENT:
continue
with mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client):
batch_job = Job.from_job_id(
job_id,
extra_data=extra_data,
)
self.check_job_attrs(batch_job, job_id, {"extra_data": extra_data})
def test_job_init__batch_family(self):
"""
test job initialization, as is done by run_app_bulk
"""
batch_jobs = get_batch_family_jobs(return_list=False)
for job_id, job in batch_jobs.items():
self.check_job_attrs(job, job_id)
batch_job = batch_jobs[BATCH_PARENT]
self.assertEqual(batch_job.job_id, batch_job.batch_id)
def test_job_from_state__custom(self):
"""
test job initialisation with defaults being filled in
TODO do a non-default?
"""
params = [
{
"import_type": "FASTQ/FASTA",
"name": "small.forward.fq",
}
]
test_job = {
"user": "the_user",
"job_input": {
"params": params,
"service_ver": "42",
"app_id": "This/app",
},
"job_id": "0123456789abcdef",
}
expected = {
"app_id": "This/app",
"app_version": "42",
"batch_id": JOB_ATTR_DEFAULTS["batch_id"],
"cell_id": JOB_ATTR_DEFAULTS["cell_id"],
"extra_data": None,
"job_id": "0123456789abcdef",
"params": params,
"run_id": JOB_ATTR_DEFAULTS["run_id"],
"tag": JOB_ATTR_DEFAULTS["tag"],
"user": "the_user",
}
job = Job(test_job)
self.check_job_attrs_custom(job, expected)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_state__no_final_state__non_terminal(self):
"""
test that a job outputs the correct state
"""
# ee2_state is fully populated (includes job_input, no job_output)
job = create_job_from_ee2(JOB_CREATED)
self.assertFalse(job.was_terminal())
self.assertIsNone(job.final_state)
state = job.state()
self.assertFalse(job.was_terminal())
self.assertIsNone(job.final_state)
self.assertEqual(state["status"], "created")
expected_state = create_state_from_ee2(JOB_CREATED)
self.assertEqual(state, expected_state)
def test_state__final_state_exists__terminal(self):
"""
test that a completed job emits its state without calling check_job
"""
job = create_job_from_ee2(JOB_COMPLETED)
self.assertTrue(job.was_terminal())
self.assertIsNotNone(job.final_state)
expected = create_state_from_ee2(JOB_COMPLETED)
self.assertEqual(job.final_state, expected)
with assert_obj_method_called(MockClients, "check_job", call_status=False):
state = job.state()
self.assertEqual(state["status"], "completed")
self.assertEqual(state, expected)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_failing_mock_client)
def test_state__raise_exception(self):
"""
test that the correct exception is thrown if check_job cannot be called
"""
job = create_job_from_ee2(JOB_CREATED)
self.assertFalse(job.was_terminal())
self.assertIsNone(job.final_state)
with self.assertRaisesRegex(ServerError, "Check job failed"):
job.state()
def test_state__returns_none(self):
def mock_state(self, state=None):
return None
job = create_job_from_ee2(JOB_CREATED)
expected = {
"status": "error",
"error": {
"code": -1,
"name": "Job Error",
"message": "Unable to return job state",
"error": "Unable to find current job state. Please try again later, or contact KBase.",
},
"errormsg": "Unable to return job state",
"error_code": -1,
"job_id": job.job_id,
"cell_id": job.cell_id,
"run_id": job.run_id,
"created": 0,
"updated": 0,
}
with mock.patch.object(Job, "state", mock_state):
state = job.output_state()
self.assertEqual(expected, state)
def test_job_update__no_state(self):
"""
test that without a state object supplied, the job state is unchanged
"""
job = create_job_from_ee2(JOB_CREATED)
self.assertFalse(job.was_terminal())
job._update_state(None)
self.assertFalse(job.was_terminal())
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_job_update__invalid_job_id(self):
"""
ensure that an ee2 state with a different job ID cannot be used to update a job
"""
job = create_job_from_ee2(JOB_RUNNING)
expected = create_state_from_ee2(JOB_RUNNING)
self.assertEqual(job.state(), expected)
# try to update it with the job state from a different job
with self.assertRaisesRegex(ValueError, "Job ID mismatch in _update_state"):
job._update_state(get_test_job(JOB_COMPLETED))
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_job_info(self):
job = create_job_from_ee2(JOB_COMPLETED)
info_str = "App name (id): Test Editor (NarrativeTest/test_editor)\nVersion: 0.0.1\nStatus: completed\nInputs:\n------\n"
with capture_stdout() as (out, err):
job.info()
self.assertIn(info_str, out.getvalue().strip())
def test_repr(self):
job = create_job_from_ee2(JOB_COMPLETED)
job_str = job.__repr__()
self.assertRegex("KBase Narrative Job - " + job.job_id, job_str)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_repr_js(self):
job = create_job_from_ee2(JOB_COMPLETED)
js_out = job._repr_javascript_()
self.assertIsInstance(js_out, str)
# spot check to make sure the core pieces are present. needs the
# element.html part, job_id, and widget
self.assertIn("element.html", js_out)
self.assertIn(job.job_id, js_out)
self.assertIn("kbaseNarrativeJobStatus", js_out)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_job_finished(self):
is_finished = {
JOB_CREATED: False,
JOB_RUNNING: False,
JOB_COMPLETED: True,
JOB_TERMINATED: True,
}
for job_id in is_finished.keys():
job = create_job_from_ee2(job_id)
self.assertEqual(job.is_finished(), is_finished[job_id])
@mock.patch("biokbase.narrative.widgetmanager.WidgetManager.show_output_widget")
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_show_output_widget(self, mock_method):
mock_method.return_value = True
job = Job(get_test_job(JOB_COMPLETED))
self.assertTrue(job.show_output_widget())
mock_method.assert_called_once()
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_show_output_widget__incomplete_state(self):
job = Job(get_test_job(JOB_CREATED))
self.assertRegex(
job.show_output_widget(), "Job is incomplete! It has status 'created'"
)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_log(self):
# Things set up by the mock:
# 1. There's 100 total log lines
# 2. Each line has its line number embedded in it
total_lines = 100
job = create_job_from_ee2(JOB_COMPLETED)
logs = job.log()
# we know there's 100 lines total, so roll with it that way.
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), total_lines)
for i in range(len(logs[1])):
line = logs[1][i]
self.assertIn("is_error", line)
self.assertIn("line", line)
self.assertIn(str(i), line["line"])
# grab the last half
offset = 50
logs = job.log(first_line=offset)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), offset)
for i in range(total_lines - offset):
self.assertIn(str(i + offset), logs[1][i]["line"])
# grab a bite from the middle
num_fetch = 20
logs = job.log(first_line=offset, num_lines=num_fetch)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), num_fetch)
for i in range(num_fetch):
self.assertIn(str(i + offset), logs[1][i]["line"])
# should normalize negative numbers properly
logs = job.log(first_line=-5)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), total_lines)
logs = job.log(num_lines=-5)
self.assertEqual(logs[0], total_lines)
self.assertEqual(len(logs[1]), 0)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_parameters(self):
"""
test that a job returns the correct parameters
"""
job_state = get_test_job(JOB_COMPLETED)
job_params = job_state.get("job_input", {}).get("params", None)
self.assertIsNotNone(job_params)
job = Job(job_state)
self.assertIsNotNone(job.params)
with assert_obj_method_called(MockClients, "get_job_params", call_status=False):
params = job.parameters()
self.assertIsNotNone(params)
self.assertEqual(params, job_params)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_parameters__param_fetch_ok(self):
"""
test that a job can successfully retrieve parameters from ee2
if they do not exist
"""
job_state = get_test_job(JOB_CREATED)
job_params = job_state.get("job_input", {}).get("params", None)
self.assertIsNotNone(job_params)
# delete the job params from the input
del job_state["job_input"]["params"]
job = Job(job_state)
self.assertEqual(job.params, JOB_ATTR_DEFAULTS["params"])
params = job.parameters()
self.assertEqual(params, job_params)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_failing_mock_client)
def test_parameters__param_fetch_fail(self):
"""
test failure to retrieve job params data
"""
job_state = get_test_job(JOB_TERMINATED)
del job_state["job_input"]["params"]
job = Job(job_state)
self.assertEqual(job.params, JOB_ATTR_DEFAULTS["params"])
with self.assertRaisesRegex(Exception, "Unable to fetch parameters for job"):
job.parameters()
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_parent_children__ok(self):
child_jobs = [Job.from_job_id(job_id) for job_id in BATCH_CHILDREN]
parent_job = Job(
create_state_from_ee2(BATCH_PARENT),
children=child_jobs,
)
self.assertFalse(parent_job.was_terminal())
# Make all child jobs completed
with mock.patch.object(
MockClients,
"check_job",
mock.Mock(return_value={"status": COMPLETED_STATUS}),
):
for child_job in child_jobs:
child_job.state(force_refresh=True)
self.assertTrue(parent_job.was_terminal())
def test_parent_children__fail(self):
parent_state = create_state_from_ee2(BATCH_PARENT)
child_states = [create_state_from_ee2(job_id) for job_id in BATCH_CHILDREN]
with self.assertRaisesRegex(
ValueError, "Must supply children when setting children of batch job parent"
):
Job(parent_state)
child_jobs = [Job(child_state) for child_state in child_states]
with self.assertRaisesRegex(ValueError, "Child job id mismatch"):
Job(
parent_state,
children=child_jobs[1:],
)
with self.assertRaisesRegex(ValueError, "Child job id mismatch"):
Job(
parent_state,
children=child_jobs * 2,
)
with self.assertRaisesRegex(ValueError, "Child job id mismatch"):
Job(
parent_state,
children=child_jobs + [create_job_from_ee2(JOB_COMPLETED)],
)
def test_get_viewer_params__active(self):
for job_id in ACTIVE_JOBS:
if job_id == BATCH_PARENT:
continue
job = create_job_from_ee2(job_id)
state = create_state_from_ee2(job_id)
out = job.get_viewer_params(state)
self.assertIsNone(out)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_get_viewer_params__finished(self):
for job_id in TERMINAL_JOBS:
job = create_job_from_ee2(job_id)
state = create_state_from_ee2(job_id)
exp = get_widget_info(job_id)
got = job.get_viewer_params(state)
self.assertEqual(exp, got)
def test_get_viewer_params__batch_parent(self):
"""
do batch parent separately
since it requires passing in child jobs
"""
state = create_state_from_ee2(BATCH_PARENT)
batch_children = [create_job_from_ee2(job_id) for job_id in BATCH_CHILDREN]
job = create_job_from_ee2(BATCH_PARENT, children=batch_children)
out = job.get_viewer_params(state)
self.assertIsNone(out)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_query_job_state(self):
for job_id in ALL_JOBS:
exp = create_state_from_ee2(
job_id, exclude_fields=JOB_INIT_EXCLUDED_JOB_STATE_FIELDS
)
got = Job.query_ee2_state(job_id, init=True)
self.assertEqual(exp, got)
exp = create_state_from_ee2(
job_id, exclude_fields=EXCLUDED_JOB_STATE_FIELDS
)
got = Job.query_ee2_state(job_id, init=False)
self.assertEqual(exp, got)
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_query_job_states(self):
states = Job.query_ee2_states(ALL_JOBS, init=True)
for job_id, got in states.items():
exp = create_state_from_ee2(
job_id, exclude_fields=JOB_INIT_EXCLUDED_JOB_STATE_FIELDS
)
self.assertEqual(exp, got)
states = Job.query_ee2_states(ALL_JOBS, init=False)
for job_id, got in states.items():
exp = create_state_from_ee2(
job_id, exclude_fields=EXCLUDED_JOB_STATE_FIELDS
)
self.assertEqual(exp, got)
NEW_RETRY_IDS = ["hello", "goodbye"]
NEW_CHILD_JOBS = ["cerulean", "magenta"]
def test_refresh_attrs__non_batch_active(self):
"""
retry_ids should be refreshed
"""
job_id = JOB_CREATED
job = create_job_from_ee2(job_id)
self.check_job_attrs(job, job_id)
def mock_check_job(self_, params):
self.assertEqual(params["job_id"], job_id)
return {"retry_ids": self.NEW_RETRY_IDS}
with mock.patch.object(MockClients, "check_job", mock_check_job):
self.check_job_attrs(job, job_id, {"retry_ids": self.NEW_RETRY_IDS})
def test_refresh_attrs__non_batch_terminal(self):
"""
retry_ids should be refreshed
"""
job_id = JOB_TERMINATED
job = create_job_from_ee2(job_id)
self.check_job_attrs(job, job_id)
def mock_check_job(self_, params):
self.assertEqual(params["job_id"], job_id)
return {"retry_ids": self.NEW_RETRY_IDS}
with mock.patch.object(MockClients, "check_job", mock_check_job):
self.check_job_attrs(job, job_id, {"retry_ids": self.NEW_RETRY_IDS})
def test_refresh_attrs__non_batch__is_retry(self):
"""
neither retry_ids/child_jobs should be refreshed
"""
job_id = BATCH_RETRY_RUNNING
job = create_job_from_ee2(job_id)
self.check_job_attrs(job, job_id)
with assert_obj_method_called(MockClients, "check_job", call_status=False):
self.check_job_attrs(job, job_id, skip_state=True)
def test_refresh_attrs__batch(self):
"""
child_jobs should be refreshed
"""
job_id = BATCH_PARENT
job = get_batch_family_jobs()[job_id]
self.check_job_attrs(job, job_id)
def mock_check_job(self_, params):
self.assertEqual(params["job_id"], job_id)
return {"child_jobs": self.NEW_CHILD_JOBS}
with mock.patch.object(MockClients, "check_job", mock_check_job):
self.check_job_attrs(job, job_id, {"child_jobs": self.NEW_CHILD_JOBS})
def test_was_terminal(self):
all_jobs = get_all_jobs()
for job_id, job in all_jobs.items():
self.assertEqual(JOBS_TERMINALITY[job_id], job.was_terminal())
@mock.patch("biokbase.narrative.jobs.job.clients.get", get_mock_client)
def test_was_terminal__batch(self):
batch_fam = get_batch_family_jobs(return_list=True)
batch_job, child_jobs = batch_fam[0], batch_fam[1:]
self.assertFalse(batch_job.was_terminal())
def mock_check_job(self_, params):
assert params["job_id"] in BATCH_CHILDREN
return {"status": COMPLETED_STATUS}
with mock.patch.object(MockClients, "check_job", mock_check_job):
for job in child_jobs:
job.state(force_refresh=True)
self.assertTrue(batch_job.was_terminal())
def test_in_cells(self):
all_jobs = get_all_jobs()
cell_2_jobs = get_cell_2_jobs()
cell_ids = list(cell_2_jobs.keys())
# Iterate through all combinations of cell IDs
for combo_len in range(len(cell_ids) + 1):
for combo in itertools.combinations(cell_ids, combo_len):
combo = list(combo)
# Get jobs expected to be associated with the cell IDs
exp_job_ids = [
job_id
for cell_id, job_ids in cell_2_jobs.items()
for job_id in job_ids
if cell_id in combo
]
for job_id, job in all_jobs.items():
self.assertEqual(job_id in exp_job_ids, job.in_cells(combo))
def test_in_cells__none(self):
job = create_job_from_ee2(JOB_COMPLETED)
with self.assertRaisesRegex(ValueError, "cell_ids cannot be None"):
job.in_cells(None)
def test_in_cells__batch__same_cell(self):
batch_fam = get_batch_family_jobs(return_list=True)
batch_job, child_jobs = batch_fam[0], batch_fam[1:]
for job in child_jobs:
job.cell_id = "hello"
self.assertTrue(batch_job.in_cells(["hi", "hello"]))
self.assertFalse(batch_job.in_cells(["goodbye", "hasta manana"]))
def test_in_cells__batch__diff_cells(self):
batch_fam = get_batch_family_jobs(return_list=True)
batch_job, child_jobs = batch_fam[0], batch_fam[1:]
children_cell_ids = ["hi", "hello", "greetings"]
for job, cell_id in zip(child_jobs, itertools.cycle(children_cell_ids)):
job.cell_id = cell_id
for cell_id in children_cell_ids:
self.assertTrue(batch_job.in_cells([cell_id]))
self.assertTrue(batch_job.in_cells(["A", cell_id, "B"]))
self.assertTrue(batch_job.in_cells([cell_id, "B", "A"]))
self.assertTrue(batch_job.in_cells(["B", "A", cell_id]))
self.assertFalse(batch_job.in_cells(["goodbye", "hasta manana"]))
def test_app_name(self):
for job in get_all_jobs().values():
if job.batch_job:
self.assertEqual("batch", job.app_name)
else:
test_spec = get_test_spec(job.tag, job.app_id)
self.assertEqual(test_spec["info"]["name"], job.app_name)
|
kbase/narrative
|
src/biokbase/narrative/tests/test_job.py
|
Python
|
mit
| 32,811
|
import sys
from file_assistant import *
from settings import settings
from parser import ShotParser
class Shot:
def __init__(self, filename, logging=False):
self.filename = get_template_path(shotify(filename))
self.logging = logging
if settings.developing or not isfile(htmlify(self.filename)):
self.log("parsing " + self.filename)
self.parser = ShotParser(self.filename, logging=self.logging)
self.filename = htmlify(self.filename)
def log(self, message):
if self.logging:
            print(message)
def generate_shot(self):
if settings.developing or not isfile(self.filename):
self.log("generating " + self.filename + "\n")
code = self.parser.generate_code()
self.log("\n -- BEGIN PRE-JINJA CODE --\n\n"+code+"\n\n -- END PRE-JINJA CODE --\n")
write_shot_to_file(self.filename,shot=code)
def render(self, **varArgs):
self.generate_shot()
template = get_template(self.filename)
return template.render(**varArgs)
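# Minimal usage sketch (assumes a template named "index" that get_template_path
# can resolve and that uses a "name" variable -- both hypothetical here):
#     page = Shot("index").render(name="world")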
#-------------------------
# Main
#-------------------------
def main():
before_jinja = False
logging = False
if len(sys.argv) < 2:
exit("Usage : " + sys.argv[0] + " <filename> [-d]\n " + sys.argv[0] + " requires at least a file name as a parameter, with an optional debug flag")
elif len(sys.argv) > 2:
if "-l" in sys.argv:
logging = True
if "-j" in sys.argv:
before_jinja = True
s = Shot(sys.argv[1], logging=logging)
if before_jinja:
        print(s.parser.generate_code())
    else:
        print(s.render())
if __name__ == "__main__":
main()
|
davidsteinberg/shots
|
shots/shot.py
|
Python
|
mit
| 1,723
|
import time
from datetime import timedelta
try:
from HTMLParser import HTMLParser
from urlparse import urljoin, urldefrag
except ImportError:
from html.parser import HTMLParser
from urllib.parse import urljoin, urldefrag
from tornado import httpclient, gen, ioloop, queues
base_url = 'http://docs.ansible.com/ansible/'
concurrency = 10
@gen.coroutine
def get_links_from_url(url):
"""Download the page at `url` and parse it for links.
Returned links have had the fragment after `#` removed, and have been made
absolute so, e.g. the URL 'gen.html#tornado.gen.coroutine' becomes
'http://www.tornadoweb.org/en/stable/gen.html'.
"""
try:
response = yield httpclient.AsyncHTTPClient().fetch(url)
print('fetched %s' % url)
html = response.body if isinstance(response.body, str) \
else response.body.decode()
urls = [urljoin(url, remove_fragment(new_url))
for new_url in get_links(html)]
except Exception as e:
print('Exception: %s %s' % (e, url))
raise gen.Return([])
raise gen.Return(urls)
def remove_fragment(url):
pure_url, frag = urldefrag(url)
return pure_url
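# e.g. remove_fragment('http://docs.ansible.com/ansible/intro.html#installation')
# returns 'http://docs.ansible.com/ansible/intro.html' (urldefrag splits off the
# part after '#').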
def get_links(html):
class URLSeeker(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.urls = []
def handle_starttag(self, tag, attrs):
href = dict(attrs).get('href')
if href and tag == 'a':
self.urls.append(href)
def handle_data(self, data):
if data.find('``') != -1:
                print('find format error')
time.sleep(5)
url_seeker = URLSeeker()
url_seeker.feed(html)
return url_seeker.urls
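# e.g. get_links('<a href="intro.html">intro</a>') returns ['intro.html'];
# only <a> tags that carry an href attribute are collected.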
@gen.coroutine
def main():
q = queues.Queue()
start = time.time()
fetching, fetched = set(), set()
@gen.coroutine
def fetch_url():
current_url = yield q.get()
try:
if current_url in fetching:
return
print('fetching %s' % current_url)
fetching.add(current_url)
urls = yield get_links_from_url(current_url)
fetched.add(current_url)
for new_url in urls:
# Only follow links beneath the base URL
if new_url.startswith(base_url):
yield q.put(new_url)
finally:
q.task_done()
@gen.coroutine
def worker():
while True:
yield fetch_url()
q.put(base_url)
# Start workers, then wait for the work queue to be empty.
for _ in range(concurrency):
worker()
yield q.join(timeout=timedelta(seconds=300))
assert fetching == fetched
print('Done in %d seconds, fetched %s URLs.' % (
time.time() - start, len(fetched)))
if __name__ == '__main__':
import logging
logging.basicConfig()
io_loop = ioloop.IOLoop.current()
io_loop.run_sync(main)
|
tao12345666333/Talk-Is-Cheap
|
python/tornado/spider/ansible_doc_spider.py
|
Python
|
mit
| 2,956
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Zeke/Google Drive/dev/python/zeex/zeex/core/ui/file.ui'
#
# Created: Mon Nov 13 22:57:14 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_FileWindow(object):
def setupUi(self, FileWindow):
FileWindow.setObjectName("FileWindow")
FileWindow.resize(1124, 894)
self.centralwidget = QtGui.QWidget(FileWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget = QtGui.QWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName("widget")
self.horizontalLayout.addWidget(self.widget)
FileWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(FileWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1124, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuAction = QtGui.QMenu(self.menubar)
self.menuAction.setObjectName("menuAction")
FileWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(FileWindow)
self.statusbar.setObjectName("statusbar")
FileWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(FileWindow)
self.toolBar.setObjectName("toolBar")
FileWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionSave = QtGui.QAction(FileWindow)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/standard_icons/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave.setIcon(icon)
self.actionSave.setObjectName("actionSave")
self.actionDelete = QtGui.QAction(FileWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/standard_icons/delete.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
icon1.addPixmap(QtGui.QPixmap(":/standard_icons/delete.png"), QtGui.QIcon.Active, QtGui.QIcon.On)
self.actionDelete.setIcon(icon1)
self.actionDelete.setObjectName("actionDelete")
self.actionMergePurge = QtGui.QAction(FileWindow)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/standard_icons/merge.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionMergePurge.setIcon(icon2)
self.actionMergePurge.setObjectName("actionMergePurge")
self.actionSuppress = QtGui.QAction(FileWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/standard_icons/suppress.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSuppress.setIcon(icon3)
self.actionSuppress.setObjectName("actionSuppress")
self.actionRename = QtGui.QAction(FileWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/standard_icons/rename.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionRename.setIcon(icon4)
self.actionRename.setObjectName("actionRename")
self.actionExecuteScript = QtGui.QAction(FileWindow)
self.actionExecuteScript.setObjectName("actionExecuteScript")
self.actionEditFields = QtGui.QAction(FileWindow)
self.actionEditFields.setIcon(icon4)
self.actionEditFields.setObjectName("actionEditFields")
self.actionSplit = QtGui.QAction(FileWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/standard_icons/split.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSplit.setIcon(icon5)
self.actionSplit.setObjectName("actionSplit")
self.actionAnalyze = QtGui.QAction(FileWindow)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/standard_icons/count.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionAnalyze.setIcon(icon6)
self.actionAnalyze.setObjectName("actionAnalyze")
self.actionNormalize = QtGui.QAction(FileWindow)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(":/standard_icons/normalize.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionNormalize.setIcon(icon7)
self.actionNormalize.setObjectName("actionNormalize")
self.actionSaveAs = QtGui.QAction(FileWindow)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(":/standard_icons/saveas.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSaveAs.setIcon(icon8)
self.actionSaveAs.setObjectName("actionSaveAs")
self.actionTranspose = QtGui.QAction(FileWindow)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(":/standard_icons/transpose.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionTranspose.setIcon(icon9)
self.actionTranspose.setObjectName("actionTranspose")
self.actionDropNaN = QtGui.QAction(FileWindow)
self.actionDropNaN.setObjectName("actionDropNaN")
self.menuFile.addAction(self.actionSave)
self.menuFile.addAction(self.actionSaveAs)
self.menuFile.addAction(self.actionDelete)
self.menuAction.addAction(self.actionAnalyze)
self.menuAction.addAction(self.actionEditFields)
self.menuAction.addAction(self.actionExecuteScript)
self.menuAction.addAction(self.actionMergePurge)
self.menuAction.addAction(self.actionNormalize)
self.menuAction.addAction(self.actionSplit)
self.menuAction.addAction(self.actionSuppress)
self.menuAction.addAction(self.actionTranspose)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuAction.menuAction())
self.toolBar.addAction(self.actionSave)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionMergePurge)
self.toolBar.addAction(self.actionSplit)
self.toolBar.addAction(self.actionNormalize)
self.toolBar.addAction(self.actionEditFields)
self.toolBar.addAction(self.actionAnalyze)
self.toolBar.addAction(self.actionTranspose)
self.toolBar.addAction(self.actionDropNaN)
self.retranslateUi(FileWindow)
QtCore.QMetaObject.connectSlotsByName(FileWindow)
def retranslateUi(self, FileWindow):
FileWindow.setWindowTitle(QtGui.QApplication.translate("FileWindow", "MyFile", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("FileWindow", "File", None, QtGui.QApplication.UnicodeUTF8))
self.menuAction.setTitle(QtGui.QApplication.translate("FileWindow", "Action", None, QtGui.QApplication.UnicodeUTF8))
self.toolBar.setWindowTitle(QtGui.QApplication.translate("FileWindow", "toolBar", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave.setText(QtGui.QApplication.translate("FileWindow", "Save", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave.setToolTip(QtGui.QApplication.translate("FileWindow", "Save the file to disk.", None, QtGui.QApplication.UnicodeUTF8))
self.actionSave.setShortcut(QtGui.QApplication.translate("FileWindow", "Ctrl+S", None, QtGui.QApplication.UnicodeUTF8))
self.actionDelete.setText(QtGui.QApplication.translate("FileWindow", "Delete", None, QtGui.QApplication.UnicodeUTF8))
self.actionDelete.setToolTip(QtGui.QApplication.translate("FileWindow", "Deletes the current file from the program and disk!", None, QtGui.QApplication.UnicodeUTF8))
self.actionMergePurge.setText(QtGui.QApplication.translate("FileWindow", "Merge/Purge", None, QtGui.QApplication.UnicodeUTF8))
self.actionMergePurge.setShortcut(QtGui.QApplication.translate("FileWindow", "Ctrl+M", None, QtGui.QApplication.UnicodeUTF8))
self.actionSuppress.setText(QtGui.QApplication.translate("FileWindow", "Suppress", None, QtGui.QApplication.UnicodeUTF8))
self.actionRename.setText(QtGui.QApplication.translate("FileWindow", "Rename Headers", None, QtGui.QApplication.UnicodeUTF8))
self.actionRename.setShortcut(QtGui.QApplication.translate("FileWindow", "Ctrl+R", None, QtGui.QApplication.UnicodeUTF8))
self.actionExecuteScript.setText(QtGui.QApplication.translate("FileWindow", "Execute Script", None, QtGui.QApplication.UnicodeUTF8))
self.actionEditFields.setText(QtGui.QApplication.translate("FileWindow", "Edit Fields", None, QtGui.QApplication.UnicodeUTF8))
self.actionEditFields.setShortcut(QtGui.QApplication.translate("FileWindow", "Ctrl+F", None, QtGui.QApplication.UnicodeUTF8))
self.actionSplit.setText(QtGui.QApplication.translate("FileWindow", "Split", None, QtGui.QApplication.UnicodeUTF8))
self.actionAnalyze.setText(QtGui.QApplication.translate("FileWindow", "Analyze", None, QtGui.QApplication.UnicodeUTF8))
self.actionAnalyze.setToolTip(QtGui.QApplication.translate("FileWindow", "View statistics on each column.", None, QtGui.QApplication.UnicodeUTF8))
self.actionAnalyze.setShortcut(QtGui.QApplication.translate("FileWindow", "Ctrl+A", None, QtGui.QApplication.UnicodeUTF8))
self.actionNormalize.setText(QtGui.QApplication.translate("FileWindow", "Normalize", None, QtGui.QApplication.UnicodeUTF8))
self.actionNormalize.setToolTip(QtGui.QApplication.translate("FileWindow", "Normalize data", None, QtGui.QApplication.UnicodeUTF8))
self.actionNormalize.setShortcut(QtGui.QApplication.translate("FileWindow", "Ctrl+N", None, QtGui.QApplication.UnicodeUTF8))
self.actionSaveAs.setText(QtGui.QApplication.translate("FileWindow", "Save As", None, QtGui.QApplication.UnicodeUTF8))
self.actionSaveAs.setToolTip(QtGui.QApplication.translate("FileWindow", "Save the file to another path on the disk.", None, QtGui.QApplication.UnicodeUTF8))
self.actionTranspose.setText(QtGui.QApplication.translate("FileWindow", "Transpose", None, QtGui.QApplication.UnicodeUTF8))
self.actionTranspose.setToolTip(QtGui.QApplication.translate("FileWindow", "Convert rows to columns & vice versa (150 rows max)", None, QtGui.QApplication.UnicodeUTF8))
self.actionTranspose.setShortcut(QtGui.QApplication.translate("FileWindow", "Ctrl+T", None, QtGui.QApplication.UnicodeUTF8))
self.actionDropNaN.setText(QtGui.QApplication.translate("FileWindow", "Drop NaN", None, QtGui.QApplication.UnicodeUTF8))
self.actionDropNaN.setToolTip(QtGui.QApplication.translate("FileWindow", "Replaces \'nan\' values with blanks on string fields.", None, QtGui.QApplication.UnicodeUTF8))
from zeex.icons import icons_rc
|
zbarge/zeex
|
zeex/core/ui/file_ui.py
|
Python
|
mit
| 10,990
|
from ann_util import *
use_bias = 1
class ANN(object):
"""docstring for ANN"""
def __init__(self, layer_sizes):
self.layers =[]
self.learn_rate = 0.1
self.squash = sigmoid
self.deriv_squash = deriv_sigmoid
for i in range(len(layer_sizes)):
layer_size = layer_sizes[i]
prev_layer_size = 0 if i == 0 else layer_sizes[i-1]
layer = Layer(i,layer_size, prev_layer_size)
self.layers.append(layer)
def train(self, inputs, targets, n_epochs):
"""
        Train the network with the labeled inputs for a maximum number of epochs.
"""
for epoch in range(0, n_epochs):
epoch_error = 0
            for i in range(0, len(inputs)):
                self.set_input(inputs[i])
                self.forward_propagate()
                sample_error = self.update_error_output(targets[i])
                epoch_error += sample_error
                self.backward_propagate()
                self.update_weights()
if epoch % 100 == 0:
print(epoch, epoch_error)
def predict(self, input):
"""
Return the network prediction for this input
"""
self.set_input(input)
self.forward_propagate()
return self.get_output()
def update_weights(self):
"""
        Update the weights in each layer's weight matrix.
        """
        for l in range(1, len(self.layers)):
            for j in range(0, self.layers[l].n_neurons):
                for i in range(0, self.layers[l-1].n_neurons + use_bias):
                    out = self.layers[l-1].output[i]
                    err = self.layers[l].error[j]
                    self.layers[l].weight[i][j] += self.learn_rate * out * err
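    # The update above is the gradient-descent delta rule: each weight moves by
    # learn_rate times the upstream neuron's output times the downstream
    # neuron's error term, i.e. w_ij += eta * o_i * delta_j.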
def set_input(self, input_vector):
input_layer = self.layers[0]
for i in range(0, input_layer.n_neurons):
input_layer.output[i+use_bias] = input_vector[i]
def forward_propagate(self):
"""
        Propagate the input signal forward through the network.
"""
for l in range(len(self.layers)-1):
src_layer = self.layers[l]
dst_layer = self.layers[l+1]
            for j in range(0, dst_layer.n_neurons):
                sum_in = 0
                for i in range(0, src_layer.n_neurons + use_bias):
                    sum_in += dst_layer.weight[i][j] * src_layer.output[i]
                dst_layer.input[j] = sum_in
                dst_layer.output[j+use_bias] = self.squash(sum_in)
def get_output(self):
output_layer = self.layers[-1]
res = [0]* output_layer.n_neurons
for i in range(0, len(res)):
res[i] = output_layer.output[i+use_bias]
return res
def update_error_output(self, target_vector):
sample_error = 0
output_layer = self.layers[-1]
        for i in range(0, output_layer.n_neurons):
            neuron_output = output_layer.output[i+use_bias]
            neuron_error = target_vector[i] - neuron_output
            output_layer.error[i] = self.deriv_squash(output_layer.input[i]) * neuron_error
            sample_error += neuron_error * neuron_error
        sample_error *= 0.5
return sample_error
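    # sample_error is the usual half sum-of-squares loss,
    # 0.5 * sum((t_i - o_i)^2); the 0.5 cancels the 2 from the derivative,
    # leaving (t_i - o_i) as the error signal used above.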
def backward_propagate(self):
"""
        Backpropagate the error from the output layer back toward the input layer.
"""
for l in range(len(self.layers)-1, 0, -1):
src_layer = self.layers[l]
dst_layer = self.layers[l-1]
for i in range(0, dst_layer.n_neurons):
error = 0
for j in range(0, src_layer.n_neurons):
error += src_layer.weight[i+use_bias][j] * src_layer.error[j]
                dst_layer.error[i] = self.deriv_squash(dst_layer.input[i]) * error
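    # Standard backprop recurrence: a hidden neuron's error term is its squash
    # derivative times the weighted sum of the error terms one layer closer to
    # the output, delta_i = f'(net_i) * sum_j(w_ij * delta_j).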
class Layer(object):
"""docstring for Layer"""
def __init__(self, id, layer_size, prev_layer_size):
self.id = id
self.n_neurons = layer_size
self.bias_val = 1
self.input = [0.0]*self.n_neurons
self.output = [0.0]*(self.n_neurons +use_bias)
self.output[0] = self.bias_val
self.error = [0.0]*self.n_neurons
self.weight = make_matrix(prev_layer_size+ use_bias, self.n_neurons)
for i in range(len(self.weight)):
for j in range(len(self.weight[i])):
self.weight[i][j] = between(-1.0, 1.0)
if __name__ == '__main__':
    xor_ann = ANN([2, 2, 1])  # 2-2-1 network for the XOR targets below
    inputs = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    targets = [[0.0], [1.0], [1.0], [0.0]]
    # train and predict
    print("train and predict")
    xor_ann.train(inputs, targets, 20000)
    for i in range(len(targets)):
        print(inputs[i], xor_ann.predict(inputs[i]))
|
ssreza/insights
|
ann.py
|
Python
|
mit
| 3,940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
import string
from math import ceil
import sys
from PyQt4 import QtGui
class Converter():
def main(self, folder):
conFilePath, jpgPaths = self.read_folder_contents(folder)
numFiles, splitCoords = self.read_con_file(conFilePath)
        splitCoords = self.resample_data(numFiles, splitCoords)
        self.print_new_files(numFiles, folder, jpgPaths, splitCoords)
def read_folder_contents(self, folder):
""" Reads in the contents of a folder and groups
the sorted .jpg paths together in a list, and creates an
object for the .con file. The folder should contain *only* the
relevant jpgs (tongue frames) and the single corresponding con file
generated by EdgeTrak. """
folderContents = os.listdir(folder)
jpgPaths=[]
for fileName in folderContents: # this loop does the sorting of .con and .jpg files
if '.con' in fileName:
conFilePath = os.path.normpath(os.path.join(folder, fileName))
if ".jpg" in fileName:
jpgPaths.append(os.path.splitext(fileName)[0])
else:
pass
jpgPaths = sorted(jpgPaths) # sort the .jpg paths because the .con file columns
# are ordered according to the jpg filenames
return conFilePath, jpgPaths
def read_con_file(self, conFilePath):
"""Reads in a .con file, returns the list self.splitCoords,
which is a list of the coordinates generated by EdgeTrak and then
split according to the corresponding .jpg image"""
conFile = open(conFilePath, 'r') # read in the file
conLines = conFile.readlines() # create list with lines as elements
conFile.close()
        numFiles = len(conLines[0].strip().split()) // 2 # count number of columns in file and divide by 2
# (since 2 columns to each image file)
splitCoords = [[] for i in range(numFiles)] # create list to append paired coordinates for
# each image file
for line in conLines:
i=0
coords = line.strip().split()
for sublist in splitCoords: # each sublist corresponds to an image file
# (tongue frame)
sublist.append((coords[(2*i)], coords[(2*i)+1])) # the input .con file has paired columns from left
i+=1 # to right (1,2), (3,4), (5,6)..., and this assigns
# each pair to a tuple
# and the tuple to its own sublist on splitCoord
return numFiles, splitCoords
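    # Illustrative sketch (made-up values): a .con line '10 20 30 40' with
    # numFiles == 2 appends ('10', '20') to splitCoords[0] and ('30', '40')
    # to splitCoords[1], one (x, y) tuple per image per line.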
    def resample_data(self, numFiles, splitCoords, resampleTo=32):
        """ Used to get the EdgeTrak data compatible with
        AutoTrace, which handles 32 points per traced image. """
        for i in range(numFiles):
            origLength = len(splitCoords[i]) # original length of the .con file columns
                                             # (ie the number of traced points)
            if origLength > resampleTo:
                resampled = []
                for j in range(resampleTo):
                    resampled.append(splitCoords[i][int(ceil(j *
                                     origLength / resampleTo))]) # walk down the array of tuples (coordinates)
                                                                 # in an evenly-spaced manner
                splitCoords[i] = resampled
        # every sublist now holds at most resampleTo points
        return splitCoords
def print_new_files(self, numFiles, folder, jpgPaths, splitCoords):
""" Print out a new file for each .jpg tongue image,
using the filename of each .jpg to create the filename for the
corresponding .txt file. """
for fileNum in range(numFiles):
outFile= open(folder + '/output_' +
str(jpgPaths[fileNum]) + '.txt' , 'w')
i=0
for item in splitCoords[fileNum]:
i+=1
outFile.write(str(i) + '\t' + str(item[0]) +
'\t' + str(item[1]) + '\n') # write line in the new file with tab delimiting
|
JRMeyer/Autotrace
|
under-development/analysis/edgetrak_converter.py
|
Python
|
mit
| 5,041
|
import requests
import json
from hamper.interfaces import ChatCommandPlugin, Command
class Timez(ChatCommandPlugin):
name = 'timez'
priority = 2
def setup(self, loader):
try:
self.api_key = loader.config['timez']['api-key']
except (KeyError, TypeError):
self.api_key = None
api_url = "http://api.worldweatheronline.com/free/v1/tz.ashx"
self.api_url = "%s?key=%s&q=%%s&format=json" % (api_url, self.api_key)
super(Timez, self).setup(loader)
class Timez(Command):
''' '''
name = 'timez'
regex = '^timez (.*)'
long_desc = short_desc = (
"timez <something> - Look up time for [ZIP code | "
"City, State (US Only) | City Name, State, Country | City Name, "
"Country | Airport Code | IP "
)
def command(self, bot, comm, groups):
if not self.plugin.api_key:
bot.reply(
comm, "This plugin is missconfigured. Its missing an API "
"key. Go register one at "
"http://developer.worldweatheronline.com/apps/register"
)
return
            # take the argument captured by the command regex
            query = groups[0].strip()
resp = requests.get(self.plugin.api_url % query)
            if resp.status_code != 200:
                bot.reply(comm, "Error: A non-200 status code was returned")
                return
jresp = json.loads(resp.text)
try:
tz = jresp['data']['time_zone'][0]
bot.reply(
comm,
"For %s, local time is %s at UTC offset %s" % (
query, tz['localtime'], tz['utcOffset']
)
)
except KeyError:
bot.reply(
comm, "Sorry, the internet didn't understand your request."
)
# Always let the other plugins run
return False
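        # In-channel usage sketch (hypothetical input): "timez London, UK"
        # would reply with London's local time and UTC offset via the
        # worldweatheronline tz endpoint configured above.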
|
hamperbot/hamper
|
hamper/plugins/timez.py
|
Python
|
mit
| 2,000
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file is based, although a rewrite, on MIT-licensed code from the Bottle web framework.
"""
import os, sys, optparse, urllib
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
from gluon.fileutils import read_file, write_file
class Servers:
@staticmethod
def cgi(app, address=None, **options):
from wsgiref.handlers import CGIHandler
CGIHandler().run(app) # Just ignore host and port here
@staticmethod
def flup(app,address, **options):
import flup.server.fcgi
flup.server.fcgi.WSGIServer(app, bindAddress=address).run()
@staticmethod
def wsgiref(app,address,**options): # pragma: no cover
from wsgiref.simple_server import make_server, WSGIRequestHandler
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass
options['handler_class'] = QuietHandler
srv = make_server(address[0],address[1],app,**options)
srv.serve_forever()
@staticmethod
def cherrypy(app,address, **options):
from cherrypy import wsgiserver
server = wsgiserver.CherryPyWSGIServer(address, app)
server.start()
@staticmethod
def rocket(app,address, **options):
from gluon.rocket import CherryPyWSGIServer
server = CherryPyWSGIServer(address, app)
server.start()
@staticmethod
def rocket_with_repoze_profiler(app,address, **options):
from gluon.rocket import CherryPyWSGIServer
from repoze.profile.profiler import AccumulatingProfileMiddleware
from gluon.settings import global_settings
global_settings.web2py_crontype = 'none'
wrapped = AccumulatingProfileMiddleware(
app,
log_filename='wsgi.prof',
discard_first_request=True,
flush_at_shutdown=True,
path = '/__profile__'
)
server = CherryPyWSGIServer(address, wrapped)
server.start()
@staticmethod
def paste(app,address,**options):
from paste import httpserver
from paste.translogger import TransLogger
httpserver.serve(app, host=address[0], port=address[1], **options)
@staticmethod
def fapws(app,address, **options):
import fapws._evwsgi as evwsgi
from fapws import base
evwsgi.start(address[0],str(address[1]))
evwsgi.set_base_module(base)
        def wrapped(environ, start_response):
            # wrap under a new name; reusing "app" here would shadow the
            # real application and recurse forever
            environ['wsgi.multiprocess'] = False
            return app(environ, start_response)
        evwsgi.wsgi_cb(('',wrapped))
evwsgi.run()
@staticmethod
def gevent(app,address, **options):
from gevent import monkey; monkey.patch_all()
from gevent import pywsgi
from gevent.pool import Pool
        workers = options.get('workers')
        pywsgi.WSGIServer(address, app, spawn=Pool(int(workers)) if workers else 'default').serve_forever()
@staticmethod
def bjoern(app,address, **options):
import bjoern
bjoern.run(app, *address)
@staticmethod
def tornado(app,address, **options):
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
container = tornado.wsgi.WSGIContainer(app)
server = tornado.httpserver.HTTPServer(container)
server.listen(address=address[0], port=address[1])
tornado.ioloop.IOLoop.instance().start()
@staticmethod
def twisted(app,address, **options):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
reactor.listenTCP(address[1], factory, interface=address[0])
reactor.run()
@staticmethod
def diesel(app,address, **options):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(app, port=address[1])
app.run()
@staticmethod
    def gunicorn(app,address, **options):
import gunicorn.arbiter
gunicorn.arbiter.Arbiter(address, 4, app).run()
@staticmethod
def eventlet(app,address, **options):
from eventlet import wsgi, listen
wsgi.server(listen(address), app)
@staticmethod
def mongrel2(app,address,**options):
import uuid
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from mongrel2 import handler
conn = handler.Connection(str(uuid.uuid4()),
"tcp://127.0.0.1:9997",
"tcp://127.0.0.1:9996")
mongrel2_handler(app,conn,debug=False)
def run(servername,ip,port,softcron=True,logging=False,profiler=None):
if logging:
application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
logfilename='httpserver.log',
profilerfilename=profiler)
else:
application = gluon.main.wsgibase
if softcron:
from gluon.settings import global_settings
global_settings.web2py_crontype = 'soft'
getattr(Servers,servername)(application,(ip,int(port)))
def mongrel2_handler(application,conn,debug=False):
"""
Based on :
https://github.com/berry/Mongrel2-WSGI-Handler/blob/master/wsgi-handler.py
WSGI handler based on the Python wsgiref SimpleHandler.
    A WSGI application should return an iterable of StringTypes.
Any encoding must be handled by the WSGI application itself.
"""
from wsgiref.handlers import SimpleHandler
try:
import cStringIO as StringIO
except:
import StringIO
# TODO - this wsgi handler executes the application and renders a page
# in memory completely before returning it as a response to the client.
# Thus, it does not "stream" the result back to the client. It should be
# possible though. The SimpleHandler accepts file-like stream objects. So,
# it should be just a matter of connecting 0MQ requests/response streams to
# the SimpleHandler requests and response streams. However, the Python API
# for Mongrel2 doesn't seem to support file-like stream objects for requests
# and responses. Unless I have missed something.
while True:
if debug: print "WAITING FOR REQUEST"
# receive a request
req = conn.recv()
if debug: print "REQUEST BODY: %r\n" % req.body
if req.is_disconnect():
if debug: print "DISCONNECT"
continue #effectively ignore the disconnect from the client
# Set a couple of environment attributes a.k.a. header attributes
# that are a must according to PEP 333
environ = req.headers
environ['SERVER_PROTOCOL'] = 'HTTP/1.1' # SimpleHandler expects a server_protocol, lets assume it is HTTP 1.1
environ['REQUEST_METHOD'] = environ['METHOD']
if ':' in environ['Host']:
environ['SERVER_NAME'] = environ['Host'].split(':')[0]
environ['SERVER_PORT'] = environ['Host'].split(':')[1]
else:
environ['SERVER_NAME'] = environ['Host']
environ['SERVER_PORT'] = ''
environ['SCRIPT_NAME'] = '' # empty for now
environ['PATH_INFO'] = urllib.unquote(environ['PATH'])
if '?' in environ['URI']:
environ['QUERY_STRING'] = environ['URI'].split('?')[1]
else:
environ['QUERY_STRING'] = ''
if environ.has_key('Content-Length'):
environ['CONTENT_LENGTH'] = environ['Content-Length'] # necessary for POST to work with Django
environ['wsgi.input'] = req.body
if debug: print "ENVIRON: %r\n" % environ
# SimpleHandler needs file-like stream objects for
# requests, errors and responses
reqIO = StringIO.StringIO(req.body)
errIO = StringIO.StringIO()
respIO = StringIO.StringIO()
# execute the application
handler = SimpleHandler(reqIO, respIO, errIO, environ, multithread = False, multiprocess = False)
handler.run(application)
# Get the response and filter out the response (=data) itself,
# the response headers,
# the response status code and the response status description
response = respIO.getvalue()
response = response.split("\r\n")
data = response[-1]
headers = dict([r.split(": ") for r in response[1:-2]])
code = response[0][9:12]
status = response[0][13:]
# strip BOM's from response data
# Especially the WSGI handler from Django seems to generate them (2 actually, huh?)
        # a BOM isn't really necessary and causes HTML parsing errors in Chrome and Safari
        # See also: http://www.xs4all.nl/~mechiel/projects/bomstrip/
        # Although I still find this an ugly hack, it does work.
data = data.replace('\xef\xbb\xbf', '')
# Get the generated errors
errors = errIO.getvalue()
# return the response
if debug: print "RESPONSE: %r\n" % response
if errors:
if debug: print "ERRORS: %r" % errors
data = "%s\r\n\r\n%s" % (data, errors)
conn.reply_http(req, data, code = code, status = status, headers = headers)
def main():
usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P"
try:
version = read_file('VERSION')
except IOError:
version = ''
parser = optparse.OptionParser(usage, None, optparse.Option, version)
parser.add_option('-l',
'--logging',
action='store_true',
default=False,
dest='logging',
help='log into httpserver.log')
parser.add_option('-P',
'--profiler',
default=False,
dest='profiler',
help='profiler filename')
servers = ', '.join(x for x in dir(Servers) if not x[0]=='_')
parser.add_option('-s',
'--server',
default='rocket',
dest='server',
help='server name (%s)' % servers)
parser.add_option('-i',
'--ip',
default='127.0.0.1',
dest='ip',
help='ip address')
parser.add_option('-p',
'--port',
default='8000',
dest='port',
help='port number')
parser.add_option('-w',
'--workers',
default='',
dest='workers',
                      help='number of workers')
(options, args) = parser.parse_args()
print 'starting %s on %s:%s...' % (options.server,options.ip,options.port)
run(options.server,options.ip,options.port,logging=options.logging,profiler=options.profiler)
if __name__=='__main__':
main()
|
SEA000/uw-empathica
|
empathica/anyserver.py
|
Python
|
mit
| 11,418
|
import _plotly_utils.basevalidators
class LabelValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="label", parent_name="splom.dimension", **kwargs):
super(LabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/splom/dimension/_label.py
|
Python
|
mit
| 398
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import shutil
import pytest
from icon_font_to_png import command_line
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Tests
def test_list_option(capfd):
"""Test listing CSS icons"""
css_file = os.path.join(BASE_DIR, 'files', 'test-foo.css')
ttf_file = os.path.join(BASE_DIR, 'files', 'test.ttf') # Required argument
# No CSS and TTF files
with pytest.raises(SystemExit):
command_line.run(
'--list'.split()
)
out, err = capfd.readouterr()
assert out == ''
# Lists correctly
with pytest.raises(SystemExit):
command_line.run(
'--css {css_file} --ttf {ttf_file} --list'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr()
assert out == 'bar\ntest\n'
# Lists correctly, with the prefix
with pytest.raises(SystemExit):
command_line.run(
'--css {css_file} --ttf {ttf_file} --keep_prefix --list'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr()
assert out == 'foo-bar\nfoo-test\n'
def test_icon_export(capfd):
"""Test exporting icons (on Font Awesome files)"""
css_file = os.path.join(BASE_DIR, 'files', 'font-awesome.css')
ttf_file = os.path.join(BASE_DIR, 'files', 'fontawesome-webfont.ttf')
    # Export no icons (no icon names given, so the command exits)
with pytest.raises(SystemExit):
command_line.run(
'--css {css_file} --ttf {ttf_file}'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr() # For skipping stdout
# Export one icon
command_line.run(
'--css {css_file} --ttf {ttf_file} github'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr() # For skipping stdout
assert os.path.isfile(os.path.join('exported', 'github.png'))
# Export two icons
command_line.run(
'--css {css_file} --ttf {ttf_file} github star'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr() # For skipping stdout
assert os.path.isfile(os.path.join('exported', 'github.png'))
assert os.path.isfile(os.path.join('exported', 'star.png'))
# Export all icons
command_line.run(
'--css {css_file} --ttf {ttf_file} ALL'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr() # For skipping stdout
def test_filename_option(capfd):
"""Test filename option"""
css_file = os.path.join(BASE_DIR, 'files', 'font-awesome.css')
ttf_file = os.path.join(BASE_DIR, 'files', 'fontawesome-webfont.ttf')
# Export one icon
command_line.run(
'--css {css_file} --ttf {ttf_file} '
'--filename foo github'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr() # For skipping stdout
assert os.path.isfile(os.path.join('exported', 'foo.png'))
# Export multiple icons
command_line.run(
'--css {css_file} --ttf {ttf_file} '
'--filename foo- github star'.format(
css_file=css_file, ttf_file=ttf_file
).split()
)
out, err = capfd.readouterr() # For skipping stdout
assert os.path.isfile(os.path.join('exported', 'foo-github.png'))
assert os.path.isfile(os.path.join('exported', 'foo-star.png'))
def test_download_option(capfd):
"""Test icon font download option"""
with pytest.raises(SystemExit):
command_line.run(
'--download {font_name}'.format(font_name='font-awesome').split()
)
out, err = capfd.readouterr() # For skipping stdout
assert out == "Icon font 'font-awesome' successfully downloaded\n"
assert os.path.isfile('font-awesome.css')
assert os.path.isfile('fontawesome-webfont.ttf')
# Teardown
def teardown_module():
"""Delete exported icons directory and downloaded FontAwesome files"""
if os.path.isdir('exported'):
shutil.rmtree('exported')
if os.path.isfile('font-awesome.css'):
os.remove('font-awesome.css')
if os.path.isfile('fontawesome-webfont.ttf'):
os.remove('fontawesome-webfont.ttf')
|
Pythonity/icon-font-to-png
|
icon_font_to_png/test/test_command_line.py
|
Python
|
mit
| 4,426
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import sys
from cli_app import log
from cli_app import options
from cli_app import output
LOG = log.Logger.get()
def main():
# configargparse raises a SystemExit on error
args = options.Options().parse()
LOG.debug(args)
out = output.Output(args.text)
out.printScreen()
if __name__ == '__main__':
sys.exit(main())
|
xamurej/py3-cli-skel
|
cli_app/cli.py
|
Python
|
mit
| 390
|
"""Test script for DanceCatConsole."""
from __future__ import print_function
import os
import pytest
import datetime
from dateutil.relativedelta import relativedelta
from os import remove
from sqlalchemy import inspect
from DanceCat import Console
db_test_path = os.getcwd() + '/.test_console'
if not os.path.exists(db_test_path):
os.mkdir(db_test_path)
@pytest.fixture
def app():
"""Test fixture to set app config and remove previous test files."""
db_file_path = db_test_path + '/test_console.db'
Console.app.config.update({
'SQLALCHEMY_DATABASE_URI': ('sqlite:///' + db_file_path),
'SQLALCHEMY_TRACK_MODIFICATIONS': False
})
try:
remove(db_file_path)
except OSError:
pass
return Console.app
@pytest.fixture
def user_email():
"""Return test email."""
return 'test@test.test'
def test_list_commands():
"""Test for full coverage."""
Console.list_all()
def test_db_create_all(app):
"""Test db_create_all command."""
assert app.config.get('SQLALCHEMY_DATABASE_URI')
Console.db_create_all()
tables_list = inspect(Console.db.engine).get_table_names()
for table in Console.db.metadata.tables.items():
assert table[0] in tables_list
def test_add_allowed_user(app, user_email, capfd):
"""Test add_allowed_user command."""
assert app.config.get('SQLALCHEMY_DATABASE_URI')
Console.db_create_all()
Console.add_allowed_user(user_email)
out, err = capfd.readouterr()
assert out == 'Added "{email}" to allowed users list.\n'.format(
email=user_email
)
Console.add_allowed_user(user_email)
out, err = capfd.readouterr()
assert out == '"{email}" was already in the allowed users list.\n'.\
format(
email=user_email
)
def test_schedule_update(app, user_email):
"""Test schedule_update command."""
assert app.config.get('SQLALCHEMY_DATABASE_URI')
Console.db_create_all()
allowed_email = Console.Models.AllowedEmail(user_email)
Console.db.session.add(allowed_email)
Console.db.session.commit()
assert allowed_email.email
user = Console.Models.User(user_email, '123456')
Console.db.session.add(user)
Console.db.session.commit()
assert user.user_id
connection = Console.Models.Connection(
Console.Constants.DB_MYSQL,
'localhost',
'test_db',
user.user_id,
user_name='db_user'
)
Console.db.session.add(connection)
Console.db.session.commit()
assert connection.connection_id
job = Console.Models.Job(
'test job',
'select * from table_1',
user.user_id
)
Console.db.session.add(job)
Console.db.session.commit()
assert job.job_id
outdated_schedule = Console.Models.Schedule(
job_id=job.job_id,
start_time=datetime.datetime.now(),
user_id=user.user_id,
is_active=True,
schedule_type=Console.Constants.SCHEDULE_DAILY
)
Console.db.session.add(outdated_schedule)
Console.db.session.commit()
assert outdated_schedule.schedule_id
outdated_schedule.next_run -= relativedelta(hours=1)
Console.db.session.commit()
assert outdated_schedule.next_run < datetime.datetime.now()
Console.schedule_update()
updated_schedule = Console.Models.Schedule.query.get(
outdated_schedule.schedule_id
)
assert updated_schedule.next_run >= datetime.datetime.now()
|
scattm/DanceCat
|
tests/test_dance_cat_console.py
|
Python
|
mit
| 3,472
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
|
frainfreeze/studying
|
home/python/microblog/app/forms.py
|
Python
|
mit
| 398
|
# coding=utf-8
import os
import unittest
from conans.model.ref import ConanFileReference
from conans.test.utils.tools import TestClient, GenConanfile
from conans.util.files import mkdir
class TransitiveEditableTest(unittest.TestCase):
def test_transitive_editables(self):
# https://github.com/conan-io/conan/issues/4445
libc_ref = ConanFileReference.loads("LibC/0.1@user/testing")
libb_ref = ConanFileReference.loads("LibB/0.1@user/testing")
client = TestClient()
conanfileC = GenConanfile()
client.save({"conanfile.py": str(conanfileC)})
client.run("editable add . LibC/0.1@user/testing")
client2 = TestClient(client.cache_folder)
conanfileB = GenConanfile().with_name("LibB").with_version("0.1").with_require(libc_ref)
client2.save({"conanfile.py": str(conanfileB)})
client2.run("create . user/testing")
conanfileA = GenConanfile().with_name("LibA").with_version("0.1")\
.with_require(libb_ref)\
.with_require(libc_ref)
client2.save({"conanfile.py": str(conanfileA)})
client2.run("install .")
client2.current_folder = os.path.join(client2.current_folder, "build")
mkdir(client2.current_folder)
client2.run("install ..")
|
conan-io/conan
|
conans/test/integration/editable/transitive_editable_test.py
|
Python
|
mit
| 1,341
|
# -*- coding: utf-8 -*-
import humanize
import gevent
from datetime import datetime, timedelta
from disco.bot import CommandLevels
from disco.util.sanitize import S
from disco.types.message import MessageEmbed
from disco.types.channel import ChannelType
from rowboat.plugins import RowboatPlugin as Plugin, CommandFail, CommandSuccess
from rowboat.types.plugin import PluginConfig
from rowboat.util.timing import Eventual
from rowboat.util.input import parse_duration, humanize_duration
from rowboat.models.user import User
from rowboat.models.message import Message, Reminder
from rowboat.util.images import get_dominant_colors_user, get_dominant_colors_guild
from rowboat.constants import (
STATUS_EMOJI, SNOOZE_EMOJI, GREEN_TICK_EMOJI, GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI, RED_TICK_EMOJI_ID, YEAR_IN_SEC
)
class RemindersConfig(PluginConfig):
pass
@Plugin.with_config(RemindersConfig)
class RemindersPlugin(Plugin):
def load(self, ctx):
super(RemindersPlugin, self).load(ctx)
self.reminder_task = Eventual(self.trigger_reminders)
self.spawn_later(10, self.queue_reminders)
def queue_reminders(self):
try:
next_reminder = Reminder.select().order_by(
Reminder.remind_at.asc()
).limit(1).get()
except Reminder.DoesNotExist:
return
self.reminder_task.set_next_schedule(next_reminder.remind_at)
def trigger_reminders(self):
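        # pick up everything due within the next second so borderline reminders fire now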
reminders = Reminder.with_message_join().where(
(Reminder.remind_at < (datetime.utcnow() + timedelta(seconds=1)))
)
waitables = []
for reminder in reminders:
waitables.append(self.spawn(self.trigger_reminder, reminder))
for waitable in waitables:
waitable.join()
self.queue_reminders()
def trigger_reminder(self, reminder):
message = reminder.message_id
channel = self.state.channels.get(message.channel_id)
if not channel:
self.log.warning('Not triggering reminder, channel %s was not found!',
message.channel_id)
reminder.delete_instance()
return
msg = channel.send_message(u'<@{}> you asked me at {} ({} ago) to remind you about: {}'.format(
message.author_id,
reminder.created_at,
humanize_duration(reminder.created_at - datetime.utcnow()),
S(reminder.content)
))
# Add the emoji options
msg.add_reaction(SNOOZE_EMOJI)
msg.add_reaction(GREEN_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
(e.emoji.name == SNOOZE_EMOJI or e.emoji.id == GREEN_TICK_EMOJI_ID) and
e.user_id == message.author_id
)
).get(timeout=30)
except gevent.Timeout:
reminder.delete_instance()
return
finally:
# Cleanup
msg.delete_reaction(SNOOZE_EMOJI)
msg.delete_reaction(GREEN_TICK_EMOJI)
if mra_event.emoji.name == SNOOZE_EMOJI:
reminder.remind_at = datetime.utcnow() + timedelta(minutes=20)
reminder.save()
msg.edit(u'Ok, I\'ve snoozed that reminder for 20 minutes.')
return
reminder.delete_instance()
    @Plugin.command('delete global', '[reminder:str]', group='reminder', aliases=['remove global', 'clean global', 'clear global'], context={'mode': 'global'}, global_=True)
    @Plugin.command('delete global', '[reminder:str]', group='r', aliases=['remove global', 'clean global', 'clear global'], context={'mode': 'global'}, global_=True)
@Plugin.command('delete', '[reminder:str]', group='reminder', aliases=['remove', 'clean', 'clear'], context={'mode': 'server'}, global_=True)
@Plugin.command('delete', '[reminder:str]', group='r', aliases=['remove', 'clean', 'clear'], context={'mode': 'server'}, global_=True)
def cmd_remind_clear(self, event, reminder='all', mode='server'):
if reminder == 'all':
count = Reminder.count_for_user(event.author.id, event.guild.id) if mode == 'server' else Reminder.count_for_user(event.author.id)
            if count == 0:
return event.msg.reply('<:{}> cannot clear reminders when you don\'t have any'.format(RED_TICK_EMOJI))
msg = event.msg.reply('Ok, clear {} reminders?'.format(count))
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
count = Reminder.delete_all_for_user(event.author.id, event.guild.id) if mode == 'server' else Reminder.delete_all_for_user(event.author.id)
return event.msg.reply(':ok_hand: I cleared {} reminders for you'.format(count))
else:
try:
                # validate the ID up front; a bare except would swallow real errors
                try:
                    reminder = int(reminder)
                except ValueError:
return event.msg.reply('cannot convert `{}` to `int`'.format(S(reminder)))
r = Reminder.select(Reminder).where(
(Reminder.message_id << Reminder.with_message_join((Message.id, )).where(
Message.author_id == event.author.id
)) & (Reminder.id == reminder)
).get()
except Reminder.DoesNotExist:
return event.msg.reply('<:{}> cannot find reminder #{}'.format(RED_TICK_EMOJI, reminder))
msg = event.msg.reply('Ok, clear reminder #{}?'.format(reminder))
msg.chain(False).\
add_reaction(GREEN_TICK_EMOJI).\
add_reaction(RED_TICK_EMOJI)
try:
mra_event = self.wait_for_event(
'MessageReactionAdd',
message_id=msg.id,
conditional=lambda e: (
e.emoji.id in (GREEN_TICK_EMOJI_ID, RED_TICK_EMOJI_ID) and
e.user_id == event.author.id
)).get(timeout=10)
except gevent.Timeout:
return
finally:
msg.delete()
if mra_event.emoji.id != GREEN_TICK_EMOJI_ID:
return
Reminder.delete_for_user(event.author.id, r.id)
return event.msg.reply(':ok_hand: I cleared reminder #{} for you'.format(r.id))
@Plugin.command('add', '<duration:str> <content:str...>', group='r', global_=True)
@Plugin.command('remind', '<duration:str> <content:str...>', global_=True)
def cmd_remind(self, event, duration, content):
if Reminder.count_for_user(event.author.id) > 30:
            return event.msg.reply(':warning: you can only have 30 reminders going at once!')
remind_at = parse_duration(duration)
if remind_at > (datetime.utcnow() + timedelta(seconds=5 * YEAR_IN_SEC)):
return event.msg.reply(':warning: thats too far in the future, I\'ll forget!')
r = Reminder.create(
message_id=event.msg.id,
remind_at=remind_at,
content=content
)
self.reminder_task.set_next_schedule(r.remind_at)
event.msg.reply(':ok_hand: I\'ll remind you at {} ({}) #{}'.format(
r.remind_at.isoformat(),
humanize_duration(r.remind_at - datetime.utcnow()),
r.id
))
@Plugin.command('list global', '[limit:int]', context={'mode': 'global'}, group='r', global_=True)
@Plugin.command('list global', '[limit:int]', context={'mode': 'global'}, group='remind', global_=True)
@Plugin.command('reminders global', '[limit:int]', context={'mode': 'global'}, global_=True)
@Plugin.command('list', '[limit:int]', context={'mode': 'server'}, group='r', global_=True)
@Plugin.command('list', '[limit:int]', context={'mode': 'server'}, group='remind', global_=True)
@Plugin.command('reminders', '[limit:int]', context={'mode': 'server'}, global_=True)
def cmd_remind_list(self, event, limit=None, mode='server'):
user = event.msg.author
count = Reminder.count_for_user(user.id, event.guild.id)
total_count = Reminder.count_for_user(user.id)
embed = MessageEmbed()
embed.title = '{} reminder{} ({} total)'.format(
count if mode == 'server' else total_count,
's' if (count != 1 and mode == 'server') or (total_count != 1 and mode == 'global') else '',
total_count
)
embed.set_author(name=u'{}#{}'.format(
user.username,
user.discriminator,
), icon_url=user.avatar_url)
embed.color = get_dominant_colors_user(user, user.get_avatar_url('png'))
embed.set_footer(text='You can cancel reminders with !r clear [ID]')
if (count == 0 and mode == 'server') or total_count == 0:
embed.description = 'You have no upcoming reminders{}.'.format(' in this server. Use `!r list global` to list all your upcoming reminders' if total_count > 0 else '')
else:
query = Reminder.select(Reminder).where(
(Reminder.message_id << Reminder.with_message_join((Message.id, )).where(
(Message.author_id == event.author.id) & (Message.guild_id == event.guild.id if mode == 'server' else True)
)) & (Reminder.remind_at > (datetime.utcnow() + timedelta(seconds=1)))
).order_by(Reminder.remind_at).limit(limit)
for reminder in query:
time = humanize_duration(reminder.remind_at - datetime.utcnow())
channel = Message.select().where(Message.id == reminder.message_id).get().channel_id
channel = self.state.channels.get(channel)
embed.add_field(
name=u'#{} in {}'.format(
reminder.id,
time
),
value=u'[`#{}`](https://discordapp.com/channels/{}/{}/{}) {}'.format(
channel.name if channel.type != ChannelType.DM else 'Jetski',
channel.guild_id if channel.type != ChannelType.DM else '@me',
channel.id,
reminder.message_id,
S(reminder.content)
)
)
return event.msg.reply(embed=embed)
|
ThaTiemsz/jetski
|
rowboat/plugins/reminders.py
|
Python
|
mit
| 11,141
|
# import multiprocessing to avoid this bug (http://bugs.python.org/issue15881#msg170215)
import multiprocessing
assert multiprocessing
import re
from setuptools import setup, find_packages
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = 'rabbitmq_admin/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
install_requires = [
'requests>=2.7.0',
'six>=1.8.0'
]
tests_require = [
'coverage>=4.0',
'flake8>=2.2.0',
'pika>=0.10.0',
'mock>=1.0.1',
'nose>=1.3.0']
docs_require = [
'Sphinx>=1.2.2',
'sphinx_rtd_theme']
extras_require = {
'test': tests_require,
'packaging': ['wheel'],
'docs': docs_require,
}
everything = set(install_requires)
for deps in extras_require.values():
everything.update(deps)
extras_require['all'] = list(everything)
setup(
name='rabbitmq-admin',
version=get_version(),
description='A python interface for the RabbitMQ Admin HTTP API',
long_description=open('README.rst').read(),
url='https://github.com/ambitioninc/rabbitmq-admin',
author='Micah Hausler',
author_email='opensource@ambition.com',
keywords='RabbitMQ, AMQP, admin',
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
license='MIT',
include_package_data=True,
test_suite='nose.collector',
install_requires=install_requires,
tests_require=tests_require,
extras_require=extras_require,
zip_safe=False,
)
|
ambitioninc/rabbitmq-admin
|
setup.py
|
Python
|
mit
| 1,954
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Bartosz Janda
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from .. import helpers
from ..common import SummaryBase
import UIView
import UILabel
class UITableViewCellSyntheticProvider(UIView.UIViewSyntheticProvider):
"""
Class representing UITableViewCell.
"""
def __init__(self, value_obj, internal_dict):
super(UITableViewCellSyntheticProvider, self).__init__(value_obj, internal_dict)
self.type_name = "UITableViewCell"
self.register_child_value("reuse_identifier", ivar_name="_reuseIdentifier",
primitive_value_function=SummaryBase.get_summary_value,
summary_function=self.get_reuse_identifier_summary)
self.register_child_value("text_label", ivar_name="_textLabel",
provider_class=UILabel.UILabelSyntheticProvider,
summary_function=self.get_text_label_summary)
self.register_child_value("detail_text_label", ivar_name="_detailTextLabel",
provider_class=UILabel.UILabelSyntheticProvider,
summary_function=self.get_detail_text_label)
@staticmethod
def get_reuse_identifier_summary(value):
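        """
        Reuse identifier summary.
        :param str value: Reuse identifier.
        :return: Reuse identifier summary.
        :rtype: str
        """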
return "reuseIdentifier={}".format(value)
@staticmethod
def get_text_label_summary(provider):
"""
Text label summary.
:param UILabel.UILabelSyntheticProvider provider: UILabel provider
:return: Text label summary.
:rtype: str
"""
value = provider.text_value
if value is not None:
return "textLabel={}".format(provider.text_value)
return None
@staticmethod
def get_detail_text_label(provider):
"""
Detail label summary.
:param UILabel.UILabelSyntheticProvider provider: UILabel provider
:return: Detail label summary.
:rtype: str
"""
value = provider.text_value
if value is not None:
return "detailLabel={}".format(provider.text_value)
return None
def summaries_parts(self):
return [self.text_label_summary,
self.detail_text_label_summary,
self.reuse_identifier_summary,
self.tag_summary]
def summary_provider(value_obj, internal_dict):
return helpers.generic_summary_provider(value_obj, internal_dict, UITableViewCellSyntheticProvider)
|
bartoszj/Mallet
|
mallet/UIKit/UITableViewCell.py
|
Python
|
mit
| 3,568
|
import argparse
import logging
from rpi_twitter.helpers import authenticate, timestamp
def post_tweet(contents, add_time_stamp=False, reply_to=None, conf_file=None):
api = authenticate(conf_file=conf_file)
# Add a time stamp to the beginning
time_stamp = ""
if add_time_stamp:
time_stamp = timestamp() + ": "
logging.debug("Time Stamp: '{stamp}'".format(stamp=time_stamp))
# Add a name to the beginning of the tweet
reply = ""
if reply_to is not None:
if reply_to[0] == "@":
reply = reply_to.strip() + " "
else:
reply = "@" + reply_to.strip() + " "
logging.debug("Reply to: '{reply}'".format(reply=reply))
# Assemble the tweet
tweet = reply + time_stamp + contents
logging.debug("Tweet: '{tweet}'".format(tweet=tweet))
if len(tweet) > 280:
txt = "Tweet is too long: {tweet}".format(tweet=tweet)
logging.error(txt)
raise ValueError(txt)
logging.info("Calling Twitter API to post Tweet.")
api.update_status(tweet)
def main():
# Command line parsing
parser = argparse.ArgumentParser(
prog="Raspberry Pi Twitter Bot",
description="Send a tweet",
)
# The list of input files
parser.add_argument(
"tweet",
type=str,
help="the content of the tweet"
)
parser.add_argument(
"-s",
"--time-stamp",
help="add a time stamp to the beginning of the tweet",
dest="add_time_stamp",
action="store_true",
default=False,
)
parser.add_argument(
"-r",
"--reply-to",
help="a twitter handle to start the tweet with",
dest="reply_to",
default=None,
)
parser.add_argument(
"-c",
"--config",
help="override the default configuration file",
dest="conf_file",
default=None,
)
args = parser.parse_args()
logging.debug("Arguments: {args}".format(args=args))
# Send the tweet
post_tweet(
args.tweet,
add_time_stamp=args.add_time_stamp,
reply_to=args.reply_to,
conf_file=args.conf_file,
)
if __name__ == "__main__":
main()
|
agude/raspberry-pi-twitter-bot
|
rpi_twitter/t.py
|
Python
|
mit
| 2,208
|
import logging
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.template import TemplateSyntaxError, Variable
from mezzanine.pages.models import Page
from mezzanine import template
from mezzanine.template.loader import get_template
register = template.Library()
@register.render_tag
def superfish_submenu(context, token):
"""
Return a list of child pages for the given parent, storing all
pages in a dict in the context when first called using parents as keys
for retrieval on subsequent recursive calls from the menu template.
"""
# First arg could be the menu template file name, or the parent page.
# Also allow for both to be used.
# logging.debug('superfish_submenu')
template_name = None
parent_page = None
parts = token.split_contents()[1:]
for part in parts:
part = Variable(part).resolve(context)
if isinstance(part, unicode):
template_name = part
elif isinstance(part, Page):
parent_page = part
if template_name is None:
try:
template_name = "mezzanine_superfish/superfishtree.html"
# template_name = context["menu_template_name"]
except KeyError:
error = "No template found for page_menu in: %s" % parts
raise TemplateSyntaxError(error)
context["menu_template_name"] = template_name
# logging.debug("context['menu_template_name'] is " + context["menu_template_name"])
if "menu_pages" not in context:
pages = defaultdict(list)
try:
user = context["request"].user
slug = context["request"].path
except KeyError:
user = None
slug = ""
for page in Page.objects.published(for_user=user).select_related(depth=2).order_by("_order"):
page.set_menu_helpers(slug)
pages[page.parent_id].append(page)
context["menu_pages"] = pages
context["on_home"] = slug == reverse("home")
# ``branch_level`` must be stored against each page so that the
# calculation of it is correctly applied. This looks weird but if we do
# the ``branch_level`` as a separate arg to the template tag with the
# addition performed on it, the addition occurs each time the template
# tag is called rather than once per level.
context["branch_level"] = 0
if parent_page is not None:
context["branch_level"] = parent_page.branch_level + 1
parent_page = parent_page.id
context["page_branch"] = context["menu_pages"].get(parent_page, [])
for i, page in enumerate(context["page_branch"]):
context["page_branch"][i].branch_level = context["branch_level"]
t = get_template(template_name, context)
# logging.debug(context["page_branch"])
return t.render(context)
|
cartwheelweb/mezzanine_superfish
|
mezzanine_superfish/templatetags/mezzanine_superfish_tags.py
|
Python
|
mit
| 2,839
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Cloudscaling Group, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import object_mapper
try:
import six
Iterator = six.Iterator
except ImportError:
import sys
if sys.version_info[0] >= 3:
Iterator = object
else:
class Iterator(object):
def next(self):
return type(self).__next__(self)
class ModelBase(Iterator):
"""Base class for models."""
__table_initialized__ = False
def save(self, session):
"""Save this object."""
# NOTE(boris-42): This part of code should be look like:
# session.add(self)
# session.flush()
# But there is a bug in sqlalchemy and eventlet that
# raises NoneType exception if there is no running
# transaction and rollback is called. As long as
# sqlalchemy has this bug we have to create transaction
# explicitly.
with session.begin(subtransactions=True):
session.add(self)
session.flush()
def __repr__(self):
attrs = ", ".join(("%s=%s" % (k, v) for k, v in self.items()))
return "%s(%s)" % (self.__tablename__.title(), attrs)
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def __contains__(self, key):
# Don't use hasattr() because hasattr() catches any exception, not only
        # AttributeError. We want to pass through SQLAlchemy exceptions
# (ex: sqlalchemy.orm.exc.DetachedInstanceError).
try:
getattr(self, key)
except AttributeError:
return False
else:
return True
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
columns = list(dict(object_mapper(self).columns).keys())
return ModelIterator(self, iter(columns))
def update(self, values):
"""Make the model object behave like a dict."""
for k, v in values.items():
setattr(self, k, v)
def _as_dict(self):
"""Make the model object behave like a dict.
Includes attributes from joins.
"""
local = dict((key, value) for key, value in self)
joined = dict([(k, v) for k, v in self.__dict__.items() if not k[0] == '_'])
local.update(joined)
return local
def items(self):
"""Make the model object behave like a dict."""
return self._as_dict().items()
def keys(self):
"""Make the model object behave like a dict."""
return [key for key, value in self.items()]
class ModelIterator(Iterator):
def __init__(self, model, columns):
self.model = model
self.i = columns
def __iter__(self):
return self
# In Python 3, __next__() has replaced next().
def __next__(self):
n = next(self.i)
return n, getattr(self.model, n)
|
xgfone/snippet
|
snippet/example/python/sqlalchemy-orm-model.py
|
Python
|
mit
| 3,895
|
import requests
import csv
import time
import re
from bs4 import BeautifulSoup
CSE_CATALOG_URL = 'https://cse.ucsd.edu/undergraduate/courses/prerequisites-cse-undergraduate-classes'
def clean(string, utf8=None):
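    """Swap common UTF-8 punctuation for ASCII equivalents and collapse
    runs of whitespace; return UTF-8 bytes when utf8 is True."""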
string = string.encode('utf-8')
replace_values = [(u"\xa0".encode('utf-8'), " "), (u"\u2014".encode('utf-8'), "-"), \
(u"\u0027".encode('utf-8'), "'"), (u"\u2013".encode('utf-8'), "-"), \
(u"\u2019".encode('utf-8'), "'")]
for utf, new in replace_values:
string = string.replace(utf, new)
if utf8 and utf8 is True:
return " ".join(string.split()).encode('utf-8')
return " ".join(string.split())
def extract_catalog_dict(record):
course_ids = []
course_titles = []
course_prereqs = []
if record:
start = time.time()
print "Requesting access to the UCSD catalog page at %s..." % CSE_CATALOG_URL
page = requests.get(CSE_CATALOG_URL)
if record:
print "Webscraping the catalog..."
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table')
# Find all the <tr> tag pairs, skip the first one, then for each.
for row in table.find_all('tr')[1:]:
col = row.find_all('td')
cse_course = clean(col[0].text.strip(), True)
course_ids.append(cse_course)
title = clean(col[1].text.strip(), True)
course_titles.append(title)
            # creates a list of prereqs, with two rules:
            # no co-requisites
            # no self-dependencies
prereqs = col[2].text.strip().split('***', 1)[0].split('Co-requisite', 1)[0].split('co-requisite', 1)[0]
prereqs = clean(prereqs.replace(cse_course, ""), True)
# 1 capital letter, 0+ letters, space, 1+ digits, 0 or 1 letter
# i.e. 'CSE 30' or 'CSE 8A'
pattern = "[A-Z][a-zA-Z]*\s?[0-9]+[a-zA-Z]?"
or_pattern = "(?:[a-zA-Z]+\s?[0-9]+[a-zA-Z]?)+(?: or [a-zA-Z]+\s?[0-9]+[a-zA-Z]?)+"
# creates a list of prereqs based on the regex
or_filter = re.findall(or_pattern, prereqs)
reg_filter = re.findall(pattern, prereqs)
filtered_prereqs = [course for course in reg_filter if not any(course in c for c in or_filter)]
filtered_prereqs += [courses.split(" or ") for courses in or_filter]
course_prereqs.append(filtered_prereqs)
if record:
end = time.time()
print "Completed scraping... %.3f seconds" % (end-start)
write_catalog(course_ids, course_titles, course_prereqs, record)
return course_ids, course_titles, course_prereqs
def write_catalog(ids, titles, prereqs, record):
if record:
start = time.time()
print "Writing to the csv file 'courses.csv'..."
with open('courses.csv', 'wb') as csvfile:
writer = csv.writer(csvfile)
rows = zip(ids, titles, prereqs)
writer.writerows(rows)
if record:
end = time.time()
print "Completed writing to file... %.3f seconds" % (end-start)
def read_catalog(csv_file):
catalog = []
with open(csv_file, 'rb') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
c_id = row[0]
c_title = row[1]
c_prereqs = ''.join([x for x in row[2] if x not in ["'", "[", "]"]]).split(', ')
#makes a copy to loop through and remove 'or prerequisites'
c_p_copy = [c for c in c_prereqs]
or_pattern = "\[[a-zA-Z0-9, ]+]"
#finds and parses the string for 'or prerequisites'
or_c_prereqs = re.findall(or_pattern, row[2][1:-1].replace("'", ""))
#parses the list of 'or prerequisites'
for i in range(len(or_c_prereqs)):
or_c_prereqs[i] = or_c_prereqs[i][1:-1].split(', ')
#removes any courses in the 'or prerequisites'
for c in c_p_copy:
if any(c in course for course in or_c_prereqs):
c_prereqs.remove(c)
#combine the lists and remove any empty strings
c_prereqs += or_c_prereqs
c_prereqs = filter(None, c_prereqs)
catalog.append((c_id, c_title, c_prereqs))
course_catalog = {}
#create a dictionary of the courses and the prereqs
#use course_id as key, a tuple of id/title/prereq as value
for x, y, z in catalog:
course_catalog[x.upper().replace(" ", "")] = (x, y, z)
return course_catalog
|
jeff4elee/course_path
|
course_path/home/catalog_scraper.py
|
Python
|
mit
| 3,958
|
__author__ = 'Kovachev'
from django.conf.urls import patterns, include, url
from django.views.generic import ListView, DetailView
from apps.post.models import Post
urlpatterns = [
url(r'^$', ListView.as_view(
queryset=Post.objects.filter(approved=True).order_by("created")[:5],
template_name='index.html'), name='index'),
]
|
Lyudmil-Kovachev/vertuto
|
vertuto/apps/home/urls.py
|
Python
|
mit
| 346
|
""" Base class for all taemin plugin """
import itertools
MAX_MSG_LENGTH = 400
class TaeminPlugin(object):
helper = {}
def __init__(self, taemin):
self.taemin = taemin
def start(self):
pass
def stop(self):
pass
def on_join(self, connection):
pass
def on_pubmsg(self, msg):
pass
def on_privmsg(self, msg):
pass
def on_quit(self, user):
pass
def on_part(self, connection):
pass
def kick(self, chan, nick, comment=""):
if not nick:
return
if not isinstance(nick, str):
nick = nick.decode("utf-8")
if chan in self.taemin.chans:
self.taemin.connection.kick(chan, nick, comment)
def privmsg(self, chan, msg):
""" Send a message to a chan or an user """
if not msg:
return
if not isinstance(msg, str):
msg = msg.decode("utf-8")
for m in ("".join(itertools.takewhile(lambda x: x, a)) for a in itertools.zip_longest(*([iter(msg)] * MAX_MSG_LENGTH))):
print(m)
if chan in self.taemin.chans:
self.taemin.create_pub_message(self.taemin.name, chan, m)
else:
self.taemin.create_priv_message(self.taemin.name, chan, m)
self.taemin.connection.privmsg(chan, m)
|
ningirsu/taemin
|
taemin/plugin.py
|
Python
|
mit
| 1,367
|
# -*- coding: utf-8 -*-:
from django.contrib.auth import get_user_model
from resrc.userprofile.models import Profile
def karma_rate(user_pk, diff):
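    """Add ``diff`` to the user's karma, initialising it when not yet set."""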
user = Profile.objects.get(user__pk=user_pk)
if user.karma:
user.karma += diff
else:
user.karma = diff
user.save()
|
sergiolimajr/resrc
|
resrc/utils/karma.py
|
Python
|
mit
| 297
|
from django.conf.urls import url, include
from django.contrib import admin
from . import views
urlpatterns = [
# index images
url(r'^$', views.index, name = 'image_index'),
url(r'^imageboard/$', views.index, name = 'image_index'),
# images urls
url(r'^imageboard/(?P<image_id>[0-9]+)/$', views.detail, name = 'image_details'),
# big image urls
url(r'^imageboard/ib-large/(?P<image_id>[0-9]+)/$', views.ib_large, name = 'image_big_index'),
# images without tags urls
url(r'^imageboard/wo-tags/$', views.images_wo_tags, name = 'image_wo_tags_index'),
# ajax list tags
url(r'^imageboard/wo-tags/list-tags/$', views.list_tags, name = 'image_wo_tags_list'),
# ajax ajax_save_tags
url(r'^imageboard/wo-tags/ajax-save-tags/$', views.ajax_save_tags, name = 'ajax_save_tags'),
# ajax list folder
url(r'^imageboard/image/list-folders/$', views.list_folders, name = 'image_folders_list'),
# tags urls
url(r'^tags/$', views.tags, name = 'tag_index'),
url(r'^tag/(?P<tag_id>[0-9]+)/$', views.detail_tags, name = 'tag_details'),
# index search
url(r'^search/$', include('haystack.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'upload/', views.upload, name = 'jfu_upload' ),
url(r'^delete/(?P<pk>\d+)$', views.upload_delete, name = 'jfu_delete' ),
]
|
gmunumel/django_imageboard
|
imageboard/urls.py
|
Python
|
mit
| 1,402
|
import os
import json
import argparse
import cPickle as pkl
from collections import defaultdict
from tqdm import tqdm
from dataset import VidVRD
from baseline import segment_video, get_model_path
from baseline import trajectory, feature, model, association
def load_object_trajectory_proposal():
"""
Test loading precomputed object trajectory proposals
"""
dataset = VidVRD('../vidvrd-dataset', '../vidvrd-dataset/videos', ['train', 'test'])
video_indices = dataset.get_index(split='train')
for vid in video_indices:
durations = set(rel_inst['duration'] for rel_inst in dataset.get_relation_insts(vid, no_traj=True))
for duration in durations:
segs = segment_video(*duration)
for fstart, fend in segs:
trajs = trajectory.object_trajectory_proposal(dataset, vid, fstart, fend, gt=False, verbose=True)
trajs = trajectory.object_trajectory_proposal(dataset, vid, fstart, fend, gt=True, verbose=True)
video_indices = dataset.get_index(split='test')
for vid in video_indices:
anno = dataset.get_anno(vid)
segs = segment_video(0, anno['frame_count'])
for fstart, fend in segs:
trajs = trajectory.object_trajectory_proposal(dataset, vid, fstart, fend, gt=False, verbose=True)
trajs = trajectory.object_trajectory_proposal(dataset, vid, fstart, fend, gt=True, verbose=True)
def load_relation_feature():
"""
Test loading precomputed relation features
"""
dataset = VidVRD('../vidvrd-dataset', '../vidvrd-dataset/videos', ['train', 'test'])
extractor = feature.FeatureExtractor(dataset, prefetch_count=0)
video_indices = dataset.get_index(split='train')
for vid in video_indices:
durations = set(rel_inst['duration'] for rel_inst in dataset.get_relation_insts(vid, no_traj=True))
for duration in durations:
segs = segment_video(*duration)
for fstart, fend in segs:
extractor.extract_feature(vid, fstart, fend, verbose=True)
video_indices = dataset.get_index(split='test')
for vid in video_indices:
anno = dataset.get_anno(vid)
segs = segment_video(0, anno['frame_count'])
for fstart, fend in segs:
extractor.extract_feature(vid, fstart, fend, verbose=True)
def train():
dataset = VidVRD('../vidvrd-dataset', '../vidvrd-dataset/videos', ['train', 'test'])
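    # hyper-parameters for training the baseline short-term relation model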
param = dict()
param['model_name'] = 'baseline'
param['rng_seed'] = 1701
param['max_sampling_in_batch'] = 32
param['batch_size'] = 64
param['learning_rate'] = 0.001
param['weight_decay'] = 0.0
param['max_iter'] = 5000
param['display_freq'] = 1
param['save_freq'] = 5000
param['epsilon'] = 1e-8
param['pair_topk'] = 20
param['seg_topk'] = 200
print(param)
model.train(dataset, param)
def detect():
dataset = VidVRD('../vidvrd-dataset', '../vidvrd-dataset/videos', ['train', 'test'])
with open(os.path.join(get_model_path(), 'baseline_setting.json'), 'r') as fin:
param = json.load(fin)
short_term_relations = model.predict(dataset, param)
# group short term relations by video
video_st_relations = defaultdict(list)
for index, st_rel in short_term_relations.items():
vid = index[0]
video_st_relations[vid].append((index, st_rel))
# video-level visual relation detection by relational association
print('greedy relational association ...')
video_relations = dict()
for vid in tqdm(video_st_relations.keys()):
video_relations[vid] = association.greedy_relational_association(
dataset, video_st_relations[vid], max_traj_num_in_clip=100)
# save detection result
with open(os.path.join(get_model_path(), 'baseline_relation_prediction.json'), 'w') as fout:
output = {
'version': 'VERSION 1.0',
'results': video_relations
}
json.dump(output, fout)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='VidVRD baseline')
parser.add_argument('--load_feature', action="store_true", default=False, help='Test loading precomputed features')
parser.add_argument('--train', action="store_true", default=False, help='Train model')
parser.add_argument('--detect', action="store_true", default=False, help='Detect video visual relation')
args = parser.parse_args()
if args.load_feature or args.train or args.detect:
if args.load_feature:
load_object_trajectory_proposal()
load_relation_feature()
if args.train:
train()
if args.detect:
detect()
else:
parser.print_help()
|
xdshang/VidVRD-helper
|
baseline.py
|
Python
|
mit
| 4,731
|
# Copyright (c) 2016 Fabian Kochem
|
conceptsandtraining/libtree
|
tests/__init__.py
|
Python
|
mit
| 35
|
# -*- coding: utf-8 -*-
import os
import sys
from logging import getLogger
import Consts
import App
import util
logger = getLogger(__name__)
def _add_new(builder, module_id, new, curdeps, cur):
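    """Feed module_id into the builder, reusing the cached jar when the
    revision is unchanged; return True when a new jar was registered,
    False when the cached one was reused."""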
if module_id in curdeps:
entry = curdeps[module_id]
if entry['revision'] == new['revision']:
jar = cur.get_jar_path(entry['name'])
if os.path.exists(jar):
builder.add(module_id, jar, entry, hard=True)
return False
else:
sys.stdout.write("Update '%s' revision %s to %s\n" % (module_id, entry['revision'], new['revision']))
newjar = new['path']
builder.add(module_id, newjar, {
'cache': newjar,
'revision': new['revision']
})
return True
def install_to_pod(conf, app, artifacts, keepold=False):
current = app.get_current_context()
artifact_set = frozenset(artifacts)
args = ['Setup', 'runtime']
args.extend(artifact_set)
sysjava = util.new_sys_java(conf)
(code, output) = sysjava.sys_run(args, conf.to_dict())
if code != 0:
return False
builder = app.new_context_builder(artifact_set)
try:
deps = output['resolve']['dependencies']
curdeps = current.get_dependency_dict() if current else {}
is_update = False
for d in deps:
classifier = d.get('classifier', None)
if classifier is not None:
module_id = '%s::%s' % (d['id'], classifier)
else:
module_id = d['id']
is_update = (_add_new(builder, module_id, d, curdeps, current) or is_update)
if not is_update:
builder.revert()
return False
outcontrols = current.get_uncontrol_jars() if current else []
if len(outcontrols):
logger.warn("There are out of control jars: %s", ",".join(outcontrols))
for x in outcontrols:
builder.add(None, x, None, hard=True)
_check_errors(output)
except:
builder.revert()
raise
builder.commit(output['mains'], output['resources'], keepold=keepold)
return True
def _check_errors(output):
failures = output.get('failures', {})
if failures:
for undef, errors in failures.items():
classes = [x for x in errors if x.find('#') < 0]
mains = [x for x in errors if 0 <= x.find('#main')]
logger.warning("%s is undefined, so invalidate the below.", undef)
if classes:
logger.warning(" %s", "\n".join(["class: %s" % x for x in classes]))
if mains:
logger.warning(" %s", "\n".join(["main(): %s" % x for x in mains]))
duplicates = output.get('duplicates', {})
if duplicates:
for path, jars in duplicates.items():
logger.warning("resource %s is a duplicated entry." % path)
logger.warning(" %s", "\n".join(["jar: %s" % x for x in jars]))
def install(conf, args):
app = App.AppPod(conf)
current = app.get_current_context()
if not args.add and current:
        return False, 'already an installed directory. Use "-a" to add jar files if you want.'
installed = current and current.get_installs() or set()
artifacts = installed | set(args.artifacts)
# TODO: parse version string
# TODO check conflict
if artifacts <= installed:
        return True, None
if install_to_pod(conf, app, artifacts):
return True, None
return False, None
def update(conf, args):
app = App.AppPod(conf)
installed = app.get_current_context()
if not installed:
return False, 'not found "%s": no marun installed status.' % Consts.APP_STATUS_FILE
if install_to_pod(conf, app, installed.get_installs(), args.keepold):
return True, None
return False, None
def setup_subcmd(subparsers):
install_parser = subparsers.add_parser('install', help='Install artifacts')
install_parser.add_argument('artifacts', nargs='+')
install_parser.add_argument('-a', '--add', help='additional install', action='store_true')
install_parser.add_argument('-d', '--libdir')
install_parser.set_defaults(handler=install)
update_parser = subparsers.add_parser('update', help='Update artifacts')
update_parser.add_argument('-k', '--keepold', action='store_true', help='keep old install')
# update_parser.add_argument('--minor', help='update minor version if pom.xml accept (default patch)')
# update_parser.add_argument('--ignore-pom')
update_parser.set_defaults(handler=update)
|
nishemon/marun
|
marun/sub_install.py
|
Python
|
mit
| 4,561
|
"""
Django settings for Asteria project.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
For considerations when deploying to production, see
https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
"""
from configparser import (
ConfigParser
)
from datetime import (
datetime as dt
)
from pathlib import (
Path
)
from uuid import (
uuid4
)
from django.contrib.messages import (
constants as messages
)
from django.utils import (
timezone
)
BASE_DIR = Path(__file__).resolve().parent.parent
config = ConfigParser()
config.read(BASE_DIR.joinpath('asteria.config'))
application_config = config['Application']
SECRET_KEY = application_config['Secret key']
if not SECRET_KEY:
SECRET_KEY = str(uuid4())
ADMINS = list()
for name, email in config['Admins'].items():
ADMINS.append((name, email))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config.getboolean('Application', 'Debug mode')
TITLE = application_config['Title']
ALLOWED_HOSTS = [host.strip() for host in application_config['Allowed hosts'].split(',')]
use_tls = config.getboolean('Application', 'Use TLS')
SECURE_CONTENT_TYPE_NOSNIFF = use_tls
SECURE_BROWSER_XSS_FILTER = use_tls
SECURE_SSL_REDIRECT = use_tls
SESSION_COOKIE_SECURE = use_tls
CSRF_COOKIE_SECURE = use_tls
X_FRAME_OPTIONS = 'DENY'
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 12,
}
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = application_config['Language code']
TIME_ZONE = application_config['Time zone']
USE_TZ = True
USE_I18N = True
USE_L10N = True
DT_FMT = '%Y-%m-%d %H:%M:%S'
events_config = config['Events']
REGISTRATION_START = dt.strptime(events_config['Registration start'], DT_FMT)
CTF_START = dt.strptime(events_config['CTF start' ], DT_FMT)
REGISTRATION_END = dt.strptime(events_config['Registration end' ], DT_FMT)
CTF_END = dt.strptime(events_config['CTF end' ], DT_FMT)
REGISTRATION_START = timezone.make_aware(REGISTRATION_START)
CTF_START = timezone.make_aware(CTF_START )
REGISTRATION_END = timezone.make_aware(REGISTRATION_END )
CTF_END = timezone.make_aware(CTF_END )
MAX_TEAM_SIZE = config.getint('Teams', 'Max team size')
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'challenges'
LOGOUT_REDIRECT_URL = 'announcements'
# Application definition
INSTALLED_APPS = [
# Asteria apps
'announcements',
'challenges',
'teams',
'tests',
# Django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
dir_config = config['Directories']
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [Path(dir_config['Root']).joinpath('common')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
db_config = config['Database']
DATABASES = {
'default': {
'ENGINE' : db_config['Backend' ],
'HOST' : db_config['Host' ],
'PORT' : db_config['Port' ],
'NAME' : db_config['Name' ],
'USER' : db_config['User' ],
'PASSWORD': db_config['Password'],
'TEST': {
'NAME': BASE_DIR.joinpath('test_db.sqlite3'),
},
}
}
if 'sqlite3' not in DATABASES['default']['ENGINE']:
DATABASES['default']['OPTIONS'] = {
'sql_mode': 'STRICT_ALL_TABLES',
}
email_config = config['Email']
EMAIL_BACKEND = email_config['Backend' ]
EMAIL_HOST = email_config['Host' ]
EMAIL_PORT = email_config['Port' ]
EMAIL_HOST_USER = email_config['User' ]
EMAIL_HOST_PASSWORD = email_config['Password' ]
DEFAULT_FROM_EMAIL = email_config['Default address']
SERVER_EMAIL = email_config['Server address' ]
EMAIL_USE_TLS = email_config.getboolean('Use TLS')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = dir_config['Static']
STATIC_URL = '/static/'
MEDIA_ROOT = dir_config['Media']
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
Path(dir_config['Root']).joinpath('common/static'),
]
ROOT_URLCONF = 'asteria.urls'
AUTH_USER_MODEL = 'teams.Player'
MESSAGE_TAGS = {messages.ERROR: 'danger'}
CACHES = {
'default': {
'TIMEOUT': 30,
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
|
elespike/Asteria
|
asteria/settings.py
|
Python
|
mit
| 6,095
|
from sympy import symbols, lambdify, sin
from numpy.polynomial.legendre import leggauss
import lega.fourier_basis as fourier
from lega.legendre_basis import forward_transformation_matrix as FLT
from lega.legendre_basis import backward_transformation_matrix as BLT
from itertools import product
import numpy as np
x, y = symbols('x, y')
f = x**2*sin(y)
n = 4
m = 3
# Create grid for evaluating f
fourier_points = np.linspace(0, 2*np.pi, n, endpoint=False)
legendre_points = leggauss(m)[0]
points = np.array([list(p)
for p in product(fourier_points, legendre_points)])
# Eval
f = lambdify([x, y], f, 'numpy')
F = f(points[:, 0], points[:, 1]).reshape((n, m))
# Each column holds f evaluated at the Fourier points for one fixed y (a
# quadrature point); Fourier-transform every column.
F_hat = np.array([fourier.fft(col) for col in F.T]).T
# Now Forward Legendre transform each row
flt = FLT(m)
F_hat = np.array([flt.dot(row) for row in F_hat])
# Come back from wave numbers to grid values
blt = BLT(m).T
F_ = np.array([blt.dot(row) for row in F_hat])
F_ = np.array([fourier.ifft(col) for col in F_.T]).T
assert np.allclose(F, F_)
|
MiroK/lega
|
sandbox/fourier_legendre.py
|
Python
|
mit
| 1,358
|
import b64cy
import b64_mod
import en_word
import grouper
def keyfreq(filename, flag=0):
if flag == 0:
canadateList = []
thefile = open(filename)
linenumber = 0
total = 0
greatestvalL = []
greatestnumber = 0
greatestnumberL = []
glinenumberL = []
for eachline in thefile:
#print eachline
linenumber+=1
charcountmap = {}
charstring = b64cy.decode(eachline.strip(),1)
#print charstring
for x in charstring:
#print x
if charcountmap.has_key(x):
count = charcountmap[x]
count +=1
charcountmap[x] = count
else:
charcountmap[x] = 1
#print charcountmap
topval = ''
number = 0
for x,i in charcountmap.iteritems():
if i>number:
number = i
topval = x
if number>=greatestnumber:
greatestnumberL.append(number)
greatestnumber = number
greatestvalL.append(topval)
glinenumberL.append(linenumber)
canadateList.append(eachline.strip())
total += number
    elif flag == 1:  # when flag is 1, 'filename' is the encoded data itself, not a file path
charcountmap = {}
testname = b64cy.decode(filename,1)
print testname
for x in testname:
print x
            return 0
return canadateList,greatestvalL,glinenumberL
# In this return, canadateList contains every value from the file that qualified as having a greatest-occurring character.
# The order they are listed in corresponds to the other lists:
# greatestvalL holds the symbol itself, and glinenumberL holds the line number it occurs on.
def getkeylist(val):
    """generates a list of possible keys based on the ' etaoinshr' string of the most frequent characters in English text"""
COMMON_CHAR_STRING = " etaoinshr"
keylist = []
for x in COMMON_CHAR_STRING:
decx = ord(x)
        hexx = b64_mod.dec_to_hex(decx, False)  # False passed for single-char conversion
        char = b64_mod.dec_to_hex(ord(val), False)  # False passed for single-char conversion
keylist.append(b64cy.hexorsum(hexx,char))
return keylist
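# Standalone illustration (an assumption about the scheme, independent of the
# b64cy/b64_mod helpers above): for a single-byte XOR cipher, XORing the most
# common ciphertext byte with each frequent English character yields the same
# key candidates that getkeylist derives via hex strings.
def _guess_keys_plain(most_common_cipher_byte):
    """return candidate key bytes for a single-byte XOR cipher"""
    return [most_common_cipher_byte ^ ord(c) for c in " etaoinshr"]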
def find(filename):
canlist,val,line = keyfreq(filename)
indexer = -1
loop_controler = 0
vallist = []
found = 0
while len(canlist)>loop_controler:
if found == 1:
break
print "Line number: " + str(line[indexer])
print "Target hex: " + str(canlist[indexer])
keys = getkeylist(val[indexer])
print "Keys to be tested: " + str(keys)
print "*********************************************"
for x in keys:
vallist.append(b64cy.decode(b64cy.hexorsum(canlist[indexer],b64cy.keylen(canlist[indexer],x,1)),1))
for x in vallist:
tocheck = x.split(' ')
truecount = 0
for y in tocheck:
if en_word.is_english_word(y) == "true":
truecount +=1
if truecount > 3:
found = 1
print str(keys[loop_controler])+ " " + str(x)
break
indexer-=1
loop_controler +=1
# This is the code I used to decrypt the hex message in the e-mail; the key was 'X'.
# 1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736
# was the hex code from the e-mail; decoded, it reads "Cooking MC's like a pound
# of bacon".
# b64cy.decode(b64cy.hexorsum(var,b64cy.keylenmatcher(var,'X')),1)
def repxor_key_finder(filename):
document = open (filename)
thisline = document.readline()
thisline = b64_mod.basetohex(thisline.strip())
lowest_list, size_list = correct_size(thisline)
for x in size_list:
print 'Size List' + str(x)
        codedBlocks = grouper.chunks(thisline,x)  # FIXME: x can be passed as an odd number here, which breaks hex_to_dec in keyfreq
for x in codedBlocks:
keyfreq(x,1)
document.close()
def hamm_calc(string1,string2):
answer = b64cy.hexorsum(string1,string2)
answer = b64_mod.hextobin(answer)
hamm = 0
for x in answer:
hamm += int(x)
return hamm
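# Self-contained cross-check (illustrative only, independent of the helpers
# above): bitwise Hamming distance between two equal-length byte strings,
# verified against the well-known matasano/cryptopals test vector.
def _hamming_plain(s1, s2):
    """bitwise Hamming distance between two equal-length strings"""
    return sum(bin(ord(a) ^ ord(b)).count('1') for a, b in zip(s1, s2))

assert _hamming_plain("this is a test", "wokka wokka!!!") == 37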
def correct_size(text_string):
lowest_list = []
current_lowest = 100
size_list = []
for x in range(2,40,1):
#print x
keylist = grouper.chunks(text_string,x)
#print keylist
        newval = hamm_calc(keylist[0], keylist[1]) / float(x)  # float division so the normalized distance is not truncated
if newval<current_lowest:
lowest_list.append(newval)
size_list.append(x)
current_lowest = newval
return lowest_list, size_list
|
Stbot/PyCrypt
|
first writes/keyfinder.py
|
Python
|
mit
| 4,888
|
from collections import namedtuple
"""
G = (E, V)
"""
Edge = namedtuple("Edge", ['to', 'weight'])
class Graph(object):
def __init__(self):
self.vertices = {}
def add_vertex(self, v):
if v not in self.vertices:
self.vertices[v] = []
def get_edges(self, v):
if v in self.vertices:
return self.vertices[v]
else:
return []
def add_edge(self, v1, v2, directed=False, weight=None):
edge = Edge(to=v2, weight=weight)
self.add_vertex(v1)
self.vertices[v1].append(edge)
if not directed:
self.add_edge(v2, v1, directed=True, weight=weight)
@property
def vertices_list(self):
return self.vertices.keys()
@property
def num_vertices(self):
return len(self.vertices.keys())
@classmethod
def from_tuple_list(cls, edge_list, directed=False):
g = Graph()
for e in edge_list:
g.add_edge(e[0], e[1], directed)
return g
def __repr__(self):
txt = []
for v in self.vertices:
for e in self.vertices[v]:
txt.append('(%s, %s), ' % (v, e.to))
return ''.join(txt)
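# A minimal usage sketch (not part of the original module): build a small
# undirected graph and inspect it.
if __name__ == '__main__':
    g = Graph.from_tuple_list([('a', 'b'), ('b', 'c'), ('a', 'c')])
    print(g.num_vertices)    # 3
    print(g.get_edges('a'))  # [Edge(to='b', weight=None), Edge(to='c', weight=None)]
    print(g)                 # all edges in both directions (order may vary)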
|
fcaneto/py_data_structures
|
graphs/graphs.py
|
Python
|
mit
| 1,215
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import django
from django import forms
from django.core.urlresolvers import reverse
from django.forms.models import formset_factory
from django.middleware.csrf import _get_new_csrf_key
from django.template import (
TemplateSyntaxError, Context
)
import pytest
from django.utils.translation import ugettext_lazy as _
from .compatibility import get_template_from_string
from .conftest import only_uni_form, only_bootstrap3, only_bootstrap4, only_bootstrap
from .forms import TestForm, TestFormWithMedia
from crispy_forms.bootstrap import (
FieldWithButtons, PrependedAppendedText, AppendedText, PrependedText,
StrictButton
)
from crispy_forms.compatibility import text_type
from crispy_forms.helper import FormHelper, FormHelpersException
from crispy_forms.layout import (
Layout, Submit, Reset, Hidden, Button, MultiField, Field
)
from crispy_forms.utils import render_crispy_form
from crispy_forms.templatetags.crispy_forms_tags import CrispyFormNode
def test_inputs(settings):
form_helper = FormHelper()
form_helper.add_input(Submit('my-submit', 'Submit', css_class="button white"))
form_helper.add_input(Reset('my-reset', 'Reset'))
form_helper.add_input(Hidden('my-hidden', 'Hidden'))
form_helper.add_input(Button('my-button', 'Button'))
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': form_helper})
html = template.render(c)
assert 'button white' in html
assert 'id="submit-id-my-submit"' in html
assert 'id="reset-id-my-reset"' in html
assert 'name="my-hidden"' in html
assert 'id="button-id-my-button"' in html
if settings.CRISPY_TEMPLATE_PACK == 'uni_form':
assert 'submit submitButton' in html
assert 'reset resetButton' in html
assert 'class="button"' in html
else:
assert 'class="btn"' in html
assert 'btn btn-primary' in html
assert 'btn btn-inverse' in html
assert len(re.findall(r'<input[^>]+> <', html)) == 8
def test_invalid_form_method():
form_helper = FormHelper()
with pytest.raises(FormHelpersException):
form_helper.form_method = "superPost"
def test_form_with_helper_without_layout(settings):
form_helper = FormHelper()
form_helper.form_id = 'this-form-rocks'
form_helper.form_class = 'forms-that-rock'
form_helper.form_method = 'GET'
form_helper.form_action = 'simpleAction'
form_helper.form_error_title = 'ERRORS'
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy testForm form_helper %}
""")
# now we render it, with errors
form = TestForm({'password1': 'wargame', 'password2': 'god'})
form.is_valid()
c = Context({'testForm': form, 'form_helper': form_helper})
html = template.render(c)
    # Let's make sure everything loads right
assert html.count('<form') == 1
assert 'forms-that-rock' in html
assert 'method="get"' in html
assert 'id="this-form-rocks"' in html
assert 'action="%s"' % reverse('simpleAction') in html
if settings.CRISPY_TEMPLATE_PACK == 'uni_form':
assert 'class="uniForm' in html
assert "ERRORS" in html
assert "<li>Passwords dont match</li>" in html
    # Now let's remove the form tag and render it again. All the assertions above
    # that were true should now be false because the form tag is removed.
form_helper.form_tag = False
html = template.render(c)
assert '<form' not in html
assert 'forms-that-rock' not in html
assert 'method="get"' not in html
assert 'id="this-form-rocks"' not in html
def test_form_show_errors_non_field_errors():
form = TestForm({'password1': 'wargame', 'password2': 'god'})
form.helper = FormHelper()
form.helper.form_show_errors = True
form.is_valid()
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy testForm %}
""")
# First we render with errors
c = Context({'testForm': form})
html = template.render(c)
# Ensure those errors were rendered
assert '<li>Passwords dont match</li>' in html
assert text_type(_('This field is required.')) in html
assert 'error' in html
# Now we render without errors
form.helper.form_show_errors = False
c = Context({'testForm': form})
html = template.render(c)
# Ensure errors were not rendered
assert '<li>Passwords dont match</li>' not in html
assert text_type(_('This field is required.')) not in html
assert 'error' not in html
def test_html5_required():
form = TestForm()
form.helper = FormHelper()
form.helper.html5_required = True
html = render_crispy_form(form)
# 6 out of 7 fields are required and an extra one for the SplitDateTimeWidget makes 7.
assert html.count('required="required"') == 7
form = TestForm()
form.helper = FormHelper()
form.helper.html5_required = False
    html = render_crispy_form(form)
    assert html.count('required="required"') == 0
def test_media_is_included_by_default_with_uniform():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'uni_form'
html = render_crispy_form(form)
assert 'test.css' in html
assert 'test.js' in html
def test_media_is_included_by_default_with_bootstrap():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'bootstrap'
html = render_crispy_form(form)
assert 'test.css' in html
assert 'test.js' in html
def test_media_is_included_by_default_with_bootstrap3():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'bootstrap3'
html = render_crispy_form(form)
assert 'test.css' in html
assert 'test.js' in html
def test_media_is_included_by_default_with_bootstrap4():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'bootstrap4'
html = render_crispy_form(form)
assert 'test.css' in html
assert 'test.js' in html
def test_media_removed_when_include_media_is_false_with_uniform():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'uni_form'
form.helper.include_media = False
html = render_crispy_form(form)
assert 'test.css' not in html
assert 'test.js' not in html
def test_media_removed_when_include_media_is_false_with_bootstrap():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'bootstrap'
form.helper.include_media = False
html = render_crispy_form(form)
assert 'test.css' not in html
assert 'test.js' not in html
def test_media_removed_when_include_media_is_false_with_bootstrap3():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'bootstrap3'
form.helper.include_media = False
html = render_crispy_form(form)
assert 'test.css' not in html
assert 'test.js' not in html
def test_media_removed_when_include_media_is_false_with_bootstrap4():
form = TestFormWithMedia()
form.helper = FormHelper()
form.helper.template_pack = 'bootstrap4'
form.helper.include_media = False
html = render_crispy_form(form)
assert 'test.css' not in html
assert 'test.js' not in html
def test_attrs():
form = TestForm()
form.helper = FormHelper()
form.helper.attrs = {'id': 'TestIdForm', 'autocomplete': "off"}
html = render_crispy_form(form)
assert 'autocomplete="off"' in html
assert 'id="TestIdForm"' in html
def test_template_context():
helper = FormHelper()
helper.attrs = {
'id': 'test-form',
'class': 'test-forms',
'action': 'submit/test/form',
'autocomplete': 'off',
}
node = CrispyFormNode('form', 'helper')
context = node.get_response_dict(helper, {}, False)
assert context['form_id'] == "test-form"
assert context['form_attrs']['id'] == "test-form"
assert "test-forms" in context['form_class']
assert "test-forms" in context['form_attrs']['class']
assert context['form_action'] == "submit/test/form"
assert context['form_attrs']['action'] == "submit/test/form"
assert context['form_attrs']['autocomplete'] == "off"
def test_template_context_using_form_attrs():
helper = FormHelper()
helper.form_id = 'test-form'
helper.form_class = 'test-forms'
helper.form_action = 'submit/test/form'
node = CrispyFormNode('form', 'helper')
context = node.get_response_dict(helper, {}, False)
assert context['form_id'] == "test-form"
assert context['form_attrs']['id'] == "test-form"
assert "test-forms" in context['form_class']
assert "test-forms" in context['form_attrs']['class']
assert context['form_action'] == "submit/test/form"
assert context['form_attrs']['action'] == "submit/test/form"
def test_template_helper_access():
helper = FormHelper()
helper.form_id = 'test-form'
assert helper['form_id'] == 'test-form'
def test_without_helper(settings):
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy form %}
""")
c = Context({'form': TestForm()})
html = template.render(c)
    # Let's make sure everything loads right
assert '<form' in html
assert 'method="post"' in html
assert 'action' not in html
if settings.CRISPY_TEMPLATE_PACK == 'uni_form':
assert 'uniForm' in html
def test_template_pack_override_compact(settings):
current_pack = settings.CRISPY_TEMPLATE_PACK
override_pack = current_pack == 'uni_form' and 'bootstrap' or 'uni_form'
# {% crispy form 'template_pack_name' %}
template = get_template_from_string("""
{%% load crispy_forms_tags %%}
{%% crispy form "%s" %%}
""" % override_pack)
c = Context({'form': TestForm()})
html = template.render(c)
if current_pack == 'uni_form':
assert 'control-group' in html
else:
assert 'uniForm' in html
def test_template_pack_override_verbose(settings):
current_pack = settings.CRISPY_TEMPLATE_PACK
override_pack = current_pack == 'uni_form' and 'bootstrap' or 'uni_form'
# {% crispy form helper 'template_pack_name' %}
template = get_template_from_string("""
{%% load crispy_forms_tags %%}
{%% crispy form form_helper "%s" %%}
""" % override_pack)
c = Context({'form': TestForm(), 'form_helper': FormHelper()})
html = template.render(c)
if current_pack == 'uni_form':
assert 'control-group' in html
else:
assert 'uniForm' in html
def test_template_pack_override_wrong():
with pytest.raises(TemplateSyntaxError):
get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy form 'foo' %}
""")
def test_invalid_helper(settings):
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': "invalid"})
settings.CRISPY_FAIL_SILENTLY = settings.TEMPLATE_DEBUG = False
with pytest.raises(TypeError):
template.render(c)
def test_formset_with_helper_without_layout(settings):
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy testFormSet formset_helper %}
""")
form_helper = FormHelper()
form_helper.form_id = 'thisFormsetRocks'
form_helper.form_class = 'formsets-that-rock'
form_helper.form_method = 'POST'
form_helper.form_action = 'simpleAction'
TestFormSet = formset_factory(TestForm, extra=3)
testFormSet = TestFormSet()
c = Context({'testFormSet': testFormSet, 'formset_helper': form_helper, 'csrf_token': _get_new_csrf_key()})
html = template.render(c)
assert html.count('<form') == 1
assert html.count("<input type='hidden' name='csrfmiddlewaretoken'") == 1
# Check formset management form
assert 'form-TOTAL_FORMS' in html
assert 'form-INITIAL_FORMS' in html
assert 'form-MAX_NUM_FORMS' in html
assert 'formsets-that-rock' in html
assert 'method="post"' in html
assert 'id="thisFormsetRocks"' in html
assert 'action="%s"' % reverse('simpleAction') in html
if settings.CRISPY_TEMPLATE_PACK == 'uni_form':
assert 'class="uniForm' in html
def test_CSRF_token_POST_form():
form_helper = FormHelper()
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
# The middleware only initializes the CSRF token when processing a real request
# So using RequestContext or csrf(request) here does not work.
# Instead I set the key `csrf_token` to a CSRF token manually, which `csrf_token` tag uses
c = Context({'form': TestForm(), 'form_helper': form_helper, 'csrf_token': _get_new_csrf_key()})
html = template.render(c)
assert "<input type='hidden' name='csrfmiddlewaretoken'" in html
def test_CSRF_token_GET_form():
form_helper = FormHelper()
form_helper.form_method = 'GET'
template = get_template_from_string("""
{% load crispy_forms_tags %}
{% crispy form form_helper %}
""")
c = Context({'form': TestForm(), 'form_helper': form_helper, 'csrf_token': _get_new_csrf_key()})
html = template.render(c)
assert "<input type='hidden' name='csrfmiddlewaretoken'" not in html
def test_disable_csrf():
form = TestForm()
helper = FormHelper()
helper.disable_csrf = True
html = render_crispy_form(form, helper, {'csrf_token': _get_new_csrf_key()})
assert 'csrf' not in html
def test_render_hidden_fields():
test_form = TestForm()
test_form.helper = FormHelper()
test_form.helper.layout = Layout(
'email'
)
test_form.helper.render_hidden_fields = True
html = render_crispy_form(test_form)
assert html.count('<input') == 1
# Now hide a couple of fields
for field in ('password1', 'password2'):
test_form.fields[field].widget = forms.HiddenInput()
html = render_crispy_form(test_form)
assert html.count('<input') == 3
assert html.count('hidden') == 2
if django.VERSION < (1, 5):
assert html.count('type="hidden" name="password1"') == 1
assert html.count('type="hidden" name="password2"') == 1
else:
assert html.count('name="password1" type="hidden"') == 1
assert html.count('name="password2" type="hidden"') == 1
def test_render_required_fields():
test_form = TestForm()
test_form.helper = FormHelper()
test_form.helper.layout = Layout(
'email'
)
test_form.helper.render_required_fields = True
html = render_crispy_form(test_form)
assert html.count('<input') == 7
def test_helper_custom_template():
form = TestForm()
form.helper = FormHelper()
form.helper.template = 'custom_form_template.html'
html = render_crispy_form(form)
assert "<h1>Special custom form</h1>" in html
def test_helper_custom_field_template():
form = TestForm()
form.helper = FormHelper()
form.helper.layout = Layout(
'password1',
'password2',
)
form.helper.field_template = 'custom_field_template.html'
html = render_crispy_form(form)
assert html.count("<h1>Special custom field</h1>") == 2
@only_uni_form
def test_form_show_errors():
form = TestForm({
'email': 'invalidemail',
'first_name': 'first_name_too_long',
'last_name': 'last_name_too_long',
'password1': 'yes',
'password2': 'yes',
})
form.helper = FormHelper()
form.helper.layout = Layout(
Field('email'),
Field('first_name'),
Field('last_name'),
Field('password1'),
Field('password2'),
)
form.is_valid()
form.helper.form_show_errors = True
html = render_crispy_form(form)
assert html.count('error') == 9
form.helper.form_show_errors = False
html = render_crispy_form(form)
assert html.count('error') == 0
@only_uni_form
def test_multifield_errors():
form = TestForm({
'email': 'invalidemail',
'password1': 'yes',
'password2': 'yes',
})
form.helper = FormHelper()
form.helper.layout = Layout(
MultiField('legend', 'email')
)
form.is_valid()
form.helper.form_show_errors = True
html = render_crispy_form(form)
assert html.count('error') == 3
    # Reset layout to avoid side effects
form.helper.layout = Layout(
MultiField('legend', 'email')
)
form.helper.form_show_errors = False
html = render_crispy_form(form)
assert html.count('error') == 0
@only_bootstrap
def test_bootstrap_form_show_errors():
form = TestForm({
'email': 'invalidemail',
'first_name': 'first_name_too_long',
'last_name': 'last_name_too_long',
'password1': 'yes',
'password2': 'yes',
})
form.helper = FormHelper()
form.helper.layout = Layout(
AppendedText('email', 'whatever'),
PrependedText('first_name', 'blabla'),
PrependedAppendedText('last_name', 'foo', 'bar'),
AppendedText('password1', 'whatever'),
PrependedText('password2', 'blabla'),
)
form.is_valid()
form.helper.form_show_errors = True
html = render_crispy_form(form)
assert html.count('error') == 6
form.helper.form_show_errors = False
html = render_crispy_form(form)
assert html.count('error') == 0
@only_bootstrap
def test_error_text_inline(settings):
form = TestForm({'email': 'invalidemail'})
form.helper = FormHelper()
layout = Layout(
AppendedText('first_name', 'wat'),
PrependedText('email', '@'),
PrependedAppendedText('last_name', '@', 'wat'),
)
form.helper.layout = layout
form.is_valid()
html = render_crispy_form(form)
help_class = 'help-inline'
if settings.CRISPY_TEMPLATE_PACK in ['bootstrap3', 'bootstrap4']:
help_class = 'help-block'
matches = re.findall(
'<span id="error_\d_\w*" class="%s"' % help_class, html, re.MULTILINE
)
assert len(matches) == 3
form = TestForm({'email': 'invalidemail'})
form.helper = FormHelper()
form.helper.layout = layout
form.helper.error_text_inline = False
html = render_crispy_form(form)
matches = re.findall('<p id="error_\d_\w*" class="help-block"', html, re.MULTILINE)
assert len(matches) == 3
@only_bootstrap
def test_error_and_help_inline():
form = TestForm({'email': 'invalidemail'})
form.helper = FormHelper()
form.helper.error_text_inline = False
form.helper.help_text_inline = True
form.helper.layout = Layout('email')
form.is_valid()
html = render_crispy_form(form)
# Check that help goes before error, otherwise CSS won't work
help_position = html.find('<span id="hint_id_email" class="help-inline">')
error_position = html.find('<p id="error_1_id_email" class="help-block">')
assert help_position < error_position
    # Vice versa
form = TestForm({'email': 'invalidemail'})
form.helper = FormHelper()
form.helper.error_text_inline = True
form.helper.help_text_inline = False
form.helper.layout = Layout('email')
form.is_valid()
html = render_crispy_form(form)
# Check that error goes before help, otherwise CSS won't work
error_position = html.find('<span id="error_1_id_email" class="help-inline">')
help_position = html.find('<p id="hint_id_email" class="help-block">')
assert error_position < help_position
@only_bootstrap
def test_form_show_labels():
form = TestForm()
form.helper = FormHelper()
form.helper.layout = Layout(
'password1',
FieldWithButtons(
'password2',
StrictButton("Confirm")
),
PrependedText(
'first_name',
'Mr.'
),
AppendedText(
'last_name',
'@'
),
PrependedAppendedText(
'datetime_field',
'on',
'secs'
)
)
form.helper.form_show_labels = False
html = render_crispy_form(form)
assert html.count("<label") == 0
@only_bootstrap3
def test_label_class_and_field_class():
form = TestForm()
form.helper = FormHelper()
form.helper.label_class = 'col-lg-2'
form.helper.field_class = 'col-lg-8'
html = render_crispy_form(form)
    assert '<div class="form-group"> <div class="controls col-lg-offset-2 col-lg-8"> <div id="div_id_is_company" class="checkbox"> <label for="id_is_company" class=""> <input class="checkboxinput checkbox" id="id_is_company" name="is_company" type="checkbox" />' in html
assert html.count('col-lg-8') == 7
form.helper.label_class = 'col-sm-3'
form.helper.field_class = 'col-sm-8'
html = render_crispy_form(form)
    assert '<div class="form-group"> <div class="controls col-sm-offset-3 col-sm-8"> <div id="div_id_is_company" class="checkbox"> <label for="id_is_company" class=""> <input class="checkboxinput checkbox" id="id_is_company" name="is_company" type="checkbox" />' in html
assert html.count('col-sm-8') == 7
@only_bootstrap3
def test_template_pack():
form = TestForm()
form.helper = FormHelper()
form.helper.template_pack = 'uni_form'
html = render_crispy_form(form)
assert 'form-control' not in html
assert 'ctrlHolder' in html
@only_bootstrap4
def test_bootstrap4_label_class_and_field_class():
form = TestForm()
form.helper = FormHelper()
form.helper.label_class = 'col-lg-2'
form.helper.field_class = 'col-lg-8'
html = render_crispy_form(form)
assert '<div class="form-group row">' in html
assert '<div class="controls col-lg-offset-2 col-lg-8">' in html
assert html.count('col-lg-8') == 7
form.helper.label_class = 'col-sm-3'
form.helper.field_class = 'col-sm-8'
html = render_crispy_form(form)
assert '<div class="form-group row">' in html
assert '<div class="controls col-sm-offset-3 col-sm-8">' in html
assert html.count('col-sm-8') == 7
@only_bootstrap4
def test_bootstrap4_template_pack():
form = TestForm()
form.helper = FormHelper()
form.helper.template_pack = 'uni_form'
html = render_crispy_form(form)
assert 'form-control' not in html
assert 'ctrlHolder' in html
|
RamezIssac/django-crispy-forms
|
crispy_forms/tests/test_form_helper.py
|
Python
|
mit
| 22,508
|
#!/usr/bin/env python
__author__ = 'rochelle'
import datetime, time, calendar
import optparse, sys, os, traceback, errno
_defaults = {
'gdal_dir' : 'C:\\OSGeo4W\\bin',
'mrt_dir' : 'C:\\Program Files\\MODIS_MRT\\bin',
'temp_dir' : 'T:\WFP2\WFP2\Temp',
'base_data_dir' : 'T:\WFP2\VAMPIRE\data\Download',
'base_product_dir' : 'T:\WFP2\IDN_GIS',
'country_names' : {'IDN' : 'Indonesia',
'TLS' : 'Timor-Leste',
'PNG' : 'Papua New Guinea',
'VNM' : 'Vietnam',
'PHL' : 'Philippines'},
'country_tiles' : {'IDN' : 'h27v08,h27v09,h27v10,h28v08,h28v09,h28v10,h29v08,h29v09,h29v10,h30v08,h30v09,h30v10,h31v08,h31v09,h31v10,h32v08,h32v09,h32v10',
},
'chirps_boundary_file' : '_cli_chirps_20_005_deg_grid_diss_a.shp',
'lst_min_file' : {'IDN' : 'idn_cli_MOD11C3.2000-2014.{0}.14yrs.min.tif'
},
'lst_max_file' : {'IDN' : 'idn_cli_MOD11C3.2000-2014.{0}.14yrs.max.tif'},
'tci_file' : {'IDN' : 'idn_cli_MOD11C3.{0}{1}.005.tif'},
}
def generateHeaderDirectory(output):
try:
pfile = open(output, 'w')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
file_string = """
directory:
GDAL: {gdal_dir}
MRT: {mrt_dir}
""".format(gdal_dir=_defaults['gdal_dir'], mrt_dir=_defaults['mrt_dir'])
pfile.write(file_string)
pfile.close()
return pfile.name
def generateHeaderRun(output):
try:
pfile = open(output, 'a')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
file_string = """
run:
"""
pfile.write(file_string)
pfile.close()
return pfile.name
def generateHeaderCHIRPS(output):
try:
pfile = open(output, 'a')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
file_string = """
temp: {temp_dir}
CHIRPS:
filenames:
input_prefix: chirps-v2.0
input_extension: .tiff
output_prefix: idn_cli_chirps-v2.0
output_ext: .tif
""".format(temp_dir=_defaults['temp_dir'])
pfile.write(file_string)
pfile.close()
return pfile.name
def generateRainfallLongTermAverageConfig(country, interval, start_date, output):
try:
pfile = open(output, 'a')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
year = start_date.strftime("%Y")
month = start_date.strftime("%m")
if interval == 'monthly':
_interval_name = 'month'
else:
_interval_name = interval
_input_dir = "{0}\Download\CHIRPS\\{1}\\{2}".format(_defaults['base_data_dir'], interval.capitalize(),
country)
if country == 'IDN':
_output_dir = "{0}\\01_Data\\02_IDN\Rasters\Climate\Precipitation\CHIRPS\{1}\Statistics_By{2}" \
.format(_defaults['base_product_dir'], interval.capitalize(), _interval_name.capitalize())
else:
_output_dir = "{0}\\01_Data\\03_Regional\{1}\Rasters\Climate\Precipitation\CHIRPS\{2}\Statistics_By{3}" \
.format(_defaults['base_product_dir'], country, interval.capitalize(), _interval_name.capitalize())
file_string = """
# - process: CHIRPS
# type: monthly
# input_dir: {input_dir}
# output_dir: {output_dir}""".format(input_dir=_input_dir, output_dir=_output_dir)
pfile.write(file_string)
pfile.close()
return pfile.name
def generateRainfallAnomalyConfig(country, interval, start_date, output):
try:
pfile = open(output, 'a')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
year = start_date.strftime("%Y")
month = start_date.strftime("%m")
if interval == 'monthly':
_interval_name = 'month'
else:
_interval_name = interval
_dl_output = "{0}\CHIRPS\{1}".format(_defaults['base_data_dir'], interval.capitalize())
_crop_output_pattern = "'{0}".format(country.lower()) + "_cli_{product}.{year}.{month}{extension}'"
if country == 'IDN':
_boundary_file = "{0}\\01_Data\\02_IDN\Shapefiles\Boundaries\Subset\CHIRPS\\" \
"idn_cli_chirps_20_005_deg_grid_diss_a.shp".format(_defaults['base_product_dir'])
_longterm_avg_file = "{0}\\01_Data\\02_IDN\Rasters\Climate\Precipitation\CHIRPS" \
"\{2}\Statistics_By{3}\idn_cli_chirps-v2.0.1981-2014.{1}.{4}.34yrs.avg.tif".format(
_defaults['base_product_dir'], month, interval.capitalize(), _interval_name.capitalize(), interval.lower())
else:
_boundary_file = "{0}\\01_Data\\03_Regional\{01}\Shapefiles\Boundaries\Subset\CHIRPS\\" \
"{02}_cli_chirps_20_005_deg_grid_diss_a.shp".format(
_defaults['base_product_dir'], country, country.lower())
file_pattern = '^(?P<product>chirps-v2.0).(?P<year>\d{4}).(?P<month>\d{2})(?P<extension>\.tif).*'
file_string = """
## Processing chain begin - Compute Rainfall Anomaly\n
# download CHIRPS precipitation data for {0}
- process: CHIRPS
type: download
interval: {1}
output_dir: {2}
dates: [{3}-{4}]
# crop data to region
- process: Raster
type: crop
input_dir: {2}
output_dir: {2}\{9}
file_pattern: {13}
output_pattern: {6}
boundary_file: {7}
# compute rainfall anomaly
- process: Analysis
type: rainfall_anomaly
current_file: {2}\{9}\{10}_cli_chirps-v2.0.{3}.{4}.tif
longterm_avg_file: {11}
output_file: {12}\\05_Analysis\\03_Early_Warning\Rainfall_Anomaly\{10}_cli_chirps-v2.0.{3}.{4}.ratio_anom.tif
## Processing chain end - Compute Rainfall Anomaly
""".format(_defaults['country_names'][country], interval, _dl_output, year, month, interval.capitalize(),
_crop_output_pattern, _boundary_file, _defaults['base_data_dir'], country, country.lower(),
_longterm_avg_file, _defaults['base_product_dir'], file_pattern)
pfile.write(file_string)
pfile.close()
return pfile.name
def generateVCIConfig(country, interval, start_date, output):
try:
pfile = open(output, 'a')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
year = start_date.strftime("%Y")
month = start_date.strftime("%m")
basedate = datetime.datetime.strptime("2000.{0}.01".format(month), "%Y.%m.%d")
dayofyear = basedate.timetuple().tm_yday
if calendar.isleap(int(year)) and dayofyear > 60:
dayofyear = dayofyear - 1
if country == 'IDN':
_boundary_file = "{0}\\01_Data\\02_IDN\ShapeFiles\Boundaries\Subset\MODIS\idn_phy_modis_1km_grid_diss_a.shp".format(
_defaults['base_product_dir'])
_output_pattern = 'idn_phy_{product}.{year}.{month}.{day}.{version}.{subset}{extension}'
_EVI_max_file = '{0}\\01_Data\\02_IDN\Rasters\Vegetation\MOD13A3.EVI\Statistics_ByMonth\idn_phy_MOD13A3' \
'.2000-2015.{1}.1_km_monthly_EVI.15yrs.max.tif'.format(_defaults['base_product_dir'],
str(dayofyear).zfill(3))
_EVI_min_file = '{0}\\01_Data\\02_IDN\Rasters\Vegetation\MOD13A3.EVI\Statistics_ByMonth\idn_phy_MOD13A3' \
'.2000-2015.{1}.1_km_monthly_EVI.15yrs.min.tif'.format(_defaults['base_product_dir'],
str(dayofyear).zfill(3))
else:
_boundary_file = "{0}\\01_Data\\03_Regional\{1}\ShapeFiles\Boundaries\Subset\MODIS\{2}_phy_modis_1km_grid_diss_a.shp".format(
_defaults['base_product_dir'], country, country.lower())
_output_pattern = "'{0}".format(country.lower()) + "_phy_{product}.{year}.{month}.{day}.{version}.{subset}{extension}'"
_EVI_max_file = '{0}\\01_Data\\03_Regional\{1}\Rasters\Vegetation\MOD13A3.EVI\Statistics_ByMonth\idn_phy_MOD13A3' \
'.2000-2015.{2}.1_km_monthly_EVI.15yrs.max.tif'.format(_defaults['base_product_dir'], country,
str(dayofyear).zfill(3))
_EVI_min_file = '{0}\\01_Data\\03_Regional\{1}\Rasters\Vegetation\MOD13A3.EVI\Statistics_ByMonth\idn_phy_MOD13A3' \
'.2000-2015.{2}.1_km_monthly_EVI.15yrs.min.tif'.format(_defaults['base_product_dir'], country,
str(dayofyear).zfill(3))
_output_file = "{product_dir}\\05_Analysis\\03_Early_Warning\Vegetation_Condition_Index" \
"\{country_l}_phy_MOD13A3.{year}.{month}.1_km_monthly_EVI_VCI.tif".format(product_dir=_defaults['base_product_dir'],
country_l=country.lower(),
year=year, month=month)
pattern = '^(?P<product>MOD\d{2}A\d{1}).(?P<year>\d{4}).(?P<month>\d{2}).(?P<day>\d{2}).(?P<version>\d{3}).(?P<subset>.*)(?P<extension>\.tif$)'
file_string = """
## Processing chain begin - Compute Vegetation Condition Index
# download MODIS vegetation data (MOD13A3.005) tiles for {country_name} and mosaic
- process: MODIS
type: download
output_dir: {data_dir}\MODIS\MOD13A3\HDF_MOD
mosaic_dir: {data_dir}\MODIS\MOD13A3\Processed\HDF_MOD
product: MOD13A3.005
tiles: {tiles}
dates: [{year}-{month}]
# extract MODIS NDVI and EVI
- process: MODIS
type: extract
layer: NDVI
input_dir: {data_dir}\MODIS\MOD13A3\Processed\HDF_MOD
output_dir: {data_dir}\MODIS\MOD13A3\Processed\NDVI
- process: MODIS
type: extract
layer: EVI
input_dir: {data_dir}\MODIS\MOD13A3\Processed\HDF_MOD
output_dir: {data_dir}\MODIS\MOD13A3\Processed\EVI
# crop data to region
- process: Raster
type: crop
file_pattern: {file_pattern}
output_pattern: {pattern}
input_dir: {data_dir}\MODIS\MOD13A3\Processed\EVI
output_dir: {data_dir}\MODIS\MOD13A3\Processed\{country}_EVI
boundary_file: {boundary}
- process: Analysis
type: VCI
current_file: {data_dir}\MODIS\MOD13A3\Processed\{country}_EVI\{country_l}_phy_MOD13A3.{year}.{month}.01.005.1_km_monthly_EVI.tif
EVI_max_file: {evi_max}
EVI_min_file: {evi_min}
output_file: {output_file}
## Processing chain end - Compute Vegetation Condition Index
""".format(country_name=_defaults['country_names'][country], data_dir=_defaults['base_data_dir'],
tiles=_defaults['country_tiles'][country], year=year, month=month, file_pattern=pattern,
pattern=_output_pattern, country=country,
boundary=_boundary_file, country_l=country.lower(), evi_max=_EVI_max_file, evi_min=_EVI_min_file,
output_file=_output_file)
pfile.write(file_string)
pfile.close()
return pfile.name
def generateTCIConfig(country, interval, start_date, output):
try:
pfile = open(output, 'a')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
year = start_date.strftime("%Y")
month = start_date.strftime("%m")
basedate = datetime.datetime.strptime("2000.{0}.01".format(month), "%Y.%m.%d")
dayofyear = basedate.timetuple().tm_yday
_input_pattern = '^(?P<product>MOD\d{2}C\d{1}).A(?P<year>\d{4})(?P<dayofyear>\d{3}).(?P<version>\d{3}).(?P<code>.*).(?P<subset>hdf_\d{2})(?P<extension>\.tif$)'
_avg_output_pattern = "'{product}.A{year}{dayofyear}.{version}.{code}.avg{extension}'"
if country == 'IDN':
_boundary_file = "{0}\\01_Data\\02_IDN\ShapeFiles\Boundaries\Subset\MODIS\idn_phy_modis_lst_005_grid_diss_a.shp".format(
_defaults['base_product_dir'])
_output_pattern = 'idn_cli_{product}.{year}{dayofyear}.{version}{extension}'
_LST_max_file = '{0}\\01_Data\\02_IDN\Rasters\Climate\Temperature\MODIS\MOD11C3\Statistics_byMonth' \
'\{1}'.format(_defaults['base_product_dir'], (_defaults['lst_max_file'][country]).format(month))
_LST_min_file = '{0}\\01_Data\\02_IDN\Rasters\Climate\Temperature\MODIS\MOD11C3\Statistics_byMonth' \
'\{1}'.format(_defaults['base_product_dir'], (_defaults['lst_min_file'][country]).format(month))
_TCI_file = '{0}\MODIS\MOD11C3\Processed\IDN\LST\{1}' \
.format(_defaults['base_data_dir'], (_defaults['tci_file'][country]) \
.format(year, str(dayofyear).zfill(3)))
else:
_boundary_file = "{0}\\01_Data\\03_Regional\{1}\ShapeFiles\Boundaries\Subset\MODIS\{2}_phy_modis_lst_005_grid_diss_a.shp".format(
_defaults['base_product_dir'], country, country.lower())
_output_pattern = "'{0}".format(country.lower()) + "_cli_{product}.{year}.{month}.{day}.{version}{extension}'"
_LST_max_file = '{0}\\01_Data\\03_Regional\{1}\Rasters\Climate\Temperature\MODIS\MOD11C3\Statistics_byMonth' \
'\{2}_cli_MOD11C3.2000.2014.{3}.14yrs.max.tif'.format(_defaults['base_product_dir'], country,
country.lower(), month)
_LST_min_file = '{0}\\01_Data\\03_Regional\{1}\Rasters\Climate\Temperature\MODIS\MOD11C3\Statistics_byMonth' \
'\{2}_cli_MOD11C3.2000.2014.{3}.14yrs.min.tif'.format(_defaults['base_product_dir'], country,
country.lower(), month)
_TCI_file = '{0}\MODIS\MOD11C3\Processed\{1}\LST\{2}_cli_MOD11C3.{3}{4}' \
'.005.tif'.format(_defaults['base_data_dir'], country, country.lower(), year, str(dayofyear).zfill(3))
file_pattern = '^(?P<product>MOD\d{2}C\d{1}).(?P<year>\d{4})(?P<dayofyear>\d{3}).(?P<version>\d{3}).(?P<average>avg)(?P<extension>\.tif$)'
file_string = """
## Processing chain begin - Compute Temperature Condition Index
# download MODIS temperature data (MOD11C3.005)
- process: MODIS
type: download
output_dir: {data_dir}\MODIS\MOD11C3\HDF_MOD
product: MOD11C3.005
dates: [{year}-{month}]
- process: MODIS
type: extract
layer: LST_Day
input_dir: {data_dir}\MODIS\MOD11C3\HDF_MOD\{year}.{month}.01
output_dir: {data_dir}\MODIS\MOD11C3\Processed\Day
- process: MODIS
type: extract
layer: LST_Night
input_dir: {data_dir}\MODIS\MOD11C3\HDF_MOD\{year}.{month}.01
output_dir: {data_dir}\MODIS\MOD11C3\Processed\Night
- process: MODIS
type: temp_average
directory_day: {data_dir}\MODIS\MOD11C3\Processed\Day
directory_night: {data_dir}\MODIS\MOD11C3\Processed\Night
directory_output: {data_dir}\MODIS\MOD11C3\Processed\Average
input_pattern: {input_pattern}
output_pattern: {avg_pattern}
- process: Raster
type: crop
file_pattern: {file_pattern}
output_pattern: {country_output_pattern}
input_dir: {data_dir}\MODIS\MOD11C3\Processed\Average
output_dir: {data_dir}\MODIS\MOD11C3\Processed\{country}\LST
boundary_file: {boundary}
- process: Analysis
type: TCI
current_file: {tci_file}
LST_max_file: {lst_max}
LST_min_file: {lst_min}
output_file: {product_dir}\\05_Analysis\\03_Early_Warning\Temperature_Condition_Index\{country_l}_cli_MOD11C3.{year}.{month}.tci.tif
## Processing chain end - Compute Temperature Condition Index
""".format(data_dir=_defaults['base_data_dir'], year=year, month=month, input_pattern=_input_pattern,
avg_pattern=_avg_output_pattern, country=country, country_output_pattern=_output_pattern,
boundary=_boundary_file, tci_file=_TCI_file, lst_max=_LST_max_file, lst_min=_LST_min_file, country_l=country.lower(),
product_dir=_defaults['base_product_dir'], file_pattern=file_pattern)
pfile.write(file_string)
pfile.close()
return pfile.name
def generateVHIConfig(country, interval, start_date, output):
try:
pfile = open(output, 'a')
except IOError as e:
if e.errno == errno.EACCES:
return "Error creating file " + output
# Not a permission error.
raise
else:
with pfile:
year = start_date.strftime("%Y")
month = start_date.strftime("%m")
basedate = datetime.datetime.strptime("2000.{0}.01".format(month), "%Y.%m.%d")
dayofyear = basedate.timetuple().tm_yday
_TCI_file = '{0}\\05_Analysis\\03_Early_Warning\Temperature_Condition_Index\{1}_cli_MOD11C3.{2}.{3}' \
'.tci.tif'.format(_defaults['base_product_dir'], country.lower(), year, month)
_VCI_file = '{product_dir}\\05_Analysis\\03_Early_Warning\Vegetation_Condition_Index' \
'\{country_l}_phy_MOD13A3.{year}.{month}.1_km_monthly_EVI_VCI.tif'.format\
(product_dir=_defaults['base_product_dir'],
country_l=country.lower(),
year=year, month=month)
file_string = """
## Processing chain begin - Compute Vegetation Health Index
- process: Analysis
type: VHI
VCI_file: {vci_file}
TCI_file: {tci_file}
output_file: {product_dir}\\05_Analysis\\03_Early_Warning\Vegetation_Health_Index\{country_l}_cli_MOD11C3.{year}.{month}.1_km_monthly_EVI_LST_VHI.tif
## Processing chain end - Compute Vegetation Health Index
""".format(year=year, month=month, country=country, tci_file=_TCI_file,
vci_file=_VCI_file, country_l=country.lower(),
product_dir=_defaults['base_product_dir'])
pfile.write(file_string)
pfile.close()
return pfile.name
def generateConfig(country, product, interval, start_date, output):
generateHeaderDirectory(output)
if product == "rainfall anomaly":
generateHeaderCHIRPS(output)
generateHeaderRun(output)
generateRainfallAnomalyConfig(country, interval, start_date, output)
elif product == "vhi":
generateHeaderRun(output)
generateVCIConfig(country, interval, start_date, output)
generateTCIConfig(country, interval, start_date, output)
generateVHIConfig(country, interval, start_date, output)
elif product == "rainfall_longterm_average":
generateHeaderCHIRPS(output)
generateHeaderRun(output)
generateRainfallLongTermAverageConfig(country, interval, start_date, output)
return 0
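# Example invocation (hypothetical values; in practice the optparse options
# below drive this):
#
#   generateConfig('IDN', 'vhi', 'monthly',
#                  datetime.datetime(2016, 3, 1), 'vampire_run.yml')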
if __name__ == '__main__':
try:
start_time = time.time()
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(), usage=globals()['__doc__'], version='$Id$')
parser.add_option('-v', '--verbose', action='store_true', default=False, help='verbose output')
parser.add_option('-c', '--country', dest='country', action='store', help='country id')
parser.add_option('-p', '--product', dest='product', action='store', help='product')
parser.add_option('-o', '--output', dest='output', action='store', help='output filename')
parser.add_option('-i', '--interval', dest='interval', action='store', help='interval')
parser.add_option('-d', '--start_date', dest='start_date', action='store', help='start year-month')
(options, args) = parser.parse_args()
#if len(args) < 1:
# parser.error ('missing argument')
if options.verbose: print time.asctime()
_country = None
if options.country:
_country = options.country
print 'country=', _country
_product = None
if options.product:
_product = options.product
print 'product=', _product
_output = None
if options.output:
_output = options.output
print 'output=', _output
_interval = None
if options.interval:
_interval = options.interval
print 'interval=', _interval
_start_date = None
if options.start_date:
_start_date = datetime.datetime.strptime(options.start_date, "%Y-%m")
print 'start_date=', _start_date
generateConfig(_country, _product, _interval, _start_date, _output)
if options.verbose: print time.asctime()
if options.verbose: print 'TOTAL TIME IN MINUTES:',
if options.verbose: print (time.time() - start_time) / 60.0
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
|
spatialexplore/idn_vam_wfp
|
python/vampire/configGenerator.py
|
Python
|
mit
| 22,343
|
from nose.tools import assert_equal
from .... import make
from ....core.mod import mod
from ...ut import need_scrapyd
from .. import name
@need_scrapyd
def test_spy_rss():
app = make()
with app.app_context():
for query in [
'CLASSIC MILK+PEACE and ALIEN',
'Beyond the SKY 混沌',
]:
d = mod(name).spy(query, 60)
assert_equal(d.total, 0)
assert_equal(len(d.arts), 0)
|
Answeror/torabot
|
torabot/mods/tora/test/test_spy.py
|
Python
|
mit
| 453
|
from login import login
from logout import logout
from facebook import fbconnect, fbdisconnect
from google import gconnect, gdisconnect
from github import ghconnect, ghdisconnect
|
stonescar/item-catalog
|
modules/views/login/__init__.py
|
Python
|
mit
| 179
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "atlas.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
brainy-minds/brainy-atlas
|
atlas/manage.py
|
Python
|
mit
| 248
|
import DoesNotComputeLocations
print DoesNotComputeLocations.locs[1].getDesc()
print DoesNotComputeLocations.locs[1].whatItemNeeded()
|
MrFlash67/Does-Not-Compute
|
locations1_test.py
|
Python
|
mit
| 133
|
import os
from setuptools import setup, find_packages
VERSION = __import__('psi').__version__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='django-psi',
version=VERSION,
description='Google Pagespeed Insights for your Django project.',
author='Kevin Fricovsky',
author_email='kevin@montylounge.com',
url='https://github.com/montylounge/django-psi/',
packages=find_packages(),
install_requires=['Django>=1.4','google-api-python-client==1.2'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
],
)
|
montylounge/django-psi
|
setup.py
|
Python
|
mit
| 849
|
class ftpinformation:
def __init__(self):
self.url = "YOUR-URL"
self.user = "YOUR-USERNAME"
self.password = "YOUR-PASSWORD"
self.port = 21
|
ShashankSanjay/HOLLERVERSE-HACKMIT
|
twilio/sftpinfo.py
|
Python
|
mit
| 148
|
# -*- encoding: utf-8 -*-
"""
Cron function
"""
import urllib2
from presence_analyzer.config import (
USERS_XML_FILE,
USERS_XML_URL,
)
def update_users_file():
    """
    Download the current users data XML.
    """
    remote_file = urllib2.urlopen(USERS_XML_URL)
    data = remote_file.read()
    remote_file.close()
    with open(USERS_XML_FILE, "w") as local_file:
        local_file.write(data)
    return data
if __name__ == '__main__':
update_users_file()
|
stxnext-kindergarten/presence-analyzer-klogaciuk
|
src/presence_analyzer/cron.py
|
Python
|
mit
| 489
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-01 04:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app.core', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='empresa',
old_name='cuil',
new_name='CUIT',
),
]
|
agustinhansen/SIDECO
|
app/core/migrations/0002_auto_20171001_0134.py
|
Python
|
mit
| 415
|
"""
This module holds all of the networking code for the game. It has the following
submodules:
#. :mod:`.server`: This is the actual code for the server and it handles the
webhosting and creating connections.
#. :mod:`.websocket`: This contains the code for creating and handling all the
websocket based connections.
#. :mod:`.telnet`: This contains the code for creating and handling all the
telnet based connections.
#. :mod:`.company`: This contains the code for updating the clients that are
companies and making sure that events from these clients are handled in the
simulation.
#. :mod:`.party`: This contains the code for updating the clients that are
parties and making sure that events from these clients are handled in the
simulation.
"""
|
Energy-Transistion/etg
|
etg/server/__init__.py
|
Python
|
mit
| 772
|
"""Runs the PyExtTest project against all installed python instances."""
import sys, subprocess, python_installations
if __name__ == '__main__':
num_failed_tests = 0
for installation in python_installations.get_python_installations():
# Skip versions with no executable.
        if installation.exec_path is None:
print("Skipping (no executable)", installation, end="\n\n", flush=True)
continue
# Only test against official CPython installations.
if installation.company != "PythonCore":
            print("Skipping (not PythonCore)", installation, end="\n\n", flush=True)
continue
# Also skip versions before 2.7 since they don't have symbols.
version = tuple(int(n) for n in installation.sys_version.split("."))
if version < (2, 7):
print("Skipping (too old)", installation, end="\n\n", flush=True)
continue
# Create the dump files.
print("Creating dump files with python executable:", installation.exec_path, flush=True)
subprocess.check_call([installation.exec_path, "object_types.py"])
subprocess.check_call([installation.exec_path, "fibonacci_test.py"])
subprocess.check_call([installation.exec_path, "object_details.py"])
subprocess.check_call([installation.exec_path, "localsplus_test.py"])
# Run the tests against the dump files.
py_ext_test_exe = sys.argv[1] if len(sys.argv) > 1 else "../../x64/Debug/PyExtTest.exe"
num_failed_tests += subprocess.call(py_ext_test_exe)
sys.exit(num_failed_tests)
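# Usage sketch (inferred from the argv handling above):
#   python run_all_tests.py [path/to/PyExtTest.exe]
# The process exit code is the total number of failed test runs.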
|
SeanCline/PyExt
|
test/scripts/run_all_tests.py
|
Python
|
mit
| 1,630
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-18 14:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='order',
name='address',
field=models.CharField(max_length=250, verbose_name='address'),
),
migrations.AlterField(
model_name='order',
name='city',
field=models.CharField(max_length=100, verbose_name='city'),
),
migrations.AlterField(
model_name='order',
name='email',
field=models.EmailField(max_length=254, verbose_name='email'),
),
migrations.AlterField(
model_name='order',
name='first_name',
field=models.CharField(max_length=50, verbose_name='first name'),
),
migrations.AlterField(
model_name='order',
name='last_name',
field=models.CharField(max_length=50, verbose_name='last name'),
),
migrations.AlterField(
model_name='order',
name='postal_code',
field=models.CharField(max_length=20, verbose_name='postal code'),
),
]
|
pauljherrera/avantiweb
|
orders/migrations/0002_auto_20170318_1137.py
|
Python
|
mit
| 1,360
|
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""utilities methods and classes for checkers
Base id of standard checkers (used in msg and report ids):
01: base
02: classes
03: format
04: import
05: misc
06: variables
07: exceptions
08: similar
09: design_analysis
10: newstyle
11: typecheck
12: logging
13: string_format
14: string_constant
15-50: not yet used: reserved for future internal checkers.
51-99: perhaps used: reserved for external checkers
The raw_metrics checker has no number associated since it doesn't emit any
messages nor reports. XXX not true, emit a 07 report !
"""
import tokenize
from os import listdir
from os.path import dirname, join, isdir, splitext
from logilab.astng.utils import ASTWalker
from logilab.common.modutils import load_module_from_file
from logilab.common.configuration import OptionsProviderMixIn
from pylint.reporters import diff_string, EmptyReport
def table_lines_from_stats(stats, old_stats, columns):
"""get values listed in <columns> from <stats> and <old_stats>,
    and return a formatted list of values, designed to be given to a
ureport.Table object
"""
lines = []
for m_type in columns:
new = stats[m_type]
format = str
if isinstance(new, float):
format = lambda num: '%.3f' % num
old = old_stats.get(m_type)
if old is not None:
diff_str = diff_string(old, new)
old = format(old)
else:
old, diff_str = 'NC', 'NC'
lines += (m_type.replace('_', ' '), format(new), old, diff_str)
return lines
class BaseChecker(OptionsProviderMixIn, ASTWalker):
"""base class for checkers"""
# checker name (you may reuse an existing one)
name = None
# options level (0 will be displaying in --help, 1 in --long-help)
level = 1
    # ordered list of options to control the checker behaviour
options = ()
# messages issued by this checker
msgs = {}
# reports issued by this checker
reports = ()
def __init__(self, linter=None):
"""checker instances should have the linter as argument
linter is an object implementing ILinter
"""
ASTWalker.__init__(self, self)
self.name = self.name.lower()
OptionsProviderMixIn.__init__(self)
self.linter = linter
# messages that are active for the current check
self.active_msgs = set()
def add_message(self, msg_id, line=None, node=None, args=None):
"""add a message of a given type"""
self.linter.add_message(msg_id, line, node, args)
def package_dir(self):
"""return the base directory for the analysed package"""
return dirname(self.linter.base_file)
# dummy methods implementing the IChecker interface
    def open(self):
        """called before visiting project (i.e. set of modules)"""
    def close(self):
        """called after visiting project (i.e. set of modules)"""
class BaseRawChecker(BaseChecker):
"""base class for raw checkers"""
def process_module(self, node):
"""process a module
the module's content is accessible via the stream object
stream must implement the readline method
"""
stream = node.file_stream
stream.seek(0) # XXX may be removed with astng > 0.23
self.process_tokens(tokenize.generate_tokens(stream.readline))
def process_tokens(self, tokens):
"""should be overridden by subclasses"""
raise NotImplementedError()
PY_EXTS = ('.py', '.pyc', '.pyo', '.pyw', '.so', '.dll')
def initialize(linter):
"""initialize linter with checkers in this package """
package_load(linter, __path__[0])
def package_load(linter, directory):
"""load all module and package in the given directory, looking for a
'register' function in each one, used to register pylint checkers
"""
imported = {}
for filename in listdir(directory):
basename, extension = splitext(filename)
if basename in imported or basename == '__pycache__':
continue
if extension in PY_EXTS and basename != '__init__' or (
not extension and isdir(join(directory, basename))):
try:
module = load_module_from_file(join(directory, filename))
except ValueError:
# empty module name (usually emacs auto-save files)
continue
except ImportError as exc:
import sys
print("Problem importing module %s: %s" % (filename, exc), file=sys.stderr)
else:
if hasattr(module, 'register'):
module.register(linter)
imported[basename] = 1
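# Illustrative only: the smallest plugin module that package_load() would
# discover. The module name `my_checker` is an assumption, and the msgs
# format follows the two-tuple style of this pylint generation.
#
# # my_checker.py
# from pylint.checkers import BaseChecker
#
# class MyChecker(BaseChecker):
#     name = 'my-checker'
#     msgs = {'W9901': ('my message', 'help text for my message')}
#
# def register(linter):
#     """required entry point, called by package_load()"""
#     linter.add_checker(MyChecker(linter))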
__all__ = ('BaseChecker', 'initialize', 'package_load')
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/pylint/checkers/__init__.py
|
Python
|
mit
| 5,546
|
from datetime import datetime
import yaml
from concurrent.futures.thread import ThreadPoolExecutor
from tornado import ioloop, concurrent
from yaml.error import YAMLError
import re
from pipelines.api import PIPELINES_EXT
import os.path
import json
import logging
from uuid import uuid4
import base64
from tornado.web import HTTPError
from pipelines import PipelinesError
from pipelines.pipeline.pipeline import Pipeline
from pipelines.plugin.exceptions import PluginError
log = logging.getLogger('pipelines')
class AsyncRunner(object):
__instance = None
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = super(
AsyncRunner, cls).__new__(cls, *args, **kwargs)
return cls.__instance
def __init__(self):
self.executor = ThreadPoolExecutor(max_workers=4)
self.io_loop = ioloop.IOLoop.current()
self.pipe = None
def load(self, pipeline_filepath, folder_path, params=None):
params = params or {}  # avoid a shared mutable default argument
base_params = {
'status_file': os.path.join(folder_path, 'status.json'),
'log_file': os.path.join(folder_path, 'output.log')
}
base_params.update(params)
self.log_file = base_params['log_file']
self.pipe = Pipeline.from_yaml(pipeline_filepath, base_params)
def _write_user_context(self, context):
user = context.get('username', 'anonymous')
ip = context.get('ip', '(unknown ip)')
timestamp = datetime.utcnow().strftime('%Y:%m:%d %H:%M:%S')
msg = 'Pipeline triggered by: "{}" [ip: {}]'.format(user, ip)
to_write = '{timestamp}: {message}'.format(timestamp=timestamp,
message=msg)
if self.log_file:
with open(self.log_file, 'a') as f:
f.write(to_write)
f.write('\n')
@concurrent.run_on_executor
def run(self, context):
log.debug('Running with context: {}'.format(context))
self._write_user_context(context)
if self.pipe:
return self.pipe.run()
else:
raise PipelinesError('AsyncRunner error. No pipeline.')
def _file_iterator(folder, extensions):
for path in os.listdir(folder):
for ext in extensions:
if path.endswith('.%s' % ext):
yield path
def _slugify_file(filename):
basename = filename.rsplit('/', 1)[-1]
return basename.rsplit('.', 1)[0]
def _run_id_iterator(slug):
for sub_folder in os.listdir(slug):
if _is_valid_uuid(sub_folder):
yield sub_folder
def _is_valid_uuid(uuid):
regex = re.compile(r'^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z', re.I)
match = regex.match(uuid)
return bool(match)
def _get_pipeline_filepath(workspace, slug):
for ext in PIPELINES_EXT:
yaml_filepath = os.path.join(workspace, '%s.%s' % (slug, ext))
if os.path.exists(yaml_filepath):
return yaml_filepath
return None
# TODO server.js line 211 to use this func
def walk_pipelines(workspace):
for path in _file_iterator(workspace, extensions=PIPELINES_EXT):
try:
with open(os.path.join(workspace, path)) as f:
yaml_string = f.read()
except IOError:
log.error('Cannot read pipeline, file missing: {}'.format(path))
continue
try:
pipeline_def = yaml.safe_load(yaml_string)  # safe_load avoids constructing arbitrary objects
except YAMLError:
log.error('Skipping pipeline. Could not load yaml for: {}'.format(path))
continue
slug = _slugify_file(path)
yield slug, pipeline_def
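# Illustrative usage (the workspace path and the 'triggers' key are assumptions):
#
# for slug, definition in walk_pipelines('/var/lib/pipelines'):
#     print(slug, definition.get('triggers'))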
def _run_pipeline(handler, workspace, pipeline_slug, params=None, response_fn=None):
params = params or {}  # avoid a shared mutable default argument
log.debug('Running pipeline %s with params %s' % (pipeline_slug, json.dumps(params)))
# Guess the pipeline extension
pipeline_filepath = _get_pipeline_filepath(workspace, pipeline_slug)
if not pipeline_filepath:
raise HTTPError(404, 'Pipeline not found')
task_id = str(uuid4())
folder_path = os.path.join(workspace, pipeline_slug, task_id)
try:
runner = AsyncRunner()
runner.load(pipeline_filepath, folder_path, params)
except (PipelinesError, PluginError) as e:
handler.clear()
handler.set_status(400)
err_msg = 'Error loading pipeline: {}'.format(e.message)
handler.finish(json.dumps({'message': err_msg}))
logging.warn(err_msg)
return
os.makedirs(folder_path)
if response_fn:
response_fn(handler, task_id)
else:
handler.write(json.dumps({'task_id': task_id}, indent=2))
handler.finish()
username = handler.get_current_user()
if isinstance(username, dict):
username = username.get('username', 'unknown')
user_context = {
'username': username,
'ip': handler.request.remote_ip
}
if 'authorization' in handler.request.headers:
user_context['username'] = _parse_basicauth_user(handler.request.headers['authorization'])
yield runner.run(user_context)
def _parse_basicauth_user(basicauth_http_header):
try:
return base64.decodestring(basicauth_http_header.split(' ')[1]).split(':')[0]
except Exception as e:
log.warn('Could not parse nginx auth header: {}'.format(basicauth_http_header))
return '(problem parsing user)'
|
Wiredcraft/pipelines
|
pipelines/api/utils.py
|
Python
|
mit
| 5,447
|
from .r_dependencies import *
from .r_base import r_base
class r_geneticAlgorithm(r_base):
def calculate_geneticAlgorithm(self):
'''genetic algorithm
requires the GA or genalg package
e.g. http://www.r-bloggers.com/genetic-algorithms-a-simple-r-example/
'''
# TODO
# Call to R
try:
# format the data into R objects and call tune
# NOTE: `concentrations`, `cn_sorted` and `sns_sorted` are assumed to be
# provided by the enclosing class/caller; they are not defined in this file.
r_statement = ('concentrations_m = matrix(concentrations, nrow = %s, ncol = %s, byrow = TRUE)' % (len(cn_sorted), len(sns_sorted)))
ans = robjects.r(r_statement)
return
except Exception as e:
print(e)
exit(-1)
|
dmccloskey/r_statistics
|
r_statistics/r_geneticAlgorithm.py
|
Python
|
mit
| 691
|
def gameMode(gameModeID):
return {
-1: 'skipped',
0 : 'Unknown',
1 : 'All Pick',
2 : 'Captains Mode',
3 : 'Random Draft',
4 : 'Single Draft',
5 : 'All Random',
6 : '?? INTRO/DEATH ??',
7 : 'The Diretide',
8 : 'Reverse Captains Mode',
9 : 'Greeviling',
10 : 'Tutorial',
11 : 'Mid Only',
12 : 'Least Played',
13 : 'New Player Pool',
14 : 'Compendium Matchmaking',
15 : 'Custom',
16 : 'Captains Draft',
17 : 'Balanced Draft',
18 : 'Ability Draft',
19 : '?? Event ??',
20 : 'All Random Death Match',
21 : '1vs1 Solo Mid',
22 : 'Ranked All Pick',
}.get(gameModeID, 'Unknown')
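# Example: gameMode(2) returns 'Captains Mode'; unmapped ids fall back to the
# default, e.g. gameMode(99) returns 'Unknown'.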
|
NNTin/Reply-Dota-2-Reddit
|
misc/idnamedict.py
|
Python
|
mit
| 769
|
from django.conf.urls import url, include
from rest_framework import routers
from apps.api import views
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
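# For reference: with the DefaultRouter this registration exposes, among
# others, /users/ (list/create) and /users/<pk>/ (retrieve/update/delete),
# plus a browsable API root at /.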
|
daniel-afana/modern-django
|
apps/api/urls.py
|
Python
|
mit
| 247
|
__version__ = '0.12.0'
default_app_config = 'mjml.apps.MJMLConfig'
|
liminspace/django-mjml
|
mjml/__init__.py
|
Python
|
mit
| 68
|
"""
Terminal-related utilities
--------------------------
"""
import os
import sys
from plumbum import local
from .progress import Progress
from .termsize import get_terminal_size
__all__ = (
"readline",
"ask",
"choose",
"prompt",
"get_terminal_size",
"Progress",
)
def __dir__():
return __all__
def readline(message=""):
"""Gets a line of input from the user (stdin)"""
sys.stdout.write(message)
sys.stdout.flush()
return sys.stdin.readline()
def ask(question, default=None):
"""
Presents the user with a yes/no question.
:param question: The question to ask
:param default: If ``None``, the user must answer. If ``True`` or ``False``, lack of response is
interpreted as the default option
:returns: the user's choice
"""
question = question.rstrip().rstrip("?").rstrip() + "?"
if default is None:
question += " (y/n) "
elif default:
question += " [Y/n] "
else:
question += " [y/N] "
while True:
try:
answer = readline(question).strip().lower()
except EOFError:
answer = None
if answer in {"y", "yes"}:
return True
if answer in {"n", "no"}:
return False
if not answer and default is not None:
return default
sys.stdout.write("Invalid response, please try again\n")
def choose(question, options, default=None):
"""Prompts the user with a question and a set of options, from which the user needs to choose.
:param question: The question to ask
:param options: A set of options. It can be a list (of strings or two-tuples, mapping text
to returned-object) or a dict (mapping text to returned-object).``
:param default: If ``None``, the user must answer. Otherwise, lack of response is interpreted
as this answer
:returns: The user's choice
Example::
ans = choose("What is your favorite color?", ["blue", "yellow", "green"], default = "yellow")
# `ans` will be one of "blue", "yellow" or "green"
ans = choose("What is your favorite color?",
{"blue" : 0x0000ff, "yellow" : 0xffff00 , "green" : 0x00ff00}, default = 0x00ff00)
# this will display "blue", "yellow" and "green" but return a numerical value
"""
if hasattr(options, "items"):
options = options.items()
sys.stdout.write(question.rstrip() + "\n")
choices = {}
defindex = None
for i, item in enumerate(options):
i += 1
if isinstance(item, (tuple, list)) and len(item) == 2:
text = item[0]
val = item[1]
else:
text = item
val = item
choices[i] = val
if default is not None and default == val:
defindex = i
sys.stdout.write(f"({i}) {text}\n")
if default is not None:
if defindex is None:
msg = f"Choice [{default}]: "
else:
msg = f"Choice [{defindex}]: "
else:
msg = "Choice: "
while True:
try:
choice = readline(msg).strip()
except EOFError:
choice = ""
if not choice and default is not None:
return default
try:
choice = int(choice)
if choice not in choices:
raise ValueError()
except ValueError:
sys.stdout.write("Invalid choice, please try again\n")
continue
return choices[choice]
def prompt(
question,
type=str, # pylint: disable=redefined-builtin
default=NotImplemented,
validator=lambda val: True,
):
"""
Presents the user with a validated question, keeps asking if validation does not pass.
:param question: The question to ask
:param type: The type of the answer, defaults to str
:param default: The default choice
:param validator: An extra validator called after type conversion, can raise ValueError or return False to trigger a retry.
:returns: the user's choice
"""
question = question.rstrip(" \t:")
if default is not NotImplemented:
question += f" [{default}]"
question += ": "
while True:
try:
ans = readline(question).strip()
except EOFError:
ans = ""
if not ans:
if default is not NotImplemented:
# sys.stdout.write("\b%s\n" % (default,))
return default
continue
try:
ans = type(ans)
except (TypeError, ValueError) as ex:
sys.stdout.write(f"Invalid value ({ex}), please try again\n")
continue
try:
valid = validator(ans)
except ValueError as ex:
sys.stdout.write(f"{ex}, please try again\n")
continue
if not valid:
sys.stdout.write("Value not in specified range, please try again\n")
continue
return ans
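# Illustrative usage (prompt text, default and range are assumptions):
#
# age = prompt("Enter your age", type=int, default=30,
#              validator=lambda v: 0 < v < 120)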
def hexdump(data_or_stream, bytes_per_line=16, aggregate=True):
"""Convert the given bytes (or a stream with a buffering ``read()`` method) to hexdump-formatted lines,
with possible aggregation of identical lines. Returns a generator of formatted lines.
"""
if hasattr(data_or_stream, "read"):
def read_chunk():
while True:
buf = data_or_stream.read(bytes_per_line)
if not buf:
break
yield buf
else:
def read_chunk():
for i in range(0, len(data_or_stream), bytes_per_line):
yield data_or_stream[i : i + bytes_per_line]
prev = None
skipped = False
for i, chunk in enumerate(read_chunk()):
hexd = " ".join(f"{ord(ch):02x}" for ch in chunk)
text = "".join(ch if 32 <= ord(ch) < 127 else "." for ch in chunk)
if aggregate and prev == chunk:
skipped = True
continue
prev = chunk
if skipped:
yield "*"
hexd_ljust = hexd.ljust(bytes_per_line * 3, " ")
yield f"{i*bytes_per_line:06x} | {hexd_ljust}| {text}"
skipped = False
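# Illustrative usage: pass a str here, since the formatting applies ord() to
# each element (a py3 bytes object would yield ints instead):
#
# for line in hexdump("hello world " * 4):
#     print(line)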
def pager(rows, pagercmd=None): # pragma: no cover
"""Opens a pager (e.g., ``less``) to display the given text. Requires a terminal.
:param rows: a ``bytes`` or a list/iterator of "rows" (``bytes``)
:param pagercmd: the pager program to run. Defaults to ``less -RSin``
"""
if not pagercmd:
pagercmd = local["less"]["-RSin"]
if hasattr(rows, "splitlines"):
rows = rows.splitlines()
pg = pagercmd.popen(stdout=None, stderr=None)
try:
for row in rows:
line = f"{row}\n"
try:
pg.stdin.write(line)
pg.stdin.flush()
except OSError:
break
pg.stdin.close()
pg.wait()
finally:
try:
rows.close()
except Exception:
pass
if pg and pg.poll() is None:
try:
pg.terminate()
except Exception:
pass
os.system("reset")
|
tomerfiliba/plumbum
|
plumbum/cli/terminal.py
|
Python
|
mit
| 7,197
|
#!/usr/bin/env python
# Problem link: https://oj.leetcode.com/problems/merge-intervals/
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __repr__(self):
return "["+str(self.start)+","+str(self.end)+"]"
class Solution:
# @param intervals, a list of Interval
# @return a list of Interval
def merge(self, intervals):
if len(intervals) <= 1:
return intervals
merged = []
for inter in intervals:
overlapped = []
for m in merged:
if self.checkMerge(m, inter):
overlapped.append(m)
if 0 == len(overlapped):
merged.append(inter)
else:
for o in overlapped:
merged.remove(o)
overlapped.append(inter)
start = min([o.start for o in overlapped])
end = max([o.end for o in overlapped])
merged.append(Interval(start, end))
return merged
def checkMerge(self, inter1, inter2):
if inter2.start > inter1.end or inter2.end < inter1.start:
return False
return True
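# For reference, a sketch of the more common O(n log n) approach: sort by
# start, then sweep once, extending the last merged interval. Equivalent
# output, fewer passes than the pairwise merging above.
#
# def merge_sorted(intervals):
#     merged = []
#     for inter in sorted(intervals, key=lambda i: i.start):
#         if merged and inter.start <= merged[-1].end:
#             merged[-1].end = max(merged[-1].end, inter.end)
#         else:
#             merged.append(Interval(inter.start, inter.end))
#     return merged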
def main():
intervals = [Interval(2,4),Interval(3,5),Interval(1,10),Interval(11,12)]
sol = Solution()
print sol.merge(intervals)
if __name__ == "__main__":
main()
|
erichoco/LeetCodeOJ
|
Merge Intervals/merge_intervals.py
|
Python
|
cc0-1.0
| 1,413
|
from setuptools import setup, find_packages
setup(name='MODEL0848279215',
version=20140916,
description='MODEL0848279215 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL0848279215',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
biomodels/MODEL0848279215
|
setup.py
|
Python
|
cc0-1.0
| 377
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015-2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Cerebrum module for loading configuration files.
This module contains functionality for finding and loading config files from
well-defined locations on the file system.
It is a bridge between the `parsers` module and the `configuration` module.
TODO: Improvements:
1. Improve error handling:
- If multiple errors exist in config files, we should gather them up and
present all the errors.
- Reading config should be 'transactional'. If any errors exists, no
configuration should be changed.
"""
from __future__ import unicode_literals
import logging
import os
import sys
from Cerebrum.config.configuration import Configuration
from . import parsers as _parsers
# Make it possible to override sys.prefix for configuration path purposes
sys_prefix = os.getenv('CEREBRUM_SYSTEM_PREFIX', sys.prefix)
# Default directory for global configuration files.
default_dir = os.getenv('CEREBRUM_CONFIG_ROOT',
os.path.join(sys_prefix, 'etc', 'cerebrum', 'config'))
# Default directory for user configuration files.
user_dir = os.path.join('~', '.cerebrum', 'config')
# Default name of the root configuration file.
default_root_ns = None
# Module logger
logger = logging.getLogger(__name__)
def _f2key(f):
""" Get config namespace from filename. """
return os.path.splitext(os.path.basename(f))[0]
def _f2ext(f):
""" Get file extension from filename. """
return os.path.splitext(f)[1].lstrip('.')
def is_readable_dir(path):
""" Checks if path is a readable directory.
:param str path:
A file system path.
:return bool:
True if `path` is a readable and listable directory.
"""
return (bool(path) and os.path.isdir(path) and
os.access(path, os.R_OK | os.X_OK))
def lookup_dirs(additional_dirs=None):
""" Gets an ordered list of config directories.
:param list additional_dirs:
Include directories in the list, if they are `readable`.
:return list:
A prioritized list of real, accessible directories.
"""
candidates = [os.path.abspath(os.path.expanduser(d))
for d in [default_dir, user_dir] + (additional_dirs or [])]
return [d for d in candidates if is_readable_dir(d)]
def read(config, root_ns=default_root_ns, additional_dirs=[]):
""" Update `config` with data from config files.
This function will:
1. Look at each file in the first lookupdir.
2. If a `<root_ns>.<ext>` exists, parse and load into `config` (at root
level).
3. For each other file `<name>.<ext>`, sorted by the length of <name>
length, load it into config[<name>] if config[<name>] exists.
The name length ordering makes sure that `foo.<ext>` gets loaded
_before `foo.bar.<ext>`.
4. Repeat for next lookup dir.
:param Configuration config:
The configuration to update.
:param str root_ns:
The namespace of this configuration.
:param list additional_dirs:
Additional directories to look for config files in. See `lookup_dirs`
for more info.
"""
logger.debug("read cls={t!r} root={r!r} dirs={d!r}"
"".format(t=type(config), r=root_ns, d=additional_dirs))
def _get_config_files(confdir):
""" yield config files from confdir. """
def _file_sort_key(filename):
""" sort files in confdir. """
key = _f2key(filename)
# The root config should always be first
if key == root_ns:
return 0
# Otherwise, we use the string length to select read order.
return len(key)
# Do we have config files that match a specific thing?
files = map(lambda f: os.path.join(confdir, f),
filter(lambda f: not f.startswith(os.path.extsep),
os.listdir(confdir)))
for f in sorted(files, key=_file_sort_key):
yield f
# TODO: Transactional update: Make a copy here, then update the copy
for d in lookup_dirs(additional_dirs=additional_dirs):
logger.debug('processing configs from {0}'.format(d))
for f in _get_config_files(d):
# logger.debug('considering {0}'.format(f))
key = _f2key(f)
# TODO: Handle errors here
# Also, maybe keep track of changes by files, warn if a file
# changes something that has already been set from another
# file?
if key == root_ns:
logger.debug('loading root using namespace {0!r}'.format(key))
config.load_dict(read_config(f))
elif key in config:
# TODO: Find a more elegant way of handling nested structures
if not isinstance(config[key], Configuration):
continue
logger.debug('loading namespace {0!r}'.format(key))
config[key].load_dict(read_config(f))
# TODO: Then validate the copy, and write changes back to the original
# config object to complete the 'transaction'.
def read_config(filename):
""" Read a config file.
:param str filename:
The config filename.
:return:
The structured data from `filename`.
"""
parser = _parsers.get_parser(filename)
logger.debug("read_config parser={p!r} filename={f!r}"
"".format(f=filename, p=parser))
return parser.read(filename)
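# Illustrative usage (the root namespace is an assumption; `config` must be a
# Configuration instance whose schema matches the files on disk):
#
# config = Configuration()
# read(config, root_ns='cerebrum')
# # cerebrum.<ext> is loaded at root level; foo.<ext> is loaded into
# # config['foo'] if that namespace exists.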
|
unioslo/cerebrum
|
Cerebrum/config/loader.py
|
Python
|
gpl-2.0
| 6,272
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import re
import MDAnalysisTests
def test_import():
try:
import MDAnalysis
except ImportError:
raise AssertionError('Failed to import module MDAnalysis. Install MDAnalysis'
'first to run the tests, e.g. "pip install mdanalysis"')
def test_matching_versions():
import MDAnalysis.version
assert MDAnalysis.version.__version__ == MDAnalysisTests.__version__, \
"MDAnalysis release {0} must be installed to have meaningful tests, not {1}".format(
MDAnalysisTests.__version__, MDAnalysis.__version__)
def test_version_format(version=None):
if version is None:
import MDAnalysis.version
version = MDAnalysis.version.__version__
# see https://github.com/MDAnalysis/mdanalysis/wiki/SemanticVersioning for format definition
m = re.match(r'(?P<MAJOR>\d+)\.(?P<MINOR>\d+)\.(?P<PATCH>\d+)(-(?P<suffix>\w+))?$',
version)
assert m, "version {0} does not match the MAJOR.MINOR.PATCH(-suffix) format".format(version)
def test_version_at_packagelevel():
import MDAnalysis
try:
version = MDAnalysis.__version__
except AttributeError:
raise AssertionError("MDAnalysis.__version__ missing")
return test_version_format(version)
# The following allow testing of the memleak tester plugin.
# Keep commented out unless you suspect the plugin
# might be misbehaving. Apparently python3 is immune to these leaks!"""
#from numpy.testing import TestCase
#class A():
# """This is a small leaky class that won't break anything."""
# def __init__(self):
# self.self_ref = self
# def __del__(self):
# pass
#
#def test_that_memleaks():
# """Test that memleaks (Issue 323)"""
# a = A()
#
#class TestML1(TestCase):
# def test_that_memleaks(self):
# """Test that memleaks (Issue 323)"""
# self.a = A()
#
#class TestML2(TestCase):
# def setUp(self):
# a = A()
# def test_that_memleaks(self):
# """Test that memleaks (Issue 323)"""
# pass
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/utils/test_meta.py
|
Python
|
gpl-2.0
| 3,092
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
import stat
from buildbot.process import buildstep
from buildbot.process import remotecommand
from buildbot.process import remotetransfer
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.util import ascii2unicode
from buildbot.worker_transition import deprecatedWorkerClassMethod
class WorkerBuildStep(buildstep.BuildStep):
pass
class SetPropertiesFromEnv(WorkerBuildStep):
"""
Sets properties from environment variables on the worker.
Note this is transferred when the worker first connects
"""
name = 'SetPropertiesFromEnv'
description = ['Setting']
descriptionDone = ['Set']
def __init__(self, variables, source="WorkerEnvironment", **kwargs):
buildstep.BuildStep.__init__(self, **kwargs)
self.variables = variables
self.source = source
def start(self):
# on Windows, environment variables are case-insensitive, but we have
# a case-sensitive dictionary in worker_environ. Fortunately, that
# dictionary is also folded to uppercase, so we can simply fold the
# variable names to uppercase to duplicate the case-insensitivity.
fold_to_uppercase = (self.worker.worker_system == 'win32')
properties = self.build.getProperties()
environ = self.worker.worker_environ
variables = self.variables
log = []
if isinstance(variables, str):
variables = [variables]
for variable in variables:
key = variable
if fold_to_uppercase:
key = variable.upper()
value = environ.get(key, None)
if value:
# note that the property is not uppercased
properties.setProperty(variable, value, self.source,
runtime=True)
log.append("%s = %r" % (variable, value))
self.addCompleteLog("properties", "\n".join(log))
self.finished(SUCCESS)
class FileExists(WorkerBuildStep):
"""
Check for the existence of a file on the worker.
"""
name = 'FileExists'
renderables = ['file']
haltOnFailure = True
flunkOnFailure = True
def __init__(self, file, **kwargs):
buildstep.BuildStep.__init__(self, **kwargs)
self.file = file
def start(self):
self.checkWorkerHasCommand('stat')
cmd = remotecommand.RemoteCommand('stat', {'file': self.file})
d = self.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
d.addErrback(self.failed)
def commandComplete(self, cmd):
if cmd.didFail():
self.descriptionDone = ["File not found."]
self.finished(FAILURE)
return
s = cmd.updates["stat"][-1]
if stat.S_ISREG(s[stat.ST_MODE]):
self.descriptionDone = ["File found."]
self.finished(SUCCESS)
else:
self.descriptionDone = ["Not a file."]
self.finished(FAILURE)
class CopyDirectory(WorkerBuildStep):
"""
Copy a directory tree on the worker.
"""
name = 'CopyDirectory'
description = ['Copying']
descriptionDone = ['Copied']
renderables = ['src', 'dest']
haltOnFailure = True
flunkOnFailure = True
def __init__(self, src, dest, timeout=None, maxTime=None, **kwargs):
buildstep.BuildStep.__init__(self, **kwargs)
self.src = src
self.dest = dest
self.timeout = timeout
self.maxTime = maxTime
def start(self):
self.checkWorkerHasCommand('cpdir')
args = {'fromdir': self.src, 'todir': self.dest}
if self.timeout:
args['timeout'] = self.timeout
if self.maxTime:
args['maxTime'] = self.maxTime
cmd = remotecommand.RemoteCommand('cpdir', args)
d = self.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
d.addErrback(self.failed)
def commandComplete(self, cmd):
if cmd.didFail():
self.step_status.setText(["Copying", self.src, "to", self.dest, "failed."])
self.finished(FAILURE)
return
self.step_status.setText(self.describe(done=True))
self.finished(SUCCESS)
# TODO: BuildStep subclasses don't have a describe()....
def getResultSummary(self):
src = ascii2unicode(self.src, 'replace')
dest = ascii2unicode(self.dest, 'replace')
copy = u"%s to %s" % (src, dest)
if self.results == SUCCESS:
rv = u'Copied ' + copy
else:
rv = u'Copying ' + copy + ' failed.'
return {u'step': rv}
class RemoveDirectory(WorkerBuildStep):
"""
Remove a directory tree on the worker.
"""
name = 'RemoveDirectory'
description = ['Deleting']
descriptionDone = ['Deleted']
renderables = ['dir']
haltOnFailure = True
flunkOnFailure = True
def __init__(self, dir, **kwargs):
buildstep.BuildStep.__init__(self, **kwargs)
self.dir = dir
def start(self):
self.checkWorkerHasCommand('rmdir')
cmd = remotecommand.RemoteCommand('rmdir', {'dir': self.dir})
d = self.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
d.addErrback(self.failed)
def commandComplete(self, cmd):
if cmd.didFail():
self.step_status.setText(["Delete failed."])
self.finished(FAILURE)
return
self.finished(SUCCESS)
class MakeDirectory(WorkerBuildStep):
"""
Create a directory on the worker.
"""
name = 'MakeDirectory'
description = ['Creating']
descriptionDone = ['Created']
renderables = ['dir']
haltOnFailure = True
flunkOnFailure = True
def __init__(self, dir, **kwargs):
buildstep.BuildStep.__init__(self, **kwargs)
self.dir = dir
def start(self):
self.checkWorkerHasCommand('mkdir')
cmd = remotecommand.RemoteCommand('mkdir', {'dir': self.dir})
d = self.runCommand(cmd)
d.addCallback(lambda res: self.commandComplete(cmd))
d.addErrback(self.failed)
def commandComplete(self, cmd):
if cmd.didFail():
self.step_status.setText(["Create failed."])
self.finished(FAILURE)
return
self.finished(SUCCESS)
class CompositeStepMixin():
def addLogForRemoteCommands(self, logname):
"""This method must be called by user classes
composite steps could create several logs, this mixin functions will write
to the last one.
"""
self.rc_log = self.addLog(logname)
return self.rc_log
def runRemoteCommand(self, cmd, args, abandonOnFailure=True,
evaluateCommand=lambda cmd: cmd.didFail()):
"""generic RemoteCommand boilerplate"""
cmd = remotecommand.RemoteCommand(cmd, args)
if hasattr(self, "rc_log"):
cmd.useLog(self.rc_log, False)
d = self.runCommand(cmd)
def commandComplete(cmd):
if abandonOnFailure and cmd.didFail():
raise buildstep.BuildStepFailed()
return evaluateCommand(cmd)
d.addCallback(lambda res: commandComplete(cmd))
return d
def runRmdir(self, dir, timeout=None, **kwargs):
""" remove a directory from the worker """
cmd_args = {'dir': dir, 'logEnviron': self.logEnviron}
if timeout:
cmd_args['timeout'] = timeout
return self.runRemoteCommand('rmdir', cmd_args, **kwargs)
def runRmFile(self, path, timeout=None, **kwargs):
""" remove a file from the worker """
cmd_args = {'path': path, 'logEnviron': self.logEnviron}
if timeout:
cmd_args['timeout'] = timeout
if self.workerVersionIsOlderThan('rmfile', '3.1'):
cmd_args['dir'] = os.path.abspath(path)
return self.runRemoteCommand('rmdir', cmd_args, **kwargs)
return self.runRemoteCommand('rmfile', cmd_args, **kwargs)
def pathExists(self, path):
""" test whether path exists"""
def commandComplete(cmd):
return not cmd.didFail()
return self.runRemoteCommand('stat', {'file': path,
'logEnviron': self.logEnviron, },
abandonOnFailure=False,
evaluateCommand=commandComplete)
def runMkdir(self, _dir, **kwargs):
""" create a directory and its parents"""
return self.runRemoteCommand('mkdir', {'dir': _dir,
'logEnviron': self.logEnviron, },
**kwargs)
def runGlob(self, path, **kwargs):
""" find files matching a shell-style pattern"""
def commandComplete(cmd):
return cmd.updates['files'][-1]
return self.runRemoteCommand('glob', {'path': path,
'logEnviron': self.logEnviron, },
evaluateCommand=commandComplete, **kwargs)
def getFileContentFromWorker(self, filename, abandonOnFailure=False):
self.checkWorkerHasCommand("uploadFile")
fileWriter = remotetransfer.StringFileWriter()
# default arguments
args = {
'workdir': self.workdir,
'writer': fileWriter,
'maxsize': None,
'blocksize': 32 * 1024,
}
if self.workerVersionIsOlderThan('uploadFile', '3.0'):
args['slavesrc'] = filename
else:
args['workersrc'] = filename
def commandComplete(cmd):
if cmd.didFail():
return None
return fileWriter.buffer
return self.runRemoteCommand('uploadFile', args,
abandonOnFailure=abandonOnFailure,
evaluateCommand=commandComplete)
deprecatedWorkerClassMethod(locals(), getFileContentFromWorker)
def downloadFileContentToWorker(self, workerdest, strfile, abandonOnFailure=False, mode=None):
self.checkWorkerHasCommand("downloadFile")
fileReader = remotetransfer.StringFileReader(strfile)
# default arguments
args = {
'workdir': self.workdir,
'maxsize': None,
'mode': mode,
'reader': fileReader,
'blocksize': 32 * 1024,
}
if self.workerVersionIsOlderThan('downloadFile', '3.0'):
args['slavedest'] = workerdest
else:
args['workerdest'] = workerdest
def commandComplete(cmd):
if cmd.didFail():
return None
return fileReader
return self.runRemoteCommand('downloadFile', args,
abandonOnFailure=abandonOnFailure,
evaluateCommand=commandComplete)
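# Illustrative sketch (step name, paths and attributes are assumptions): a
# custom step combining the mixin helpers above.
#
# class CleanBuildDir(buildstep.BuildStep, CompositeStepMixin):
#     logEnviron = False  # assumed attribute read by the mixin helpers
#     def start(self):
#         self.addLogForRemoteCommands('stdio')
#         d = self.runRmdir('build')
#         d.addCallback(lambda _: self.runMkdir('build'))
#         d.addCallback(lambda _: self.finished(SUCCESS))
#         d.addErrback(self.failed)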
|
Frodox/buildbot
|
master/buildbot/steps/worker.py
|
Python
|
gpl-2.0
| 11,828
|
from languages import language
def input_float(text="", lang="en"):
lan = language[lang]
is_num = False
while not is_num:
try:
num = float(input(text))
is_num = True
except ValueError:
print(lan["error_input_num"])
return num
def input_int(text="", lang="en"):
lan = language[lang]
is_num = False
while not is_num:
try:
num = int(input(text))
is_num = True
except ValueError:
print(lan["error_input_int"])
return num
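# Illustrative usage (prompt texts are assumptions):
#
# gain = input_float("Enter gain K: ", lang="en")
# order = input_int("Enter system order: ", lang="en")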
|
Nestyko/Root_Locus_Plot
|
user_input.py
|
Python
|
gpl-2.0
| 748
|
#!/usr/bin/env python
###
# Copyright 2015, EMBL-EBI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import datetime
VERSION = "2.0"
NGSEASYVERSION = "1.0-r001"
RUNDATE = datetime.datetime.now().strftime("%d%m%y%H%M%S")
|
olgamelnichuk/NGSPyEasy
|
ngspyeasy/settings.py
|
Python
|
gpl-2.0
| 726
|
# -*- encoding: utf-8 -*-
"""
Equipe MCRSoftwares - AcadSocial
Versão do Código: 01v003a
Responsável: Victor Ferraz
Auxiliar: -
Requisito(s): -
Caso(s) de Uso: -
Descrição:
Definição dos formulários relacionados à aplicação de grupos e eventos.
"""
from django import forms
from datetime import datetime
from grupos.errors import erro_interesse, erro_grupo, erro_evento
from contas.models import UsuarioModel
from grupos.models import ComentarioGrupoModel, InteresseModel, GrupoModel, PostagemGrupoModel
from grupos.models import ComentarioEventoModel, PostagemEventoModel, EventoModel
class ComentarioGrupoForm(forms.ModelForm):
class Meta:
model = ComentarioGrupoModel
fields = ()
class ComentarioEventoForm(forms.ModelForm):
class Meta:
model = ComentarioEventoModel
fields = ()
class InteresseSearchForm(forms.ModelForm):
pesquisa_attrs = {
'placeholder': 'Criar/Pesquisar Interesses...',
'class': 'form-control search-bar-interesse',
'id': 'interesseQry',
'autocomplete': 'off',
}
iq = forms.CharField(max_length=64, widget=forms.TextInput(attrs=pesquisa_attrs))
class Meta:
model = InteresseModel
fields = ('iq', )
class AdicionarInteresseForm(forms.ModelForm):
interesse_attrs = {
'id': 'interesseField',
}
criarInteresse = forms.CharField(max_length=64, widget=forms.HiddenInput(attrs=interesse_attrs))
def clean(self):
interesse = self.cleaned_data.get('criarInteresse')
interesse = ' '.join(unicode(interesse).split())
if not interesse or not ''.join(unicode(interesse).split()):
raise forms.ValidationError(erro_interesse['interesse_invalido'], code='interesse_invalido')
if unicode(interesse).startswith(' '):
interesse = interesse[1:]
try:
InteresseModel.objects.get(interesse__iexact=interesse)
raise forms.ValidationError(erro_interesse['interesse_ja_existente'], code='interesse_ja_existente')
except InteresseModel.DoesNotExist:
return self.cleaned_data
class Meta:
model = InteresseModel
fields = ('criarInteresse', )
class GrupoSearchForm(forms.ModelForm):
pesquisa_attrs = {
'placeholder': 'Criar/Pesquisar Grupos...',
'class': 'form-control search-bar-interesse',
'id': 'grupoQry',
'autocomplete': 'off',
}
gq = forms.CharField(max_length=64, widget=forms.TextInput(attrs=pesquisa_attrs))
class Meta:
model = GrupoModel
fields = ('gq', )
class AdicionarGrupoForm(forms.ModelForm):
grupo_attrs = {
'id': 'grupoField'
}
criarGrupo = forms.CharField(max_length=64, widget=forms.HiddenInput(attrs=grupo_attrs))
def clean(self):
grupo = self.cleaned_data.get('criarGrupo')
grupo = ' '.join(unicode(grupo).split())
if not grupo or not ''.join(unicode(grupo).split()):
raise forms.ValidationError(erro_grupo['grupo_invalido'], code='grupo_invalido')
if unicode(grupo).startswith(' '):
grupo = grupo[1:]
try:
GrupoModel.objects.get(nome__iexact=grupo)
raise forms.ValidationError(erro_grupo['grupo_ja_existente'], code='grupo_ja_existente')
except GrupoModel.DoesNotExist:
return self.cleaned_data
class Meta:
model = GrupoModel
fields = ('criarGrupo', )
class PostagemGrupoForm(forms.ModelForm):
titulo_attrs = {
'class': 'form-control form-panel',
'placeholder': u'Título...',
}
conteudo_attrs = {
'class': 'form-control form-panel',
'placeholder': u'Conteúdo...',
'maxlength': '256',
'rows': '1',
}
titulo = forms.CharField(max_length=32, widget=forms.TextInput(attrs=titulo_attrs), required=True)
conteudo = forms.CharField(widget=forms.Textarea(attrs=conteudo_attrs), required=True)
class Meta:
model = PostagemGrupoModel
fields = ('titulo', 'conteudo', )
class PostagemEventoForm(forms.ModelForm):
titulo_attrs = {
'class': 'form-control form-panel',
'placeholder': u'Título...',
}
conteudo_attrs = {
'class': 'form-control form-panel',
'placeholder': u'Conteúdo...',
'maxlength': '256',
'rows': '1',
}
titulo = forms.CharField(max_length=32, widget=forms.TextInput(attrs=titulo_attrs), required=True)
conteudo = forms.CharField(widget=forms.Textarea(attrs=conteudo_attrs), required=True)
class Meta:
model = PostagemEventoModel
fields = ('titulo', 'conteudo', )
class MembroSearchForm(forms.ModelForm):
pesquisa_attrs = {
'placeholder': 'Pesquisar Usuários...',
'class': 'form-control search-bar-interesse',
'id': 'usuarioQry',
'autocomplete': 'off',
}
aq = forms.CharField(max_length=64, widget=forms.TextInput(attrs=pesquisa_attrs))
class Meta:
model = GrupoModel
fields = ('aq', )
class EditarGrupoForm(forms.ModelForm):
nome_attrs = {
'class': 'form-control',
'id': 'nomeGrupoEditar',
}
descricao_attrs = {
'class': 'form-control',
'id': 'descricaoGrupoEditar',
'maxlength': '1024',
'rows': '2',
}
nome = forms.CharField(widget=forms.TextInput(attrs=nome_attrs), max_length=100, required=True)
descricao = forms.CharField(widget=forms.Textarea(attrs=descricao_attrs), max_length=1024, required=True)
class Meta:
model = GrupoModel
fields = ('nome', 'descricao', )
class AdminGrupoForm(forms.ModelForm):
membro_attrs = {
'class': 'form-control'
}
membro = forms.ModelChoiceField(queryset=None,
empty_label='Selecione o e-mail de um membro',
widget=forms.Select(attrs=membro_attrs))
class Meta:
model = UsuarioModel
fields = ('membro', )
class EventoForm(forms.ModelForm):
titulo_attrs = {
'class': 'form-control',
'id': 'eventoTitulo'
}
descricao_attrs = {
'class': 'form-control',
'maxlength': '256',
'id': 'eventoDesc',
'rows': '2',
}
dia_attrs = {
'class': 'form-control',
'id': 'eventoDia',
}
mes_attrs = {
'class': 'form-control',
'id': 'eventoMes',
}
ano_attrs = {
'class': 'form-control',
'id': 'eventoAno',
}
hora_attrs = {
'class': 'form-control',
'id': 'eventoHora',
}
minutos_attrs = {
'class': 'form-control',
'id': 'eventoMinutos',
}
local_attrs = {
'class': 'form-control',
'id': 'eventoLocal',
}
dia_list = [(x, str(x)) for x in range(1, 32)]
dia_list.insert(0, (0, 'Dia'))
mes_list = [(x, str(x)) for x in range(1, 13)]
mes_list.insert(0, (0, 'Mês'))
ano_list = [(x, str(x)) for x in range(datetime.today().year, datetime.today().year + 2)]
ano_list.insert(0, (0, 'Ano'))
hora_list = [(x, str(x)) for x in range(0, 24)]
hora_list.insert(0, (-1, 'Hora'))
minutos_list = [(x * 5, str(x * 5)) for x in range(0, 12)]
minutos_list.insert(0, (-1, 'Minutos'))
dia = forms.ChoiceField(choices=dia_list, widget=forms.Select(dia_attrs))
mes = forms.ChoiceField(choices=mes_list, widget=forms.Select(mes_attrs))
ano = forms.ChoiceField(choices=ano_list, widget=forms.Select(ano_attrs))
hora = forms.ChoiceField(choices=hora_list, widget=forms.Select(hora_attrs))
minutos = forms.ChoiceField(choices=minutos_list, widget=forms.Select(minutos_attrs))
titulo = forms.CharField(max_length=100, widget=forms.TextInput(attrs=titulo_attrs))
descricao = forms.CharField(max_length=256, widget=forms.Textarea(attrs=descricao_attrs))
local_evento = forms.CharField(max_length=140, widget=forms.TextInput(attrs=local_attrs))
def clean(self):
dia = self.cleaned_data.get('dia')
mes = self.cleaned_data.get('mes')
ano = self.cleaned_data.get('ano')
hora = self.cleaned_data.get('hora')
minutos = self.cleaned_data.get('minutos')
data_str = str(dia) + '-' + str(mes) + '-' + str(ano) + ' ' + str(hora) + ':' + str(minutos)
try:
datetime.strptime(data_str, '%d-%m-%Y %H:%M')
except ValueError:
raise forms.ValidationError(erro_evento['data_invalida'], code='data_invalida')
return self.cleaned_data
class Meta:
model = EventoModel
fields = ('dia', 'mes', 'ano', 'hora', 'minutos', 'local_evento', 'titulo', 'descricao',)
|
MCRSoftwares/AcadSocial
|
grupos/forms.py
|
Python
|
gpl-2.0
| 8,770
|
"""
Copyright (C) 2014, Web Bender Consulting, LLC. - All Rights Reserved
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
Written by Elijah Ethun <elijahe@gmail.com>
"""
# The RPIO PWM module is aliased so the PWM class below does not shadow it.
# RPi, time, traceback, notice and Meta are assumed to be provided by the
# star import from Sven.Methods.
from RPIO import PWM as RPIO_PWM
from Sven.Module.RaspberryPi.Base import Base
from Sven.Methods import *
class PWM(Base):
start_duty = 0
duty = start_duty
start_polarity = 1
polarity = start_polarity
start_frequency = .5
frequency = start_frequency
high_multiplier = 2
pwm = None
notes = {
'hB' : 493.88 * high_multiplier,
'hBb' : 466.16 * high_multiplier,
'hAs' : 466.16 * high_multiplier,
'hA' : 440.00 * high_multiplier,
'hAb' : 830.61 * high_multiplier,
'hGs' : 830.61 * high_multiplier,
'hG' : 783.99 * high_multiplier,
'hGb' : 739.99 * high_multiplier,
'hFs' : 739.99 * high_multiplier,
'hF' : 698.46 * high_multiplier,
'hE' : 659.26 * high_multiplier,
'hEb' : 622.25 * high_multiplier,
'hDs' : 622.25 * high_multiplier,
'hD' : 587.33 * high_multiplier,
'hDb' : 554.37 * high_multiplier,
'hCs' : 554.37 * high_multiplier,
'hC' : 523.25 * high_multiplier,
'B' : 493.88,
'Bb' : 466.16,
'As' : 466.16,
'A' : 440.00,
'Ab' : 830.61,
'Gs' : 830.61,
'G' : 783.99,
'Gb' : 739.99,
'Fs' : 739.99,
'F' : 698.46,
'E' : 659.26,
'Eb' : 622.25,
'Ds' : 622.25,
'D' : 587.33,
'Db' : 554.37,
'Cs' : 554.37,
'C' : 523.25
}
module_parameters = ['address', 'pud', 'direction']
required_parameters = ['address', 'pud', 'direction']
def __init__(self, module_factory = None, db = None, id = None):
self.required_parameters = ['address']
super(PWM, self).__init__(module_factory, db, id)
self.address = int(self.address)
class Meta(Meta):
name = 'PWM Module'
description = 'Pulse Width Modulation Output'
def pwmStart(self):
RPIO_PWM.setup()
RPIO_PWM.init_channel(0)
RPi.GPIO.setmode(RPi.GPIO.BCM)
RPi.GPIO.setup(self.address, RPi.GPIO.OUT)
self.pwm = RPi.GPIO.PWM(self.address, self.frequency)
self.pwm.start(self.duty)
def pwmStop(self):
self.cleanup()
def setFrequency(self, frequency):
self.pwm.ChangeFrequency(frequency)
def setDuty(self, duty):
self.pwm.ChangeDutyCycle(duty)
def note(self, frequency, duration = .2):
self.setFrequency(frequency)
time.sleep(duration)
def playList(self, notes):
for note in notes :
self.note(self.notes[note])
def cleanup(self):
"""
cleanup method called from parent __del__ destructor.
"""
notice("Calling cleanup in PWM")
if self.pwm is not None :
self.pwm.stop()
def threadTarget(self, thread):
while thread.running == True :
if self.event_triggered is not None :
self.runTriggeredEvents()
self.runTimedUpdates(thread)
time.sleep(.05)
# Output state actions
class action_soundMario(object):
class Meta(Meta):
name = 'Mario - Song'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.note(outer.notes['hE'], .1)
outer.note(1, .05)
outer.note(outer.notes['hE'])
outer.note(1, .05)
outer.note(outer.notes['hE'], .15)
outer.note(1, .1)
outer.note(outer.notes['hC'], .1)
outer.note(1, .05)
outer.note(outer.notes['hE'], .15)
outer.note(1, .05)
outer.note(outer.notes['hG'], .1)
outer.note(1, .4)
outer.note(outer.notes['G'], .25)
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_soundMarioCoin(object):
class Meta(Meta):
name = 'Mario - Coin Grab'
description = 'Get the money!'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.note(outer.notes['hB'] * 2, .1)
outer.note(1, .025)
outer.note(outer.notes['hE'] * 2, .5)
while outer.duty > 0 :
outer.setDuty(outer.duty)
outer.duty = outer.duty - .5
time.sleep(.02)
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_soundMario1up(object):
class Meta(Meta):
name = 'Mario - 1UP'
description = 'Not dead yet!'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.note(outer.notes['hE'], .15)
outer.note(outer.notes['hG'], .15)
outer.note(outer.notes['hE'] * 2, .15)
outer.note(outer.notes['hC'] * 2, .15)
outer.note(outer.notes['hD'] * 2, .15)
outer.note(outer.notes['hG'] * 2, .15)
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_soundZeldaItem(object):
class Meta(Meta):
name = 'Zelda - Item Found'
description = 'Surprise!'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.note(outer.notes['hA'])
outer.note(outer.notes['hAs'])
outer.note(outer.notes['hB'])
outer.note(outer.notes['hC'], 1.5)
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_soundMockingJay(object):
class Meta(Meta):
name = 'Mocking Jay'
description = 'Hunger Games'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.note(outer.notes['hF'], .6)
outer.note(outer.notes['hA'] * 2, .6)
outer.note(outer.notes['hG'], .6)
outer.note(outer.notes['hC'], .6)
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_soundZeldaSecret(object):
class Meta(Meta):
name = 'Zelda - Secret Found'
description = 'Blowed that wall up!'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.playList(['hG', 'hFs', 'hDs', 'A', 'Gs', 'hE', 'hGs', 'hC']);
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_soundNotice(object):
class Meta(Meta):
name = 'Standard Notice'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.duty = 5
timer = 1
current = 0
while current < timer :
outer.setFrequency(outer.frequency)
current = current + .2
outer.frequency = outer.frequency + 400
time.sleep(.02)
outer.setFrequency(outer.frequency - 200)
time.sleep(.02)
outer.setFrequency(outer.frequency + 400)
time.sleep(.02)
outer.duty = 5
outer.pwmStop()
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_soundSiren(object):
class Meta(Meta):
name = 'Standard Siren'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.frequency = 969
timer = 2
current = 0
while current < timer :
outer.setFrequency(outer.frequency)
if outer.frequency == 800 :
outer.frequency = 969
else :
outer.frequency = 800
current = current + .5
time.sleep(.25)
except :
notice(traceback.format_exc())
outer.pwmStop()
class action_jingleBells(object):
class Meta(Meta):
name = 'Jingle Bells'
description = 'Poor Rendition'
def run(self, outer, *args, **kwargs):
outer.pwmStart()
try :
outer.note(outer.notes['hF'], )
outer.note(1)
outer.note(outer.notes['hF'])
outer.note(1)
outer.note(outer.notes['hF'], .4)
outer.note(1)
outer.note(outer.notes['hF'], )
outer.note(1)
outer.note(outer.notes['hF'])
outer.note(1)
outer.note(outer.notes['hF'], .4)
outer.note(1)
outer.note(outer.notes['hF'], )
outer.note(1)
outer.note(outer.notes['hA'] * 2)
outer.note(1)
outer.note(outer.notes['hD'])
outer.note(1)
outer.note(outer.notes['hE'])
outer.note(1)
outer.note(outer.notes['hF'], 1)
except :
notice(traceback.format_exc())
outer.pwmStop()
|
yarhajile/sven-daemon
|
Sven/Module/RaspberryPi/PWM.py
|
Python
|
gpl-2.0
| 8,241
|
import matplotlib
matplotlib.interactive(False)
matplotlib.use('WXAgg')
import wx
from pyoscope import PyOscope
from gui.mainframe import MainFrame
from gui.graphframe import GraphFrame
from gui.bindings import Binder
from wx.lib.mixins.inspection import InspectionMixin #DELME
class CPApp(wx.App, InspectionMixin):
def __init__(self, controller):
self.controller = controller
wx.App.__init__(self)
def OnInit(self):
self.Init() #DELME For InspectionMixin
# Make the MainFrame
fMainFrame = MainFrame(self.controller)
fMainFrame.Show()
self.SetTopWindow(fMainFrame)
self.fMainFrame = fMainFrame
# Make the GraphFrame
fGraphFrame = GraphFrame()
fGraphFrame.Show()
self.fGraphFrame = fGraphFrame
# Make the PyOscope instance
self.pyo = PyOscope(interactive=False)
# Make the Binder
self.binder = Binder(self.fMainFrame, self.fGraphFrame, self.pyo)
# Make references to the app for all the objects
self.fMainFrame.app = self
self.fGraphFrame.app = self
self.pyo.app = self
self.binder.app = self
# Bind the commands to the GUI
self.binder.bind()
return True
if __name__ == '__main__':
app = CPApp(0)
app.MainLoop()
|
jlazear/cp
|
gui/app.py
|
Python
|
gpl-2.0
| 1,332
|
# ===========================================================================
import swap
import os,cPickle,atpy
# ======================================================================
"""
NAME
io
PURPOSE
Useful general functions to streamline file input and output.
COMMENTS
FUNCTIONS
write_pickle(contents,filename)
read_pickle(filename,flavour): returns contents of pickle, or a new
instance if the file does not exist
write_list(sample,filename,item): writes a list of subject IDs
write_catalog(sample,filename,thresholds,kind): writes a catalog of candidates
write_config(filename,pars): writes a configuration file
rm(filename): silent file removal
BUGS
AUTHORS
This file is part of the Space Warps project, and is distributed
under the GPL v2 by the Space Warps Science Team.
http://spacewarps.org/
SWAP io is modelled on that written for the
Pangloss project, by Tom Collett (IoA) and Phil Marshall (Oxford).
https://github.com/drphilmarshall/Pangloss/blob/master/pangloss/io.py
HISTORY
2013-04-17 Started Marshall (Oxford)
"""
#=========================================================================
# Read in an instance of a class, of a given flavour. Create an instance
# if the file does not exist.
def read_pickle(filename,flavour):
try:
F = open(filename,"rb")
contents = cPickle.load(F)
F.close()
print "SWAP: read an old",contents,"from "+filename
except Exception:
if filename is None:
print "SWAP: no "+flavour+" filename supplied."
else:
print "SWAP: "+filename+" does not exist."
if flavour == 'bureau':
contents = swap.Bureau()
print "SWAP: made a new",contents
elif flavour == 'collection':
contents = swap.Collection()
print "SWAP: made a new",contents
elif flavour == 'database':
contents = None
return contents
# ----------------------------------------------------------------------------
# Write out an instance of a class to file.
def write_pickle(contents,filename):
F = open(filename,"wb")
cPickle.dump(contents,F,protocol=2)
F.close()
return
# ----------------------------------------------------------------------------
# Write out a simple list of subject IDs, of subjects to be retired.
def write_list(sample, filename, item=None):
count = 0
F = open(filename,'w')
for ID in sample.list():
subject = sample.member[ID]
string = None
if item == 'retired_subject':
if subject.state == 'inactive':
string = subject.ZooID
elif item == 'candidate':
if subject.kind == 'test' and subject.status == 'detected':
string = subject.location
elif item == 'true_positive':
if subject.kind == 'sim' and subject.status == 'detected':
string = subject.location
elif item == 'false_positive':
if subject.kind == 'dud' and subject.status == 'detected':
string = subject.location
elif item == 'true_negative':
if subject.kind == 'dud' and subject.status == 'rejected':
string = subject.location
elif item == 'false_negative':
if subject.kind == 'sim' and subject.status == 'rejected':
string = subject.location
# Write a new line:
if item is not None and string is not None:
F.write('%s\n' % string)
count += 1
F.close()
return count
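# Illustrative usage (filename is an assumption):
#
# count = write_list(sample, 'retire_these.txt', item='retired_subject')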
# ----------------------------------------------------------------------------
# Write out a multi-column catalog of high probability candidates.
def write_catalog(sample, filename, thresholds, kind='test'):
Nsubjects = 0
Nlenses = 0
# Open a new catalog and write a header:
F = open(filename,'w')
F.write('%s\n' % "# zooid P Nclass image")
for ID in sample.list():
subject = sample.member[ID]
P = subject.mean_probability
if P > thresholds['rejection'] and subject.kind == kind:
zooid = subject.ZooID
png = subject.location
Nclass = subject.exposure
# Write a new line:
F.write('%s %9.7f %s %s\n' % (zooid,P,str(Nclass),png))
Nlenses += 1
Nsubjects += 1
F.close()
return Nlenses,Nsubjects
# ----------------------------------------------------------------------------
# Make up a new filename, based on tonight's parameters:
def get_new_filename(pars,flavour):
# Usually, this is what we want filenames to look like:
stem = pars['trunk']+'_'+flavour
# Pickles are an exception though!
if flavour == 'bureau' or \
flavour == 'collection' or \
flavour == 'database':
stem = pars['survey']+'_'+flavour
ext = 'pickle'
folder = '.'
elif flavour == 'histories' or \
flavour == 'trajectories' or \
flavour == 'sample' or \
flavour == 'probabilities':
ext = 'png'
folder = pars['dir']
elif flavour == 'retire_these':
ext = 'txt'
folder = pars['dir']
elif flavour == 'candidate_catalog' or \
flavour == 'sim_catalog' or \
flavour == 'dud_catalog':
ext = 'txt'
folder = pars['dir']
elif flavour == 'candidates' or \
flavour == 'training_true_positives' or \
flavour == 'training_false_positives' or \
flavour == 'training_true_negatives' or \
flavour == 'training_false_negatives':
ext = 'txt'
folder = pars['dir']
else:
raise Exception("SWAP: io: unknown flavour "+flavour)
return folder+'/'+stem+'.'+ext
# ----------------------------------------------------------------------------
# Write configuration file given a dictionary of parameters:
def write_config(filename, pars):
F = open(filename,'w')
header = """
# ======================================================================
#
# Space Warps Analysis Pipeline configuration file.
#
# Lines starting with '#' are ignored; all other lines must contain a
# Name : Value pair to be read into the parameters dictionary.
#
# This file is part of the Space Warps project, and is distributed
# under the GPL v2 by the Space Warps Science Team.
# http://spacewarps.org/
#
# SWAP configuration is modelled on that written for the
# Pangloss project, by Tom Collett (IoA) and Phil Marshall (Oxford).
# https://github.com/drphilmarshall/Pangloss/blob/master/example/example.config
#
# ======================================================================
"""
F.write(header)
shortlist = ['survey', \
'start', \
'bureaufile', \
'samplefile', \
'stage', \
'verbose', \
'one_by_one', \
'report', \
'repickle', \
'initialPL', \
'initialPD', \
'agents_willing_to_learn', \
'a_few_at_the_start', \
'hasty', \
'skepticism', \
'use_marker_positions', \
'detection_threshold', \
'rejection_threshold', \
'dbspecies']
for keyword in shortlist:
F.write('\n')
F.write('%s: %s\n' % (keyword,str(pars[keyword])))
F.write('\n')
footer = '# ======================================================================'
F.write(footer)
F.close()
return
# ----------------------------------------------------------------------------
# Remove file, if it exists, stay quiet otherwise:
def rm(filename):
try:
os.remove(filename)
except OSError:
pass
return
# ======================================================================
|
zooniverse/SpaceWarps
|
analysis/swap/io.py
|
Python
|
gpl-2.0
| 8,164
|
import sys, DNS
while 1:
query = raw_input("Enter domain name:")
DNS.DiscoverNameServers()
reqobj = DNS.Request()
answerobj = reqobj.req(name = query, qtype = DNS.Type.ANY)
print type(answerobj)
#if not len(answerobj):
# print "Not found."
for item in answerobj.answers:
print "%-5s %s" % (item['typename'], item['data'])
|
sunzhongyu99/pythonscripts
|
dns-basic.py
|
Python
|
gpl-2.0
| 359
|
import random
import unittest
from sr.bitstr.generator import BitStringGenerator
from sr.bitstr.crossover import BitStringCrossover
class BitStringCrossoverTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
random.seed(10)
def setUp(self):
self.config = {
"max_population": 10,
"bitstring_generation": {
"genome_length": 10
},
"codons": [
"0000",
"0001",
"0010",
"0011",
"0100",
"0101",
"0110",
"0111",
"1000",
"1001",
"1011",
"1111"
],
"crossover": {
"method": "ONE_POINT_CROSSOVER",
"probability": 1.0
}
}
generator = BitStringGenerator(self.config)
self.bs1 = generator.generate_random_bitstr()
self.bs2 = generator.generate_random_bitstr()
self.crossover = BitStringCrossover(self.config)
def test_uniform_random_index(self):
i = self.crossover.uniform_random_index(self.bs1, self.bs2)
self.assertTrue(i is not None)
self.assertTrue(i < self.bs1.length)
self.assertTrue(i < self.bs2.length)
def test_one_point_crossover(self):
for i in range(10):
bs1_before = list(self.bs1.genome)
bs2_before = list(self.bs2.genome)
idx = random.randint(0, self.bs1.length)
print("BITSTR 1 [BEFORE]:", self.bs1.genome)
print("BITSTR 2 [BEFORE]:", self.bs2.genome)
print("INDEX:", idx)
self.crossover.one_point_crossover(self.bs1, self.bs2, idx)
bs1_after = list(self.bs1.genome)
bs2_after = list(self.bs2.genome)
print("BITSTR 1 [AFTER]:", self.bs1.genome)
print("BITSTR 2 [AFTER]:", self.bs2.genome)
# assert
self.assertFalse(bs1_before == bs1_after)
self.assertFalse(bs2_before == bs2_after)
# change it back to its original form
bstr_1_half = list(bs1_after[0:idx])
bstr_2_half = list(bs2_after[0:idx])
bs1_after[0:idx] = bstr_2_half
bs2_after[0:idx] = bstr_1_half
self.assertTrue(bs1_before == bs1_after)
self.assertTrue(bs2_before == bs2_after)
# def test_crossover(self):
# bs1_before = list(self.bs1.genome)
# bs2_before = list(self.bs2.genome)
#
# print("BITSTR 1 [BEFORE]:", self.bs1.genome)
# print("BITSTR 2 [BEFORE]:", self.bs2.genome)
#
# self.crossover.crossover(self.bs1, self.bs2)
#
# bs1_after = list(self.bs1.genome)
# bs2_after = list(self.bs2.genome)
#
# print("BITSTR 1 [AFTER]:", self.bs1.genome)
# print("BITSTR 2 [AFTER]:", self.bs2.genome)
#
# # assert
# self.assertFalse(bs1_before == bs1_after)
# self.assertFalse(bs2_before == bs2_after)
#
# self.config["crossover"]["method"] = "RANDOM_CROSSOVER"
# self.assertRaises(
# RuntimeError,
# self.crossover.crossover,
# self.bs1,
# self.bs2
# )
#
# def test_to_dict(self):
# self.crossover.crossover(self.bs1, self.bs2)
# cross_dict = self.crossover.to_dict()
#
# # import pprint
# # pprint.pprint(cross_dict)
#
# self.assertEqual(cross_dict["method"], "ONE_POINT_CROSSOVER")
# self.assertTrue(cross_dict["index"] is not None)
# self.assertTrue(cross_dict["crossover_probability"] is not None)
# self.assertTrue(cross_dict["random_probability"] is not None)
# self.assertTrue(cross_dict["crossovered"] is not None)
# self.assertEqual(len(cross_dict["before_crossover"]), 2)
# self.assertEqual(len(cross_dict["after_crossover"]), 2)
|
chutsu/sr
|
sr/tests/bitstr/crossover_tests.py
|
Python
|
gpl-2.0
| 4,011
|
from builtins import map
from builtins import str
import subprocess
import os
import glob
from timemanager.utils.tmlogging import info, error
from timemanager.utils.os_util import get_os, WINDOWS
from timemanager.conf import FRAME_FILENAME_PREFIX, FRAME_EXTENSION
IMAGEMAGICK = "convert"
FFMPEG = "ffmpeg"
DEFAULT_ANIMATION_NAME = "animation.gif"
DEFAULT_FRAME_PATTERN = "{}*.{}".format(FRAME_FILENAME_PREFIX, FRAME_EXTENSION)
file_dir = os.path.dirname(os.path.realpath(__file__))
def can_animate():
return is_in_path(IMAGEMAGICK)
def can_export_video():
return is_in_path(FFMPEG)
def is_in_path(exec_name):
try:
ret = subprocess.check_call([exec_name, "-version"])
return ret == 0
except Exception:
return False
def clear_frames(out_folder, frame_pattern=DEFAULT_FRAME_PATTERN):
all_frames = glob.glob(os.path.join(out_folder, frame_pattern))
list(map(os.remove, all_frames))
def make_animation(out_folder, delay_millis, frame_pattern=DEFAULT_FRAME_PATTERN):
if not can_animate():
error("Imagemagick is not in path")
raise Exception("Imagemagick is not in path. Please install ImageMagick!")
out_file = os.path.join(out_folder, DEFAULT_ANIMATION_NAME)
all_frames = glob.glob(os.path.join(out_folder, frame_pattern))
if len(all_frames) == 0:
msg = "Couldn't find any frames with pattern {} in folder {} to animate".format(frame_pattern, out_folder)
error(msg)
raise Exception(msg)
all_frames.sort()
fps = 1000 / delay_millis
args = [IMAGEMAGICK, "-delay", "1x" + str(fps)] + all_frames + [out_file]
ret = subprocess.check_call(args)
if (ret != 0):
msg = "Something went wrong creating the animated gif from frames"
error(msg)
raise Exception(msg)
info("Exported {} frames to gif {} (call :{})".format(len(all_frames), out_file, args))
return out_file
# ffmpeg -f image2 -r 1 -i frame%02d.png -vcodec libx264 -vf fps=25 -pix_fmt yuv420p out.mp4
# http://unix.stackexchange.com/questions/68770/converting-png-frames-to-video-at-1-fps
def make_video(out_folder, digits):
outfile = os.path.join(out_folder, "out.mp4")
# something like frame%03d.png as expected by ffmpeg
frame_pattern = os.path.join(out_folder, "{}%0{}d.{}".format(FRAME_FILENAME_PREFIX, digits, FRAME_EXTENSION))
# TODO: Make this configurable (when understanding how it works)
if get_os() == WINDOWS:
video_script = os.path.join(file_dir, "video.bat")
subprocess.check_call([video_script, frame_pattern, outfile])
else:
video_script = os.path.join(file_dir, "video.sh")
subprocess.check_call(["sh", video_script, frame_pattern, outfile])
info("Exported video to {}".format(outfile))
return outfile
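# Usage sketch (illustrative, not from the original module): assuming frames
# matching DEFAULT_FRAME_PATTERN already exist in '/tmp/frames':
#   if can_animate():
#       gif_path = make_animation('/tmp/frames', delay_millis=100)
#   if can_export_video():
#       mp4_path = make_video('/tmp/frames', digits=3)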
|
anitagraser/TimeManager
|
animation/animate.py
|
Python
|
gpl-2.0
| 2,804
|
#!/usr/bin/python
from __future__ import division
import matplotlib, sys
if not 'show' in sys.argv:
matplotlib.use('Agg')
from pylab import *
figure(figsize=(7,4))
eos = loadtxt("../../papers/hughes-saft/figs/equation-of-state.dat")
eos_exp = loadtxt("../../papers/hughes-saft/figs/experimental-equation-of-state.dat")
gpermL=4.9388942e-3/0.996782051315 # conversion from atomic units to mass density
plot(eos[:,2]/gpermL, eos[:,0], 'b-', label='SAFT')
plot(eos[:,3]/gpermL, eos[:,0], 'b-')
plot(eos_exp[:,2]/gpermL, eos_exp[:,0], 'r--', label='experiment')
plot(eos_exp[:,3]/gpermL, eos_exp[:,0], 'r--')
legend(loc='best').draw_frame(False)
ylim(273, 710)
xlim(0, 1.05)
xlabel('$n$ (g/mL)')
ylabel('$T$ (K)')
tight_layout()
savefig('figs/equation-of-state.pdf', transparent=True)
show()
|
droundy/deft
|
talks/colloquium/figs/equation-of-state.py
|
Python
|
gpl-2.0
| 799
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
def string_to_list(string):
return [int(i) for i in string.split(',')]
def get_data(path):
f = open(path)
f.readline()
data = []
for line in f:
data.append(string_to_list(line))
data = list(zip(*data))
subtract = 3
means = [sum(sorted(d)[:len(d)-subtract]) / (len(d)-subtract) for d in data]
return data, means
values = {
'map_size': [20, 15, 25, 30],
'num_areas': [100, 70, 150],
'num_colours': [8, 5, 12],
'num_walls': [10, 7, 15],
'num_particles': [100, 70, 140, 200]
}
names = ['map_size', 'num_areas', 'num_colours', 'num_walls', 'num_particles']
folder = 'data/'
legends = ['R1', 'R2']
colors = ['b', 'r']
plot_number = 1
plt.subplot(2, 3, plot_number)
data, base_means = get_data(folder+'base_case')
for i in range(len(data)):
plt.plot(
[i+1 for k in range(len(data[i]))],
data[i],
'.',
color=colors[i],
label=legends[i]
)
plt.plot(
i+1,
base_means[i],
'x',
color=colors[i],
markersize=10
)
plt.xlim((0, len(data)+1))
plt.tick_params(
axis='x',
which='both',
bottom='off',
top='off',
labelbottom='off'
)
plt.legend(loc='upper left')
plt.title('base_case')
for name in names:
data = [base_means]
for i in range(1, len(values[name])):
value = values[name][i]
_, means = get_data(folder+name+str(value))
if value < values[name][0]:
data.insert(0, means)
else:
data.append(means)
data = list(zip(*data))
plot_number += 1
plt.subplot(2, 3, plot_number)
for i in range(len(data)):
plt.plot(
sorted(values[name]),
data[i],
color=colors[i],
marker='+'
)
plt.ylim((0, 200))
plt.title(name)
plt.show()
|
Lebuin/project-robotica
|
plot_part1.py
|
Python
|
gpl-2.0
| 1,984
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from distutils.core import setup
import py2exe
import time
import os
import subprocess
# print nice messages
def LogPrint(Message, Type = "INFO"):
print("[{DateOfMessage}] - [{TypeOfMessage}] - "
"{Message}".format(
DateOfMessage = time.strftime("%d.%m.%Y %H:%M:%S", time.gmtime()),
TypeOfMessage = Type.upper(),
Message = Message
)
)
# Set the compilation timestamp
DateOfCompiling = time.strftime("%Y%m%d%H%M%S", time.gmtime())
# Modification of the gobjects.py
if os.path.isfile("gobjects.py"):
os.rename("gobjects.py", "GlobalObjects_old.py")
with open("GlobalObjects_old.py") as FileInput:
with open("gobjects.py", "w") as FileOutPut:
for Line in FileInput:
if Line.startswith("__release__"):
FileOutPut.write("__release__ = {Date}\n".format(Date = DateOfCompiling))
else:
FileOutPut.write(Line)
if os.path.isfile("GlobalObjects_old.py"):
os.remove("GlobalObjects_old.py")
# Modification of the docs/conf.py
File = "../docs/conf.py"
FileOld = "../docs/conf_old.py"
if os.path.isfile(File):
os.rename(File, FileOld)
with open(FileOld) as FileInput:
with open(File, "w") as FileOutPut:
for Line in FileInput:
if Line.startswith("release"):
FileOutPut.write("release = '{Date}'\n".format(Date = DateOfCompiling))
else:
FileOutPut.write(Line)
if os.path.isfile(FileOld):
os.remove(FileOld)
# This module will be modified before the import.
import gobjects
LogPrint("release build: " + str(gobjects.__release__))
# sets the current directory into a variable
DirectoryOfSetup = os.path.abspath("./")
#creates the path to the Directory of the newest build
DirectoryToBuild = os.path.abspath("..\\Build") + "\\{Date}".format(Date=DateOfCompiling)
if not os.path.exists(DirectoryToBuild):
os.makedirs(DirectoryToBuild, exist_ok = True)
LogPrint("Building language files")
# Get the stupid __pycache__ folder
LanguageFilesArray = [i for i in os.listdir(".\\language") if i != "__pycache__" and i != "language.py" and i != "__init__.py"]
# creating the directories for the language files
LogPrint("creating the directories for the language files")
if not os.path.exists(DirectoryToBuild+"\\language"):
os.mkdir(DirectoryToBuild+"\\language")
for i in LanguageFilesArray:
DirectoryOfLanguage = DirectoryToBuild+"\\language\\{NameOfLanguage}".format(NameOfLanguage=i)
if not os.path.exists(DirectoryOfLanguage):
os.mkdir(DirectoryOfLanguage)
DirectoryOfLanguageSub = DirectoryOfLanguage+"\\LC_MESSAGES"
if not os.path.exists(DirectoryOfLanguageSub):
os.mkdir(DirectoryOfLanguageSub)
LogPrint("Compiling language file {Name}".format(Name = i))
#compiling
subprocess.call(["py", "C:\\Python35\\Tools\\i18n\\msgfmt.py",
"-o", "{OutputFile}".format(OutputFile=DirectoryOfLanguageSub+"\\Telegram.mo"),
"{InputFile}".format(
InputFile="{DirectoryOfSetup}\\Language\\"
"{Language}\\LC_MESSAGES\\Telegram.po".format(
DirectoryOfSetup=DirectoryOfSetup,
Language=i)
)
])
#start compiling the source code
setup(
console=[{
# Main Python script
"script": "main.py",
# Icon to embed into the PE file.
"icon_resources": [(0, "icons\\photo_2015-09-03_20-15-23.ico")],
# The application name
"dest_base" : gobjects.__AppName__
}],
zipfile = "library.zip",
options = {
"py2exe":{
"dist_dir":DirectoryToBuild,
"optimize":0,
"includes": ["mysql.connector.locales.eng"],
"compressed":False,
"xref":True,
"bundle_files":3,
"skip_archive": False
}
}
)
|
TheRedFireFox/AnimeSubBot
|
src/setup.py
|
Python
|
gpl-2.0
| 4,486
|
from . import GenericNameSet, GenericXMLParser, GenericLeafNode, GenericTerm
class GML(GenericXMLParser):
def __init__(self, xml_gml):
self.xml_gml = xml_gml
self.attributes = self.get_attributes_as_dict(self.xml_gml)
self.lang = self.get_attribute_from_dict('xml:lang', self.xml_gml)
self.Point = self.get_point()
self.LineString = self.get_line_string()
self.Polygon = self.get_polygon()
def get_point(self):
pass
def get_line_string(self):
pass
def get_polygon(self):
pass
class Place(GenericXMLParser):
def __init__(self, xml_place):
self.xml_place = xml_place
self.attributes = self.get_attributes_as_dict(self.xml_place)
self.politicalEntity = self.get_attribute('politicalEntity')
self.geographicalEntity = self.get_attribute('geographicalEntity')
self.placeID = self.get_place_ids()
self.namePlaceSet = self.get_name_place_sets()
self.gml = self.get_gml()
self.partOfPlace = self.get_part_of_place()
self.placeClassification = self.get_place_classification()
def get_attribute(self, attribute):
return self.get_attribute_from_dict(attribute, self.attributes)
def get_place_ids(self):
return self.repeatable_node(self.xml_place, 'lido:placeID', GenericLeafNode)
def get_name_place_sets(self):
return self.repeatable_node(self.xml_place, 'lido:namePlaceSet', GenericNameSet)
def get_part_of_place(self):
return self.repeatable_node(self.xml_place, 'lido:partOfPlace', Place)
def get_gml(self):
print("Warning: GML is not implemented (yet)")
return self.repeatable_node(self.xml_place, 'gml', GML)
def get_place_classification(self):
return self.repeatable_node(self.xml_place, 'lido:placeClassification', GenericTerm)
class EventPlace(GenericXMLParser):
def __init__(self, xml_event_place):
self.xml_event_place = xml_event_place
self.attributes = self.get_attributes_as_dict(self.xml_event_place)
self.type = self.get_attribute('type')
self.sortorder = self.get_attribute('sortorder')
self.displayPlace = self.get_display_place()
self.place = self.get_place()
def get_attribute(self, attribute):
return self.get_attribute_from_dict(attribute, self.attributes)
def get_display_place(self):
return self.repeatable_node(self.xml_event_place, 'lido:displayPlace', GenericNameSet)
def get_place(self):
places = self.xpath(self.xml_event_place, 'lido:place')
return [Place(places.pop())]
|
pieterdp/LidoParser
|
parser/lido_elements/event_sub/event_place.py
|
Python
|
gpl-2.0
| 2,647
|
import yaml
import yaml.constructor
import collections  # needed unqualified for the collections.Mapping check in __eq__
from collections import OrderedDict, MutableMapping
class OrderedDictYAMLLoader(yaml.Loader):
"""
A YAML loader that loads mappings into ordered dictionaries.
"""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc,
key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
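# Usage sketch (editor's addition, not in the original source): load a YAML
# mapping while preserving key order:
#   doc = yaml.load("a: 1\nb: 2\nc: 3", Loader=OrderedDictYAMLLoader)
#   list(doc.keys())  # -> ['a', 'b', 'c']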
# NOTE: Taken from https://github.com/kennethreitz/requests
# LICENCE: Apache2
class CaseInsensitiveDict(MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``items()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like items(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
|
DMS-Aus/Roam
|
src/roam/structs.py
|
Python
|
gpl-2.0
| 4,030
|
import unittest
import sys
from stackless import *
from support import StacklessTestCase
#test that thread state is restored properly
class TestExceptionState(StacklessTestCase):
def Tasklet(self):
try:
1/0
except Exception, e:
self.ran = True
ei = sys.exc_info()
self.assertEqual(ei[0], ZeroDivisionError)
schedule()
ei = sys.exc_info()
self.assertEqual(ei[0], ZeroDivisionError)
self.assertTrue("by zero" in str(ei[1]))
def testExceptionState(self):
t = tasklet(self.Tasklet)
sys.exc_clear()
t()
self.ran = False
t.run()
self.assertTrue(self.ran)
ei = sys.exc_info()
self.assertEqual(ei, (None,)*3)
t.run()
ei = sys.exc_info()
self.assertEqual(ei, (None,)*3)
class TestTracingState(StacklessTestCase):
def __init__(self, *args):
StacklessTestCase.__init__(self, *args)
self.trace = []
def Callback(self, *args):
self.trace.append(args)
def foo(self):
pass
def Tasklet(self):
sys.setprofile(self.Callback)
self.foo()
n = len(self.trace)
self.foo()
n2 = len(self.trace)
self.assertGreater(n2, n)
schedule()
self.foo()
n = len(self.trace)
self.foo()
n2 = len(self.trace)
self.assertGreater(n2, n)
def testTracingState(self):
t = tasklet(self.Tasklet)
t()
t.run()
self.foo()
n = len(self.trace)
self.foo()
n2 = len(self.trace)
self.assertEqual(n, n2)
t.run()
self.foo()
n = len(self.trace)
self.foo()
n2 = len(self.trace)
self.assertEqual(n, n2)
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
sys.argv.append('-v')
unittest.main()
|
newerthcom/savagerebirth
|
libs/python-2.72/Stackless/unittests/test_tstate.py
|
Python
|
gpl-2.0
| 1,948
|
"""SCons.Tool
SCons tool selection.
This looks for modules that define a callable object that can modify
a construction environment as appropriate for a given tool (or tool
chain).
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "tool specification" in an arbitrary callable function. No
one needs to use or tie in to this subsystem in order to roll their own
tool definition.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/__init__.py rel_2.3.5:3329:275e75118ad4 2015/06/20 11:18:26 bdbaddog"
import imp
import sys
import re
import os
import shutil
import SCons.Builder
import SCons.Errors
import SCons.Node.FS
import SCons.Scanner
import SCons.Scanner.C
import SCons.Scanner.D
import SCons.Scanner.LaTeX
import SCons.Scanner.Prog
DefaultToolpath=[]
CScanner = SCons.Scanner.C.CScanner()
DScanner = SCons.Scanner.D.DScanner()
LaTeXScanner = SCons.Scanner.LaTeX.LaTeXScanner()
PDFLaTeXScanner = SCons.Scanner.LaTeX.PDFLaTeXScanner()
ProgramScanner = SCons.Scanner.Prog.ProgramScanner()
SourceFileScanner = SCons.Scanner.Base({}, name='SourceFileScanner')
CSuffixes = [".c", ".C", ".cxx", ".cpp", ".c++", ".cc",
".h", ".H", ".hxx", ".hpp", ".hh",
".F", ".fpp", ".FPP",
".m", ".mm",
".S", ".spp", ".SPP", ".sx"]
DSuffixes = ['.d']
IDLSuffixes = [".idl", ".IDL"]
LaTeXSuffixes = [".tex", ".ltx", ".latex"]
for suffix in CSuffixes:
SourceFileScanner.add_scanner(suffix, CScanner)
for suffix in DSuffixes:
SourceFileScanner.add_scanner(suffix, DScanner)
# FIXME: what should be done here? Two scanners scan the same extensions,
# but look for different files, e.g., "picture.eps" vs. "picture.pdf".
# The builders for DVI and PDF explicitly reference their scanners
# I think that means this is not needed???
for suffix in LaTeXSuffixes:
SourceFileScanner.add_scanner(suffix, LaTeXScanner)
SourceFileScanner.add_scanner(suffix, PDFLaTeXScanner)
class Tool(object):
def __init__(self, name, toolpath=[], **kw):
self.name = name
self.toolpath = toolpath + DefaultToolpath
# remember these so we can merge them into the call
self.init_kw = kw
module = self._tool_module()
self.generate = module.generate
self.exists = module.exists
if hasattr(module, 'options'):
self.options = module.options
def _tool_module(self):
        # TODO: Interchange zipimport with normal initialization for better error reporting
oldpythonpath = sys.path
sys.path = self.toolpath + sys.path
try:
try:
file, path, desc = imp.find_module(self.name, self.toolpath)
try:
return imp.load_module(self.name, file, path, desc)
finally:
if file:
file.close()
except ImportError, e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
except ImportError:
pass
else:
for aPath in self.toolpath:
try:
importer = zipimport.zipimporter(aPath)
return importer.load_module(self.name)
except ImportError, e:
pass
finally:
sys.path = oldpythonpath
full_name = 'SCons.Tool.' + self.name
try:
return sys.modules[full_name]
except KeyError:
try:
smpath = sys.modules['SCons.Tool'].__path__
try:
file, path, desc = imp.find_module(self.name, smpath)
module = imp.load_module(full_name, file, path, desc)
setattr(SCons.Tool, self.name, module)
if file:
file.close()
return module
except ImportError, e:
if str(e)!="No module named %s"%self.name:
raise SCons.Errors.EnvironmentError(e)
try:
import zipimport
importer = zipimport.zipimporter( sys.modules['SCons.Tool'].__path__[0] )
module = importer.load_module(full_name)
setattr(SCons.Tool, self.name, module)
return module
except ImportError, e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
except ImportError, e:
m = "No tool named '%s': %s" % (self.name, e)
raise SCons.Errors.EnvironmentError(m)
def __call__(self, env, *args, **kw):
if self.init_kw is not None:
# Merge call kws into init kws;
# but don't bash self.init_kw.
if kw is not None:
call_kw = kw
kw = self.init_kw.copy()
kw.update(call_kw)
else:
kw = self.init_kw
env.Append(TOOLS = [ self.name ])
if hasattr(self, 'options'):
import SCons.Variables
if 'options' not in env:
from SCons.Script import ARGUMENTS
env['options']=SCons.Variables.Variables(args=ARGUMENTS)
opts=env['options']
self.options(opts)
opts.Update(env)
self.generate(env, *args, **kw)
def __str__(self):
return self.name
##########################################################################
# Create common executable program / library / object builders
def createProgBuilder(env):
"""This is a utility function that creates the Program
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
program = env['BUILDERS']['Program']
except KeyError:
import SCons.Defaults
program = SCons.Builder.Builder(action = SCons.Defaults.LinkAction,
emitter = '$PROGEMITTER',
prefix = '$PROGPREFIX',
suffix = '$PROGSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'Object',
target_scanner = ProgramScanner)
env['BUILDERS']['Program'] = program
return program
def createStaticLibBuilder(env):
"""This is a utility function that creates the StaticLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
static_lib = env['BUILDERS']['StaticLibrary']
except KeyError:
action_list = [ SCons.Action.Action("$ARCOM", "$ARCOMSTR") ]
if env.Detect('ranlib'):
ranlib_action = SCons.Action.Action("$RANLIBCOM", "$RANLIBCOMSTR")
action_list.append(ranlib_action)
static_lib = SCons.Builder.Builder(action = action_list,
emitter = '$LIBEMITTER',
prefix = '$LIBPREFIX',
suffix = '$LIBSUFFIX',
src_suffix = '$OBJSUFFIX',
src_builder = 'StaticObject')
env['BUILDERS']['StaticLibrary'] = static_lib
env['BUILDERS']['Library'] = static_lib
return static_lib
def VersionShLibLinkNames(version, libname, env):
"""Generate names of symlinks to the versioned shared library"""
Verbose = False
platform = env.subst('$PLATFORM')
shlib_suffix = env.subst('$SHLIBSUFFIX')
shlink_flags = SCons.Util.CLVar(env.subst('$SHLINKFLAGS'))
linknames = []
if version.count(".") != 2:
# We need a version string of the form x.y.z to proceed
# Several changes need to be made to support versions like x.y
raise ValueError
if platform == 'darwin':
        # For libfoo.x.y.z.dylib, linknames libfoo.dylib
suffix_re = re.escape('.' + version + shlib_suffix)
linkname = re.sub(suffix_re, shlib_suffix, libname)
if Verbose:
print "VersionShLibLinkNames: linkname = ",linkname
linknames.append(linkname)
elif platform == 'posix' or platform == 'sunos':
if sys.platform.startswith('openbsd'):
# OpenBSD uses x.y shared library versioning numbering convention
# and doesn't use symlinks to backwards-compatible libraries
return []
# For libfoo.so.x.y.z, linknames libfoo.so libfoo.so.x.y libfoo.so.x
suffix_re = re.escape(shlib_suffix + '.' + version)
# First linkname has no version number
linkname = re.sub(suffix_re, shlib_suffix, libname)
if Verbose:
print "VersionShLibLinkNames: linkname = ",linkname
linknames.append(linkname)
versionparts = version.split('.')
major_name = linkname + "." + versionparts[0]
minor_name = major_name + "." + versionparts[1]
#Only add link for major_name
#for linkname in [major_name, minor_name]:
for linkname in [major_name, ]:
if Verbose:
print "VersionShLibLinkNames: linkname ",linkname, ", target ",libname
linknames.append(linkname)
# note: no Windows case here (win32 or cygwin);
# MSVC doesn't support this type of versioned shared libs.
# (could probably do something for MinGW though)
return linknames
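# Worked example (editor's note, not in the SCons source): on 'posix', for
# libname 'libfoo.so.1.2.3' and version '1.2.3' this returns
# ['libfoo.so', 'libfoo.so.1'] (the unversioned name plus the major-only link).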
def VersionedSharedLibrary(target = None, source= None, env=None):
"""Build a shared library. If the environment has SHLIBVERSION
defined make a versioned shared library and create the appropriate
symlinks for the platform we are on"""
Verbose = False
try:
version = env.subst('$SHLIBVERSION')
except KeyError:
version = None
# libname includes the version number if one was given
libname = getattr(target[0].attributes, 'shlibname', target[0].name)
platform = env.subst('$PLATFORM')
shlib_suffix = env.subst('$SHLIBSUFFIX')
shlink_flags = SCons.Util.CLVar(env.subst('$SHLINKFLAGS'))
if Verbose:
print "VersionShLib: libname = ",libname
print "VersionShLib: platform = ",platform
print "VersionShLib: shlib_suffix = ",shlib_suffix
print "VersionShLib: target = ",str(target[0])
if version:
# set the shared library link flags
if platform == 'posix':
shlink_flags += [ '-Wl,-Bsymbolic' ]
# OpenBSD doesn't usually use SONAME for libraries
if not sys.platform.startswith('openbsd'):
# continue setup of shlink flags for all other POSIX systems
suffix_re = re.escape(shlib_suffix + '.' + version)
(major, age, revision) = version.split(".")
# soname will have only the major version number in it
soname = re.sub(suffix_re, shlib_suffix, libname) + '.' + major
shlink_flags += [ '-Wl,-soname=%s' % soname ]
if Verbose:
print " soname ",soname,", shlink_flags ",shlink_flags
elif platform == 'sunos':
suffix_re = re.escape(shlib_suffix + '.' + version)
(major, age, revision) = version.split(".")
soname = re.sub(suffix_re, shlib_suffix, libname) + '.' + major
shlink_flags += [ '-h', soname ]
elif platform == 'cygwin':
shlink_flags += [ '-Wl,-Bsymbolic',
'-Wl,--out-implib,${TARGET.base}.a' ]
elif platform == 'darwin':
shlink_flags += [ '-current_version', '%s' % version,
'-compatibility_version', '%s' % version,
'-undefined', 'dynamic_lookup' ]
if Verbose:
print "VersionShLib: shlink_flags = ",shlink_flags
envlink = env.Clone()
envlink['SHLINKFLAGS'] = shlink_flags
else:
envlink = env
result = SCons.Defaults.ShLinkAction(target, source, envlink)
if version:
# here we need the full pathname so the links end up in the right directory
libname = getattr(target[0].attributes, 'shlibpath', target[0].path)
if Verbose:
print "VerShLib: target lib is = ", libname
print "VerShLib: name is = ", target[0].name
print "VerShLib: dir is = ", target[0].dir.path
linknames = VersionShLibLinkNames(version, libname, env)
if Verbose:
print "VerShLib: linknames ",linknames
# Here we just need the file name w/o path as the target of the link
lib_ver = getattr(target[0].attributes, 'shlibname', target[0].name)
# make symlink of adjacent names in linknames
for count in range(len(linknames)):
linkname = linknames[count]
if count > 0:
try:
os.remove(lastlinkname)
except:
pass
os.symlink(os.path.basename(linkname),lastlinkname)
if Verbose:
print "VerShLib: made sym link of %s -> %s" % (lastlinkname,linkname)
lastlinkname = linkname
# finish chain of sym links with link to the actual library
if len(linknames)>0:
try:
os.remove(lastlinkname)
except:
pass
os.symlink(lib_ver,lastlinkname)
if Verbose:
print "VerShLib: made sym link of %s -> %s" % (linkname, lib_ver)
return result
# Fix http://scons.tigris.org/issues/show_bug.cgi?id=2903 :
# Ensure we still depend on SCons.Defaults.ShLinkAction command line which is $SHLINKCOM.
# This was tricky because we don't want changing LIBPATH to cause a rebuild, but
# changing other link args should. LIBPATH has $( ... $) around it but until this
# fix, when the varlist was added to the build sig those ignored parts weren't getting
# ignored.
ShLibAction = SCons.Action.Action(VersionedSharedLibrary, None, varlist=['SHLINKCOM'])
def createSharedLibBuilder(env):
"""This is a utility function that creates the SharedLibrary
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
shared_lib = env['BUILDERS']['SharedLibrary']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
ShLibAction ]
shared_lib = SCons.Builder.Builder(action = action_list,
emitter = "$SHLIBEMITTER",
prefix = '$SHLIBPREFIX',
suffix = '$SHLIBSUFFIX',
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['SharedLibrary'] = shared_lib
return shared_lib
def createLoadableModuleBuilder(env):
"""This is a utility function that creates the LoadableModule
Builder in an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
ld_module = env['BUILDERS']['LoadableModule']
except KeyError:
import SCons.Defaults
action_list = [ SCons.Defaults.SharedCheck,
SCons.Defaults.LdModuleLinkAction ]
ld_module = SCons.Builder.Builder(action = action_list,
emitter = "$LDMODULEEMITTER",
prefix = '$LDMODULEPREFIX',
suffix = '$LDMODULESUFFIX',
target_scanner = ProgramScanner,
src_suffix = '$SHOBJSUFFIX',
src_builder = 'SharedObject')
env['BUILDERS']['LoadableModule'] = ld_module
return ld_module
def createObjBuilders(env):
"""This is a utility function that creates the StaticObject
and SharedObject Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (StaticObject, SharedObject)
"""
try:
static_obj = env['BUILDERS']['StaticObject']
except KeyError:
static_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$OBJPREFIX',
suffix = '$OBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['StaticObject'] = static_obj
env['BUILDERS']['Object'] = static_obj
try:
shared_obj = env['BUILDERS']['SharedObject']
except KeyError:
shared_obj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$SHOBJPREFIX',
suffix = '$SHOBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['SharedObject'] = shared_obj
return (static_obj, shared_obj)
def createCFileBuilders(env):
"""This is a utility function that creates the CFile/CXXFile
Builders in an Environment if they
are not there already.
If they are there already, we return the existing ones.
This is a separate function because soooo many Tools
use this functionality.
The return is a 2-tuple of (CFile, CXXFile)
"""
try:
c_file = env['BUILDERS']['CFile']
except KeyError:
c_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CFILESUFFIX'})
env['BUILDERS']['CFile'] = c_file
env.SetDefault(CFILESUFFIX = '.c')
try:
cxx_file = env['BUILDERS']['CXXFile']
except KeyError:
cxx_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$CXXFILESUFFIX'})
env['BUILDERS']['CXXFile'] = cxx_file
env.SetDefault(CXXFILESUFFIX = '.cc')
return (c_file, cxx_file)
##########################################################################
# Create common Java builders
def CreateJarBuilder(env):
try:
java_jar = env['BUILDERS']['Jar']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR')
java_jar = SCons.Builder.Builder(action = jar_com,
suffix = '$JARSUFFIX',
src_suffix = '$JAVACLASSSUFIX',
src_builder = 'JavaClassFile',
source_factory = fs.Entry)
env['BUILDERS']['Jar'] = java_jar
return java_jar
def CreateJavaHBuilder(env):
try:
java_javah = env['BUILDERS']['JavaH']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
java_javah_com = SCons.Action.Action('$JAVAHCOM', '$JAVAHCOMSTR')
java_javah = SCons.Builder.Builder(action = java_javah_com,
src_suffix = '$JAVACLASSSUFFIX',
target_factory = fs.Entry,
source_factory = fs.File,
src_builder = 'JavaClassFile')
env['BUILDERS']['JavaH'] = java_javah
return java_javah
def CreateJavaClassFileBuilder(env):
try:
java_class_file = env['BUILDERS']['JavaClassFile']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_file = SCons.Builder.Builder(action = javac_com,
emitter = {},
#suffix = '$JAVACLASSSUFFIX',
src_suffix = '$JAVASUFFIX',
src_builder = ['JavaFile'],
target_factory = fs.Entry,
source_factory = fs.File)
env['BUILDERS']['JavaClassFile'] = java_class_file
return java_class_file
def CreateJavaClassDirBuilder(env):
try:
java_class_dir = env['BUILDERS']['JavaClassDir']
except KeyError:
fs = SCons.Node.FS.get_default_fs()
javac_com = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
java_class_dir = SCons.Builder.Builder(action = javac_com,
emitter = {},
target_factory = fs.Dir,
source_factory = fs.Dir)
env['BUILDERS']['JavaClassDir'] = java_class_dir
return java_class_dir
def CreateJavaFileBuilder(env):
try:
java_file = env['BUILDERS']['JavaFile']
except KeyError:
java_file = SCons.Builder.Builder(action = {},
emitter = {},
suffix = {None:'$JAVASUFFIX'})
env['BUILDERS']['JavaFile'] = java_file
env['JAVASUFFIX'] = '.java'
return java_file
class ToolInitializerMethod(object):
"""
This is added to a construction environment in place of a
method(s) normally called for a Builder (env.Object, env.StaticObject,
etc.). When called, it has its associated ToolInitializer
object search the specified list of tools and apply the first
one that exists to the construction environment. It then calls
whatever builder was (presumably) added to the construction
environment in place of this particular instance.
"""
def __init__(self, name, initializer):
"""
Note: we store the tool name as __name__ so it can be used by
the class that attaches this to a construction environment.
"""
self.__name__ = name
self.initializer = initializer
def get_builder(self, env):
"""
Returns the appropriate real Builder for this method name
after having the associated ToolInitializer object apply
the appropriate Tool module.
"""
builder = getattr(env, self.__name__)
self.initializer.apply_tools(env)
builder = getattr(env, self.__name__)
if builder is self:
# There was no Builder added, which means no valid Tool
# for this name was found (or possibly there's a mismatch
# between the name we were called by and the Builder name
# added by the Tool module).
return None
self.initializer.remove_methods(env)
return builder
def __call__(self, env, *args, **kw):
"""
"""
builder = self.get_builder(env)
if builder is None:
return [], []
return builder(*args, **kw)
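# Editor's note (not in the SCons source): the net effect is lazy tool loading;
# the first call to e.g. env.Object(...) applies the matching Tool module,
# which installs the real Builder, and the call is then delegated to it.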
class ToolInitializer(object):
"""
A class for delayed initialization of Tools modules.
Instances of this class associate a list of Tool modules with
a list of Builder method names that will be added by those Tool
modules. As part of instantiating this object for a particular
construction environment, we also add the appropriate
ToolInitializerMethod objects for the various Builder methods
that we want to use to delay Tool searches until necessary.
"""
def __init__(self, env, tools, names):
if not SCons.Util.is_List(tools):
tools = [tools]
if not SCons.Util.is_List(names):
names = [names]
self.env = env
self.tools = tools
self.names = names
self.methods = {}
for name in names:
method = ToolInitializerMethod(name, self)
self.methods[name] = method
env.AddMethod(method)
def remove_methods(self, env):
"""
Removes the methods that were added by the tool initialization
so we no longer copy and re-bind them when the construction
environment gets cloned.
"""
for method in self.methods.values():
env.RemoveMethod(method)
def apply_tools(self, env):
"""
Searches the list of associated Tool modules for one that
exists, and applies that to the construction environment.
"""
for t in self.tools:
tool = SCons.Tool.Tool(t)
if tool.exists(env):
env.Tool(tool)
return
# If we fall through here, there was no tool module found.
# This is where we can put an informative error message
# about the inability to find the tool. We'll start doing
# this as we cut over more pre-defined Builder+Tools to use
# the ToolInitializer class.
def Initializers(env):
ToolInitializer(env, ['install'], ['_InternalInstall', '_InternalInstallAs', '_InternalInstallVersionedLib'])
def Install(self, *args, **kw):
return self._InternalInstall(*args, **kw)
def InstallAs(self, *args, **kw):
return self._InternalInstallAs(*args, **kw)
def InstallVersionedLib(self, *args, **kw):
return self._InternalInstallVersionedLib(*args, **kw)
env.AddMethod(Install)
env.AddMethod(InstallAs)
env.AddMethod(InstallVersionedLib)
def FindTool(tools, env):
for tool in tools:
t = Tool(tool)
if t.exists(env):
return tool
return None
def FindAllTools(tools, env):
def ToolExists(tool, env=env):
return Tool(tool).exists(env)
return list(filter (ToolExists, tools))
def tool_list(platform, env):
other_plat_tools=[]
# XXX this logic about what tool to prefer on which platform
# should be moved into either the platform files or
# the tool files themselves.
# The search orders here are described in the man page. If you
# change these search orders, update the man page as well.
if str(platform) == 'win32':
"prefer Microsoft tools on Windows"
linkers = ['mslink', 'gnulink', 'ilink', 'linkloc', 'ilink32' ]
c_compilers = ['msvc', 'mingw', 'gcc', 'intelc', 'icl', 'icc', 'cc', 'bcc32' ]
cxx_compilers = ['msvc', 'intelc', 'icc', 'g++', 'c++', 'bcc32' ]
assemblers = ['masm', 'nasm', 'gas', '386asm' ]
fortran_compilers = ['gfortran', 'g77', 'ifl', 'cvf', 'f95', 'f90', 'fortran']
ars = ['mslib', 'ar', 'tlib']
other_plat_tools = ['msvs', 'midl']
elif str(platform) == 'os2':
"prefer IBM tools on OS/2"
linkers = ['ilink', 'gnulink', ]#'mslink']
c_compilers = ['icc', 'gcc',]# 'msvc', 'cc']
cxx_compilers = ['icc', 'g++',]# 'msvc', 'c++']
assemblers = ['nasm',]# 'masm', 'gas']
fortran_compilers = ['ifl', 'g77']
ars = ['ar',]# 'mslib']
elif str(platform) == 'irix':
"prefer MIPSPro on IRIX"
linkers = ['sgilink', 'gnulink']
c_compilers = ['sgicc', 'gcc', 'cc']
cxx_compilers = ['sgic++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['sgiar']
elif str(platform) == 'sunos':
"prefer Forte tools on SunOS"
linkers = ['sunlink', 'gnulink']
c_compilers = ['suncc', 'gcc', 'cc']
cxx_compilers = ['sunc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['sunf95', 'sunf90', 'sunf77', 'f95', 'f90', 'f77',
'gfortran', 'g77', 'fortran']
ars = ['sunar']
elif str(platform) == 'hpux':
"prefer aCC tools on HP-UX"
linkers = ['hplink', 'gnulink']
c_compilers = ['hpcc', 'gcc', 'cc']
cxx_compilers = ['hpc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'f77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'aix':
"prefer AIX Visual Age tools on AIX"
linkers = ['aixlink', 'gnulink']
c_compilers = ['aixcc', 'gcc', 'cc']
cxx_compilers = ['aixc++', 'g++', 'c++']
assemblers = ['as', 'gas']
fortran_compilers = ['f95', 'f90', 'aixf77', 'g77', 'fortran']
ars = ['ar']
elif str(platform) == 'darwin':
"prefer GNU tools on Mac OS X, except for some linkers and IBM tools"
linkers = ['applelink', 'gnulink']
c_compilers = ['gcc', 'cc']
cxx_compilers = ['g++', 'c++']
assemblers = ['as']
fortran_compilers = ['gfortran', 'f95', 'f90', 'g77']
ars = ['ar']
elif str(platform) == 'cygwin':
"prefer GNU tools on Cygwin, except for a platform-specific linker"
linkers = ['cyglink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
else:
"prefer GNU tools on all other platforms"
linkers = ['gnulink', 'mslink', 'ilink']
c_compilers = ['gcc', 'msvc', 'intelc', 'icc', 'cc']
cxx_compilers = ['g++', 'msvc', 'intelc', 'icc', 'c++']
assemblers = ['gas', 'nasm', 'masm']
fortran_compilers = ['gfortran', 'g77', 'ifort', 'ifl', 'f95', 'f90', 'f77']
ars = ['ar', 'mslib']
if not str(platform) == 'win32':
other_plat_tools += ['m4', 'rpm']
c_compiler = FindTool(c_compilers, env) or c_compilers[0]
# XXX this logic about what tool provides what should somehow be
# moved into the tool files themselves.
if c_compiler and c_compiler == 'mingw':
# MinGW contains a linker, C compiler, C++ compiler,
# Fortran compiler, archiver and assembler:
cxx_compiler = None
linker = None
assembler = None
fortran_compiler = None
ar = None
else:
# Don't use g++ if the C compiler has built-in C++ support:
if c_compiler in ('msvc', 'intelc', 'icc'):
cxx_compiler = None
else:
cxx_compiler = FindTool(cxx_compilers, env) or cxx_compilers[0]
linker = FindTool(linkers, env) or linkers[0]
assembler = FindTool(assemblers, env) or assemblers[0]
fortran_compiler = FindTool(fortran_compilers, env) or fortran_compilers[0]
ar = FindTool(ars, env) or ars[0]
d_compilers = ['dmd', 'gdc', 'ldc']
d_compiler = FindTool(d_compilers, env) or d_compilers[0]
other_tools = FindAllTools(other_plat_tools + [
#TODO: merge 'install' into 'filesystem' and
# make 'filesystem' the default
'filesystem',
'wix', #'midl', 'msvs',
# Parser generators
'lex', 'yacc',
# Foreign function interface
'rpcgen', 'swig',
# Java
'jar', 'javac', 'javah', 'rmic',
# TeX
'dvipdf', 'dvips', 'gs',
'tex', 'latex', 'pdflatex', 'pdftex',
# Archivers
'tar', 'zip',
# SourceCode factories
'BitKeeper', 'CVS', 'Perforce',
'RCS', 'SCCS', # 'Subversion',
], env)
tools = ([linker, c_compiler, cxx_compiler,
fortran_compiler, assembler, ar, d_compiler]
+ other_tools)
return [x for x in tools if x]
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
IljaGrebel/OpenWrt-SDK-imx6_HummingBoard
|
staging_dir/host/lib/scons-2.3.5/SCons/Tool/__init__.py
|
Python
|
gpl-2.0
| 34,232
|
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
router = os.path.join(TestUtil.getCppBinDir(), "glacier2router")
args = ' --Ice.Warn.Dispatch=0' + \
' --Ice.Warn.Connections=0' + \
' --Glacier2.Filter.Category.Accept="c1 c2"' + \
' --Glacier2.Filter.Category.AcceptUser="2"' + \
' --Glacier2.SessionTimeout="30"' + \
' --Glacier2.Client.Endpoints="default -p 12347"' + \
       ' --Glacier2.Server.Endpoints="tcp -h 127.0.0.1"' + \
' --Ice.Admin.Endpoints="tcp -h 127.0.0.1 -p 12348"' + \
' --Ice.Admin.InstanceName=Glacier2' + \
' --Glacier2.CryptPasswords="' + os.path.join(os.getcwd(), "passwords") + '"'
sys.stdout.write("starting router... ")
sys.stdout.flush()
routerConfig = TestUtil.DriverConfig("server")
routerConfig.lang = "cpp"
starterProc = TestUtil.startServer(router, args, count=2, config=routerConfig)
print("ok")
TestUtil.clientServerTest()
TestUtil.clientServerTest(additionalClientOptions=" --shutdown")
starterProc.waitTestSuccess()
|
sbesson/zeroc-ice
|
java/test/Glacier2/router/run.py
|
Python
|
gpl-2.0
| 1,753
|
""" NOTICE: port of 'contrib' Pygments lexer available below:
- https://github.com/miyuchina/mistletoe/tree/master/contrib
"""
from mistletoe import HTMLRenderer
from pygments import highlight
from pygments.styles import get_style_by_name as get_style
from pygments.lexers import get_lexer_by_name as get_lexer, guess_lexer
from pygments.formatters.html import HtmlFormatter
class PygmentsRenderer(HTMLRenderer):
formatter = HtmlFormatter()
formatter.noclasses = True
def __init__(self, *extras, style='colorful'):
super().__init__(*extras)
self.formatter.style = get_style(style)
def render_block_code(self, token):
code = token.children[0].content
lexer = get_lexer(token.language) if token.language else guess_lexer(code)
return highlight(code, lexer, self.formatter)
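# Usage sketch (assumed usage, not in the original file): render a Markdown
# file with highlighted code blocks via mistletoe's markdown() helper:
#   import mistletoe
#   with open('page.md', 'r') as fin:
#       html = mistletoe.markdown(fin, PygmentsRenderer)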
|
cschmautz/homepage
|
src/app/utils/renderext.py
|
Python
|
gpl-2.0
| 837
|
import io
import os
import time
class liveFile(io.FileIO):
def getTimestamp(self):
return os.path.getmtime(self.path)
def _getContents(self):
self.seek(0)
return super().read().decode()
def __init__(self, name, mode = "r", closefd=True, opener=None):
super().__init__(name, mode, closefd, opener)
self.path = name
self.opener = opener
self.timestamp = self.getTimestamp()
self.contents = self._getContents()
def isCurrent(self):
return (self.getTimestamp() == self.timestamp)
def read(self):
if "r" not in self.mode or not self.isCurrent():
self._refresh("r")
return self.contents
def _refresh(self, mode = "r"):
self.close()
self.__init__(self.name, mode, self.closefd, self.opener)
    def write(self, value, overRide=False):
        if not self.isCurrent():
            if not overRide:
                raise IOError("WARNING: File has been modified since last opened. Call with overRide = True to override.")
            # reopen in "a+" rather than "a" so the re-read in _refresh()
            # (via __init__ -> _getContents) can still read the file
            self._refresh("a+")
        elif "a" not in self.mode:
            self._refresh("a+")
        if type(value) is bytes:
            super().write(value)
        else:
            super().write(value.encode())
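# Usage sketch (illustrative, not part of the original file):
#   f = liveFile("notes.txt")
#   first = f.read()    # cached contents
#   # ... notes.txt is modified by another process ...
#   second = f.read()   # re-read automatically, since isCurrent() is now False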
|
jessestowe/pyfile
|
pyfile.py
|
Python
|
gpl-2.0
| 1,098
|
# Copyright: Martin Matusiak <numerodix@gmail.com>
from __future__ import absolute_import
import re
class FilepathTransformer(object):
@classmethod
    def to_unicode(cls, s):
us = s.decode('utf-8', 'ignore')
return us
@classmethod
    def from_unicode(cls, us):
s = us.encode('utf-8', 'ignore')
return s
@classmethod
def by_regex(cls, rx_from, rx_to, s):
return re.sub(rx_from, rx_to, s)
@classmethod
def capitalize(cls, s):
us = cls.to_unicode(s)
cap = lambda m: m.group(1).upper() + m.group(2).lower()
us = re.sub("(?u)(?<![0-9\w'])(\w)([\w']*)", cap, us)
s = cls.from_unicode(us)
return s
@classmethod
def make_lowercase(cls, s):
us = cls.to_unicode(s)
tolower = lambda m: m.group(1).lower()
us = re.sub('(?u)([\w]*)', tolower, us)
s = cls.from_unicode(us)
return s
@classmethod
def make_spaces_underscores(cls, s):
s = re.sub(' ', '_', s)
return s
@classmethod
def do_trim(cls, s):
# check endpoints
s = cls.by_regex('^([ ]|-)*', '', s)
s = cls.by_regex('([ ]|-)*$', '', s)
return s
@classmethod
def make_neat(cls, s):
# too many hyphens and underscores
s = cls.by_regex('_{2,}', '-', s)
s = cls.by_regex('-{2,}', '-', s)
s = cls.by_regex('-[ ]+-', '-', s)
# junk-y chars past the start of the string
s = cls.by_regex('(?<!^)\.', ' ', s)
s = cls.by_regex('_', ' ', s)
s = cls.by_regex('#', ' ', s)
s = cls.by_regex(':', ' ', s)
# let's have spaces around hyphen
s = cls.by_regex('(?<!\s)-', ' -', s)
s = cls.by_regex('-(?!\s)', '- ', s)
s = cls.by_regex('(?<!\s)[+]', ' +', s)
s = cls.by_regex('[+](?!\s)', '+ ', s)
# empty brackets
s = cls.by_regex('\[ *?\]', ' ', s)
s = cls.by_regex('\( *?\)', ' ', s)
# normalize spaces
s = cls.by_regex('[ ]{2,}', ' ', s)
s = cls.do_trim(s)
return s
@classmethod
def make_neater(cls, s):
# bracket-y junk
s = cls.by_regex('\[.*?\]', ' ', s)
s = cls.by_regex('\(.*?\)', ' ', s)
s = cls.do_trim(s)
return s
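# Usage sketch (editor's addition): the transforms are classmethods, e.g.
#   FilepathTransformer.capitalize('the quick brown fox')
#   # -> 'The Quick Brown Fox'
#   FilepathTransformer.make_spaces_underscores('my file.txt')
#   # -> 'my_file.txt'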
|
numerodix/nametrans
|
nametrans/filepathtrans.py
|
Python
|
gpl-2.0
| 2,302
|
#!/usr/bin/python
from select import epoll, EPOLLHUP, EPOLLERR, EPOLLIN, EPOLLOUT, EPOLLET
class EpollReactor(object):
EV_DISCONNECTED = (EPOLLHUP | EPOLLERR)
EV_IN = EPOLLIN# | EPOLLET
EV_OUT = EPOLLOUT# | EPOLLET
def __init__(self):
self._poller = epoll()
def poll(self, timeout):
return self._poller.poll(timeout)
def register(self, fd, mode):
self._poller.register(fd, mode)
def unregister(self, fd):
self._poller.unregister(fd)
def modify(self, fd, mode):
self._poller.modify(fd, mode)
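# Usage sketch (assumed, not part of the original file; 'sock' is a
# hypothetical connected socket): register it for readability and poll:
#   reactor = EpollReactor()
#   reactor.register(sock.fileno(), EpollReactor.EV_IN)
#   for fd, events in reactor.poll(1.0):
#       if events & EpollReactor.EV_DISCONNECTED:
#           reactor.unregister(fd)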
|
hfutsuchao/Python2.6
|
fastpy/fastpy/跨平台/reactor/epollreactor.py
|
Python
|
gpl-2.0
| 591
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/tkralidi/work/foss4g/MetaSearch/MetaSearch/plugin/MetaSearch/ui/recorddialog.ui'
#
# Created: Thu Mar 20 21:56:35 2014
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_RecordDialog(object):
def setupUi(self, RecordDialog):
RecordDialog.setObjectName(_fromUtf8("RecordDialog"))
RecordDialog.resize(600, 400)
self.verticalLayout = QtGui.QVBoxLayout(RecordDialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.textMetadata = QtGui.QTextBrowser(RecordDialog)
self.textMetadata.setLineWrapMode(QtGui.QTextEdit.NoWrap)
self.textMetadata.setOpenExternalLinks(True)
self.textMetadata.setObjectName(_fromUtf8("textMetadata"))
self.verticalLayout.addWidget(self.textMetadata)
self.buttonBox = QtGui.QDialogButtonBox(RecordDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(RecordDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), RecordDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), RecordDialog.reject)
QtCore.QMetaObject.connectSlotsByName(RecordDialog)
def retranslateUi(self, RecordDialog):
RecordDialog.setWindowTitle(QtGui.QApplication.translate("RecordDialog", "Record Metadata", None, QtGui.QApplication.UnicodeUTF8))
|
luca76/QGIS
|
python/plugins/MetaSearch/ui/recorddialog.py
|
Python
|
gpl-2.0
| 1,849
|
from cli import *
def del_file_cdrom_cmd(obj):
name = obj.name
try:
SIM_delete_object(obj)
print "File CD-ROM object '%s' deleted." % name
except Exception, msg:
print "Failed deleting file CD-ROM object '%s': %s" % (name, msg)
new_command("delete", del_file_cdrom_cmd,
args = [],
type = ["CD-ROM"],
namespace = "file-cdrom",
short = "delete an unused file-cdrom object",
doc = """
Delete an unused file-cdrom object with the name <i>object-name</i>.
""", filename="/mp/simics-3.0/src/extensions/file-cdrom/commands.py", linenumber="11")
|
iniverno/RnR-LLC
|
simics-3.0-install/simics-3.0.31/amd64-linux/lib/python/mod_file_cdrom_commands.py
|
Python
|
gpl-2.0
| 630
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-02 04:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('listings', '0006_auto_20160228_2238'),
]
operations = [
migrations.AlterField(
model_name='listing',
name='price',
field=models.IntegerField(blank=True, default=0, null=True),
),
]
|
NilsJPWerner/Sublet-Uchicago
|
listings/migrations/0007_auto_20160302_0425.py
|
Python
|
gpl-2.0
| 473
|
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""CDS Invenio Access Control Config. """
__revision__ = \
"$Id$"
# pylint: disable=C0301
from invenio.config import CFG_SITE_NAME, CFG_SITE_URL, CFG_SITE_LANG, \
CFG_SITE_SECURE_URL, CFG_SITE_SUPPORT_EMAIL, CFG_CERN_SITE
from invenio.messages import gettext_set_language
class InvenioWebAccessFireroleError(Exception):
"""Just an Exception to discover if it's a FireRole problem"""
pass
# VALUES TO BE EXPORTED
# CURRENTLY USED BY THE FILES access_control_engine.py access_control_admin.py webaccessadmin_lib.py
# name of the role giving superadmin rights
SUPERADMINROLE = 'superadmin'
# name of the webaccess webadmin role
WEBACCESSADMINROLE = 'webaccessadmin'
# name of the action allowing roles to access the web administrator interface
WEBACCESSACTION = 'cfgwebaccess'
# name of the action allowing roles to view restricted collections
VIEWRESTRCOLL = 'viewrestrcoll'
# name of the action allowing roles to delegate the rights to other roles
# ex: libraryadmin to delegate libraryworker
DELEGATEADDUSERROLE = 'accdelegaterole'
# max number of users to display in the drop down selects
MAXSELECTUSERS = 25
# max number of users to display in a page (mainly for user area)
MAXPAGEUSERS = 25
# default role definition, source:
CFG_ACC_EMPTY_ROLE_DEFINITION_SRC = 'deny all'
# default role definition, compiled:
CFG_ACC_EMPTY_ROLE_DEFINITION_OBJ = (False, False, ())
# default role definition, compiled and serialized:
CFG_ACC_EMPTY_ROLE_DEFINITION_SER = None
# List of tags containing (multiple) emails of users who should be authorized
# to access the corresponding record regardless of collection restrictions.
if CFG_CERN_SITE:
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['859__f', '270__m']
else:
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS = ['8560_f']
# Use an external source for access control?
# At least one system must be added.
# It is advised not to change the name, since it is used to identify the account.
# Format is: system name: (system class, default True/False); at least one
# system must be the default.
CFG_EXTERNAL_AUTHENTICATION = {"Local" : (None, True)}
# Variables to set to the SSO Authentication name if using SSO
CFG_EXTERNAL_AUTH_USING_SSO = False
CFG_EXTERNAL_AUTH_LOGOUT_SSO = None
if CFG_CERN_SITE:
if True:
import external_authentication_sso as ea_sso
CFG_EXTERNAL_AUTH_USING_SSO = "CERN"
# Link to reach in order to logout from SSO
CFG_EXTERNAL_AUTH_LOGOUT_SSO = 'https://login.cern.ch/adfs/ls/?wa=wsignout1.0'
CFG_EXTERNAL_AUTHENTICATION = {CFG_EXTERNAL_AUTH_USING_SSO : (ea_sso.ExternalAuthSSO(), True)}
else:
import external_authentication_cern as ea_cern
CFG_EXTERNAL_AUTHENTICATION = {"Local": (None, False), \
"CERN": (ea_cern.ExternalAuthCern(), True)}
# default data for the add_default_settings function
# Note: by default the definition is set to deny any. This won't be a problem
# because userid directly connected with roles will still be allowed.
# roles
# name description definition
DEF_ROLES = ((SUPERADMINROLE, 'superuser with all rights', 'deny any'),
(WEBACCESSADMINROLE, 'WebAccess administrator', 'deny any'),
('anyuser', 'Any user', 'allow any'),
('basketusers', 'Users who can use baskets', 'allow any'),
('loanusers', 'Users who can use loans', 'allow any'),
('groupusers', 'Users who can use groups', 'allow any'),
('alertusers', 'Users who can use alerts', 'allow any'),
('messageusers', 'Users who can use messages', 'allow any'),
('holdingsusers', 'Users who can view holdings', 'allow any'),
('statisticsusers', 'Users who can view statistics', 'allow any'))
# Demo site roles
DEF_DEMO_ROLES = (('photocurator', 'Photo collection curator', 'deny any'),
('thesesviewer', 'Theses viewer', 'allow group "Theses viewers"\nallow apache_group "theses"'),
('thesescurator', 'Theses collection curator', 'deny any'),
('referee_DEMOBOO_*', 'Book collection curator', 'deny any'),
('restrictedpicturesviewer', 'Restricted pictures viewer', 'deny any'),
('curator', 'Curator', 'deny any'),
('basketusers', 'Users who can use baskets', 'deny email "hyde@cds.cern.ch"\nallow any'),
('submit_DEMOJRN_*', 'Users who can submit (and modify) "Atlantis Times" articles', 'deny all'),
('atlantiseditor', 'Users who can configure "Atlantis Times" journal', 'deny all'),
('commentmoderator', 'Users who can moderate comments', 'deny all'),
('poetrycommentreader', 'Users who can view comments in Poetry collection', 'deny all'))
DEF_DEMO_USER_ROLES = (('jekyll@cds.cern.ch', 'thesesviewer'),
('dorian.gray@cds.cern.ch', 'referee_DEMOBOO_*'),
('balthasar.montague@cds.cern.ch', 'curator'),
('romeo.montague@cds.cern.ch', 'restrictedpicturesviewer'),
('romeo.montague@cds.cern.ch', 'thesescurator'),
('juliet.capulet@cds.cern.ch', 'restrictedpicturesviewer'),
('juliet.capulet@cds.cern.ch', 'photocurator'),
('romeo.montague@cds.cern.ch', 'submit_DEMOJRN_*'),
('juliet.capulet@cds.cern.ch', 'submit_DEMOJRN_*'),
('balthasar.montague@cds.cern.ch', 'atlantiseditor'),
('romeo.montague@cds.cern.ch', 'poetrycommentreader'))
# users
# list of e-mail addresses
DEF_USERS = []
# actions
# name desc allowedkeywords optional
DEF_ACTIONS = (
('cfgwebsearch', 'configure WebSearch', '', 'no'),
('cfgbibformat', 'configure BibFormat', '', 'no'),
('cfgbibknowledge', 'configure BibKnowledge', '', 'no'),
('cfgwebsubmit', 'configure WebSubmit', '', 'no'),
('cfgbibrank', 'configure BibRank', '', 'no'),
('cfgwebcomment', 'configure WebComment', '', 'no'),
('cfgoaiharvest', 'configure OAI Harvest', '', 'no'),
('cfgoairepository', 'configure OAI Repository', '', 'no'),
('cfgbibindex', 'configure BibIndex', '', 'no'),
('cfgbibexport', 'configure BibExport', '', 'no'),
('runbibindex', 'run BibIndex', '', 'no'),
('runbibupload', 'run BibUpload', '', 'no'),
('runwebcoll', 'run webcoll', 'collection', 'yes'),
('runbibformat', 'run BibFormat', 'format', 'yes'),
('runbibclassify', 'run BibClassify', 'taxonomy', 'yes'),
('runbibtaskex', 'run BibTaskEx example', '', 'no'),
('runbibrank', 'run BibRank', '', 'no'),
('runoaiharvest', 'run oaiharvest task', '', 'no'),
('runoairepository', 'run oairepositoryupdater task', '', 'no'),
('runbibedit', 'run Record Editor', 'collection', 'yes'),
('runbibeditmulti', 'run Multi-Record Editor', '', 'no'),
('runbibdocfile', 'run Document File Manager', '', 'no'),
('runbibmerge', 'run Record Merger', '', 'no'),
('runwebstatadmin', 'run WebStadAdmin', '', 'no'),
('runinveniogc', 'run InvenioGC', '', 'no'),
('runbibexport', 'run BibExport', '', 'no'),
('referee', 'referee document type doctype/category categ', 'doctype,categ', 'yes'),
('submit', 'use webSubmit', 'doctype,act,categ', 'yes'),
('viewrestrdoc', 'view restricted document', 'status', 'no'),
('viewrestrcomment', 'view restricted comment', 'status', 'no'),
(WEBACCESSACTION, 'configure WebAccess', '', 'no'),
(DELEGATEADDUSERROLE, 'delegate subroles inside WebAccess', 'role', 'no'),
(VIEWRESTRCOLL, 'view restricted collection', 'collection', 'no'),
('cfgwebjournal', 'configure WebJournal', 'name,with_editor_rights', 'no'),
('viewcomment', 'view comments', 'collection', 'no'),
('sendcomment', 'send comments', 'collection', 'no'),
('attachcommentfile', 'attach files to comments', 'collection', 'no'),
('attachsubmissionfile', 'upload files to drop box during submission', '', 'no'),
('cfgbibexport', 'configure BibExport', '', 'no'),
('runbibexport', 'run BibExport', '', 'no'),
('usebaskets', 'use baskets', '', 'no'),
('useloans', 'use loans', '', 'no'),
('usegroups', 'use groups', '', 'no'),
('usealerts', 'use alerts', '', 'no'),
('usemessages', 'use messages', '', 'no'),
('viewholdings', 'view holdings', 'collection', 'yes'),
('viewstatistics', 'view statistics', 'collection', 'yes'),
('runbibcirculation', 'run BibCirculation', '', 'no'),
('moderatecomments', 'moderate comments', 'collection', 'no'),
('runbatchuploader', 'run batchuploader', 'collection', 'yes')
)
# Default authorizations
# role action arguments
DEF_AUTHS = (('basketusers', 'usebaskets', {}),
('loanusers', 'useloans', {}),
('groupusers', 'usegroups', {}),
('alertusers', 'usealerts', {}),
('messageusers', 'usemessages', {}),
('holdingsusers', 'viewholdings', {}),
('statisticsusers', 'viewstatistics', {}))
# Demo site authorizations
# role action arguments
DEF_DEMO_AUTHS = (
('photocurator', 'runwebcoll', {'collection': 'Pictures'}),
('restrictedpicturesviewer', 'viewrestrdoc', {'status': 'restricted_picture'}),
('thesesviewer', VIEWRESTRCOLL, {'collection': 'Theses'}),
('referee_DEMOBOO_*', 'referee', {'doctype': 'DEMOBOO', 'categ': '*'}),
('curator', 'cfgbibknowledge', {}),
('curator', 'runbibedit', {}),
('curator', 'runbibeditmulti', {}),
('curator', 'runbibmerge', {}),
('thesescurator', 'runbibedit', {'collection': 'Theses'}),
('thesescurator', VIEWRESTRCOLL, {'collection': 'Theses'}),
('photocurator', 'runbibedit', {'collection': 'Pictures'}),
('referee_DEMOBOO_*', 'runbibedit', {'collection': 'Books'}),
('submit_DEMOJRN_*', 'submit', {'doctype': 'DEMOJRN', 'act': 'SBI', 'categ': '*'}),
('submit_DEMOJRN_*', 'submit', {'doctype': 'DEMOJRN', 'act': 'MBI', 'categ': '*'}),
('submit_DEMOJRN_*', 'cfgwebjournal', {'name': 'AtlantisTimes', 'with_editor_rights': 'no'}),
('atlantiseditor', 'cfgwebjournal', {'name': 'AtlantisTimes', 'with_editor_rights': 'yes'}),
('referee_DEMOBOO_*', 'runbatchuploader', {'collection': 'Books'}),
('poetrycommentreader', 'viewcomment', {'collection': 'Poetry'})
)
_ = gettext_set_language(CFG_SITE_LANG)
# Activities (i.e. actions) for which exists an administrative web interface.
CFG_ACC_ACTIVITIES_URLS = {
'runbibedit' : (_("Run Record Editor"), "%s/record/edit/?ln=%%s" % CFG_SITE_URL),
'runbibeditmulti' : (_("Run Multi-Record Editor"), "%s/record/multiedit/?ln=%%s" % CFG_SITE_URL),
'runbibdocfile' : (_("Run Document File Manager"), "%s/submit/managedocfiles?ln=%%s" % CFG_SITE_URL),
'runbibmerge' : (_("Run Record Merger"), "%s/record/merge/?ln=%%s" % CFG_SITE_URL),
'cfgbibknowledge' : (_("Configure BibKnowledge"), "%s/kb?ln=%%s" % CFG_SITE_URL),
'cfgbibformat' : (_("Configure BibFormat"), "%s/admin/bibformat/bibformatadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoaiharvest' : (_("Configure OAI Harvest"), "%s/admin/bibharvest/oaiharvestadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgoairepository' : (_("Configure OAI Repository"), "%s/admin/bibharvest/oairepositoryadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibindex' : (_("Configure BibIndex"), "%s/admin/bibindex/bibindexadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgbibrank' : (_("Configure BibRank"), "%s/admin/bibrank/bibrankadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebaccess' : (_("Configure WebAccess"), "%s/admin/webaccess/webaccessadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebcomment' : (_("Configure WebComment"), "%s/admin/webcomment/webcommentadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsearch' : (_("Configure WebSearch"), "%s/admin/websearch/websearchadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebsubmit' : (_("Configure WebSubmit"), "%s/admin/websubmit/websubmitadmin.py?ln=%%s" % CFG_SITE_URL),
'cfgwebjournal' : (_("Configure WebJournal"), "%s/admin/webjournal/webjournaladmin.py?ln=%%s" % CFG_SITE_URL),
'runbibcirculation' : (_("Run BibCirculation"), "%s/admin/bibcirculation/bibcirculationadmin.py?ln=%%s" % CFG_SITE_URL),
'runbatchuploader' : (_("Run Batch Uploader"), "%s/batchuploader/metadata?ln=%%s" % CFG_SITE_URL)
}
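# Illustration (not in the original source): the remaining %s in each URL
# above is filled with the UI language at render time, e.g.
#   CFG_ACC_ACTIVITIES_URLS['runbibedit'][1] % 'en'
# yields '%s/record/edit/?ln=en' % CFG_SITE_URL.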
CFG_WEBACCESS_MSGS = {
0: 'Try to <a href="%s/youraccount/login?referer=%%s">login</a> with another account.' % (CFG_SITE_SECURE_URL),
1: '<br />If you think this is not correct, please contact: <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
2: '<br />If you have any questions, please write to <a href="mailto:%s">%s</a>' % (CFG_SITE_SUPPORT_EMAIL, CFG_SITE_SUPPORT_EMAIL),
3: 'Guest users are not allowed, please <a href="%s/youraccount/login">login</a>.' % CFG_SITE_SECURE_URL,
4: 'The site is temporarily closed for maintenance. Please come back soon.',
5: 'Authorization failure',
6: '%s temporarily closed' % CFG_SITE_NAME,
7: 'This functionality is temporarily closed due to server maintenance. Please use only the search engine in the meantime.',
8: 'Functionality temporarily closed'
}
CFG_WEBACCESS_WARNING_MSGS = {
0: 'Authorization granted',
1: 'You are not authorized to perform this action.',
2: 'You are not authorized to perform any action.',
3: 'The action %s does not exist.',
4: 'Unexpected error occurred.',
5: 'Missing mandatory keyword argument(s) for this action.',
6: 'Guest accounts are not authorized to perform this action.',
7: 'Not enough arguments, user ID and action name required.',
8: 'Incorrect keyword argument(s) for this action.',
9: """Account '%s' is not yet activated.""",
10: """You were not authorized by the authentication method '%s'.""",
11: """The selected login method '%s' is not the default method for this account, please try another one.""",
12: """Selected login method '%s' does not exist.""",
13: """Could not register '%s' account.""",
14: """Could not login using '%s', because this user is unknown.""",
15: """Could not login using your '%s' account, because you have introduced a wrong password.""",
16: """External authentication troubles using '%s' (maybe temporary network problems).""",
17: """You have not yet confirmed the email address for the '%s' authentication method.""",
18: """The administrator has not yet activated your account for the '%s' authentication method.""",
19: """The site is having troubles in sending you an email for confirming your email address. The error has been logged and will be taken care of as soon as possible.""",
20: """No roles are authorized to perform action %s with the given parameters."""
}
|
lbjay/cds-invenio
|
modules/webaccess/lib/access_control_config.py
|
Python
|
gpl-2.0
| 17,296
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import users.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('password', models.CharField(verbose_name='password', max_length=128)),
('last_login', models.DateTimeField(verbose_name='last login', null=True, blank=True)),
('username', models.CharField(unique=True, max_length=32)),
('email', models.EmailField(unique=True, max_length=254)),
('name', models.CharField(max_length=50)),
('description', models.TextField(default='')),
('is_active', models.BooleanField(default=False)),
('confirm_token', models.CharField(max_length=40, default=users.models.create_hash)),
('password_token', models.CharField(max_length=40, default=users.models.create_hash)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
},
),
]
|
7Pros/circuit
|
users/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,392
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 18 00:00:32 2014
@author: Trent
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform
import sys, csv
def getdata(inf, delimiter=','):
'''read data, calc distances from all pts to all other pts,
convert to symmetric square matrix'''
data = np.genfromtxt(inf, dtype=float, delimiter=delimiter)
distance = squareform(pdist(data))
    # upper triangle iterator, offset by +1 to ignore
    # the diagonal of 0 distances from a point to itself
    uti = np.triu_indices_from(distance, 1)
    # conversion to standard list type -- intended to speed up
    # writing to file (performance claim not verified)
i = list(uti[0])
j = list(uti[1])
data = list(distance[uti])
return i, j, data
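# Worked illustration (not in the original script): for the three 1-D points
# [[0.], [3.], [4.]], pdist yields the condensed vector [3., 4., 1.] and
# squareform expands it to the symmetric matrix
#   [[0., 3., 4.],
#    [3., 0., 1.],
#    [4., 1., 0.]]
# so the upper-triangle iterator visits the pairs (0, 1), (0, 2), (1, 2).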
def write_to_file(i, j, data, of):
sz = len(i)
    one_percent = max(1, int(0.01*sz))  # guard against modulo by zero when sz < 100
pct_complete = 0
with open(of, 'wb') as csvfile:
w = csv.writer(csvfile, delimiter='\t')
sys.stdout.write("\r%i%% complete" % pct_complete)
for x in xrange(sz):
w.writerow([str(i[x]) , str(j[x]) , str(data[x])])
            if (x % one_percent) == 0 and pct_complete < 100:
                pct_complete += 1
sys.stdout.flush()
sys.stdout.write("\r%i%% complete" % pct_complete)
def main():
if len(sys.argv) != 3:
print("Two arguments expected <data filename> <output filename>".format(sys.argv[0]))
sys.exit(1)
datapath = sys.argv[1]
of = sys.argv[2]
i, j, data = getdata(datapath, delimiter='')
write_to_file(i, j, data, of)
if __name__ == "__main__":
main()
|
tweber225/fast-dp
|
distance_convert.py
|
Python
|
gpl-2.0
| 1,712
|
import re
from django import forms
from django.db.models import OneToOneField, ForeignKey
from django.db.models.query import QuerySet
from edc_base.form.classes import LogicCheck
from edc_constants.constants import YES, NO, OTHER, NOT_APPLICABLE
from edc_visit_tracking.models import VisitModelMixin
class BaseModelForm(forms.ModelForm):
visit_model = None
optional_attrs = {}
def __init__(self, *args, **kwargs):
super(BaseModelForm, self).__init__(*args, **kwargs)
self.logic = LogicCheck(self._meta.model)
def get_subject_identifier(self, cleaned_data):
subject_identifier = None
if 'subject_identifier' in cleaned_data:
subject_identifier = cleaned_data.get('subject_identifier')
if not subject_identifier:
if 'registered_subject' in cleaned_data:
subject_identifier = cleaned_data.get('registered_subject').subject_identifier
if not subject_identifier:
# look for a visit model field
for field in self._meta.model._meta.fields:
if isinstance(field, (OneToOneField, ForeignKey)):
if isinstance(field.rel.to, VisitModelMixin):
attrname = field.attrname
visit = cleaned_data.get(attrname, None)
if visit:
subject_identifier = visit.get_subject_identifier()
return subject_identifier
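    # Illustration (hypothetical model): for a form whose model has a
    # ForeignKey 'maternal_visit' to a visit model, cleaned_data lacking
    # 'subject_identifier' and 'registered_subject' falls through to
    # visit.get_subject_identifier() above.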
def clean(self):
"""Calls crypto clean methods, OTHER/Specify and some functionality for bhp_dispatch."""
cleaned_data = super(BaseModelForm, self).clean()
self.encrypted_fields_validation()
self.other_fields()
self.check_for_other_in_m2m()
return cleaned_data
def other_fields(self):
cleaned_data = self.cleaned_data
# check for OTHER in choices tuples, if selected, should have a value on Other specify.
other = []
for fld in cleaned_data.iterkeys():
if cleaned_data.get(fld, None) in ['other', 'Other', OTHER]:
other.append(fld)
for fld in other:
if '{0}_other'.format(fld) in cleaned_data:
if not cleaned_data.get('{0}_other'.format(fld)):
raise forms.ValidationError(
'If {0} is \'OTHER\', please specify. '
'You wrote \'{1}\''.format(fld, cleaned_data['{0}_other'.format(fld)]))
def encrypted_fields_validation(self):
"""Encrypted fields may have their own validation code to run."""
# TODO: is this used??
cleaned_data = self.cleaned_data
try:
from edc_base.encrypted_fields import BaseEncryptedField
for field in self._meta.model._meta.fields:
if isinstance(field, BaseEncryptedField):
field.validate_with_cleaned_data(field.attname, cleaned_data)
except ImportError:
pass
def check_for_other_in_m2m(self):
"""Raises ValidtionError for an m2m if it cannot confirm \'Other Specify\'
is paired with a value in a following \'other\' field."""
cleaned_data = self.cleaned_data
for field_name, field_value in cleaned_data.iteritems():
self.check_for_value_in_m2m(
cleaned_data, field_name, field_value,
'{0}_other'.format(field_name),
['specify', 'explain'],
'other')
def check_for_value_in_m2m(self, cleaned_data, m2m_name, m2m_qs, optional_field_name,
optional_words, required_word=None):
"""Raises ValidtionError for an m2m if it cannot confirm \'Other Specify\'
is paired with a value in a following \'other\' field."""
if not required_word:
required_word = ''
if isinstance(m2m_qs, QuerySet):
try:
answers = [l.name.lower() for l in cleaned_data.get(m2m_name, [])]
except AttributeError:
answers = [l.short_name.lower() for l in cleaned_data.get(m2m_name, [])]
if answers:
for ans in answers:
if any([word in ans for word in optional_words]) and required_word in ans.lower():
if not cleaned_data.get(optional_field_name, None):
raise forms.ValidationError(
'You have selected \'{0}\' as an answer for {1}. '
'Please specify in {2}.'.format(ans, m2m_name, optional_field_name))
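    # Illustration (hypothetical field names): if the m2m 'symptoms' answers
    # include 'Other, specify' but cleaned_data['symptoms_other'] is empty,
    # check_for_value_in_m2m raises the ValidationError above.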
# TODO: is this method used??
def validate_m2m(self, **kwargs):
"""Validates at form level a triplet of questions lead by a Yes/No for a
many to many with other specify.
* The first question is a Yes/No question indicating if any items
in the many to many will be selected
* The second question is a many to many (select all that apply)
* The third is an 'Other Specify' to be completed if an 'Other'
item was selected in the many to many question
Be sure to check cleaned_data for the 'key' of the m2m field first.
For example, in the ModelForm clean() method call::
if cleaned_data.has_key('chronic'):
self.validate_m2m(
label = 'chronic condition',
yesno = cleaned_data['chronic_since'],
m2m = cleaned_data['chronic'],
other = cleaned_data['chronic_other'])
"""
label = kwargs.get('label', 'items to be selected')
leading = kwargs.get('leading')
m2m = kwargs.get('m2m')
other = kwargs.get('other')
# if leading question is 'Yes', a m2m item cannot be 'Not applicable'
if leading == YES and [True for item in m2m if item.name == NOT_APPLICABLE]:
raise forms.ValidationError(
"You stated there ARE " + label + "s, yet you selected '{0}'".format(item.name))
# if leading question is 'No', ensure the m2m item is 'not applicable'
if leading == NO and not [True for item in m2m if item.name == NOT_APPLICABLE]:
raise forms.ValidationError("You stated there are NO {0}s. Please correct".format(label))
# if leading question is 'No', ensure only one m2m item is selected.
if leading == NO and len(m2m) > 1:
raise forms.ValidationError("You stated there are NO {0}s. Please correct".format(label))
# if leading question is 'Yes' and an m2m item is 'other, specify', ensure 'other' attribute has a value
if leading == YES and not other and [True for item in m2m if 'other' in item.name.lower()]:
raise forms.ValidationError("You have selected a '{0}' as 'Other', please specify.".format(label))
# if 'other' has a value but no m2m item is 'Other, specify'
if other and not [True for item in m2m if 'other' in item.name.lower()]:
raise forms.ValidationError(
'You have specified an \'Other\' {0} but not selected \'Other, specify\'. '
'Please correct.'.format(label))
def validate_m2m_wcs_dx(self, **kwargs):
label = kwargs.get('label', 'items to be selected')
leading = kwargs.get('leading')
m2m = kwargs.get('m2m')
other = kwargs.get('other')
# if leading question is 'Yes', a m2m item cannot be 'Not applicable'
if (leading == YES and [
True for item in m2m if (item.short_name == NOT_APPLICABLE or
item.short_name.lower() == 'asymptomatic')]):
raise forms.ValidationError(
"You stated there ARE " + label + "s, yet you selected '{0}'".format(item.short_name))
# if leading question is 'No', ensure the m2m item is 'not applicable'
if (leading == NO and not [
True for item in m2m if (item.short_name == NOT_APPLICABLE or
item.short_name.lower() == 'asymptomatic')]):
raise forms.ValidationError("You stated there are NO {0}s. Please correct".format(label))
# if leading question is 'No', ensure only one m2m item is selected.
if leading == NO and len(m2m) > 1:
raise forms.ValidationError("You stated there are NO {0}s. Please correct".format(label))
# if leading question is 'Yes' and an m2m item is 'other, specify', ensure 'other' attribute has a value
if leading == YES and not other and [True for item in m2m if 'other' in item.short_name.lower()]:
raise forms.ValidationError("You have selected a '{0}' as 'Other', please specify.".format(label))
# if 'other' has a value but no m2m item is 'Other, specify'
if other and not [True for item in m2m if 'other' in item.short_name.lower()]:
raise forms.ValidationError(
'You have specified an \'Other\' {0} but not selected \'Other, specify\'. '
'Please correct.'.format(label))
def options_in_tuple(self, choices, options):
"""Confirms options exist in choices tuple."""
if not isinstance(choices, tuple):
raise TypeError('Parameter \'choices\' must be a tuple.')
if not isinstance(options, list):
raise TypeError('Parameter \'options\' must be a list.')
choices = list(choices)
choices.sort()
lst = list(set(list(options) + choices))
lst.sort()
if not lst == choices:
raise TypeError(
'Options {0} are not in choices tuple {1}. Has the choices tuple changed?'.format(
options, choices))
def verify_tuples(self):
"""Override to verify tuple values referenced in the valifation checks.
For example, validation checks refer to values from these choices::
options = ['Yes', 'No', 'not sure', 'Don\'t want to answer']
self.options_in_tuple(YES_NO_UNSURE, options)
self.options_in_tuple(FIRSTPARTNERHIV_CHOICE, ['negative', 'I am not sure'])
"""
pass
def _validate_cleaned_data(self, cleaned_data, supress_exception=None):
self.validate_cleaned_data(cleaned_data)
if self.get_validation_error() and not supress_exception:
for message in self.get_validation_error().itervalues():
raise forms.ValidationError(message)
return self.get_validation_error()
def validate_cleaned_data(self, cleaned_data, suppress_exception=None):
"""Override to add validation code in a manner that is easier to test.
Instead of adding validation code to the clean() method, add it
to this method. Then in your tests do something like this::
...
print 'test maternal visit'
form = MaternalVisitForm()
self.assertEquals(form._validate_cleaned_data({}), None)
self.assertRaises(
ValidationError, form._validate_cleaned_data,
{'reason': 'missed', 'reason_missed': None})
self.assertIsNotNone(
form._validate_cleaned_data({
'reason': 'missed', 'reason_missed': None}, supress_exception=True).get(1, None))
...
        .. note:: in your test call :func:`_validate_cleaned_data` instead of :func:`validate_cleaned_data`
Since :func:`clean` is called in super, there is no need to override it nor for this method
to return cleaned_data. So instead of this, in the clean method::
if cleaned_data.get('reason') == 'missed' and not cleaned_data.get('reason_missed'):
raise forms.ValidationError('Please provide the reason the scheduled visit was missed')
... do this in :func:`validate_cleaned_data`::
if cleaned_data.get('reason') == 'missed' and not cleaned_data.get('reason_missed'):
self.set_validation_error({1: 'an error has occurred'})
... then in the test, inspect the return value::
self.assertIsNotNone(
form._validate_cleaned_data({
'reason': 'missed', 'reason_missed': None}, supress_exception=True).get(1, None))
"""
pass
@property
def number_from_label(self):
"""Returns the question number from the label, or None."""
try:
number = re.match('^\\d+\\.', self.fields['week32_test_date'].label).string.split('.')
except AttributeError:
number = ['']
return number[0]
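    # Illustration (not in the original source): with a label such as
    # '5. Date of test', the regex matches and the property returns '5';
    # a label with no leading '<digits>.' raises AttributeError inside the
    # try block, so the property returns ''.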
|
botswana-harvard/microbiome
|
microbiome/apps/mb/base_model_form.py
|
Python
|
gpl-2.0
| 12,711
|
#!/usr/bin/env python
###############################################################################
#
# Project: GDAL/OGR Test Suite
# Purpose: Test DB2 vector driver
#
# Author: David Adler <dadler@adtechgeospatial.com>
#
###############################################################################
# Copyright (c) 2015, David Adler <dadler@adtechgeospatial.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
###############################################################################
# Before this test is run with a real database connection,
# set DB2_TEST_SERVER to point to the server and table to be used, like:
# DB2_TEST_SERVER=Database=SAMP105;DSN=SAMP105A;tables=TEST.ZIPPOINT
# or
# DB2_TEST_SERVER=Database=SAMP105;Driver={IBM DB2 CLIDRIVER};Hostname=<>;Port=<>;PROTOCOL=TCPIP;UID=<>;PWD=<>;tables=TEST.ZIPPOINT
#
# Also before running, the db2 setup script must be run to create the
# needed SRS and test tables
# In a DB2 command window, connect to a database and issue a command like
# db2 -tvf ogr\data\db2\db2_setup.sql
#
# These tests currently only run on Windows
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
from osgeo import ogr
###############################################################################
# Test if driver is available
def ogr_db2_check_driver():
ogrtest.db2_drv = None
try:
ogrtest.db2_drv = ogr.GetDriverByName('DB2ODBC')
except:
pass
if ogrtest.db2_drv is None:
return 'skip'
return 'success'
###############################################################################
# Test if environment variable for DB2 connection is set and we can connect
def ogr_db2_init():
if ogrtest.db2_drv is None:
return 'skip'
if 'DB2_TEST_SERVER' in os.environ:
ogrtest.db2_test_server = "DB2ODBC:" + os.environ['DB2_TEST_SERVER']
else:
gdaltest.post_reason('Environment variable DB2_TEST_SERVER not found')
ogrtest.db2_drv = None
return 'skip'
return 'success'
###############################################################################
# Test GetFeatureCount()
def ogr_db2_GetFeatureCount():
if ogrtest.db2_drv is None:
return 'skip'
ds = ogr.Open(ogrtest.db2_test_server)
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
count = lyr.GetFeatureCount()
if count != 5:
gdaltest.post_reason('did not get expected feature count')
return 'fail'
return 'success'
###############################################################################
# Test GetSpatialRef()
def ogr_db2_GetSpatialRef():
if ogrtest.db2_drv is None:
return 'skip'
ds = ogr.Open(ogrtest.db2_test_server)
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
sr = lyr.GetSpatialRef()
if sr is None:
gdaltest.post_reason('did not get expected srs')
return 'fail'
txt = sr.ExportToWkt()
if txt.find('GEOGCS[\"GCS_WGS_1984') == -1:
gdaltest.post_reason('did not get expected srs')
print(txt)
return 'fail'
return 'success'
###############################################################################
# Test GetExtent()
def ogr_db2_GetExtent():
if ogrtest.db2_drv is None:
return 'skip'
ds = ogr.Open(ogrtest.db2_test_server)
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
extent = lyr.GetExtent()
if extent is None:
gdaltest.post_reason('did not get extent')
return 'fail'
if extent != (-122.030745, -121.95672, 37.278665, 37.440885):
gdaltest.post_reason('did not get expected extent')
print(extent)
return 'fail'
return 'success'
###############################################################################
# Test GetFeature()
def ogr_db2_GetFeature():
if ogrtest.db2_drv is None:
return 'skip'
ds = ogr.Open(ogrtest.db2_test_server)
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
feat = lyr.GetFeature(5)
if feat is None:
gdaltest.post_reason('did not get a feature')
return 'fail'
if feat.GetField('ZIP') != '95008':
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test SetSpatialFilter()
def ogr_db2_SetSpatialFilter():
if ogrtest.db2_drv is None:
return 'skip'
ds = ogr.Open(ogrtest.db2_test_server)
if ds is None:
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
return 'fail'
# set a query envelope so we only get one feature
lyr.SetSpatialFilterRect( -122.02, 37.42, -122.01, 37.43 )
count = lyr.GetFeatureCount()
if count != 1:
gdaltest.post_reason('did not get expected feature count (1)')
print(count)
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('did not get a feature')
return 'fail'
if feat.GetField('ZIP') != '94089':
gdaltest.post_reason('did not get expected feature')
feat.DumpReadable()
return 'fail'
# start over with a larger envelope to get 3 out of 5 of the points
lyr.ResetReading()
lyr.SetSpatialFilterRect( -122.04, 37.30, -121.80, 37.43 )
count = lyr.GetFeatureCount()
if count != 3:
gdaltest.post_reason('did not get expected feature count (3)')
print(count)
return 'fail'
# iterate through the features to make sure we get the same count
count = 0
feat = lyr.GetNextFeature()
while feat is not None:
count = count + 1
feat = lyr.GetNextFeature()
if count != 3:
gdaltest.post_reason('did not get expected feature count (3)')
print(count)
return 'fail'
return 'success'
#
# test what capabilities the DB2 driver provides
#
def ogr_db2_capabilities():
if ogrtest.db2_drv is None:
return 'skip'
ds = ogr.Open(ogrtest.db2_test_server)
if ds is None:
return 'fail'
layer = ds.GetLayer()
capabilities = [
ogr.OLCRandomRead,
ogr.OLCSequentialWrite,
ogr.OLCRandomWrite,
ogr.OLCFastSpatialFilter,
ogr.OLCFastFeatureCount,
ogr.OLCFastGetExtent,
ogr.OLCCreateField,
ogr.OLCDeleteField,
ogr.OLCReorderFields,
ogr.OLCAlterFieldDefn,
ogr.OLCTransactions,
ogr.OLCDeleteFeature,
ogr.OLCFastSetNextByIndex,
ogr.OLCStringsAsUTF8,
ogr.OLCIgnoreFields
]
print("Layer Capabilities:")
for cap in capabilities:
print(" %s = %s" % (cap, layer.TestCapability(cap)))
return 'success'
def ogr_db2_listdrivers():
cnt = ogr.GetDriverCount()
formatsList = [] # Empty List
for i in range(cnt):
driver = ogr.GetDriver(i)
driverName = driver.GetName()
# print driverName
if driverName not in formatsList:
formatsList.append(driverName)
formatsList.sort() # Sorting the messy list of ogr drivers
for i in formatsList:
print(i)
return 'success'
gdaltest_list = [
ogr_db2_check_driver,
ogr_db2_init,
# ogr_db2_listdrivers,
ogr_db2_GetSpatialRef,
ogr_db2_GetExtent,
ogr_db2_GetFeature,
ogr_db2_SetSpatialFilter,
ogr_db2_capabilities,
ogr_db2_GetFeatureCount
]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_db2' )
if os.name == 'nt':
gdaltest.run_tests( gdaltest_list )
else:
print("These tests only run on Windows")
gdaltest.summarize()
|
nextgis-extra/tests
|
lib_gdal/ogr/ogr_db2.py
|
Python
|
gpl-2.0
| 8,591
|
#!/usr/bin/env python
from django.contrib import admin
from .models import Office, Document, DocumentFile
admin.site.register(Office)
admin.site.register(Document)
admin.site.register(DocumentFile)
|
PatchRanger/declarator-test-task
|
office/admin.py
|
Python
|
gpl-2.0
| 199
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# Final Project: Raquel Galán Montes
"""
Class (and main program) for a SIP server
"""
import SocketServer
import socket
import sys
import os
import time
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
class XMLHandler(ContentHandler):
"""
    Class for reading from an XML configuration file
"""
def __init__(self):
"""
Constructor
"""
self.dic = {}
self.etiq = {"account", "uaserver", "rtpaudio", "regproxy", "log", "audio"}
self.atrib = {
"account": ["username","passwd"],
"uaserver": ["ip", "puerto"],
"rtpaudio": ["puerto"],
"regproxy": ["ip", "puerto"],
"log": ["path"],
"audio": ["path"]
}
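        # Illustrative configuration file (values invented; structure
        # inferred from self.etiq and self.atrib above, root element
        # name assumed):
        # <config>
        #   <account username="alice" passwd="secret"/>
        #   <uaserver ip="127.0.0.1" puerto="5060"/>
        #   <rtpaudio puerto="23032"/>
        #   <regproxy ip="127.0.0.1" puerto="5555"/>
        #   <log path="uaserver.log"/>
        #   <audio path="cancion.mp3"/>
        # </config>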
def startElement(self, name, attrs):
"""
        Joins each element's tag with its attributes
"""
if name in self.etiq:
for atributo in self.atrib[name]:
element = name + "_" + atributo
self.dic[element] = attrs.get(atributo, "")
def get_tags(self):
"""
        Returns the dictionary entries
"""
return self.dic
class Log():
"""
    Writes down what happens in the UA
"""
def __init__(self, fich):
self.fich = fich
def FicheroXML(self, evento, datos, ip, port):
"""
        Builds each space-separated line for each case
"""
fich_log = open(self.fich, "a")
t = time.strftime("%Y%m%d%H%M%S", time.gmtime(time.time()))
line = datos.split()
line = " ".join(line)
if evento != " Starting..." and evento != " Finishing.":
linea_log = t + evento + ip + ":" + str(port) + ": "
linea_log += line + '\r\n'
fich_log.write(linea_log)
elif evento == "Error":
linea_log = t + evento + '\r\n'
fich_log.write(linea_log)
else:
linea_log = t + evento + '\r\n'
fich_log.write(linea_log)
fich_log.close()
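        # Example of a resulting log line (illustrative values):
        # 20171022153000 Received from 127.0.0.1:5555: INVITE sip:bob@host SIP/2.0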
class SIPHandler(SocketServer.DatagramRequestHandler):
"""
SIP server class
"""
def handle(self):
        # Response types:
Not_Allowed = "SIP/2.0 405 Method Not Allowed\r\n\r\n"
Bad = "SIP/2.0 400 Bad Request\r\n\r\n"
while 1:
            # Read, line by line, what the client sends us
line = self.rfile.read()
ip = self.client_address[0]
pto = self.client_address[1]
if line != "":
lista = line.split()
username = lista[1]
sip_username = username.split(":")[0]
version_sip = lista[2]
if sip_username == "sip" and version_sip == "SIP/2.0":
print "El proxy nos manda: " + line
metodo = line.split(" ")[0]
#log
dic = cHandler.get_tags()
LOG_PATH = dic["log_path"]
log = Log(LOG_PATH)
log.FicheroXML(" Received from ", line, REGPROXY_IP, REGPROXY_PTO)
if metodo == "INVITE":
print "Se ha recibido: " + line + "\r\n"
print "Comienza INVITE"
# Se forma line con 100, 180, 200
line = "SIP/2.0 100 Trying\r\n\r\n"
line += "SIP/2.0 180 Ringing\r\n\r\n"
line += "SIP/2.0 200 OK\r\n\r\n"
cabecera = line + "Content-Type: application/sdp\r\n\r\n"
cabecera += "v=0\r\n" + "o=" + USERNAME + " "
cabecera += UASERVER_IP + "\r\n" + "s=misesion\r\n"
cabecera += "t=0\r\n" + "m=audio " + str(RTPAUDIO_PTO)
cabecera += " RTP\r\n\r\n"
print "Envio: " + cabecera
self.wfile.write(cabecera)
log.FicheroXML(" Sent to ", cabecera, REGPROXY_IP, REGPROXY_PTO)
print "Acaba INVITE"
elif metodo == "ACK":
print "Se recibe ACK y empieza RTP"
#Se recibe ACK y empieza RTP
os.system("chmod 777 mp32rtp")
aEjecutar = "./mp32rtp -i " + REGPROXY_IP + " -p "
aEjecutar += str(RTPAUDIO_PTO) + " < " + AUDIO_PATH
print "Vamos a ejecutar", aEjecutar
os.system(aEjecutar)
print "ip", ip
print "uaserver_ip", UASERVER_IP
print "pto", pto
log.FicheroXML(" Envio RTP ", " ", " ", " ")
print "Fin RTP: Audio enviado>>>>>>>"
print "Acaba RTP"
elif metodo == "BYE":
print "Comienza BYE"
line = "SIP/2.0 200 OK\r\n\r\n"
self.wfile.write(line)
print "responde al BYE " + line
log.FicheroXML(" Sent to ", line, ip, pto)
log.FicheroXML(" Finishing. ", " ", " ", " ")
print "Acaba BYE"
else:
                        # Never reached in practice; the client blocks this first
                        print "invalid method"
line = Not_Allowed
self.wfile.write(line)
log.FicheroXML(" Sent to ", line, ip, pto)
raise SystemExit
else:
                    # Never reached in practice; the client blocks this first
                    print "malformed request"
line = Bad
self.wfile.write(line)
#log.FicheroXML(" Sent to ", line, ip, pto)
raise SystemExit
            # If there are no more lines, leave the infinite loop
else:
break
if __name__ == "__main__":
dic_rtp = {}
try:
        # Check the command-line argument
        # XML config file
CONFIG = sys.argv[1]
        # os.path.exists only returns True if a file with that name exists,
        # so check before trying to parse it
        if os.path.exists(CONFIG) is False:
            print "no XML file with that name exists"
            raise SystemExit
        # Extract the data from the XML
        parser = make_parser()
        cHandler = XMLHandler()
        parser.setContentHandler(cHandler)
        parser.parse(open(CONFIG))
        dic = cHandler.get_tags()
        # Check whether the audio file exists
AUDIO_PATH = dic["audio_path"]
if not os.path.exists(AUDIO_PATH):
print "no existe ningun archivo de audio con ese nombre"
raise SystemExit
        # Bind names to the elements from the XML file
USERNAME = dic["account_username"]
UASERVER_IP = dic["uaserver_ip"]
UASERVER_PTO = int(dic["uaserver_puerto"])
RTPAUDIO_PTO = int(dic["rtpaudio_puerto"])
REGPROXY_IP = dic["regproxy_ip"]
REGPROXY_PTO = int(dic["regproxy_puerto"])
        # Create the server and listen
serv = SocketServer.UDPServer((UASERVER_IP, UASERVER_PTO), SIPHandler)
print "Listening..."
serv.serve_forever()
except:
print ("Usage: python uaserver.py config")
raise SystemExit
|
raquelgalan/ptavi-pfinal
|
uaserver.py
|
Python
|
gpl-2.0
| 8,062
|