code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from pprint import pprint
from django import forms
from base.forms.custom import O2BaseForm
from base.forms.fields import (
O2FieldCorForm,
O2FieldModeloForm,
O2FieldRefForm,
)
class InfAdProdForm(forms.Form):
    """Form asking for a single order ("pedido") number.

    The widget is rendered as an HTML number input and receives focus on load.
    """
    pedido = forms.CharField(
        label='Pedido',
        widget=forms.TextInput(attrs={'type': 'number',
                                      'autofocus': 'autofocus'}))
class RemessaIndustrBaseForm(forms.Form):
    """Base filter form for industrialization-remittance queries.

    Holds the date ranges, partner and document-number filters shared by the
    concrete remittance forms that subclass it.  All fields are optional.
    """
    data_de = forms.DateField(
        label='NF Remessa - Data inicial', required=False,
        widget=forms.DateInput(attrs={'type': 'date'}))
    data_ate = forms.DateField(
        label='NF Remessa - Data final', required=False,
        widget=forms.DateInput(attrs={'type': 'date'}))
    faccao = forms.CharField(
        label='Facção', required=False,
        help_text='Busca no nome e no CNPJ da facção',
        widget=forms.TextInput(attrs={'type': 'string'}))
    cliente = forms.CharField(
        label='Cliente', required=False,
        help_text='Busca no nome e no CNPJ do cliente',
        widget=forms.TextInput(attrs={'type': 'string'}))
    pedido = forms.CharField(
        label='Pedido Tussor', required=False,
        widget=forms.TextInput(attrs={'type': 'number'}))
    pedido_cliente = forms.CharField(
        label='Pedido de cliente', required=False,
        widget=forms.TextInput(attrs={'type': 'string'}))
    op = forms.CharField(
        label='OP', required=False,
        widget=forms.TextInput(attrs={'type': 'number'}))
    CHOICES = [('T', 'Todas as remessas'),
               ('S', 'Só remessas Sem retorno'),
               ('C', 'Só remessas Com retorno'),
               ]
    retorno = forms.ChoiceField(
        label='Retorno', choices=CHOICES, initial='T')
    data_ret_de = forms.DateField(
        label='NF Retorno - Data inicial', required=False,
        widget=forms.DateInput(attrs={'type': 'date'}))
    data_ret_ate = forms.DateField(
        label='NF Retorno - Data final', required=False,
        widget=forms.DateInput(attrs={'type': 'date'}))
    nf_ret = forms.CharField(
        label='NF Retorno', required=False,
        widget=forms.TextInput(attrs={'type': 'number'}))
    nf = forms.CharField(
        label='NF Remessa', required=False,
        widget=forms.TextInput(attrs={'type': 'number'}))

    def _clean_upper(self, field_name):
        """Uppercase the field value and write it back into ``self.data``.

        Mutating ``self.data`` (a copy, since the request QueryDict is
        immutable) makes the normalized text reappear when the bound form is
        re-rendered.  Shared by clean_faccao/clean_cliente to avoid the
        previous copy-pasted implementations.
        """
        value = self.cleaned_data[field_name].upper()
        data = self.data.copy()
        data[field_name] = value
        self.data = data
        return value

    def clean_faccao(self):
        return self._clean_upper('faccao')

    def clean_cliente(self):
        return self._clean_upper('cliente')
class RemessaIndustrNFForm(RemessaIndustrBaseForm):
    """Remittance filter form with NF-level situation and detail options."""
    # CHOICES is rebound below; each ChoiceField captures the list it was
    # defined with, so reusing the name is safe.
    CHOICES = [('T', 'Todas as remessas'),
               ('A', 'Ativa'),
               ('C', 'Canceladas'),
               ('D', 'Devolvidas'),
               ]
    situacao = forms.ChoiceField(
        label='Situação', choices=CHOICES, initial='A')
    CHOICES = [('I', 'Por item de NF de remessa'),
               ('1', 'Por item de nível 1 de NF de remessa'),
               ('R', 'Por referência de nível 1 de NF de remessa'),
               ('N', 'Por NF de remessa'),
               ]
    detalhe = forms.ChoiceField(
        label='Detalhamento', choices=CHOICES, initial='N')
class RemessaIndustrForm(RemessaIndustrBaseForm):
    """Remittance filter form with color/size detail granularity."""
    CHOICES = [('C', 'Apenas por cor'),
               ('T', 'Por cor e tamanho'),
               ]
    detalhe = forms.ChoiceField(
        label='Detalhe', choices=CHOICES, initial='C')
class NotaFiscalForm(forms.Form):
    """Form asking for a single fiscal-note (NF) number."""
    nf = forms.CharField(
        label='Nota fiscal',
        widget=forms.TextInput(attrs={'type': 'number',
                                      'autofocus': 'autofocus'}))
class buscaNFForm(
        O2BaseForm,
        O2FieldCorForm,
        O2FieldModeloForm,
        O2FieldRefForm,
        ):
    """Search form for fiscal notes, filtering by reference, model and color.

    The ``ref``, ``modelo`` and ``cor`` fields come from the O2Field* mixins;
    ``pagina`` carries the pagination state as a hidden input.
    """
    pagina = forms.IntegerField(
        required=False, widget=forms.HiddenInput())

    class Meta:
        autofocus_field = 'ref'
        order_fields = [
            'ref',
            'modelo',
            'cor',
        ]

    def clean(self):
        """Require at least one filter (ref/cor text or a modelo value)."""
        # Use .get(): a field that failed its own validation is absent from
        # cleaned_data, so direct indexing would raise KeyError here instead
        # of producing a form error.
        filtros = (
            self.cleaned_data.get('ref', '') +
            self.cleaned_data.get('cor', '')
        )
        if len(filtros.strip()) == 0 and self.cleaned_data.get('modelo') is None:
            raise forms.ValidationError(
                "Algum filtro deve ser definido.")
        # Django convention: clean() returns the cleaned data.
        return self.cleaned_data
class UploadArquivoForm(forms.Form):
    """Single-field form for uploading one file."""
    arquivo = forms.FileField()
| anselmobd/fo2 | src/contabil/forms.py | Python | mit | 4,534 |
from pprint import pprint
import lotes.models
def where_ende_disponivel(campo):
    """Build a SQL WHERE fragment restricting ``l.local`` to available addresses.

    Prefixes configured as available (EnderecoDisponivel.disponivel=True) are
    combined into a ``~ '^(p1|p2|...).*'`` regex match; unavailable prefixes
    into a ``!~ '(p1|p2|...)'`` exclusion.  Returns an empty string when no
    rules are configured.

    NOTE(review): ``campo`` is currently unused; kept for interface
    compatibility with callers.
    NOTE(review): the prefixes are interpolated into a SQL string literal —
    they come from the database, but must never contain quote characters.
    """
    filter_local = ""
    available = [str(regra['inicio']) for regra in
                 lotes.models.EnderecoDisponivel.objects.filter(
                     disponivel=True).values()]
    if available:
        filter_local += (
            "--\n"
            "AND l.local ~ '^(" + "|".join(available) + ").*'\n"
        )
    unavailable = [str(regra['inicio']) for regra in
                   lotes.models.EnderecoDisponivel.objects.filter(
                       disponivel=False).values()]
    if unavailable:
        filter_local += (
            "--\n"
            "AND l.local !~ '(" + "|".join(unavailable) + ")'\n"
        )
    return filter_local
| anselmobd/fo2 | src/cd/queries/functions.py | Python | mit | 898 |
from dataclasses import dataclass
import io
import typing as T
import numpy as np
@dataclass(frozen=True)
class PolygonShape:
    """A colored polygon given by its vertices in body-local coordinates."""
    vertices: T.Tuple[T.Tuple[float, float], ...]  # (x, y) pairs
    color: T.Text  # any SVG color/paint string
@dataclass(frozen=True)
class Body:
    """A rigid body: a pose (position + rotation) and its shapes."""
    x: float
    y: float
    angle: float  # radians (converted to degrees when rendered)
    shapes: T.Tuple[PolygonShape, ...]
@dataclass(frozen=True)
class Scene:
    """A renderable collection of bodies with a viewport."""
    bodies: T.Tuple[Body, ...]
    bounds: T.Tuple[float, float, float, float]  # left, right, top, bottom
    width: int  # output width in pixels; height follows the aspect ratio
def draw_shape(shape, out):
    """Write one polygon as an SVG <path> element to the text stream *out*."""
    first, *rest = shape.vertices
    out.write(f'<path fill="{shape.color}" d="')
    out.write(f'M {first[0]} {first[1]}')
    for x, y in rest:
        out.write(f' L {x} {y}')
    out.write('"/>')
def draw_body(body, out):
    """Write *body* as an SVG <g> translating/rotating its shapes into place.

    The body's angle is stored in radians and converted to degrees for SVG.
    """
    degrees = body.angle * 180 / np.pi
    out.write(f'<g transform="translate({body.x},{body.y}) rotate({degrees})">')
    for polygon in body.shapes:
        draw_shape(polygon, out)
    out.write('</g>')
def draw_scene(scene, out):
    """Write *scene* as a complete <svg> element to the text stream *out*.

    The output height preserves the aspect ratio of the scene bounds, and a
    wrapping <g> flips the y axis so +y points up (physics convention).
    """
    xmin, xmax, ymin, ymax = scene.bounds
    span_x = xmax - xmin
    span_y = ymax - ymin
    height = span_y / span_x * scene.width
    viewbox = f'{xmin} {ymin} {span_x} {span_y}'
    out.write(f'<svg viewBox="{viewbox}" width="{scene.width}" height="{height}">')
    out.write(f'<g transform="scale(1,-1) translate(0, {-(ymax + ymin)})">')
    for body in scene.bodies:
        draw_body(body, out)
    out.write('</g></svg>')
def draw(scene):
    """Render *scene* and return the SVG markup as a string."""
    buffer = io.StringIO()
    draw_scene(scene, buffer)
    return buffer.getvalue()
| DouglasOrr/Snippets | hover/hover/render.py | Python | mit | 1,552 |
""" progressbar2 related utils"""
from codekit.codetools import warn
from public import public
from time import sleep
import progressbar
import functools
@public
def setup_logging(verbosity=0):
    """Configure progressbar sys.stderr wrapper which is required to play nice
    with logging and not have strange formatting artifacts.

    NOTE(review): ``verbosity`` is currently unused; presumably kept for API
    symmetry with other setup_logging() helpers — confirm before removing.
    """
    progressbar.streams.wrap_stderr()
@public
def countdown_timer(seconds=10):
    """Show a simple countdown progress bar

    Parameters
    ----------
    seconds
        Period of time the progress bar takes to reach zero.
    """
    tick = 0.1  # seconds per bar update
    total_ticks = int(seconds / tick)
    bar = progressbar.ProgressBar(
        widgets=['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()],
        max_value=total_ticks,
    ).start()
    for step in range(total_ticks):
        bar.update(step)
        sleep(tick)
    bar.finish()
@public
def wait_for_user_panic(**kwargs):
    """Display a scary message and count down a progress bar so an interactive
    user has a chance to panic and kill the program.

    Parameters
    ----------
    kwargs
        Passed verbatim to countdown_timer()
    """
    warn('Now is the time to panic and Ctrl-C')
    countdown_timer(**kwargs)
@public
@functools.lru_cache()
def wait_for_user_panic_once(**kwargs):
    """Same functionality as wait_for_user_panic() but will only display a
    countdown once, regardless of how many times it is called.

    Note: lru_cache keys on the call arguments, so the countdown runs once
    per distinct set of kwargs (whose values must be hashable).

    Parameters
    ----------
    kwargs
        Passed verbatim to wait_for_user_panic()
    """
    wait_for_user_panic(**kwargs)
@public
def eta_bar(msg, max_value):
    """Display an adaptive ETA / countdown bar with a message.

    Parameters
    ----------
    msg: str
        Message to prefix countdown bar line with
    max_value: max_value
        The max number of progress bar steps/updates
    """
    prefix = "{msg}:".format(msg=msg)
    return progressbar.ProgressBar(
        widgets=[prefix, progressbar.Bar(), ' ', progressbar.AdaptiveETA()],
        max_value=max_value,
    )
| lsst-sqre/sqre-codekit | codekit/progressbar.py | Python | mit | 2,070 |
import os
import shutil
def pre_read(dir):
    """Recursively scan *dir* for ``*_main.maxroot`` files.

    Directories named ``webMager`` or ``_site`` are skipped; unreadable
    directories are silently ignored.  Loading the found files is still a
    placeholder (kept from the original).
    """
    try:
        filelist = os.listdir(dir)
        for file in filelist:
            # BUG FIX: os.listdir returns bare names; isdir()/recursion must
            # use the full path or they only work relative to the cwd.
            full_path = os.path.join(dir, file)
            if os.path.isdir(full_path):
                # exception directory
                if str(file) == 'webMager' or str(file) == '_site':
                    continue
                pre_read(full_path)
            else:
                # read *_main.maxroot file
                ext = str(file)
                if ext.endswith('_main.maxroot'):
                    # TODO: actually load the file (placeholder kept).
                    a = 1
    except PermissionError:
        pass
# Move to the parent directory and compute the generated-site output path.
os.chdir('..')
root_path = os.path.abspath(os.curdir)
result_path = os.path.join(root_path, '_site')
# BUG FIX: removed a stray `t` token that raised NameError at import time.
| MaximumRoot/MaxRootWeb | webMaker/maxrootweb.py | Python | mit | 718 |
#!/usr/bin/env python
# Emit a C array of half-period lengths (in samples) for an equal-tempered
# scale, given a sample rate and an A4 reference frequency on the command line.
from sys import argv, stderr
usage = \
"""
Usage: {program} <sample rate> <A4 freq.> [octaves=8]
e.g.: {program} 64000 442.0 5
""".format(program=argv[0])
if len(argv) < 3 or len(argv) > 4 :
    print(usage, file = stderr)
    exit(1)
A4 = 0
sample_rate = 0
octaves = 8
try:
    A4 = float(argv[2])
except:
    print("Error, invalid argument: Freq. must be a number!", file = stderr)
    print(usage, file = stderr)
    exit(1)
try:
    sample_rate = int(argv[1])
except:
    print("Error, invalid argument: Sample rate must be an integer!", \
        file = stderr)
    print(usage, file = stderr)
    exit(1)
if len(argv) == 4 :
    try:
        octaves = int(argv[3])
    except:
        print("Error, invalid argument: Octaves must be an integer!", \
            file = stderr)
        print(usage, file = stderr)
        exit(1)
# Ratio between adjacent semitones: the twelfth root of two.
freq_ratio = 2**(1/12)
# A4 is 57 semitones above the table's lowest note (C0).
base_freq = A4/(freq_ratio**57)
# Half-period in samples per semitone: toggling an output at this rate
# produces a square wave at the note's frequency.
periods = [round(sample_rate/(2*base_freq*freq_ratio**t)) \
    for t in range(0, 12*octaves)]
print("uint16_t tone_periods[{ntones}] = {{".format(ntones=12*octaves))
for o in range(0, octaves):
    print('\t', end='')
    for i in range(0, 12):
        print("{period}, ".format(period=periods[12*o+i]), end='')
    print('')
print("};")
| bardes/sonitus | tools/tone_gen.py | Python | mit | 1,271 |
from __future__ import print_function
__author__ = 'breddels'
# import astropy.vo.samp as sampy
import platform
import vaex.utils
import sys
import threading
import vaex.export
import vaex.utils
import vaex.promise
import vaex.settings
import vaex.remote
import psutil
from vaex.parallelize import parallelize
from vaex.ui.plot_windows import PlotDialog
import vaex.ui.columns
import vaex.ui.variables
import vaex.ui.qt as dialogs
import astropy.units
# py2/p3 compatibility
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import vaex as vx
# from PySide import QtGui, QtCore
from vaex.ui.qt import *
from vaex.ui.table import *
from vaex.samp import Samp
# help py2app, it was missing this import
try: # in Pyinstaller this doesn't work, and we can get away with not setting this, total mystery
import sip
sip.setapi('QVariant', 2)
sip.setapi('QString', 2)
except:
pass
# Platform flags used throughout the module.
darwin = "darwin" in platform.system().lower()
frozen = getattr(sys, 'frozen', False)
# print "DEFAULT ENCODING is: %s"%(sys.getdefaultencoding())
# print "FILE SYSTEM ENCODING is: %s"%(sys.getfilesystemencoding())
# if darwin:
# Work around environments (seen with PyInstaller builds) where the
# filesystem encoding is unset; fall back to UTF-8.
if sys.getfilesystemencoding() is None:  # TODO: why does this happen in pyinstaller?
    def getfilesystemencoding_wrapper():
        return "UTF-8"
    sys.getfilesystemencoding = getfilesystemencoding_wrapper
# on osx 10.8 we sometimes get pipe errors while printing, ignore these
# signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# Developer aid: if importing astropy.io.fits fails, print the error and drop
# straight into the debugger instead of crashing later with a less helpful
# message.  NOTE(review): pdb.set_trace() in an import-time except block will
# block non-interactive runs — confirm this is intended outside development.
try:
    import pdb
    import astropy.io.fits
    # pdb.set_trace()
except Exception as e:
    print(e)
    pdb.set_trace()
import vaex.ui.plot_windows as vp
from vaex.ui.ranking import *
import vaex.ui.undo
import vaex.kld
import vaex.utils
import vaex.dataset
# import subspacefind
# import ctypes
import imp
import logging
logger = logging.getLogger("vaex")
# import locale
# locale.setlocale(locale.LC_ALL, )
# samp stuff
# import astropy.io.votable
# Optional user plugin: load ~/.vaex/custom.py as module vaex.custom if it
# exists.  ``custom`` currently stays None in both branches (the instantiation
# is commented out).
custom = None
custompath = path = os.path.expanduser('~/.vaex/custom.py')
# print path
if os.path.exists(path):
    customModule = imp.load_source('vaex.custom', path)
    # custom = customModule.Custom()
else:
    custom = None
    logger.debug("%s does not exist" % path)
# print "root path is", vaex.utils.get_root_path()
# Resolve the application directory: the executable's directory when frozen
# (PyInstaller/py2app), otherwise the directory containing this file.
if getattr(sys, 'frozen', False):
    application_path = os.path.dirname(sys.executable)
elif __file__:
    application_path = os.path.dirname(__file__)
if not frozen:  # astropy not working :s
    pass
    # import pdb
    # pdb.set_trace()
    # fix from Chris Beaumont
    # import astropy.logger
    # astropy.logger.log.disable_warnings_logging()
__import__("astropy.io.votable")
# for osx
if "darwin" in platform.system().lower():
    application_path = os.path.abspath(".")
def error(title, msg):
    """Fallback error reporter: print the title and message to stdout."""
    parts = ("Error", title, msg)
    print(*parts)
from vaex.dataset import *
# Log-spaced display fractions (0.01% .. 100%) offered by the fraction slider.
possibleFractions = [10**base * f for base in [-3, -2, -1, 0] for f in [0.25, 0.5, 0.75, 1.]]
possibleFractions.insert(0, 10**-4)
# print possibleFractions
class DatasetSelector(QtGui.QListWidget):
    """List widget showing the open datasets (disk, in-memory, remote).

    Emits ``signal_dataset_select`` when the selection changes,
    ``signal_add_dataset`` when a dataset is added and re-broadcasts row
    picks via ``signal_pick``.
    """

    def __init__(self, parent):
        super(DatasetSelector, self).__init__(parent)
        # self.icon = QtGui.QIcon('icons/png/24x24/devices/memory.png')
        # self.icon_server = QtGui.QIcon('icons/png/24x24/devices/memory.png')
        self.icon = QtGui.QIcon(vp.iconfile('drive'))
        self.icon_server = QtGui.QIcon(vp.iconfile('server-network'))
        self.icon_memory = QtGui.QIcon(vp.iconfile('memory'))
        self.datasets = []
        self.signal_pick = vaex.events.Signal("pick")
        self.signal_add_dataset = vaex.events.Signal("add dataset")
        self.signal_add_dataset.connect(self.on_add_dataset)
        self.signal_dataset_select = vaex.events.Signal("dataset-select")
        self.currentItemChanged.connect(self.onDatasetSelected)
        # self.items

    def onDatasetSelected(self, data_item, previous):
        # Emit the dataset stored in the item's UserRole slot; PyQt4 wraps it
        # in a QVariant (toPyObject), newer bindings return it directly.
        if data_item is not None:
            data = data_item.data(QtCore.Qt.UserRole)
            if hasattr(data, "toPyObject"):
                dataset = data.toPyObject()
                self.signal_dataset_select.emit(dataset)
            else:
                self.signal_dataset_select.emit(data)

    def on_add_dataset(self, dataset):
        # print "added dataset", dataset
        self.datasets.append(dataset)
        dataset.signal_pick.connect(self.on_pick)

    def on_pick(self, dataset, row):
        # broadcast
        logger.debug("broadcast pick")
        self.signal_pick.emit(dataset, row)

    def setBestFraction(self, dataset):
        # NOTE(review): disabled — returns immediately; the code below that
        # would lower the active fraction for very large datasets is dead.
        return
        Nmax = 1000 * 1000 * 10
        for fraction in possibleFractions[::-1]:
            N = len(dataset)
            if N > Nmax:
                dataset.set_active_fraction(fraction)
                logger.debug("set best fraction for dataset %r to %r" % (dataset, fraction))
            else:
                break

    def is_empty(self):
        """Return True when no dataset has been added yet."""
        return len(self.datasets) == 0

    def open(self, path, **kwargs):
        """Open *path* with vaex and add the resulting dataset to the list."""
        ds = vaex.open(path, **kwargs)
        return self.add(ds)

    def add(self, dataset):
        """Add *dataset* to the list, choose its icon, select it and emit signals."""
        self.setBestFraction(dataset)
        item = QtGui.QListWidgetItem(self)
        item.setText(dataset.name)
        icon = self.icon
        if hasattr(dataset, "filename"):
            item.setToolTip("file: " + dataset.filename)
        if isinstance(dataset, vaex.remote.DataFrameRemote):
            icon = self.icon_server
            item.setToolTip("source: " + dataset.path)
        if isinstance(dataset, vaex.dataset.DatasetArrays):
            icon = self.icon_memory
        item.setIcon(icon)
        # TODO: this hangs on pyside 1.2.1, linux
        item.setData(QtCore.Qt.UserRole, dataset)
        self.setCurrentItem(item)
        self.signal_add_dataset.emit(dataset)
        return dataset
class Worker(QtCore.QThread):
    """Run ``func(*args, **kwargs)`` on a Qt worker thread.

    The return value is stored on ``self.result`` when ``run`` completes.
    """

    def __init__(self, parent, name, func, *args, **kwargs):
        # BUG FIX: ``parent`` was accepted but discarded (parent=None was
        # passed to QThread), leaving the thread without a Qt owner; pass it
        # through so Qt manages the thread's lifetime.
        QtCore.QThread.__init__(self, parent=parent)
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.name = name
        self.signal = QtCore.SIGNAL("signal")

    def run(self):
        time.sleep(0.1)
        print("in thread", self.currentThreadId())
        self.result = self.func(*self.args, **self.kwargs)
        print("result:", self.result)
        # self.emit(self.signal, self.result)
        # self.exec_()
class MyStats(object):
    """Callable that (eventually) evaluates a statistic on wrapped data.

    BUG FIX: this was declared with ``def`` instead of ``class`` — making it
    a function that defined two unused inner functions and returned None —
    and the call hook was misspelled ``__call___`` (three underscores).
    The body is still the original placeholder that returns 1.
    """

    def __init__(self, data):
        self.data = data

    def __call__(self, args):
        print(args)
        # stat_name, column_name = args
        # print "do", stat_name, "on", column_name
        return 1
        # f = stats[stat_name]
        # return column_name, stat_name, f(self.data.columns[column_name])
# Map of statistic name -> function rendering that statistic of an array as str.
stats = {"minimum": lambda x: str(np.nanmin(x)), "maximum": lambda x: str(np.nanmax(x)), "mean": lambda x: str(np.mean(x)), "std": lambda x: str(np.std(x))}
def statsrun(args):
    # NOTE(review): placeholder — looks up the statistic function but always
    # returns 1 without applying it; appears to be dead/WIP code (StatWorker
    # below defines its own parallel job instead).
    columns, stat_name, column_name = args
    f = stats[stat_name]
    # print args
    return 1
class StatWorker(QtCore.QThread):
    """Thread computing every statistic in ``stats`` for all columns of *data*.

    When ``run`` finishes, results are available as
    ``self.results[stat_name][column_name]`` (string values).
    """

    def __init__(self, parent, data):
        QtCore.QThread.__init__(self, parent=parent)
        self.data = data

    def run(self):
        time.sleep(0.1)
        print("in thread", self.currentThreadId())
        # One job per (statistic, column) pair, fanned out over all cores.
        jobs = [(stat_name, column_name) for stat_name in list(stats.keys()) for column_name in list(self.data.columns.keys())]

        @parallelize(cores=QtCore.QThread.idealThreadCount())
        def dostats(args, data=self.data):
            # Runs in parallel: apply one statistic to one column's active slice.
            stat_name, column_name = args
            columns = data.columns
            f = stats[stat_name]
            result = f(columns[column_name][slice(*data.current_slice)])
            print(result)
            return result
        values = dostats(jobs)
        # Re-assemble the flat job/value lists into a nested dict.
        self.results = {}
        for job, value in zip(jobs, values):
            stat_name, column_name = job
            if stat_name not in self.results:
                self.results[stat_name] = {}
            self.results[stat_name][column_name] = value
        print("results", self.results)
class StatisticsDialog(QtGui.QDialog):
    """Dialog showing a table of basic statistics (rows=columns, cols=stats).

    The values are computed asynchronously by a StatWorker and filled into
    the table when the worker finishes.  The ``if 0:`` branches are disabled
    earlier implementations kept for reference.
    """

    def __init__(self, parent, data):
        super(StatisticsDialog, self).__init__(parent)
        self.data = data
        # self.form_layout = QtGui.QFormLayout()
        # self.min = QtGui.QLabel('...computing...', self)
        # self.form_layout.addRow('Minimum:', self.min)
        # self.setLayout(self.form_layout)
        self.boxlist = QtGui.QHBoxLayout(self)
        self.headers = ['minimum', 'maximum', 'mean', 'std']
        # WorkerMinimum = lambda parent, data, column_name: Worker(parent, 'minimum', lambda data, column_name: str(min(data.columns[column_name])), data=data, column_name=column_name)
        # WorkerMaximum = lambda parent, data, column_name: Worker(parent, 'maximum', lambda data, column_name: str(max(data.columns[column_name])), data=data, column_name=column_name)
        # self.workers = {'minimum':WorkerMinimum, 'maximum': WorkerMaximum}
        self.table = QtGui.QTableWidget(data.nColumns, len(self.headers), self)
        self.table.setHorizontalHeaderLabels(self.headers)
        self.table.setVerticalHeaderLabels(list(self.data.columns.keys()))
        # pool = multiprocessing.Pool() #processes=QtCore.QThread.idealThreadCount())
        # print "jobs:", jobs
        worker = StatWorker(self, self.data)

        # Fill the table from worker.results once the thread completes.
        def onFinish(worker=worker):
            for column, stat in enumerate(self.headers):
                for row, column_name in enumerate(self.data.columns.keys()):
                    value = worker.results[stat][column_name]
                    item = QtGui.QTableWidgetItem(value)
                    self.table.setItem(row, column, item)
        worker.finished.connect(onFinish)
        worker.start()
        # for name in self.header:
        # for column_name in self.data.colums.keys():
        # self.table.set
        # worker.finished.connect(onFinish)
        if 0:
            self.worker_list = []  # keep references
            def onFinish():
                for column, stat in enumerate(self.headers):
                    for row, column_name in enumerate(self.data.columns.keys()):
                        value = worker.results[stat][column_name]
                        item = QtGui.QTableWidgetItem(worker.result)
                        self.table.setItem(row, column, item)
            for column, stat in enumerate(self.headers):
                for row, column_name in enumerate(self.data.columns.keys()):
                    worker = self.workers[stat](parent, data, column_name)
                    def onFinish(worker=worker, row=row, column=column):
                        print("finished running", worker.result)
                        item = QtGui.QTableWidgetItem(worker.result)
                        self.table.setItem(row, column, item)
                    worker.finished.connect(onFinish)
                    print("starting", row, column)
                    worker.start(QtCore.QThread.IdlePriority)
                    self.worker_list.append(worker)  # keeps reference to avoid GC
        self.boxlist.addWidget(self.table)
        self.setLayout(self.boxlist)
        if 0:
            # w1 = Worker(self, lambda data: str(min(data.columns.items()[0])), self.data)
            self.w1 = Worker(self, self.test, self.data)
            # self.connect(self.w1, self.w1.signal, self.setmin)
            def setmin():
                print(self.min.setText(self.w1.result))
            self.w1.finished.connect(setmin)
            self.w1.start()

    def test(self, data):
        # Debug helper: stringify the minimum of the first column.
        print("test")
        data = list(data.columns.values())[0]
        return str(min(data))
        # return "test"

    def onFinish(self, worker):
        # Debug stub; the real completion handlers are the closures above.
        print("worker", worker)
        # print "setting", result
        # self.min = str
class TextEdit(QtGui.QTextEdit):
    """QTextEdit that emits ``doubleClicked(event)`` on mouse double-clicks."""
    doubleClicked = QtCore.pyqtSignal(object)

    def mouseDoubleClickEvent(self, event):
        self.doubleClicked.emit(event)
class DatasetPanel(QtGui.QFrame):
    def __init__(self, parent, dataset_list):
        """Build the per-dataset info/actions panel.

        Lays out (top to bottom): name/column/length labels, the display
        fraction slider, the suggestions menu, 1d/2d/3d plot menus, the
        ranking and table buttons, and an editable description box.
        """
        super(DatasetPanel, self).__init__(parent)
        self.dataset = None
        self.column_changed_handler = None
        self.active_fraction_changed_handler = None
        self.dataset_list = dataset_list
        self.app = parent
        self.undoManager = vaex.ui.undo.UndoManager()
        self.form_layout = QtGui.QFormLayout()
        # Basic dataset info labels.
        self.name = QtGui.QLabel('', self)
        self.form_layout.addRow('Name:', self.name)
        self.label_columns = QtGui.QLabel('', self)
        self.form_layout.addRow('Columns:', self.label_columns)
        self.label_length = QtGui.QLabel('', self)
        self.form_layout.addRow('Length:', self.label_length)
        if 0:
            self.button_variables = QtGui.QPushButton('Variables', self)
            self.form_layout.addRow('', self.button_variables)
        # Slider selecting which fraction of the data is displayed; the index
        # maps into the module-level possibleFractions list.
        self.fractionLabel = QtGui.QLabel("Use:...")
        self.fractionWidget = QtGui.QWidget(self)
        self.fractionLayout = QtGui.QHBoxLayout(self.fractionWidget)
        self.fractionSlider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
        self.fractionSlider.setMinimum(0)
        self.fractionSlider.setMaximum(len(possibleFractions) - 1)
        # self.numberLabel = QtGui.QLabel('')
        self.fractionLayout.addWidget(self.fractionSlider)
        # self.fractionLayout.addWidget(self.numberLabel)
        self.fractionWidget.setLayout(self.fractionLayout)
        # self.fractionSlider.setTickInterval(len(possibleFractions))
        self.form_layout.addRow(self.fractionLabel, self.fractionWidget)
        self.auto_fraction_label = QtGui.QLabel("Display", parent)
        self.auto_fraction_checkbox = QtGui.QCheckBox("Let server determine how much to display", parent)
        self.form_layout.addRow(self.auto_fraction_label, self.auto_fraction_checkbox)

        def on_change(state):
            # When auto-fraction is on (remote datasets), the manual slider
            # is disabled.
            checked = state == QtCore.Qt.Checked
            self.dataset.set_auto_fraction(checked)
            self.fractionSlider.setEnabled(not self.dataset.get_auto_fraction())
        self.auto_fraction_checkbox.stateChanged.connect(on_change)
        self.fractionSlider.sliderReleased.connect(self.onFractionSet)
        self.fractionSlider.valueChanged.connect(self.onValueChanged)
        self.onValueChanged(0)
        # Suggestions menu (filled per-dataset in show_dataset).
        self.button_suggesions = QtGui.QToolButton(self)
        self.button_suggesions.setText('Suggestions')
        self.button_suggesions.setIcon(QtGui.QIcon(vp.iconfile('light-bulb')))
        self.button_suggesions.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        self.button_suggesions.setPopupMode(QtGui.QToolButton.InstantPopup)
        self.menu_common = QtGui.QMenu()
        self.button_suggesions.setMenu(self.menu_common)
        self.form_layout.addRow('Suggestions:', self.button_suggesions)
        # Plot buttons with per-dimensionality menus.
        # self.histogramButton = QtGui.QPushButton('histogram (1d)', self)
        self.button_histogram = QtGui.QToolButton(self)
        self.button_histogram.setText('histogram (1d)')
        self.button_histogram.setIcon(QtGui.QIcon(vp.iconfile('layout')))
        self.button_histogram.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        self.form_layout.addRow('Plotting:', self.button_histogram)
        self.menu_1d = QtGui.QMenu(self)
        self.button_histogram.setMenu(self.menu_1d)
        self.button_2d = QtGui.QToolButton(self)
        self.button_2d.setIcon(QtGui.QIcon(vp.iconfile('layout-2-equal')))
        self.button_2d.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        self.button_2d.setText('x/y density')
        self.form_layout.addRow('', self.button_2d)
        self.menu_2d = QtGui.QMenu(self)
        self.button_2d.setMenu(self.menu_2d)
        self.button_3d = QtGui.QToolButton(self)
        self.button_3d.setIcon(QtGui.QIcon(vp.iconfile('layout-3')))
        self.button_3d.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        self.button_3d.setText('x/y/z density')
        self.form_layout.addRow('', self.button_3d)
        self.menu_3d = QtGui.QMenu(self)
        self.button_3d.setMenu(self.menu_3d)
        if 0:
            self.scatter1dSeries = QtGui.QPushButton('series', self)
            self.form_layout.addRow('', self.scatter1dSeries)
            self.scatter2dSeries = QtGui.QPushButton('x/y series', self)
            self.form_layout.addRow('', self.scatter2dSeries)
        if 0:
            self.serieSlice = QtGui.QToolButton(self)
            self.serieSlice.setText('serie slice')
            self.form_layout.addRow('', self.serieSlice)
        if 0:
            self.statistics = QtGui.QPushButton('Statistics', self)
            self.statistics.setDisabled(True)
            self.statistics.setIcon(QtGui.QIcon(vp.iconfile('table-sum')))
            # self.statistics.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
            self.form_layout.addRow('Data:', self.statistics)
        self.rank = QtGui.QPushButton('Rank subspaces', self)
        self.rank.setIcon(QtGui.QIcon(vp.iconfile('sort-quantity')))
        # self.table.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        self.form_layout.addRow('', self.rank)
        self.table = QtGui.QPushButton('Open table', self)
        self.table.setIcon(QtGui.QIcon(vp.iconfile('table')))
        # self.table.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        self.form_layout.addRow('', self.table)
        # Wire the buttons to their handlers.
        self.button_histogram.clicked.connect(self.onOpenHistogram)
        # self.statistics.clicked.connect(self.onOpenStatistics)
        self.button_2d.clicked.connect(self.onOpenScatter)
        self.button_3d.clicked.connect(self.onOpenScatter3d)
        # self.scatter1dSeries.clicked.connect(self.onOpenScatter1dSeries)
        # self.scatter2dSeries.clicked.connect(self.onOpenScatter2dSeries)
        # self.serieSlice.clicked.connect(self.onOpenSerieSlice)
        self.rank.clicked.connect(self.onOpenRank)
        self.table.clicked.connect(self.onOpenTable)
        # Editable (double-click) description text box.
        self.description = TextEdit('', self)
        self.description.setReadOnly(True)
        self.form_layout.addRow('Description:', self.description)
        self.description.doubleClicked.connect(self.onEditDescription)
        self.setLayout(self.form_layout)
        self.signal_open_plot = vaex.events.Signal("open plot")
    def onEditDescription(self):
        """Prompt for a new dataset description and persist it to the metadata."""
        text = dialogs.gettext(self, "Edit description", "Edit description", self.description.toPlainText())
        if text is not None:  # None means the dialog was cancelled
            self.dataset.description = text
            self.description.setText(text)
            self.dataset.write_meta()
    def onOpenStatistics(self):
        """Open the (currently unused) statistics dialog for the dataset."""
        if self.dataset is not None:
            dialog = StatisticsDialog(self, self.dataset)
            dialog.show()
    def onOpenScatter(self):
        """Open a 2d density plot using the panel's default column pair."""
        if self.dataset is not None:
            xname, yname = self.default_columns_2d
            self.plotxy(xname, yname)
    def onOpenScatter3d(self):
        """Open a 3d density plot using the dataset's first three columns."""
        if self.dataset is not None:
            xname, yname, zname = self.dataset.column_names[:3]
            self.plotxyz(xname, yname, zname)
    def onOpenSerieSlice(self):
        """Open a series-slice plot on the first two rank-1 columns (disabled UI)."""
        if self.dataset is not None:
            xname, yname = self.dataset.rank1names[:2]
            self.plotseriexy(xname, yname)
    def onOpenScatter1dSeries(self):
        """Open a 1d sequence plot for the dataset (disabled UI)."""
        if self.dataset is not None:
            dialog = vp.SequencePlot(self, self.dataset)
            dialog.show()
            self.dataset.executor.execute()
    def onOpenScatter2dSeries(self):
        """Open a 2d scatter-series plot for the dataset (disabled UI)."""
        if self.dataset is not None:
            dialog = vp.ScatterSeries2dPlotDialog(self, self.dataset)
            dialog.show()
    def onOpenHistogram(self):
        """Open a histogram of the dataset's first column."""
        if self.dataset is not None:
            xname = self.dataset.column_names[0]
            self.histogram(xname)
    def plotxy(self, xname, yname, **kwargs):
        """Open a 2d scatter/density plot of *xname* vs *yname*; return the dialog.

        Extra kwargs are forwarded to both the dialog and its first layer.
        Emits ``signal_open_plot`` with the created dialog.
        """
        dialog = vp.ScatterPlotDialog(self, self.dataset, app=self.app, **kwargs)
        dialog.add_layer([xname, yname], self.dataset, **kwargs)
        if not vaex.ui.hidden:
            dialog.show()
        else:
            # we get a different output size when we don't show the dialog, which makes testing impossible
            dialog.show()
            dialog.hide()
        # dialog.updateGeometry()
        # dialog.adjustSize()
        # self.dataset.executor.execute()
        # self.dataset.executor.execute()
        self.signal_open_plot.emit(dialog)
        return dialog
    def plotxyz(self, xname, yname, zname, **kwargs):
        """Open a 3d volume-rendering plot of the three expressions; return the dialog."""
        dialog = vp.VolumeRenderingPlotDialog(self, self.dataset, app=self.app, **kwargs)
        dialog.add_layer([xname, yname, zname], **kwargs)
        dialog.show()
        # self.dataset.executor.execute()
        self.dataset.executor.execute()
        self.signal_open_plot.emit(dialog)
        return dialog
    def plotmatrix(self, *expressions):
        """Open a scatter-plot matrix over *expressions*; return the dialog."""
        dialog = vp.ScatterPlotMatrixDialog(self, self.dataset, expressions)
        dialog.show()
        self.dataset.executor.execute()
        return dialog
    def plotxyz_old(self, xname, yname, zname):
        """Legacy 3d plot entry point; superseded by plotxyz()."""
        dialog = vp.PlotDialog3d(self, self.dataset, xname, yname, zname)
        dialog.show()
    def histogram(self, xname, **kwargs):
        """Open a 1d histogram of *xname*; return the dialog.

        Emits ``signal_open_plot`` with the created dialog.
        """
        dialog = vp.HistogramPlotDialog(self, self.dataset, app=self.app, **kwargs)
        dialog.add_layer([xname], **kwargs)
        dialog.show()
        # self.dataset.executor.execute()
        # self.dataset.executor.execute()
        self.signal_open_plot.emit(dialog)
        return dialog
    def onOpenRank(self):
        """Open the subspace-ranking window for the current dataset."""
        if self.dataset is not None:
            self.ranking()
    def onOpenTable(self):
        """Open the tabular view for the current dataset."""
        if self.dataset is not None:
            self.tableview()
    def onFractionSet(self):
        """Apply the slider position as the dataset's active fraction."""
        index = self.fractionSlider.value()
        fraction = possibleFractions[index]
        if self.dataset:
            self.dataset.set_active_fraction(fraction)
            self.update_length()
            # self.numberLabel.setText("{:,}".format(len(self.dataset)))
            # self.dataset.executor.execute()
            # self.dataset.executor.execute()
def onValueChanged(self, index):
fraction = possibleFractions[index]
text = "Dispay: %9.4f%%" % (fraction * 100)
self.fractionLabel.setText(text)
    def on_active_fraction_changed(self, dataset, fraction):
        """Dataset signal handler: keep the slider in sync with the dataset."""
        self.update_active_fraction()
def update_active_fraction(self):
fraction = self.dataset.get_active_fraction()
distances = np.abs(np.array(possibleFractions) - fraction)
index = np.argsort(distances)[0]
self.fractionSlider.setValue(index) # this will fire an event and execute the above event code
    def update_length(self):
        """Refresh the length label, showing "n of total" when a fraction is active."""
        if self.dataset.get_active_fraction() == 1:
            self.label_length.setText("{:,}".format(self.dataset.length_original()))
        else:
            self.label_length.setText("{:,} of {:,}".format(len(self.dataset), self.dataset.length_original()))
    def on_column_change(self, *args):
        """Dataset signal handler: rebuild the panel when columns change."""
        logger.debug("updating columns")
        self.show_dataset(self.dataset)
def show_dataset(self, dataset):
if self.active_fraction_changed_handler:
self.dataset.signal_active_fraction_changed.disconnect(self.active_fraction_changed_handler)
if self.column_changed_handler:
self.dataset.signal_column_changed.disconnect(self.column_changed_handler)
self.refs = []
self.dataset = dataset
self.active_fraction_changed_handler = self.dataset.signal_active_fraction_changed.connect(self.on_active_fraction_changed)
self.column_changed_handler = self.dataset.signal_column_changed.connect(self.on_column_change)
self.name.setText(dataset.name)
self.description.setText(dataset.description if dataset.description else "")
self.label_columns.setText(str(dataset.column_count()))
self.update_length()
self.label_length.setText("{:,}".format(self.dataset.length_original()))
self.update_active_fraction()
self.button_2d.setEnabled(self.dataset.column_count() > 0)
self.auto_fraction_checkbox.setEnabled(not dataset.is_local())
self.fractionSlider.setEnabled(not dataset.get_auto_fraction())
self.menu_common.clear()
if dataset.ucd_find(["^pos.eq.ra", "^pos.eq.dec"]) and dataset.ucd_find(["^pos.galactic.lon", "^pos.galactic.lat"]) is None:
def add(*args):
vaex.ui.columns.add_celestial(self, self.dataset)
action = QtGui.QAction("Add galactic coordinates", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
if dataset.ucd_find(["^pos.eq.ra", "^pos.eq.dec"]) and dataset.ucd_find(["^pos.ecliptic.lon", "^pos.ecliptic.lat"]) is None:
def add(*args):
vaex.ui.columns.add_celestial_eq2ecl(self, self.dataset)
action = QtGui.QAction("Add ecliptic coordinates", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
if dataset.ucd_find(["pos.parallax"]) and not dataset.ucd_find(["pos.distance"]):
def add(*args):
vaex.ui.columns.add_distance(self, self.dataset)
action = QtGui.QAction("Add distance from parallax", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
spherical_galactic = dataset.ucd_find(["^pos.distance", "^pos.galactic.lon", "^pos.galactic.lat"])
if spherical_galactic and not dataset.ucd_find(["^pos.cartesian.x;pos.galactocentric", "^pos.cartesian.y;pos.galactocentric", "^pos.cartesian.z;pos.galactocentric"]):
def add(*args):
vaex.ui.columns.add_cartesian(self, self.dataset, True)
action = QtGui.QAction("Add galactic cartesian positions", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
if dataset.ucd_find(["pos.cartesian.x;pos.galactocentric", "pos.cartesian.y;pos.galactocentric", "pos.cartesian.z;pos.galactocentric"]) and \
not dataset.ucd_find(["pos.distance;pos.galactocentric", "pos.galactic.lon", "pos.galactic.lat"]):
def add(*args):
vaex.ui.columns.add_sky(self, self.dataset, True)
action = QtGui.QAction("Add galactic sky coordinates", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
if dataset.ucd_find(["^pos.eq.ra", "^pos.eq.dec", "pos.pm;pos.eq.ra", "pos.pm;pos.eq.dec"]) and \
not dataset.ucd_find(["pos.pm;pos.galactic.lon", "pos.pm;pos.galactic.lat"]):
def add(*args):
vaex.ui.columns.add_proper_motion_eq2gal(self, self.dataset)
action = QtGui.QAction("Equatorial proper motions to galactic", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
# dataset.add_virtual_columns_proper_motion_eq2gal("RA_ICRS_", "DE_ICRS_", "pmRA", "pmDE", "pm_l", "pm_b")
# dataset.add_virtual_columns_eq2gal("RA_ICRS_", "DE_ICRS_", "l", "b")
if dataset.ucd_find(["^pos.galactic.lon", "^pos.galactic.lat", "^pos.distance", "pos.pm;pos.galactic.lon", "pos.pm;pos.galactic.lat", "spect.dopplerVeloc"]):
def add(*args):
vaex.ui.columns.add_cartesian_velocities(self, self.dataset)
action = QtGui.QAction("Galactic velocities", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
spherical_galactic = dataset.ucd_find(["^pos.galactic.lon", "^pos.galactic.lat"])
if spherical_galactic:
def add(*args):
vaex.ui.columns.add_aitoff(self, self.dataset, True)
action = QtGui.QAction("Add galactic aitoff projection", self)
action.triggered.connect(add)
self.refs.append((action, add))
self.menu_common.addAction(action)
self.button_suggesions.setEnabled(len(self.menu_common.actions()) > 0)
self.menu_1d.clear()
for column_name in self.dataset.get_column_names(virtual=True):
# action = QtGui.QAction
# QtGui.QAction(QtGui.QIcon(iconfile('glue_cross')), '&Pick', self)
action = QtGui.QAction(column_name, self)
action.triggered.connect(functools.partial(self.histogram, xname=column_name))
self.menu_1d.addAction(action)
self.default_columns_2d = None
self.menu_2d.clear()
ucd_pairs = [("^pos.cartesian.x", "^pos.cartesian.y"), ("^pos.cartesian.x", "^pos.cartesian.z"), ("^pos.cartesian.y", "^pos.cartesian.z"),
("^pos.eq.ra", "^pos.eq.dec"), ("^pos.galactic.lon", "^pos.galactic.lat"), ("^pos.ecliptic.lon", "^pos.ecliptic.lat"), ("^pos.earth.lon", "^pos.earth.lat")]
for ucd_pair in ucd_pairs:
done = False
exclude = []
while not done:
pair = dataset.ucd_find(ucd_pair, exclude=exclude)
if pair:
action = QtGui.QAction(", ".join(pair), self)
action.triggered.connect(functools.partial(self.plotxy, xname=pair[0], yname=pair[1]))
self.menu_2d.addAction(action)
if self.default_columns_2d is None:
self.default_columns_2d = pair
exclude.extend(pair)
else:
done = True
column_names = self.dataset.get_column_names(virtual=True)
for column_name1 in column_names:
# action1 = QtGui.QAction(column_name, self)
submenu = self.menu_2d.addMenu(column_name1)
for column_name2 in self.dataset.get_column_names(virtual=True):
action = QtGui.QAction(column_name2, self)
action.triggered.connect(functools.partial(self.plotxy, xname=column_name1, yname=column_name2))
submenu.addAction(action)
if self.default_columns_2d is None:
if len(column_names) >= 2:
self.default_columns_2d = column_names[:2]
elif len(column_names) == 1:
self.default_columns_2d = [column_names[0], ""]
self.default_columns_2d = ["", ""]
return
if 0: # TODO 3d menu takes long to generate when many columns are present, can we do this lazy?
for column_name1 in self.dataset.get_column_names():
# action1 = QtGui.QAction(column_name, self)
submenu = self.scatterMenu3d.addMenu(column_name1)
for column_name2 in self.dataset.get_column_names():
subsubmenu = submenu.addMenu(column_name2)
for column_name3 in self.dataset.get_column_names():
action = QtGui.QAction(column_name3, self)
action.triggered.connect(functools.partial(self.plotxyz, xname=column_name1, yname=column_name2, zname=column_name3))
subsubmenu.addAction(action)
if 0:
self.serieSliceMenu = QtGui.QMenu(self)
for column_name1 in self.dataset.rank1names:
# action1 = QtGui.QAction(column_name, self)
submenu = self.serieSliceMenu.addMenu(column_name1)
for column_name2 in self.dataset.rank1names:
action = QtGui.QAction(column_name2, self)
action.triggered.connect(functools.partial(self.plotseriexy, xname=column_name1, yname=column_name2))
submenu.addAction(action)
self.serieSlice.setMenu(self.serieSliceMenu)
def plotseriexy(self, xname, yname):
    """Open a rank-1 scatter plot of ``xname[index]`` vs ``yname[index]`` for the current dataset.

    Does nothing when no dataset is active.
    """
    if self.dataset is None:
        return
    plot_window = vp.Rank1ScatterPlotDialog(self, self.dataset, xname + "[index]", yname + "[index]")
    self.dataset.executor.execute()
    self.signal_open_plot.emit(plot_window)
    plot_window.show()
def tableview(self):
    """Show the current dataset in a table dialog and return the dialog."""
    table_window = TableDialog(self.dataset, self)
    table_window.show()
    return table_window
def ranking(self, **options):
    """Open the subspace-ranking dialog for the current dataset and return it."""
    rank_window = RankDialog(self.dataset, self, self, **options)
    rank_window.show()
    return rank_window
def pca(self, **options):
    """Run a principal component analysis over all columns of the current dataset."""
    import vaex.pca
    vaex.pca.pca(self.dataset, self.dataset.get_column_names())
class WidgetUsage(QtGui.QWidget):
    """Small widget that repaints itself twice a second with live system usage bars.

    Four horizontal bars are drawn: CPU, virtual memory, swap and disk read rate,
    and a matching multi-line tooltip is set.
    """

    def __init__(self, parent):
        super(WidgetUsage, self).__init__(parent)
        self.setMinimumHeight(16)
        self.setMinimumWidth(100)
        # repaint every 500 ms; QWidget.update() schedules a paintEvent
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(500)
        # baseline for computing the disk read rate between paints
        self.t_prev = time.time()
        self.bytes_read_prev = psutil.disk_io_counters().read_bytes

    def paintEvent(self, event):
        painter = QtGui.QPainter()
        painter.begin(self)
        try:
            painter.fillRect(event.rect(), QtGui.QBrush(QtCore.Qt.white))
            size = self.size()
            width, height = size.width(), size.height()
            self.tool_lines = []

            def drawbar(index, count, fraction, color=QtCore.Qt.red):
                # fraction != fraction detects NaN; skip drawing in that case
                if fraction == fraction:
                    # FIX: QRect wants ints; height * index / count is a float
                    # under Python 3 (true division) and would raise TypeError
                    rect = QtCore.QRect(0, int(height * index / count), int(width * fraction + 0.5), int(height / count))
                    painter.fillRect(rect, QtGui.QBrush(color))

            cpu_fraction = psutil.cpu_percent() / 100.
            drawbar(0, 4, cpu_fraction, QtCore.Qt.green)
            self.tool_lines.append("Cpu usage: %.1f%%" % (cpu_fraction * 100,))
            vmem = psutil.virtual_memory()
            mem_fraction = (vmem.total - vmem.available) * 1. / vmem.total
            self.tool_lines.append("Virtual memory: %s used of %s (=%.1f%%)%%" % (vaex.utils.filesize_format(vmem.total - vmem.available), vaex.utils.filesize_format(vmem.total), mem_fraction * 100.))
            drawbar(1, 4, mem_fraction, QtCore.Qt.red)
            swapmem = psutil.swap_memory()
            # FIX: guard against total == 0 (no swap configured); the previous
            # ZeroDivisionError silently aborted the remaining bars
            if swapmem.total > 0:
                swap_fraction = swapmem.used * 1. / swapmem.total
                drawbar(2, 4, swap_fraction, QtCore.Qt.blue)
                self.tool_lines.append("Swap memory: %s used of %s (=%.1f%%)" % (vaex.utils.filesize_format(swapmem.used), vaex.utils.filesize_format(swapmem.total), swap_fraction * 100.))
            self.t_now = time.time()
            self.bytes_read_new = psutil.disk_io_counters().read_bytes
            bytes_per_second = (self.bytes_read_new - self.bytes_read_prev) / (self.t_now - self.t_prev)
            Mbytes_per_second = bytes_per_second / 1024**2
            # go from 1 mb to 10*1024 mb/s in log spacing
            disk_fraction = np.clip(np.log2(Mbytes_per_second) / np.log2(10 * 1024), 0, 1)
            drawbar(3, 4, disk_fraction, QtCore.Qt.magenta)
            self.tool_lines.append("Reading at %.2f MiB/s" % (Mbytes_per_second,))

            self.t_prev = self.t_now
            self.bytes_read_prev = self.bytes_read_new
            self.tool_text = "\n".join(self.tool_lines)
            self.setToolTip(self.tool_text)
        except Exception:
            # best effort: a failed stats read should never break painting
            # (was a bare except, which also swallowed KeyboardInterrupt/SystemExit)
            pass
        finally:
            # FIX: always end the painter; previously an exception mid-paint left
            # the QPainter active on the widget (Qt then warns/misbehaves)
            painter.end()
class VaexApp(QtGui.QMainWindow):
"""
:type windows: list[PlotDialog]
"""
# SAMP callbacks arrive on a background thread; these class-level Qt signals
# re-dispatch them onto the GUI thread (connected in __init__ when samp is enabled).
signal_samp_notification = QtCore.pyqtSignal(str, str, str, dict, dict)
signal_samp_call = QtCore.pyqtSignal(str, str, str, str, dict, dict)
def __init__(self, argv=[], open_default=False, enable_samp=True):
    """Build the main vaex window: widgets, menus, toolbar, optional SAMP
    integration, plugin loading, and finally command line parsing.

    :param argv: command line arguments (without the program name), handed to parse_args
    :param open_default: when True and argv is empty, open the example/custom datasets
    :param enable_samp: when True, wire up SAMP actions, menu and signal handlers
    """
    # NOTE(review): mutable default argv=[] — not mutated here (parse_args copies
    # indices, not the list), but worth confirming it stays read-only.
    super(VaexApp, self).__init__()
    is_py2 = (sys.version_info[0] == 2)
    self.enable_samp = enable_samp  # if (enable_samp is not None) else is_py2
    self.windows = []  # open plot windows (see docstring of the class)
    self.current_window = None
    self.current_dataset = None
    QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
    # self.setToolTip('This is a <b>QWidget</b> widget')
    if 0:  # dead demo code, kept disabled
        qbtn = QtGui.QPushButton('Quit', self)
        qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)
        qbtn.resize(qbtn.sizeHint())
        qbtn.move(150, 150)
        btn = QtGui.QPushButton('Button', self)
        btn.setToolTip('This is a <b>QPushButton</b> widget')
        btn.resize(btn.sizeHint())
        btn.move(50, 50)
    # self.setGeometry(300, 300, 250, 150)
    self.resize(700, 500)
    # self.center()
    self.setWindowTitle(u'V\xe6X v' + vaex.__version__)
    # self.statusBar().showMessage('Ready')
    self.toolbar = self.addToolBar('Main toolbar')
    self.toolbar.setVisible(False)

    # --- left panel: dataset selector; right: tabbed main/columns/variables panels
    self.left = QtGui.QFrame(self)
    self.left.setFrameShape(QtGui.QFrame.StyledPanel)
    self.dataset_selector = DatasetSelector(self.left)
    self.dataset_selector.setMinimumWidth(300)
    self.tabs = QtGui.QTabWidget()
    self.dataset_panel = DatasetPanel(self, self.dataset_selector.datasets)  # QtGui.QFrame(self)
    self.dataset_panel.setFrameShape(QtGui.QFrame.StyledPanel)
    self.tabs.addTab(self.dataset_panel, "Main")
    self.main_panel = self.dataset_panel
    self.splitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
    self.splitter.addWidget(self.left)
    # self.splitter.addWidget(self.dataset_panel)
    self.splitter.addWidget(self.tabs)
    # self.hbox = QtGui.QHBoxLayout(self)
    # self.hbox.addWidget(self.splitter)
    self.setCentralWidget(self.splitter)
    # self.setLayout(self.hbox)
    # this widget uses a time which causes an fps drop for opengl
    # self.widget_usage = WidgetUsage(self.left)
    # self.list.resize(30
    self.boxlist = QtGui.QVBoxLayout(self.left)
    self.boxlist.addWidget(self.dataset_selector)
    # self.boxlist.addWidget(self.widget_usage)
    self.left.setLayout(self.boxlist)

    def on_dataset_select(dataset):
        # keep the app-level notion of "current dataset" and all panels in sync
        self.current_dataset = dataset
        current.dataset = dataset
        self.dataset_panel.show_dataset(dataset)
        self.columns_panel.set_dataset(dataset)
        self.variables_panel.set_dataset(dataset)
    self.dataset_selector.signal_dataset_select.connect(on_dataset_select)
    # self.list.currentItemChanged.connect(self.infoPanel.onDataSelected)
    # self.dataset_selector.currentItemChanged.connect(self.dataset_panel.onDataSelected)
    # self.dataset_selector.currentItemChanged.connect(self.dataset_panel.onDataSelected)
    # self.list.testfill()
    if not vaex.ui.hidden:  # headless mode (e.g. tests) skips showing the window
        self.show()
        self.raise_()
    # self.list.itemSelectionChanged.connect(self.right.onDataSelected)
    # self.action_open = QtGui.QAction(vp.iconfile('quickopen-file', '&Open', self)
    # self.action_open.

    # --- file open/save actions
    self.action_open_hdf5_gadget = QtGui.QAction(QtGui.QIcon(vp.iconfile('table-import')), '&Open Gadget hdf5', self)
    self.action_open_hdf5_vaex = QtGui.QAction(QtGui.QIcon(vp.iconfile('table-import')), '&Open Vaex hdf5', self)
    self.action_open_hdf5_amuse = QtGui.QAction(QtGui.QIcon(vp.iconfile('table-import')), '&Open Amuse hdf5', self)
    self.action_open_fits = QtGui.QAction(QtGui.QIcon(vp.iconfile('table-import')), '&Open FITS (binary table)', self)
    self.action_save_hdf5 = QtGui.QAction(QtGui.QIcon(vp.iconfile('table-export')), '&Export to hdf5', self)
    self.action_save_fits = QtGui.QAction(QtGui.QIcon(vp.iconfile('table-export')), '&Export to fits', self)
    self.server_connect_action = QtGui.QAction(QtGui.QIcon(vp.iconfile('database-cloud')), '&Connect to server', self)

    def server_connect(*ignore):
        # ask for a server url (most-recently-used first), connect, pick a dataset
        servers = vaex.settings.main.get("servers", ["ws://localhost:9000/"])
        server = str(dialogs.choose(self, "Connect to server", "Connect to server", servers, editable=True))
        # NOTE(review): str(...) never returns None, so this guard looks ineffective
        # (a cancelled dialog would yield the string "None") — confirm intent.
        if server is None:
            return
        try:
            vaex_server = vaex.server(server, thread_mover=self.call_in_main_thread)
            datasets = vaex_server.datasets()
        except Exception as e:
            dialogs.dialog_error(self, "Error connecting", "Error connecting: %r" % e)
            return
        dataset_descriptions = ["{} ({:,} rows)".format(dataset.name, len(dataset)) for dataset in datasets]
        dataset_index = dialogs.choose(self, "Choose datasets", "Choose dataset", dataset_descriptions)
        if dataset_index is None:
            return
        dataset = datasets[dataset_index]
        self.dataset_selector.add(dataset)
        self.add_recently_opened(dataset.path)
        # move the chosen server to the front of the MRU list and persist it
        if server in servers:
            servers.remove(server)
        servers.insert(0, server)
        vaex.settings.main.store("servers", servers)
    self.server_connect_action.triggered.connect(server_connect)

    exitAction = QtGui.QAction(QtGui.QIcon('icons/png/24x24/actions/application-exit-2.png'), '&Exit', self)
    exitAction.setShortcut('Ctrl+Q')
    # NOTE(review): this second setShortcut replaces Ctrl+Q with Alt+Q — confirm
    # whether both shortcuts were intended (use setShortcuts for that).
    exitAction.setShortcut('Alt+Q')
    exitAction.setStatusTip('Exit application')
    exitAction.triggered.connect(QtGui.qApp.quit)
    self.samp = None  # SAMP client handle; None while disconnected
    """
    ipythonAction = QtGui.QAction(QtGui.QIcon(vp.iconfile('table-import')), '&IPython console', self)
    ipythonAction.setShortcut('Alt+I')
    ipythonAction.setStatusTip('Show IPython console')
    def show_ipython_console(*args):
    ipython_console.show()
    ipythonAction.triggered.connect(show_ipython_console)
    """

    # --- menu bar
    menubar = self.menuBar()
    fileMenu = menubar.addMenu('&File')
    self.menu_open = fileMenu.addMenu("&Open")
    self.menu_open.addAction(self.action_open_hdf5_vaex)
    self.menu_open.addAction(self.action_open_hdf5_gadget)
    self.menu_open.addAction(self.action_open_hdf5_amuse)
    self.menu_recent = fileMenu.addMenu("&Open Recent")
    self.recently_opened = vaex.settings.main.get("recent", [])
    self.update_recently_opened()
    if (not frozen) or darwin:  # FITS support is unavailable in some frozen builds
        self.menu_open.addAction(self.action_open_fits)
    fileMenu.addAction(self.action_save_hdf5)
    fileMenu.addAction(self.action_save_fits)
    # fileMenu.addAction(self.action_open)
    fileMenu.addAction(self.server_connect_action)
    # fileMenu.addAction(ipythonAction)
    fileMenu.addAction(exitAction)

    self.menu_data = menubar.addMenu('&Data')

    def check_memory(bytes):
        # Ask the user for confirmation when an action does not fit in RAM;
        # returns True when it is ok to proceed.
        if bytes > psutil.virtual_memory().available:
            if bytes < (psutil.virtual_memory().available + psutil.swap_memory().free):
                text = "Action requires %s, you have enough swap memory available but it will make your computer slower, do you want to continue?" % (vaex.utils.filesize_format(bytes),)
                return confirm(self, "Memory usage issue", text)
            else:
                text = "Action requires %s, you do not have enough swap memory available, do you want try anyway?" % (vaex.utils.filesize_format(bytes),)
                return confirm(self, "Memory usage issue", text)
        return True

    # --- synthetic dataset generators (Soneira-Peebles fractals, Zeldovich)
    for level in [20, 25, 27, 29, 30, 31, 32]:
        N = 2**level
        action = QtGui.QAction('Generate Soneira Peebles fractal: N={:,}'.format(N), self)

        def do(ignore=None, level=level):  # level=level binds per iteration (late-binding closure fix)
            if level < 29:
                if check_memory(4 * 8 * 2**level):
                    sp = vx.file.other.SoneiraPeebles(dimension=4, eta=2, max_level=level, L=[1.1, 1.3, 1.6, 2.])
                    self.dataset_selector.add(sp)
            else:
                if check_memory(2 * 8 * 2**level):
                    sp = vx.file.other.SoneiraPeebles(dimension=2, eta=2, max_level=level, L=[1.6, 2.])
                    self.dataset_selector.add(sp)
        action.triggered.connect(do)
        self.menu_data.addAction(action)

    for dim in [2, 3]:
        if dim == 3:
            res = [128, 256, 512, 1024]
        if dim == 2:
            res = [512, 1024, 2048]
        for N in res:
            for power in [-1.5, -2.5]:
                count = N**dim
                name = 'Zeldovich d={dim} N={N:,}, count={count:,} powerspectrum={power:}'.format(**locals())
                action = QtGui.QAction('Generate ' + name, self)

                def do(ignore=None, dim=dim, N=N, power=power, name=name):  # defaults bind loop vars
                    t = None
                    z = vx.file.other.Zeldovich(dim, N, power, t, name=name)
                    self.dataset_selector.add(z)
                action.triggered.connect(do)
                self.menu_data.addAction(action)

    self.menu_columns = menubar.addMenu('&Columns')
    self.columns_panel = vaex.ui.columns.ColumnsTable(self.tabs, menu=self.menu_columns)
    self.tabs.addTab(self.columns_panel, "Columns")
    self.variables_panel = vaex.ui.variables.VariablesTable(self.tabs, menu=self.menu_columns)
    self.tabs.addTab(self.variables_panel, "Variables")

    use_toolbar = "darwin" not in platform.system().lower()
    use_toolbar = True  # NOTE(review): overrides the platform check above — confirm
    self.toolbar.setIconSize(QtCore.QSize(16, 16))
    # self.toolbar.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
    # self.toolbar.addAction(exitAction)

    # --- SAMP actions, toolbar buttons and menu
    if self.enable_samp:
        self.action_samp_connect = QtGui.QAction(QtGui.QIcon(vp.iconfile('plug-connect')), 'Connect to SAMP HUB', self)
        self.action_samp_connect.setShortcut('Alt+S')
        self.action_samp_connect.setCheckable(True)
        if use_toolbar:
            self.toolbar.addAction(self.action_samp_connect)
        self.action_samp_connect.triggered.connect(self.onSampConnect)
        self.action_samp_table_send = QtGui.QAction(QtGui.QIcon(vp.iconfile('table--arrow')), 'Send active dataset via SAMP', self)
        self.action_samp_table_send.setShortcut('Alt+T')
        if use_toolbar:
            self.toolbar.addAction(self.action_samp_table_send)
        self.action_samp_table_send.triggered.connect(self.onSampSend)
        self.action_samp_sand_table_select_row_list = QtGui.QAction(QtGui.QIcon(vp.iconfile('block--arrow')), 'Send selection via SAMP(table.select.rowlist)', self)
        self.action_samp_sand_table_select_row_list.setShortcut('Alt+R')
        if use_toolbar:
            self.toolbar.addAction(self.action_samp_sand_table_select_row_list)
        self.action_samp_sand_table_select_row_list.triggered.connect(self.on_samp_send_table_select_rowlist)
        self.toolbar.addSeparator()
        # NOTE(review): the export connections live inside the samp block, so
        # exports are only wired when samp is enabled — confirm that is intended.
        self.action_save_hdf5.triggered.connect(self.onExportHdf5)
        self.action_save_fits.triggered.connect(self.onExportFits)
        self.sampMenu = menubar.addMenu('&Samp')
        self.sampMenu.addAction(self.action_samp_connect)
        # self.sampMenu.addAction(self.action_samp_table_send)
        self.sampMenu.addAction(self.action_samp_sand_table_select_row_list)

    if use_toolbar:
        # self.toolbar.addAction(self.action_open_hdf5_gadget)
        # self.toolbar.addAction(self.action_open_hdf5_vaex)
        # if (not frozen) or darwin:
        # self.toolbar.addAction(self.action_open_fits)
        self.toolbar.addAction(self.action_save_hdf5)
        self.toolbar.addAction(self.action_save_fits)

    # --- default datasets when started without arguments
    if len(argv) == 0 and open_default:
        if custom is not None:
            custom.loadDatasets(self.dataset_selector)
            custom.openPlots(self.dataset_panel)
        elif 1:  # frozen:
            # for index, name in list(enumerate("gas halo disk stars sat".split()))[::-1]:
            # self.dataset_selector.open(os.path.join(application_path, 'data/disk-galaxy.hdf5'), particle_name=name)
            # f = vaex.utils.get_data_file("data/helmi-dezeeuw-2000-10p.hdf5")
            # if f and os.path.exists(f):
            # self.dataset_selector.open(f)
            # self.dataset_selector.open(os.path.join(application_path, "data/Aq-A-2-999-shuffled-fraction.hdf5"))
            dataset_example = vaex.example(download=False)
            if dataset_example is not None:
                self.dataset_selector.add(dataset_example)

    # --- user plugins: every *.py file in ~/.vaex/plugin is imported
    for pluginpath in [os.path.expanduser('~/.vaex/plugin')]:
        logger.debug("pluginpath: %s" % pluginpath)
        if os.path.exists(pluginpath):
            import glob
            paths = glob.glob(pluginpath + "/*.py")
            for path in paths:
                logger.debug("plugin file: %s" % path)
                filename = os.path.basename(path)
                name = os.path.splitext(filename)[0]
                imp.load_source('vaexuser.plugin.' + name, path)

    self.open_generators = []  # for reference counts
    self.action_open_hdf5_gadget.triggered.connect(self.openGenerator(self.gadgethdf5, "Gadget HDF5 file", "*.hdf5"))
    self.action_open_hdf5_vaex.triggered.connect(self.openGenerator(self.vaex_hdf5, "Gaia HDF5 file", "*.hdf5"))
    self.action_open_hdf5_amuse.triggered.connect(self.openGenerator(self.amuse_hdf5, "Amuse HDF5 file", "*.hdf5"))
    if (not frozen) or darwin:
        self.action_open_fits.triggered.connect(self.openGenerator(self.open_fits, "FITS file", "*.fits"))

    self.help_menu = menubar.addMenu('&Help')
    self.action_help = QtGui.QAction("Help", self)
    self.action_credits = QtGui.QAction("Credits", self)
    self.help_menu.addAction(self.action_help)
    self.help_menu.addAction(self.action_credits)
    self.action_help.triggered.connect(self.onActionHelp)
    self.action_credits.triggered.connect(self.onActionCredits)

    # --- SAMP signal plumbing: re-dispatch background-thread callbacks to the GUI thread
    if self.enable_samp:
        self.signal_samp_notification.connect(self.on_samp_notification)
        self.signal_samp_call.connect(self.on_samp_call)
        QtCore.QCoreApplication.instance().aboutToQuit.connect(self.clean_up)
        self.action_samp_connect.setChecked(True)
        self.onSampConnect(ignore_error=True)
        self.dataset_selector.signal_pick.connect(self.on_pick)
        self.samp_ping_timer = QtCore.QTimer()
        self.samp_ping_timer.timeout.connect(self.on_samp_ping_timer)
        # self.samp_ping_timer.start(1000)
    self.highlighed_row_from_samp = False  # guards against echoing samp-originated picks

    def on_open_plot(plot_dialog):
        # bookkeeping when a plot window opens: menus, samp hooks, kernel vars
        self.dataset_selector.signal_add_dataset.connect(lambda dataset: plot_dialog.fill_menu_layer_new())
        plot_dialog.signal_samp_send_selection.connect(lambda dataset: self.on_samp_send_table_select_rowlist(dataset=dataset))
        current.window = plot_dialog
        # current.layer = plot_dialog.current_layer
        if kernel:
            kernel.shell.push({"window": plot_dialog})
            kernel.shell.push({"layer": plot_dialog.current_layer})
        self.windows.append(plot_dialog)  # TODO remove from list

        def on_close(window):
            self.windows.remove(window)
            if self.current_window == window:
                self.current_window = None
                current.window = None
        plot_dialog.signal_closed.connect(on_close)
        self.current_window = plot_dialog
    self.dataset_panel.signal_open_plot.connect(on_open_plot)

    self.signal_call_in_main_thread.connect(self.on_signal_call_in_main_thread)
    import queue
    # size-1 queue: carries the return value of a main-thread call back to the caller thread
    self.queue_call_in_main_thread = queue.Queue(1)

    self.parse_args(argv)
# this queue is used to return values from the main thread to the callers thread
# NOTE: class-level Qt signal — must be declared on the class, not in __init__;
# emitted by call_in_main_thread, handled by on_signal_call_in_main_thread.
signal_call_in_main_thread = QtCore.pyqtSignal(object, object, object)  # fn, args, kwargs
# signal_promise = QtCore.pyqtSignal(str)
def call_in_main_thread(self, fn, *args, **kwargs):
    """Run ``fn(*args, **kwargs)`` on the GUI (main) thread and block until it returns.

    Called from worker threads; the signal emission hops to the main thread and
    the size-1 queue carries the return value back.  Not reentrant: the assert
    requires no other cross-thread call to be in flight.
    """
    # print "send promise to main thread using signal", threading.currentThread()
    logger.debug("sending call to main thread, we are in thread: %r", threading.currentThread())
    assert self.queue_call_in_main_thread.empty()
    self.signal_call_in_main_thread.emit(fn, args, kwargs)
    logger.debug("emitted...")
    # blocks until on_signal_call_in_main_thread puts the result
    return self.queue_call_in_main_thread.get()
    # self.signal_promise.emit("blaat")
def on_signal_call_in_main_thread(self, fn, args, kwargs):
    """Main-thread slot for :meth:`call_in_main_thread`: execute ``fn`` and hand back its result.

    The ``finally`` guarantees a value (possibly None) is always put on the
    queue, so the waiting worker thread cannot deadlock even if ``fn`` raises.
    """
    logger.debug("got callback %r, and should call it with argument: %r %r (from thread %r)", fn, args, kwargs, threading.currentThread())
    assert self.queue_call_in_main_thread.empty()
    return_value = None
    try:
        return_value = fn(*args, **kwargs)
    finally:
        # always unblock the caller; NOTE(review): an exception from fn is
        # swallowed by the event loop after this — the caller just sees None
        self.queue_call_in_main_thread.put(return_value)
    # promise.fulfill(value)
def select(self, *args):
    """Scripting helper: make window ``args[0]`` (and optionally layer ``args[1]``) current."""
    remaining = list(args)  # consume a copy, leave the caller's tuple alone
    if not remaining:
        print("select requires at least one argument")
        return
    index = remaining.pop(0)
    if 0 <= index < len(self.windows):
        current.window = self.windows[index]
    else:
        print("window index %d out of range [%d, %d]" % (index, 0, len(self.windows) - 1))
    if remaining:
        layer_index = remaining.pop(0)
        current.window.select_layer(layer_index)
        current.layer = current.window.current_layer
def plot(self, *args, **kwargs):
    """Scripting helper: plot into the current window's layer, or open a new plot.

    With no current window, one positional argument opens a histogram and two
    open an x/y plot.  With a current window, the arguments re-target the
    current layer's x (and y) expressions and kwargs are applied as options.
    """
    if current.window is None:
        if len(args) == 1:
            self.dataset_panel.histogram(args[0], **kwargs)
        if len(args) == 2:
            self.dataset_panel.plotxy(args[0], args[1], **kwargs)
    else:
        layer = current.window.current_layer
        if layer:
            layer.apply_options(kwargs)
            if len(args) == 1:
                layer.x = args[0]
            if len(args) == 2:
                layer.x = args[0]
                layer.y = args[1]
        else:
            print("no current layer")
    # window_name = kwargs.get("window_name")
    # layer_name = kwargs.get("layer_name")
    # name = kwargs.get("name")
    # if name is not None:
    # window_name, layer_name = name.split(":")
    # kwargs["window_name"] = window_name
    # kwargs["layer_name"] = layer_name
    # layer = None
    # window = None
    # windows = [window for window in self.windows if window.name == window_name]
    # if windows:
    # window = windows[0]
    # layers = [layer for layer in window.layers if layer.name == layer_name]
    # if layer is None:
    # if len(args) == 1:
    # self.dataset_panel.histogram(args[0], **kwargs)
    # if len(args) == 2:
    # self.dataset_panel.plotxy(args[0], args[1], **kwargs)
    # if len(args) == 1:
    # self.dataset_panel.histogram(args[0], kwargs)
    # else:
def parse_args(self, args):
    """Parse the command line: open datasets and create plots/layers from them.

    Grammar (informal): one or more dataset specs, each optionally followed by
    a ``--task`` (rank/pca) and/or plot specs.  Within a plot spec, plain
    tokens are column names, ``key=value`` tokens are plot options,
    ``vcol:name=expr`` adds a virtual column, ``var:name=expr`` a variable and
    ``@method=args`` calls a dataset method.  ``-`` starts a new plot, ``+`` a
    new layer, ``--`` moves on to the next dataset and ``++`` does the same
    while keeping the current plot.

    :param args: argument list (no program name), e.g. ``sys.argv[1:]``
    """
    # args = sys.argv[1:]
    index = 0

    def error(msg):
        # fatal usage error: report and quit
        print(msg, file=sys.stderr)
        sys.exit(1)
    hold_plot = False
    plot = None
    while index < len(args):
        # FIX: removed a duplicated `filename = args[index]` statement
        filename = args[index]
        print("filename", filename)
        dataset = None
        if filename.startswith("cluster://"):
            dataset = vaex.open(filename)  # , thread_mover=self.call_in_main_thread)
        elif filename.startswith("http://") or filename.startswith("ws://"):  # TODO: thinkg about https wss
            # o = urlparse(filename)
            # assert o.scheme == "http"
            # base_path, should_be_datasets, dataset_name = o.path.rsplit("/", 2)
            # if should_be_datasets != "datasets":
            # error("expected an url in the form http://host:port/optional/part/datasets/dataset_name")
            # server = vaex.server(hostname=o.hostname, port = o.port or 80, thread_mover=self.call_in_main_thread, base_path=base_path)
            if 0:  # disabled legacy path: pick a dataset by name from the server
                server = vaex.server(filename, thread_mover=self.call_in_main_thread)
                datasets = server.datasets()
                names = [dataset.name for dataset in datasets]
                index += 1
                if index >= len(args):
                    error("expected dataset to follow url, e.g. vaex http://servername:9000 somedataset, possible dataset names: %s" % " ".join(names))
                name = args[index]
                if name not in names:
                    error("no such dataset '%s' at server, possible dataset names: %s" % (name, " ".join(names)))
                found = [dataset for dataset in datasets if dataset.name == name]
                if found:
                    dataset = found[0]
            dataset = vaex.open(filename, thread_mover=self.call_in_main_thread)
            self.add_recently_opened(filename)
            # dataset = self.open(filename)
        elif filename[0] == ":":  # not a filename, but a classname
            classname = filename.split(":")[1]
            if classname not in vaex.dataset.dataset_type_map:
                print(classname, "does not exist, options are", sorted(vaex.dataset.dataset_type_map.keys()))
                sys.exit(-1)
            class_ = vaex.dataset.dataset_type_map[classname]
            # SECURITY: eval of command-line fragments — only acceptable because
            # the arguments come from the local user launching the program
            clsargs = [eval(value) for value in filename.split(":")[2:]]
            dataset = class_(*clsargs)
        else:
            options = filename.split(":")
            # SECURITY: same eval-of-local-arguments caveat as above
            clsargs = [eval(value) for value in options[1:]]
            filename = options[0]
            dataset = vaex.open(filename, *clsargs)  # vaex.dataset.load_file(filename, *clsargs)
            self.add_recently_opened(filename)
        if dataset is None:
            error("cannot open file (unknown)".format(**locals()))
        index += 1
        self.dataset_selector.add(dataset)
        # for this dataset, keep opening plots (seperated by -) or add layers (seperated by +)
        plot = plot if hold_plot else None
        options = {}
        # if we find --<task> we don't plot but do sth else
        if index < len(args) and args[index].startswith("--") and len(args[index]) > 2:
            task_name = args[index][2:]
            index += 1
            if task_name in ["rank", "pca"]:
                options = {}
                while index < len(args):
                    if args[index] == "-":
                        index += 1
                        break
                    elif args[index] == "--":
                        index += 1
                        break
                    elif "=" in args[index]:
                        key, value = args[index].split("=", 1)
                        options[key] = value
                    else:
                        # FIX: corrected "unkown" typo in the error message
                        error("unknown option for task %r: %r " % (task_name, args[index]))
                    index += 1
                if task_name == "rank":
                    self.dataset_panel.ranking(**options)
                if task_name == "pca":
                    self.dataset_panel.pca(**options)
            else:
                # FIX: corrected "unkown" typo in the error message
                error("unknown task: %r" % task_name)
        # else:
        if 1:
            while index < len(args) and args[index] != "--":
                columns = []
                while index < len(args) and args[index] not in ["+", "-", "--", "++"]:
                    if "=" in args[index]:
                        key, value = args[index].split("=", 1)
                        if ":" in key:
                            # FIX: renamed local `type` -> `expr_type` (shadowed the builtin)
                            expr_type, key = key.split(":", 1)
                            if expr_type == "vcol":
                                dataset.virtual_columns[key] = value
                            elif expr_type == "var":
                                dataset.variables[key] = value
                            else:
                                error("unknown expression, %s, type %s not recognized" % (expr_type + ":" + key, expr_type))
                        elif key.startswith("@"):
                            method_name = key[1:]
                            method = getattr(dataset, method_name)
                            method(*eval(value))
                            # if method is Non
                            # error("unknown expression, %s, type %s not recognized" % (type + ":" + key, type))
                        else:
                            options[key] = value
                    else:
                        columns.append(args[index])
                    index += 1
                if plot is None:
                    if len(columns) == 1:
                        plot = self.dataset_panel.histogram(columns[0], **options)
                    elif len(columns) == 2:
                        plot = self.dataset_panel.plotxy(columns[0], columns[1], **options)
                    elif len(columns) == 3:
                        plot = self.dataset_panel.plotxyz(columns[0], columns[1], columns[2], **options)
                    else:
                        error("cannot plot more than 3 columns yet: %r" % columns)
                else:
                    layer = plot.add_layer(columns, dataset=dataset, **options)
                    # layer.jobs_manager.execute()
                options = {}
                if index < len(args) and args[index] == "-":
                    plot = None  # set to None to create a new plot, + will do a new layer
                if index < len(args) and args[index] == "--":
                    hold_plot = False
                    break  # break out for the next dataset
                if index < len(args) and args[index] == "++":
                    hold_plot = True
                    break  # break out for the next dataset, but keep the same plot
                index += 1
        if index < len(args):
            pass
        index += 1
def on_samp_ping_timer(self):
    """Periodic SAMP health check: ping the hub, drop the connection on failure.

    Keeps ``self.samp`` as the single source of truth (None == disconnected)
    and mirrors the state in the connect action's checkbox.
    """
    if self.samp:
        connected = self.samp.client.is_connected
        # print "samp is", "connected" if connected else "disconnected!"
        if not connected:
            self.samp = None
    if self.samp:
        try:
            self.samp.client.ping()
        except Exception:  # FIX: was a bare except — don't swallow KeyboardInterrupt/SystemExit
            print("oops, ping went wrong, disconnect detected")
            try:
                self.samp.disconnect()
            except Exception:  # best-effort cleanup, the hub may already be gone
                pass
            self.samp = None
    self.action_samp_connect.setChecked(self.samp is not None)
def on_pick(self, dataset, row):
    """Broadcast a row pick to other SAMP clients as a ``table.highlight.row`` notification."""
    logger.debug("samp pick event")
    # avoid sending an event if this was caused by a samp event
    if not self.samp or self.highlighed_row_from_samp:  # TODO: check if connected,
        return
    kwargs = {"row": str(row)}
    samp_id = getattr(dataset, "samp_id", None)
    if samp_id:
        kwargs["table-id"] = samp_id
        # kwargs["url"] = "file:" + dataset.filename
        kwargs["url"] = samp_id
    elif dataset.path:
        kwargs["table-id"] = "file:" + dataset.path
        kwargs["url"] = "file:" + dataset.path
    else:
        kwargs["table-id"] = "file:" + dataset.name
        kwargs["url"] = "file:" + dataset.name
    self.samp.client.enotify_all("table.highlight.row", **kwargs)
def on_samp_send_table_select_rowlist(self, ignore=None, dataset=None):
    """Send the dataset's current selection to SAMP clients as ``table.select.rowList``.

    :param ignore: unused slot argument from Qt
    :param dataset: dataset to send; defaults to the active dataset panel's dataset
    """
    if self.samp:  # TODO: check if connected
        dataset = dataset or self.dataset_panel.dataset
        rows = []
        # FIX: `has_selection() is not None` was always True (the method returns a
        # bool), so the mask was evaluated — and sent — even without a selection.
        # Also removed a leftover debug print of the full selection mask.
        if dataset.has_selection():
            rows = np.arange(len(dataset))[dataset.evaluate_selection_mask()]
        rowlist = list(map(str, rows))
        kwargs = {"row-list": rowlist}
        if dataset.samp_id:
            kwargs["table-id"] = dataset.samp_id
            # kwargs["url"] = "file:" + dataset.filename
            kwargs["url"] = "file:" + dataset.samp_id
        else:
            # NOTE(review): unlike on_pick there is no fallback to dataset.name
            # when path is empty — confirm remote datasets cannot reach this path
            kwargs["table-id"] = "file:" + dataset.path
        self.samp.client.enotify_all("table.select.rowList", **kwargs)
def onActionHelp(self):
    """Open the bundled HTML documentation in the system browser."""
    doc_path = vaex.utils.get_data_file("doc/index.html")
    vaex.utils.os_open("file://" + doc_path)
def onActionCredits(self):
    """Open the bundled credits page in the system browser."""
    credits_path = vaex.utils.get_data_file("doc/credits.html")
    vaex.utils.os_open("file://" + credits_path)
def _webDialog(self, url):
    """Show *url* in a minimal embedded web-view dialog (debug helper, unused in menus)."""
    dialog = QtGui.QDialog(self)
    layout = QtGui.QVBoxLayout()
    dialog.setLayout(layout)
    view = QWebView()
    view.load(QtCore.QUrl(url))
    layout.addWidget(view)
    dialog.resize(300, 300)
    dialog.show()
def onExportHdf5(self):
    """Menu/toolbar handler: export the active dataset to HDF5."""
    self.export(type="hdf5")
def onExportFits(self):
    """Menu/toolbar handler: export the active dataset to FITS."""
    self.export(type="fits")
def export(self, type="hdf5"):
    """Interactive export wizard for the active dataset.

    Walks the user through: all-vs-selection, column choice, shuffle,
    endianness (hdf5 only) and target filename, then runs the export with a
    progress dialog.

    :param type: output format, "hdf5" or "fits"
    """
    # NOTE(review): the parameter name `type` shadows the builtin — kept for
    # interface compatibility (callers may pass type= by keyword).
    dataset = self.dataset_panel.dataset
    name = dataset.name + "-mysubset.hdf5"
    options = ["All: %r records, filesize: %r" % (len(dataset), vaex.utils.filesize_format(dataset.byte_size()))]
    options += ["Selection: %r records, filesize: %r" % (dataset.count(selection=True), vaex.utils.filesize_format(dataset.byte_size(selection=True)))]
    index = dialogs.choose(self, "What do you want to export?", "Choose what to export:", options)
    if index is None:
        return
    export_selection = index == 1
    logger.debug("export selection: %r", export_selection)
    # select_many(None, "lala", ["aap", "noot"] + ["item-%d-%s" % (k, "-" * k) for k in range(30)])
    ok, columns_mask = dialogs.select_many(self, "Select columns", dataset.get_column_names(virtual=True))
    if not ok:  # cancel
        return
    selected_column_names = [column_name for column_name, selected in zip(dataset.get_column_names(virtual=True), columns_mask) if selected]
    logger.debug("export column names: %r", selected_column_names)
    shuffle = dialogs.dialog_confirm(self, "Shuffle?", "Do you want the dataset to be shuffled (output the rows in random order)")
    logger.debug("export shuffled: %r", shuffle)
    if shuffle and dataset.length_original() != len(dataset):
        dialogs.dialog_info(self, "Shuffle", "You selected shuffling while not exporting the full dataset, will select random rows from the full dataset")
        partial_shuffle = True
    else:
        partial_shuffle = False
    # NOTE(review): partial_shuffle is computed but never used below — confirm
    # whether it should be forwarded to the export call.
    if export_selection and shuffle:
        dialogs.dialog_info(self, "Shuffle", "Shuffling with selection not supported")
        return
    if type == "hdf5":
        endian_options = ["Native", "Little endian", "Big endian"]
        index = dialogs.choose(self, "Which endianness", "Which endianness / byte order:", endian_options)
        if index is None:
            return
        # map the human choice to numpy/struct byte-order characters
        endian_option = ["=", "<", ">"][index]
        logger.debug("export endian: %r", endian_option)
    if type == "hdf5":
        filename = dialogs.get_path_save(self, "Save to HDF5", name, "HDF5 *.hdf5")
    else:
        filename = dialogs.get_path_save(self, "Save to col-fits", name, "FITS (*.fits)")
    logger.debug("export to file: %r", filename)
    # print args
    filename = str(filename)
    if not filename.endswith("." + type):
        filename += "." + type
    if filename:
        with dialogs.ProgressExecution(self, "Copying data...", "Abort export") as progress_dialog:
            if type == "hdf5":
                vaex.export.export_hdf5(dataset, filename, column_names=selected_column_names, shuffle=shuffle, selection=export_selection, byteorder=endian_option, progress=progress_dialog.progress)
            if type == "fits":
                vaex.export.export_fits(dataset, filename, column_names=selected_column_names, shuffle=shuffle, selection=export_selection, progress=progress_dialog.progress)
    logger.debug("export done")
def gadgethdf5(self, filename):
    """Open a gadget HDF5 file and register each particle group with the dataset selector."""
    logger.debug("open gadget hdf5: %r", filename)
    particle_names = "gas halo disk bulge stars sat".split()
    # Register in reverse index order (same ordering as the original enumerate[::-1]).
    for index in range(len(particle_names) - 1, -1, -1):
        self.dataset_selector.addGadgetHdf5(str(filename), particle_names[index], index)
def vaex_hdf5(self, filename):
    """Open a vaex-format HDF5 file and add it to the dataset selector."""
    logger.debug("open vaex hdf5: %r", filename)
    ds = vaex.open(str(filename))
    self.dataset_selector.add(ds)
def amuse_hdf5(self, filename):
    """Open an AMUSE HDF5 file and add it to the dataset selector."""
    logger.debug("open amuse: %r", filename)
    ds = vaex.open(str(filename))
    self.dataset_selector.add(ds)
def open_fits(self, filename):
    """Open a FITS file and add it to the dataset selector."""
    logger.debug("open fits: %r", filename)
    ds = vaex.open(str(filename))
    self.dataset_selector.add(ds)
def open(self, path):
    """Open a dataset from a local path or remote URL, add it to the UI, and return it."""
    logger.debug("open dataset: %r", path)
    if path.startswith(("http", "ws")):
        # Remote datasets need a thread mover so callbacks land on the GUI thread.
        dataset = vaex.open(path, thread_mover=self.call_in_main_thread)
    else:
        dataset = vaex.open(path)
    self.add_recently_opened(path)
    self.dataset_selector.add(dataset)
    return dataset
def add(self, dataset):
    """Register an already-opened dataset with the UI."""
    self.dataset_selector.add(dataset)
def openGenerator(self, callback_, description, filemask):
    """Build, remember and return an 'open file' handler.

    The returned callable shows a file dialog with the given *description*
    and *filemask*; on a non-empty choice it invokes *callback_* with the
    filename and records the file in the recently-opened list.
    """
    # print repr(callback_)
    def open(arg=None, callback_=callback_, filemask=filemask):
        # Default arguments bind callback_/filemask at definition time
        # (avoids late-binding issues when several handlers are created).
        # print repr(callback_), repr(filemask)
        filename = QtGui.QFileDialog.getOpenFileName(self, description, "", filemask)
        if isinstance(filename, tuple):
            # Newer Qt bindings return (filename, selected_filter).
            filename = str(filename[0])  # ]
        # print repr(callback_)
        if filename:
            callback_(filename)
            self.add_recently_opened(filename)
    self.open_generators.append(open)
    return open
def add_recently_opened(self, path):
    """Put *path* at the head of the recently-opened list (max 10) and persist it."""
    if not path.startswith(("http", "ws")):
        # Non-URL paths are normalized to absolute paths.
        path = os.path.abspath(path)
    # De-duplicate before inserting at the front.
    while path in self.recently_opened:
        self.recently_opened.remove(path)
    self.recently_opened.insert(0, path)
    self.recently_opened = self.recently_opened[:10]
    vaex.settings.main.store("recent", self.recently_opened)
    self.update_recently_opened()
def update_recently_opened(self):
    """Rebuild the 'recent files' menu from self.recently_opened."""
    self.menu_recent.clear()
    self.menu_recent_subactions = []
    for path in self.recently_opened:
        def open_recent(ignore=None, path=path):
            # Default argument pins the current path (avoids late binding).
            self.open(path)
        action = QtGui.QAction(vaex.utils.filename_shorten(path), self)
        action.triggered.connect(open_recent)
        self.menu_recent_subactions.append(action)
        self.menu_recent.addAction(action)
    self.menu_recent.addSeparator()

    def clear_recent(ignore=None):
        # Wipe the persisted list and refresh the menu.
        self.recently_opened = []
        vaex.settings.main.store("recent", self.recently_opened)
        self.update_recently_opened()
    action = QtGui.QAction("Clear recent list", self)
    action.triggered.connect(clear_recent)
    self.menu_recent_subactions.append(action)
    self.menu_recent.addAction(action)
def onSampConnect(self, ignore_error=False):
    """Toggle handler for the 'connect to SAMP' action.

    Checked: lazily create the Samp client and, if the hub connection
    succeeded, register notification/call handlers for the supported
    mtypes; on failure optionally show an error dialog and reset the
    action state.  Unchecked: disconnect and drop the client.
    """
    if self.action_samp_connect.isChecked():
        if self.samp is None:
            self.samp = Samp(daemon=True, name="vaex")
        # self.samp.tableLoadCallbacks.append(self.onLoadTable)
        connected = self.samp.client.is_connected
        # print "samp is connected:", connected
        if connected:
            # NOTE(review): 'table.highlight.row' only gets a notification
            # handler and 'table.select.rowList' only a call handler —
            # possibly intentional, verify against the SAMP peers used.
            self.samp.client.bind_receive_notification("table.highlight.row", self._on_samp_notification)
            self.samp.client.bind_receive_call("table.select.rowList", self._on_samp_call)
            self.samp.client.bind_receive_notification("table.load.votable", self._on_samp_notification)
            self.samp.client.bind_receive_call("table.load.votable", self._on_samp_call)
            self.samp.client.bind_receive_notification("table.load.fits", self._on_samp_notification)
            self.samp.client.bind_receive_call("table.load.fits", self._on_samp_call)
        else:
            if not ignore_error:
                dialog_error(self, "Connecting to SAMP server", "Could not connect, make sure a SAMP HUB is running (for instance TOPCAT)")
            self.samp = None
            self.action_samp_connect.setChecked(False)
    else:
        print("disconnect")
        # try:
        self.samp.client.disconnect()
        self.samp = None
        # self.action_samp_connect.setText("disconnect from SAMP HUB" if self.samp else "conncet to SAMP HUB")
        # except:
        # 	dialog_exception(self, "Connecting to SAMP server", "Could not connect, make sure a SAMP HUB is running (for instance TOPCAT)")
def _on_samp_notification(self, private_key, sender_id, mtype, params, extra):
    """SAMP notification callback (runs on the SAMP thread).

    Pushes the event onto the main thread's event loop via a Qt signal.
    """
    print(private_key, sender_id, mtype, params, extra)
    self.signal_samp_notification.emit(private_key, sender_id, mtype, params, extra)
def _on_samp_call(self, private_key, sender_id, msg_id, mtype, params, extra):
    """SAMP call callback (runs on the SAMP thread).

    Forwards the call to the main thread via a Qt signal — same mechanism
    as _on_samp_notification — then replies OK to the caller immediately.
    """
    # same as _on_samp_notification
    # print private_key, sender_id, msg_id, mtype, params, extra
    self.signal_samp_call.emit(private_key, sender_id, msg_id, mtype, params, extra)
    self.samp.client.ereply(msg_id, sampy.SAMP_STATUS_OK, result={"txt": "printed"})
def on_samp_notification(self, private_key, sender_id, mtype, params, extra):
    """Dispatch a SAMP notification on the main (GUI) thread."""
    logger.debug("samp notification: %r" % ((private_key, sender_id, mtype),))
    assert QtCore.QThread.currentThread() == main_thread

    def dash_to_underscore(hashmap):
        # SAMP parameter names use dashes; Python keyword args need underscores.
        return {key.replace("-", "_"): value for key, value in hashmap.items()}
    params = dash_to_underscore(params)
    if mtype == "table.highlight.row":
        self.samp_table_highlight_row(**params)
    if mtype == "table.select.rowList":
        self.samp_table_select_rowlist(**params)
    if mtype == "table.load.votable":
        self.samp_table_load_votable(**params)
def on_samp_call(self, private_key, sender_id, msg_id, mtype, params, extra):
    """Handle a SAMP call on the main thread by treating it as a plain notification."""
    assert QtCore.QThread.currentThread() == main_thread
    self.on_samp_notification(private_key, sender_id, mtype, params, extra)
def samp_table_highlight_row(self, row, url=None, table_id=None):
    """Set the current row on every open dataset matching *url* or *table_id*."""
    logger.debug("highlight row: {url}:{row}".format(**locals()))
    print(("highlight row: {url}:{row}".format(**locals())))
    row = int(row)
    # Only url/table_id matching is supported for now.
    for identifier in (url, table_id):
        if identifier is None:
            continue
        for dataset in self._samp_find_datasets(identifier):
            # Guard flag prevents echoing the highlight back over SAMP (infinite loop).
            self.highlighed_row_from_samp = True
            try:
                dataset.set_current_row(row)
            finally:
                self.highlighed_row_from_samp = False
def samp_table_select_rowlist(self, row_list, url=None, table_id=None):
    """Apply a SAMP row-list selection to every dataset matching *url* or *table_id*.

    The rows are applied as a boolean mask; each dataset is updated at most
    once even if it matches both identifiers.
    """
    print("----")
    logger.debug("select rowlist: {url}".format(**locals()))
    print(("select rowlist: {url}".format(**locals())))
    row_list = np.array([int(k) for k in row_list])
    datasets_updated = []  # keep a list to avoid multiple 'setMask' calls (which would do an update twice)
    # TODO: this method is not compatible with the selection history... how to deal with this? New SelectionObject?
    for id in (url, table_id):
        if id is not None:
            for dataset in self._samp_find_datasets(id):
                if dataset not in datasets_updated:
                    # np.bool was deprecated in NumPy 1.20 and removed in 1.24;
                    # the builtin bool is the correct dtype argument.
                    mask = np.zeros(len(dataset), dtype=bool)
                    mask[row_list] = True
                    print("match dataset", dataset)
                    dataset._set_mask(mask)
                    datasets_updated.append(dataset)
def samp_table_load_votable(self, url=None, table_id=None, name=None):
    """Handle a SAMP 'table.load.votable' message.

    Offers the user a choice between (a) reading a local file directly when
    *table_id* points at an existing file (also probing for .hdf5/.fits
    siblings of the same basename), (b) downloading the VOTable from *url*,
    or (c) linking the table id to an already-open dataset.
    """
    filenames = []
    if table_id is not None:
        filename = table_id
        if filename.startswith("file:/"):
            filename = filename[5:]  # strip the 'file:' scheme prefix
        basename, ext = os.path.splitext(filename)
        if os.path.exists(filename):
            filenames.append(filename)
        # Also offer faster-to-read sibling files with the same basename.
        for other_ext in [".hdf5", ".fits"]:
            filename = basename + other_ext
            print(filename)
            if os.path.exists(filename) and filename not in filenames:
                filenames.append(filename)
    filenames = list(filter(vaex.file.can_open, filenames))
    options = []
    for filename in filenames:
        options.append(filename + " | read directly from file (faster)")
    options.append(url + " | load as VOTable (slower)")
    # options.append("link to existing opened dataset")
    for dataset in self.dataset_selector.datasets:
        options.append("link to existing open dataset: " + dataset.name)
    index = choose(self, "SAMP: load table", "Choose how to load table", options)
    if index is not None:
        if index < len(filenames):
            # Options [0, len(filenames)) are local files.
            print("open file", filenames[index])
            self.load_file(filenames[index], table_id)
        elif index == len(filenames):
            # The single option after the files is the VOTable download.
            self.load_votable(url, table_id)
            print("load votable", url)
        else:
            # Remaining options map onto already-open datasets.
            self.dataset_selector.datasets[index - len(filenames) - 1].samp_id = table_id
def load_file(self, path, samp_id=None):
    """Open *path* with vaex, tag it with a SAMP id, and add it to the UI.

    Does nothing when vx.open returns a falsy result.
    """
    # Removed unused local 'dataset_class' from the original.
    ds = vx.open(path)
    if ds:
        ds.samp_id = samp_id
        self.dataset_selector.add(ds)
def load_votable(self, url, table_id):
    """Download a VOTable from *url* (with an indeterminate busy dialog) and add it.

    Prompts for credentials via a QuickDialog when the server requests them.
    Returns the new dataset, or None when the fetch failed or was cancelled.
    """
    dialog = QtGui.QProgressDialog("Downloading VO table", "cancel", 0, 0, self)
    # self.dialog.show()
    dialog.setWindowModality(QtCore.Qt.WindowModal)
    dialog.setMinimumDuration(0)
    dialog.setAutoClose(True)
    dialog.setAutoReset(True)
    dialog.setMinimum(0)
    dialog.setMaximum(0)  # min == max == 0 shows an indeterminate busy bar
    dialog.show()
    try:
        def ask(username, password):
            # Credential callback for vaex.samp.fetch_votable; returns a
            # (username, password) tuple or None when the user cancels.
            d = QuickDialog(self, "Username/password")
            d.add_text("username", "Username", username)
            d.add_password("password", "Password", password)
            values = d.get()
            if values:
                return values["username"], values["password"]
            else:
                return None
        t = vaex.samp.fetch_votable(url, ask=ask)
        if t:
            dataset = vx.from_astropy_table(t.to_table())
            # table = astropy.io.votable.parse_single_table(url)
            # print("done parsing table")
            # names = table.array.dtype.names
            # dataset = DatasetMemoryMapped(table_id, nommap=True)
            # data = table.array.data
            # for i in range(len(data.dtype)):
            # 	name = data.dtype.names[i]
            # 	type = data.dtype[i]
            # 	if type.kind in ["f", "i"]: # only store float
            # 		#datagroup.create_dataset(name, data=table.array[name].astype(np.float64))
            # 		#dataset.addMemoryColumn(name, table.array[name].astype(np.float64))
            # 		dataset.addColumn(name, array=table.array[name])
            dataset.samp_id = table_id
            dataset.name = table_id
            self.dataset_selector.add(dataset)
            return dataset
    finally:
        # Always dismiss the busy dialog, even on error/cancel.
        dialog.hide()
def message(self, text, index=0):
    """Show *text* in message slot *index*; all slots are joined with ' | ' in the status bar."""
    print(text)
    self.messages[index] = text
    parts = [self.messages[key] for key in sorted(self.messages.keys())]
    self.statusBar().showMessage(" | ".join(parts))
def _samp_find_datasets(self, id):
    """Yield every open dataset whose url or samp_id matches *id*."""
    print(self.dataset_selector.datasets)
    try:
        for dataset in self.dataset_selector.datasets:
            if dataset.matches_url(id) or (dataset.samp_id == id):
                yield dataset
    except Exception:
        # Was a bare 'except:', which inside a generator also swallows
        # GeneratorExit (raised when the caller stops iterating early) and
        # turns it into a RuntimeError; narrow to Exception.
        logger.exception("problem")
def onSampSend(self):
    """Broadcast the current dataset's memory-mapped column layout over SAMP.

    Sends a 'table.load.memory_mapped_columns' message describing, per
    column, the backing file, binary type tag, byte offset and strides, so
    another vaex instance can map the same data without copying it.
    """
    if self.samp is None:
        self.onSampConnect()
    dataset = self.dataset_panel.dataset
    params = {"rows": str(dataset._length), "columns": {}}
    params['id'] = dataset.filename
    # numpy scalar type -> little-endian binary type tag.
    type_map = {np.float64: "F8_LE", np.float32: "F4_LE", np.int64: "I8_LE", np.int32: "I4_LE", np.uint64: "U8_LE", np.uint32: "U4_LE"}
    print(type_map)
    for column_name in dataset.column_names:
        type = dataset.dtypes[column_name]
        if hasattr(type, "type"):
            type = type.type  # TODO: why is this needed?
        # Size in bytes of one element of this dtype.
        bytes_type = np.zeros(1, dtype=type).dtype.itemsize
        column = {
            "filename": dataset.filenames[column_name],
            "type": type_map[type],
            "byte_offset": str(dataset.offsets[column_name]),
            "type_stride": str(dataset.strides[column_name]),
            "byte_stride": str(dataset.strides[column_name] * bytes_type),
            "bytes_type": str(bytes_type),
        }
        params["columns"][column_name] = column
    self.samp.client.callAll("send_mmap_" + dataset.name,
                             {"samp.mtype": "table.load.memory_mapped_columns",
                              "samp.params": params})
def onLoadTable(self, url, table_id, name):
    """SAMP 'load table' callback; note this is invoked from a non-GUI thread."""
    print("loading table", url, table_id, name)
    try:
        self.load(url, table_id, name)
    except Exception:
        # Narrowed from a bare 'except:' which would also trap
        # SystemExit/KeyboardInterrupt; the failure is logged, not raised.
        logger.exception("load table")
def load(self, url, table_id, name):
    """Parse a VOTable from *url* and add it as an in-memory dataset.

    Only float ('f' kind) columns are copied; other dtypes are silently
    dropped.
    """
    print("parsing table...")
    table = astropy.io.votable.parse_single_table(url)
    print("done parsing table")
    names = table.array.dtype.names
    # nommap=True keeps the columns in memory rather than memory-mapping.
    dataset = DatasetMemoryMapped(table_id, nommap=True)
    data = table.array.data
    for i in range(len(data.dtype)):
        name = data.dtype.names[i]
        type = data.dtype[i]
        if type.kind == "f":  # only store float
            # datagroup.create_dataset(name, data=table.array[name].astype(np.float64))
            dataset.addMemoryColumn(name, table.array[name].astype(np.float64))
    self.dataset_selector.add(dataset)
def center(self):
    """Move the window to the center of the available desktop geometry."""
    frame = self.frameGeometry()
    frame.moveCenter(QtGui.QDesktopWidget().availableGeometry().center())
    self.move(frame.topLeft())
def closeEvent(self, event):
    """Qt close handler.

    NOTE(review): the early ``return`` below disables the quit-confirmation
    dialog entirely — everything after it is unreachable.  Presumably
    disabled on purpose during development; confirm before re-enabling.
    """
    # print("close event")
    return
    reply = QtGui.QMessageBox.question(self, 'Message',
                                       "Are you sure to quit?", QtGui.QMessageBox.Yes |
                                       QtGui.QMessageBox.No, QtGui.QMessageBox.No)
    if reply == QtGui.QMessageBox.Yes:
        event.accept()
    else:
        event.ignore()
def clean_up(self):
    """Disconnect from the SAMP hub (if connected) before application shutdown."""
    print("clean up")
    if self.samp is not None:
        print("disconnect samp")
        try:
            self.samp.client.disconnect()
        except Exception:
            # Narrowed from a bare 'except:'; a failed disconnect is
            # non-fatal at shutdown, so it is only logged.
            logger.exception("error disconnecting from SAMP hub")
    # event.accept()
app = None  # QApplication singleton, created lazily in main()
kernel = None  # placeholder for an (optional) in-process IPython kernel
# Disabled in-process Jupyter console support; kept for reference.
"""
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
from IPython.lib import guisupport
"""
def print_process_id():
    """Print the current OS process id (used to demo in-process kernel sharing)."""
    # The original Python-2 style call printed a tuple, e.g.
    # "('Process ID is:', 1234)"; pass separate arguments instead.
    print('Process ID is:', os.getpid())
class Current(object):
    """Mutable namespace holding the 'current' UI objects (window, layer, ...)."""
    pass


current = Current()
# current.fig = None
current.window = None  # set when the main VaexApp window exists
# current.layer = None
def main(argv=sys.argv[1:]):
    """Application entry point: create the QApplication, install exception
    hooks, build the main VaexApp window and run the Qt event loop.

    Mutates the module-level globals (app, main_thread, ...) so other parts
    of the UI can reach them.
    """
    global main_thread
    global vaex
    global app
    global kernel
    global ipython_console
    global current
    vaex.set_log_level_warning()
    if app is None:
        app = QtGui.QApplication(argv)
        if not (frozen and darwin):  # osx app has its own icon file
            import vaex.ui.icons
            icon = QtGui.QIcon(vaex.ui.icons.iconfile('vaex128'))
            app.setWindowIcon(icon)
    # import vaex.ipkernel_qtapp
    # ipython_window = vaex.ipkernel_qtapp.SimpleWindow(app)
    main_thread = QtCore.QThread.currentThread()
    # print select_many(None, "lala", ["aap", "noot"] + ["item-%d-%s" % (k, "-" * k) for k in range(30)])
    # sys.exit(0)
    # sys._excepthook = sys.excepthook

    def qt_exception_hook(exctype, value, traceback):
        # Route uncaught exceptions to the Qt error dialog in addition to
        # the default traceback printer.
        print("qt hook in thread: %r" % threading.currentThread())
        sys.__excepthook__(exctype, value, traceback)
        qt_exception(None, exctype, value, traceback)
        # sys._excepthook(exctype, value, traceback)
        # sys.exit(1)
    sys.excepthook = qt_exception_hook
    vaex.promise.Promise.unhandled = staticmethod(qt_exception_hook)
    # raise RuntimeError, "blaat"
    vaex_app = VaexApp(argv, open_default=True)

    def plot(*args, **kwargs):
        # Convenience wrapper exposed to the (disabled) embedded console.
        vaex_app.plot(*args, **kwargs)

    def select(*args, **kwargs):
        # Convenience wrapper exposed to the (disabled) embedded console.
        vaex_app.select(*args, **kwargs)
    """if 1:
        # app = guisupport.get_app_qt4()
        print_process_id()
        # Create an in-process kernel
        # >>> print_process_id( )
        # will print the same process ID as the main process
        kernel_manager = QtInProcessKernelManager()
        kernel_manager.start_kernel()
        kernel = kernel_manager.kernel
        kernel.gui = 'qt4'
        kernel.shell.push({'foo': 43, 'print_process_id': print_process_id, "vaex_app":vaex_app, "plot": plot, "current": current, "select": select})
        kernel_client = kernel_manager.client()
        kernel_client.start_channels()
        def stop():
            kernel_client.stop_channels()
            kernel_manager.shutdown_kernel()
            app.exit()
        ipython_console = RichJupyterWidget()
        ipython_console.kernel_manager = kernel_manager
        ipython_console.kernel_client = kernel_client
        ipython_console.exit_requested.connect(stop)
        #ipython_console.show()
        sys.exit(guisupport.start_event_loop_qt4(app))
    """
    # w = QtGui.QWidget()
    # w.resize(250, 150)
    # w.move(300, 300)
    # w.setWindowTitle('Simple')
    # w.show()
    # ipython_window.show()
    # ipython_window.ipkernel.start()
    sys.exit(app.exec_())
def batch_copy_index(from_array, to_array, shuffle_array):
    """Copy from_array[shuffle_array] into to_array in batches of ~1e7 elements."""
    batch_size = int(1e7)
    length = len(from_array)
    batch_count = int(math.ceil(float(length) / batch_size))
    print(np.sum(from_array))
    for batch in range(batch_count):
        # print "batch", batch, "out of", batch_count, ""
        sys.stdout.flush()
        start = batch * batch_size
        stop = min(length, (batch + 1) * batch_size)
        # print "reading...", start, stop
        sys.stdout.flush()
        chunk = from_array[shuffle_array[start:stop]]
        # print "writing..."
        sys.stdout.flush()
        to_array[start:stop] = chunk
| maartenbreddels/vaex | packages/vaex-ui/vaex/ui/main.py | Python | mit | 89,877 |
# -*- coding: utf-8 -*-
# Get rid of "FormatSerializer:Method 'create' is abstract in class 'BaseSerializer' but is not overridden"
# FormatSerializer is read only anyway
# pylint: disable=abstract-method
from __future__ import unicode_literals
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.db.models import Q
from authentication.serializers import UserDisplaySerializer
from hub.models import PackageModel, DocumentModel, FileGroupModel, FileModel, TransformationModel, UrlModel
"""
Django serializers.
"""
class PackageSerializer(serializers.HyperlinkedModelSerializer):
    """
    Packages are either documents or transformations. Do some magic to differentiate between them (django/rest_framework
    is really bad at this).
    """

    owner = UserDisplaySerializer(read_only=True)
    type = serializers.SerializerMethodField()
    preview = serializers.SerializerMethodField()
    template = serializers.SerializerMethodField()

    class Meta(object):
        """ Meta class for PackageSerializer. """
        model = PackageModel
        fields = ('id', 'url', 'name', 'description', 'private', 'owner', 'created_at', 'type', 'preview', 'template')

    def get_template(self, obj):
        """Only transformations can be templates; documents never are."""
        if isinstance(obj, TransformationModel):
            return obj.is_template
        return False

    def get_type(self, obj):
        """Return 'document'/'transformation'/'unknown' based on the concrete model class."""
        if isinstance(obj, DocumentModel):
            return 'document'
        elif isinstance(obj, TransformationModel):
            return 'transformation'
        return 'unknown'

    def get_preview(self, obj):
        """Build the preview URL for the concrete type's '<type>model-preview' view."""
        request = self.context.get('request', None)
        # Renamed the local from 'format' to avoid shadowing the builtin.
        response_format = self.context.get('format', None)
        return reverse('{}model-preview'.format(self.get_type(obj)), kwargs={'pk': obj.id}, request=request,
                       format=response_format)
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for documents, exposing file-group and preview hyperlinks."""

    file_groups = serializers.HyperlinkedIdentityField('documentmodel-filegroup')
    owner = UserDisplaySerializer(read_only=True)
    preview = serializers.HyperlinkedIdentityField('documentmodel-preview')

    class Meta(object):
        """ Meta class for DocumentSerializer. """
        model = DocumentModel
        fields = ('id', 'url', 'name', 'description', 'file_groups', 'private', 'owner', 'created_at', 'preview')

    def to_representation(self, instance):
        """Tag the serialized payload with its package type."""
        data = super(DocumentSerializer, self).to_representation(instance)
        data['type'] = 'document'
        return data
class FileSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for individual files belonging to a file group."""

    # The model field is named 'format'; expose it as 'file_format'.
    file_format = serializers.CharField(source='format')

    class Meta(object):
        """ Meta class for FileSerializer. """
        model = FileModel
        fields = ('id', 'url', 'file_name', 'file_format', 'file_group')
class UrlSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for URL-backed data sources of a file group."""

    source_url = serializers.URLField()
    # The model field is named 'format'; expose it as 'url_format'.
    url_format = serializers.CharField(source='format')

    class Meta(object):
        """ Meta class for UrlSerializer. """
        model = UrlModel
        fields = ('id', 'url', 'source_url', 'url_format', 'refresh_after', 'type', 'file_group')
class TransformationIdSerializer(serializers.Serializer):
    """Compact (id, url, name) representation of a transformation, used in related lists."""

    id = serializers.IntegerField(read_only=True)
    url = serializers.HyperlinkedIdentityField(view_name='transformationmodel-detail')
    name = serializers.CharField(read_only=True)

    class Meta(object):
        fields = ('id', 'url', 'name')
class RelatedTransformationMixin(object):
    """Mixin that serializes the related transformations visible to the requesting user."""

    def _get_related_transformations(self, obj, request):
        """Return serialized related transformations that are public or owned by the requester."""
        # Renamed the local from 'filter' to avoid shadowing the builtin.
        visibility = Q(private=False)
        if request.user:
            visibility |= Q(owner=request.user.id)
        related_transformations = obj.related_transformations.filter(visibility)
        serializer = TransformationIdSerializer(related_transformations, many=True, context={'request': request})
        return serializer.data
class FileGroupSerializer(serializers.HyperlinkedModelSerializer, RelatedTransformationMixin):
    """Serializer for file groups, with nested files/urls and data/preview/token links."""

    files = FileSerializer(many=True, read_only=True)
    urls = UrlSerializer(many=True, read_only=True)
    document = DocumentSerializer(read_only=True)
    related_transformations = serializers.SerializerMethodField()
    data = serializers.HyperlinkedIdentityField('filegroupmodel-data')
    token = serializers.HyperlinkedIdentityField('filegroupmodel-token')
    preview = serializers.HyperlinkedIdentityField('filegroupmodel-preview')

    class Meta(object):
        """ Meta class for FileGroupSerializer. """
        model = FileGroupModel
        fields = ('id', 'url', 'document', 'files', 'urls', 'data', 'preview', 'related_transformations', 'token')
        depth = 1

    def get_related_transformations(self, obj):
        """Delegate to RelatedTransformationMixin with the current request context."""
        return self._get_related_transformations(obj, self.context['request'])
class FormatSerializer(serializers.Serializer):
    """Read-only serializer describing an available data format."""

    name = serializers.CharField(read_only=True)
    label = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
    example = serializers.CharField(read_only=True)
    extension = serializers.CharField(read_only=True)
class TransformationSerializer(serializers.HyperlinkedModelSerializer, RelatedTransformationMixin):
    """Serializer for transformations, with links to referenced groups/transformations."""

    referenced_file_groups = serializers.HyperlinkedIdentityField('transformationmodel-filegroups')
    referenced_transformations = serializers.HyperlinkedIdentityField('transformationmodel-transformations')
    token = serializers.HyperlinkedIdentityField('transformationmodel-token')
    related_transformations = serializers.SerializerMethodField()
    owner = UserDisplaySerializer(read_only=True)
    data = serializers.HyperlinkedIdentityField('transformationmodel-data')
    preview = serializers.HyperlinkedIdentityField('transformationmodel-preview')

    class Meta(object):
        """ Meta class for TransformationSerializer. """
        model = TransformationModel
        fields = ('id', 'url', 'name', 'description', 'transformation', 'private', 'owner', 'data', 'is_template',
                  'preview', 'referenced_file_groups', 'referenced_transformations', 'related_transformations', 'token')

    def to_representation(self, instance):
        # Tag the serialized payload with its package type.
        ret = super(TransformationSerializer, self).to_representation(instance)
        ret['type'] = 'transformation'
        return ret

    def get_related_transformations(self, obj):
        """Delegate to RelatedTransformationMixin with the current request context."""
        return self._get_related_transformations(obj, self.context['request'])
| hsr-ba-fs15-dat/opendatahub | src/main/python/hub/serializers.py | Python | mit | 6,460 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'reactiongameui.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(800, 600)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
Form.setAutoFillBackground(True)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setObjectName("verticalLayout")
self.Main = QtWidgets.QFrame(Form)
self.Main.setAutoFillBackground(False)
self.Main.setFrameShape(QtWidgets.QFrame.NoFrame)
self.Main.setFrameShadow(QtWidgets.QFrame.Plain)
self.Main.setObjectName("Main")
self.Button2 = QtWidgets.QPushButton(self.Main)
self.Button2.setGeometry(QtCore.QRect(675, 555, 101, 23))
self.Button2.setObjectName("Button2")
self.BarLabel3 = QtWidgets.QLabel(self.Main)
self.BarLabel3.setGeometry(QtCore.QRect(10, 5, 270, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(14)
self.BarLabel3.setFont(font)
self.BarLabel3.setAutoFillBackground(True)
self.BarLabel3.setAlignment(QtCore.Qt.AlignCenter)
self.BarLabel3.setObjectName("BarLabel3")
self.Button1 = QtWidgets.QPushButton(self.Main)
self.Button1.setGeometry(QtCore.QRect(570, 555, 101, 23))
self.Button1.setObjectName("Button1")
self.BarLabel1 = QtWidgets.QLabel(self.Main)
self.BarLabel1.setGeometry(QtCore.QRect(500, 5, 270, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(14)
self.BarLabel1.setFont(font)
self.BarLabel1.setAutoFillBackground(True)
self.BarLabel1.setAlignment(QtCore.Qt.AlignCenter)
self.BarLabel1.setObjectName("BarLabel1")
self.BarLabel2 = QtWidgets.QLabel(self.Main)
self.BarLabel2.setGeometry(QtCore.QRect(270, 5, 270, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(14)
self.BarLabel2.setFont(font)
self.BarLabel2.setAutoFillBackground(True)
self.BarLabel2.setAlignment(QtCore.Qt.AlignCenter)
self.BarLabel2.setObjectName("BarLabel2")
self.BackLabel = QtWidgets.QLabel(self.Main)
self.BackLabel.setGeometry(QtCore.QRect(9, 4, 762, 33))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(14)
self.BackLabel.setFont(font)
self.BackLabel.setAutoFillBackground(True)
self.BackLabel.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.BackLabel.setFrameShadow(QtWidgets.QFrame.Sunken)
self.BackLabel.setText("")
self.BackLabel.setAlignment(QtCore.Qt.AlignCenter)
self.BackLabel.setObjectName("BackLabel")
self.QF2 = QtWidgets.QFrame(self.Main)
self.QF2.setGeometry(QtCore.QRect(60, 181, 170, 62))
self.QF2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.QF2.setFrameShadow(QtWidgets.QFrame.Raised)
self.QF2.setObjectName("QF2")
self.label_5 = QtWidgets.QLabel(self.QF2)
self.label_5.setGeometry(QtCore.QRect(0, 31, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setAutoFillBackground(True)
self.label_5.setFrameShape(QtWidgets.QFrame.Box)
self.label_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.QF2)
self.label_6.setGeometry(QtCore.QRect(124, 31, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_6.setFont(font)
self.label_6.setAutoFillBackground(True)
self.label_6.setFrameShape(QtWidgets.QFrame.Box)
self.label_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.QF2)
self.label_7.setGeometry(QtCore.QRect(124, 0, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_7.setFont(font)
self.label_7.setAutoFillBackground(True)
self.label_7.setFrameShape(QtWidgets.QFrame.Box)
self.label_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.QF2)
self.label_8.setGeometry(QtCore.QRect(0, 0, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_8.setFont(font)
self.label_8.setAutoFillBackground(True)
self.label_8.setFrameShape(QtWidgets.QFrame.Box)
self.label_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.label_5.raise_()
self.label_6.raise_()
self.label_8.raise_()
self.label_7.raise_()
self.QF1 = QtWidgets.QFrame(self.Main)
self.QF1.setGeometry(QtCore.QRect(60, 60, 170, 62))
self.QF1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.QF1.setFrameShadow(QtWidgets.QFrame.Raised)
self.QF1.setObjectName("QF1")
self.label_9 = QtWidgets.QLabel(self.QF1)
self.label_9.setGeometry(QtCore.QRect(0, 31, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_9.setFont(font)
self.label_9.setAutoFillBackground(True)
self.label_9.setFrameShape(QtWidgets.QFrame.Box)
self.label_9.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_9.setAlignment(QtCore.Qt.AlignCenter)
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(self.QF1)
self.label_10.setGeometry(QtCore.QRect(124, 31, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_10.setFont(font)
self.label_10.setAutoFillBackground(True)
self.label_10.setFrameShape(QtWidgets.QFrame.Box)
self.label_10.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_10.setAlignment(QtCore.Qt.AlignCenter)
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.QF1)
self.label_11.setGeometry(QtCore.QRect(124, 0, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_11.setFont(font)
self.label_11.setAutoFillBackground(True)
self.label_11.setFrameShape(QtWidgets.QFrame.Box)
self.label_11.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_11.setAlignment(QtCore.Qt.AlignCenter)
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(self.QF1)
self.label_12.setGeometry(QtCore.QRect(0, 0, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_12.setFont(font)
self.label_12.setAutoFillBackground(True)
self.label_12.setFrameShape(QtWidgets.QFrame.Box)
self.label_12.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_12.setAlignment(QtCore.Qt.AlignCenter)
self.label_12.setObjectName("label_12")
self.label_9.raise_()
self.label_10.raise_()
self.label_12.raise_()
self.label_11.raise_()
self.QF3 = QtWidgets.QFrame(self.Main)
self.QF3.setGeometry(QtCore.QRect(60, 301, 170, 62))
self.QF3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.QF3.setFrameShadow(QtWidgets.QFrame.Raised)
self.QF3.setObjectName("QF3")
self.label_13 = QtWidgets.QLabel(self.QF3)
self.label_13.setGeometry(QtCore.QRect(0, 31, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_13.setFont(font)
self.label_13.setAutoFillBackground(True)
self.label_13.setFrameShape(QtWidgets.QFrame.Box)
self.label_13.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.QF3)
self.label_14.setGeometry(QtCore.QRect(124, 31, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_14.setFont(font)
self.label_14.setAutoFillBackground(True)
self.label_14.setFrameShape(QtWidgets.QFrame.Box)
self.label_14.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_14.setAlignment(QtCore.Qt.AlignCenter)
self.label_14.setObjectName("label_14")
self.label_15 = QtWidgets.QLabel(self.QF3)
self.label_15.setGeometry(QtCore.QRect(124, 0, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_15.setFont(font)
self.label_15.setAutoFillBackground(True)
self.label_15.setFrameShape(QtWidgets.QFrame.Box)
self.label_15.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_15.setAlignment(QtCore.Qt.AlignCenter)
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.QF3)
self.label_16.setGeometry(QtCore.QRect(0, 0, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_16.setFont(font)
self.label_16.setAutoFillBackground(True)
self.label_16.setFrameShape(QtWidgets.QFrame.Box)
self.label_16.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_16.setAlignment(QtCore.Qt.AlignCenter)
self.label_16.setObjectName("label_16")
self.label_13.raise_()
self.label_14.raise_()
self.label_16.raise_()
self.label_15.raise_()
self.QF4 = QtWidgets.QFrame(self.Main)
self.QF4.setGeometry(QtCore.QRect(60, 421, 170, 62))
self.QF4.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.QF4.setFrameShadow(QtWidgets.QFrame.Raised)
self.QF4.setObjectName("QF4")
self.label_17 = QtWidgets.QLabel(self.QF4)
self.label_17.setGeometry(QtCore.QRect(0, 31, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_17.setFont(font)
self.label_17.setAutoFillBackground(True)
self.label_17.setFrameShape(QtWidgets.QFrame.Box)
self.label_17.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_17.setAlignment(QtCore.Qt.AlignCenter)
self.label_17.setObjectName("label_17")
self.label_18 = QtWidgets.QLabel(self.QF4)
self.label_18.setGeometry(QtCore.QRect(124, 31, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_18.setFont(font)
self.label_18.setAutoFillBackground(True)
self.label_18.setFrameShape(QtWidgets.QFrame.Box)
self.label_18.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_18.setAlignment(QtCore.Qt.AlignCenter)
self.label_18.setObjectName("label_18")
self.label_19 = QtWidgets.QLabel(self.QF4)
self.label_19.setGeometry(QtCore.QRect(124, 0, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_19.setFont(font)
self.label_19.setAutoFillBackground(True)
self.label_19.setFrameShape(QtWidgets.QFrame.Box)
self.label_19.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_19.setAlignment(QtCore.Qt.AlignCenter)
self.label_19.setObjectName("label_19")
self.label_20 = QtWidgets.QLabel(self.QF4)
self.label_20.setGeometry(QtCore.QRect(0, 0, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_20.setFont(font)
self.label_20.setAutoFillBackground(True)
self.label_20.setFrameShape(QtWidgets.QFrame.Box)
self.label_20.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_20.setAlignment(QtCore.Qt.AlignCenter)
self.label_20.setObjectName("label_20")
self.label_17.raise_()
self.label_18.raise_()
self.label_20.raise_()
self.label_19.raise_()
self.SF1 = QtWidgets.QFrame(self.Main)
self.SF1.setGeometry(QtCore.QRect(331, 120, 170, 62))
self.SF1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.SF1.setFrameShadow(QtWidgets.QFrame.Raised)
self.SF1.setObjectName("SF1")
self.label_21 = QtWidgets.QLabel(self.SF1)
self.label_21.setGeometry(QtCore.QRect(0, 31, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_21.setFont(font)
self.label_21.setAutoFillBackground(True)
self.label_21.setFrameShape(QtWidgets.QFrame.Box)
self.label_21.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_21.setAlignment(QtCore.Qt.AlignCenter)
self.label_21.setObjectName("label_21")
self.label_22 = QtWidgets.QLabel(self.SF1)
self.label_22.setGeometry(QtCore.QRect(124, 31, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_22.setFont(font)
self.label_22.setAutoFillBackground(True)
self.label_22.setFrameShape(QtWidgets.QFrame.Box)
self.label_22.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_22.setAlignment(QtCore.Qt.AlignCenter)
self.label_22.setObjectName("label_22")
self.label_23 = QtWidgets.QLabel(self.SF1)
self.label_23.setGeometry(QtCore.QRect(124, 0, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_23.setFont(font)
self.label_23.setAutoFillBackground(True)
self.label_23.setFrameShape(QtWidgets.QFrame.Box)
self.label_23.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_23.setAlignment(QtCore.Qt.AlignCenter)
self.label_23.setObjectName("label_23")
self.label_24 = QtWidgets.QLabel(self.SF1)
self.label_24.setGeometry(QtCore.QRect(0, 0, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_24.setFont(font)
self.label_24.setAutoFillBackground(True)
self.label_24.setFrameShape(QtWidgets.QFrame.Box)
self.label_24.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_24.setAlignment(QtCore.Qt.AlignCenter)
self.label_24.setObjectName("label_24")
self.label_21.raise_()
self.label_22.raise_()
self.label_24.raise_()
self.label_23.raise_()
self.SF2 = QtWidgets.QFrame(self.Main)
self.SF2.setGeometry(QtCore.QRect(331, 360, 170, 62))
self.SF2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.SF2.setFrameShadow(QtWidgets.QFrame.Raised)
self.SF2.setObjectName("SF2")
self.label_25 = QtWidgets.QLabel(self.SF2)
self.label_25.setGeometry(QtCore.QRect(0, 31, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_25.setFont(font)
self.label_25.setAutoFillBackground(True)
self.label_25.setFrameShape(QtWidgets.QFrame.Box)
self.label_25.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_25.setAlignment(QtCore.Qt.AlignCenter)
self.label_25.setObjectName("label_25")
self.label_26 = QtWidgets.QLabel(self.SF2)
self.label_26.setGeometry(QtCore.QRect(124, 31, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_26.setFont(font)
self.label_26.setAutoFillBackground(True)
self.label_26.setFrameShape(QtWidgets.QFrame.Box)
self.label_26.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_26.setAlignment(QtCore.Qt.AlignCenter)
self.label_26.setObjectName("label_26")
self.label_27 = QtWidgets.QLabel(self.SF2)
self.label_27.setGeometry(QtCore.QRect(124, 0, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_27.setFont(font)
self.label_27.setAutoFillBackground(True)
self.label_27.setFrameShape(QtWidgets.QFrame.Box)
self.label_27.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_27.setAlignment(QtCore.Qt.AlignCenter)
self.label_27.setObjectName("label_27")
self.label_28 = QtWidgets.QLabel(self.SF2)
self.label_28.setGeometry(QtCore.QRect(0, 0, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_28.setFont(font)
self.label_28.setAutoFillBackground(True)
self.label_28.setFrameShape(QtWidgets.QFrame.Box)
self.label_28.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_28.setAlignment(QtCore.Qt.AlignCenter)
self.label_28.setObjectName("label_28")
self.label = QtWidgets.QLabel(self.Main)
self.label.setGeometry(QtCore.QRect(331, 340, 171, 16))
self.label.setObjectName("label")
self.FinalMatchFrame = QtWidgets.QFrame(self.Main)
self.FinalMatchFrame.setGeometry(QtCore.QRect(581, 240, 170, 62))
self.FinalMatchFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.FinalMatchFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.FinalMatchFrame.setObjectName("FinalMatchFrame")
self.label_29 = QtWidgets.QLabel(self.FinalMatchFrame)
self.label_29.setGeometry(QtCore.QRect(0, 31, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_29.setFont(font)
self.label_29.setAutoFillBackground(True)
self.label_29.setFrameShape(QtWidgets.QFrame.Box)
self.label_29.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_29.setAlignment(QtCore.Qt.AlignCenter)
self.label_29.setObjectName("label_29")
self.label_30 = QtWidgets.QLabel(self.FinalMatchFrame)
self.label_30.setGeometry(QtCore.QRect(124, 31, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_30.setFont(font)
self.label_30.setAutoFillBackground(True)
self.label_30.setFrameShape(QtWidgets.QFrame.Box)
self.label_30.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_30.setAlignment(QtCore.Qt.AlignCenter)
self.label_30.setObjectName("label_30")
self.label_31 = QtWidgets.QLabel(self.FinalMatchFrame)
self.label_31.setGeometry(QtCore.QRect(124, 0, 47, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_31.setFont(font)
self.label_31.setAutoFillBackground(True)
self.label_31.setFrameShape(QtWidgets.QFrame.Box)
self.label_31.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_31.setAlignment(QtCore.Qt.AlignCenter)
self.label_31.setObjectName("label_31")
self.label_32 = QtWidgets.QLabel(self.FinalMatchFrame)
self.label_32.setGeometry(QtCore.QRect(0, 0, 125, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_32.setFont(font)
self.label_32.setAutoFillBackground(True)
self.label_32.setFrameShape(QtWidgets.QFrame.Box)
self.label_32.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_32.setAlignment(QtCore.Qt.AlignCenter)
self.label_32.setObjectName("label_32")
self.ChampionsFrame = QtWidgets.QFrame(self.Main)
self.ChampionsFrame.setGeometry(QtCore.QRect(581, 451, 170, 60))
self.ChampionsFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.ChampionsFrame.setFrameShadow(QtWidgets.QFrame.Raised)
self.ChampionsFrame.setObjectName("ChampionsFrame")
self.label_33 = QtWidgets.QLabel(self.ChampionsFrame)
self.label_33.setGeometry(QtCore.QRect(0, 30, 170, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_33.setFont(font)
self.label_33.setAutoFillBackground(True)
self.label_33.setFrameShape(QtWidgets.QFrame.Box)
self.label_33.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_33.setAlignment(QtCore.Qt.AlignCenter)
self.label_33.setObjectName("label_33")
self.label_36 = QtWidgets.QLabel(self.ChampionsFrame)
self.label_36.setGeometry(QtCore.QRect(0, 0, 170, 31))
font = QtGui.QFont()
font.setFamily("DejaVu Sans")
font.setPointSize(12)
self.label_36.setFont(font)
self.label_36.setAutoFillBackground(True)
self.label_36.setFrameShape(QtWidgets.QFrame.Box)
self.label_36.setFrameShadow(QtWidgets.QFrame.Sunken)
self.label_36.setAlignment(QtCore.Qt.AlignCenter)
self.label_36.setObjectName("label_36")
self.frame_QF1toSF1 = QtWidgets.QFrame(self.Main)
self.frame_QF1toSF1.setGeometry(QtCore.QRect(230, 70, 101, 81))
self.frame_QF1toSF1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_QF1toSF1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_QF1toSF1.setObjectName("frame_QF1toSF1")
self.line = QtWidgets.QFrame(self.frame_QF1toSF1)
self.line.setGeometry(QtCore.QRect(0, 13, 51, 16))
self.line.setFrameShadow(QtWidgets.QFrame.Plain)
self.line.setLineWidth(2)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setObjectName("line")
self.line_3 = QtWidgets.QFrame(self.frame_QF1toSF1)
self.line_3.setGeometry(QtCore.QRect(50, 20, 3, 46))
self.line_3.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_3.setLineWidth(2)
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setObjectName("line_3")
self.line_6 = QtWidgets.QFrame(self.frame_QF1toSF1)
self.line_6.setGeometry(QtCore.QRect(51, 56, 52, 20))
self.line_6.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_6.setLineWidth(2)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setObjectName("line_6")
self.frame_QF3toSF2 = QtWidgets.QFrame(self.Main)
self.frame_QF3toSF2.setGeometry(QtCore.QRect(230, 310, 101, 81))
self.frame_QF3toSF2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_QF3toSF2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_QF3toSF2.setObjectName("frame_QF3toSF2")
self.line_2 = QtWidgets.QFrame(self.frame_QF3toSF2)
self.line_2.setGeometry(QtCore.QRect(0, 13, 51, 16))
self.line_2.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_2.setLineWidth(2)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setObjectName("line_2")
self.line_7 = QtWidgets.QFrame(self.frame_QF3toSF2)
self.line_7.setGeometry(QtCore.QRect(50, 20, 3, 46))
self.line_7.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_7.setLineWidth(2)
self.line_7.setFrameShape(QtWidgets.QFrame.VLine)
self.line_7.setObjectName("line_7")
self.line_8 = QtWidgets.QFrame(self.frame_QF3toSF2)
self.line_8.setGeometry(QtCore.QRect(51, 56, 52, 20))
self.line_8.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_8.setLineWidth(2)
self.line_8.setFrameShape(QtWidgets.QFrame.HLine)
self.line_8.setObjectName("line_8")
self.frame_QF4toSF2 = QtWidgets.QFrame(self.Main)
self.frame_QF4toSF2.setGeometry(QtCore.QRect(230, 387, 101, 81))
self.frame_QF4toSF2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_QF4toSF2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_QF4toSF2.setObjectName("frame_QF4toSF2")
self.line_9 = QtWidgets.QFrame(self.frame_QF4toSF2)
self.line_9.setGeometry(QtCore.QRect(0, 56, 51, 16))
self.line_9.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_9.setLineWidth(2)
self.line_9.setFrameShape(QtWidgets.QFrame.HLine)
self.line_9.setObjectName("line_9")
self.line_10 = QtWidgets.QFrame(self.frame_QF4toSF2)
self.line_10.setGeometry(QtCore.QRect(50, 19, 3, 46))
self.line_10.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_10.setLineWidth(2)
self.line_10.setFrameShape(QtWidgets.QFrame.VLine)
self.line_10.setObjectName("line_10")
self.line_11 = QtWidgets.QFrame(self.frame_QF4toSF2)
self.line_11.setGeometry(QtCore.QRect(51, 10, 52, 20))
self.line_11.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_11.setLineWidth(2)
self.line_11.setFrameShape(QtWidgets.QFrame.HLine)
self.line_11.setObjectName("line_11")
self.frame_QF2toSF1 = QtWidgets.QFrame(self.Main)
self.frame_QF2toSF1.setGeometry(QtCore.QRect(230, 147, 101, 81))
self.frame_QF2toSF1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_QF2toSF1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_QF2toSF1.setObjectName("frame_QF2toSF1")
self.line_12 = QtWidgets.QFrame(self.frame_QF2toSF1)
self.line_12.setGeometry(QtCore.QRect(0, 56, 51, 16))
self.line_12.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_12.setLineWidth(2)
self.line_12.setFrameShape(QtWidgets.QFrame.HLine)
self.line_12.setObjectName("line_12")
self.line_13 = QtWidgets.QFrame(self.frame_QF2toSF1)
self.line_13.setGeometry(QtCore.QRect(50, 19, 3, 46))
self.line_13.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_13.setLineWidth(2)
self.line_13.setFrameShape(QtWidgets.QFrame.VLine)
self.line_13.setObjectName("line_13")
self.line_14 = QtWidgets.QFrame(self.frame_QF2toSF1)
self.line_14.setGeometry(QtCore.QRect(51, 10, 52, 20))
self.line_14.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_14.setLineWidth(2)
self.line_14.setFrameShape(QtWidgets.QFrame.HLine)
self.line_14.setObjectName("line_14")
self.frame_SF2toF1 = QtWidgets.QFrame(self.Main)
self.frame_SF2toF1.setGeometry(QtCore.QRect(501, 266, 81, 136))
self.frame_SF2toF1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_SF2toF1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_SF2toF1.setObjectName("frame_SF2toF1")
self.line_15 = QtWidgets.QFrame(self.frame_SF2toF1)
self.line_15.setGeometry(QtCore.QRect(0, 117, 51, 16))
self.line_15.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_15.setLineWidth(2)
self.line_15.setFrameShape(QtWidgets.QFrame.HLine)
self.line_15.setObjectName("line_15")
self.line_16 = QtWidgets.QFrame(self.frame_SF2toF1)
self.line_16.setGeometry(QtCore.QRect(50, 19, 3, 107))
self.line_16.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_16.setLineWidth(2)
self.line_16.setFrameShape(QtWidgets.QFrame.VLine)
self.line_16.setObjectName("line_16")
self.line_17 = QtWidgets.QFrame(self.frame_SF2toF1)
self.line_17.setGeometry(QtCore.QRect(51, 10, 33, 20))
self.line_17.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_17.setLineWidth(2)
self.line_17.setFrameShape(QtWidgets.QFrame.HLine)
self.line_17.setObjectName("line_17")
self.frame_SF2toF1_2 = QtWidgets.QFrame(self.Main)
self.frame_SF2toF1_2.setGeometry(QtCore.QRect(501, 132, 81, 136))
self.frame_SF2toF1_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_SF2toF1_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_SF2toF1_2.setObjectName("frame_SF2toF1_2")
self.line_18 = QtWidgets.QFrame(self.frame_SF2toF1_2)
self.line_18.setGeometry(QtCore.QRect(0, 11, 51, 16))
self.line_18.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_18.setLineWidth(2)
self.line_18.setFrameShape(QtWidgets.QFrame.HLine)
self.line_18.setObjectName("line_18")
self.line_19 = QtWidgets.QFrame(self.frame_SF2toF1_2)
self.line_19.setGeometry(QtCore.QRect(50, 18, 3, 109))
self.line_19.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_19.setLineWidth(2)
self.line_19.setFrameShape(QtWidgets.QFrame.VLine)
self.line_19.setObjectName("line_19")
self.line_20 = QtWidgets.QFrame(self.frame_SF2toF1_2)
self.line_20.setGeometry(QtCore.QRect(51, 116, 33, 20))
self.line_20.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_20.setLineWidth(2)
self.line_20.setFrameShape(QtWidgets.QFrame.HLine)
self.line_20.setObjectName("line_20")
self.BackLabel.raise_()
self.Button2.raise_()
self.BarLabel3.raise_()
self.Button1.raise_()
self.BarLabel1.raise_()
self.BarLabel2.raise_()
self.QF2.raise_()
self.QF1.raise_()
self.QF3.raise_()
self.QF4.raise_()
self.SF1.raise_()
self.SF2.raise_()
self.label.raise_()
self.FinalMatchFrame.raise_()
self.ChampionsFrame.raise_()
self.frame_QF1toSF1.raise_()
self.frame_QF3toSF2.raise_()
self.frame_QF4toSF2.raise_()
self.frame_QF2toSF1.raise_()
self.frame_SF2toF1.raise_()
self.frame_SF2toF1_2.raise_()
self.verticalLayout.addWidget(self.Main)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
    """Install all user-visible strings, routed through Qt's translation layer.

    NOTE(review): this file looks pyuic-generated; hand edits are normally
    lost when the .ui file is recompiled -- confirm before relying on them.
    """
    _translate = QtCore.QCoreApplication.translate
    Form.setWindowTitle(_translate("Form", "Form"))
    # (widget, text) pairs, applied in the original setText order.
    labelled = [
        (self.Button2, "Quit Tournament"),
        (self.BarLabel3, "Quarter-Finals"),
        (self.Button1, "Play Next Match"),
        (self.BarLabel1, "Final"),
        (self.BarLabel2, "Semi-Finals"),
        (self.label_5, "Player 4"),
        (self.label_6, "0"),
        (self.label_7, "0"),
        (self.label_8, "Player 3"),
        (self.label_9, "Player 2"),
        (self.label_10, "0"),
        (self.label_11, "0"),
        (self.label_12, "Player 1"),
        (self.label_13, "Player 6"),
        (self.label_14, "0"),
        (self.label_15, "0"),
        (self.label_16, "Player 5"),
        (self.label_17, "Player 8"),
        (self.label_18, "0"),
        (self.label_19, "0"),
        (self.label_20, "Player 7"),
        (self.label_21, "QF 2 Winner"),
        (self.label_22, "0"),
        (self.label_23, "0"),
        (self.label_24, "QF 1 Winner"),
        (self.label_25, "QF 4 Winner"),
        (self.label_26, "0"),
        (self.label_27, "0"),
        (self.label_28, "QF 3 Winner"),
        (self.label, "Semi-Final 2"),
        (self.label_29, "SF 2 Winner"),
        (self.label_30, "0"),
        (self.label_31, "0"),
        (self.label_32, "SF 1 Winner"),
        (self.label_33, "Winner's Name"),
        (self.label_36, "Champion"),
    ]
    for widget, text in labelled:
        widget.setText(_translate("Form", text))
| tabsoverspaces/ReactionGame | scripts/bracket_ui.py | Python | mit | 32,901 |
"""Tornado handlers for kernel specifications.
Preliminary documentation at https://github.com/ipython/ipython/wiki/IPEP-25%3A-Registry-of-installed-kernels#rest-api
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
import json
import os
pjoin = os.path.join
from tornado import web
from ...base.handlers import APIHandler, json_errors
from ...utils import url_path_join
def kernelspec_model(handler, name):
    """Load a KernelSpec by name and return the REST API model.

    The model maps ``name``/``spec``/``resources``; ``resources`` holds URLs
    (under the handler's base URL) for kernel.js/kernel.css and any logo-*
    files found in the spec's resource directory.
    """
    spec = handler.kernel_spec_manager.get_kernel_spec(name)
    resource_dir = spec.resource_dir
    resources = {}
    # Well-known per-kernel resources, included only if present on disk.
    for resource in ('kernel.js', 'kernel.css'):
        if os.path.exists(pjoin(resource_dir, resource)):
            resources[resource] = url_path_join(
                handler.base_url,
                'kernelspecs',
                name,
                resource
            )
    # Logo files are keyed by their basename without extension (e.g. logo-64x64).
    for logo_file in glob.glob(pjoin(resource_dir, 'logo-*')):
        fname = os.path.basename(logo_file)
        stem, _ = os.path.splitext(fname)
        resources[stem] = url_path_join(
            handler.base_url,
            'kernelspecs',
            name,
            fname
        )
    return {'name': name, 'spec': spec.to_dict(), 'resources': resources}
class MainKernelSpecHandler(APIHandler):
    """REST handler for the kernelspec collection (/api/kernelspecs)."""

    SUPPORTED_METHODS = ('GET', 'OPTIONS')

    @web.authenticated
    @json_errors
    def get(self):
        """Return the default kernel name plus a model per installed kernelspec."""
        specs = {}
        for kernel_name in self.kernel_spec_manager.find_kernel_specs():
            try:
                specs[kernel_name] = kernelspec_model(self, kernel_name)
            except Exception:
                # One broken kernelspec must not take down the whole listing.
                self.log.error("Failed to load kernel spec: '%s'", kernel_name, exc_info=True)
        model = {
            'default': self.kernel_manager.default_kernel_name,
            'kernelspecs': specs,
        }
        self.set_header("Content-Type", 'application/json')
        self.finish(json.dumps(model))

    @web.authenticated
    @json_errors
    def options(self):
        """Answer OPTIONS with an empty successful response."""
        self.finish()
class KernelSpecHandler(APIHandler):
    """REST handler for a single kernelspec (/api/kernelspecs/<name>)."""

    SUPPORTED_METHODS = ('GET',)

    @web.authenticated
    @json_errors
    def get(self, kernel_name):
        """Return the model for *kernel_name*; 404 if it is not installed."""
        try:
            model = kernelspec_model(self, kernel_name)
        except KeyError:
            # get_kernel_spec raises KeyError for unknown kernel names.
            raise web.HTTPError(404, u'Kernel spec %s not found' % kernel_name)
        body = json.dumps(model)
        self.set_header("Content-Type", 'application/json')
        self.finish(body)
# URL to handler mappings
# Named group consumed by KernelSpecHandler.get(); kernel names are limited
# to word characters (\w+) here.
kernel_name_regex = r"(?P<kernel_name>\w+)"
default_handlers = [
    (r"/api/kernelspecs", MainKernelSpecHandler),
    (r"/api/kernelspecs/%s" % kernel_name_regex, KernelSpecHandler),
]
| bdh1011/wau | venv/lib/python2.7/site-packages/notebook/services/kernelspecs/handlers.py | Python | mit | 2,798 |
#!/usr/bin/env python
"""Test template support in VTK-Python
VTK-python decides which template specializations
to wrap according to which ones are used in typedefs
and which ones appear as superclasses of other classes.
In addition, the wrappers are hard-coded to wrap the
vtkDenseArray and vtkSparseArray classes over a broad
range of types.
Created on May 29, 2011 by David Gobbi
"""
import sys
import exceptions
import vtk
from vtk.test import Testing
# Template parameters to instantiate: VTK type-name strings, native Python
# types (int/float/str), and vtkVariant.
arrayTypes = ['char', 'int8', 'uint8', 'int16', 'uint16',
              'int32', 'uint32', int, 'uint', 'int64', 'uint64',
              'float32', float, str, 'unicode', vtk.vtkVariant]
# Single-character type codes (array-module style) accepted as template args.
arrayCodes = ['c', 'b', 'B', 'h', 'H',
              'i', 'I', 'l', 'L', 'q', 'Q',
              'f', 'd']
class TestTemplates(Testing.vtkTest):
    """Exercise the wrapped template specializations: vtkDenseArray,
    vtkSparseArray, vtkArray.CreateArray and the vector/color templates."""

    def _check_set_get(self, a, t, bool_value):
        """Resize array *a* to one element and round-trip a sample value
        appropriate for template parameter *t* through SetValue/GetValue.

        *bool_value* is the sample used for boolean specializations (the
        dense and sparse tests historically used 1 and 0 respectively).
        """
        a.Resize(1)
        i = vtk.vtkArrayCoordinates(0)
        if t in ['bool', '?']:
            value = bool_value
        elif t in ['float32', 'float64', 'float', 'f', 'd']:
            value = 3.125
        elif t in ['char', 'c']:
            value = 'c'
        elif t in [str, 'str', 'unicode']:
            value = unicode("hello")
        elif t in ['vtkVariant', vtk.vtkVariant]:
            value = vtk.vtkVariant("world")
        else:
            value = 12
        a.SetValue(i, value)
        result = a.GetValue(i)
        self.assertEqual(value, result)

    def testDenseArray(self):
        """Test vtkDenseArray template"""
        # The per-type set/get body was previously duplicated verbatim in
        # testDenseArray and testSparseArray; factored into _check_set_get.
        for t in (arrayTypes + arrayCodes):
            self._check_set_get(vtk.vtkDenseArray[t](), t, 1)

    def testSparseArray(self):
        """Test vtkSparseArray template"""
        for t in (arrayTypes + arrayCodes):
            self._check_set_get(vtk.vtkSparseArray[t](), t, 0)

    def testArray(self):
        """Test array CreateArray"""
        o = vtk.vtkArray.CreateArray(vtk.vtkArray.DENSE, vtk.VTK_DOUBLE)
        self.assertEqual(o.__class__, vtk.vtkDenseArray[float])

    def testVector(self):
        """Test vector templates"""
        # make sure Rect inherits operators
        r = vtk.vtkRectf(0, 0, 2, 2)
        self.assertEqual(r[2], 2.0)
        c = vtk.vtkColor4ub(0, 0, 0)
        # Alpha channel defaults to 255.
        self.assertEqual(list(c), [0, 0, 0, 255])
        e = vtk.vtkVector['float32', 3]([0.0, 1.0, 2.0])
        self.assertEqual(list(e), [0.0, 1.0, 2.0])
        i = vtk.vtkVector3['i'](0)
        self.assertEqual(list(i), [0, 0, 0])
# Run the template tests through VTK's test harness when executed directly.
if __name__ == "__main__":
    Testing.main([(TestTemplates, 'test')])
| timkrentz/SunTracker | IMU/VTK-6.2.0/Common/DataModel/Testing/Python/TestTemplates.py | Python | mit | 4,490 |
import tornado.ioloop
#!! This example requires that the file /test/settings.py exists and defines the following constants.
from awsutils.tornado.sqsclient import SQSClient
from test.settings import access_key, secret_key
# Client for the us-east-1 SQS endpoint; plain HTTP because secure=False.
sqsclient = SQSClient(endpoint='sqs.us-east-1.amazonaws.com', access_key=access_key, secret_key=secret_key, secure=False)


def renderResult(data):
    """Callback invoked with the SQS response once sendMessage completes."""
    print("message received", data)


# Queue a test message; the callback fires from the IOLoop started below.
sqsclient.sendMessage(callback=renderResult, qName="test", messageBody="this is a test message")
tornado.ioloop.IOLoop.instance().start() | sanyi/awsutils | examples/tornado-sqs.py | Python | mit | 536 |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class TripadItem(scrapy.Item):
    """Item container for data scraped by the tripad project's spiders.

    Currently empty: no fields have been declared yet.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| acehanks/projects | tripadvisor_scrapy/tripad/items.py | Python | mit | 285 |
import os
from qmpy import *
from django.test import TestCase
# Module-level accumulator; not referenced anywhere in this chunk --
# TODO(review): confirm whether it is still used before removing.
peak_locations = []
class MiedemaTestCase(TestCase):
    """Checks for the Miedema-model formation-energy estimator."""

    def setUp(self):
        # Element data must be loaded before Miedema lookups can resolve.
        read_elements()

    def test_methods(self):
        """Exercise Miedema with string, Composition, and dict inputs."""
        ## test that it generally works
        self.assertEqual(Miedema("FeNi").energy, -0.03)
        # NOTE(review): duplicated assertion -- possibly checking repeated
        # evaluation is stable, possibly a copy-paste; confirm intent.
        self.assertEqual(Miedema("FeNi").energy, -0.03)
        c = Composition.get("LiBe")
        self.assertEqual(Miedema(c).energy, -0.08)
        self.assertEqual(Miedema({"Pt": 1, "Ti": 3}).energy, -0.76)
        ## test that non-metals are ignored
        self.assertEqual(Miedema("Fe2O3").energy, None)
        ## test that it is quantity invariant
        self.assertEqual(Miedema("Fe5Ni5").energy, -0.03)
class PDFTestCase(TestCase):
    """Placeholder for pair-distribution-function tests."""

    def test_distances(self):
        # TODO: not yet implemented.
        pass
class NearestNeighborTestCase(TestCase):
    """Nearest-neighbor counts for FCC, BCC and simple-cubic structures."""

    def setUp(self):
        read_elements()
        files_dir = os.path.join(INSTALL_PATH, "io", "files")
        # Load the three reference POSCAR structures shipped with qmpy.
        self.fcc, self.bcc, self.sc = (
            io.poscar.read(os.path.join(files_dir, "POSCAR_" + tag))
            for tag in ("FCC", "BCC", "SC"))

    def test_heuristic(self):
        """Default (heuristic) neighbor search: 12/8/6 for FCC/BCC/SC."""
        for structure, expected in ((self.fcc, 12), (self.bcc, 8), (self.sc, 6)):
            structure.find_nearest_neighbors()
            self.assertEqual(len(structure[0].neighbors), expected)

    def test_voronoi(self):
        """Voronoi neighbor search: BCC gives 14 by default, 8 with tol=5."""
        for structure, expected in ((self.fcc, 12), (self.bcc, 14), (self.sc, 6)):
            structure.find_nearest_neighbors(method="voronoi")
            self.assertEqual(len(structure[0].neighbors), expected)
        self.bcc.find_nearest_neighbors(method="voronoi", tol=5)
        self.assertEqual(len(self.bcc[0].neighbors), 8)
| wolverton-research-group/qmpy | qmpy/analysis/tests.py | Python | mit | 1,996 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import argparse
import collections
from functools import wraps
import logging
# We are assuming, that there is an already configured logger present
logger = logging.getLogger(__name__)
class CommandLine(object):
    """Create a command line interface for the application.

    Each decorated core method becomes a ``--name`` argument.
    Can call any core method as action.
    Careful: The function definition order is reflected in the cli.
    Can be reordered using the *weight* flag of the initializer.
    'Lighter' arguments will go first.

    NOTE(review): this module uses dict.iteritems(), so it is Python 2 only.
    """

    # Registry of all declared arguments, in declaration order (name -> CommandLine).
    arguments = collections.OrderedDict()

    @classmethod
    def handle(self, core, name):
        """Handle the command line arguments.

        Returns true if the gui is to be shown, this is controlled
        through the 'batch' argument.

        :param core: object whose methods are invoked as actions
        :param name: program name passed to ArgumentParser

        NOTE(review): the first parameter of this classmethod is named
        ``self`` although it receives the class -- confusing but harmless.
        """
        # weight -> list of arguments to dispatch at that weight.
        call_buckets = collections.defaultdict(list)
        # Build the ArgumentParser
        arg_parser = argparse.ArgumentParser(name)
        for name, arg in self.arguments.iteritems():
            arg_parser.add_argument(
                "--{}".format(name),
                # Build the kwargs dict, dropping entries that do not apply:
                # nargs only when positional metavars were declared; metavar
                # and type only for plain "store" actions.
                **{key: val for key, val in filter(lambda e: e is not None, [
                    ("nargs", len(arg.args)) if len(arg.args) > 0 else None,
                    ("metavar", arg.args) if arg.action == "store" else None,
                    ("type", arg.type) if arg.action == "store" else None,
                    ("default", arg.default),
                    ("action", arg.action),
                    ("help", arg.help)
                ])}
            )
            call_buckets[arg.weight].append(arg)
        # Add batch argument to suppress gui
        arg_parser.add_argument("--batch", "-b", "--no-gui",
                                help="Run in batch mode (Don't show the gui)",
                                action="store_true",
                                default=sys.flags.interactive)
        # Parse all arguments
        args = arg_parser.parse_args()
        # Check all actions
        logger.debug(call_buckets)
        # Dispatch buckets in ascending weight so 'lighter' arguments go first.
        call_order = sorted(call_buckets.keys())
        for weight in call_order:
            for arg in call_buckets[weight]:
                params = getattr(args, arg.name.replace("-", "_"))
                method = getattr(core, arg.method)
                # Only invoke when the user actually supplied the argument
                # (value present and different from its default).
                if params is not None and params != arg.default:
                    if isinstance(params, list):
                        method(*params)
                    else:
                        # NOTE(review): non-list values (e.g. store_true flags)
                        # invoke the method with NO arguments, discarding the
                        # parsed value -- confirm this is intended for scalar
                        # "store" arguments as well.
                        method()
        return not args.batch

    def __init__(self, name, *args, **flags):
        """The constructor for the CommandLine object.

        Accepts the same flags as the add_argument function of the
        ArgumentParser class.
        The *weight* flag can be used to reorder the execution of
        arguments. 'Lighter' commands will go first.

        :raises KeyError: if an argument with the same name was already declared.
        """
        self.name = name
        self.args = args
        self.help = flags.get("help", "")
        self.type = flags.get("type", str)
        self.default = flags.get("default", None)
        self.action = flags.get("action", "store")
        self.weight = flags.get("weight", 0)
        if self.name in CommandLine.arguments:
            raise KeyError(self.name)
        # Register on the class so handle() can see every declared argument.
        CommandLine.arguments[self.name] = self

    def __call__(self, func):
        """Decorator protocol: bind *func* as this argument's action.

        Falls back to the function docstring for the --help text.
        """
        if self.help == "":
            self.help = func.__doc__
        self.method = func.__name__

        @wraps(func)
        def wrapper(instance, *args, **kwargs):
            return func(instance, *args, **kwargs)
        return wrapper

    def __str__(self):
        return "--{} -> {}('{}')".format(self.name, self.method, "', '".join(self.args))

    __repr__ = __str__
| hastern/jelly | cli.py | Python | mit | 3,654 |
import re
from .common import InfoExtractor
class GamekingsIE(InfoExtractor):
    """Extractor for videos hosted on www.gamekings.tv."""
    _VALID_URL = r'http://www\.gamekings\.tv/videos/(?P<name>[0-9a-z\-]+)'
    _TEST = {
        u"url": u"http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/",
        u'file': u'20130811.mp4',
        # MD5 is flaky, seems to change regularly
        #u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3',
        u'info_dict': {
            u"title": u"Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review",
            u"description": u"Melle en Steven hebben voor de review een week in de rechtbank doorbracht met Phoenix Wright: Ace Attorney - Dual Destinies.",
        }
    }

    def _real_extract(self, url):
        # The slug after /videos/ doubles as the page identifier.
        name = re.match(self._VALID_URL, url).group('name')
        webpage = self._download_webpage(url, name)
        video_url = self._og_search_video_url(webpage)
        # The first numeric run in the og:video URL is the video id.
        video_id = re.search(r'[0-9]+', video_url).group(0)
        # Todo: add medium format
        video_url = video_url.replace(video_id, 'large/' + video_id)
        return {
            'id': video_id,
            'ext': 'mp4',
            'url': video_url,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
        }
| Grassboy/plugin.video.plurkTrend | youtube_dl/extractor/gamekings.py | Python | mit | 1,331 |
import collections
import re
#imports
class Jison:  # extends -- base class presumably injected by the generator; confirm
    """Generated LALR parser/lexer skeleton ported from Jison.

    NOTE(review): several members below look like an unfinished port of the
    JavaScript original (see inline notes throughout the class).
    """
    # Parser tables, filled in by the code generator.
    symbols = {}           # symbol name -> symbol object/number
    terminals = {}         # terminal index -> terminal description
    productions = {}       # grammar productions
    table = {}             # LALR action/goto table, indexed by state
    default_actions = {}   # state index -> default action (when unambiguous)
    version = '0.3.12'
    debug = False
    # Parser action kinds.
    action_none = 0
    action_shift = 1
    action_deduce = 2
    action_accept = 3
    # NOTE(review): class-level mutable list, shared by all instances -- confirm.
    unput_stack = []
def trace(self):
    """Debug trace hook; a no-op placeholder in this generated template."""
def __init__(self):
    """Setup Parser"""
    # Generator marker: presumably replaced with parser setup code when the
    # parser is generated -- confirm against the build tooling.
    """@@PARSER_INJECT@@"""
def parser_perform_action(self, yy, yystate, s, o):
    # Generator marker: the semantic-action dispatch body is injected here.
    """@@ParserPerformAction@@"""
def parser_lex(self):
    """Fetch the next token from the lexer, falling back to the EOF symbol."""
    token = self.lexerLex()
    #end = 1
    if token is not None:
        return token
    # NOTE(review): ``self.Symbols`` (capital S) does not match the class
    # attribute ``symbols``, and ``lexerLex`` is not defined in this chunk --
    # presumably both are supplied by the generator; confirm.
    return self.Symbols["end"]
def parse_error(self, _str='', _hash=None):
    """Report a parser error by raising Exception with message *_str*.

    ``_hash`` carries extra context in Jison's convention; unused here.
    """
    raise Exception(_str)
def lexer_error(self, _str='', _hash=None):
    """Report a lexer error by raising Exception with message *_str*.

    ``_hash`` carries extra context in Jison's convention; unused here.
    """
    raise Exception(_str)
def parse(self, _input):
    """Run the LALR parse loop over *_input*.

    NOTE(review): this method appears to be a partially machine-translated
    port of the JavaScript Jison parser: the loop only builds the error
    path and never shifts/reduces/accepts, and several statements are not
    valid Python usage (see inline notes). Verify before relying on it.
    """
    if self.table is None:
        raise Exception("Empty ")
    self.eof = ParserSymbol("Eof", 1)
    first_action = ParserAction(0, self.table[0])
    first_cached_action = ParserCachedAction(first_action)
    # NOTE(review): deque() expects an iterable; passing a single object /
    # None will raise TypeError at runtime -- confirm intended seeding.
    stack = collections.deque(first_cached_action)
    stack_count = 1
    vstack = collections.deque(None)
    vstach_count = 1  # NOTE(review): apparent typo for "vstack_count"; unused below.
    yy = None
    _yy = None
    recovering = 0  # non-zero while recovering from a parse error
    symbol = None
    action = None
    err_str = ''
    pre_error_symbol = None
    state = None
    self.set_input(_input)
    while True:
        # retrieve state number from top of stack
        # NOTE(review): with one element, stack[1] is out of range -- confirm
        # the indexing convention intended here.
        state = stack[stack_count].action.state
        # use default actions if available
        if state is not None and self.default_actions[state.index]:
            action = self.default_actions[state.index]
        else:
            if symbol is None:
                symbol = self.parser_lex()
            # read action for current state and first input
            if state is not None:
                action = state.actions[symbol.index]
            else:
                action = None
        if action is None:
            if recovering is 0:
                # Report error
                expected = []
                actions = self.table[state.index].actions
                for p in actions:
                    if self.terminals[p] is not None and p > 2:
                        # NOTE(review): Python lists have no .push() -- JS leftover.
                        expected.push(self.terminals[p].name)
                if symbol.index in self.terminals:
                    got = self.terminals[symbol.index].name
                else:
                    got = "NOTHING"
                # NOTE(review): concatenates str with self.yy.line_no (likely an
                # int) and references self.show, which is not defined here.
                err_str = "Parser error on line " + self.yy.line_no + ":\n" + self.show + "\nExpecting " + (", ".join(expected)) + ". got '" + got + "'"
                # NOTE(review): parse_error is a bound method, so passing self
                # explicitly supplies it twice; ParserError is not defined in
                # this chunk -- confirm.
                self.parse_error(self, err_str, ParserError())
# Jison generated lexer
# NOTE(review): these are class-level attributes, so the mutable ones
# (condition_stack, rules, conditions) are shared between instances until
# rebound per instance (set_input rebinds some) -- confirm this is intended.
eof = None
yy = None
match = ''                # text matched for the current token
condition_stack = collections.deque()  # start-condition stack; set_input pushes 'INITIAL'
rules = {}                # lexer rules (injected by the generator)
conditions = {}           # start conditions (injected by the generator)
done = False              # True once all input has been consumed
less = None
_more = False             # set by more(): keep the current match for the next rule
input = None              # NOTE(review): dead -- overridden by the input() method defined below
offset = None             # absolute character offset into the input
ranges = None             # when not None, locations also carry (start, end) ranges
flex = False
line_expression = re.compile("(?:\r\n?|\n).*")  # a newline plus the rest of that line
def set_input(self, _input):
    """Reset the lexer to scan the given input string from the start."""
    # NOTE(review): this rebinds self.input, shadowing the input() method on
    # the class -- see the note on that method.
    self.input = InputReader(_input)
    self._more = self.less = self.done = False
    self.yy = ParserValue()#
    self.condition_stack.clear()
    self.condition_stack.append('INITIAL')
    if self.ranges is not None:
        self.yy.loc = ParserLocation()
        self.yy.loc.set_range(ParserRange(0, 0))
    else:
        # Same fresh location; only the range tracking differs above.
        self.yy.loc = ParserLocation()
    self.offset = 0
def input(self):
    """Consume one character, updating text/line/column book-keeping.

    NOTE(review): this method shares its name with the ``input`` attribute
    assigned in ``set_input`` (an InputReader); after ``set_input`` runs the
    instance attribute shadows this bound method -- confirm which one
    callers expect.
    """
    ch = self.input.ch()
    self.yy.text += ch
    self.yy.leng += 1
    self.offset += 1
    self.match += ch
    # A single character either is a newline (advance line counters) or is
    # an ordinary column advance.
    lines = self.line_expression.match(ch)
    if lines is not None:
        self.yy.line_no += 1
        self.yy.loc.last_line += 1
    else:
        self.yy.loc.last_column += 1
    if self.ranges is not None:
        self.yy.loc.range.y += 1
    return ch
def unput(self, ch):
    """Push ``ch`` back onto the input, rewinding the book-keeping.

    NOTE(review): several issues preserved as-is pending a decision on the
    intended semantics:
      * ``self.unput_stack`` is never initialised in this class, and
        ``push`` is not a deque/list method (``append`` is);
      * ``yy.text`` keeps the FIRST ``_len - 1`` characters, which looks
        like it should instead drop the LAST ``_len`` characters;
      * ``yy.loc.range(...)`` calls the range attribute instead of
        ``set_range``.
    """
    yy = ParserValue()#
    _len = len(ch)
    lines = self.line_expression.split(ch)
    lines_count = len(lines)
    self.input.un_ch(_len)
    yy.text = self.yy.text[0: _len - 1]
    self.offset -= _len
    old_lines = self.line_expression.split(self.match)
    old_lines_count = len(old_lines)
    self.match = self.match[0:len(self.match) - 1]
    if lines_count - 1 > 0:
        yy.line_no = self.yy.line_no - lines_count - 1
    r = self.yy.loc.range
    old_lines_length = old_lines[old_lines_count - lines_count] if old_lines[old_lines_count - lines_count] is not None else 0
    yy.loc = ParserLocation( self.yy.loc.first_line, self.yy.line_no, self.yy.loc.first_column, self.yy.loc.first_line, None)#TODO
    if self.ranges is not None:
        yy.loc.range(ParserRange(r.x, r.x + self.yy.leng - _len))
    self.unput_stack.push(yy)
def more(self):
    """Keep the current match: the next rule's text will be appended to it."""
    self._more = True
def past_input(self):
    """Return up to the last 20 characters of already-consumed input.

    Newlines are stripped; when more than 20 characters were consumed the
    result is prefixed with '...'.
    """
    matched = self.input.to_string()
    past = matched[0:len(matched) - len(self.match)]
    # Bug fix: 'past[-20]' indexed a single character (and raised IndexError
    # on short input); a slice of the trailing 20 characters was intended.
    result = past[-20:].replace('\n', '')
    if len(past) > 20:
        return '...' + result
    return result
def upcoming_input(self):
    """Return up to 20 characters of upcoming input for error display.

    Starts from the current match text; the result is newline-stripped and
    suffixed with '...' when truncated.

    Bug fixes: 'false' was an undefined name (NameError on every call),
    the local was unbound once the lexer was done, and the truncating slice
    kept the wrong end of the string ('next[:-20]' instead of the first 20
    characters).
    """
    upcoming = self.match
    if not self.done and len(upcoming) < 20:
        upcoming += self.input.to_string()[:20 - len(upcoming)]
    if len(upcoming) > 20:
        upcoming = upcoming[:20] + '...'
    return upcoming.replace('\n', '')
def show_position(self):
    """Render consumed and upcoming input with a caret under the position."""
    consumed = self.past_input()
    caret_line = ('-' * len(consumed)) + '^'
    return consumed + self.upcoming_input() + '\n' + caret_line
def next(self):
    """Advance the lexer by one token.

    NOTE(review): this method appears to be an unfinished port of the
    JavaScript jison template: ``yy`` and ``this`` are undefined names,
    ``true``/``false`` are JS literals (NameError in Python),
    ``self.more`` is a bound method so ``self.more is false`` can never be
    True, and the method ends after fetching the current rules without
    using them.
    """
    if len(self.unput_stack) > 0:
        this.yy = yy
    if self.done:
        return self.eof
    if self.input.done:
        self.done = true
    if self.more is false:
        self.yy.text = ''
        self.match = ''
    rules = self.current_rules()
class ParserLocation:
    """Source span: first/last line and column, plus an optional char range."""

    first_line = 1
    last_line = 0
    first_column = 1
    last_column = 0
    range = None

    def __init__(self, first_line = 1, last_line = 0, first_column = 1, last_column = 0):
        """Capture the four span coordinates."""
        self.first_line, self.last_line = first_line, last_line
        self.first_column, self.last_column = first_column, last_column

    def set_range(self, range):
        """Attach a character-offset range to this location."""
        self.range = range
class ParserValue:
    # Semantic-value record shared between lexer and parser (jison's 'yy').
    leng = 0      # length of the current match
    loc = None    # ParserLocation of the current match
    line_no = 0   # current line number
    text = None   # matched text
class ParserCachedAction:
    """Parse-stack entry: an action paired with the symbol that produced it."""

    def __init__(self, action, symbol=None):
        """Store the action and, optionally, its originating symbol."""
        self.action, self.symbol = action, symbol
class ParserAction:
    """A parse-table entry: an action code plus optional target state/symbol."""

    action = None
    state = None
    symbol = None

    def __init__(self, action, state=None, symbol=None):
        """Capture the action code and its optional state/symbol."""
        self.action, self.state, self.symbol = action, state, symbol
class ParserSymbol:
    """A grammar symbol (terminal or non-terminal) and its action tables.

    Bug fix: ``symbols`` and ``symbols_by_name`` were class-level dicts, so
    ``add_action`` on any instance mutated tables shared by EVERY symbol;
    they are now initialised per instance.
    """

    name = None
    Index = 0
    index = -1

    def __init__(self, name, index):
        self.name = name
        self.index = index
        # Per-instance action tables (previously shared class attributes).
        self.symbols = {}
        self.symbols_by_name = {}

    def add_action(self, parser_action):
        """Register an action under both its numeric index and its name."""
        self.symbols[parser_action.index] = parser_action
        self.symbols_by_name[parser_action.name] = parser_action
class ParserError:
    """Diagnostic payload describing a parse failure."""

    text = None
    state = None
    symbol = None
    line_no = 0
    loc = None
    expected = None

    def __init__(self, text, state, symbol, line_no, loc, expected):
        """Capture the failure context verbatim."""
        (self.text, self.state, self.symbol,
         self.line_no, self.loc, self.expected) = (
            text, state, symbol, line_no, loc, expected)
class LexerError:
    """Diagnostic payload describing a lexing failure.

    Bug fix: ``__init__`` evaluated ``self.line_no`` without assigning it,
    silently discarding the ``line_no`` argument.
    """

    text = None
    token = None
    line_no = 0

    def __init__(self, text, token, line_no):
        self.text = text
        self.token = token
        self.line_no = line_no
class ParserState:
    """One parse-table state: its index plus actions keyed by symbol index.

    Bug fix: ``actions`` was only a class-level (shared, mutable) list; each
    instance now gets its own list in ``__init__``.
    """

    index = 0
    actions = []

    def __init__(self, index):
        self.index = index
        self.actions = []

    def set_actions(self, actions):
        """Replace this state's action table."""
        self.actions = actions
class ParserRange:
    """Character-offset pair: start ``x`` and end ``y``."""

    x = 0
    y = 0

    def __init__(self, x, y):
        """Store the two offsets."""
        self.x, self.y = x, y
class InputReader:
    """Cursor over the raw input string that records every consumed match.

    Bug fixes: ``matches`` was a class-level list shared by every instance
    (now per-instance), and ``match()`` passed the integer ``position`` to
    ``re.search`` instead of the input text, raising TypeError on any call.
    """

    input = None
    length = 0
    done = False
    position = 0

    def __init__(self, _input):
        self.input = _input
        self.length = len(_input)
        # Per-instance history of consumed chunks (previously shared).
        self.matches = []

    def add_match(self, match):
        """Record a consumed chunk and advance the cursor."""
        self.matches.append(match)
        self.position += len(match)
        self.done = (self.position >= self.length)

    def ch(self):
        """Consume and return the single character at the cursor."""
        ch = self.input[self.position]
        self.add_match(ch)
        return ch

    def un_ch(self, ch_length):
        """Move the cursor back ``ch_length`` characters (floored at 0)."""
        self.position -= ch_length
        self.position = max(0, self.position)
        self.done = (self.position >= self.length)

    def substring(self, start, end):
        # NOTE(review): 'end' is offset from the already-shifted 'start';
        # preserved as-is -- verify against the template's callers.
        start = self.position if start == 0 else start + self.position
        end = self.length if end == 0 else start + end
        return self.input[start:end]

    def match(self, rule):
        """Return the text matched by ``rule`` in the unread input, or None."""
        matches = re.search(rule, self.input[self.position:])
        if matches is not None:
            return matches.group()
        return None

    def to_string(self):
        """Return everything consumed so far."""
        return ''.join(self.matches)
class Printer:
    """Collects SVG fragments and wraps them in a complete <svg> document."""

    def __init__(self):
        self._output = ''

    def print_output(self, output):
        """Append a raw SVG fragment to the document body."""
        self._output = self._output + output

    def print_line(self, x1, y1, x2, y2, color=0, width=1):
        """Append a line segment from (x1, y1) to (x2, y2)."""
        self.print_output(_svg_line(x1, y1, x2, y2, color=color, width=width))

    def print_circle(self, x, y, r, color=0, width=1, border_color=0):
        """Append a circle of radius ``r`` centred on (x, y)."""
        self.print_output(_svg_circle(x, y, r, color=color, width=width, border_color=border_color))

    def print_square(self, x, y, a, color=0, width=1, border_color=0):
        """Append an axis-aligned square of side ``a`` at (x, y)."""
        self.print_output(_svg_rectangle(x, y, a, a, color=color, width=width, border_color=border_color))

    def print_text(self, x, y, text, color=0, font_size=12):
        """Append a text label at (x, y)."""
        self.print_output(_svg_text(x, y, text, color=color, font_size=font_size))

    def to_file(self, filename):
        """Write the rendered SVG document to ``filename``."""
        with open(filename, 'w') as f:
            f.write(str(self))

    def __str__(self):
        """Render the accumulated fragments inside the <svg> envelope."""
        return """<svg width="100%" height="100%" version="1.1" xmlns="http://www.w3.org/2000/svg">
    {}
    </svg>
    """.format(self._output)
def _svg_line(x1, y1, x2, y2, color, width):
    """Return an SVG <line> element with round caps."""
    stroke = _svg_color(color)
    template = '<line x1="{}" y1="{}" x2="{}" y2="{}" style="stroke-linecap:round;stroke:{};stroke-width:{};" />\n'
    return template.format(x1, y1, x2, y2, stroke, width)
def _svg_circle(x, y, r, color, width, border_color):
    """Return an SVG <circle> element.

    Bug fix: the stroke colour was computed from ``color`` instead of
    ``border_color``, so the border always matched the fill (unlike the
    sibling ``_svg_rectangle``, which converts ``border_color`` correctly).
    """
    color = _svg_color(color)
    border_color = _svg_color(border_color)
    return '<circle cx="{}" cy="{}" r="{}" style="fill:{}; stroke:{}; stroke-width:{};" />\n'.format(x, y, r, color, border_color, width)
def _svg_rectangle(x, y, a, b, color, width, border_color):
    """Return an SVG <rect> element of width ``a`` and height ``b``."""
    fill = _svg_color(color)
    stroke = _svg_color(border_color)
    template = ('<rect x="{}" y="{}" width="{}" height="{}" '
                'style="fill:{}; stroke:{}; stroke-width:{};" />\n')
    return template.format(x, y, a, b, fill, stroke, width)
def _svg_text(x, y, text, color, font_size):
    """Return an SVG <text> element in the Nimbus Sans L face."""
    fill = _svg_color(color)
    template = '<text x="{}" y="{}" font-family="Nimbus Sans L" font-size="{}" fill="{}">{}</text>\n'
    return template.format(x, y, font_size, fill, text)
def _svg_color(color):
if isinstance(color, str):
return color
return 'rgb({}, {}, {})'.format(color, color, color)
| adaptive-learning/proso-apps | proso/svg.py | Python | mit | 2,180 |
import pytest
from flask_httpauth import HTTPBasicAuth
from kapsi_git_manager.kgm import app
from kapsi_git_manager import authentication as auth
# Credentials that must match an entry in the test credentials file.
password = "password"
username = "username"
def test_auth_simple():
    """Smoke test: the auth module must expose an HTTPBasicAuth instance."""
    assert isinstance(auth.auth, HTTPBasicAuth)
def test_verify_correct_password():
    """verify_pw accepts the known-good username/password combination."""
    with app.app_context():
        # Plain truthiness assert instead of '== True' (flake8 E712).
        assert auth.verify_pw(username, password)
def test_verify_wrong_password():
    """verify_pw rejects an unknown username/password combination."""
    with app.app_context():
        # 'not ...' instead of '== False' (flake8 E712).
        assert not auth.verify_pw("winnie", "pooh")
| hanshoi/kapsi-git-manager | tests/test_authentication.py | Python | mit | 761 |
# -*- coding: iso-8859-1 -*-
ur"""AsyncIO objects wrap the Win32 Overlapped API. They are instantiated by
passing a handle which has been opened for Overlapped IO. They can be waited
on by the functions in the :mod:`ipc` module and are True when complete,
False otherwise.
"""
import pywintypes
import winerror
import win32event
import win32file
from winsys import constants, core, exc, ipc, utils
class x_asyncio (exc.x_winsys):
    # Base exception for this module; specific Win32 error codes can be
    # mapped to subclasses via WINERROR_MAP below.
    pass
# Win32 error code -> exception class mapping (no specialisations yet).
WINERROR_MAP = {
}
# All wrapped Win32 calls raise x_asyncio (or a mapped subclass) on failure.
wrapped = exc.wrapper (WINERROR_MAP, x_asyncio)
class AsyncIO (core._WinSysObject):
    # Wraps an OVERLAPPED structure plus a manual-reset event so overlapped
    # IO can be waited on alongside other winsys ipc objects.
    # NOTE: Python 2 module (ur"" literals, __nonzero__).

    def __init__ (self):
        core._WinSysObject.__init__ (self)
        # Manual-reset: once signalled (IO complete) the event stays set.
        self.event = ipc.event (needs_manual_reset=True)
        self.overlapped = wrapped (win32file.OVERLAPPED)
        self.overlapped.hEvent = self.event.pyobject ()

    def pyobject (self):
        ur"""Return the pyobject of the underlying event so that this object can
        be waited on by the :func:`ipc.all` or :func:`ipc.any` functions
        """
        return self.event.pyobject ()

    def is_complete (self):
        ur":returns: `True` if the IO has completed"
        return self.event.isSet ()

    # Truth value mirrors completion (Python 2 bool protocol).
    __nonzero__ = is_complete

    def wait (self):
        ur"""Wait for the IO to complete in such a way that the wait can
        be interrupted by a KeyboardInterrupt.
        """
        # Poll in short slices so Ctrl-C is serviced between waits.
        while not self.event.wait (timeout_s=0.5):
            pass
class AsyncHandler (AsyncIO):
    # AsyncIO bound to a specific handle opened for overlapped IO.

    BUFFER_SIZE = 4096

    def __init__ (self, handle, buffer_size=BUFFER_SIZE):
        # NOTE(review): buffer_size is accepted but never stored or used
        # here -- subclasses rely on their own BUFFER_SIZE instead.
        AsyncIO.__init__ (self)
        self.handle = handle
class AsyncWriter (AsyncHandler):
    # Starts an overlapped WriteFile immediately on construction; completion
    # is observed via the inherited event.

    def __init__ (self, handle, data):
        AsyncHandler.__init__ (self, handle)
        self.data = data
        wrapped (win32file.WriteFile, self.handle, data, self.overlapped)
class AsyncReader (AsyncHandler):
    # Starts an overlapped ReadFile immediately on construction; call
    # data() once is_complete() is true to retrieve the bytes read.

    BUFFER_SIZE = 4096

    def __init__ (self, handle):
        AsyncHandler.__init__ (self, handle)
        self.buffer = win32file.AllocateReadBuffer (self.BUFFER_SIZE)
        wrapped (win32file.ReadFile, self.handle, self.buffer, self.overlapped)

    def data (self):
        ur"""Wait until the IO has completed and return the data from the read. This
        is expected to be called after is_complete is true.
        """
        # GetOverlappedResult(..., True) blocks until completion and yields
        # the number of bytes actually read.
        n_bytes = win32file.GetOverlappedResult (self.handle, self.overlapped, True)
        return str (self.buffer)[:n_bytes]
| one2pret/winsys | winsys/asyncio.py | Python | mit | 2,311 |
import asyncio
import demjson
from bot import user_steps, sender, get, downloader
from message import Message
# SoundCloud API client id; must be filled in before the plugin can work.
client_id = ''#YOUR CLIENT ID
async def search(query):
    """Query the SoundCloud search API; return [title, permalink_url] pairs for tracks."""
    # NOTE(review): 'guest_client_id' is declared but never assigned here.
    global guest_client_id
    url = ('https://api.soundcloud.com/search?q=%s&facet=model&limit=30&offset=0&linked_partitioning=1&client_id=' + client_id) % query
    payload = demjson.decode(await get(url))
    return [[entity['title'], entity['permalink_url']]
            for entity in payload['collection']
            if entity['kind'] == 'track']
async def getfile(url):
    """Resolve a SoundCloud permalink to a streamable URL.

    Bug fix: ``.format(url)`` previously bound to ``client_id`` (operator
    precedence), so the track URL was never substituted and a literal '{}'
    was sent to the API.  The format call now applies to the full URL.
    """
    resolve_url = (
        "https://api.soundcloud.com/resolve?url={}&client_id=" + client_id
    ).format(url)
    r = demjson.decode(await get(resolve_url))
    return r['stream_url'] + "?client_id=" + client_id
async def run(message, matches, chat_id, step):
    """Conversation handler for the /sc command.

    Step 0: search SoundCloud for the query and offer a keyboard of titles.
    Step 1: download the selected track and send it back as audio.

    Bug fix: the redundant ``@asyncio.coroutine`` decorator was removed --
    it is a no-op on an ``async def`` function, deprecated since Python 3.8
    and removed in Python 3.11 (where it would break module import).
    """
    from_id = message['from']['id']
    if step == 0:
        await sender(
            Message(chat_id).set_text("*Please Wait*\nI'm Searching all Music with this name", parse_mode="markdown"))
        user_steps[from_id] = {"name": "Soundcloud", "step": 1, "data": {}}
        i = 0
        show_keyboard = {'keyboard': [], "selective": True}
        matches = matches.replace(" ", "+")
        # Offer at most 20 results on the reply keyboard.
        for song in await search(matches):
            title, link = song[0], song[1]
            user_steps[from_id]['data'][title] = link
            show_keyboard['keyboard'].append([title])
            i += 1
            if i == 20:
                break
        if len(show_keyboard['keyboard']) in [0, 1]:
            hide_keyboard = {'hide_keyboard': True, 'selective': True}
            del user_steps[from_id]
            return [Message(chat_id).set_text("*Not Found*",
                                              reply_to_message_id=message['message_id'], reply_markup=hide_keyboard,
                                              parse_mode="markdown")]
        return [Message(chat_id).set_text("Select One Of these :", reply_to_message_id=message['message_id'],
                                          reply_markup=show_keyboard)]
    elif step == 1:
        try:
            hide_keyboard = {'hide_keyboard': True, "selective": True}
            await sender(Message(chat_id).set_text("*Please Wait*\nLet me Save this Music For You",
                                                   reply_to_message_id=message['message_id'],
                                                   reply_markup=hide_keyboard, parse_mode="markdown"))
            await downloader(await getfile(user_steps[from_id]['data'][message['text']]),
                             "tmp/{}.mp3".format(message['text']))
            del user_steps[from_id]
            return [Message(chat_id).set_audio("tmp/{}.mp3".format(message['text']), title=message['text'],
                                               performer="@Siarobot")]
        except Exception:
            # Unknown selection or failed download: reset the conversation.
            del user_steps[from_id]
            return [Message(chat_id).set_text("*Wrong Input*\n_Try Again_", parse_mode="markdown")]
# Plugin registration record consumed by the bot's plugin loader.
plugin = {
    "name": "Soundcloud",
    "desc": "Download a Music From Sound Cloud\n\n"
            "*For Start :*\n`/sc michael jackson billie jean`",
    "usage": ["/sc \\[`Search`]"],
    "run": run,
    "sudo": False,
    "patterns": ["^[/!#]sc (.*)$"]
}
| siyanew/Siarobo | plugins/soundcloud.py | Python | mit | 3,323 |
"""
Anonymous token-based surveys
"""
| nmoutschen/tsurvey | src/tsurvey/__init__.py | Python | mit | 38 |
# coding=utf-8
"""
moderation plugin for DecoraterBot.
"""
import regex
import discord
from discord.ext import commands
from DecoraterBotUtils import utils
# This module's warn, and mute commands do not work for now.
# I would like it if someone would help me fix them and pull
# request the fixtures to this file to make them work.
class Moderation:
    """
    Moderation Commands Extension to the
    default DecoraterBot Moderation commands.
    """

    def __init__(self):
        # Localised reply strings keyed by command name.
        self.moderation_text = utils.PluginTextReader(
            file='moderation.json')

    @commands.command(name='ban', pass_context=True, no_pm=True)
    async def ban_command(self, ctx):
        """
        Ban the first mentioned member (requires the 'Bot Commander' role).
        :param ctx: Messages.
        :return: Nothing.
        """
        reply_data = ""
        role2 = discord.utils.find(lambda role: role.name == 'Bot Commander',
                                   ctx.message.channel.server.roles)
        if role2 in ctx.message.author.roles:
            # Only the first mention is processed (unconditional break); the
            # for-else runs when the message had no mentions at all.
            for disuser in ctx.message.mentions:
                listdata = ctx.message.channel.server.members
                member2 = discord.utils.find(
                    lambda member: member.name == disuser.name, listdata)
                try:
                    await ctx.bot.ban(member2, delete_message_days=7)
                    reply_data = str(
                        self.moderation_text[
                            'ban_command_data'
                        ][0]).format(member2)
                except discord.Forbidden:
                    reply_data = str(
                        self.moderation_text[
                            'ban_command_data'
                        ][1])
                except discord.HTTPException:
                    reply_data = str(
                        self.moderation_text[
                            'ban_command_data'
                        ][2])
                break
            else:
                reply_data = str(
                    self.moderation_text[
                        'ban_command_data'
                    ][3])
        else:
            reply_data = str(
                self.moderation_text[
                    'ban_command_data'
                ][4])
        try:
            await ctx.bot.send_message(
                ctx.message.channel, content=reply_data)
        except discord.Forbidden:
            await ctx.bot.BotPMError.resolve_send_message_error(
                ctx)

    @commands.command(name='softban', pass_context=True, no_pm=True)
    async def softban_command(self, ctx):
        """
        Softban (ban then immediately unban) the first mentioned member to
        purge their recent messages.
        :param ctx: Messages.
        :return: Nothing.
        """
        reply_data = ""
        role2 = discord.utils.find(lambda role: role.name == 'Bot Commander',
                                   ctx.message.channel.server.roles)
        if role2 in ctx.message.author.roles:
            for disuser in ctx.message.mentions:
                memberlist = ctx.message.channel.server.members
                member2 = discord.utils.find(
                    lambda member: member.name == disuser.name, memberlist)
                try:
                    await ctx.bot.ban(member2, delete_message_days=7)
                    await ctx.bot.unban(member2.server, member2)
                    reply_data = str(
                        self.moderation_text['softban_command_data'][
                            0]).format(member2)
                except discord.Forbidden:
                    reply_data = str(
                        self.moderation_text['softban_command_data'][1])
                except discord.HTTPException:
                    reply_data = str(
                        self.moderation_text['softban_command_data'][2])
                break
            else:
                reply_data = str(
                    self.moderation_text[
                        'softban_command_data'
                    ][3])
        else:
            reply_data = str(
                self.moderation_text[
                    'softban_command_data'
                ][4])
        try:
            await ctx.bot.send_message(
                ctx.message.channel, content=reply_data)
        except discord.Forbidden:
            await ctx.bot.BotPMError.resolve_send_message_error(
                ctx)

    @commands.command(name='kick', pass_context=True, no_pm=True)
    async def kick_command(self, ctx):
        """
        Kick the first mentioned member (requires the 'Bot Commander' role).
        :param ctx: Messages.
        :return: Nothing.
        """
        reply_data = ""
        role2 = discord.utils.find(lambda role: role.name == 'Bot Commander',
                                   ctx.message.channel.server.roles)
        if role2 in ctx.message.author.roles:
            for disuser in ctx.message.mentions:
                memberlist = ctx.message.channel.server.members
                member2 = discord.utils.find(
                    lambda member: member.name == disuser.name, memberlist)
                try:
                    await ctx.bot.kick(member2)
                    reply_data = str(
                        self.moderation_text['kick_command_data'][
                            0]).format(member2)
                except discord.Forbidden:
                    reply_data = str(
                        self.moderation_text[
                            'kick_command_data'
                        ][1])
                except discord.HTTPException:
                    reply_data = str(
                        self.moderation_text[
                            'kick_command_data'
                        ][2])
                break
            else:
                reply_data = str(
                    self.moderation_text[
                        'kick_command_data'
                    ][3])
        else:
            reply_data = str(
                self.moderation_text[
                    'kick_command_data'
                ][4])
        try:
            await ctx.bot.send_message(
                ctx.message.channel, content=reply_data)
        except discord.Forbidden:
            await ctx.bot.BotPMError.resolve_send_message_error(
                ctx)

    @commands.command(name='prune', pass_context=True, no_pm=True)
    async def prune_command(self, ctx):
        """
        Bulk-delete the last N messages in the channel (default 1).
        :param ctx: Messages.
        :return: Nothing.
        """
        reply_data = ""
        if ctx.message.channel.id in ctx.bot.ignoreslist["channels"]:
            return
        if ctx.message.author.id in ctx.bot.banlist['Users']:
            return
        role2 = discord.utils.find(
            lambda role: role.name == 'Bot Commander',
            ctx.message.channel.server.roles)
        if role2 in ctx.message.author.roles:
            opt = ctx.message.content[
                len(ctx.prefix + "prune "):].strip()
            num = 1
            if opt:
                try:
                    num = int(opt)
                except ValueError:
                    # Count was not an integer; ignore the command silently.
                    return
            reply_data = await self.prune_command_iterater_helper(ctx, num)
        else:
            reply_data = str(
                self.moderation_text[
                    'prune_command_data'
                ][1])
        if reply_data is not None:
            try:
                await ctx.bot.send_message(
                    ctx.message.channel, content=reply_data)
            except discord.Forbidden:
                await ctx.bot.BotPMError.resolve_send_message_error(
                    ctx)

    @commands.command(name='clear', pass_context=True, no_pm=True)
    async def clear_command(self, ctx):
        """
        Delete the bot's own recent messages in this channel.
        :param ctx: Messages.
        :return: Nothing.
        """
        if ctx.message.author.id in ctx.bot.banlist['Users']:
            return
        reply_data = await self.clear_command_iterater_helper(ctx)
        if reply_data is not None:
            try:
                await ctx.bot.send_message(
                    ctx.message.channel, content=reply_data)
            except discord.Forbidden:
                await ctx.bot.BotPMError.resolve_send_message_error(
                    ctx)

    @commands.command(name='warn', pass_context=True)
    async def warn_command(self, ctx):
        """
        ::warn Command for DecoraterBot.
        """
        role2 = discord.utils.find(lambda role: role.name == 'Bot Commander',
                                   ctx.message.channel.server.roles)
        if role2 in ctx.message.author.roles:
            match = regex.match('warn[ ]+(<@(.+?)>[ ])+(.+)',
                                ctx.message.content[len(ctx.prefix):].strip())
            if match:
                warning = match.captures(3)[0]
                targets = match.captures(2)
                # NOTE(review): 'targets' are raw id strings from the regex,
                # not Member objects -- send_message likely needs a resolved
                # destination (module header says this command is broken).
                for target in targets:
                    await ctx.bot.send_message(target, content=warning)

    @commands.command(name='mute', pass_context=True)
    async def mute_command(self, ctx):
        """
        ::mute Search Command for DecoraterBot.
        """
        role2 = discord.utils.find(lambda role: role.name == 'Bot Commander',
                                   ctx.message.channel.server.roles)
        if role2 in ctx.message.author.roles:
            match = regex.match(ctx.prefix + 'mute[ ]+(<@(.+?)>[ ])+(.+)',
                                ctx.message.content)
            if match:
                mute_time = match.captures(3)[0]
                # targets = match.captures(2)
                if mute_time is not None:
                    # s = seconds
                    # m = minutes
                    # h = hours
                    # d = days
                    # w = weeks
                    # M = months
                    # y = years
                    # Raw string avoids the invalid '\d' escape warning.
                    pattern = r'(\d+)(s|m|h|d|w|M|y)'
                    searchres = regex.match(pattern, mute_time)
                    if searchres is not None:
                        # TODO: Finish this command.
                        return

    # Helpers.
    async def prune_command_iterater_helper(self, ctx, num):
        """
        Prunes Messages.
        :param ctx: Message Context.
        :param num: number of messages to delete (the command itself adds 1).
        :return: status message string.
        """
        try:
            await ctx.bot.purge_from(ctx.message.channel, limit=num + 1)
        except discord.HTTPException:
            # Bulk purge failed; fall back to deleting one message at a time.
            messages = []
            async for message in ctx.bot.logs_from(
                    ctx.message.channel, limit=num + 1):
                messages.append(message)
            for message in messages:
                try:
                    await ctx.bot.delete_messages(message)
                except discord.HTTPException:
                    return str(
                        self.moderation_text[
                            'prune_command_data'
                        ][0])
        # Bug fix: this used to be a 'finally: return', which swallowed
        # every exception and overrode the error return above.
        return f"Deleted {num + 1} messages."

    async def clear_command_iterater_helper(self, ctx):
        """
        Clears the bot's messages.
        :param ctx: Message Context.
        :return: status message string.
        """
        try:
            await ctx.bot.purge_from(
                ctx.message.channel, limit=100,
                check=lambda e: e.author == (
                    ctx.message.server.me))
        except discord.HTTPException:
            messages = []
            # NOTE(review): logs_from() is not documented to take 'check';
            # preserved from the original -- verify against the discord.py
            # version in use.
            async for message in ctx.bot.logs_from(
                    ctx.message.channel, limit=100,
                    check=lambda e: e.author == (
                        ctx.message.server.me)):
                messages.append(message)
            for message in messages:
                try:
                    await ctx.bot.delete_messages(message)
                except discord.HTTPException:
                    return "Failed to delete the bot's messages."
        # Bug fix: previously a 'finally: return' that swallowed exceptions
        # and masked the failure message above.
        return "Deleted the bot's messages."
def setup(bot):
    """Register the Moderation cog with DecoraterBot (discord.py entry point)."""
    bot.add_cog(Moderation())
| DecoraterBot-devs/DecoraterBot-cogs | moderation.py | Python | mit | 12,660 |
# Public submodules of the package.
__all__ = ['pwnedapi', 'utils']
from .pwnedapi import HaveIBeenPwnedApi
| ericfourrier/pwnedapi | pwnedapi/__init__.py | Python | mit | 72 |
# -*- coding: utf-8 -*-
# Package version (semantic versioning).
__version__ = '0.0.1'
# SPDX license identifier.
__license__ = 'MIT'
| adebarbara/olpy | olpy/__init__.py | Python | mit | 67 |
from twisted.trial import unittest
from twistedchecker.checkers.docstring import DocstringChecker
class DocstringTestCase(unittest.TestCase):
    """
    Test for twistedchecker.checkers.docstring
    """

    def test_getLineIndent(self):
        """
        Test of twistedchecker.checkers.docstring._getLineIndent.
        """
        checker = DocstringChecker()
        cases = [
            (0, "foo"),
            (2, "  foo"),
            (4, "    foo"),
        ]
        for expected_indent, line in cases:
            self.assertEqual(checker._getLineIndent(line), expected_indent)
from tilequeue.query.fixture import make_fixture_data_fetcher
from tilequeue.query.pool import DBConnectionPool
from tilequeue.query.postgres import make_db_data_fetcher
from tilequeue.query.rawr import make_rawr_data_fetcher
from tilequeue.query.split import make_split_data_fetcher
from tilequeue.process import Source
from tilequeue.store import make_s3_tile_key_generator
# Names re-exported as the public query API of this package.
__all__ = [
    'DBConnectionPool',
    'make_db_data_fetcher',
    'make_fixture_data_fetcher',
    'make_data_fetcher',
]
def make_data_fetcher(cfg, layer_data, query_cfg, io_pool):
    """Build the data fetcher used for tile rendering.

    Returns the plain database fetcher, or -- when 'use-rawr-tiles' is set in
    the config -- a split fetcher that routes tiles below the group zoom to
    the database and the rest to the RAWR fetcher.
    """
    db_fetcher = make_db_data_fetcher(
        cfg.postgresql_conn_info, cfg.template_path, cfg.reload_templates,
        query_cfg, io_pool)
    if not cfg.yml.get('use-rawr-tiles'):
        return db_fetcher
    rawr_fetcher = _make_rawr_fetcher(cfg, layer_data, query_cfg, io_pool)
    group_by_zoom = cfg.yml.get('rawr').get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    return make_split_data_fetcher(group_by_zoom, db_fetcher, rawr_fetcher)
class _NullRawrStorage(object):
def __init__(self, data_source, table_sources):
self.data_source = data_source
self.table_sources = table_sources
def __call__(self, tile):
# returns a "tables" object, which responds to __call__(table_name)
# with tuples for that table.
data = {}
for location in self.data_source(tile):
data[location.name] = location.records
def _tables(table_name):
from tilequeue.query.common import Table
source = self.table_sources[table_name]
return Table(source, data.get(table_name, []))
return _tables
def _make_rawr_fetcher(cfg, layer_data, query_cfg, io_pool):
    """Construct the RAWR-tile data fetcher from the 'rawr' config subtree.

    Supports three storage backends, selected by source.type: 's3' (fetch
    pre-built RAWR tiles), 'store' (any tilequeue tile store) and 'generate'
    (build tiles directly from PostgreSQL -- standalone use and testing).
    """
    rawr_yaml = cfg.yml.get('rawr')
    assert rawr_yaml is not None, 'Missing rawr configuration in yaml'
    group_by_zoom = rawr_yaml.get('group-zoom')
    assert group_by_zoom is not None, 'Missing group-zoom rawr config'
    rawr_source_yaml = rawr_yaml.get('source')
    assert rawr_source_yaml, 'Missing rawr source config'
    table_sources = rawr_source_yaml.get('table-sources')
    assert table_sources, 'Missing definitions of source per table'
    # map text for table source onto Source objects
    for tbl, data in table_sources.items():
        source_name = data['name']
        source_value = data['value']
        table_sources[tbl] = Source(source_name, source_value)
    label_placement_layers = rawr_yaml.get('label-placement-layers', {})
    for geom_type, layers in label_placement_layers.items():
        assert geom_type in ('point', 'polygon', 'linestring'), \
            'Geom type %r not understood, expecting point, polygon or ' \
            'linestring.' % (geom_type,)
        label_placement_layers[geom_type] = set(layers)
    indexes_cfg = rawr_yaml.get('indexes')
    assert indexes_cfg, 'Missing definitions of table indexes.'
    # source types are:
    # s3 - to fetch RAWR tiles from S3
    # store - to fetch RAWR tiles from any tilequeue tile source
    # generate - to generate RAWR tiles directly, rather than trying to load
    #            them from S3. this can be useful for standalone use and
    #            testing. provide a postgresql subkey for database connection
    #            settings.
    source_type = rawr_source_yaml.get('type')
    if source_type == 's3':
        rawr_source_s3_yaml = rawr_source_yaml.get('s3')
        bucket = rawr_source_s3_yaml.get('bucket')
        assert bucket, 'Missing rawr source s3 bucket'
        region = rawr_source_s3_yaml.get('region')
        assert region, 'Missing rawr source s3 region'
        prefix = rawr_source_s3_yaml.get('prefix')
        assert prefix, 'Missing rawr source s3 prefix'
        extension = rawr_source_s3_yaml.get('extension')
        assert extension, 'Missing rawr source s3 extension'
        allow_missing_tiles = rawr_source_s3_yaml.get(
            'allow-missing-tiles', False)
        import boto3
        from tilequeue.rawr import RawrS3Source
        s3_client = boto3.client('s3', region_name=region)
        tile_key_gen = make_s3_tile_key_generator(rawr_source_s3_yaml)
        storage = RawrS3Source(
            s3_client, bucket, prefix, extension, table_sources, tile_key_gen,
            allow_missing_tiles)
    elif source_type == 'generate':
        from raw_tiles.source.conn import ConnectionContextManager
        from raw_tiles.source.osm import OsmSource
        postgresql_cfg = rawr_source_yaml.get('postgresql')
        assert postgresql_cfg, 'Missing rawr postgresql config'
        conn_ctx = ConnectionContextManager(postgresql_cfg)
        rawr_osm_source = OsmSource(conn_ctx)
        storage = _NullRawrStorage(rawr_osm_source, table_sources)
    elif source_type == 'store':
        from tilequeue.store import make_store
        from tilequeue.rawr import RawrStoreSource
        store_cfg = rawr_source_yaml.get('store')
        store = make_store(store_cfg,
                           credentials=cfg.subtree('aws credentials'))
        storage = RawrStoreSource(store, table_sources)
    else:
        assert False, 'Source type %r not understood. ' \
            'Options are s3, generate and store.' % (source_type,)
    # TODO: this needs to be configurable, everywhere! this is a long term
    # refactor - it's hard-coded in a bunch of places :-(
    max_z = 16
    layers = _make_layer_info(layer_data, cfg.process_yaml_cfg)
    return make_rawr_data_fetcher(
        group_by_zoom, max_z, storage, layers, indexes_cfg,
        label_placement_layers)
def _make_layer_info(layer_data, process_yaml_cfg):
    """Build a LayerInfo per layer from its min-zoom/props functions and geometry types."""
    from tilequeue.query.common import LayerInfo, ShapeType

    functions = _parse_yaml_functions(process_yaml_cfg)
    layers = {}
    for layer_datum in layer_data:
        layer_name = layer_datum['name']
        min_zoom_fn, props_fn = functions[layer_name]
        geometry_types = ShapeType.parse_set(layer_datum['geometry_types'])
        layers[layer_name] = LayerInfo(min_zoom_fn, props_fn, geometry_types)
    return layers
def _parse_yaml_functions(process_yaml_cfg):
    """Map each layer name to its (min_zoom_fn, output_fn) pair from the YAML config."""
    from tilequeue.command import make_output_calc_mapping
    from tilequeue.command import make_min_zoom_calc_mapping

    output_fns = make_output_calc_mapping(process_yaml_cfg)
    min_zoom_fns = make_min_zoom_calc_mapping(process_yaml_cfg)
    # Both mappings must cover exactly the same set of layers.
    assert set(output_fns.keys()) == set(min_zoom_fns.keys())
    return {
        layer_name: (min_zoom_fns[layer_name], output_fns[layer_name])
        for layer_name in output_fns
    }
| mapzen/tilequeue | tilequeue/query/__init__.py | Python | mit | 6,789 |
"""empty message
Revision ID: 4986e64643f4
Revises: 175003d01257
Create Date: 2015-04-15 12:16:41.965765
"""
# revision identifiers, used by Alembic.
revision = '4986e64643f4'
down_revision = '175003d01257'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Apply: add NOT NULL unique column 'feature_id' to 'tile' and make
    'date_acquired' NOT NULL."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): adding a non-nullable column with no server_default will
    # fail if the 'tile' table already contains rows -- confirm it is empty
    # when this migration runs.
    op.add_column('tile', sa.Column('feature_id', sa.String(), nullable=False))
    op.alter_column('tile', 'date_acquired',
               existing_type=postgresql.TIMESTAMP(),
               nullable=False)
    # Constraint name None lets the naming convention pick one.
    op.create_unique_constraint(None, 'tile', ['feature_id'])
    ### end Alembic commands ###
def downgrade():
    """Revert: drop the 'feature_id' column/constraint and make
    'date_acquired' nullable again."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): drop_constraint requires a real constraint name; passing
    # None raises at runtime -- fill in the generated constraint name.
    op.drop_constraint(None, 'tile', type_='unique')
    op.alter_column('tile', 'date_acquired',
               existing_type=postgresql.TIMESTAMP(),
               nullable=True)
    op.drop_column('tile', 'feature_id')
    ### end Alembic commands ###
| justinwp/croplands | migrations/versions/4986e64643f4_.py | Python | mit | 1,023 |
from django.db.backends.mysql.compiler import SQLCompiler as BaseSQLCompiler
from django.db.backends.mysql.compiler import SQLInsertCompiler, \
SQLDeleteCompiler, SQLUpdateCompiler, SQLAggregateCompiler, \
SQLDateCompiler, SQLDateTimeCompiler
class SQLCompiler(BaseSQLCompiler):
    # MySQL join hint that forces the join order to follow the FROM order,
    # letting the ordered table's index drive the scan.
    STRAIGHT_INNER = 'STRAIGHT_JOIN'
    def get_ordering(self):
        """
        Returns a tuple containing a list representing the SQL elements in the
        "order by" clause, and the list of SQL elements that need to be added
        to the GROUP BY clause as a result of the ordering.
        Also sets the ordering_aliases attribute on this instance to a list of
        extra aliases needed in the select.
        Determining the ordering SQL can change the tables we need to include,
        so this should be run *before* get_from_clause().
        The method is overrided to save the result to reuse it in
        get_from_clause().
        """
        # We might want to notify people to not order by columns from different
        # tables as there is no index across tables. They may create proxy
        # model to do filtering with subquery.
        result, params, group_by = super(SQLCompiler, self).get_ordering()
        # Cached (name-mangled) for get_from_clause(); relies on Django
        # calling get_ordering() first, as documented above.
        self.__ordering_group_by = group_by
        return result, params, group_by
    def get_from_clause(self):
        """
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list any extra parameters that
        need to be included. Sub-classes, can override this to create a
        from-clause via a "select".
        This should only be called after any SQL construction methods that
        might change the tables we need. This means the select columns,
        ordering and distinct must be done first.
        Patch query with STRAIGHT_JOIN if there is ordering and all joins in
        query are INNER joins.
        """
        straight_join_patch_applied = False
        # NOTE(review): itervalues() is Python 2 only; this module will not
        # run on Python 3 without switching to values().
        if self.__ordering_group_by \
            and len(self.query.tables) > 1 \
            and all(join_info.join_type is None \
                or join_info.join_type == self.query.INNER
                for join_info in self.query.alias_map.itervalues()):
            # Get ordering table name from get_ordering()
            # XXX: let's pretend that we believe in luck! :)
            # NOTE(review): assumes the first ORDER BY term is of the form
            # "table"."column" and strips the surrounding quotes with [1:-1].
            ordering_table = self.__ordering_group_by[0][0].split('.', 1)[0][1:-1]
            # Save query tables and alias mapping to patch and restore them.
            query_tables = _query_tables = self.query.tables
            query_alias_map = self.query.alias_map
            _query_alias_map = query_alias_map.copy()
            try:
                ordering_table_index = query_tables.index(ordering_table)
            except ValueError:
                # Is this possible? Fallback without patching
                pass
            else:
                # STRAIGHT_JOIN forces MySQL read from the first table in
                # a query, thus we must be sure that the first table is that
                # we apply ordering to.
                if ordering_table_index > 0:
                    _first_table = query_tables[0]
                    # Move ordering table to the begining
                    _query_tables = [ordering_table] \
                        + [table for table in query_tables if table != ordering_table]
                    _ordering_join_info = _query_alias_map[ordering_table]
                    # Fix JoinInfo
                    # XXX: It's unsufficient, it recreates objects.
                    # Swap roles: the previous first table now joins onto the
                    # ordering table, with the join columns reversed.
                    _query_alias_map[_first_table] = _query_alias_map[_first_table]\
                        ._replace(
                            join_type=self.STRAIGHT_INNER,
                            join_cols=[join_cols[::-1]
                                       for join_cols in _ordering_join_info.join_cols],
                            join_field=_ordering_join_info.join_field,
                            lhs_alias=ordering_table
                        )
                    _query_alias_map[ordering_table] = _ordering_join_info._replace(
                        join_type=None,
                        join_cols=((None, None), ),
                        join_field=None,
                        lhs_alias=None
                    )
                # Replace INNER joins with STRAIGHT joins
                # XXX: It's unsufficient, it recreates objects.
                for table in _query_tables[1:]:
                    _query_alias_map[table] = _query_alias_map[table]\
                        ._replace(join_type=self.STRAIGHT_INNER)
                # Patch query
                self.query.tables = _query_tables
                self.query.alias_map = _query_alias_map
                straight_join_patch_applied = True
        result, from_params = super(SQLCompiler, self).get_from_clause()
        # Restore patched query if patched
        if straight_join_patch_applied:
            self.query.tables = query_tables
            if ordering_table_index > 0:
                self.query.alias_map = query_alias_map
        return result, from_params
| frol/django-mysql-fix | django_mysql_fix/backends/mysql/compiler.py | Python | mit | 5,170 |
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/class-2-find-the-torsional-angle
import io
import math
import sys
import unittest
class Vector:
    """A minimal 3-D vector with just the operations needed for torsion angles."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def subtract(self, other):
        """Return the component-wise difference self - other as a new Vector."""
        x = self.x - other.x
        y = self.y - other.y
        z = self.z - other.z
        return Vector(x, y, z)

    def dot_product(self, other):
        """Return the scalar (dot) product of self and other."""
        return self.x * other.x + self.y * other.y + self.z * other.z

    def cross_product(self, other):
        """Return the right-handed cross product self x other.

        Bug fix: the original returned ``(0,0,0) - (x,y,z)``, i.e. the
        *negated* cross product (equivalent to other x self).  The torsion
        angle computation was unaffected because both cross products were
        negated and the signs cancelled in the dot product, but the method
        itself was wrong for any other caller.
        """
        x = self.y * other.z - self.z * other.y
        y = self.z * other.x - self.x * other.z
        z = self.x * other.y - self.y * other.x
        return Vector(x, y, z)

    def value(self):
        """Return the Euclidean norm (magnitude) of the vector."""
        xx = math.pow(self.x, 2)
        yy = math.pow(self.y, 2)
        zz = math.pow(self.z, 2)
        return math.sqrt(xx + yy + zz)
def torsional_angle(a, b, c, d):
    """Return the torsion (dihedral) angle of points a-b-c-d, in degrees.

    The angle is measured between the plane containing (a, b, c) and the
    plane containing (b, c, d), via the normals of those planes.
    """
    edge_ab = a.subtract(b)
    edge_bc = b.subtract(c)
    edge_cd = c.subtract(d)
    normal_abc = edge_ab.cross_product(edge_bc)
    normal_bcd = edge_bc.cross_product(edge_cd)
    cosine = (normal_abc.dot_product(normal_bcd)
              / (normal_abc.value() * normal_bcd.value()))
    return math.degrees(math.acos(cosine))
def main():
    """Read four space-separated 3-D points from stdin and print the
    torsion angle ABCD with two decimal places."""
    def read_vector():
        return Vector(*map(float, input().split()))

    points = [read_vector() for _ in range(4)]
    print('%.2f' % torsional_angle(*points))


if __name__ == '__main__':  # pragma: no cover
    main()
class TestCode(unittest.TestCase):
    """Golden-file tests for main().

    Each case feeds ``<this_file>.<which>.in`` to stdin and compares what
    main() prints against ``<this_file>.<which>.out``.
    """
    def generalized_test(self, which):
        # Redirect stdin to the fixture input and capture stdout so the
        # output of main() can be compared against the expected file.
        sys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r')
        sys.stdout = io.StringIO()
        expected = open(__file__.replace('.py', f'.{which}.out'), 'r')
        main()
        self.assertEqual(sys.stdout.getvalue(), expected.read())
        # NOTE(review): sys.stdin/sys.stdout are not restored to the real
        # streams afterwards — acceptable for a one-shot test process.
        for handle in [sys.stdin, sys.stdout, expected]:
            handle.close()
    def test_0(self):
        self.generalized_test('0')
| altermarkive/Coding-Interviews | algorithm-design/hackerrank/class_2_find_the_torsional_angle/test_class_2_find_the_torsional_angle.py | Python | mit | 2,023 |
from app import create_app
# Application-factory pattern: build the app at import time so WSGI servers
# can also load it as "run:app".
app = create_app()
if __name__ == "__main__":
    # Development server only — debug=True enables the interactive debugger
    # and must not be used in production.
    app.run(host="0.0.0.0", port=5000, debug=True)
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import six
from acq4.devices.DAQGeneric.taskGUI import DAQGenericTaskGui
from acq4.devices.Device import Device, DeviceTask
from pyqtgraph import siFormat
from pyqtgraph.debug import Profiler
from acq4.util import Qt
from acq4.util.Mutex import Mutex
from acq4.util.debug import printExc
from pyqtgraph.metaarray import MetaArray, axis
Ui_Form = Qt.importTemplate('.DeviceTemplate')
class DataMapping:
    """Map values between DAQ channel voltages and physically measured units.

    Analog channels get a simple linear transform; digital channels may be
    inverted via the device configuration (scale=-1, offset=-1):

        Vout  = Value * scale - offset
        Value = (Vin + offset) * scale

    Subclass to implement arbitrary mappings (calibration curves, etc.).
    """

    def __init__(self, device, chans=None):
        """Snapshot scale and offset for *chans* (all channels when None).

        Because the values are captured immediately, the mapping is only
        valid while the device's scale/offset settings are unchanged.
        """
        self.device = device
        if chans is None:
            chans = device.listChannels()
        if isinstance(chans, six.string_types):
            chans = [chans]
        self.scale = {ch: device.getChanScale(ch) for ch in chans}
        self.offset = {ch: device.getChanOffset(ch) for ch in chans}

    def mapToDaq(self, chan, data):
        """Translate a physical value into the DAQ channel voltage."""
        return data * self.scale[chan] - self.offset[chan]

    def mapFromDaq(self, chan, data):
        """Translate a DAQ channel voltage into the physical value."""
        return (data + self.offset[chan]) * self.scale[chan]
class ChannelHandle(object):
    """Lightweight handle pairing a DAQGeneric device with one channel name.

    Registered with the device manager under the 'daqChannel' interface so
    individual channels can be looked up independently of the device.
    """
    def __init__(self, dev, channel):
        self.dev = dev          # the owning DAQGeneric device
        self.channel = channel  # channel name within that device
class DAQGeneric(Device):
    """Device composed of one or more DAQ channels (analog/digital, in/out).

    Config format:
        channels:
            ChannelName1:
                device: 'DaqDeviceName'
                channel: '/Dev1/ao0'
                type: 'ao'
                units: 'A'
                scale: 200 * mV / nA
            ChannelName2:
                device: 'DaqDeviceName'
                channel: '/Dev1/ai3'
                type: 'ai'
                mode: 'nrse'
                units: 'A'
                scale: 200 * nA / mV
            ChannelName3:
                device: 'DaqDeviceName'
                channel: '/Dev1/line7'
                type: 'di'
                invert: True
    """
    # emitted as (channelName, newHoldingLevel)
    sigHoldingChanged = Qt.Signal(object, object)
    def __init__(self, dm, config, name):
        """Validate channel config, set output holding levels, and register
        per-channel interfaces with the device manager *dm*."""
        Device.__init__(self, dm, config, name)
        self._DGLock = Mutex(Qt.QMutex.Recursive)  ## protects access to _DGHolding, _DGConfig
        ## Do some sanity checks here on the configuration
        # 'channels' key is expected; for backward compatibility we just use the top-level config.
        config = config.get('channels', config)
        self._DGConfig = config
        self._DGHolding = {}
        for ch in config:
            # scale/offset only make sense for analog ('a*') channels
            if config[ch]['type'][0] != 'a' and ('scale' in config[ch] or 'offset' in config[ch]):
                raise Exception("Scale/offset only allowed for analog channels. (%s.%s)" % (name, ch))
            if 'scale' not in config[ch]:
                config[ch]['scale'] = 1  ## must be int to prevent accidental type conversion on digital data
            if 'offset' not in config[ch]:
                config[ch]['offset'] = 0
            if config[ch].get('invert', False):
                if config[ch]['type'][0] != 'd':
                    raise Exception("Inversion only allowed for digital channels. (%s.%s)" % (name, ch))
                # scale=-1/offset=-1 implements logical inversion through
                # the linear DataMapping formulas.
                config[ch]['scale'] = -1
                config[ch]['offset'] = -1
            # print "chan %s scale %f" % (ch, config[ch]['scale'])
            if 'holding' not in config[ch]:
                config[ch]['holding'] = 0.0
            ## It is possible to create virtual channels with no real hardware connection
            if 'device' not in config[ch]:
                # print "Assuming channel %s is virtual:" % ch, config[ch]
                config[ch]['virtual'] = True
            ## set holding value for all output channels now
            if config[ch]['type'][1] == 'o':
                self.setChanHolding(ch, config[ch]['holding'])
            # self._DGHolding[ch] = config[ch]['holding']
        dm.declareInterface(name, ['daqChannelGroup'], self)
        for ch in config:
            dm.declareInterface(name + "." + ch, ['daqChannel'], ChannelHandle(self, ch))
    def mapToDAQ(self, channel, data):
        """Convert *data* from physical units to a DAQ voltage for *channel*."""
        mapping = self.getMapping(chans=[channel])
        return mapping.mapToDaq(channel, data)
    def mapFromDAQ(self, channel, data):
        """Convert *data* from a DAQ voltage to physical units for *channel*."""
        mapping = self.getMapping(chans=[channel])
        return mapping.mapFromDaq(channel, data)
    def getMapping(self, chans=None):
        """Return a DataMapping snapshot for *chans* (all channels if None)."""
        return DataMapping(self, chans)
    def createTask(self, cmd, parentTask):
        """Create the DeviceTask used by the task runner for this device."""
        return DAQGenericTask(self, cmd, parentTask)
    def getConfigParam(self, param):
        """Return a top-level config value, or None if absent."""
        return self._DGConfig.get(param, None)
    def setChanHolding(self, channel, level=None, block=True, mapping=None):
        """Define and set the holding values for this channel
        If *block* is True, then return only after the value has been set on the DAQ.
        If *block* is False, then simply schedule the change to take place when the DAQ is available.
        *mapping* is a DataMapping object which tells the device how to translate *level* into
        a voltage on the physical DAQ channel. If *mapping* is None, then it will use self.getMapping(*channel*)
        to determine the correct mapping.
        """
        prof = Profiler(disabled=True)
        with self._DGLock:
            prof('lock')
            # print "set holding", channel, level
            ### Set correct holding level here...
            if level is None:
                # None means "re-apply the remembered holding level"
                level = self._DGHolding[channel]
                if level is None:
                    raise Exception("No remembered holding level for channel %s" % channel)
            else:
                self._DGHolding[channel] = level
            if mapping is None:
                mapping = self.getMapping(channel)
            val = mapping.mapToDaq(channel, self._DGHolding[channel])
            prof('map')
            # print "Set holding for channel %s: %f => %f" % (channel, self._DGHolding[channel], val)
            chConf = self._DGConfig[channel]
            isVirtual = chConf.get('virtual', False)
            if not isVirtual:
                daq = chConf['device']
                chan = chConf['channel']
                daqDev = self.dm.getDevice(daq)
                prof('get dev')
        ## release DGLock before setChannelValue
        if not isVirtual:
            if block:
                daqDev.setChannelValue(chan, val, block=True)
            else:
                daqDev.setChannelValue(chan, val, block=False,
                                       delaySetIfBusy=True)  ## Note: If a task is running, this will not be set until it completes.
        prof('set channel value')
        self.sigHoldingChanged.emit(channel, level)
        prof('emit')
    def getChanHolding(self, chan):
        """Return the remembered holding level for an output channel."""
        with self._DGLock:
            return self._DGHolding[chan]
    def getChannelValue(self, channel, block=True, raw=False):
        """Read the current value of *channel* from the DAQ.

        Returns the physical value unless *raw* is True (DAQ voltage).
        """
        with self._DGLock:
            daq = self._DGConfig[channel]['device']
            chan = self._DGConfig[channel]['channel']
            mode = self._DGConfig[channel].get('mode', None)
        ## release _DGLock before getChannelValue
        daqDev = self.dm.getDevice(daq)
        val = daqDev.getChannelValue(chan, mode=mode, block=block)
        if not raw:
            return self.mapFromDAQ(channel, val)
        else:
            return val
    def reconfigureChannel(self, chan, config):
        """Allows reconfiguration of channel properties (including the actual DAQ channel name)"""
        with self._DGLock:
            self._DGConfig[chan].update(config)
    def deviceInterface(self, win):
        """Return a widget with a UI to put in the device rack"""
        return DAQDevGui(self)
    def taskInterface(self, taskRunner):
        """Return a widget with a UI to put in the task rack"""
        return DAQGenericTaskGui(self, taskRunner)
    def getDAQName(self, channel):
        """Return the name of the DAQ device that owns *channel*."""
        # return self._DGConfig[channel]['channel'][0]
        with self._DGLock:
            return self._DGConfig[channel]['device']
    def quit(self):
        # Nothing to clean up for a generic channel group.
        pass
    def setChanScale(self, ch, scale, update=True, block=True):
        """Set the scale factor for *ch*; re-applies holding on outputs."""
        with self._DGLock:
            self._DGConfig[ch]['scale'] = scale
        if update and self.isOutput(ch):  ## only set Holding for output channels
            self.setChanHolding(ch, block=block)
    def setChanOffset(self, ch, offset, update=True, block=True):
        """Set the offset for *ch*; re-applies holding on outputs."""
        with self._DGLock:
            self._DGConfig[ch]['offset'] = offset
        if update and self.isOutput(ch):  ## only set Holding for output channels
            self.setChanHolding(ch, block=block)
    def getChanScale(self, chan):
        with self._DGLock:
            ## Scale defaults to 1.0
            ## - can be overridden in configuration
            return self._DGConfig[chan].get('scale', 1.0)
    def getChanOffset(self, chan):
        with self._DGLock:
            ## Offset defaults to 0.0
            ## - can be overridden in configuration
            return self._DGConfig[chan].get('offset', 0.0)
    def getChanUnits(self, ch):
        """Return the configured units for *ch*, or None if unconfigured."""
        with self._DGLock:
            if 'units' in self._DGConfig[ch]:
                return self._DGConfig[ch]['units']
            else:
                return None
    def isOutput(self, chan):
        """True if *chan* is an output channel ('ao' or 'do')."""
        with self._DGLock:
            return self._DGConfig[chan]['type'][1] == 'o'
    def listChannels(self):
        """Return a dict of {channelName: configCopy} for all channels."""
        with self._DGLock:
            return dict([(ch, self._DGConfig[ch].copy()) for ch in self._DGConfig])
class DAQGenericTask(DeviceTask):
    """One execution of a DAQGeneric device within a task runner protocol.

    *cmd* maps channel names to per-channel options such as 'command'
    (output waveform), 'record', 'preset', 'holding', 'recordInit', and
    'lowLevelConf'.
    """
    def __init__(self, dev, cmd, parentTask):
        DeviceTask.__init__(self, dev, cmd, parentTask)
        self.daqTasks = {}       # {channel: daqTask} for channels we buffered
        self.initialState = {}   # pre-task values for channels with recordInit
        self._DAQCmd = cmd
        ## Stores the list of channels that will generate or acquire buffered samples
        self.bufferedChannels = []
    def getConfigOrder(self):
        """return lists of devices that should be configured (before, after) this device"""
        daqs = set([self.dev.getDAQName(ch) for ch in self._DAQCmd])
        return [], list(daqs)  ## this device should be configured before its DAQs
    def configure(self):
        ## Record initial state or set initial value
        ## NOTE:
        ## Subclasses should call this function only _after_ making any changes that will affect the mapping between
        ## physical values and channel voltages.
        prof = Profiler('DAQGenericTask.configure', disabled=True)
        # self.daqTasks = {}
        self.mapping = self.dev.getMapping(chans=list(
            self._DAQCmd.keys()))  ## remember the mapping so we can properly translate data after it has been returned
        self.initialState = {}
        self.holdingVals = {}
        for ch in self._DAQCmd:
            # dev = self.dev.dm.getDevice(self.dev._DGConfig[ch]['channel'][0])
            dev = self.dev.dm.getDevice(self.dev.getDAQName(ch))
            prof.mark(ch + ' get dev')
            if 'preset' in self._DAQCmd[ch]:
                # apply an immediate value before the task starts
                with self.dev._DGLock:
                    daqChan = self.dev._DGConfig[ch]['channel']
                # dev.setChannelValue(self.dev._DGConfig[ch]['channel'][1], self._DAQCmd[ch]['preset'])
                preVal = self.mapping.mapToDaq(ch, self._DAQCmd[ch]['preset'])
                dev.setChannelValue(daqChan, preVal)
                prof.mark(ch + ' preset')
            elif 'holding' in self._DAQCmd[ch]:
                self.dev.setChanHolding(ch, self._DAQCmd[ch]['holding'])
                prof.mark(ch + ' set holding')
            if 'recordInit' in self._DAQCmd[ch] and self._DAQCmd[ch]['recordInit']:
                self.initialState[ch] = self.dev.getChannelValue(ch)
                prof.mark(ch + ' record init')
        for ch in self.dev._DGConfig:
            ## record current holding value for all output channels (even those that were not buffered for this task)
            with self.dev._DGLock:
                chanType = self.dev._DGConfig[ch]['type']
                if chanType in ['ao', 'do']:
                    self.holdingVals[ch] = self.dev.getChanHolding(ch)
                    prof.mark(ch + ' record holding')
        prof.finish()
    def createChannels(self, daqTask):
        """Add this device's channels to *daqTask* (called once per DAQ).

        Output channels get their command waveform written; input channels
        are added only when recording was requested.
        """
        self.daqTasks = {}
        # print "createChannels"
        ## Is this the correct DAQ device for any of my channels?
        ## create needed channels + info
        ## write waveform to command channel if needed
        chans = self.dev.listChannels()
        for ch in chans:
            # print "  creating channel %s.." % ch
            if ch not in self._DAQCmd:
                # print "    ignoring channel", ch, "not in command"
                continue
            chConf = chans[ch]
            if chConf['device'] != daqTask.devName():
                # print "    ignoring channel", ch, "wrong device"
                continue
            ## Input channels are only used if the command has record: True
            if chConf['type'] in ['ai', 'di']:
                # if ('record' not in self._DAQCmd[ch]) or (not self._DAQCmd[ch]['record']):
                if not self._DAQCmd[ch].get('record', False):
                    # print "    ignoring channel", ch, "recording disabled"
                    continue
            ## Output channels are only added if they have a command waveform specified
            elif chConf['type'] in ['ao', 'do']:
                if 'command' not in self._DAQCmd[ch]:
                    # print "    ignoring channel", ch, "no command"
                    continue
            self.bufferedChannels.append(ch)
            # _DAQCmd[ch]['task'] = daqTask  ## ALSO DON't FORGET TO DELETE IT, ASS.
            if chConf['type'] in ['ao', 'do']:
                # scale = self.getChanScale(ch)
                cmdData = self._DAQCmd[ch]['command']
                if cmdData is None:
                    # print "No command for channel %s, skipping." % ch
                    continue
                # cmdData = cmdData * scale
                ## apply scale, offset or inversion for output lines
                cmdData = self.mapping.mapToDaq(ch, cmdData)
                # print "channel", chConf['channel'][1], cmdData
                if chConf['type'] == 'do':
                    # digital output: any positive value becomes all-ones
                    cmdData = cmdData.astype(np.uint32)
                    cmdData[cmdData <= 0] = 0
                    cmdData[cmdData > 0] = 0xFFFFFFFF
                # print "channel", self._DAQCmd[ch]
                # print "LOW LEVEL:", self._DAQCmd[ch].get('lowLevelConf', {})
                daqTask.addChannel(chConf['channel'], chConf['type'], **self._DAQCmd[ch].get('lowLevelConf', {}))
                self.daqTasks[ch] = daqTask  ## remember task so we can stop it later on
                daqTask.setWaveform(chConf['channel'], cmdData)
                # print "DO task %s has type" % ch, cmdData.dtype
            elif chConf['type'] == 'ai':
                mode = chConf.get('mode', None)
                # if len(chConf['channel']) > 2:
                # mode = chConf['channel'][2]
                # print "Adding channel %s to DAQ task" % chConf['channel'][1]
                daqTask.addChannel(chConf['channel'], chConf['type'], mode=mode,
                                   **self._DAQCmd[ch].get('lowLevelConf', {}))
                self.daqTasks[ch] = daqTask  ## remember task so we can stop it later on
            elif chConf['type'] == 'di':
                daqTask.addChannel(chConf['channel'], chConf['type'], **self._DAQCmd[ch].get('lowLevelConf', {}))
                self.daqTasks[ch] = daqTask  ## remember task so we can stop it later on
    def getChanUnits(self, chan):
        """Units for *chan*: per-command override first, then device config."""
        if 'units' in self._DAQCmd[chan]:
            return self._DAQCmd[chan]['units']
        else:
            return self.dev.getChanUnits(chan)
    def start(self):
        ## possibly nothing required here, DAQ will start recording without our help.
        pass
    def isDone(self):
        ## DAQ task handles this for us.
        return True
    def stop(self, abort=False):
        """Stop all DAQ tasks, then restore holding levels on outputs."""
        # with self.dev._DGLock:  ##not necessary
        ## Stop DAQ tasks before setting holding level.
        # print "STOP"
        prof = Profiler(disabled=True)
        for ch in self.daqTasks:
            # print "Stop task", self.daqTasks[ch]
            try:
                self.daqTasks[ch].stop(abort=abort)
            except:
                # best-effort: continue stopping remaining tasks
                printExc("Error while stopping DAQ task:")
            prof('stop %s' % ch)
        for ch in self._DAQCmd:
            if 'holding' in self._DAQCmd[ch]:
                self.dev.setChanHolding(ch, self._DAQCmd[ch]['holding'])
                prof('set holding %s' % ch)
            elif self.dev.isOutput(ch):  ## return all output channels to holding value
                self.dev.setChanHolding(ch)
                prof('reset to holding %s' % ch)
    def getResult(self):
        """Collect recorded data from the DAQ tasks and return a MetaArray
        (Channel x Time) with DAQ state and protocol info attached, or None
        if no channels were buffered."""
        ## Access data recorded from DAQ task
        ## create MetaArray and fill with MC state info
        ## Collect data and info for each channel in the command
        result = {}
        for ch in self.bufferedChannels:
            result[ch] = self.daqTasks[ch].getData(self.dev._DGConfig[ch]['channel'])
            result[ch]['data'] = self.mapping.mapFromDaq(ch, result[ch]['data'])  ## scale/offset/invert
            result[ch]['units'] = self.getChanUnits(ch)
        if len(result) > 0:
            meta = result[list(result.keys())[0]]['info']
            rate = meta['rate']
            nPts = meta['numPts']
            ## Create an array of time values
            timeVals = np.linspace(0, float(nPts - 1) / float(rate), nPts)
            ## Concatenate all channels together into a single array, generate MetaArray info
            chanList = [np.atleast_2d(result[x]['data']) for x in result]
            cols = [(x, result[x]['units']) for x in result]
            # print cols
            try:
                arr = np.concatenate(chanList)
            except:
                print(chanList)
                print([a.shape for a in chanList])
                raise
            daqState = OrderedDict()
            for ch in self.dev._DGConfig:
                if ch in result:
                    daqState[ch] = result[ch]['info']
                else:
                    daqState[ch] = {}
                ## record current holding value for all output channels (even those that were not buffered for this task)
                if self.dev._DGConfig[ch]['type'] in ['ao', 'do']:
                    daqState[ch]['holding'] = self.holdingVals[ch]
            info = [axis(name='Channel', cols=cols), axis(name='Time', units='s', values=timeVals)] + [
                {'DAQ': daqState}]
            protInfo = self._DAQCmd.copy()  ## copy everything but the command arrays and low-level configuration info
            for ch in protInfo:
                protInfo[ch].pop('command', None)
                protInfo[ch].pop('lowLevelConf', None)
            info[-1]['Protocol'] = protInfo
            marr = MetaArray(arr, info=info)
            return marr
        else:
            return None
    def storeResult(self, dirHandle):
        """Store results; additionally saves pre-task values for channels
        that requested recordInit."""
        DeviceTask.storeResult(self, dirHandle)
        for ch in self._DAQCmd:
            if self._DAQCmd[ch].get('recordInit', False):
                # if 'recordInit' in self._DAQCmd[ch] and self._DAQCmd[ch]['recordInit']:
                dirHandle.setInfo({(self.dev.name(), ch): self.initialState[ch]})
class DAQDevGui(Qt.QWidget):
    """Device-rack widget: one row of controls (scale/offset/holding/invert)
    per channel of a DAQGeneric device."""
    def __init__(self, dev):
        self.dev = dev
        Qt.QWidget.__init__(self)
        self.layout = Qt.QVBoxLayout()
        self.setLayout(self.layout)
        chans = self.dev.listChannels()
        self.widgets = {}   # {channelName: Ui_Form instance}
        # self.uis = {}
        self.defaults = {}  # per-channel default scale/offset for the reset buttons
        for ch in chans:
            wid = Qt.QWidget()
            ui = Ui_Form()
            ui.setupUi(wid)
            self.layout.addWidget(wid)
            ui.analogCtrls = [ui.scaleDefaultBtn, ui.scaleSpin, ui.offsetDefaultBtn, ui.offsetSpin, ui.scaleLabel,
                              ui.offsetLabel]
            # ui.channel = ch
            # Tag every sub-widget with its channel name so signal handlers
            # can recover the channel from self.sender().
            for s in dir(ui):
                i = getattr(ui, s)
                if isinstance(i, Qt.QWidget):
                    i.channel = ch
            self.widgets[ch] = ui
            ui.nameLabel.setText(str(ch))
            ui.channelCombo.addItem("%s (%s)" % (ch, chans[ch]['channel']))
            holding = chans[ch].get('holding', 0)
            if chans[ch]['type'] in ['ao', 'ai']:
                # channel direction is fixed by configuration; disable the radios
                ui.inputRadio.setEnabled(False)
                ui.outputRadio.setEnabled(False)
                ui.invertCheck.hide()
                scale = chans[ch].get('scale', 1)
                units = chans[ch].get('units', 'V')
                offset = chans[ch].get('offset', 0)
                ui.offsetSpin.setOpts(suffix='V', siPrefix=True, dec=True, step=1.0, minStep=1e-4)
                ui.offsetSpin.setValue(offset)
                ui.offsetSpin.sigValueChanged.connect(self.offsetSpinChanged)
                ui.offsetDefaultBtn.setText("Default (%s)" % siFormat(offset, suffix='V'))
                ui.offsetDefaultBtn.clicked.connect(self.offsetDefaultBtnClicked)
                if chans[ch]['type'] == 'ao':
                    ui.outputRadio.setChecked(True)
                    ui.scaleDefaultBtn.setText("Default (%s)" % siFormat(scale, suffix='V/' + units))
                    ui.scaleSpin.setOpts(suffix='V/' + units, siPrefix=True, dec=True, step=1.0, minStep=1e-9)
                    ui.holdingSpin.setOpts(suffix=units, siPrefix=True, step=0.01)
                    ui.holdingSpin.setValue(holding)
                    ui.holdingSpin.sigValueChanged.connect(self.holdingSpinChanged)
                elif chans[ch]['type'] == 'ai':
                    ui.inputRadio.setChecked(True)
                    ui.holdingLabel.hide()
                    ui.holdingSpin.hide()
                    ui.scaleDefaultBtn.setText("Default (%s)" % siFormat(scale, suffix=units + '/V'))
                    # ui.scaleDefaultBtn.clicked.connect(self.scaleDefaultBtnClicked)
                    ui.scaleSpin.setOpts(suffix=units + '/V', siPrefix=True, dec=True)
                ui.scaleSpin.setValue(scale)
                ui.scaleDefaultBtn.clicked.connect(self.scaleDefaultBtnClicked)
                ui.scaleSpin.sigValueChanged.connect(self.scaleSpinChanged)
                self.defaults[ch] = {
                    'scale': scale,
                    'offset': offset}
            elif chans[ch]['type'] in ['do', 'di']:
                # digital channels have no scale/offset controls
                for item in ui.analogCtrls:
                    item.hide()
                if chans[ch].get('invert', False):
                    ui.invertCheck.setChecked(True)
                if chans[ch]['type'] == 'do':
                    ui.outputRadio.setChecked(True)
                    ui.holdingSpin.setOpts(bounds=[0, 1], step=1)
                    ui.holdingSpin.setValue(holding)
                    ui.holdingSpin.sigValueChanged.connect(self.holdingSpinChanged)
                elif chans[ch]['type'] == 'di':
                    ui.inputRadio.setChecked(True)
                    ui.holdingLabel.hide()
                    ui.holdingSpin.hide()
                ui.invertCheck.toggled.connect(self.invertToggled)
        # Qt.QObject.connect(self.dev, Qt.SIGNAL('holdingChanged'), self.holdingChanged)
        self.dev.sigHoldingChanged.connect(self.holdingChanged)
    def holdingChanged(self, ch, val):
        # Device-originated change: update the spin without re-emitting.
        self.widgets[ch].holdingSpin.blockSignals(True)
        self.widgets[ch].holdingSpin.setValue(val)
        self.widgets[ch].holdingSpin.blockSignals(False)
    def holdingSpinChanged(self, spin):
        ch = spin.channel
        self.dev.setChanHolding(ch, spin.value(), block=False)
    def scaleSpinChanged(self, spin):
        ch = spin.channel
        self.dev.setChanScale(ch, spin.value(), block=False)
    def offsetSpinChanged(self, spin):
        ch = spin.channel
        self.dev.setChanOffset(ch, spin.value(), block=False)
    def offsetDefaultBtnClicked(self):
        ch = self.sender().channel
        self.widgets[ch].offsetSpin.setValue(self.defaults[ch]['offset'])
    def scaleDefaultBtnClicked(self):
        ch = self.sender().channel
        self.widgets[ch].scaleSpin.setValue(self.defaults[ch]['scale'])
    def invertToggled(self, b):
        # Inversion is implemented as scale=-1 with a compensating offset.
        ch = self.sender().channel
        if b:
            self.dev.setChanScale(ch, -1, update=False)
            self.dev.setChanOffset(ch, 1)
        else:
            self.dev.setChanScale(ch, 1, update=False)
            self.dev.setChanOffset(ch, 0)
| acq4/acq4 | acq4/devices/DAQGeneric/DAQGeneric.py | Python | mit | 25,249 |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import Http404, JsonResponse, HttpResponseForbidden
from django.shortcuts import render, redirect, get_object_or_404
from .forms import PlaylistForm
from .models import Playlist
def form_data(user, form):
    """Build the common template context for the playlist form pages."""
    return {
        'owner': user,
        'playlist_form': form,
    }
def playlists(request, user_id):
    """List a user's playlists (GET) or create a new one (POST).

    GET renders all playlists owned by the user at *user_id*.
    POST creates a playlist for the authenticated user; responds 403 when
    the authenticated user does not match the URL's user_id, and re-renders
    the form on validation errors.
    """
    user = get_object_or_404(User, id=user_id)
    if request.method == 'GET':
        data = {
            'owner': user,
            'playlists': Playlist.objects.filter(user=user),
        }
        return render(request, 'playlist/index.html', data)
    elif request.method == 'POST':
        # Check if user matches URL
        if request.user != user:
            return HttpResponseForbidden()
        form = PlaylistForm(request.POST)
        if form.is_valid():
            # commit=False so the owner can be attached before saving
            playlist = form.save(commit=False)
            playlist.user = request.user
            playlist.save()
            return redirect('playlist:all', user_id)
        else:
            data = form_data(user, form)
            return render(request, 'playlist/form.html', data)
@login_required
def create_view(request, user_id):
    """Render an empty playlist-creation form for the logged-in user.

    Visiting another user's create URL redirects to the caller's own.
    """
    user = get_object_or_404(User, id=user_id)
    # Check if user matches URL
    if request.user != user:
        return redirect('playlist:create', request.user.id)
    data = form_data(user, PlaylistForm())
    return render(request, 'playlist/form.html', data)
def playlist(request, user_id, playlist_id):
    """Show (GET) or delete/update (POST with ?action=...) one playlist.

    POST requires ownership (403 otherwise).  ``?action=delete`` removes
    the playlist; ``?action=update`` validates and saves the submitted
    form.  A missing or unknown action falls through to the redirect
    without changing anything.
    """
    user = get_object_or_404(User, id=user_id)
    playlist = get_object_or_404(Playlist, id=playlist_id, user=user)
    if request.method == 'GET':
        data = {
            'owner': user,
            'playlist': playlist,
        }
        return render(request, 'playlist/playlist.html', data)
    elif request.method == 'POST':
        # Check if user owns playlist
        if request.user != playlist.user:
            return HttpResponseForbidden()
        action = request.GET.get('action', False)
        if action:
            if action == 'delete':
                playlist.delete()
            elif action == 'update':
                form = PlaylistForm(request.POST, instance=playlist)
                if form.is_valid():
                    playlist = form.save()
                else:
                    # invalid form: re-render with errors instead of redirecting
                    data = form_data(user, form)
                    return render(request, 'playlist/form.html', data)
        return redirect('playlist:all', user_id)
@login_required
def edit_view(request, user_id, playlist_id):
    """Render the edit form for a playlist, pre-filled with its data.

    Non-owners are redirected to the owner's playlist listing.
    """
    user = get_object_or_404(User, id=user_id)
    playlist = get_object_or_404(Playlist, id=playlist_id, user=user)
    # Check if playlist belongs to logged in user
    if request.user != playlist.user:
        return redirect('playlist:all', playlist.user.id)
    data = form_data(user, PlaylistForm(instance=playlist))
    return render(request, 'playlist/form.html', data)
import uctypes
# MicroPython uctypes bitfield demo: describe a one-byte accelerometer
# config register.  Each entry encodes the field's bit position (BF_POS)
# and width (BF_LEN) within byte offset 0.
ACCEL_CONFIG = {
    'x_self_test' : uctypes.BFUINT8 | 0 | 7 << uctypes.BF_POS | 1 << uctypes.BF_LEN,  # bit 7
    'y_self_test' : uctypes.BFUINT8 | 0 | 6 << uctypes.BF_POS | 1 << uctypes.BF_LEN,  # bit 6
    'z_self_test' : uctypes.BFUINT8 | 0 | 5 << uctypes.BF_POS | 1 << uctypes.BF_LEN,  # bit 5
    'range' : uctypes.BFUINT8 | 0 | 3 << uctypes.BF_POS | 2 << uctypes.BF_LEN,  # bits 3-4
}
buf = bytearray(1)
buf[0] = 0xa8  # 0b1010_1000 -> x=1, y=0, z=1, range=0b01
print('buf[0] =', hex(buf[0]))
# Overlay the bitfield struct directly on the buffer's memory (zero-copy).
accel_config = uctypes.struct(ACCEL_CONFIG, uctypes.addressof(buf))
print('x_self_test =', accel_config.x_self_test)
print('y_self_test =', accel_config.y_self_test)
print('z_self_test =', accel_config.z_self_test)
print('range =', accel_config.range)
# Writing a field updates the underlying byte: setting bit 6 -> 0xe8.
accel_config.y_self_test = 1
print('buf[0] =', hex(buf[0]))
| dhylands/upy-examples | uctypes_test.py | Python | mit | 763 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point at this project's settings module
    # (unless already set) and hand the CLI arguments to Django.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "like.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| bartTC/like | manage.py | Python | mit | 245 |
import math
import time
# Retry decorator with exponential backoff
# Retry decorator with exponential backoff
def retry(tries, delay=1, backoff=2):
    """Retry a function or method until it succeeds.

    A call succeeds when it returns True or a string; that value is
    returned to the caller.  Any other return value counts as a failure.
    After the initial attempt, up to *tries* retries are made, sleeping
    *delay* seconds before the first retry and multiplying the sleep by
    *backoff* after each failure.  Returns False when every attempt fails.

    Bug fix: the original discarded the result of the final attempt — it
    slept, called the function one last time, then fell out of its loop
    and returned False without ever inspecting that last return value.

    Raises ValueError for backoff <= 1, tries < 0, or delay <= 0.
    """
    if backoff <= 1:
        raise ValueError("backoff must be greater than 1")

    tries = math.floor(tries)
    if tries < 0:
        raise ValueError("tries must be 0 or greater")

    if delay <= 0:
        raise ValueError("delay must be greater than 0")

    def deco_retry(f):
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay  # mutable per-call copies
            while True:
                rv = f(*args, **kwargs)
                if rv == True or isinstance(rv, str):  # success -> done
                    return rv
                if mtries <= 0:  # ran out of attempts :-(
                    return False
                mtries -= 1          # consume an attempt
                time.sleep(mdelay)   # wait...
                mdelay *= backoff    # make future wait longer
        return f_retry  # true decorator -> decorated function

    return deco_retry  # @retry(arg[, ...]) -> true decorator
import unittest, tempfile, uuid, os, shutil, sys
class Test_CreateRemoveTempLocation(unittest.TestCase):
    """Base test case that creates a unique temp folder per test, adds it to
    sys.path in setUp, and fully cleans both up in tearDown."""

    def __init__(self, *args, **kwargs):
        super(Test_CreateRemoveTempLocation, self).__init__(*args, **kwargs)
        self._tmpTestFolder = None

    def setUp(self):
        # Unique name per run so concurrent/repeated tests cannot collide.
        self._tmpTestFolder = os.path.join(tempfile.gettempdir(),
            "unittest_%s_%s" % (self.__class__.__name__, str( uuid.uuid4() )[:8]))
        os.mkdir(self._tmpTestFolder)
        sys.path.append(self._tmpTestFolder)

    def tearDown(self):
        if not self._tmpTestFolder: return
        # Bug fix: remove the folder from sys.path BEFORE clearing the
        # attribute.  The original nulled self._tmpTestFolder first, so the
        # sys.path membership check always failed and the entry leaked
        # between tests.
        if self._tmpTestFolder in sys.path:
            sys.path.remove(self._tmpTestFolder)
        if os.path.isdir(self._tmpTestFolder):
            # print(...) form works identically on Python 2 and 3 here.
            print("removing test folder: '%s'" % self._tmpTestFolder)
            shutil.rmtree(self._tmpTestFolder)
        self._tmpTestFolder = None
| yukioSatoh/ysPyCommon | tests/test_ysPyCommon/__init__.py | Python | mit | 941 |
#!/usr/bin/env python
# (fixed shebang: was missing the '!' after '#')
from fsm import Machine
# Example DFA over the alphabet {'0', '1'}; q2 is the only accepting state.
states = ["q1", "q2", "q3"]
alphabet = ["0","1"]
# transitions[state][symbol] -> next state
transitions = {
    "q1": {"0": "q1", "1": "q2"},
    "q2": {"0": "q3", "1": "q2"},
    "q3": {"0": "q2", "1": "q2"},
}
start = "q1"
end = ["q2"]
machine = Machine.from_arguments(states, alphabet, transitions, start, end)
# Per the annotations below, non-string input and the empty string are
# expected to fail; the remaining inputs end in the accepting state q2.
machine.run(123) # fail
machine.run("") # fail
machine.run("1") # pass
machine.run("11") # pass
machine.run("0100101") # pass
| bnookala/fsm | example.py | Python | mit | 448 |
'''
Ugly - will be improved soon
'''
from datetime import datetime,timedelta
def archive_timestamp(timestamp):
    """Reduce a 'YYYYMMDDHHMMSS' archive stamp to its 'YYYYMMDD' date part.

    Raises ValueError if *timestamp* does not match the expected format.
    """
    return datetime.strptime(timestamp, '%Y%m%d%H%M%S').strftime('%Y%m%d')
def way_date(date_input):
    """Convert between 'YYYYMMDD' strings and datetime objects.

    A string is parsed into a datetime; a datetime is formatted back into a
    'YYYYMMDD' string.  Any other type raises TypeError.
    """
    if isinstance(date_input, str):
        return datetime.strptime(date_input, '%Y%m%d')
    if isinstance(date_input, datetime):
        return date_input.strftime('%Y%m%d')
    raise TypeError('Only string or datetime objects')
class Dateess:
    """Date-range helper backed by a nested start/end dict.

    Unset months default to January/December and unset days to the first /
    actual last day of the month when the range endpoints are built.

    Bug fixes versus the original:
    * ``start_day`` / ``end_day`` setters were decorated with
      ``@start_month.setter`` / ``@end_month.setter``, which rebound the
      day properties with the *month* getters — reading ``start_day``
      returned the start month (and ``start_date`` then skipped the
      day-defaulting, producing ``day=0`` errors).
    * ``_archive_stamp`` was never initialized, so the ``archive_timestamp``
      getter raised AttributeError instead of the intended TypeError.
    * ``end_date`` blindly defaulted the day to 31, raising ValueError for
      months with fewer days.
    """

    def __init__(self, full_year):
        self._d = {'date_range': {
            'start': {'year': 0, 'month': 0, 'day': 0},
            'end': {'year': 0, 'month': 0, 'day': 0}}}
        self.full_year = full_year
        self._utc = datetime.utcnow()
        # Initialize so the archive_timestamp getter raises the intended
        # TypeError (not AttributeError) when read before being set.
        self._archive_stamp = None

    @property
    def start_year(self):
        """Year of the range start (0 when unset)."""
        return self._d['date_range']['start']['year']

    @start_year.setter
    def start_year(self, year):
        self._d['date_range']['start']['year'] = year

    @property
    def end_year(self):
        """Year of the range end (0 when unset)."""
        return self._d['date_range']['end']['year']

    @end_year.setter
    def end_year(self, year):
        self._d['date_range']['end']['year'] = year

    @property
    def start_month(self):
        """Month of the range start (0 when unset)."""
        return self._d['date_range']['start']['month']

    @start_month.setter
    def start_month(self, month):
        self._d['date_range']['start']['month'] = month

    @property
    def end_month(self):
        """Month of the range end (0 when unset)."""
        return self._d['date_range']['end']['month']

    @end_month.setter
    def end_month(self, month):
        self._d['date_range']['end']['month'] = month

    @property
    def start_day(self):
        """Day of the range start (0 when unset)."""
        return self._d['date_range']['start']['day']

    # bug fix: was @start_month.setter, which clobbered this property
    @start_day.setter
    def start_day(self, day):
        self._d['date_range']['start']['day'] = day

    @property
    def end_day(self):
        """Day of the range end (0 when unset)."""
        return self._d['date_range']['end']['day']

    # bug fix: was @end_month.setter, which clobbered this property
    @end_day.setter
    def end_day(self, day):
        self._d['date_range']['end']['day'] = day

    @property
    def start_date(self):
        """datetime of the range start; defaults month/day to 1 when unset."""
        if not self.start_month:
            self.start_month = 1
        if not self.start_day:
            self.start_day = 1
        return datetime(year=self.start_year,
                        month=self.start_month,
                        day=self.start_day)

    @property
    def end_date(self):
        """datetime of the range end; defaults month to 12 and day to the
        month's actual last day when unset."""
        if not self.end_month:
            self.end_month = 12
        if not self.end_day:
            # bug fix: a blind default of 31 raised ValueError for months
            # with fewer than 31 days.
            import calendar
            self.end_day = calendar.monthrange(self.end_year, self.end_month)[1]
        return datetime(year=self.end_year,
                        month=self.end_month,
                        day=self.end_day)

    @property
    def date_range(self):
        """List of 'YYYYMMDD' strings from start_date through end_date, inclusive."""
        start = self.start_date
        total_days = (self.end_date - start).days
        return [(start + timedelta(days=offset)).strftime('%Y%m%d')
                for offset in range(total_days + 1)]

    @property
    def archive_timestamp(self):
        """The stored archive stamp as 'YYYYMMDD'; TypeError when unset."""
        if not self._archive_stamp:
            raise TypeError('[-] Archive timestamp no set')
        return self._archive_stamp.strftime('%Y%m%d')

    @archive_timestamp.setter
    def archive_timestamp(self, archive_stamp):
        # Accepts a 'YYYYMMDDHHMMSS' string.
        self._archive_stamp = datetime.strptime(archive_stamp, '%Y%m%d%H%M%S')

    def intime(self, archive_stamp):
        """Return True if *archive_stamp* ('YYYYMMDDHHMMSS') falls inside
        [start_date, end_date] (stored as a side effect)."""
        self.archive_timestamp = archive_stamp
        stamp = self._archive_stamp
        return self.start_date <= stamp <= self.end_date

    @staticmethod
    def load(site_config):
        """Build a Dateess from ``site_config['date_range']``.

        Requires 'full_year', 'start' and 'end' keys, with 'year' in both
        endpoints; 'month'/'day' are optional.  NOTE: pops 'full_year'
        from the input dict (mutates the caller's config).
        """
        if not 'date_range' in site_config:
            raise TypeError('[-] date_range is required')
        try:
            full_year = site_config['date_range'].pop('full_year')
            startis = site_config['date_range']['start']
            endis = site_config['date_range']['end']
        except KeyError as e:
            raise Exception('[-] Missing {} key'.format(e))
        if not 'year' in startis or not 'year' in endis:
            raise Exception('[-] Setting year range is required')
        dss = Dateess(full_year)
        dss.start_year = startis['year']
        dss.end_year = endis['year']
        if startis.get('month'):
            dss.start_month = startis['month']
            dss.end_month = endis['month']
        if startis.get('day'):
            dss.start_day = startis['day']
            dss.end_day = endis['day']
        return dss
| VulcanoAhab/waybackeess | date.py | Python | mit | 4,845 |
# Import the JModelica.org Python packages
import pymodelica
from pymodelica.compiler_wrappers import ModelicaCompiler
# Create a compiler and compiler target object
mc = ModelicaCompiler()
# Build trees as if for an FMU or Model Exchange v 1.0
#target = mc.create_target_object("me", "1.0")
source = mc.parse_model("CauerLowPassAnalog.mo")
indent_amount = 2
def dump(src, fid, indent=0):
    """Recursively write the compiler AST rooted at *src* to *fid*,
    indenting each level by indent_amount spaces.

    The bare excepts are deliberate best-effort handling: nodes that lack
    getNodeName/numChild/children are reported inline instead of aborting
    the whole dump.  (Python 2 script — JModelica ships with Python 2.)
    """
    ind = " " * (indent_amount * indent)
    try:
        fid.write(ind + src.getNodeName() + "\n")
    except:
        fid.write(ind + "exception: " + str(src) + "\n")
    try:
        for idx in range(src.numChild):
            dump(src.children[idx], fid, indent+1)
    except:
        fid.write(ind + "(exception)\n")
# dump the filter instance
with open('out.txt', 'w') as fid:
    dump(source, fid, 0)
print "DONE!"
| michael-okeefe/soep-sandbox | src/python2/dump_source.py | Python | mit | 815 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\taskTemplate.ui'
#
# Created: Thu Oct 08 16:48:34 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 with QString support (Python 2, API v1): use Qt's UTF-8 helper.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / Python 3: strings are already unicode, so pass through.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Qt >= 4.8 dropped the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """pyuic4-generated UI builder for the laser task-template form.

    Generated from ``taskTemplate.ui`` -- do not edit by hand; regenerate
    from the .ui file instead (any manual change will be lost, per the
    file header).
    """
    def setupUi(self, Form):
        """Instantiate and lay out all widgets on *Form*."""
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(218, 236)
        self.gridLayout_2 = QtGui.QGridLayout(Form)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        # Control-mode radio group (power waveform vs switch waveform).
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setContentsMargins(3, 0, 3, 3)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.powerWaveRadio = QtGui.QRadioButton(self.groupBox)
        self.powerWaveRadio.setChecked(True)
        self.powerWaveRadio.setObjectName(_fromUtf8("powerWaveRadio"))
        self.gridLayout.addWidget(self.powerWaveRadio, 0, 0, 1, 1)
        self.switchWaveRadio = QtGui.QRadioButton(self.groupBox)
        self.switchWaveRadio.setObjectName(_fromUtf8("switchWaveRadio"))
        self.gridLayout.addWidget(self.switchWaveRadio, 1, 0, 1, 1)
        self.gridLayout_2.addWidget(self.groupBox, 5, 0, 1, 3)
        # Optional wavelength selection (checkbox + spinbox, in nm).
        self.wavelengthWidget = QtGui.QWidget(Form)
        self.wavelengthWidget.setObjectName(_fromUtf8("wavelengthWidget"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.wavelengthWidget)
        self.horizontalLayout.setSpacing(0)
        self.horizontalLayout.setMargin(0)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.setWavelengthCheck = QtGui.QCheckBox(self.wavelengthWidget)
        self.setWavelengthCheck.setObjectName(_fromUtf8("setWavelengthCheck"))
        self.horizontalLayout.addWidget(self.setWavelengthCheck)
        self.wavelengthSpin = QtGui.QSpinBox(self.wavelengthWidget)
        self.wavelengthSpin.setMaximum(4000)
        self.wavelengthSpin.setSingleStep(10)
        self.wavelengthSpin.setProperty("value", 1080)
        self.wavelengthSpin.setObjectName(_fromUtf8("wavelengthSpin"))
        self.horizontalLayout.addWidget(self.wavelengthSpin)
        self.gridLayout_2.addWidget(self.wavelengthWidget, 4, 0, 1, 3)
        # Power readout labels and manual check button.
        self.label_2 = QtGui.QLabel(Form)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)
        self.outputPowerLabel = QtGui.QLabel(Form)
        self.outputPowerLabel.setObjectName(_fromUtf8("outputPowerLabel"))
        self.gridLayout_2.addWidget(self.outputPowerLabel, 0, 1, 1, 1)
        self.checkPowerBtn = QtGui.QPushButton(Form)
        self.checkPowerBtn.setObjectName(_fromUtf8("checkPowerBtn"))
        self.gridLayout_2.addWidget(self.checkPowerBtn, 0, 2, 1, 1)
        self.label_3 = QtGui.QLabel(Form)
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.label_3.setFont(font)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1)
        self.samplePowerLabel = QtGui.QLabel(Form)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.samplePowerLabel.setFont(font)
        self.samplePowerLabel.setObjectName(_fromUtf8("samplePowerLabel"))
        self.gridLayout_2.addWidget(self.samplePowerLabel, 1, 1, 1, 1)
        # Behavior checkboxes and shutter-release radio group.
        self.adjustLengthCheck = QtGui.QCheckBox(Form)
        self.adjustLengthCheck.setChecked(True)
        self.adjustLengthCheck.setTristate(False)
        self.adjustLengthCheck.setObjectName(_fromUtf8("adjustLengthCheck"))
        self.gridLayout_2.addWidget(self.adjustLengthCheck, 3, 0, 1, 3)
        self.checkPowerCheck = QtGui.QCheckBox(Form)
        self.checkPowerCheck.setChecked(True)
        self.checkPowerCheck.setObjectName(_fromUtf8("checkPowerCheck"))
        self.gridLayout_2.addWidget(self.checkPowerCheck, 2, 0, 1, 3)
        self.releaseBetweenTasks = QtGui.QRadioButton(Form)
        self.releaseBetweenTasks.setObjectName(_fromUtf8("releaseBetweenTasks"))
        self.releaseButtonGroup = QtGui.QButtonGroup(Form)
        self.releaseButtonGroup.setObjectName(_fromUtf8("releaseButtonGroup"))
        self.releaseButtonGroup.addButton(self.releaseBetweenTasks)
        self.gridLayout_2.addWidget(self.releaseBetweenTasks, 6, 0, 1, 3)
        self.releaseAfterSequence = QtGui.QRadioButton(Form)
        self.releaseAfterSequence.setChecked(True)
        self.releaseAfterSequence.setObjectName(_fromUtf8("releaseAfterSequence"))
        self.releaseButtonGroup.addButton(self.releaseAfterSequence)
        self.gridLayout_2.addWidget(self.releaseAfterSequence, 7, 0, 1, 3)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Assign all user-visible (translatable) strings."""
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "Control Mode:", None))
        self.powerWaveRadio.setText(_translate("Form", "Power waveform (W)", None))
        self.switchWaveRadio.setText(_translate("Form", "Switch waveform (%)", None))
        self.setWavelengthCheck.setText(_translate("Form", "Set wavelength", None))
        self.wavelengthSpin.setSuffix(_translate("Form", " nm", None))
        self.label_2.setText(_translate("Form", "Output Power:", None))
        self.outputPowerLabel.setText(_translate("Form", "0mW", None))
        self.checkPowerBtn.setText(_translate("Form", "Check Power", None))
        self.label_3.setText(_translate("Form", "Power at Sample:", None))
        self.samplePowerLabel.setText(_translate("Form", "0mW", None))
        self.adjustLengthCheck.setToolTip(_translate("Form", "If the output power of the laser changes, adjust the length of laser pulses to maintain constant pulse energy.", None))
        self.adjustLengthCheck.setText(_translate("Form", "Adjust pulse length if power changes", None))
        self.checkPowerCheck.setText(_translate("Form", "Check power before task start", None))
        self.releaseBetweenTasks.setText(_translate("Form", "Release between tasks", None))
        self.releaseAfterSequence.setText(_translate("Form", "Release after sequence", None))
| mgraupe/acq4 | acq4/devices/Laser/taskTemplate.py | Python | mit | 6,745 |
# https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
from TreeNode import TreeNode
class Solution(object):
    """LeetCode 108: convert a sorted array to a height-balanced BST."""

    def sortedArrayToBST(self, nums):
        """Return the root TreeNode of a height-balanced BST holding *nums*.

        Recurses on index ranges instead of slicing: the original sliced the
        list at every level, copying O(n) elements per level (O(n log n)
        extra work and allocations); index recursion does O(n) total work.
        The midpoint choice is unchanged, so the resulting tree is identical.
        """
        if not nums:
            return None
        def build(lo, hi):
            # Half-open range [lo, hi); an empty range yields no subtree.
            if lo >= hi:
                return None
            mid = (lo + hi) // 2
            node = TreeNode(nums[mid])
            node.left = build(lo, mid)
            node.right = build(mid + 1, hi)
            return node
        return build(0, len(nums))
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 16:23:07 2017
@author: user
"""
## FOUND HERE http://www.nltk.org/howto/sentiment.html
## Source code http://www.nltk.org/_modules/nltk/sentiment/vader.html
## http://www.nltk.org/api/nltk.sentiment.html
## Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model
##for Sentiment Analysis of Social Media Text. Eighth International Conference on
## Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.
## http://www.postgresqltutorial.com/postgresql-python
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import psycopg2
import sys
# HACK: Python 2 only -- setdefaultencoding() is deliberately hidden by the
# interpreter, and re-importing sys to expose it can break modules that
# cached stdout/stdin; explicit .encode('utf-8') at I/O boundaries is the
# safer alternative.
reload(sys) #Prevents errors with utf-8 encoding not working properly
sys.setdefaultencoding('utf8')
def SentimentAnalyzer(tweets):
    """Score tweets with VADER and attach a ternary sentiment label.

    Parameters
    ----------
    tweets : iterable of sequences
        Each row exposes the tweet id at index 0 and the tweet text at
        index 11 (database row layout, per the query in the caller).

    Returns
    -------
    list of ``(tweet_id, compound_score, label)`` tuples, where label is
    'positive', 'negative' or 'neutral' using the +/-0.293 compound-score
    thresholds.
    """
    # Requires the vader_lexicon data (install via nltk.download()).
    sid = SentimentIntensityAnalyzer()
    sentiment_tweets = []
    for tweet in tweets:
        # Previously the id went through str() and then int(); a single
        # int() coercion is equivalent for integral DB ids.
        tweet_id = int(tweet[0])
        ss = sid.polarity_scores(tweet[11])
        if ss['compound'] <= -0.293:
            label = 'negative'
        elif ss['compound'] >= 0.293:
            label = 'positive'
        else:
            label = 'neutral'
        sentiment_tweets.append((tweet_id, ss['compound'], label))
    return sentiment_tweets
| SatoshiNakamotoGeoscripting/SatoshiNakamotoGeoscripting | Final_assignment/lib/sentimentAnalyzerVader.py | Python | mit | 1,401 |
'''
The following is from http://stackoverflow.com/a/2022629
Thanks to Longpoke
'''
import types
class Event(list):
    """Event subscription list.

    A list of callable handlers.  Calling an instance invokes every handler
    in ascending index order with the given arguments.  When constructed
    with ``repeat=False`` the event is one-shot: all handlers are removed
    after the first invocation.

    Example:
        >>> e = Event()
        >>> e()            # no handlers, no-op
        >>> e.append(print)
        >>> e(123)
        123
    """
    def __init__(self, repeat=True):
        super(Event, self).__init__()
        # When False, every handler is dropped after the first dispatch.
        self.repeat = repeat

    def __call__(self, *args, **kwargs):
        for f in self:
            f(*args, **kwargs)
        if not self.repeat:
            # BUG FIX: the previous ``map(lambda f: self.remove(f), self)``
            # mutated the list while iterating it (skipping every other
            # handler on Python 2) and, being lazy, removed nothing at all
            # on Python 3.  Iterate a snapshot instead.
            for f in list(self):
                self.remove(f)

    def remove(self, func):
        # Tolerant remove: a handler that is not registered is not an error.
        if func in self:
            list.remove(self, func)

    def __repr__(self):
        # Make function names look prettier in the debug output.
        items = [item.__name__ if isinstance(item, types.FunctionType) or isinstance(item, types.MethodType)
                 else item
                 for item in self]
        return "Event %s" % list.__repr__(items)
class Listener(dict):
    """Maps event names to Event handler lists and dispatches to them.

    Handlers registered under the reserved key ``'listeners'`` act as
    catch-alls: they receive every event, with the event name prepended
    to the arguments.
    """
    def addSub(self, name, callback, repeat=True):
        '''sets self[name] to Event() if there is no key name.
        Either way the callback is appended to self[name].'''
        self.setdefault(name, Event(repeat)).append(callback)

    def removeSub(self, name, callback):
        # Drop the whole Event entry once its last subscriber is gone.
        if name in self:
            self[name].remove(callback)
            if len(self[name]) == 0:
                del self[name]

    def listen(self, event, repeat=True):
        # Decorator form of addSub: @listener.listen('evt')
        def wrap(f):
            self.addSub(event, f, repeat)
            return f
        return wrap

    def trigger(self, event):
        # Decorator: fire *event* with the wrapped function's return value.
        def wrap(f):
            def newFunc(*args, **kwargs):
                res = f(*args, **kwargs)
                self(event, res)
                return res
            return newFunc
        return wrap

    def __call__(self, event, *args, **kwargs):
        if event in self:
            self[event](*args, **kwargs)
            # One-shot Events empty themselves when fired; drop the dead key
            # (equivalent to the old removeSub(event, self[event]) trick).
            if len(self[event]) == 0:
                del self[event]
        if "listeners" in self:
            self['listeners'](event, *args, **kwargs)

    def __add__(self, listener):
        # NOTE: returns None; intended to be used as a statement (bus + cb).
        self.addSub('listeners', listener)

    def __sub__(self, listener):
        self.removeSub('listeners', listener)

    def __repr__(self):
        return "Listener %s" % dict.__repr__(self)

    def getListeners(self):
        if "listeners" in self:
            return self['listeners']
        return None

    def isListener(self, listener):
        # BUG FIX: previously this evaluated ``listener in None`` (a
        # TypeError) whenever no catch-all listeners were registered.
        listeners = self.getListeners()
        return listeners is not None and listener in listeners
| willemneal/CallMe | callMe.py | Python | mit | 2,823 |
import time
import psycopg2
import os
import sys
if __name__ == "__main__":
    # Slony-I replication smoke test: insert one row per second into the
    # replicated table so lag can be observed on the subscriber node.
    try:
        conn = psycopg2.connect("dbname='master' user='postgres' host='localhost' password='postgres'")
    except Exception, e:
        # Python 2 except syntax; connection failure is fatal ('e' unused).
        print "unable to connect"
        sys.exit()
    cur = conn.cursor()
    for x in xrange(1,1000):
        print x
        cur.execute("INSERT INTO reptable166 (name) VALUES(%s)", ("test" + str(x),))
        conn.commit()
        time.sleep(1)
    cur.close()
conn.close() | btrihatmaja/AutoSlony | testing/test_insert.py | Python | mit | 443 |
from item import Item
class Merchant:
    """A trader that sells from an inventory at a markup and buys at a markdown."""

    def __init__(self, markup=1.2, markdown=0.8):
        # markup: price multiplier when selling; markdown: when buying.
        self.inventory = []
        self.markup = markup
        self.markdown = markdown

    def add_item(self, item):
        """Add *item* to the merchant's inventory; raise TypeError otherwise."""
        if not isinstance(item, Item):
            # BUG FIX: ``"Unexpected " + type(item)`` itself raised a
            # TypeError (str + type), masking the intended message.
            raise TypeError("Unexpected " + str(type(item)))
        self.inventory.append(item)

    def get_selling_offers(self):
        """Return (item, asking_price) pairs for everything in stock."""
        return [(item, item.value * self.markup) for item in self.inventory]

    def get_buying_offers(self, items):
        """Return (item, bid_price) pairs for the given *items*."""
        return [(item, item.value * self.markdown) for item in items]
class Banker:
    """Stores Items for players in a per-player ``bank`` dict of item -> count."""

    def get_items(self, player):
        """Return ``([(item_name, amount), ...], [item, ...])`` for *player*."""
        items_formatted = []
        bank_list = []
        for item, amount in player.bank.items():
            items_formatted.append((item.name, amount))
            bank_list.append(item)
        return items_formatted, bank_list

    def add_item(self, player, item):
        """Deposit one *item* into the player's bank; raise TypeError otherwise."""
        if not isinstance(item, Item):
            # BUG FIX: ``"Unexpected " + type(item)`` raised its own
            # TypeError (str + type) and hid the intended message.
            raise TypeError("Unexpected " + str(type(item)))
        if item in player.bank:
            player.bank[item] += 1
        else:
            player.bank[item] = 1
        return True

    def remove_item(self, player, item):
        """Withdraw one *item*; return False if the player has none banked."""
        if not isinstance(item, Item):
            raise TypeError("Unexpected " + str(type(item)))
        if item in player.bank:
            if player.bank[item] == 1:
                player.bank.pop(item)
            else:
                # BUG FIX: was ``-= 0`` -- the stored count never decreased,
                # letting players withdraw unlimited copies of an item.
                player.bank[item] -= 1
            return True
        else:
            return False
from django.core.management.base import BaseCommand, CommandError
import __future__
from ModelTracker.models import History
import datetime
from django.apps import apps
def getModel(table_name):
    """Return the installed model whose db_table equals *table_name*, else None."""
    for candidate in apps.get_models():
        if candidate._meta.db_table == table_name:
            return candidate
    return None
class Command(BaseCommand):
    help = 'Restore Object to old status'

    def add_arguments(self, parser):
        parser.add_argument('--id', nargs='?', type=str, default=None)
        parser.add_argument("--state", type=str, nargs='?', default="new")

    def handle(self, *args, **options):
        """Rebuild a model instance from a History row and save it.

        ``--id`` is the History primary key; ``--state`` selects whether the
        record's ``old_state`` or ``new_state`` snapshot is restored.
        Raises CommandError (which Django reports and exits non-zero with)
        instead of the previous print()/exit() pairs, and drops the stray
        debug prints of options/state.
        """
        if not options.get("id", None):
            raise CommandError("Change ID is needed")
        h = History.objects.get(id=int(options["id"]))
        model = getModel(h.table)
        if model is None:
            raise CommandError("Can't find the Model")
        field_names = [f.name for f in model._meta.get_fields()]
        if options["state"] == "old":
            state = h.old_state
        else:
            state = h.new_state
        keys2del = []
        for key in state:
            # Drop cached-relation keys and keys that no longer map to a
            # model field; FK columns serialized as ``<name>_id`` are kept.
            if (key.startswith("_") and "_cache" in key) or (key not in field_names and not ("_id" in key and key[:-3] in field_names)):
                keys2del.append(key)
            if type(state[key]) == type({}):
                # Datetimes/dates are serialized as {"_type": ..., "value": ...}.
                if state[key].get("_type", None) == "datetime":
                    state[key] = datetime.datetime.strptime(state[key]["value"], "%Y-%m-%d %H:%M:%S")
                elif state[key].get("_type", None) == "date":
                    state[key] = datetime.datetime.strptime(state[key]["value"], "%Y-%m-%d")
        for key in keys2del:
            del state[key]
        m = model(**state)
        m.save("CLI", event_name="Restore Record to %s (%s)" % (options["id"], options["state"]))
| mkalioby/ModelTracker | ModelTracker/management/commands/restoreObject.py | Python | mit | 1,770 |
# Define a new directive `code-block` (aliased as `sourcecode`) that uses the
# `pygments` source highlighter to render code in color.
#
# Incorporates code from the `Pygments`_ documentation for `Using Pygments in
# ReST documents`_ and `Octopress`_.
#
# .. _Pygments: http://pygments.org/
# .. _Using Pygments in ReST documents: http://pygments.org/docs/rstdirective/
# .. _Octopress: http://octopress.org/
import hashlib
import md5
import os
import re
import __main__
# Absolute path to pygments cache dir
# Anchored on the entry-point script (__main__), not this module, so the
# cache lands next to the site build regardless of plugin location.
PYGMENTS_CACHE_DIR = os.path.abspath(os.path.join(os.path.dirname(__main__.__file__), '../../.pygments-cache'))
# Ensure cache dir exists
if not os.path.exists(PYGMENTS_CACHE_DIR):
    os.makedirs(PYGMENTS_CACHE_DIR)
from pygments.formatters import HtmlFormatter
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
class Pygments(Directive):
    """Source code syntax highlighting directive.

    Renders the directive content through Pygments, wraps it Octopress-style
    (line-number gutter plus optional caption/link figure), and caches the
    raw highlight output on disk keyed by lexer name and a digest of the
    content.
    """
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    string_opts = ['title', 'url', 'caption']
    option_spec = dict([(key, directives.unchanged) for key in string_opts])
    has_content = True

    def run(self):
        self.assert_has_content()
        try:
            lexer_name = self.arguments[0]
            lexer = get_lexer_by_name(lexer_name)
        except ValueError:
            # no lexer found - use the text one instead of an exception
            lexer_name = 'text'
            lexer = TextLexer()
        formatter = HtmlFormatter()
        # Construct cache filename.  hashlib replaces the Python-2-only
        # ``md5`` module (removed in Python 3); digests are identical, and
        # the explicit UTF-8 encode also fixes the UnicodeEncodeError the
        # old ``md5.new(unicode)`` raised on non-ASCII content.
        cache_file = None
        content_text = u'\n'.join(self.content)
        content_hash = hashlib.md5(content_text.encode('utf-8')).hexdigest()
        cache_file_name = '%s-%s.html' % (lexer_name, content_hash)
        cached_path = os.path.join(PYGMENTS_CACHE_DIR, cache_file_name)
        # Look for cached version, otherwise parse
        if os.path.exists(cached_path):
            cache_file = open(cached_path, 'r')
            parsed = cache_file.read()
        else:
            parsed = highlight(content_text, lexer, formatter)
        # Strip pre tag and everything outside it (raw string: the old
        # ``"<pre>(.+)<\/pre>"`` used an invalid \/ escape sequence).
        pres = re.compile(r"<pre>(.+)</pre>", re.S)
        stripped = pres.search(parsed).group(1)
        # Create tabular code with line numbers
        table = '<div class="highlight"><table><tr><td class="gutter"><pre class="line-numbers">'
        lined = ''
        for idx, line in enumerate(stripped.splitlines(True)):
            table += '<span class="line-number">%d</span>\n' % (idx + 1)
            lined += '<span class="line">%s</span>' % line
        table += '</pre></td><td class="code"><pre><code class="%s">%s</code></pre></td></tr></table></div>' % (lexer_name, lined)
        # Add wrapper with optional caption and link
        code = '<figure class="code">'
        if self.options:
            caption = ('<span>%s</span>' % self.options['caption']) if 'caption' in self.options else ''
            title = self.options['title'] if 'title' in self.options else 'link'
            link = ('<a href="%s">%s</a>' % (self.options['url'], title)) if 'url' in self.options else ''
            if caption or link:
                code += '<figcaption>%s %s</figcaption>' % (caption, link)
        code += '%s</figure>' % table
        # Write cache (only when we actually highlighted fresh content)
        if cache_file is None:
            cache_file = open(cached_path, 'w')
            cache_file.write(parsed)
        cache_file.close()
        return [nodes.raw('', code, format='html')]
# Register the directive under both names used in posts.
directives.register_directive('code-block', Pygments)
directives.register_directive('sourcecode', Pygments) | faircloth-lab/data-theme | _plugins/jekyll-rst/directives.py | Python | mit | 3,665 |
import urllib2
import lxml.html
import numpy
import scipy
import scipy.misc
import scipy.cluster
import urlparse
import struct
import operator
import gzip
import datetime
import requests
import httplib
from PIL import BmpImagePlugin, PngImagePlugin, Image
from socket import error as SocketError
from boto.s3.key import Key
from StringIO import StringIO
from django.conf import settings
from apps.rss_feeds.models import MFeedPage, MFeedIcon
from utils import log as logging
from utils.feed_functions import timelimit, TimeoutError
from OpenSSL.SSL import Error as OpenSSLError
from pyasn1.error import PyAsn1Error
from requests.packages.urllib3.exceptions import LocationParseError
class IconImporter(object):
    """Fetches a feed's favicon, normalizes it, computes its dominant color,
    and persists the icon (base64-encoded PNG) to MFeedIcon -- and to S3
    when ``icons_on_s3`` is enabled.  Python 2 code (string exception
    syntax, xrange, unicode).
    """
    def __init__(self, feed, page_data=None, force=False):
        # force: re-fetch even if a cached/not-found result already exists.
        # page_data: optional pre-fetched page HTML to mine for a <link rel=icon>.
        self.feed = feed
        self.force = force
        self.page_data = page_data
        self.feed_icon = MFeedIcon.get_feed(feed_id=self.feed.pk)
    def save(self):
        """Locate, normalize and store the feed's icon.

        Returns True when the feed now has an icon, None on the early-skip
        paths; sets feed.favicon_not_found so later runs can skip work.
        """
        if not self.force and self.feed.favicon_not_found:
            # print 'Not found, skipping...'
            return
        if (
            not self.force
            and not self.feed.favicon_not_found
            and self.feed_icon.icon_url
            and self.feed.s3_icon
        ):
            # print 'Found, but skipping...'
            return
        # Prefer an icon referenced by the page markup, then /favicon.ico.
        image, image_file, icon_url = self.fetch_image_from_page_data()
        if not image:
            image, image_file, icon_url = self.fetch_image_from_path(force=self.force)
        if image:
            image = self.normalize_image(image)
            try:
                color = self.determine_dominant_color_in_image(image)
            except IndexError:
                return
            try:
                image_str = self.string_from_image(image)
            except TypeError:
                return
            if len(image_str) > 500000:
                # Icon too large to store; treat it as missing.
                image = None
            if (image and
                (self.force or
                 self.feed_icon.data != image_str or
                 self.feed_icon.icon_url != icon_url or
                 self.feed_icon.not_found or
                 (settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))):
                logging.debug("   ---> [%-30s] ~SN~FBIcon difference:~FY color:%s (%s/%s) data:%s url:%s notfound:%s no-s3:%s" % (
                    self.feed.log_title[:30],
                    self.feed_icon.color != color, self.feed_icon.color, color,
                    self.feed_icon.data != image_str,
                    self.feed_icon.icon_url != icon_url,
                    self.feed_icon.not_found,
                    settings.BACKED_BY_AWS.get('icons_on_s3') and not self.feed.s3_icon))
                self.feed_icon.data = image_str
                self.feed_icon.icon_url = icon_url
                self.feed_icon.color = color
                self.feed_icon.not_found = False
                self.feed_icon.save()
                if settings.BACKED_BY_AWS.get('icons_on_s3'):
                    self.save_to_s3(image_str)
            if self.feed.favicon_color != color:
                self.feed.favicon_color = color
                self.feed.favicon_not_found = False
                self.feed.save(update_fields=['favicon_color', 'favicon_not_found'])
        if not image:
            self.feed_icon.not_found = True
            self.feed_icon.save()
            self.feed.favicon_not_found = True
            self.feed.save()
        return not self.feed.favicon_not_found
    def save_to_s3(self, image_str):
        """Upload the PNG (decoded from base64) to the icons bucket, public-read,
        with a 60-day Expires header."""
        expires = datetime.datetime.now() + datetime.timedelta(days=60)
        expires = expires.strftime("%a, %d %b %Y %H:%M:%S GMT")
        k = Key(settings.S3_CONN.get_bucket(settings.S3_ICONS_BUCKET_NAME))
        k.key = self.feed.s3_icons_key
        k.set_metadata('Content-Type', 'image/png')
        k.set_metadata('Expires', expires)
        k.set_contents_from_string(image_str.decode('base64'))
        k.set_acl('public-read')
        self.feed.s3_icon = True
        self.feed.save()
    def load_icon(self, image_file, index=None):
        '''
        DEPRECATED
        Load Windows ICO image.
        See http://en.wikipedia.org/w/index.php?oldid=264332061 for file format
        description.
        Cribbed and modified from http://djangosnippets.org/snippets/1287/
        '''
        try:
            image_file.seek(0)
            header = struct.unpack('<3H', image_file.read(6))
        except Exception, e:
            return
        # Check magic
        if header[:2] != (0, 1):
            return
        # Collect icon directories
        directories = []
        for i in xrange(header[2]):
            directory = list(struct.unpack('<4B2H2I', image_file.read(16)))
            for j in xrange(3):
                # Zero means 256 in ICO directory width/height/color fields.
                if not directory[j]:
                    directory[j] = 256
            directories.append(directory)
        if index is None:
            # Select best icon
            directory = max(directories, key=operator.itemgetter(slice(0, 3)))
        else:
            directory = directories[index]
        # Seek to the bitmap data
        image_file.seek(directory[7])
        prefix = image_file.read(16)
        image_file.seek(-16, 1)
        if PngImagePlugin._accept(prefix):
            # Windows Vista icon with PNG inside
            try:
                image = PngImagePlugin.PngImageFile(image_file)
            except IOError:
                return
        else:
            # Load XOR bitmap
            try:
                image = BmpImagePlugin.DibImageFile(image_file)
            except IOError:
                return
            if image.mode == 'RGBA':
                # Windows XP 32-bit color depth icon without AND bitmap
                pass
            else:
                # Patch up the bitmap height
                # NOTE(review): assigning to image.size is rejected by newer
                # Pillow releases -- this only works on old PIL; confirm the
                # pinned imaging library before touching this path.
                image.size = image.size[0], image.size[1] >> 1
                d, e, o, a = image.tile[0]
                image.tile[0] = d, (0, 0) + image.size, o, a
                # Calculate AND bitmap dimensions. See
                # http://en.wikipedia.org/w/index.php?oldid=264236948#Pixel_storage
                # for description
                offset = o + a[1] * image.size[1]
                stride = ((image.size[0] + 31) >> 5) << 2
                size = stride * image.size[1]
                # Load AND bitmap
                image_file.seek(offset)
                string = image_file.read(size)
                mask = Image.frombytes('1', image.size, string, 'raw',
                                       ('1;I', stride, -1))
                image = image.convert('RGBA')
                image.putalpha(mask)
        return image
    def fetch_image_from_page_data(self):
        """Find an icon URL in the feed's page HTML (cached page, S3 page, or
        a live fetch of the feed link) and download it.

        Returns (image, image_file, url); all None-ish when nothing found.
        """
        image = None
        image_file = None
        if self.page_data:
            content = self.page_data
        elif settings.BACKED_BY_AWS.get('pages_on_s3') and self.feed.s3_page:
            key = settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME).get_key(self.feed.s3_pages_key)
            compressed_content = key.get_contents_as_string()
            stream = StringIO(compressed_content)
            gz = gzip.GzipFile(fileobj=stream)
            try:
                content = gz.read()
            except IOError:
                content = None
        else:
            content = MFeedPage.get_data(feed_id=self.feed.pk)
        url = self._url_from_html(content)
        if not url:
            try:
                content = requests.get(self.cleaned_feed_link).content
                url = self._url_from_html(content)
            except (AttributeError, SocketError, requests.ConnectionError,
                    requests.models.MissingSchema, requests.sessions.InvalidSchema,
                    requests.sessions.TooManyRedirects,
                    requests.models.InvalidURL,
                    requests.models.ChunkedEncodingError,
                    requests.models.ContentDecodingError,
                    httplib.IncompleteRead,
                    LocationParseError, OpenSSLError, PyAsn1Error,
                    ValueError), e:
                logging.debug(" ---> ~SN~FRFailed~FY to fetch ~FGfeed icon~FY: %s" % e)
        if url:
            image, image_file = self.get_image_from_url(url)
        return image, image_file, url
    @property
    def cleaned_feed_link(self):
        # Ensure the feed link carries a scheme so requests can fetch it.
        if self.feed.feed_link.startswith('http'):
            return self.feed.feed_link
        return 'http://' + self.feed.feed_link
    def fetch_image_from_path(self, path='favicon.ico', force=False):
        """Fall back to the conventional favicon locations: the stored icon
        URL, then favicon.ico relative to the feed link, then /favicon.ico
        at the site root."""
        image = None
        url = None
        if not force:
            url = self.feed_icon.icon_url
        if not url and self.feed.feed_link and len(self.feed.feed_link) > 6:
            try:
                url = urlparse.urljoin(self.feed.feed_link, 'favicon.ico')
            except ValueError:
                url = None
        if not url:
            return None, None, None
        image, image_file = self.get_image_from_url(url)
        if not image:
            url = urlparse.urljoin(self.feed.feed_link, '/favicon.ico')
            image, image_file = self.get_image_from_url(url)
        # print 'Found: %s - %s' % (url, image)
        return image, image_file, url
    def get_image_from_url(self, url):
        """Download *url* (30s overall time limit) and open it as a PIL image.

        Returns (image, file_like) or (None, None) on any failure.
        """
        # print 'Requesting: %s' % url
        if not url:
            return None, None
        @timelimit(30)
        def _1(url):
            headers = {
                'User-Agent': 'NewsBlur Favicon Fetcher - %s subscriber%s - %s '
                              '(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
                              'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
                              'Safari/534.48.3)' %
                (
                    self.feed.num_subscribers,
                    's' if self.feed.num_subscribers != 1 else '',
                    self.feed.permalink
                ),
                'Connection': 'close',
                'Accept': 'image/png,image/x-icon,image/*;q=0.9,*/*;q=0.8'
            }
            try:
                request = urllib2.Request(url, headers=headers)
                icon = urllib2.urlopen(request).read()
            except Exception:
                return None
            return icon
        try:
            icon = _1(url)
        except TimeoutError:
            return None, None
        try:
            icon_file = StringIO(icon)
            image = Image.open(icon_file)
        except (IOError, ValueError):
            return None, None
        return image, icon_file
    def _url_from_html(self, content):
        """Extract the href of a <link rel="icon"> / "shortcut icon" tag from
        *content*, resolving relative paths against the feed link."""
        url = None
        if not content:
            return url
        try:
            if isinstance(content, unicode):
                content = content.encode('utf-8')
            icon_path = lxml.html.fromstring(content).xpath(
                '//link[@rel="icon" or @rel="shortcut icon"]/@href'
            )
        except (lxml.etree.ParserError, TypeError):
            return url
        if icon_path:
            if str(icon_path[0]).startswith('http'):
                url = icon_path[0]
            else:
                url = urlparse.urljoin(self.feed.feed_link, icon_path[0])
        return url
    def normalize_image(self, image):
        """Coerce the image to RGBA mode (resizing code intentionally disabled)."""
        # if image.size != (16, 16):
        #     image = image.resize((16, 16), Image.BICUBIC)
        if image.mode != 'RGBA':
            try:
                image = image.convert('RGBA')
            except IOError:
                pass
        return image
    def determine_dominant_color_in_image(self, image):
        """Return the dominant color of *image* as a 6-char hex string, via
        k-means clustering of the pixel values (NUM_CLUSTERS centroids),
        discarding near-black/near-white clusters when possible."""
        NUM_CLUSTERS = 5
        # Convert image into array of values for each point.
        if image.mode == '1':
            # NOTE(review): convert() returns a new image and the result is
            # discarded here, so this line has no effect; likely intended
            # ``image = image.convert('L')`` -- confirm before changing.
            image.convert('L')
        ar = numpy.array(image)
        # ar = scipy.misc.fromimage(image)
        shape = ar.shape
        # Reshape array of values to merge color bands. [[R], [G], [B], [A]] => [R, G, B, A]
        if len(shape) > 2:
            ar = ar.reshape(scipy.product(shape[:2]), shape[2])
        # Get NUM_CLUSTERS worth of centroids.
        ar = ar.astype(numpy.float)
        codes, _ = scipy.cluster.vq.kmeans(ar, NUM_CLUSTERS)
        # Pare centroids, removing blacks and whites and shades of really dark and really light.
        original_codes = codes
        for low, hi in [(60, 200), (35, 230), (10, 250)]:
            codes = scipy.array([code for code in codes
                                 if not ((code[0] < low and code[1] < low and code[2] < low) or
                                         (code[0] > hi and code[1] > hi and code[2] > hi))])
            if not len(codes):
                codes = original_codes
            else:
                break
        # Assign codes (vector quantization). Each vector is compared to the centroids
        # and assigned the nearest one.
        vecs, _ = scipy.cluster.vq.vq(ar, codes)
        # Count occurences of each clustered vector.
        counts, bins = scipy.histogram(vecs, len(codes))
        # Show colors for each code in its hex value.
        # colors = [''.join(chr(c) for c in code).encode('hex') for code in codes]
        # total = scipy.sum(counts)
        # print dict(zip(colors, [count/float(total) for count in counts]))
        # Find the most frequent color, based on the counts.
        index_max = scipy.argmax(counts)
        peak = codes.astype(int)[index_max]
        color = ''.join(chr(c) for c in peak).encode('hex')
        return color[:6]
    def string_from_image(self, image):
        """Serialize *image* as base64-encoded PNG text (Python 2 str.encode)."""
        output = StringIO()
        image.save(output, 'png', quality=95)
        contents = output.getvalue()
        output.close()
        return contents.encode('base64')
| mihaip/NewsBlur | apps/rss_feeds/icon_importer.py | Python | mit | 13,810 |
# coding=utf-8
"""
Binary class deconstruct, reconstruct packet
"""
import copy
class Binary(object):
    """Helpers to split socket.io packets into JSON-safe data plus a list of
    binary buffers, and to reassemble them on the other side."""

    @staticmethod
    def deconstruct_packet(packet):
        """
        Replaces every bytearray in packet with a numbered placeholder.
        :param packet:
        :return: dict with packet and list of buffers
        """
        buffers = []
        packet_data = packet.get('data', None)

        def _deconstruct_packet(data):
            # Depth-first walk; bytearrays become {'_placeholder': True, 'num': i}
            # and the buffer is appended in traversal order.
            if type(data) is bytearray:
                place_holder = {
                    '_placeholder': True,
                    'num': len(buffers)
                }
                buffers.append(data)
                return place_holder
            if type(data) is list:
                return [_deconstruct_packet(d) for d in data]
            if type(data) is dict:
                return dict((k, _deconstruct_packet(v)) for k, v in data.items())
            return data

        pack = copy.copy(packet)
        pack['data'] = _deconstruct_packet(packet_data)
        pack['attachments'] = len(buffers)
        return {
            'packet': pack,
            'buffers': buffers
        }

    @staticmethod
    def reconstruct_packet(packet, buffers):
        """Inverse of deconstruct_packet: swap placeholders back for their
        buffers (mutates *packet* in place and drops 'attachments')."""
        def _reconstruct_packet(data):
            if type(data) is dict:
                if '_placeholder' in data:
                    return buffers[data['num']]
                for k, v in data.items():
                    data[k] = _reconstruct_packet(v)
                return data
            if type(data) is list:
                # BUG FIX: ``xrange`` does not exist on Python 3; enumerate
                # works identically on both interpreters.
                for i, item in enumerate(data):
                    data[i] = _reconstruct_packet(item)
                return data
            return data

        packet['data'] = _reconstruct_packet(packet['data'])
        del packet['attachments']
        return packet

    @staticmethod
    def remove_blobs(data):
        """Recursively replace file-like objects (anything with .read()) with
        bytearrays; containers are updated in place."""
        def _remove_blobs(obj, cur_key=None, containing_obj=None):
            if not obj:
                return obj
            try:
                # Try to read it as a file
                buf = bytearray(obj.read())
                if containing_obj is not None and cur_key is not None:
                    containing_obj[cur_key] = buf
                else:
                    return buf
            except AttributeError:
                pass
            if type(obj) is list:
                for index, item in enumerate(obj):
                    _remove_blobs(item, index, obj)
            if type(obj) is dict:
                for k, v in obj.items():
                    _remove_blobs(v, k, obj)
            return obj

        return _remove_blobs(data)
| shuoli84/gevent_socketio2 | socketio/binary.py | Python | mit | 2,907 |
# Under MIT license, see LICENSE.txt
from enum import Enum
""" Constantes concernant les tactiques. """
class Flags(Enum):
    # Status values a tactic reports back to the strategy layer each tick.
    INIT = 0            # freshly created, not yet executed
    WIP = 1             # still running
    FAILURE = 2         # terminal: the tactic failed (see is_complete)
    SUCCESS = 3         # terminal: the tactic succeeded (see is_complete)
    PASS_TO_PLAYER = 4  # presumably signals a pass to a teammate -- confirm with callers
def is_complete(p_status_flag):
    """Return True when the tactic reached a terminal state (FAILURE or SUCCESS)."""
    return p_status_flag in (Flags.FAILURE, Flags.SUCCESS)
| RoboCupULaval/StrategyIA | ai/STA/Tactic/tactic_constants.py | Python | mit | 317 |
import os
from datetime import timedelta
# Project root directory; anchors the per-environment SQLite files below.
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by every environment.

    Secrets come from environment variables; subclasses override the
    per-environment settings (DEBUG/TESTING flags and database URI).
    """
    ###########################################################################
    # [ Application ]
    ###########################################################################
    APP_TITLE = 'WebApp'
    APP_MAIL_NAME = '%s Support' % APP_TITLE
    APP_MAIL_ADDRESS = 'support@webapp.com'
    APP_MAIL_SENDER = '%s <%s>' % (APP_MAIL_NAME, APP_MAIL_ADDRESS)
    APP_MAIL_SUBJECT_PREFIX = '[%s]' % APP_TITLE
    # Email address for the primary site administrator user account.
    APP_ADMIN = os.environ.get('APP_ADMIN')
    # Allow new users to register.
    APP_ALLOW_NEW_USERS = True
    # A value of 0 means unlimited.
    APP_MAX_USERS = 2
    # Toggles the logging of user events.
    APP_EVENT_LOGGING = True
    ###########################################################################
    # [ Flask ]
    ###########################################################################
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    ###########################################################################
    # [ Flask-Login ]
    ###########################################################################
    # Ensures that the "remember me" cookie isn't accessible by
    # client-sides scripts.
    REMEMBER_COOKIE_HTTPONLY = True
    # Time-to-live for the "remember me" cookie.
    REMEMBER_COOKIE_DURATION = timedelta(days=365)
    # Must be disabled for the application's security layer to
    # function properly.
    SESSION_PROTECTION = None
    ###########################################################################
    # [ Flask-Mail ]
    ###########################################################################
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    ###########################################################################
    # [ Flask-SQLAlchemy ]
    ###########################################################################
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialization; base does nothing.
        pass
class DevelopmentConfig(Config):
    """Local development: debug on, data-dev.sqlite unless DEV_DATABASE_URL is set."""
    ###########################################################################
    # [ Flask ]
    ###########################################################################
    DEBUG = True
    ###########################################################################
    # [ Flask-SQLAlchemy ]
    ###########################################################################
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test runs: TESTING on, data-test.sqlite unless TEST_DATABASE_URL is set."""
    ###########################################################################
    # [ Flask ]
    ###########################################################################
    TESTING = True
    ###########################################################################
    # [ Flask-SQLAlchemy ]
    ###########################################################################
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
    """Production settings; secure-cookie flags are left for HTTPS deployments."""
    ###########################################################################
    # [ Flask ]
    ###########################################################################
    # Uncomment the following line if you're running HTTPS throughout
    # your entire application.
    # SESSION_COOKIE_SECURE = True
    ###########################################################################
    # [ Flask-Login ]
    ###########################################################################
    # Uncomment the following line if you're running HTTPS throughout
    # your entire application.
    # REMEMBER_COOKIE_SECURE = True
    ###########################################################################
    # [ Flask-SQLAlchemy ]
    ###########################################################################
    # DATABASE_URL wins when set and non-empty; otherwise fall back to a
    # local SQLite file (not recommended for real production use).
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
# Registry mapping a FLASK_CONFIG-style name to its config class;
# 'default' is used when no name is supplied.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| richgieg/flask-now | config.py | Python | mit | 4,539 |
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from collections import OrderedDict
import attr
import py
import six
import pytest
from .compat import _PY2 as PY2
from .pathlib import Path
from .pathlib import resolve_from_str
from .pathlib import rmtree
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from the pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
CACHEDIR_TAG_CONTENT = b"""\
Signature: 8a477f597d28d172789f06886806bc55
# This file is a cache directory tag created by pytest.
# For information about cache directory tags, see:
# http://www.bford.info/cachedir/spec.html
"""
@attr.s
class Cache(object):
    """Filesystem-backed key/value store rooted at the configured cache dir.

    JSON values are stored under ``<cachedir>/v``; directories requested via
    :meth:`makedir` live under ``<cachedir>/d``.
    """

    _cachedir = attr.ib(repr=False)  # Path of the cache root directory
    _config = attr.ib(repr=False)    # pytest Config, used for options/warnings

    @classmethod
    def for_config(cls, config):
        """Build a Cache for *config*, honouring ``--cache-clear``."""
        cachedir = cls.cache_dir_from_config(config)
        if config.getoption("cacheclear") and cachedir.exists():
            # Wipe and recreate only when clearing was requested and the
            # directory already exists.
            rmtree(cachedir, force=True)
            cachedir.mkdir()
        return cls(cachedir, config)

    @staticmethod
    def cache_dir_from_config(config):
        # Resolve the cache_dir ini value relative to the rootdir.
        return resolve_from_str(config.getini("cache_dir"), config.rootdir)

    def warn(self, fmt, **args):
        """Issue a PytestWarning; *fmt* is formatted with **args when given."""
        # Imported lazily to avoid import cycles at module load time.
        from _pytest.warnings import _issue_warning_captured
        from _pytest.warning_types import PytestWarning

        _issue_warning_captured(
            PytestWarning(fmt.format(**args) if args else fmt),
            self._config.hook,
            stacklevel=3,
        )

    def makedir(self, name):
        """ return a directory path object with the given name. If the
        directory does not yet exist, it will be created. You can use it
        to manage files, e.g. to store/retrieve database
        dumps across test sessions.

        :param name: must be a string not containing a ``/`` separator.
            Make sure the name contains your plugin or application
            identifiers to prevent clashes with other cache users.
        """
        name = Path(name)
        if len(name.parts) > 1:
            raise ValueError("name is not allowed to contain path separators")
        res = self._cachedir.joinpath("d", name)
        res.mkdir(exist_ok=True, parents=True)
        return py.path.local(res)

    def _getvaluepath(self, key):
        # Keys map directly to file paths below the "v" subdirectory.
        return self._cachedir.joinpath("v", Path(key))

    def get(self, key, default):
        """ return cached value for the given key. If no value
        was yet cached or the value cannot be read, the specified
        default is returned.

        :param key: must be a ``/`` separated value. Usually the first
            name is the name of your plugin or your application.
        :param default: must be provided in case of a cache-miss or
            invalid cache values.
        """
        path = self._getvaluepath(key)
        try:
            with path.open("r") as f:
                return json.load(f)
        except (ValueError, IOError, OSError):
            # Missing file or corrupt JSON both fall back to the default.
            return default

    def set(self, key, value):
        """ save value for the given key.

        :param key: must be a ``/`` separated value. Usually the first
            name is the name of your plugin or your application.
        :param value: must be of any combination of basic
            python types, including nested types
            like e. g. lists of dictionaries.
        """
        path = self._getvaluepath(key)
        try:
            # Remember whether the cache dir pre-existed so the supporting
            # files (README, .gitignore, CACHEDIR.TAG) are created only once.
            if path.parent.is_dir():
                cache_dir_exists_already = True
            else:
                cache_dir_exists_already = self._cachedir.exists()
                path.parent.mkdir(exist_ok=True, parents=True)
        except (IOError, OSError):
            self.warn("could not create cache path {path}", path=path)
            return
        try:
            # Binary mode on Python 2 so json.dump writes str, not unicode.
            f = path.open("wb" if PY2 else "w")
        except (IOError, OSError):
            self.warn("cache could not write path {path}", path=path)
        else:
            with f:
                json.dump(value, f, indent=2, sort_keys=True)
            if not cache_dir_exists_already:
                self._ensure_supporting_files()

    def _ensure_supporting_files(self):
        """Create supporting files in the cache dir that are not really part of the cache."""
        if self._cachedir.is_dir():
            readme_path = self._cachedir / "README.md"
            if not readme_path.is_file():
                readme_path.write_text(README_CONTENT)

            gitignore_path = self._cachedir.joinpath(".gitignore")
            if not gitignore_path.is_file():
                msg = u"# Created by pytest automatically.\n*"
                gitignore_path.write_text(msg, encoding="UTF-8")

            cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
            if not cachedir_tag_path.is_file():
                cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
class LFPlugin(object):
    """ Plugin which implements the --lf (run last-failing) option """

    def __init__(self, config):
        self.config = config
        # Active when either --lf or --ff was given.
        active_keys = "lf", "failedfirst"
        self.active = any(config.getoption(key) for key in active_keys)
        # nodeid -> True for every test that failed in the previous run.
        self.lastfailed = config.cache.get("cache/lastfailed", {})
        self._previously_failed_count = None
        self._no_failures_behavior = self.config.getoption("last_failed_no_failures")

    def pytest_report_collectionfinish(self):
        # Describe the rerun mode in the collection summary (skipped in -q).
        if self.active and self.config.getoption("verbose") >= 0:
            if not self._previously_failed_count:
                return None
            noun = "failure" if self._previously_failed_count == 1 else "failures"
            suffix = " first" if self.config.getoption("failedfirst") else ""
            mode = "rerun previous {count} {noun}{suffix}".format(
                count=self._previously_failed_count, suffix=suffix, noun=noun
            )
            return "run-last-failure: %s" % mode

    def pytest_runtest_logreport(self, report):
        # A passing call phase (or a skip) clears the record; any failure
        # in any phase records the nodeid.
        if (report.when == "call" and report.passed) or report.skipped:
            self.lastfailed.pop(report.nodeid, None)
        elif report.failed:
            self.lastfailed[report.nodeid] = True

    def pytest_collectreport(self, report):
        passed = report.outcome in ("passed", "skipped")
        if passed:
            if report.nodeid in self.lastfailed:
                # The collector itself recovered; track its children instead.
                self.lastfailed.pop(report.nodeid)
                self.lastfailed.update((item.nodeid, True) for item in report.result)
        else:
            self.lastfailed[report.nodeid] = True

    def pytest_collection_modifyitems(self, session, config, items):
        if self.active:
            if self.lastfailed:
                previously_failed = []
                previously_passed = []
                for item in items:
                    if item.nodeid in self.lastfailed:
                        previously_failed.append(item)
                    else:
                        previously_passed.append(item)
                self._previously_failed_count = len(previously_failed)
                if not previously_failed:
                    # running a subset of all tests with recorded failures outside
                    # of the set of tests currently executing
                    return
                if self.config.getoption("lf"):
                    # --lf: run only the previous failures.
                    items[:] = previously_failed
                    config.hook.pytest_deselected(items=previously_passed)
                else:
                    # --ff: run everything, failures first.
                    items[:] = previously_failed + previously_passed
            elif self._no_failures_behavior == "none":
                # --lfnf=none: nothing failed last time, so run nothing.
                config.hook.pytest_deselected(items=items)
                items[:] = []

    def pytest_sessionfinish(self, session):
        config = self.config
        # Don't persist when only showing the cache or on xdist worker nodes.
        if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
            return

        saved_lastfailed = config.cache.get("cache/lastfailed", {})
        if saved_lastfailed != self.lastfailed:
            config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
    """ Plugin which implements the --nf (run new-first) option """

    def __init__(self, config):
        self.config = config
        self.active = config.option.newfirst
        # nodeids seen in the previous session.
        self.cached_nodeids = config.cache.get("cache/nodeids", [])

    def pytest_collection_modifyitems(self, session, config, items):
        if self.active:
            new_items = OrderedDict()
            other_items = OrderedDict()
            for item in items:
                if item.nodeid not in self.cached_nodeids:
                    new_items[item.nodeid] = item
                else:
                    other_items[item.nodeid] = item

            # Never-seen items first, each group ordered newest-file-first.
            items[:] = self._get_increasing_order(
                six.itervalues(new_items)
            ) + self._get_increasing_order(six.itervalues(other_items))
        self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]

    def _get_increasing_order(self, items):
        # Sort by file modification time, newest first.
        return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)

    def pytest_sessionfinish(self, session):
        config = self.config
        # Don't persist when only showing the cache or on xdist worker nodes.
        if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
            return

        config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
    """Register the cache plugin's CLI options and the cache_dir ini value."""
    group = parser.getgroup("general")
    group.addoption(
        "--lf",
        "--last-failed",
        action="store_true",
        dest="lf",
        help="rerun only the tests that failed "
        "at the last run (or all if none failed)",
    )
    group.addoption(
        "--ff",
        "--failed-first",
        action="store_true",
        dest="failedfirst",
        help="run all tests but run the last failures first.  "
        "This may re-order tests and thus lead to "
        "repeated fixture setup/teardown",
    )
    group.addoption(
        "--nf",
        "--new-first",
        action="store_true",
        dest="newfirst",
        help="run tests from new files first, then the rest of the tests "
        "sorted by file mtime",
    )
    group.addoption(
        "--cache-show",
        action="store_true",
        dest="cacheshow",
        help="show cache contents, don't perform collection or tests",
    )
    group.addoption(
        "--cache-clear",
        action="store_true",
        dest="cacheclear",
        help="remove all cache contents at start of test run.",
    )
    # Keep per-environment caches separate when running under tox.
    cache_dir_default = ".pytest_cache"
    if "TOX_ENV_DIR" in os.environ:
        cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
    parser.addini("cache_dir", default=cache_dir_default, help="cache directory path.")
    group.addoption(
        "--lfnf",
        "--last-failed-no-failures",
        action="store",
        dest="last_failed_no_failures",
        choices=("all", "none"),
        default="all",
        help="change the behavior when no test failed in the last run or no "
        "information about the last failures was found in the cache",
    )
def pytest_cmdline_main(config):
    """Short-circuit the normal run to dump the cache when --cache-show is given."""
    if config.option.cacheshow:
        from _pytest.main import wrap_session

        return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Attach the Cache to the config and register the --lf/--nf plugins."""
    # tryfirst so config.cache exists before other plugins' configure hooks run.
    config.cache = Cache.for_config(config)
    config.pluginmanager.register(LFPlugin(config), "lfplugin")
    config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
    """
    Return a cache object that can persist state between testing sessions.

    cache.get(key, default)
    cache.set(key, value)

    Keys must be a ``/`` separated value, where the first part is usually the
    name of your plugin or application to avoid clashes with other cache users.

    Values can be any object handled by the json stdlib module.
    """
    # Simply exposes the Cache instance created in pytest_configure.
    return request.config.cache
def pytest_report_header(config):
    """Display cachedir with --cache-show and if non-default."""
    if config.option.verbose or config.getini("cache_dir") != ".pytest_cache":
        cachedir = config.cache._cachedir
        # TODO: evaluate generating upward relative paths
        # starting with .., ../.. if sensible

        try:
            # Show a path relative to rootdir when the cache lives below it.
            displaypath = cachedir.relative_to(config.rootdir)
        except ValueError:
            displaypath = cachedir
        return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
    """Print all cached values and cached directories; session body for --cache-show."""
    from pprint import pformat

    tw = py.io.TerminalWriter()
    tw.line("cachedir: " + str(config.cache._cachedir))
    if not config.cache._cachedir.is_dir():
        tw.line("cache is empty")
        return 0
    # Sentinel distinguishing "unreadable" from any real cached value.
    dummy = object()
    basedir = config.cache._cachedir
    vdir = basedir / "v"
    tw.sep("-", "cache values")
    for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
        key = valpath.relative_to(vdir)
        val = config.cache.get(key, dummy)
        if val is dummy:
            tw.line("%s contains unreadable content, will be ignored" % key)
        else:
            tw.line("%s contains:" % key)
            for line in pformat(val).splitlines():
                tw.line("  " + line)

    ddir = basedir / "d"
    if ddir.is_dir():
        contents = sorted(ddir.rglob("*"))
        tw.sep("-", "cache directories")
        for p in contents:
            # if p.check(dir=1):
            #    print("%s/" % p.relto(basedir))
            if p.is_file():
                key = p.relative_to(basedir)
                tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
    return 0
| hackebrot/pytest | src/_pytest/cacheprovider.py | Python | mit | 13,931 |
#!/usr/bin/env python
"""
This module contains input, output and algorithms for creating
texture atlases.
"""
| Ezphares/TextureAtlas | atlas/__init__.py | Python | mit | 110 |
#!/usr/bin/python
# Copyright 2014 BitPay, Inc.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import os
import bctest
import buildenv
if __name__ == '__main__':
    # Run the shared bctest driver against the JSON-described util test
    # vectors located under ${srcdir}/test/data.
    bctest.bctester(os.environ["srcdir"] + "/test/data",
                    "navcoin-util-test.json", buildenv)
| navcoindev/navcoin-core | src/test/navcoin-util-test.py | Python | mit | 410 |
import pylogging
import os

# Logs Dir Absolute Path
logs_path = os.path.dirname(os.path.abspath(__file__)) + '/logs/'

# Create Logger Instance
logger = pylogging.PyLogging(LOG_FILE_PATH = logs_path)


def customAction1(type, msg):
    """Example action hook; called by the logger with the message type and text."""
    # Custom Action Goes Here
    pass

# Add Action
actionIden1 = logger.addAction(customAction1)


def customAction2(type, msg):
    """Second example action hook; registered and left active below."""
    # Custom Action Goes Here
    pass

# Add Action
actionIden2 = logger.addAction(customAction2)

# To Remove Action1
logger.removeAction(actionIden1)

# Log Info Message
logger.info("Info Message")

# Log Warning Message
logger.warning("Warning Message.")

# Log Error Message
logger.error("Error Message.")

# Log Critical Message
logger.critical("Critical Message.")

# Log Normal Message
logger.log("Normal Log Message.")
from .utils import ShellParser
class Parser(ShellParser):
    """Extract text from PostScript files using the ``ps2ascii`` command."""

    def extract(self, filename, **kwargs):
        """Run ``ps2ascii`` on *filename* and return its standard output."""
        output, _stderr = self.run(['ps2ascii', filename])
        return output
| deanmalmgren/textract | textract/parsers/ps_parser.py | Python | mit | 253 |
class Node:
    """A single node of a singly linked list."""

    def __init__(self, data):
        self.data = data   # payload
        self.next = None   # following node, or None at the tail


class LinkedList:
    """Singly linked list: O(1) push-front, O(n) append and deletion."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def push(self, newdata):
        """Insert a new node holding *newdata* at the front of the list."""
        newnode = Node(newdata)
        newnode.next = self.head
        self.head = newnode

    def insertAfter(self, prevnode, newdata):
        """Insert a new node holding *newdata* directly after *prevnode*."""
        newnode = Node(newdata)
        newnode.next = prevnode.next
        prevnode.next = newnode

    def append(self, newdata):
        """Insert a new node holding *newdata* at the end of the list."""
        newnode = Node(newdata)
        if self.head is None:
            self.head = newnode
            return
        temp = self.head
        while temp.next:
            temp = temp.next
        temp.next = newnode

    def deleteNode(self, key):
        """Unlink the first node whose data equals *key*; no-op if absent.

        Bug fix: deleting the head node previously raised NameError because
        ``prev`` was never bound; debug prints (which hard-coded "Found 3"
        regardless of *key*) have been removed.
        """
        temp = self.head
        prev = None
        while temp is not None:
            if temp.data == key:
                break
            prev = temp
            temp = temp.next
        if temp is None:
            return  # key not present
        if prev is None:
            self.head = temp.next  # deleting the head
        else:
            prev.next = temp.next

    def deletepos(self, pos):
        """Unlink the node at 1-based position *pos*; no-op if out of range.

        Bug fix: pos=1 previously raised NameError (``prev`` unbound), and
        an over-long *pos* could walk past the tail.
        """
        if self.head is None:
            return
        if pos <= 1:
            self.head = self.head.next
            return
        prev = None
        temp = self.head
        for _ in range(pos - 1):
            prev = temp
            temp = temp.next
            if temp is None:
                return  # position beyond the end of the list
        prev.next = temp.next

    def printList(self):
        """Print each node's data on its own line (original " %d" format)."""
        temp = self.head
        while temp:
            print(" %d" % temp.data)
            temp = temp.next
def main():
    """Demo: build a small list, delete the 3rd node, print before/after.

    Bug fix: removed a stray 'hello' debug print; prints are parenthesized
    so the demo also runs under Python 3.
    """
    ll = LinkedList()
    for value in (1, 2, 3, 4, 5):
        ll.push(value)
    print("Created Linked List: ")
    ll.printList()
    ll.deletepos(3)
    print("\nLinked List after Deletion of 3:")
    ll.printList()


if __name__ == '__main__':
    main()
# Generated by Django 2.1.4 on 2018-12-21 14:08
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration reconciling two divergent 0016 branches.

    Intentionally contains no schema operations.
    """

    dependencies = [
        ('mainapp', '0016_auto_20181202_2205'),
        ('mainapp', '0016_auto_20181221_1432'),
    ]

    operations = [
    ]
| meine-stadt-transparent/meine-stadt-transparent | mainapp/migrations/0017_merge_20181221_1508.py | Python | mit | 272 |
from multiprocessing import Process
import pytest
from .mock_server.server import start
server_process = None
@pytest.hookimpl
def pytest_sessionstart(session):
    """Launch the mock HTTP server in a child process before any test runs."""
    global server_process
    server_process = Process(target=start)
    server_process.start()
@pytest.hookimpl
def pytest_sessionfinish(session):
    """Terminate the mock-server process (if started) when the session ends."""
    if server_process is not None:
        server_process.terminate()
        # Reap the child so no zombie process is left behind.
        server_process.join()
| pirate/bookmark-archiver | tests/conftest.py | Python | mit | 417 |
##############################################################################
# Clinical trials parser
#
# eg 2013-2016
##############################################################################
import cPickle, os, re
def main():
    """Entry point: dump the merged clinical-trials table to ct.csv."""
    #base_dir = "../data/ct/"
    # NOTE(review): hard-coded local data path — adjust per machine.
    base_dir = "/home/eguney/data/ct/"
    file_name = base_dir + "ct.csv"
    output_data(base_dir, file_name)
    return
def output_data(base_dir, file_name):
    """Write one tab-separated row per (drug, trial) pair to *file_name*.

    Columns: drug, NCT id, phase, status, FDA-regulated, why stopped,
    results date, and '|'-joined conditions. (Python 2 module: print
    statements and dict.iteritems.)
    """
    drug_to_ctids = get_interventions(base_dir, include_other_names=True) #False)
    print len(drug_to_ctids), drug_to_ctids.items()[:5]
    ctid_to_conditions = get_ctid_to_conditions(base_dir)
    print len(ctid_to_conditions), ctid_to_conditions.items()[:5]
    ctid_to_values = get_ctid_to_details(base_dir)
    print len(ctid_to_values), ctid_to_values.items()[:5]
    f = open(file_name, 'w')
    f.write("Drug\tClinical trial Id\tPhase\tStatus\tFDA regulated\tWhy stopped\tResults date\tConditions\n")
    for drug, ctids in drug_to_ctids.iteritems():
        for ctid in ctids:
            values = [ drug, ctid ]
            if ctid in ctid_to_values:
                #phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
                values.extend(ctid_to_values[ctid])
            if ctid in ctid_to_conditions:
                conditions = ctid_to_conditions[ctid]
                values.append(" | ".join(conditions))
            f.write("%s\n" % "\t".join(values))
    f.close()
    return
def get_disease_specific_drugs(drug_to_diseases, phenotype_to_mesh_id):
    """Invert drug->disease associations into a disease->drugs mapping.

    Bug fix: the original mixed Python-2-only ``iteritems()`` with
    ``items()`` in the same function; normalized to ``items()`` (identical
    behavior on Python 2, and Python-3 compatible).

    :param drug_to_diseases: drug id -> iterable of (phenotype, mesh_id, score)
    :param phenotype_to_mesh_id: phenotype name -> MeSH id
    :return: dict of lower-cased phenotype name -> set of drug ids with score > 0
    """
    disease_to_drugs = {}
    # Reverse map: MeSH id -> phenotype name.
    mesh_id_to_phenotype = {}
    for phenotype, mesh_id in phenotype_to_mesh_id.items():
        mesh_id_to_phenotype[mesh_id] = phenotype
    for drugbank_id, diseases in drug_to_diseases.items():
        for phenotype, dui, val in diseases:
            if val > 0:
                if dui in mesh_id_to_phenotype:  # restrict to the known disease set
                    disease = mesh_id_to_phenotype[dui].lower()
                    disease_to_drugs.setdefault(disease, set()).add(drugbank_id)
    return disease_to_drugs
def get_drug_disease_mapping(base_dir, selected_drugs, name_to_drug, synonym_to_drug, mesh_id_to_name, mesh_id_to_name_with_synonyms, dump_file):
    """Build drug -> {(phenotype, mesh_id, score)} from clinical-trial data.

    Scores encode trial phase (0.5-0.9), penalized by 0.1 for terminated or
    withdrawn trials. Results are memoized via cPickle in *dump_file*.
    (Python 2 module: print statements, iteritems, reduce as builtin.)
    """
    if os.path.exists(dump_file):
        # Cached result from a previous run.
        drug_to_diseases = cPickle.load(open(dump_file))
        return drug_to_diseases
    # Get mesh name to mesh id mapping; also index comma/hyphen-stripped variants.
    mesh_name_to_id = {}
    for mesh_id, names in mesh_id_to_name_with_synonyms.iteritems():
        for name in names:
            for name_mod in [ name, name.replace(",", ""), name.replace("-", " "), name.replace(",", "").replace("-", " ") ]:
                mesh_name_to_id[name_mod] = mesh_id
    # Get CT info
    drug_to_ctids, ctid_to_conditions, ctid_to_values = get_ct_data(base_dir, include_other_names=True)
    # Get CT - MeSH disease mapping: match each trial condition string against
    # MeSH names (allowing an optional trailing 's', ignoring parentheticals).
    intervention_to_mesh_name = {}
    interventions = reduce(lambda x,y: x|y, ctid_to_conditions.values())
    for intervention in interventions:
        if intervention.endswith('s'):
            intervention = intervention[:-1]
        idx = intervention.find("(")
        if idx != -1:
            intervention = intervention[:idx].rstrip()
        try:
            exp = re.compile(r"\b%ss{,1}\b" % re.escape(intervention))
        except:
            # NOTE(review): on a failed compile, 'exp' from the previous
            # iteration is reused below — confirm intended.
            print "Problem with regular expression:", intervention
        for mesh_name, dui in mesh_name_to_id.iteritems():
            m = exp.search(mesh_name)
            if m is None:
                continue
            elif len(mesh_name.split()) != len(intervention.split()): # no partial overlap
                continue
            phenotype = mesh_id_to_name[dui]
            intervention_to_mesh_name[intervention] = phenotype
            break
    #print len(intervention_to_mesh_name), intervention_to_mesh_name.items()[:5]
    # Get interventions: map trial phases / statuses to association scores.
    phase_to_value = { "Phase 0": 0.5, "Phase 1": 0.6, "Phase 1/Phase 2": 0.65, "Phase 2": 0.7, "Phase 2/Phase 3": 0.75, "Phase 3": 0.8, "Phase 3/Phase 4":0.85, "Phase 4": 0.9, "N/A": 0.5 }
    status_to_value = { "Terminated": -0.5, "Withdrawn": -1} #,"Completed", "Recruiting", "Not yet recruiting"
    drug_to_diseases = {}
    drug_to_diseases_n_study = {}
    non_matching_drugs = set()
    for drug, ctids in drug_to_ctids.iteritems():
        # Resolve the CT drug name to a DrugBank id (by name, then synonym).
        drugbank_id = None
        if name_to_drug is None:
            drugbank_id = drug
        else:
            if drug in name_to_drug:
                drugbank_id = name_to_drug[drug]
            elif drug in synonym_to_drug:
                drugbank_id = synonym_to_drug[drug]
            else:
                non_matching_drugs.add(drug)
                continue
        if selected_drugs is not None and drugbank_id not in selected_drugs:
            continue
        phenotype_to_count = {}
        for ctid in ctids:
            phase, status, fda_regulated, why_stopped, results_date = ctid_to_values[ctid]
            val = 0.5
            if phase not in phase_to_value:
                print "Unknown phase:", phase
            if status in status_to_value and phase in phase_to_value:
                # Penalize terminated/withdrawn trials.
                val = phase_to_value[phase] - 0.1
            for intervention in ctid_to_conditions[ctid]:
                if intervention not in intervention_to_mesh_name:
                    continue
                phenotype = intervention_to_mesh_name[intervention]
                i = phenotype_to_count.setdefault(phenotype, 0)
                phenotype_to_count[phenotype] = i + 1
                dui = mesh_name_to_id[phenotype]
                # Phase based value assignment
                drug_to_diseases.setdefault(drugbank_id, set()).add((phenotype, dui, val))
        # Number of study based value assignment
        for phenotype, val in phenotype_to_count.iteritems():
            dui = mesh_name_to_id[phenotype]
            drug_to_diseases_n_study.setdefault(drugbank_id, set()).add((phenotype, dui, val))
    #drug_to_diseases = drug_to_diseases_n_study
    #print "Non matching drugs:", len(non_matching_drugs)
    #print len(drug_to_diseases), drug_to_diseases.items()[:5]
    cPickle.dump(drug_to_diseases, open(dump_file, 'w'))
    return drug_to_diseases
def get_ct_data(base_dir, include_other_names=True, dump_file=None):
    """Return (drug_to_ctids, ctid_to_conditions, ctid_to_values).

    Parses the clinical-trials flat files under *base_dir*; optionally
    memoizes the 3-tuple via cPickle in *dump_file*.
    """
    if dump_file is not None and os.path.exists(dump_file):
        # Cached result from a previous run.
        values = cPickle.load(open(dump_file))
        #drug_to_ctids, ctid_to_conditions, ctid_to_values = values
        return values
    drug_to_ctids = get_interventions(base_dir, include_other_names)
    ctid_to_conditions = get_ctid_to_conditions(base_dir)
    ctid_to_values = get_ctid_to_details(base_dir)
    values = drug_to_ctids, ctid_to_conditions, ctid_to_values
    if dump_file is not None:
        cPickle.dump(values, open(dump_file, 'w'))
    return values
def get_ctid_to_conditions(base_dir):
    """Parse conditions.txt under *base_dir* into {nct_id: set(conditions)}.

    Expects '|'-separated rows of the form ``row|NCT_ID|condition|...`` with
    a single header line.

    NOTE(review): the original also contained a second parsing loop over
    condition_browse.txt, but it sat *after* the return statement and was
    unreachable dead code; it has been removed without changing behavior.
    """
    condition_file = base_dir + "conditions.txt"
    ctid_to_conditions = {}
    f = open(condition_file)
    f.readline()  # skip header row
    for line in f:
        words = line.strip().split("|")
        ctid = words[1]
        condition = words[2]
        ctid_to_conditions.setdefault(ctid, set()).add(condition)
    f.close()
    return ctid_to_conditions
def get_ctid_to_details(base_dir):
    """Parse clinical_study.txt into
    {nct_id: [phase, status, fda_regulated, why_stopped, results_date]}.

    Records may span multiple physical lines; a new record starts at a line
    beginning with "NCT". Records with an empty phase are skipped.

    Bug fix: the original re-parsed only phase/status for the *final* record
    after the loop, silently reusing fda_regulated/why_stopped/results_date
    from the previous record (and raising NameError on a single-record
    file). All fields are now parsed uniformly via one helper.
    """
    study_file = base_dir + "clinical_study.txt"  # _noclob
    f = open(study_file)
    # Header row defines the column order; index columns by lowercased name.
    line = f.readline()
    words = line.strip().split("|")
    header_to_idx = dict((word.lower(), i) for i, word in enumerate(words))

    ctid_to_values = {}

    def parse_record(text):
        # Split one logical record; store it when it carries a phase value.
        # Returns False on a malformed record (preserving the original's
        # diagnostic print and bail-out).
        words = text.strip().split("|")
        ctid = words[0]
        try:
            phase = words[header_to_idx["phase"]]
            status = words[header_to_idx["overall_status"]]
            fda_regulated = words[header_to_idx["is_fda_regulated"]]
            why_stopped = words[header_to_idx["why_stopped"]]
            results_date = words[header_to_idx["firstreceived_results_date"]]
        except (IndexError, KeyError):
            print(words)
            return False
        if phase.strip() != "":
            ctid_to_values[ctid] = [phase, status, fda_regulated, why_stopped, results_date]
        return True

    text = None
    while line:
        line = f.readline()
        if line.startswith("NCT"):
            if text is not None:
                if not parse_record(text):
                    f.close()
                    return None  # original behavior: abort on malformed record
            text = line
        else:
            text += line  # continuation line of the current record
    f.close()
    # Parse the final accumulated record with all of its own fields.
    if text is not None:
        parse_record(text)
    return ctid_to_values
def get_interventions(base_dir, include_other_names=True):
    """Parse interventions.txt (plus, optionally, intervention_browse.txt and
    intervention_other_names.txt) into {drug_name: set(nct_ids)}.

    Only Drug/Biological interventions are kept; drug names are stripped of
    non-ASCII characters and surrounding quotes. (Python 2 module.)
    """
    #ctid_to_drugs = {}
    drug_to_ctids = {}
    intervention_file = base_dir + "interventions.txt"
    f = open(intervention_file)
    f.readline()
    #prev_row = 0
    ignored_intervention_types = set()
    for line in f:
        words = line.strip().split("|")
        try:
            # Rows whose first field is not an integer are continuations /
            # malformed and are skipped.
            row = int(words[0])
            #if row != prev_row + 1:
            #    continue
        except:
            continue
        #prev_row += 1
        if len(words) < 5:
            #print words
            continue
        ctid = words[1]
        intervention = words[2]
        drug = words[3]
        drug = drug.decode("ascii", errors="ignore").encode("ascii")
        drug = drug.strip("\"'")
        if intervention != "Drug" and intervention != "Biological" :
            ignored_intervention_types.add(intervention)
            continue
        drug_to_ctids.setdefault(drug, set()).add(ctid)
        #ctid_to_drugs.setdefault(ctid, set()).add(drug)
        #conditions = drug_to_interventions.setdefault(drug, set())
        #conditions |= ctid_to_conditions[ctid]
    f.close()
    print "Ignored intervention types:", ignored_intervention_types
    if include_other_names:
        # Merge MeSH-browse names for the same trials.
        intervention_file = base_dir + "intervention_browse.txt"
        f = open(intervention_file)
        f.readline()
        for line in f:
            words = line.strip().split("|")
            row = int(words[0])
            ctid = words[1]
            drug = words[2] #.lower()
            drug = drug.decode("ascii", errors="ignore").encode("ascii")
            drug = drug.strip("\"'")
            drug_to_ctids.setdefault(drug, set()).add(ctid)
            #ctid_to_drugs.setdefault(ctid, set()).add(drug)
        f.close()
        # Merge alternative intervention names as additional drug keys.
        intervention_file = base_dir + "intervention_other_names.txt"
        f = open(intervention_file)
        f.readline()
        for line in f:
            words = line.strip().split("|")
            row = int(words[0])
            ctid = words[1]
            drug = words[3] #.lower()
            drug = drug.decode("ascii", errors="ignore").encode("ascii")
            drug = drug.strip("\"'")
            drug_to_ctids.setdefault(drug, set()).add(ctid)
            #ctid_to_drugs.setdefault(ctid, set()).add(drug)
        f.close()
    return drug_to_ctids #ctid_to_drugs
def get_drug_to_interventions(drug_to_ctids, name_to_drug=None, synonym_to_drug=None, ctid_to_conditions=None):
    """Map each drug (or resolved DrugBank id) to the union of conditions of
    its trials.

    Bug fix: ``name_to_drug``, ``synonym_to_drug`` and ``ctid_to_conditions``
    were undefined free variables in the original (guaranteed NameError at
    runtime); they are now parameters. The added parameters default to None,
    which keeps the signature backward-compatible.

    :param drug_to_ctids: drug name -> set of NCT ids
    :param name_to_drug: optional drug name -> DrugBank id map; when None,
        drug names are used as-is
    :param synonym_to_drug: optional synonym -> DrugBank id map, consulted
        after *name_to_drug*
    :param ctid_to_conditions: required NCT id -> set of conditions map
    :return: dict of drug/DrugBank id -> set of condition strings
    """
    if ctid_to_conditions is None:
        raise ValueError("ctid_to_conditions mapping is required")
    drug_to_interventions = {}
    non_matching_drugs = set()
    for drug, ctids in drug_to_ctids.items():
        drugbank_id = None
        if name_to_drug is None:
            drugbank_id = drug
        else:
            if drug in name_to_drug:
                drugbank_id = name_to_drug[drug]
            elif synonym_to_drug is not None and drug in synonym_to_drug:
                drugbank_id = synonym_to_drug[drug]
            else:
                non_matching_drugs.add(drug)
                continue
        values = set()
        for ctid in ctids:
            # Trials without recorded conditions contribute nothing.
            values |= ctid_to_conditions.get(ctid, set())
        if len(values) == 0:
            continue
        drug_to_interventions.setdefault(drugbank_id, values)
    return drug_to_interventions
def get_frequent_interventions(drug_to_interventions):
    """Return (count, condition) pairs sorted by descending frequency.

    Counts how many drugs list each condition. Bug fix: replaced the
    Python-2-only ``iteritems()`` with ``values()``/``items()`` and the
    manual if/else counting with ``dict.get`` (identical results on
    Python 2, Python-3 compatible).
    """
    condition_to_count = {}
    for interventions in drug_to_interventions.values():
        for condition in interventions:
            condition_to_count[condition] = condition_to_count.get(condition, 0) + 1
    values = [(count, condition) for condition, count in condition_to_count.items()]
    # Descending order; tuples are fully compared so ordering is total.
    values.sort(reverse=True)
    #print values[:50]
    return values
# Script entry point.
if __name__ == "__main__":
    main()
| quimaguirre/diana | diana/toolbox/parse_clinical_trials.py | Python | mit | 12,367 |
from functools import wraps

import peewee
import tornado.gen
import tornado.web

from windseed.settings import env, db
from windseed.base import handler
from windseed.apps.admin import urls
from windseed.apps.admin.models import User
from windseed.apps.web.models import Record
class Handler(handler.Handler):
    """Base handler for the admin app: cookie-based superuser auth + error page."""

    def get_current_user(self):
        """
        Current user

        Do not use this if you have many requests within this
        application since each request will hit DB, use redis to store
        intermediate results
        """
        email = self.get_secure_cookie('user')
        if email:
            try:
                user = User.get(User.email == email)
            except User.DoesNotExist:
                user = None
            if user:
                # Only active superusers count as authenticated admins.
                if user.active and user.superuser:
                    return email
                else:
                    return None
            else:
                return None
        else:
            return None

    def write_error(self, status_code, **kwargs):
        # Render the admin error template for any uncaught HTTP error.
        self.render('admin/error.html', status_code=status_code)
def authenticated(func):
    """
    Execute target function if authenticated, redirect to login page otherwise

    Bug fix: the wrapper is now decorated with functools.wraps so the
    wrapped handler method keeps its __name__/__doc__ (useful for logging
    and introspection).
    """
    from functools import wraps

    @wraps(func)
    def decorated(self, *args, **kwargs):
        if not self.get_current_user():
            self.redirect(urls.login)
        else:
            return func(self, *args, **kwargs)
    return decorated
def unauthenticated(func):
    """
    Execute target function if not authenticated, redirect to dashboard
    otherwise

    Bug fix: the wrapper is now decorated with functools.wraps so the
    wrapped handler method keeps its __name__/__doc__ (useful for logging
    and introspection).
    """
    from functools import wraps

    @wraps(func)
    def decorated(self, *args, **kwargs):
        if self.get_current_user():
            self.redirect(urls.dashboard)
        else:
            return func(self, *args, **kwargs)
    return decorated
class LoginHandler(Handler):
    """
    Login: /admin/login/
    """

    @tornado.web.addslash
    @tornado.gen.coroutine
    @unauthenticated
    def get(self):
        """
        Render login page
        """
        self.render('admin/login.html')

    @tornado.gen.coroutine
    @unauthenticated
    def post(self):
        """
        Process login form and authenticate user if credentials are valid,
        redirect back to login page otherwise
        """
        email = self.get_argument('email')
        password = self.get_argument('password')
        try:
            user = User.get(User.email == email)
        except User.DoesNotExist:
            user = None
        if user:
            # Only active superusers with a matching password may log in.
            if user.active and user.superuser and \
                    user.check_password(password=password):
                self.set_secure_cookie('user', user.email)
                self.redirect(urls.dashboard)
            else:
                self.redirect(urls.login)
        else:
            self.redirect(urls.login)
class LogoutHandler(Handler):
    """
    Logout: /admin/logout/
    """

    @tornado.web.addslash
    @tornado.gen.coroutine
    @authenticated
    def get(self):
        """
        Clear user authentication and redirect to login page
        """
        self.clear_cookie('user')
        self.redirect(urls.login)
class DashboardHandler(Handler):
    """
    Dashboard: /admin/
    """

    @tornado.web.addslash
    @tornado.gen.coroutine
    @authenticated
    def get(self):
        """
        Render dashboard
        """
        self.render('admin/dashboard.html')
class RecordsHandler(Handler):
    """
    Records: /admin/records/

    GET renders a paginated record list; POST handles create/update/delete
    form submissions and answers with JSON for AJAX refreshes.
    """

    def get_page_context(self):
        """
        Return current page context: records page, counts and paging info.
        """
        try:
            page = int(self.get_argument('page', 1))
        except ValueError:
            page = 1
        try:
            count = peewee.SelectQuery(Record).count()
        except peewee.IntegrityError:
            count = 0
        # Total pages = full pages plus one for any remainder.
        page_count = int(count/env.ADMIN_ITEMS_PER_PAGE) + \
            int(bool(count % env.ADMIN_ITEMS_PER_PAGE))
        prev_page, page, next_page = self.paging(page, page_count)
        try:
            # Active records first, newest (by uts) first within each group.
            records = Record\
                .select()\
                .order_by(
                    Record.active.desc(),
                    Record.uts.desc())\
                .paginate(page, paginate_by=env.ADMIN_ITEMS_PER_PAGE)
        except peewee.IntegrityError:
            records = []
        return dict(records=records,
                    count=count,
                    page_count=page_count,
                    prev_page=prev_page,
                    page=page,
                    next_page=next_page)

    def ajax_page(self, status):
        """
        Return current page: JSON with *status* and rendered record list HTML.
        """
        # NOTE(review): relies on tornado.escape being importable via the
        # tornado.web import at module top — confirm.
        record_list = tornado.escape.to_basestring(
            self.render_string(
                'admin/partials/_record_list.html',
                **self.get_page_context()))
        self.write(dict(status=status, record_list=record_list))

    def ajax_empty(self, status):
        """
        Return empty response carrying only *status*.
        """
        self.write(dict(status=status))

    @tornado.web.addslash
    @tornado.gen.coroutine
    @authenticated
    def get(self):
        """
        Render records
        """
        self.render(
            'admin/records.html',
            **self.get_page_context())

    @tornado.gen.coroutine
    @authenticated
    def post(self):
        """
        Create, update or delete a record depending on which form field
        (create/update/delete) is present.
        """
        create = self.get_argument('create', None)
        update = self.get_argument('update', None)
        delete = self.get_argument('delete', None)
        uid = self.get_argument('uid', None)
        active = self.get_argument('active', None)
        # Checkbox semantics: present -> True, absent -> False.
        # NOTE(review): after this line 'active is not None' below is always
        # True — confirm intended.
        active = True if active is not None else False
        name = self.get_argument('name', None)
        if name is not None:
            name = name.strip()
            if not name:
                name = None
        description = self.get_argument('description', None)
        if create is not None and \
                active is not None and \
                name is not None:
            try:
                with db.pool.atomic():
                    created = Record.create(
                        active=active,
                        name=name,
                        description=description)
            except peewee.IntegrityError:
                created = None
            if created:
                self.ajax_page('create')
            else:
                self.ajax_empty('not_created')
        elif update is not None and \
                uid is not None and \
                active is not None and \
                name is not None:
            try:
                with db.pool.atomic():
                    updated = Record\
                        .update(
                            active=active,
                            name=name,
                            description=description)\
                        .where(Record.uid == uid)\
                        .execute()
            except peewee.IntegrityError:
                updated = None
            if updated:
                self.ajax_page('update')
            else:
                self.ajax_empty('not_updated')
        elif delete is not None and \
                uid is not None:
            try:
                with db.pool.atomic():
                    deleted = Record\
                        .delete()\
                        .where(Record.uid == uid)\
                        .execute()
            except peewee.IntegrityError:
                deleted = None
            if deleted:
                self.ajax_page('delete')
            else:
                self.ajax_empty('not_deleted')
        else:
            # No recognized command field in the form.
            self.ajax_empty('not_command')
| embali/windseed | windseed/apps/admin/handlers.py | Python | mit | 7,638 |
##### 텐서 딥러닝 1장
import tensorflow as tf
hello = tf.constant('Hello, Tensorflow')
sess = tf.Session()
print(sess.run(hello))
# 'b'는 bytes literals라는 뜻이다.
node1 = tf.constant(3.0, tf.float32) # 숫자, 데이터타입
node2 = tf.constant(4.0) # 숫자, 데이터타입
node3 = tf.add(node1, node2) # 숫자, 데이터타입
# node3 = node1 + node2 # 이렇게도 사용가능
print(node1)
print(node2)
print(node3)
sess = tf.Session()
print('sess.run(node1, node2):', sess.run([node1, node2]))
print('sess.run(node3):', sess.run(node3))
# 그래프는 미리 만들어놓고 실행시키는 단계에서 값을 주고 싶을 때
# placeholder
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b
print(sess.run(adder_node, feed_dict={a:3, b:4.5}))
print(sess.run(adder_node, feed_dict={a:[1,3], b:[2,4]}))
# tendor는 array를 말한다.
# 어레이의 랭크
# 0:scalar // 1:vector // 2:matrix // n:n-tensor.....
# tensor의 shape
# .shape()해서 나오는 모양
# type
# int32 // float32
# 정리
# 그래프를 설계, 빌드!
# 그래프 실행(sess.run, 변수설정)
# 결과 반환
#### 텐서 딥러닝 4장 - 파일에서 데이터 읽어오기
import numpy as np
import tensorflow as tf
xy = np.loadtxt('C:\python\DeepLearningPythonStudy\DeepLearning\DeepLearning\\02_Deep_ChoTH\data\data-01-test-score.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
print(x_data.shape, x_data, len(x_data))
print(y_data.shape, y_data)
# 참고
# b = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
# b[:, 1] # 전체 행의 1번 열 다 출력
# b[-1] # 마지막행
# b[-1, :] # 마지막 행 전체 출력
# b[0:2, :] # 1,2번 행의 전체 열
# 몇차원 어레이냐? -> 랭크, rank
# 어떤 모양의 어레이냐? -> 셰입, shape
# 축, axis
sess = tf.InteractiveSession()
t = tf.constant([1,2,3,4])
tf.shape(t).eval()
t = tf.constant([[1,2],
[3,4]])
tf.shape(t).eval()
t = tf.constant([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]])
tf.shape(t).eval()
m1 = tf.constant([[1.,2.]])
m2 = tf.constant(3.)
tf.shape(m1+m2).eval()
tf.reduce_mean([1.,2.], axis=0).eval() # integer이면 안된다. float!!!
x = [[1.,2.],
[3.,4.]]
tf.reduce_mean(x).eval()
tf.reduce_mean(x, axis=1).eval()
tf.reduce_mean(x, axis=0).eval() # 가장 바깥쪽의 축이 0이 된다.
tf.reduce_mean(x, axis=-1).eval() # 가장 안쪽의 축이 -1이 된다.
tf.reduce_sum(x).eval()
tf.reduce_sum(x, 1).eval()
tf.reduce_sum(x, 0).eval()
tf.reduce_sum(x, -1).eval() # 가장 안쪽
x = [[0,1,2],
[2,1,0]]
tf.argmax(x).eval() # 가장 큰 수의 인덱스를 반환하는 함수, 축을 적지 않으면 0으로 간주
tf.argmax(x, 1).eval()
tf.argmax(x, 0).eval()
tf.argmax(x, -1).eval()
t = np.array([[[0, 1, 2],
[3, 4, 5]],
[[6, 7, 8],
[9, 10, 11]]])
t.shape
tf.reshape(t, shape=[-1,3]).eval() # 안쪽은 3, 나머지는 알아서 해(-1), 2차원으로
tf.reshape(t, shape=[-1,1,3]).eval() # 안쪽은 3, 그다음은 1, 나머지는 알아서(-1), 2차원으로
tf.squeeze([[0], [1], [2]]).eval() # 차원축소
tf.expand_dims([0,1,2], 1).eval() # 차원추가
# one hot
tf.one_hot([[0], [1], [2], [0]], depth=3).eval() # 랭크가 자동으로 추가
t = tf.one_hot([[0], [1], [2], [0]], depth=3) # 랭크가 자동적으로 추가되는 것을 막기 위해 reshape
tf.reshape(t, shape=[-1, 3]).eval()
tf.cast([1.8, 2.2, 3.3, 4.9], tf.int32).eval()
tf.cast([True, False, 1 == 1, 0 == 1], tf.int32).eval()
x = [1, 4]
y = [2, 5]
z = [3, 6]
# Pack along first dim.
tf.stack([x, y, z]).eval()
tf.stack([x, y, z], axis=0).eval()
tf.stack([x, y, z], axis=1).eval()
x = [[0, 1, 2],
[2, 1, 0]]
tf.ones_like(x).eval()
tf.zeros_like(x).eval()
for x, y in zip([1,2,3], [4,5,6]):
print(x, y)
for x, y, z in zip([1,2,3], [4,5,6], [7,8,9]):
print(x, y, z)
# K = tf.sigmoid(tf.matmul(X, W1) + b1)
# hypothesis = tf.sigmoid(tf.matmul(K, W2) + b2)
# ML lab 09-1:Neural Net for XOR
# XOR 신경망 코드
import numpy as np
import tensorflow as tf
x_data = np.array([[0,0], [0,1], [1,0], [1,1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W = tf.Variable(tf.random_normal([2,1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# 데이터가 적어서 softmax 함수 생략
hypothesis = tf.sigmoid(tf.matmul(X,W) + b)
cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y) * tf.log(1-hypothesis)) # 손실함수 구하기
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost) # 경사감소법으로 손실함수 줄여나가기
# Accuracy computation
# True is hypothesis>0.5 else False
predicted = tf.cast(hypothesis>0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
# Lounch graph
sess = tf.Session()
# Initioalize Tensorflow variables
sess.run(tf.global_variables_initializer())
for step in range(1001):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step%100 == 0:
print(step, sess.run(cost, feed_dict={X:x_data, Y:y_data}))
# Accuracy report
h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X:x_data, Y:y_data})
print("\nHypothesis:", h, "\nCorrect:", c, "\nAccuracy:", a)
# 오류는 없지만 손실함수가 감소하지 않는다. 지나치게 단순해서, 1층!~
# accuracy : [0.50208956]
# 위의 망과 비슷한 2층 신경망
import numpy as np
import tensorflow as tf
x_data = np.array([[0,0], [0,1], [1,0], [1,1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W1 = tf.Variable(tf.random_normal([2,2]), name='weight1') # 앞의 2는 데이터수, 뒤의 2는 노드수(출력값의 개수)
b1 = tf.Variable(tf.random_normal([2]), name='bias1') # 바이어스는 출력값의 개수와 맞춰줘야 한다.
layer1 = tf.sigmoid(tf.matmul(X,W1) + b1)
# layer1 = tf.nn.relu(tf.matmul(X,W1) + b1)
W2 = tf.Variable(tf.random_normal([2,1]), name='weight2')
b2 = tf.Variable(tf.random_normal([1]), name='bias2')
hypothesis = tf.sigmoid(tf.matmul(layer1,W2) + b2)
cost = -tf.reduce_mean(Y*tf.log(hypothesis) + (1-Y) * tf.log(1-hypothesis)) # 손실함수 구하기
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost) # 경사감소법으로 손실함수 줄여나가기
# Accuracy computation
# True is hypothesis>0.5 else False
predicted = tf.cast(hypothesis>0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
# Lounch graph
sess = tf.Session()
# Initioalize Tensorflow variables
sess.run(tf.global_variables_initializer())
for step in range(1001):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step%100 == 0:
print(step, sess.run(cost, feed_dict={X:x_data, Y:y_data}))
# Accuracy report
h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict={X:x_data, Y:y_data})
print("\nHypothesis:", h, "\nCorrect:", c, "\nAccuracy:", a)
# Accuracy: 0.75
# 층이 많다고 무조건 정확도가 올라가는 것이 아니다.
# 왜냐하면 오차역전파를 하면서 시그모이드에 의해 항상 1보다 작은 숫자가 계속 곱해지면서 최종적인 값이 점점 작아지게 된다.
# 뒤로 갈 수록, 즉 입력값에 가까울 수록 영향력이 작아지면서 기울기가 사라지게 된다. vanishing gradient
# 그래서 렐루를 사용한다. 마지막만 시그모이드를 사용한다. 0~1 사이의 값을 가져야하기 때문에
# 초기값을 줄 때 유의사항
# 1. 0을 주면 안된다.
# 2. RBM은 어려우니 싸비에르, He
# W = np.random.randn(fan_in, fan_out/np.sqrt(fan_in)) # 싸비에르
# W = np.random.randn(fan_in, fan_out/np.sqrt(fan_in/2)) # He
# CNN 제외하고 xavier, relu, dropout, adam 사용
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
nb_classes = 10
keep_prob = tf.placeholder(tf.float32)
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, nb_classes])
###################################################
W1 = tf.get_variable("W1", shape=[784, 256], initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([256]))
layer1 = tf.nn.relu(tf.matmul(X, W1) + b1)
layer1 = tf.nn.dropout(layer1, keep_prob=keep_prob)
W2 = tf.get_variable("W2", shape=[256, 128], initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([128]))
layer2 = tf.nn.relu(tf.matmul(layer1, W2) + b2)
layer2 = tf.nn.dropout(layer2, keep_prob=keep_prob)
W3 = tf.get_variable("W3", shape=[128, nb_classes], initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([nb_classes]))
hypothesis = tf.matmul(layer2, W3) + b3
###################################################
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
# Test model
# Use tf.argmax: tf.arg_max is the deprecated alias, and the rest of this
# file already uses tf.argmax.
is_correct = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
training_epochs = 15
batch_size = 100
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size) # 1에폭 도는데 필요한 횟수
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys, keep_prob:0.7}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch +1), 'cost=', '{:.9f}'.format(avg_cost))
print("Accuracy:", accuracy.eval(session=sess, feed_dict={X:mnist.test.images, Y:mnist.test.labels, keep_prob:1}))
#### CNN 실습
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
sess = tf.InteractiveSession()
image = np.array([[[[1],[2],[3]],
[[4],[5],[6]],
[[7],[8],[9]]]], dtype=np.float32)
print(image.shape)
plt.imshow(image.reshape(3,3), cmap='Greys')
plt.show()
##########################
### 2층 CNN 진짜 실습(mnist)
##########################
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import random
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
learning_rate = 0.001
training_epochs = 15
batch_size = 100
nb_classes = 10
X = tf.placeholder(tf.float32, [None, 784])
X_img = tf.reshape(X, [-1, 28, 28, 1]) # img 28x28x1 (black/white)
Y = tf.placeholder(tf.float32, [None, nb_classes])
#L1 ImgIn shape=(?, 28, 28, 1)
W1 = tf.Variable(tf.random_normal([3,3,1,32], stddev=0.01)) # 필터의 크기, 색깔, 필터의 개수
# W1 = tf.get_variable("W1", shape=[3,3,1,32], initializer=tf.contrib.layers.xavier_initializer())???
# Conv통과 후 -> (?, 28, 28, 32)
# Pool통과 후 -> (?, 14, 14, 32)
L1 = tf.nn.conv2d(X_img, W1, strides=[1,1,1,1], padding='SAME')
print(L1)
L1 = tf.nn.relu(L1)
L1 = tf.nn.max_pool(L1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') # pooling 스트라이드 2
'''
Tensor("Conv2D:0", shape=(?, 28, 28, 32), dtype=float32)
Tensor("Relu:0", shape=(?, 28, 28, 32), dtype=float32)
Tensor("MaxPool:0", shape=(?, 14, 14, 32), dtype=float32) # 1층에서 출력값!!!!!의 형태
'''
# L2 ImgIn shape=(?, 14, 14, 32)
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01)) # 필터의 크기, 필터의 두께(L1의 출력값이랑 맞춘다.32), 필터의 개수(이미지 64개가 만들어짐)
# Conv ->(?, 14, 14, 64)
# Pool ->(?, 7, 7, 64)
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
L2_flat = tf.reshape(L2, [-1, 7*7*64]) # 다시 1차원으로 죽 펴준다.
'''
Tensor("Conv2D_1:0", shape=(?, 14, 14, 64), dtype=float32)
Tensor("Relu_1:0", shape=(?, 14, 14, 64), dtype=float32)
Tensor("MaxPool_1:0", shape=(?, 7, 7, 64), dtype=float32)
Tensor("Reshape_1:0", shape=(?, 3136), dtype=float32)
'''
L2 = tf.reshape(L2, [-1,7*7*64]) # 위에꺼 출력해보고 적는다.
W3 = tf.get_variable("W3", shape=[7*7*64, 10], initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.random_normal([10]))
hypothesis = tf.matmul(L2, W3) + b
# define cost/Loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# train my model
print('Learning started. It takes sometime.')
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X: batch_xs, Y: batch_ys}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
X: mnist.test.images, Y: mnist.test.labels}))
# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
##########################
### 3층 CNN 진짜 실습(mnist)
##########################
##########################
##########################
##########################
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import random
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
learning_rate = 0.001
training_epochs = 15
batch_size = 100
keep_prob = tf.placeholder(tf.float32)
nb_classes = 10
X = tf.placeholder(tf.float32, [None, 784])
X_img = tf.reshape(X, [-1, 28, 28, 1]) # img 28x28x1 (black/white)
Y = tf.placeholder(tf.float32, [None, nb_classes])
#L1 ImgIn shape=(?, 28, 28, 1)
W1 = tf.Variable(tf.random_normal([3,3,1,32], stddev=0.01)) # 필터의 크기, 색깔, 필터의 개수
# W1 = tf.get_variable("W1", shape=[3,3,1,32], initializer=tf.contrib.layers.xavier_initializer())???
# Conv통과 후 -> (?, 28, 28, 32)
# Pool통과 후 -> (?, 14, 14, 32)
L1 = tf.nn.conv2d(X_img, W1, strides=[1,1,1,1], padding='SAME')
# print(L1)
L1 = tf.nn.relu(L1)
L1 = tf.nn.max_pool(L1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') # pooling 스트라이드 2
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
'''
Tensor("Conv2D:0", shape=(?, 28, 28, 32), dtype=float32)
Tensor("Relu:0", shape=(?, 28, 28, 32), dtype=float32)
Tensor("MaxPool:0", shape=(?, 14, 14, 32), dtype=float32) # 1층에서 출력값!!!!!의 형태
'''
# L2 ImgIn shape=(?, 14, 14, 32)
W2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01)) # 필터의 크기, 필터의 두께(L1의 출력값이랑 맞춘다.32), 필터의 개수(이미지 64개가 만들어짐)
# Conv ->(?, 14, 14, 64)
# Pool ->(?, 7, 7, 64)
L2 = tf.nn.conv2d(L1, W2, strides=[1, 1, 1, 1], padding='SAME')
L2 = tf.nn.relu(L2)
L2 = tf.nn.max_pool(L2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
'''
Tensor("Conv2D_1:0", shape=(?, 14, 14, 64), dtype=float32)
Tensor("Relu_1:0", shape=(?, 14, 14, 64), dtype=float32)
Tensor("MaxPool_1:0", shape=(?, 7, 7, 64), dtype=float32)
Tensor("Reshape_1:0", shape=(?, 3136), dtype=float32)
'''
W3 = tf.Variable(tf.random_normal([3,3,64,128], stddev=0.01))
L3 = tf.nn.conv2d(L2, W3, strides=[1,1,1,1], padding='SAME')
L3 = tf.nn.relu(L3)
L3 = tf.nn.max_pool(L3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
L3 = tf.reshape(L3, [-1, 128*4*4])
'''
Tensor("Conv2D_2:0", shape=(?, 7, 7, 128), dtype=float32)
Tensor("Relu_2:0", shape=(?, 7, 7, 128), dtype=float32)
Tensor("MaxPool_2:0", shape=(?, 4, 4, 128), dtype=float32)
Tensor("dropout_2/mul:0", shape=(?, 4, 4, 128), dtype=float32)
Tensor("Reshape_1:0", shape=(?, 2048), dtype=float32)
'''
W4 = tf.get_variable("W4", shape=[128*4*4, 625], initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([625]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
'''
Tensor("Relu_3:0", shape=(?, 625), dtype=float32)
Tensor("dropout_3/mul:0", shape=(?, 625), dtype=float32)
'''
# L5 final fc 625 inputs -> 10 outputs
W5 = tf.get_variable("W5", shape=[625, 10], initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([10]))
# BUG FIX: the original added the literal constant 5 instead of the bias
# vector b5 (`tf.matmul(L4, W5) + 5`), which left b5 unused and shifted
# every logit by a fixed offset.
logits = tf.matmul(L4, W5) + b5
'''
Tensor("add_1:0", shape=(?, 10), dtype=float32)
'''
# define cost/Loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# train my model
print('Learning started. It takes sometime.')
for epoch in range(training_epochs):
avg_cost = 0
total_batch = int(mnist.train.num_examples / batch_size)
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
feed_dict = {X:batch_xs, Y:batch_ys, keep_prob:0.7}
c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
avg_cost += c / total_batch
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy
# BUG FIX: the original evaluated `hypothesis`, which is the output node of
# the earlier 2-layer CNN section of this script, not this 3-layer
# network's `logits` -- so accuracy was measured on the wrong model.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
    X:mnist.test.images, Y:mnist.test.labels, keep_prob:1}))
# Get one and predict
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]})) | ghost9023/DeepLearningPythonStudy | DeepLearning/DeepLearning/02_Deep_ChoTH/tensorflow_prac.py | Python | mit | 18,788 |
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import c, e, m_p
import matplotlib.pyplot as plt
plt.switch_backend('TkAgg')
plt.ion()
from PyCOBRA_accelerator import TwissMap
from PyCOBRA_beam import Bunch, gaussian_generator
# Machine radius from a 6911 m circumference (presumably the CERN SPS --
# TODO confirm).
R = 6911./(2*np.pi)
energy = 26e9
# Relativistic factors.  NOTE(review): this formula treats `energy` as
# momentum*c rather than total energy -- confirm the intended convention.
gamma = np.sqrt(1 + (e*energy/(m_p*c**2))**2)
beta = np.sqrt(1-gamma**-2)
gamma_tr = 18
# Slip factor.
eta = gamma_tr**-2 - gamma**-2
# Transverse betatron tunes and synchrotron tune.
Q_x = 20.13
Q_y = 20.18
Q_s = 0.017
# Smooth-approximation beta functions for the three planes.
beta_x = R/Q_x
beta_y = R/Q_y
beta_z = eta*R/Q_s
# One linear transfer map per plane; arguments appear to be
# (plane, alpha_in, beta_in, alpha_out, beta_out, tune) -- TODO confirm
# against TwissMap's definition.
twiss_x = TwissMap('x', 0, beta_x, 0, beta_x, Q_x)
twiss_y = TwissMap('y', 0, beta_y, 0, beta_y, Q_y)
twiss_z = TwissMap('z', 0, beta_z, 0, beta_z, Q_s)
n_macroparticles = 2000
intensity = 1e11
# Gaussian bunch in all three planes; the 2e-6 transverse emittances are
# divided by beta*gamma (normalised -> geometric).
bunch = Bunch(n_macroparticles,
              intensity/n_macroparticles, e, m_p, gamma,
              gaussian_generator(2e-6/(beta*gamma), ('x', 'xp'), 0, beta_x),
              gaussian_generator(2e-6/(beta*gamma), ('y', 'yp'), 0, beta_y),
              gaussian_generator(2.00, ('z', 'dp'), 0, beta_z))
fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=(12, 14), tight_layout=True)
one_turn_map = [twiss_x, twiss_y, twiss_z]
for k in range(60):
for m in one_turn_map:
m.kick(bunch)
ax1.scatter(bunch.x, bunch.xp, c='g', lw=0.1)
ax2.scatter(bunch.y, bunch.yp, c='g', lw=0.1)
ax3.scatter(bunch.z, bunch.dp, c='g', lw=0.1)
ax1.plot(np.mean(bunch.x), np.mean(bunch.xp), 'ro')
ax2.plot(np.mean(bunch.y), np.mean(bunch.yp), 'ro')
ax3.plot(np.mean(bunch.z), np.mean(bunch.dp), 'ro')
[ax.set_xlim(-2e-2, 2e-2) for ax in [ax1, ax2]]
[ax.set_ylim(-2e-4, 2e-4) for ax in [ax1, ax2]]
ax3.set_xlim(-100, 100)
ax3.set_ylim(-1, 1)
plt.draw()
[ax.cla() for ax in [ax1, ax2, ax3]]
plt.close()
| like2000/PyCOBRA | PyCOBRA_run.py | Python | mit | 1,736 |
import numpy as np
import pytoolkit as tk
def test_print_classification_multi():
    """Smoke test: 3-class problem with one probability row per sample."""
    y_true = np.array([0, 1, 1, 1, 2])
    rows = [
        (0.75, 0.00, 0.25),
        (0.25, 0.75, 0.00),
        (0.25, 0.75, 0.00),
        (0.25, 0.00, 0.75),
        (0.25, 0.75, 0.00),
    ]
    prob_pred = np.array(rows)
    tk.evaluations.print_classification(y_true, prob_pred)
def test_print_classification_binary():
    """Smoke test: binary labels with a 1-D probability vector."""
    labels = np.array([0, 1, 1, 0])
    scores = np.array([0.25, 0.25, 0.75, 0.25])
    tk.evaluations.print_classification(labels, scores)
def test_print_classification_binary_multi():
    """Smoke test: binary labels given as two-column class probabilities."""
    y_true = np.array([0, 1, 1, 0])
    prob_pred = np.array(
        [[0.25, 0.75],
         [0.25, 0.75],
         [0.75, 0.25],
         [0.25, 0.75]]
    )
    tk.evaluations.print_classification(y_true, prob_pred)
| ak110/pytoolkit | pytoolkit/evaluations/classification_test.py | Python | mit | 808 |
from readSVCsig import readSVCdata
from readSVCsig import readSVCheader
from readSVCsig import readSVCsig
| pavdpr/svcread | python/__init__.py | Python | mit | 106 |
# mako/cmd.py
# Copyright 2006-2021 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from argparse import ArgumentParser
from os.path import dirname
from os.path import isfile
import sys
from mako import exceptions
from mako.lookup import TemplateLookup
from mako.template import Template
def varsplit(var):
    """Split a ``name=value`` command-line variable into a 2-tuple.

    A bare name without ``=`` yields an empty value.  Only the first
    ``=`` separates name from value, so the value may itself contain
    ``=`` characters.
    """
    if "=" not in var:
        return (var, "")
    # tuple() so both branches return the same type (str.split gives a
    # list); callers build a dict from these pairs either way.
    return tuple(var.split("=", 1))
def _exit():
    # Render the Mako error traceback template to stderr and abort the
    # process with a non-zero exit status.
    sys.stderr.write(exceptions.text_error_template().render())
    sys.exit(1)
def cmdline(argv=None):
    """Command-line entry point for rendering a Mako template.

    Parses *argv* (defaults to ``sys.argv[1:]``), reads the template from
    the positional argument (or stdin when it is ``-``), renders it with
    any ``--var name=value`` definitions, and writes the result to
    ``--output-file`` or stdout.  Template construction and rendering
    errors are reported via :func:`_exit`.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--var",
        default=[],
        action="append",
        help="variable (can be used multiple times, use name=value)",
    )
    parser.add_argument(
        "--template-dir",
        default=[],
        action="append",
        help="Directory to use for template lookup (multiple "
        "directories may be provided). If not given then if the "
        "template is read from stdin, the value defaults to be "
        "the current directory, otherwise it defaults to be the "
        "parent directory of the file provided.",
    )
    parser.add_argument(
        "--output-encoding", default=None, help="force output encoding"
    )
    parser.add_argument(
        "--output-file",
        default=None,
        help="Write to file upon successful render instead of stdout",
    )
    parser.add_argument("input", nargs="?", default="-")
    options = parser.parse_args(argv)
    output_encoding = options.output_encoding
    output_file = options.output_file
    if options.input == "-":
        lookup_dirs = options.template_dir or ["."]
        lookup = TemplateLookup(lookup_dirs)
        try:
            template = Template(
                sys.stdin.read(),
                lookup=lookup,
                output_encoding=output_encoding,
            )
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit propagate instead of being reported as
            # template errors.
            _exit()
    else:
        filename = options.input
        if not isfile(filename):
            raise SystemExit("error: can't find %s" % filename)
        lookup_dirs = options.template_dir or [dirname(filename)]
        lookup = TemplateLookup(lookup_dirs)
        try:
            template = Template(
                filename=filename,
                lookup=lookup,
                output_encoding=output_encoding,
            )
        except Exception:
            _exit()
    kw = dict(varsplit(var) for var in options.var)
    try:
        rendered = template.render(**kw)
    except Exception:
        _exit()
    else:
        if output_file:
            # Context manager guarantees the handle is flushed and closed
            # even if the write fails (the original leaked the handle).
            with open(output_file, "wt", encoding=output_encoding) as fh:
                fh.write(rendered)
        else:
            sys.stdout.write(rendered)
if __name__ == "__main__":
cmdline()
| sqlalchemy/mako | mako/cmd.py | Python | mit | 2,814 |
#!/usr/bin/env python
# A python script to take targets from a google spreadsheet and run a
# Nessus vulnerability scan.
import json
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from nessrest import ness6rest
import getpass
# Login with your Google account's API key
scopes = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('API-xxxxxxxxxxxx.json', scopes)
gc = gspread.authorize(credentials)
# Open worksheet from spreadsheet
wks = gc.open("hosts").sheet1
# Get all values from the first column
host_list = wks.col_values(1)
# Keep only non-empty cells, skipping the 'IP' header cell.
# (Rewrites the original append loop as an equivalent list comprehension.)
temp_hosts = [cell for cell in host_list if cell and cell != 'IP']
# scan
# Scan Settings
# nessus_url = "https://nessus.example.com:8834"
nessus_url = "https://192.168.111.10:8834"
scan_policy = "Basic Network Scan"
scan_name = "My Scan"
# Scanner Credentials
user = getpass._raw_input('User: ')
password = getpass.getpass()
# login = "username"
# password = "password"
scan = ness6rest.Scanner(url=nessus_url, login=user,
password=password, insecure=True)
# Set scan policy that should be used
scan.policy_set(name=scan_policy)
# alt_targets on edit can take an array otherwise a new scan expects a string
hosts = ','.join(temp_hosts)
# Set target and scan name
scan.scan_add(targets=hosts, name=scan_name)
# scan.scan_exists(targets=hosts, name=scan_name)
# Run Scan
scan.scan_run()
# Download results
# scan.action(action="scans", method="get")
# for s in scan.res['scans']:
# scan.scan_name = s['name']
# scan.scan_id = s['id']
# xml_nessus = scan.download_scan(export_format='nessus')
# fp = open('%s_%s.nessus'%(scan.scan_name,scan.scan_id),"w")
# fp.write(xml_nessus)
# fp.close()
| mokuso/scan-gspread-targets | scan-gspread-targets.py | Python | mit | 1,952 |
"""
Django-environ allows you to utilize 12factor inspired environment
variables to configure your Django application.
"""
import json
import logging
import os
import re
import sys
import warnings
from django.core.exceptions import ImproperlyConfigured
from six.moves import urllib_parse as urlparse
from six import string_types
logger = logging.getLogger(__file__)
__author__ = 'joke2k'
__version__ = (0, 4, 0)
# return int if possible
def _cast_int(v):
return int(v) if hasattr(v, 'isdigit') and v.isdigit() else v
class NoValue(object):
    # Sentinel type: Env.NOTSET is a NoValue() instance, so ``None``
    # remains a legal default value for environment variables.
    def __repr__(self):
        return '<{0}>'.format(self.__class__.__name__)
class Env(object):
"""Provide scheme-based lookups of environment variables so that each
caller doesn't have to pass in `cast` and `default` parameters.
Usage:::
env = Env(MAIL_ENABLED=bool, SMTP_LOGIN=(str, 'DEFAULT'))
if env('MAIL_ENABLED'):
...
"""
NOTSET = NoValue()
BOOLEAN_TRUE_STRINGS = ('true', 'on', 'ok', 'y', 'yes', '1')
URL_CLASS = urlparse.ParseResult
DEFAULT_DATABASE_ENV = 'DATABASE_URL'
DB_SCHEMES = {
'postgres': 'django.db.backends.postgresql_psycopg2',
'postgresql': 'django.db.backends.postgresql_psycopg2',
'psql': 'django.db.backends.postgresql_psycopg2',
'pgsql': 'django.db.backends.postgresql_psycopg2',
'postgis': 'django.contrib.gis.db.backends.postgis',
'mysql': 'django.db.backends.mysql',
'mysql2': 'django.db.backends.mysql',
'mysqlgis': 'django.contrib.gis.db.backends.mysql',
'spatialite': 'django.contrib.gis.db.backends.spatialite',
'sqlite': 'django.db.backends.sqlite3',
'ldap': 'ldapdb.backends.ldap',
}
_DB_BASE_OPTIONS = ['CONN_MAX_AGE', 'ATOMIC_REQUESTS', 'AUTOCOMMIT']
DEFAULT_CACHE_ENV = 'CACHE_URL'
CACHE_SCHEMES = {
'dbcache': 'django.core.cache.backends.db.DatabaseCache',
'dummycache': 'django.core.cache.backends.dummy.DummyCache',
'filecache': 'django.core.cache.backends.filebased.FileBasedCache',
'locmemcache': 'django.core.cache.backends.locmem.LocMemCache',
'memcache': 'django.core.cache.backends.memcached.MemcachedCache',
'pymemcache': 'django.core.cache.backends.memcached.PyLibMCCache',
'rediscache': 'django_redis.cache.RedisCache',
'redis': 'django_redis.cache.RedisCache',
}
_CACHE_BASE_OPTIONS = ['TIMEOUT', 'KEY_PREFIX', 'VERSION', 'KEY_FUNCTION']
DEFAULT_EMAIL_ENV = 'EMAIL_URL'
EMAIL_SCHEMES = {
'smtp': 'django.core.mail.backends.smtp.EmailBackend',
'smtps': 'django.core.mail.backends.smtp.EmailBackend',
'smtp+tls': 'django.core.mail.backends.smtp.EmailBackend',
'smtp+ssl': 'django.core.mail.backends.smtp.EmailBackend',
'consolemail': 'django.core.mail.backends.console.EmailBackend',
'filemail': 'django.core.mail.backends.filebased.EmailBackend',
'memorymail': 'django.core.mail.backends.locmem.EmailBackend',
'dummymail': 'django.core.mail.backends.dummy.EmailBackend'
}
_EMAIL_BASE_OPTIONS = ['EMAIL_USE_TLS', 'EMAIL_USE_SSL']
DEFAULT_SEARCH_ENV = 'SEARCH_URL'
SEARCH_SCHEMES = {
"elasticsearch": "haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine",
"solr": "haystack.backends.solr_backend.SolrEngine",
"whoosh": "haystack.backends.whoosh_backend.WhooshEngine",
"simple": "haystack.backends.simple_backend.SimpleEngine",
}
def __init__(self, **scheme):
self.scheme = scheme
def __call__(self, var, cast=None, default=NOTSET, parse_default=False):
return self.get_value(var, cast=cast, default=default, parse_default=parse_default)
# Shortcuts
def str(self, var, default=NOTSET):
"""
:rtype: str
"""
return self.get_value(var, default=default)
def unicode(self, var, default=NOTSET):
"""Helper for python2
:rtype: unicode
"""
return self.get_value(var, cast=str, default=default)
def bool(self, var, default=NOTSET):
"""
:rtype: bool
"""
return self.get_value(var, cast=bool, default=default)
def int(self, var, default=NOTSET):
"""
:rtype: int
"""
return self.get_value(var, cast=int, default=default)
def float(self, var, default=NOTSET):
"""
:rtype: float
"""
return self.get_value(var, cast=float, default=default)
def json(self, var, default=NOTSET):
"""
:returns: Json parsed
"""
return self.get_value(var, cast=json.loads, default=default)
def list(self, var, cast=None, default=NOTSET):
"""
:rtype: list
"""
return self.get_value(var, cast=list if not cast else [cast], default=default)
def dict(self, var, cast=dict, default=NOTSET):
"""
:rtype: dict
"""
return self.get_value(var, cast=cast, default=default)
def url(self, var, default=NOTSET):
"""
:rtype: urlparse.ParseResult
"""
return self.get_value(var, cast=urlparse.urlparse, default=default, parse_default=True)
def db_url(self, var=DEFAULT_DATABASE_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to DATABASE_URL.
:rtype: dict
"""
return self.db_url_config(self.get_value(var, default=default), engine=engine)
db = db_url
def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to CACHE_URL.
:rtype: dict
"""
return self.cache_url_config(self.url(var, default=default), backend=backend)
cache = cache_url
def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to EMAIL_URL.
:rtype: dict
"""
return self.email_url_config(self.url(var, default=default), backend=backend)
email = email_url
def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to SEARCH_URL.
:rtype: dict
"""
return self.search_url_config(self.url(var, default=default), engine=engine)
def path(self, var, default=NOTSET, **kwargs):
"""
:rtype: Path
"""
return Path(self.get_value(var, default=default), **kwargs)
def get_value(self, var, cast=None, default=NOTSET, parse_default=False):
"""Return value for given environment variable.
:param var: Name of variable.
:param cast: Type to cast return value as.
:param default: If var not present in environ, return this instead.
:param parse_default: force to parse default..
:returns: Value from environment or default (if set)
"""
logger.debug("get '{0}' casted as '{1}' with default '{2}'".format(var, cast, default))
if var in self.scheme:
var_info = self.scheme[var]
try:
has_default = len(var_info) == 2
except TypeError:
has_default = False
if has_default:
if not cast:
cast = var_info[0]
if default is self.NOTSET:
try:
default = var_info[1]
except IndexError:
pass
else:
if not cast:
cast = var_info
try:
value = os.environ[var]
except KeyError:
if default is self.NOTSET:
error_msg = "Set the {0} environment variable".format(var)
raise ImproperlyConfigured(error_msg)
value = default
# Resolve any proxied values
if hasattr(value, 'startswith') and value.startswith('$'):
value = value.lstrip('$')
value = self.get_value(value, cast=cast, default=default)
if value != default or parse_default:
value = self.parse_value(value, cast)
return value
    # Class and static methods

    @classmethod
    def parse_value(cls, value, cast):
        """Parse and cast provided value
        :param value: Stringed value.
        :param cast: Type to cast return value as.
        :returns: Casted value
        """
        if cast is None:
            return value
        elif cast is bool:
            # Accept numeric strings ('0'/'1') as well as words ('true', ...).
            try:
                value = int(value) != 0
            except ValueError:
                value = value.lower() in cls.BOOLEAN_TRUE_STRINGS
        elif isinstance(cast, list):
            # e.g. cast=[int] -> comma-separated list of ints (empty items dropped).
            value = list(map(cast[0], [x for x in value.split(',') if x]))
        elif isinstance(cast, dict):
            # cast={'key': ..., 'value': ..., 'cast': {...}} describes how to
            # parse a ';'-separated list of 'key=value' pairs, with optional
            # per-key cast overrides in 'cast'.
            key_cast = cast.get('key', str)
            value_cast = cast.get('value', str)
            value_cast_by_key = cast.get('cast', dict())
            value = dict(map(
                lambda kv: (key_cast(kv[0]), cls.parse_value(kv[1], value_cast_by_key.get(kv[0], value_cast))),
                [val.split('=') for val in value.split(';') if val]
            ))
        elif cast is dict:
            # cast=dict -> comma-separated 'key=value' pairs, values kept as strings.
            value = dict([val.split('=') for val in value.split(',') if val])
        elif cast is list:
            value = [x for x in value.split(',') if x]
        elif cast is float:
            # clean string: keep only digits and separator characters
            float_str = re.sub(r'[^\d,\.]', '', value)
            # split for avoid thousand separator and different locale comma/dot symbol
            parts = re.split(r'[,\.]', float_str)
            if len(parts) == 1:
                float_str = parts[0]
            else:
                # Everything but the last group is the integer part.
                float_str = "{0}.{1}".format(''.join(parts[0:-1]), parts[-1])
            value = float(float_str)
        else:
            value = cast(value)
        return value
@classmethod
def db_url_config(cls, url, engine=None):
"""Pulled from DJ-Database-URL, parse an arbitrary Database URL.
Support currently exists for PostgreSQL, PostGIS, MySQL and SQLite.
SQLite connects to file based databases. The same URL format is used, omitting the hostname,
and using the "file" portion as the filename of the database.
This has the effect of four slashes being present for an absolute file path:
>>> from environ import Env
>>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite')
{'ENGINE': 'django.db.backends.sqlite3', 'HOST': None, 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': None, 'PORT': None, 'USER': None}
>>> Env.db_url_config('postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn')
{'ENGINE': 'django.db.backends.postgresql_psycopg2', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'}
"""
if not isinstance(url, cls.URL_CLASS):
if url == 'sqlite://:memory:':
# this is a special case, because if we pass this URL into
# urlparse, urlparse will choke trying to interpret "memory"
# as a port number
return {
'ENGINE': cls.DB_SCHEMES['sqlite'],
'NAME': ':memory:'
}
# note: no other settings are required for sqlite
url = urlparse.urlparse(url)
config = {}
# Remove query strings.
path = url.path[1:]
path = path.split('?', 2)[0]
# if we are using sqlite and we have no path, then assume we
# want an in-memory database (this is the behaviour of sqlalchemy)
if url.scheme == 'sqlite' and path == '':
path = ':memory:'
if url.scheme == 'ldap':
path = '{scheme}://{hostname}'.format(scheme=url.scheme, hostname=url.hostname)
if url.port:
path += ':{port}'.format(port=url.port)
# Update with environment configuration.
config.update({
'NAME': path,
'USER': url.username,
'PASSWORD': url.password,
'HOST': url.hostname,
'PORT': _cast_int(url.port),
})
if url.query:
config_options = {}
for k, v in urlparse.parse_qs(url.query).items():
if k.upper() in cls._DB_BASE_OPTIONS:
config.update({k.upper(): _cast_int(v[0])})
else:
config_options.update({k: _cast_int(v[0])})
config['OPTIONS'] = config_options
if engine:
config['ENGINE'] = engine
if url.scheme in Env.DB_SCHEMES:
config['ENGINE'] = Env.DB_SCHEMES[url.scheme]
if not config.get('ENGINE', False):
warnings.warn("Engine not recognized from url: {0}".format(config))
return {}
return config
    @classmethod
    def cache_url_config(cls, url, backend=None):
        """Pulled from DJ-Cache-URL, parse an arbitrary Cache URL.
        :param url: cache URL string, or an already-parsed URL object
        :param backend: optional explicit backend overriding the scheme lookup
        :return: a django CACHES-style configuration dict
        """
        url = urlparse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        location = url.netloc.split(',')
        # A single host collapses to a plain string; multiple hosts stay a list.
        if len(location) == 1:
            location = location[0]
        config = {
            'BACKEND': cls.CACHE_SCHEMES[url.scheme],
            'LOCATION': location,
        }
        if url.scheme == 'filecache':
            # File-based caches use the whole netloc+path as the directory.
            config.update({
                'LOCATION': url.netloc + url.path,
            })
        if url.path and url.scheme in ['memcache', 'pymemcache', 'rediscache']:
            # A path component on these schemes means a unix domain socket.
            config.update({
                'LOCATION': 'unix:' + url.path,
            })
        if url.query:
            config_options = {}
            for k, v in urlparse.parse_qs(url.query).items():
                opt = {k.upper(): _cast_int(v[0])}
                # Recognized keys become top-level settings; the rest are
                # collected into the nested OPTIONS dict.
                if k.upper() in cls._CACHE_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        # Explicit backend argument wins over the scheme-derived one.
        if backend:
            config['BACKEND'] = backend
        return config
    @classmethod
    def email_url_config(cls, url, backend=None):
        """Parses an email URL into a django EMAIL_* settings dict."""
        config = {}
        url = urlparse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
        # Remove query strings
        path = url.path[1:]
        path = path.split('?', 2)[0]
        # Update with environment configuration
        config.update({
            'EMAIL_FILE_PATH': path,
            'EMAIL_HOST_USER': url.username,
            'EMAIL_HOST_PASSWORD': url.password,
            'EMAIL_HOST': url.hostname,
            'EMAIL_PORT': _cast_int(url.port),
        })
        # Explicit backend argument wins over the scheme-derived one.
        if backend:
            config['EMAIL_BACKEND'] = backend
        elif url.scheme in cls.EMAIL_SCHEMES:
            config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
        # Scheme variants toggle TLS/SSL.
        if url.scheme in ('smtps', 'smtp+tls'):
            config['EMAIL_USE_TLS'] = True
        elif url.scheme == 'smtp+ssl':
            config['EMAIL_USE_SSL'] = True
        if url.query:
            config_options = {}
            for k, v in urlparse.parse_qs(url.query).items():
                opt = {k.upper(): _cast_int(v[0])}
                # Recognized keys become top-level settings; the rest are
                # collected into the nested OPTIONS dict.
                if k.upper() in cls._EMAIL_BASE_OPTIONS:
                    config.update(opt)
                else:
                    config_options.update(opt)
            config['OPTIONS'] = config_options
        return config
@classmethod
def search_url_config(cls, url, engine=None):
config = {}
url = urlparse.urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
# Remove query strings.
path = url.path[1:]
path = path.split('?', 2)[0]
if url.scheme in cls.SEARCH_SCHEMES:
config["ENGINE"] = cls.SEARCH_SCHEMES[url.scheme]
if path.endswith("/"):
path = path[:-1]
split = path.rsplit("/", 1)
if len(split) > 1:
path = split[:-1]
index = split[-1]
else:
path = ""
index = split[0]
config.update({
"URL": urlparse.urlunparse(("http",) + url[1:2] + (path,) + url[3:]),
"INDEX_NAME": index,
})
if path:
config.update({
"PATH": path,
})
if engine:
config['ENGINE'] = engine
return config
    @staticmethod
    def read_env(env_file=None, **overrides):
        """Read a .env file into os.environ.
        If not given a path to a dotenv path, does filthy magic stack backtracking
        to find manage.py and then find the dotenv.
        http://www.wellfireinteractive.com/blog/easier-12-factor-django/
        https://gist.github.com/bennylope/2999704
        """
        if env_file is None:
            # Default to a '.env' file next to the *caller's* source file.
            frame = sys._getframe()
            env_file = os.path.join(os.path.dirname(frame.f_back.f_code.co_filename), '.env')
            if not os.path.exists(env_file):
                warnings.warn("not reading %s - it doesn't exist." % env_file)
                return
        try:
            # env_file may be a filesystem path or an already-open file object.
            with open(env_file) if isinstance(env_file, string_types) else env_file as f:
                content = f.read()
        except IOError:
            warnings.warn("not reading %s - it doesn't exist." % env_file)
            return
        logger.debug('Read environment variables from: {0}'.format(env_file))
        for line in content.splitlines():
            # Only KEY=VALUE lines are processed; everything else is ignored.
            m1 = re.match(r'\A([A-Za-z_0-9]+)=(.*)\Z', line)
            if m1:
                key, val = m1.group(1), m1.group(2)
                # Single-quoted values are taken verbatim...
                m2 = re.match(r"\A'(.*)'\Z", val)
                if m2:
                    val = m2.group(1)
                # ...while double-quoted values have backslash escapes undone.
                m3 = re.match(r'\A"(.*)"\Z', val)
                if m3:
                    val = re.sub(r'\\(.)', r'\1', m3.group(1))
                # setdefault: variables already in the real environment win.
                os.environ.setdefault(key, str(val))
        # set defaults
        for key, value in overrides.items():
            os.environ.setdefault(key, value)
class Path(object):
    """Inspired to Django Two-scoops, handling File Paths in Settings.
    >>> from environ import Path
    >>> root = Path('/home')
    >>> root, root(), root('dev')
    (<Path:/home>, '/home', '/home/dev')
    >>> root == Path('/home')
    True
    >>> root in Path('/'), root not in Path('/other/path')
    (True, True)
    >>> root('dev', 'not_existing_dir', required=True)
    Traceback (most recent call last):
    environ.environ.ImproperlyConfigured: Create required path: /home/not_existing_dir
    >>> public = root.path('public')
    >>> public, public.root, public('styles')
    (<Path:/home/public>, '/home/public', '/home/public/styles')
    >>> assets, scripts = public.path('assets'), public.path('assets', 'scripts')
    >>> assets.root, scripts.root
    ('/home/public/assets', '/home/public/assets/scripts')
    >>> assets + 'styles', str(assets + 'styles'), ~assets
    (<Path:/home/public/assets/styles>, '/home/public/assets/styles', <Path:/home/public>)
    """

    def path(self, *paths, **kwargs):
        """Create new Path based on self.root and provided paths.
        :param paths: List of sub paths
        :param kwargs: required=False
        :rtype: Path
        """
        return self.__class__(self.__root__, *paths, **kwargs)

    def file(self, name, *args, **kwargs):
        """Open a file.
        :param name: Filename appended to self.root
        :param args: passed to open()
        :param kwargs: passed to open()
        :rtype: file
        """
        return open(self(name), *args, **kwargs)

    @property
    def root(self):
        """Current directory for this Path"""
        return self.__root__

    def __init__(self, start='', *paths, **kwargs):
        super(Path, self).__init__()
        # is_file=True lets callers seed the Path with __file__.
        if kwargs.get('is_file', False):
            start = os.path.dirname(start)
        self.__root__ = self._absolute_join(start, *paths, **kwargs)

    def __call__(self, *paths, **kwargs):
        """Retrieve the absolute path, with appended paths
        :param paths: List of sub path of self.root
        :param kwargs: required=False
        """
        return self._absolute_join(self.__root__, *paths, **kwargs)

    def __eq__(self, other):
        return self.__root__ == other.__root__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made Path instances
        # unhashable in Python 3; hash by the canonical root string so equal
        # paths hash equal.
        return hash(self.__root__)

    def __add__(self, other):
        return Path(self.__root__, other if not isinstance(other, Path) else other.__root__)

    def __sub__(self, other):
        # Path - n walks n directories up; Path - 'suffix' strips the suffix.
        if isinstance(other, int):
            return self.path('../' * other)
        elif isinstance(other, string_types):
            # BUG FIX: the original used rstrip(other), which strips a
            # *character set* rather than a suffix (e.g. '/aded' - 'd'
            # became '/a'). Remove the exact trailing suffix instead.
            if other and self.__root__.endswith(other):
                return Path(self.__root__[:-len(other)])
            return Path(self.__root__)
        raise TypeError("unsupported operand type(s) for -: '{0}' and '{1}'".format(self, type(other)))

    def __invert__(self):
        return self.path('..')

    def __contains__(self, item):
        base_path = self.__root__
        # Append a separator so '/home' does not match '/homestead'.
        if len(base_path) > 1:
            base_path = os.path.join(base_path, '')
        return item.__root__.startswith(base_path)

    def __repr__(self):
        return "<Path:{0}>".format(self.__root__)

    def __str__(self):
        return self.__root__

    def __unicode__(self):
        return self.__str__()

    @staticmethod
    def _absolute_join(base, *paths, **kwargs):
        """Join *paths* onto *base* and absolutize; required=True raises if missing."""
        absolute_path = os.path.abspath(os.path.join(base, *paths))
        if kwargs.get('required', False) and not os.path.exists(absolute_path):
            raise ImproperlyConfigured("Create required path: {0}".format(absolute_path))
        return absolute_path
def register_scheme(scheme):
    """Teach urlparse to treat *scheme* like http for netloc/query handling."""
    uses_lists = [attr for attr in dir(urlparse) if attr.startswith('uses_')]
    for attr in uses_lists:
        getattr(urlparse, attr).append(scheme)
def register_schemes(schemes):
    """Register every scheme in *schemes* with urlparse."""
    for entry in schemes:
        register_scheme(entry)
# Register database and cache schemes in URLs.
# Without this, urlparse would not split netloc/port/query for custom
# schemes such as 'postgres://' or 'rediscache://'.
register_schemes(Env.DB_SCHEMES.keys())
register_schemes(Env.CACHE_SCHEMES.keys())
register_schemes(Env.SEARCH_SCHEMES.keys())
register_schemes(Env.EMAIL_SCHEMES.keys())
| ei-grad/django-environ | environ/environ.py | Python | mit | 22,397 |
"""
run tests against a webserver running in the same reactor
NOTE: this test uses port 8888 on localhost
"""
import os
import ujson as json
import cyclone.httpclient
from twisted.internet import defer
from twisted.application import internet
from twisted.trial.unittest import TestCase
from twisted.python import log
from txbitwrap.api import factory as Api
from txbitwrap.machine import set_pnml_path
import txbitwrap.event
# Address/port the in-reactor test API server binds to (localhost:8888).
IFACE = '127.0.0.1'
PORT = 8888
# Options handed to the txbitwrap API factory: 'machine-path' points at the
# bundled schemata directory, the pg-* keys at a local postgres instance.
OPTIONS = {
    'listen-ip': IFACE,
    'listen-port': PORT,
    'machine-path': os.path.abspath(os.path.dirname(__file__) + '/../../schemata'),
    'pg-host': '127.0.0.1',
    'pg-port': 5432,
    'pg-username': 'bitwrap',
    'pg-password': 'bitwrap',
    'pg-database': 'bitwrap'
}
class ApiTest(TestCase):
    """ setup rpc endpoint and invoke ping method """

    def setUp(self):
        """ start tcp endpoint """
        set_pnml_path(OPTIONS['machine-path'])
        self.options = OPTIONS
        #pylint: disable=no-member
        self.service = internet.TCPServer(PORT, Api(self.options), interface=self.options['listen-ip'])
        #pylint: enable=no-member
        self.service.startService()

    @defer.inlineCallbacks
    def tearDown(self):
        """ stop tcp endpoint """
        self.service.stopService()
        # stop the shared event queue so the reactor can shut down cleanly
        yield txbitwrap.event.rdq.stop()

    @staticmethod
    def url(resource):
        """ build a url using test endpoint """
        return 'http://%s:%s/%s' % (IFACE, PORT, resource)

    @staticmethod
    def client(resource):
        """ json-rpc client for *resource* """
        return cyclone.httpclient.JsonRPC(ApiTest.url(resource))

    @staticmethod
    def fetch(resource, **kwargs):
        """ async request with httpclient"""
        return cyclone.httpclient.fetch(ApiTest.url(resource), **kwargs)

    @staticmethod
    def dispatch(**event):
        """ POST an event to dispatch/<schema>/<oid>/<action> """
        resource = 'dispatch/%s/%s/%s' % (event['schema'], event['oid'], event['action'])
        url = ApiTest.url(resource)
        # pass raw string payloads through untouched, serialize everything else
        if isinstance(event['payload'], str):
            data = event['payload']
        else:
            data = json.dumps(event['payload'])
        return cyclone.httpclient.fetch(url, postdata=data)

    @staticmethod
    def broadcast(**event):
        """ POST the whole event to broadcast/<schema>/<id> """
        resource = 'broadcast/%s/%s' % (event['schema'], event['id'])
        url = ApiTest.url(resource)
        data = json.dumps(event)
        return cyclone.httpclient.fetch(url, postdata=data)
| stackdump/txbitwrap | txbitwrap/test/__init__.py | Python | mit | 2,474 |
# Exercise 3
# creating a simple loop
# print out 1 thru 10

# Number of iterations for the demo loop.
# (The original also pre-set `i = 0`, but range() controls the loop
# variable, so that initializer was redundant and has been removed.)
upper_range = 10
# range() is zero-based, so shift by one to print 1..10 inclusive.
for i in range(upper_range):
    print(str(i + 1) + " this is a loop")
| paulcockram7/paulcockram7.github.io | 10python/l05/Exercise 3.py | Python | mit | 171 |
# -*- coding: utf-8 -*-
import re
import requests
import base64
import struct
import random
from sklearn.cluster import KMeans
from sklearn.preprocessing import scale
from dateutil.parser import parse
class Tweet(object):
    """Wrapper around a loklak status dict exposing its encoded brain-state."""

    def __init__(self, status):
        self.screen_name = status['screen_name']
        self.text = status['text']
        self.classification = None
        self.created_at = parse(status['created_at'])
        self.image_url = status['user']['profile_image_url_https']
        # The last two vector components carry the (lat, long) position.
        self.lat = self.vector[-2]
        self.long = self.vector[-1]

    def classify(self, classification):
        """Record the cluster label assigned to this tweet."""
        self.classification = classification

    @property
    def vector(self):
        """Decoded brain-state tuple; a neutral all-ones vector when absent."""
        val = extract_brainstate(self.text)
        # BUG FIX: compare against None with 'is', not '=='.
        if val is None:
            return (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
        else:
            return val

    @property
    def theta(self):
        return abs(self.vector[0])

    @property
    def alpha(self):
        return abs(self.vector[1])

    @property
    def beta(self):
        return abs(self.vector[2])

    @property
    def fmax(self):
        return abs(self.vector[3])

    @property
    def color(self):
        """Map the relative band densities onto an RGB hex colour string."""
        overall_density = self.theta + self.alpha + self.beta
        r = int(self.alpha / overall_density * 255)
        g = int(self.beta / overall_density * 255)
        b = int(self.theta / overall_density * 255)
        return u'#%02x%02x%02x' % (r, g, b)

    def __str__(self):
        # NOTE: self.vector never returns None (see the property above), so
        # the original's fallback branch was unreachable dead code.
        return u'<@{}: {} {} [{}]>'.format(self.screen_name, self.text[:20], self.vector,
                                           self.classification)
def get_tweets(hashtag):
    """Fetch recent statuses for *hashtag* from loklak and wrap each in a Tweet."""
    url = u'http://loklak.org:9000/api/search.json?q={}&minified=true'.format(hashtag)
    statuses = requests.get(url).json()['statuses']
    return [Tweet(status) for status in statuses]
def extract_brainstate(text):
    """Pull a base64-encoded brain-state vector out of *text*.

    Returns a 6-tuple of floats, or None when no space-delimited base64 token
    of 24-32 characters is embedded.  Short (4-float) tokens get a slightly
    jittered fixed (lat, long) pair appended.
    """
    match = re.search(r' [A-Za-z0-9+/=]{24,32} ', text)
    if match is None:
        return None
    token = match.group(0).strip()
    decoded = base64.b64decode(token)
    if len(token) == 24:
        theta, alpha, beta, fmax = struct.unpack('4f', decoded)
        return (theta, alpha, beta, fmax,
                52.541576 + random.random() / 500,
                13.390394 + random.random() / 500)
    return struct.unpack('6f', decoded)
def cluster_brains(brains):
    """Cluster tweets into two groups by their scaled brain-state vectors.

    Each tweet is tagged in place via classify(); returns the fitted model
    and the per-tweet cluster labels.
    """
    feature_matrix = scale([tweet.vector for tweet in brains])
    model = KMeans(n_clusters=2)
    labels = model.fit_predict(feature_matrix)
    for label, tweet in zip(labels, brains):
        tweet.classify(label)
    return model, labels
def find_latest(tweets):
    """Return a mapping of screen_name -> that user's most recent tweet.

    :param tweets: iterable of objects with screen_name and created_at
    :returns: dict keyed by screen_name, keeping only the newest tweet

    Fixes the ``name in latest.keys()`` anti-idiom and the double dict
    lookup by using a single .get() per tweet.
    """
    latest = {}
    for tweet in tweets:
        current = latest.get(tweet.screen_name)
        if current is None or current.created_at < tweet.created_at:
            latest[tweet.screen_name] = tweet
    return latest
| uwekamper/flutterspark | flutterbrain_web/mapview/clustering.py | Python | mit | 3,178 |
import logging
from amqpstorm import Connection
from amqpstorm import Message
logging.basicConfig(level=logging.INFO)

# Example: publish a single message with a per-message TTL.
with Connection('localhost', 'guest', 'guest') as connection:
    with connection.channel() as channel:
        # Declare a queue called, 'simple_queue'.
        channel.queue.declare('simple_queue')
        # Create the message with a expiration (time to live) set to 6000.
        # NOTE: AMQP requires 'expiration' as a *string*, in milliseconds.
        message = Message.create(
            channel, 'Hello World',
            properties={"expiration": '6000'}
        )
        # Publish the message to the queue, 'simple_queue'.
        message.publish('simple_queue')
| eandersson/amqpstorm | examples/publish_message_with_expiration.py | Python | mit | 624 |
import ctypes
class C_struct:
    """Decorator to convert the given class into a C struct."""

    # contains a dict of all known translatable types
    types = ctypes.__dict__

    @classmethod
    def register_type(cls, typename, obj):
        """Adds the new class to the dict of understood types."""
        cls.types[typename] = obj

    def __call__(self, cls):
        """Converts the given class into a C struct.
        Usage:
        >>> @C_struct()
        ... class Account:
        ...     first_name = "c_char_p"
        ...     last_name = "c_char_p"
        ...     balance = "c_float"
        ...
        >>> a = Account()

        Field declarations are *string* names of ctypes types ("c_int"),
        optionally prefixed with "*" for a pointer.  Field order follows
        class-body declaration order (guaranteed on Python 3.6+ where class
        namespaces preserve insertion order).
        """
        # build the field mapping (names -> types)
        fields = []
        # non-field public attributes (methods, constants) carried through
        passthrough = {}
        for name, decl in vars(cls).items():
            # don't wrap private variables
            if name.startswith("_"):
                continue
            # BUG FIX: the original called .startswith() on *every* public
            # attribute and crashed on methods (and silently lost them);
            # only strings are field declarations, the rest pass through.
            if isinstance(decl, str):
                if decl.startswith("*"):
                    # pointer declaration, e.g. "*c_int"
                    field_type = ctypes.POINTER(self.types[decl[1:]])
                else:
                    field_type = self.types[decl]
                fields.append((name, field_type))
            else:
                passthrough[name] = decl
        # make our bases tuple
        bases = (ctypes.Structure,) + tuple(cls.__bases__)
        # finish up our wrapping dict
        class_attrs = dict(passthrough)
        class_attrs.update({"_fields_": fields, "__doc__": cls.__doc__})
        # now create our class
        return type(cls.__name__, bases, class_attrs)
| ActiveState/code | recipes/Python/576734_C_struct_decorator/recipe-576734.py | Python | mit | 1,507 |
#!/usr/bin/env python
# integeral.py
import numpy as num
def integral(x, y):
    """Integrate tabulated data using Simpson's rule with 3-point
    Lagrangian interpolation of interval midpoints.

    Works for both regularly and irregularly sampled abscissae; the x
    values must be unique and monotonically increasing.

    :param x: vector of x axis points
    :param y: vector of corresponding y axis points
    :returns: the approximate integral of y over x

    Ported from an IDL routine by Liam Gumley (liam.gumley@ssec.wisc.edu),
    itself based on a FORTRAN-77 version by Paul van Delst, CIMSS/SSEC,
    22-DEC-95.
    """
    n = x.size
    # Shifted views over consecutive point triples (t0, t1, t2).
    x0, x1, x2 = x[0:n - 2], x[1:n - 1], x[2:n - 0]
    y0, y1, y2 = y[0:n - 2], y[1:n - 1], y[2:n - 0]
    # Interval widths and midpoints of each leading pair.
    dx = x1 - x0
    xmid = 0.5 * (x1 + x0)
    # 3-point Lagrange interpolation of y at each midpoint.
    l0 = ((xmid - x1) / (x0 - x1)) * ((xmid - x2) / (x0 - x2))
    l1 = ((xmid - x0) / (x1 - x0)) * ((xmid - x2) / (x1 - x2))
    l2 = ((xmid - x0) / (x2 - x0)) * ((xmid - x1) / (x2 - x1))
    ymid = y0 * l0 + y1 * l1 + y2 * l2
    # Simpson's rule over every (x0, xmid, x1) triple.
    integ = sum(1.0 / 6.0 * dx * (y0 + 4.0 * ymid + y1))
    # The last interval has no trailing triple: interpolate its midpoint
    # from the final three points instead, and add its contribution.
    x0, x1, x2 = x[n - 3], x[n - 2], x[n - 1]
    y0, y1, y2 = y[n - 3], y[n - 2], y[n - 1]
    dx = x2 - x1
    xmid = 0.5 * (x2 + x1)
    l0 = ((xmid - x1) / (x0 - x1)) * ((xmid - x2) / (x0 - x2))
    l1 = ((xmid - x0) / (x1 - x0)) * ((xmid - x2) / (x1 - x2))
    l2 = ((xmid - x0) / (x2 - x0)) * ((xmid - x1) / (x2 - x1))
    ymid = y0 * l0 + y1 * l1 + y2 * l2
    return integ + 1.0 / 6.0 * dx * (y1 + 4.0 * ymid + y2)
if __name__ == '__main__':
    # Self-test: print the usage docstring, then reproduce the docstring's
    # worked example (expected result is approximately 1.6274).
    print(integral.__doc__)
    X = num.array((0.0, .12, .22, .32, .36, .40, .44, .54, .64, .70, .80))
    Y = num.array((0.200000, 1.30973, 1.30524, 1.74339, 2.07490, 2.45600,
    2.84299, 3.50730, 3.18194, 2.36302, 0.231964))
    i = integral(X,Y)
    print(i)
| graziano-giuliani/pythoncode | pyuwphysret/common/pyfiles/atmos/integral.py | Python | mit | 2,891 |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# created by restran on 2016/1/2
from __future__ import unicode_literals, absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import *
from common.forms import BaseModelForm
logger = logging.getLogger(__name__)
class ClientForm(BaseModelForm):
    """Form for editing a Client record; validates the token lifetimes."""

    class Meta:
        model = Client
        fields = ('name', 'memo', 'enable', "app_id", 'secret_key',
                  'login_auth_url', 'access_token_ex', 'refresh_token_ex',
                  'sms_login_auth_url', 'change_password_url', 'sms_change_password_url')

    def clean_refresh_token_ex(self):
        # A refresh token must outlive the access token it is used to renew.
        access_token_ex = self.cleaned_data['access_token_ex']
        refresh_token_ex = self.cleaned_data['refresh_token_ex']
        if access_token_ex >= refresh_token_ex:
            raise forms.ValidationError(_('refresh_token 的过期时间不能小于 access_token'))
        return refresh_token_ex
# Explicit field display order (legacy Django <1.7 ``keyOrder`` API).
# NOTE(review): 'url' appears here but not in Meta.fields above -- confirm
# it is provided by BaseModelForm, otherwise this entry is stale.
ClientForm.base_fields.keyOrder = [
    'name', 'memo', 'url', 'enable', 'app_id',
    'secret_key', 'login_auth_url', 'access_token_ex',
    'refresh_token_ex', 'sms_login_auth_url', 'sms_change_password_url',
    'change_password_url'
]
#
# class ClientEndpointForm(BaseModelForm):
# class Meta:
# model = Client
# fields = ('name', 'memo', 'enable', 'access_key', 'secret_key')
class EndpointForm(BaseModelForm):
    """Form for editing an Endpoint; enforces URL presence and name uniqueness."""

    def __init__(self, *args, **kwargs):
        super(EndpointForm, self).__init__(*args, **kwargs)

    class Meta:
        model = Endpoint
        fields = ('name', 'is_builtin', 'url', 'unique_name', 'enable_acl', 'version',
                  'async_http_connect_timeout', 'async_http_request_timeout', 'enable_hmac',
                  'memo', 'require_login')

    def clean_url(self):
        # Builtin endpoints are served internally; only external ones need a URL.
        is_builtin = self.cleaned_data['is_builtin']
        url = self.cleaned_data['url']
        if not is_builtin and (url is None or url == ''):
            raise forms.ValidationError(_('Endpoint URL 不能为空'))
        else:
            return url

    def clean_unique_name(self):
        # Reject duplicates, excluding the instance being edited itself.
        unique_name = self.cleaned_data['unique_name']
        if self.instance is not None:
            sites = Endpoint.objects.filter(unique_name=unique_name).values('id')
            for t in sites:
                if t['id'] != self.instance.id:
                    raise forms.ValidationError(_('已存在相同名称的 Endpoint'))
        else:
            sites = Endpoint.objects.filter(unique_name=unique_name).values('id')
            if len(sites) > 0:
                raise forms.ValidationError(_('已存在相同名称的 Endpoint'))
        return unique_name
# Explicit field display order (legacy Django <1.7 ``keyOrder`` API).
# NOTE(review): 'prefix_uri' is listed here but absent from Meta.fields,
# while 'version' and 'require_login' placements differ -- confirm.
EndpointForm.base_fields.keyOrder = [
    'name', 'unique_name', 'is_builtin', 'url', 'prefix_uri', 'enable_acl',
    'async_http_connect_timeout', 'async_http_request_timeout', 'enable_hmac',
    'memo', 'require_login']
class ACLRuleForm(BaseModelForm):
    """Form for editing an ACL rule: a URI regex plus permit/deny flag."""

    class Meta:
        model = ACLRule
        fields = ('re_uri', 'is_permit')
| restran/api-gateway-dashboard | dashboard/forms.py | Python | mit | 3,021 |
import gdsfactory as gf
yaml = """
instances:
mmi1:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 10
mmi2:
component: mmi1x2
settings:
width_mmi: 4.5
length_mmi: 5
straight:
component: straight
placements:
mmi2:
x: 100
mirror: True
straight:
x: 40
y: 40
routes:
route_name1:
links:
mmi1,o3: mmi2,o3
route_name2:
links:
mmi1,o2: straight,o1
route_name3:
links:
mmi2,o2: straight,o2
ports:
o1: mmi2,o1
o2: mmi2,o1
"""
if __name__ == "__main__":
"""FIXME"""
mzi = gf.read.from_yaml(yaml)
n = mzi.get_netlist()
# mzi.show()
# mzi.plot()
| gdsfactory/gdsfactory | fixme/netlist.py | Python | mit | 766 |
class Repository(object):
    """Thin wrapper around a repository payload (mapping-style object).

    Exposes ``language`` (falling back to ``'unknown'`` when falsy) and
    ``name`` directly, and proxies every other attribute access to the
    wrapped object.
    """

    def __init__(self, obj):
        self._wrapped_obj = obj
        self.language = obj[u'language'] or u'unknown'
        self.name = obj[u'full_name']

    def __getattr__(self, attr):
        # __getattr__ only runs after normal lookup (which already consults
        # self.__dict__) has failed, so the original's
        # 'if attr in self.__dict__' branch was unreachable dead code;
        # delegate straight to the wrapped object.
        return getattr(self._wrapped_obj, attr)
| Rustem/toptal-blog-celery-toy-ex | celery_uncovered/toyex/models.py | Python | mit | 353 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: replace the ``Positions`` model with ``Position``."""

    def forwards(self, orm):
        # Deleting model 'Positions'
        db.delete_table(u'positions_positions')
        # Adding model 'Position'
        db.create_table(u'positions_position', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 8, 21, 0, 0))),
            ('content', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal(u'positions', ['Position'])

    def backwards(self, orm):
        # Reverse of forwards(): restore 'Positions', drop 'Position'.
        # Adding model 'Positions'
        db.create_table(u'positions_positions', (
            ('content', self.gf('django.db.models.fields.TextField')()),
            ('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 8, 21, 0, 0))),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
        ))
        db.send_create_signal(u'positions', ['Positions'])
        # Deleting model 'Position'
        db.delete_table(u'positions_position')

    # Frozen ORM state that South uses when running this migration.
    models = {
        u'positions.position': {
            'Meta': {'object_name': 'Position'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 21, 0, 0)'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
complete_apps = ['positions'] | Hackfmi/Diaphanum | positions/migrations/0002_auto__del_positions__add_position.py | Python | mit | 1,927 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Utilities for getting directory tree."""
import os
from pathlib import Path
import crayons
from nuke.utils import parse_ignore_file
def fg(text, color):
    """Wrap *text* in a 256-colour ANSI foreground escape sequence."""
    return "\33[38;5;{0}m{1}\33[0m".format(color, text)
def bg(text, color):
    """Wrap *text* in a 256-colour ANSI background escape sequence."""
    return "\33[48;5;{0}m{1}\33[0m".format(color, text)
def get_colorized(path: Path):
    """Colorize path name based on type.

    The symlink check must come first: ``is_dir()``/``is_file()`` follow
    symlinks, so in the original ordering the symlink (and mount) branches
    were unreachable and symlinks were coloured as their targets. The
    ``is_mount()`` branch has been dropped entirely -- a mount point is also
    a directory, so it could never be reached either, and calling it raised
    NotImplementedError on Windows before Python 3.12.
    """
    name = path.name
    if path.is_symlink():
        return crayons.cyan(name)
    elif path.is_dir():
        return crayons.blue(name)
    elif path.is_file():
        return crayons.green(name)
    elif path.is_socket():
        return crayons.magenta(name)
    else:
        return crayons.white(name)
def get_dirtree(directory):
    """
    Get the directory tree of the `directory`.
    :param directory: The root directory from where to generate the directory tree.
    :return: A tuple ``(element_list, ignore_patterns)`` -- the tree elements
        (each a dict with an indented ``repr`` string and its ``path``) and
        all patterns collected from ``.nukeignore`` files along the way.
    """
    element_list = []
    ignore_patterns = []
    # Pre-rendered, dim-grey tree drawing fragments.
    file_link = fg("├── ", 241)  # u'\u251c\u2500\u2500 '
    last_file_link = fg("└── ", 241)  # u'\u2514\u2500\u2500 '
    tree_branch = fg("│ ", 241)  # u'\u2502 '
    # Get the list of all the files/dirs in the directory to nuke.
    # We traverse in a bottom up manner so that directory removal is trivial.
    for (dirpath_str, dirnames, filenames) in os.walk(directory, topdown=False):
        # Nesting depth relative to the root determines the indent.
        level = dirpath_str.replace(str(directory), "").count(os.sep)
        if level > 0:
            indent = tree_branch * (level - 1) + file_link
        else:
            indent = ""
        dirpath = Path(dirpath_str)
        # We record every element in the tree as a dict of the indented name (repr)
        # and the path so we can use the ignore methods on the paths and still
        # have the indented names for our tree
        # only add current directory as element to be nuked if no .nukeignore file is present
        if ".nukeignore" not in filenames:
            # Add the current directory
            element = {
                "repr": "{}{}/".format(indent, get_colorized(dirpath)),
                "path": dirpath,
            }
            element_list.append(element)
        subindent = tree_branch * (level)
        # Add the files in the directory
        for idx, fn in enumerate(filenames):
            # .nukeignore files contribute patterns but are never listed.
            if fn == ".nukeignore":
                ignore_patterns.extend(
                    parse_ignore_file((dirpath / fn), dirpath))
                continue
            # Check if it is the last element
            if idx == len(filenames) - 1:
                branch = subindent + last_file_link
            else:
                branch = subindent + file_link
            element = {
                "repr": "{}{}".format(branch, get_colorized(dirpath / fn)),
                "path": (dirpath / fn),
            }
            element_list.append(element)
    return element_list, ignore_patterns
| varunagrawal/nuke | nuke/dirtree.py | Python | mit | 3,107 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import Counter
import numpy as np
STARTING_LABEL = '*' # Label of t=-1
STARTING_LABEL_INDEX = 0
def default_feature_func(_, X, t):
    """
    Returns a list of feature strings.
    (Default feature function)
    :param X: An observation vector
    :param t: time
    :return: A list of feature strings

    Each X[i] is expected to be a (word, POS-tag) pair: index 0 is the word,
    index 1 the tag.  The U/B/T prefixes mark unigram/bigram/trigram
    templates over a window of up to two positions around t.
    """
    length = len(X)
    features = list()
    # Current position.
    features.append('U[0]:%s' % X[t][0])
    features.append('POS_U[0]:%s' % X[t][1])
    if t < length-1:
        # One position ahead.
        features.append('U[+1]:%s' % (X[t+1][0]))
        features.append('B[0]:%s %s' % (X[t][0], X[t+1][0]))
        features.append('POS_U[1]:%s' % X[t+1][1])
        features.append('POS_B[0]:%s %s' % (X[t][1], X[t+1][1]))
        if t < length-2:
            # Two positions ahead.
            features.append('U[+2]:%s' % (X[t+2][0]))
            features.append('POS_U[+2]:%s' % (X[t+2][1]))
            features.append('POS_B[+1]:%s %s' % (X[t+1][1], X[t+2][1]))
            features.append('POS_T[0]:%s %s %s' % (X[t][1], X[t+1][1], X[t+2][1]))
    if t > 0:
        # One position behind.
        features.append('U[-1]:%s' % (X[t-1][0]))
        features.append('B[-1]:%s %s' % (X[t-1][0], X[t][0]))
        features.append('POS_U[-1]:%s' % (X[t-1][1]))
        features.append('POS_B[-1]:%s %s' % (X[t-1][1], X[t][1]))
        if t < length-1:
            features.append('POS_T[-1]:%s %s %s' % (X[t-1][1], X[t][1], X[t+1][1]))
        if t > 1:
            # Two positions behind.
            features.append('U[-2]:%s' % (X[t-2][0]))
            features.append('POS_U[-2]:%s' % (X[t-2][1]))
            features.append('POS_B[-2]:%s %s' % (X[t-2][1], X[t-1][1]))
            features.append('POS_T[-2]:%s %s %s' % (X[t-2][1], X[t-1][1], X[t][1]))
    return features
class FeatureSet():
feature_dic = dict()
observation_set = set()
empirical_counts = Counter()
num_features = 0
label_dic = {STARTING_LABEL: STARTING_LABEL_INDEX}
label_array = [STARTING_LABEL]
feature_func = default_feature_func
    def __init__(self, feature_func=None):
        # Sets a custom feature function.
        # When feature_func is None, the class-level default_feature_func
        # remains in effect.
        if feature_func is not None:
            self.feature_func = feature_func
    def scan(self, data):
        """
        Constructs a feature set, a label set,
        and a counter of empirical counts of each feature from the input data.
        :param data: A list of (X, Y) pairs. (X: observation vector , Y: label vector)
        """
        # Constructs a feature set, and counts empirical counts.
        for X, Y in data:
            # Every sequence starts from the special STARTING_LABEL state.
            prev_y = STARTING_LABEL_INDEX
            for t in range(len(X)):
                # Gets a label id
                try:
                    y = self.label_dic[Y[t]]
                except KeyError:
                    # Unseen label: assign the next free id and remember it.
                    y = len(self.label_dic)
                    self.label_dic[Y[t]] = y
                    self.label_array.append(Y[t])
                # Adds features
                self._add(prev_y, y, X, t)
                prev_y = y
def load(self, feature_dic, num_features, label_array):
self.num_features = num_features
self.label_array = label_array
self.label_dic = {label: i for label, i in enumerate(label_array)}
self.feature_dic = self.deserialize_feature_dic(feature_dic)
    def __len__(self):
        # The size of the feature set is the number of distinct feature ids.
        return self.num_features
    def _add(self, prev_y, y, X, t):
        """
        Generates features, constructs feature_dic.
        :param prev_y: previous label
        :param y: present label
        :param X: observation vector
        :param t: time

        Every feature string owns two kinds of ids: a bigram id keyed by
        (prev_y, y) and a unigram id keyed by (-1, y).
        """
        for feature_string in self.feature_func(X, t):
            if feature_string in self.feature_dic.keys():
                if (prev_y, y) in self.feature_dic[feature_string].keys():
                    self.empirical_counts[self.feature_dic[feature_string][(prev_y, y)]] += 1
                else:
                    # First time this bigram transition fires for the feature.
                    feature_id = self.num_features
                    self.feature_dic[feature_string][(prev_y, y)] = feature_id
                    self.empirical_counts[feature_id] += 1
                    self.num_features += 1
                if (-1, y) in self.feature_dic[feature_string].keys():
                    self.empirical_counts[self.feature_dic[feature_string][(-1, y)]] += 1
                else:
                    # First time this unigram label fires for the feature.
                    feature_id = self.num_features
                    self.feature_dic[feature_string][(-1, y)] = feature_id
                    self.empirical_counts[feature_id] += 1
                    self.num_features += 1
            else:
                self.feature_dic[feature_string] = dict()
                # Bigram feature
                feature_id = self.num_features
                self.feature_dic[feature_string][(prev_y, y)] = feature_id
                self.empirical_counts[feature_id] += 1
                self.num_features += 1
                # Unigram feature
                feature_id = self.num_features
                self.feature_dic[feature_string][(-1, y)] = feature_id
                self.empirical_counts[feature_id] += 1
                self.num_features += 1
def get_feature_vector(self, prev_y, y, X, t):
    """
    Returns a list of feature ids of given observation and transition.
    :param prev_y: previous label
    :param y: present label
    :param X: observation vector
    :param t: time
    :return: A list of feature ids
    """
    feature_ids = []
    for feature_string in self.feature_func(X, t):
        transitions = self.feature_dic.get(feature_string)
        # Unknown feature strings and unseen transitions are silently skipped.
        if transitions is not None and (prev_y, y) in transitions:
            feature_ids.append(transitions[(prev_y, y)])
    return feature_ids
def get_labels(self):
    """
    Returns a label dictionary and array.
    :return: (label_dic, label_array) -- the label -> id mapping and the
        inverse id -> label list built during training.
    """
    return self.label_dic, self.label_array
def calc_inner_products(self, params, X, t):
    """
    Calculates inner products of the given parameters and feature vectors of the given observations at time t.
    :param params: parameter vector
    :param X: observation vector
    :param t: time
    :return: list of ((prev_y, y), score) pairs
    """
    scores = Counter()
    for feature_string in self.feature_func(X, t):
        transitions = self.feature_dic.get(feature_string)
        if transitions is None:
            # feature string never seen during training
            continue
        for transition, feature_id in transitions.items():
            scores[transition] += params[feature_id]
    return list(scores.items())
def get_empirical_counts(self):
    """
    Returns the empirical feature counts as a dense numpy vector.
    :return: ndarray of shape (num_features,)
    """
    # np.zeros instead of np.ndarray: np.ndarray leaves the buffer
    # uninitialized, so any feature id absent from empirical_counts
    # would read back garbage memory.
    empirical_counts = np.zeros((self.num_features,))
    for feature_id, counts in self.empirical_counts.items():
        empirical_counts[feature_id] = counts
    return empirical_counts
def get_feature_list(self, X, t):
    """
    Collects, per transition, the set of feature ids active at time t.
    :param X: observation vector
    :param t: time
    :return: list of ((prev_y, y), set_of_feature_ids) pairs
    """
    grouped = dict()
    for feature_string in self.feature_func(X, t):
        for transition, feature_id in self.feature_dic[feature_string].items():
            grouped.setdefault(transition, set()).add(feature_id)
    return list(grouped.items())
def serialize_feature_dic(self):
    """
    Converts feature_dic into a JSON-friendly form: the (prev_y, y)
    tuple keys become "prevy_y" strings.
    """
    serialized = dict()
    for feature_string, transitions in self.feature_dic.items():
        serialized[feature_string] = {
            '%d_%d' % (prev_y, y): feature_id
            for (prev_y, y), feature_id in transitions.items()
        }
    return serialized
def deserialize_feature_dic(self, serialized):
    """
    Inverse of serialize_feature_dic: "prevy_y" string keys are turned
    back into (int, int) tuples.
    """
    feature_dic = dict()
    for feature_string, transitions in serialized.items():
        restored = dict()
        for transition_string, feature_id in transitions.items():
            prev_y, y = transition_string.split('_')
            restored[(int(prev_y), int(y))] = feature_id
        feature_dic[feature_string] = restored
    return feature_dic
return feature_dic | lancifollia/laon_crf | feature.py | Python | mit | 8,052 |
"""
Functions to spot hemy regions
"""
import os.path as op
import datetime
from collections import defaultdict, Counter
from tabulate import tabulate
from scipy import stats
import numpy as np
import pybedtools
import pysam
import vcf
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.utils import append_stem, file_exists, splitext_plus, safe_makedir
from bcbio.variation.vcfutils import bgzip_and_index
def is_good_cpg(frmt, record):
    """
    A CpG call is usable when it passed the caller's filters and both the
    reference and alternate alleles have more than 3 supporting reads.
    :param frmt: dict of VCF FORMAT fields; DP4 = "ref_fw,ref_rv,alt_fw,alt_rv"
    :param record: VCF record as a list of columns (record[6] is FILTER)
    :return: True when usable, False otherwise
    """
    # Slice first, then convert: the original sliced the result of map(),
    # which only works on Python 2 (map returns an iterator on Python 3).
    depths = [int(d) for d in frmt['DP4'].split(',')]
    ref_depth = sum(depths[:2])
    alt_depth = sum(depths[2:])
    if record[6] != "PASS":
        return False
    # Explicit False instead of the implicit None fall-through; callers
    # only test truthiness, so this is backward compatible.
    return ref_depth > 3 and alt_depth > 3
def _genotype(alleles):
if alleles[0] == alleles[1]:
return "homoz"
else:
return "heteroz"
def is_good_het(frmt, record):
    """
    A SNP call is a usable heterozygous site when the genotype alleles
    differ and total depth (DP) is above 3.
    :param frmt: dict of VCF FORMAT fields (needs GT, DP and DP4)
    :param record: VCF record as a list of columns (unused; kept for interface)
    :return: True when usable, False otherwise
    """
    # Alt-supporting depth (last two DP4 fields).  Kept from the original
    # code although only DP takes part in the decision below; the original
    # sliced a map() result, which breaks on Python 3.
    depth = sum(int(d) for d in frmt['DP4'].split(',')[2:])
    # Differing alleles <=> _genotype(...) == "heteroz"; inlined here.
    # NOTE(review): a stricter variant also requiring depth > 3 and
    # record[6] == "PASS" was commented out upstream.
    alleles = frmt['GT'].split("/")
    if alleles[0] != alleles[1] and int(frmt['DP']) > 3:
        return True
    return False
def _get_strand(record):
return record[7].split(";")[0].split("=")[1]
def _snp_veracity_both_strand(sense, anti):
"""
Only if SNPs is detected in both strand with two alleles
"""
gen_plus = sense.keys()
gen_minus = anti.keys()
allels1 = [g.split(":")[0].split("/")[0] for g in gen_plus]
allels2 = [g.split(":")[0] for g in gen_minus]
if len(allels1) == len(allels2):
return True
def _read_pairs(gt):
# print "read_pairs %s" % gt
gt1 = gt.split(":")[0].split("/")[0]
if gt.find("/") > -1:
gt2 = gt.split(":")[0].split("/")[1]
return (gt1, gt2)
def _get_total(gts, total):
    """
    Look up the accumulated read totals for the first allele of each of
    the two selected genotypes.
    :param gts: two (reads, genotype_string) pairs
    :param total: mapping first-allele -> accumulated read count
    :return: [total_for_first_gt, total_for_second_gt]
    """
    first_allele = _read_pairs(gts[0][1])[0]
    second_allele = _read_pairs(gts[1][1])[0]
    return [total[first_allele], total[second_allele]]
def _top_gt(gts):
    """
    From count-sorted (reads, genotype_string) pairs, pick the top genotype
    plus an alternative whose two alleles BOTH differ from the top one.
    :param gts: list of (reads, "a/b:strand") tuples, sorted by reads desc
    :return: ([top_gt, alternative_gt], [total_top, total_alt]) or
        (False, False) when no fully-distinct alternative exists
    """
    total = Counter()
    first = _read_pairs(gts[0][1])
    top = None
    for gt in gts:
        pair = _read_pairs(gt[1])
        if pair:
            # NOTE(review): `top` is overwritten on every match, so the LAST
            # fully-distinct genotype wins, not the best-supported one --
            # confirm this is intended given the descending sort order.
            if pair[0] != first[0] and pair[1] != first[1]:
                top = [gts[0], gt]
            # Accumulate reads per first allele (also for non-matching pairs).
            total[pair[0]] += gt[0]
    if top:
        total = _get_total(top, total)
        return top, total
    return False, False
def _above_prop(x, s, p=0.8):
pvals = []
for p in [0.8, 0.9, 1.0]:
pvals.append(stats.binom_test(x, s, p))
return max(pvals) > 0.70
def _prop(gt):
    """
    Decide whether the two top genotypes are both well represented:
    Fisher-exact test on the 2x2 read-count table plus a binomial check
    on each genotype's proportion.
    :param gt: Counter mapping genotype string -> supporting reads
    :return: True when both genotypes pass, False otherwise
    """
    # Sort by read count, descending.
    sense_sorted = sorted(zip(gt.values(), gt.keys()), reverse=True)
    top_2, total = _top_gt(sense_sorted)
    # print "top_2 %s totla %s" % (top_2, total)
    if top_2:
        # NOTE(review): gt1_prop/gt2_prop are computed but never used.
        gt2_prop = float(top_2[1][0]) / total[1]
        gt1_prop = float(top_2[0][0]) / total[0]
        table = np.array([[top_2[1][0], total[1] - top_2[1][0]], [total[0] - top_2[0][0], top_2[0][0]]])
        # print "table\n%s\ntotals %s %s" % (table, gt1_prop, gt2_prop)
        # print stats.fisher_exact(table)
        # fisher_exact returns (odds_ratio, p_value); p < 0.05 required.
        if stats.fisher_exact(table)[1] < 0.05 and _above_prop(top_2[0][0], total[0]) and _above_prop(top_2[1][0], total[1]):
            return True
    return False
def _valid_test(link, link_as):
"""
Only if top2 associated nt are equally represented
"""
# print "link %s %s" % (link, link_as)
if len(link) > 1:
sense_pval = _prop(link)
else:
sense_pval = False
# if len(link_as) > 1:
# anti_pval = _prop(link_as)
# else:
# anti_pval = True
if sense_pval:
return True
return False
def _valid(link, link_as):
"""
Only if one snp allele is associated with the Cu/Cm
"""
if len(link) == 2:
gen = link.keys()
allels1 = gen[0].split(":")[0].split("/")
allels2 = gen[1].split(":")[0].split("/")
if allels1[0] != allels2[0] and allels1[1] != allels2[1] and _snp_veracity(link, link_as):
return True
def _format(link):
"""
Give nice format to dict with alleles and reads supporting
"""
cell = ''
for allele in link:
cell += "%s=%s;" % (allele, link[allele])
return cell
def _change_to_cpg(line, tag):
return line.replace(tag, "CpG%s" % tag).strip()
def _change_to_snp(line, tag):
return line.replace(tag, "SNP%s" % tag).strip()
def _create_vcf_header(vcf_file, out_handle):
    """
    Create header for final vcf: copies selected header lines from the
    input BisSNP VCF and duplicates per-sample FORMAT definitions under
    CpG-/SNP-prefixed IDs (matching the fields built by _get_vcf_line).
    NOTE: Python 2 print-statement syntax (``print >>handle``).
    :param vcf_file: source VCF whose header lines are copied
    :param out_handle: open handle the new header is written to
    """
    print >>out_handle, "##fileformat=VCFv4.1"
    print >>out_handle, "##fileData=%s" % datetime.date.today().strftime('%y%m%d')
    with open(vcf_file) as in_handle:
        for line in in_handle:
            # Pass these structural lines through unchanged.
            if line.startswith("##reference"):
                print >>out_handle, line.strip()
            if line.startswith("##contig"):
                print >>out_handle, line.strip()
            if line.startswith("#CHROM"):
                print >>out_handle, line.strip()
            if line.startswith("##BisSNP"):
                print >>out_handle, line.strip()
            if line.startswith("##FILTER"):
                print >>out_handle, line.strip()
            if line.startswith("##FORMAT=<ID=GT"):
                print >>out_handle, line.strip()
            if line.startswith("##INFO=<ID=DP"):
                print >>out_handle, line.strip()
            # Each per-sample FORMAT tag is emitted twice: once renamed to
            # CpG<tag>, once to SNP<tag>.
            if line.startswith("##FORMAT=<ID=BRC6"):
                print >>out_handle, _change_to_cpg(line, 'BRC6')
                print >>out_handle, _change_to_snp(line, 'BRC6')
            if line.startswith("##FORMAT=<ID=CM"):
                print >>out_handle, _change_to_cpg(line, 'CM')
                print >>out_handle, _change_to_snp(line, 'CM')
            if line.startswith("##FORMAT=<ID=CU"):
                print >>out_handle, _change_to_cpg(line, 'CU')
                print >>out_handle, _change_to_snp(line, 'CU')
            if line.startswith("##FORMAT=<ID=CP"):
                print >>out_handle, _change_to_cpg(line, 'CP')
                print >>out_handle, _change_to_snp(line, 'CP')
            if line.startswith("##FORMAT=<ID=DP"):
                print >>out_handle, _change_to_cpg(line, 'DP')
                print >>out_handle, _change_to_snp(line, 'DP')
            if line.startswith("##INFO=<ID=CS"):
                print >>out_handle, line.strip()
def _get_info(info, tag):
"""
get value from info vcf field
"""
return next((value.split("=")[1] for value in info.split(";") if value.startswith(tag)), None)
def _get_format(header, frmt):
"""
get format field in dict instance
"""
frmt = dict(zip(header.split(":"), frmt.split(':')))
return frmt
def _format_vcf_value(frmt1, frmt2, tag):
    """
    Expose the CpG and SNP values of one FORMAT tag under distinct,
    prefixed names (e.g. "DP" -> "CpGDP" and "SNPDP").
    """
    cpg_key = _change_to_cpg(tag, tag)
    snp_key = _change_to_snp(tag, tag)
    return {cpg_key: frmt1[tag], snp_key: frmt2[tag]}
def _get_vcf_line(record):
    """
    create new vcf file with CpG and SNP information:
    merges the CpG record (columns 0-9) and the windowed SNP record
    (columns 10-19) produced by bedtools window into one VCF data line.
    :return: (chrom, pos, ref, alt, qual, filter, info, format, sample)
    """
    cs = _get_info(record[7], "CS")
    ref = "C%s" % record[13]
    alt = "C%s" % record[14]
    # Average the CpG and SNP QUAL scores; caller may upgrade the filter.
    qual = (float(record[5]) + float(record[15])) / 2
    filter = "LowQual"
    dp = int(_get_info(record[7], "DP")) + int(_get_info(record[17], "DP"))
    info = ";".join(["DP=%s" % dp, "CS=%s" % cs])
    cpg = _get_format(record[8], record[9])
    snp = _get_format(record[18], record[19])
    frmt = {}
    for tag in ["BRC6", "CM", "CU", "CP", "DP"]:
        frmt.update(_format_vcf_value(cpg, snp, tag))
    format = "GT:" + ":".join(frmt.keys())
    sample = snp["GT"] + ":" + ":".join(frmt.values())
    return record[0], record[11], ref, alt, qual, filter, info, format, sample
def _correct_vcf(vcf_file):
    """
    sort by genome/position, bgzip and index
    :param vcf_file: plain-text VCF to sort
    :return: path of the sorted, bgzipped file ("<stem>_sort.vcf.gz")
    """
    vcf_sort = append_stem(vcf_file, "_sort") + ".gz"
    # Skip work when a previous run already produced the sorted file.
    if not file_exists(vcf_sort):
        with file_transaction(vcf_sort) as tx_out:
            # Requires vcf-sort, bgzip and tabix on PATH.
            cmd = "cat {vcf_file} |vcf-sort | bgzip > {tx_out}"
            do.run(cmd.format(**locals()), "sort %s" % vcf_file)
            do.run("tabix -f {0}".format(tx_out), "")
    return vcf_sort
def cpg_het_pairs(cpgvcf, snpvcf, bam_file, out_file, workdir):
    """
    Detect het close to hemi-met sites: filters the CpG and SNP VCFs,
    pairs CpGs with heterozygous SNPs within 75 bp (bedtools window),
    links them through the reads in bam_file, and writes both a TSV
    report (out_file) and a merged VCF.
    NOTE: Python 2 print-statement syntax (``print >>handle``).
    :return: sorted/bgzipped VCF path (from _correct_vcf)
    """
    out_vcf = splitext_plus(out_file)[0] + ".vcf"
    cpg_filter = op.join(workdir, op.basename(append_stem(cpgvcf, "_filtered")))
    snp_filter = op.join(workdir, op.basename(append_stem(snpvcf, "_filtered")))
    # Keep only well-supported CpG calls.
    if not file_exists(cpg_filter):
        with open(cpg_filter, 'w') as out_handle:
            with open(cpgvcf) as in_handle:
                for line in in_handle:
                    if line.startswith("#"):
                        continue
                    record = line.strip().split("\t")
                    # print record
                    header, frmt = record[8], record[9]
                    frmt = dict(zip(header.split(":"), frmt.split(':')))
                    if is_good_cpg(frmt, record):
                        print >>out_handle, line
    # Keep only usable heterozygous SNP calls.
    if not file_exists(snp_filter):
        with open(snp_filter, 'w') as out_handle:
            with open(snpvcf) as in_handle:
                for line in in_handle:
                    if line.startswith("#"):
                        continue
                    record = line.strip().split("\t")
                    header, frmt = record[8], record[9]
                    frmt = dict(zip(header.split(":"), frmt.split(':')))
                    if is_good_het(frmt, record):
                        print >>out_handle, line
    # NOTE(review): out_file is only (re)written when out_vcf is missing.
    if not file_exists(out_vcf):
        res = pybedtools.BedTool(cpg_filter).window(snp_filter, w=75)
        with open(out_file, 'w') as out_handle, open(out_vcf, 'w') as vcf_handle:
            _create_vcf_header(cpgvcf, vcf_handle)
            print >>out_handle, "chrom\tCpG_pos\tCpG_nt\tSNP_pos\tAlleles\tassociation_plus\tSNP_reads_minus"
            for record in res:
                # Skip self-pairs (CpG and SNP at the same position).
                if record[1] != record[11]:
                    # if record[1] == "19889634":
                    link, link_as, align = _make_linkage(bam_file, record[0], int(record[1]), int(record[11]), _get_strand(record))
                    # NOTE(review): rebinding `res` here shadows the BedTool
                    # result being iterated (the for-loop keeps its own
                    # iterator, so behavior is unaffected).
                    res = "%s\t%s\t%s\t%s\t%s/%s\t%s\t%s" % (record[0], record[1], record[3], record[11], record[13], record[14], _format(link), _format(link_as))
                    chrom, pos, ref, alt, qual, filt, info, frmt, sample = _get_vcf_line(record)
                    # print res
                    if _valid_test(link, link_as):
                        filt = "PASS"
                        print >>out_handle, res
                        # print res
                        # print >>out_handle, '\n'.join(align)
                    vcf_res = "{chrom}\t{pos}\t.\t{ref}\t{alt}\t{qual}\t{filt}\t{info}\t{frmt}\t{sample}".format(**locals())
                    print >>vcf_handle, vcf_res
    return _correct_vcf(out_vcf)
def _complement(nt):
if nt == 'a':
return 't'
elif nt == 't':
return 'a'
elif nt == 'c':
return 'g'
elif nt == 'g':
return 'c'
def _model(pileup, snp, cpg_st):
c_pos = v_pos = []
for read in pileup:
if len(pileup[read].keys()) == 1:
continue
info_snp = pileup[read]['snp'].split(":")
info_cpg = pileup[read]['cpg'].split(":")
if info_cpg[1] == cpg_st:
if cpg_st == "+":
c_pos.append(info_cpg[0].lower())
v_pos.append(info_snp[0].lower())
else:
c_pos.append(_complement(info_cpg[0].lower()))
v_pos.append(_complement(info_snp[0].lower()))
else:
if info_snp[1] == "+":
v_pos.append(info_snp[0].lower())
else:
v_pos.append(_complement(info_snp[0].lower()))
def _make_linkage(bam_file, chrom, cpg, snp, cpg_st):
    """
    Link per-read CpG and SNP base calls around a CpG/SNP pair.
    :param bam_file: indexed BAM with the reads
    :param chrom: chromosome name
    :param cpg: 1-based CpG position
    :param snp: 1-based SNP position
    :param cpg_st: CpG strand ("+" or "-")
    :return: (link, link_as, align) -- Counter of "vSNP/cCPG:strand" -> reads
        (only reads where both calls are on cpg_st), an always-empty Counter
        (anti-sense linking is commented out), and the per-read "cpg-snp"
        base strings in genomic order.
    """
    # Convert to 0-based, ordered region boundaries.
    start, end = [cpg-1, snp-1] if cpg-1 < snp-1 else [snp-1, cpg-1]
    pairs = _pairs_matrix(bam_file, [chrom, start, end], cpg-1, snp-1)
    link = Counter()
    link_as = Counter()
    align = []
    for pair in pairs:
        # Skip reads covering only one of the two positions.
        if len(pairs[pair].keys()) == 1:
            continue
        nts = [pairs[pair]['cpg'].split(":")[0], pairs[pair]['snp'].split(":")[0]]
        align.append("-".join(nts) if cpg < snp else "-".join(nts[::-1]))
        info_snp = pairs[pair]['snp'].split(":")
        # if info_snp[1] == cpg_st:
        # print pairs[pair]
        if pairs[pair]['cpg']:
            info_cpg = pairs[pair]['cpg'].split(":")
            # Count only reads whose CpG and SNP calls share the CpG strand.
            if info_cpg[1] == info_snp[1] and info_cpg[1] == cpg_st:
                link["v%s/c%s:%s" % (info_snp[0], info_cpg[0], cpg_st)] += 1
        # else:
        # link_as["v%s:%s" % (info_snp[0], info_snp[1])] += 1
    # print "LINK\n%s\n" % link
    return link, link_as, align
def _pairs_matrix(bam_file, region, cpg, snp):
    """
    Get reads from the cpg region and pairs cpg nt with snp nt.
    :param bam_file: indexed BAM path
    :param region: [chrom, start, end] (0-based) pileup window
    :param cpg: 0-based CpG position
    :param snp: 0-based SNP position
    :return: dict read_name -> {'cpg': "N:strand", 'snp': "N:strand"}
        (minus-strand bases are lowercased)
    """
    pileup = defaultdict(dict)
    c, s, e = region
    samfile = pysam.AlignmentFile(bam_file, "rb")
    for pileupcolumn in samfile.pileup(c, s, e):
        # Only the two columns of interest are inspected.
        if pileupcolumn.pos == cpg or pileupcolumn.pos == snp:
            # print ("\ncoverage at base %s = %s" % (pileupcolumn.pos, pileupcolumn.n))
            for pileupread in pileupcolumn.pileups:
                if not pileupread.is_del and not pileupread.is_refskip: # query position is None if is_del or is_refskip is set.
                    strand = "-" if pileupread.alignment.is_reverse else "+"
                    tag = "cpg" if pileupcolumn.pos == cpg else "snp"
                    nt = pileupread.alignment.query_sequence[pileupread.query_position]
                    # lowercase marks a minus-strand base call
                    nt = nt.lower() if strand == "-" else nt
                    pileup[pileupread.alignment.query_name].update({tag: nt + ":%s" % strand})
    return pileup
def get_het(in_vcf, region, sample, out_file):
    """
    Write a BED file of usable heterozygous SNPs from in_vcf that overlap
    the given region file, tagged "GT-DP-gene-sample".
    NOTE: Python 2 print-statement syntax (``print >> handle``).
    :param in_vcf: input VCF
    :param region: BED/interval file to intersect with (gene name expected
        in the second-to-last column of the intersected record)
    :param sample: sample label used in the output tag
    :param out_file: destination BED path
    """
    res = pybedtools.BedTool(in_vcf).intersect(b=region, wo=True)
    with file_transaction(out_file) as tx_out:
        with open(tx_out, 'w') as out_handle:
            # print >> out_handle, "chrom\tstart\tend\tgen\dp4\tstrand\tgene\tsample"
            for record in res:
                gene = record[-2]
                chrom, pos, info, header, frmt = record[0], int(record[1]), record[7], record[8], record[9]
                # cs = info.split(';')[0].split('=')[1]
                frmt = dict(zip(header.split(":"), frmt.split(':')))
                # if _genotype(frmt['GT'].split("/")) == "heteroz" and int(frmt['DP']) > 10 and int(frmt['DP4']) > 10 and record[6] == "PASS":
                if is_good_het(frmt, record):
                    tag = "%s-%s-%s-%s" % (frmt['GT'], frmt['DP'], gene, sample)
                    print >> out_handle, "%s\t%s\t%s\t%s\t.\t+" % (chrom, pos, pos + 1, tag )
def post_processing(vcf_res, vcf_merged, out):
    """
    merge list of vcf files and get stats
    NOTE: Python 2 (``iteritems``, print statements).
    :param vcf_res: list of per-sample VCF paths
    :param vcf_merged: path the bcftools merge result is written to
    :param out: prefix for "<out>_shared_stat.tsv" and "<out>_stat.tsv"
    :return: the input list when it has a single element, otherwise None
        (NOTE(review): inconsistent return types -- confirm callers cope)
    """
    if len(vcf_res) == 1:
        # Nothing to merge; no stats are produced in this case.
        return vcf_res
    if not file_exists(vcf_merged):
        cmd = "bcftools merge {0} > {1}".format(" ".join(vcf_res), vcf_merged)
        do.run(cmd, "merge files")
    vcf_reader = vcf.Reader(open(vcf_merged, 'r'))
    samples = vcf_reader.samples
    num_call = Counter()          # how many samples share each PASS variant
    num_call_sample = Counter()   # PASS variants called per sample
    for record in vcf_reader:
        if not record.FILTER:
            num_call[record.num_called] += 1
            # print record.num_called
            for sample in samples:
                if record.genotype(sample)['GT'] != "./.":
                    # print record.genotype(sample)['GT']
                    num_call_sample[sample] += 1
    with open(out + "_shared_stat.tsv", 'w') as stat_handle:
        print >>stat_handle, tabulate([[k, v] for k, v in num_call.iteritems()], headers=["# samples", "# of SNPs"])
    with open(out + "_stat.tsv", 'w') as stat_handle:
        print >>stat_handle, tabulate([[k, v] for k, v in num_call_sample.iteritems()], headers=["samples", "# of SNPs"])
def detect_asm(data, args):
    """
    Run the CpG/SNP pairing pipeline for one sample.
    :param data: dict with 'fastq' (the raw CpG VCF path, despite the key
        name) and 'bam'; gets 'asm' (result VCF path) added
    :param args: needs args.out, the base output directory
    :return: the updated data dict
    """
    vcf_res = []  # NOTE(review): dead initialization, overwritten below
    in_vcf = data['fastq']
    bam_file = data['bam']
    # Sample name = basename without extension and the ".rawcpg" suffix.
    sample = splitext_plus(op.basename(in_vcf))[0].split(".raw")[0].replace(".rawcpg", "")
    workdir = op.join(args.out, sample)
    safe_makedir(workdir)
    # The companion SNP VCF follows the "rawcpg" -> "rawsnp" naming scheme.
    snp_file = in_vcf.replace("rawcpg", "rawsnp")
    assert bam_file, "No bam file associated to vcf %s" % in_vcf
    out_file = op.join(workdir, sample + "_pairs.tsv")
    vcf_res = cpg_het_pairs(in_vcf, snp_file, bam_file, out_file, workdir)
    data['asm'] = vcf_res
    return data
| lpantano/ASMfinder | asm/select.py | Python | mit | 16,220 |
#import logging
#logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S',
# format='%(asctime)s %(levelname)s: %(message)s')
# Python 2 module (SocketServer / cStringIO imports).
import unittest
import SocketServer, socket
import random, time
import threading
import cStringIO
from datetime import datetime
from shapy import register_settings
# Settings must be registered before importing the shaper below.
register_settings('tests.emulation.settings')
from shapy.emulation.shaper import Shaper
from tests.mixins import ShaperMixin, ServerMixin
from tests.utils import total_seconds
class TestCWCShaping(unittest.TestCase, ShaperMixin, ServerMixin):
    """
    End-to-end test of traffic shaping: transfers a 0.5 MB random payload
    client -> server and back over loopback aliases and checks that the
    elapsed time matches the shaper's estimate and that the IFB qdisc
    counters saw the transferred bytes.
    """
    filesize = 2**19 # 0.5MB
    def setUp(self):
        # Distinct loopback aliases so shaping rules can address each side.
        self.server_addr = ('127.0.0.2', 55000)
        self.client_addr = ('127.0.0.3', 55001)
        # shaping init
        ShaperMixin.setUp(self)
        ServerMixin.run_server(self)
        with open('/dev/urandom', 'rb') as f:
            self.randomfile = bytearray(f.read(self.filesize))
    def test_transfer(self):
        self.sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR: http://stackoverflow.com/questions/3229860/what-is-the-meaning-of-so-reuseaddr-setsockopt-option-linux
        s = self.sock_client
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(self.client_addr)
        s.connect(self.server_addr)
        start = datetime.now()
        # client -> server
        sent = 0
        while sent < self.filesize:
            sent += s.send(self.randomfile[sent:sent+4096])
        # We have to wait until the server finishes reading data from its socket
        # and closes the connection.
        rcvd = s.recv(1024)
        delay = total_seconds(datetime.now() - start)
        #delay = delta.seconds + delta.microseconds/float(10**6)
        tt = self.estimate_transfer_time(self.filesize, self.client_addr[0],
                                         self.server_addr[0])
        # 0.4 s tolerance against the shaper's theoretical transfer time.
        self.assertAlmostEqual(delay, tt, delta=0.4)
        # server -> client
        start = datetime.now()
        while len(rcvd) < self.filesize:
            rcvd += s.recv(1024)
        delay = total_seconds(datetime.now() - start)
        tt = self.estimate_transfer_time(self.filesize, self.server_addr[0],
                                         self.client_addr[0])
        self.assertAlmostEqual(delay, tt, delta=0.4)
        # statistics of qdiscs on IFB must correctly reflect the transmitted data
        self._test_traffic()
        s.close()
    def _test_traffic(self):
        # (sent, received) byte counters per address from the shaper.
        c = self.sh.get_traffic(self.client_addr[0])
        s = self.sh.get_traffic(self.server_addr[0])
        # qdisc statistics reflect all traffic, including header of each layer,
        # not only filesize
        delta = self.filesize/100
        self.assertAlmostEqual(c[0], self.filesize, delta=delta)
        self.assertAlmostEqual(c[1], self.filesize, delta=delta)
        self.assertAlmostEqual(s[0], self.filesize, delta=delta)
        self.assertAlmostEqual(s[1], self.filesize, delta=delta)
    def tearDown(self):
        if hasattr(self, 'sock_client'):
            self.sock_client.close()
        ShaperMixin.tearDown(self)
| praus/shapy | tests/emulation/test_shaping.py | Python | mit | 3,245 |
import urllib
from urllib2 import urlopen, quote
import urllib2
import json
import time
import datetime
import csv
import unicodedata
import sys
# Current epoch time, rendered as YYYY_MM_DD for the output filename.
timestring = time.time()
formatted_timestring = datetime.datetime.fromtimestamp(timestring).strftime('%Y_%m_%d')
# Indicator to search for is the first CLI argument.
search = sys.argv[1]
# Placeholder -- replace with a real Threat Recon API key before use.
api_key = 'YOUR_API_KEY'
def query_threat_recon(indicator, api_key):
    """
    POST the indicator to the Threat Recon search API and return the
    parsed "Results" list from the JSON response.
    NOTE: Python 2 (urllib/urllib2).
    :param indicator: indicator string to search for
    :param api_key: Threat Recon API key
    :return: list of result dicts (may be None/empty per the API)
    """
    params = urllib.urlencode({'api_key': api_key, 'indicator': indicator})
    f = urllib2.urlopen("https://api.threatrecon.co/api/v1/search", params)
    data = json.load(f)
    results = data["Results"]
    #print json.dumps(data, indent=4, sort_keys=False)
    return results
# Query the API once, then dump every result row into a dated CSV.
# NOTE: Python 2 script (print statement, str.decode on byte strings).
results = query_threat_recon(search, api_key)
csv_file_name = 'TR_search_'+formatted_timestring+'.csv'
with open(csv_file_name, 'wb') as csvfile:
    indicatorwriter = csv.writer(csvfile, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
    indicatorwriter.writerow(['INDICATOR','REFERENCE','SOURCE','KILLCHAIN','FIRST_SEEN','LAST_SEEN','ATTRIBUTION','PROCESS_TYPE','RNAME', 'RDATA','ROOT_NODE','COUNTRY','TAGS','COMMENT','CONFIDENCE'])
    for item in results:
        indicator = search
        # str(...).decode('utf-8') is the Python 2 byte->unicode round trip.
        reference = str(item["Reference"]).decode('utf-8')
        source = str(item["Source"]).decode('utf-8')
        killchain = str(item["KillChain"]).decode('utf-8')
        first_seen = str(item["FirstSeen"]).decode('utf-8')
        last_seen = str(item["LastSeen"]).decode('utf-8')
        attribution = str(item["Attribution"]).decode('utf-8')
        process_type = str(item["ProcessType"]).decode('utf-8')
        rrname = str(item["Rrname"])
        rdata = str(item["Rdata"])
        rootnode = str(item["RootNode"])
        country = str(item["Country"]).decode('utf-8')
        tags = str(item["Tags"]).decode('utf-8')
        comment = item["Comment"]
        # Comments may contain non-ASCII; normalize and drop what can't map.
        comment2 = unicodedata.normalize('NFKD', comment).encode('ascii','ignore')
        confidence = str(item["Confidence"]).decode('utf-8')
        indicatorwriter.writerow([indicator,reference,source,killchain,first_seen,last_seen,attribution,process_type,rrname,rdata,rootnode,country,tags,comment2,confidence])
lenresults = str(len(results))
print csv_file_name
#print lenresults +' records added to CSV'
| JburkeRSAC/PizzaCat-Web | threat_recon_to_CSV.py | Python | mit | 2,231 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Sarielsaz Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7 block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
"""
from test_framework.mininode import *
from test_framework.test_framework import SarielsazTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
direct_fetch_response_time = 0.05
class TestNode(NodeConnCB):
    """P2P test peer that records how blocks are announced (inv vs headers)."""
    def __init__(self):
        super().__init__()
        self.block_announced = False
        self.last_blockhash_announced = None
    def clear_last_announcement(self):
        """Forget any inv/headers announcement seen so far."""
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
    # Request data for a list of block hashes
    def get_data(self, block_hashes):
        msg = msg_getdata()
        for x in block_hashes:
            msg.inv.append(CInv(2, x))
        self.connection.send_message(msg)
    def get_headers(self, locator, hashstop):
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)
    def send_block_inv(self, blockhash):
        msg = msg_inv()
        msg.inv = [CInv(2, blockhash)]
        self.connection.send_message(msg)
    def on_inv(self, conn, message):
        self.block_announced = True
        self.last_blockhash_announced = message.inv[-1].hash
    def on_headers(self, conn, message):
        if len(message.headers):
            self.block_announced = True
            message.headers[-1].calc_sha256()
            self.last_blockhash_announced = message.headers[-1].sha256
    # Test whether the last announcement we received had the
    # right header or the right inv
    # inv and headers should be lists of block hashes
    def check_last_announcement(self, headers=None, inv=None):
        """Wait for an announcement, then verify it matches exactly the
        expected headers/inv hash lists; consumes the recorded messages."""
        # PEP 8 fix: identity comparison with None (was "!= None").
        expect_headers = headers if headers is not None else []
        expect_inv = inv if inv is not None else []
        test_function = lambda: self.block_announced
        wait_until(test_function, timeout=60, lock=mininode_lock)
        with mininode_lock:
            self.block_announced = False
            success = True
            compare_inv = []
            if "inv" in self.last_message:
                compare_inv = [x.hash for x in self.last_message["inv"].inv]
            if compare_inv != expect_inv:
                success = False
            hash_headers = []
            if "headers" in self.last_message:
                # treat headers as a list of block hashes
                hash_headers = [x.sha256 for x in self.last_message["headers"].headers]
            if hash_headers != expect_headers:
                success = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
        return success
    def wait_for_getdata(self, hash_list, timeout=60):
        if hash_list == []:
            return
        test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        return
    def wait_for_block_announcement(self, block_hash, timeout=60):
        test_function = lambda: self.last_blockhash_announced == block_hash
        wait_until(test_function, timeout=timeout, lock=mininode_lock)
        return
    def send_header_for_blocks(self, new_blocks):
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)
    def send_getblocks(self, locator):
        getblocks_message = msg_getblocks()
        getblocks_message.locator.vHave = locator
        self.send_message(getblocks_message)
class SendHeadersTest(SarielsazTestFramework):
def set_test_params(self):
    """Two nodes on a fresh chain: node0 under test, node1 drives reorgs."""
    self.setup_clean_chain = True
    self.num_nodes = 2
# mine count blocks and return the new tip
def mine_blocks(self, count):
    """
    Mine `count` blocks on node0 and return the new tip hash as an int.
    """
    # Clear out last block announcement from each p2p listener so the next
    # check_last_announcement() only sees the blocks mined here.
    # (Idiom fix: a plain loop instead of a side-effect-only comprehension.)
    for listener in self.p2p_connections:
        listener.clear_last_announcement()
    self.nodes[0].generate(count)
    return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
    """
    Mine `length` blocks on node0, then have node1 invalidate them and
    mine a longer (length+1) replacement chain.
    :return: list of the new blocks' hashes as ints
    """
    self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
    sync_blocks(self.nodes, wait=0.1)
    # Reset listener state only after the to-be-reorged blocks were announced.
    for x in self.p2p_connections:
        x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
        x.clear_last_announcement()
    tip_height = self.nodes[1].getblockcount()
    # Invalidate the first of the `length` freshly mined blocks.
    hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
    self.nodes[1].invalidateblock(hash_to_invalidate)
    all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
    sync_blocks(self.nodes, wait=0.1)
    return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = TestNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
self.log.info("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256])
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
self.log.info("Part 1: success!")
self.log.info("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getheaders()
# Should have received a getheaders now
test_node.send_header_for_blocks(blocks)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
[ inv_node.send_block_inv(x.sha256) for x in blocks ]
test_node.wait_for_getdata([x.sha256 for x in blocks])
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert "inv" not in inv_node.last_message
assert "headers" not in inv_node.last_message
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
self.log.info("Part 2: success!")
self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
self.log.info("Part 3: success!")
self.log.info("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert "getdata" not in test_node.last_message
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)
# Announcing 1 more header should not trigger any response
test_node.last_message.pop("getdata", None)
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert "getdata" not in test_node.last_message
self.log.info("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
self.log.info("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders()
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders()
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_message.pop("getheaders", None)
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders()
# Eventually this stops working.
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
self.log.info("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
# Run the test directly when this file is invoked as a script.
if __name__ == '__main__':
    SendHeadersTest().main()
| sarielsaz/sarielsaz | test/functional/sendheaders.py | Python | mit | 24,054 |
""" Git Parent model """
from django.db import models
class GitParentEntry(models.Model):
    """ Git Parent: one parent->child edge of the commit DAG of a project. """
    # Repository this edge belongs to.
    project = models.ForeignKey('gitrepo.GitProjectEntry', related_name='git_parent_project')
    # Parent commit of the edge.
    parent = models.ForeignKey('gitrepo.GitCommitEntry', related_name='git_parent_commit')
    # Child ("son") commit of the edge.
    son = models.ForeignKey('gitrepo.GitCommitEntry', related_name='git_son_commit')
    # Position of this parent among the son's parents — presumably 0 = first
    # parent, relevant for merge commits; confirm against the code that fills it.
    order = models.IntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)
    def __unicode__(self):
        # Python 2 string representation (this model predates Django 2 / Python 3).
        return u'Parent:{0}, Son:{1}, order:{2}'.format(self.parent.commit_hash, self.son.commit_hash, self.order)
| imvu/bluesteel | app/logic/gitrepo/models/GitParentModel.py | Python | mit | 713 |
from character import Trait, Perk
__author__ = "Johannes Hackbarth"
# Every playable race in the system.
ALL_RACES = {"Deathclaw", "Dog", "Ghoul", "Half Mutant", "Human", "Robot", "Super Mutant"}
# Subsets used below to restrict which races may pick a given trait.
ANIMALS = {"Deathclaw", "Dog"}
ROBOTS = {"Robot"}
# TODO: Add all effects to traits
# All character traits: Trait(name, description, allowed_races, [attr_mod, attr_name]).
TRAIT_LIST = [
    Trait("Fast Metabolism",
          "Your metabolic rate is twice normal. This means that you are much less resistant"
          " to radiation and poison, but your body heals faster.You get a 2 point bonus to "
          "Healing Rate, but your Radiation and Poison Resistance start at 0% (racial "
          "modifiers are added later). Robots cannot choose this trait.",
          ALL_RACES - ROBOTS),
    Trait("Bruiser",
          "A little slower, but a little bigger. You may not hit as often, but they will feel it when you do! "
          "Your total action points are lowered, but your Strength is increased. You get a 2 point bonus to Strength,"
          " but loose 2 Action Points.",
          ALL_RACES,
          # FIX: attr_name was "strength" (lowercase); every other trait uses
          # capitalized attribute names ("Agility", "Perception", "Intelligence").
          attr_mod=2, attr_name="Strength"),
    Trait("Small Frame",
          "You are not quite as big as everyone else, but that never slowed you down. You can't carry as much, but you"
          " are more agile. You get a 1 point bonus to Agility, but your Carry Weight is only 15 lbs Y Strength.",
          ALL_RACES,
          attr_mod=1, attr_name="Agility"),
    Trait("One Handed",
          "One of your hands is very dominant. You excel with single-handed weapons, but two-handed weapons cause a"
          " problem. You have a 40% penalty to hit with two-handed weapons, but get a 20% bonus to hit with weapons "
          "that only require one hand. Animals cannot choose this trait.",
          ALL_RACES - ANIMALS,
          ),
    Trait("Finesse",
          "Your attacks show a lot of finesse. You don't do as much damage, but you cause more critical hits. All of "
          "your attacks lose 30% of their damage (after reductions are made for Damage Resistance, etc.) but you gain "
          "a 10%bonus to Critical Chance.",
          ALL_RACES),
    Trait("Kamikaze",
          "By not paying attention to any threats, you can act a lot faster in a turn. This lowers your Armor Class "
          "to just what you are wearing, but you sequence much faster in a combat turn. You have no natural Armor "
          "Class (Armor Class is therefore 0 regardless of Agility). You must wear armor to get an Armor Class.Your "
          "sequence gets a 5 point bonus.",
          ALL_RACES),
    Trait("Heavy Handed",
          "You swing harder, not better. Your attacks are very brutal, but lack finesse. You rarely cause a good "
          "critical hit, but you always do more melee damage. You get a 4 point bonus to Melee Damage, but your "
          "critical hits do 30% less damage, and are 30% less likely to cripple a limb or cause unconsciousness.",
          ALL_RACES),
    Trait("Fast Shot",
          "You don't have time for a targeted attack, because you attack faster than normal people. It costs you one "
          "less action point to use a weapon. You cannot perform targeted shots, but all weapons take one less action "
          "point to use. Note that the Fast Shot trait has no effect on HtH or Melee attacks. Animals cannot choose "
          "this trait.",
          ALL_RACES - ANIMALS),
    Trait("Bloody Mess",
          "By some strange twist of fate, people around you die violently. You always see the worst way a person can "
          "die. This does not mean you kill them any faster or slower, but when they do die, it will be dramatic. "
          "Just how dramatic is up to the Gamemaster.",
          ALL_RACES),
    Trait("Jinxed",
          "The good thing is that everyone around you has more critical failures in combat. The bad thing is: so do "
          "you! If you, a member of your party, or a non-player character have a failure in combat, there is a "
          "greater likelihood the failure will be upgraded (downgraded?) to a critical failure. Critical failures are "
          "bad: weapons explode, you may hit the wrong target, you could lose part of your turn, or any number of bad "
          "things. Failures are 50% more likely to become critical failures around the character or anyone else in "
          "combat.",
          ALL_RACES),
    Trait("Good Natured",
          "You studied less-combative skills as you were growing up. Your combat skills start at a lower level, but "
          "First Aid, Doctor, Speech, and Barter are substantially improved. Those skills get a 20% bonus. You get a "
          "10% penalty to starting combat skills (Small Guns, Big Guns, Energy Weapons, Unarmed, and Melee Weapons). "
          "This is a one-time bonus. Animals and robots cannot choose this trait.",
          ALL_RACES - ANIMALS - ROBOTS),
    Trait("Chem Reliant",
          "You are more easily addicted to chems. Your chance to be addicted is twice normal, but you recover in half "
          "the time from their ill effects. Robots cannot choose this trait.",
          ALL_RACES - ROBOTS),
    Trait("Chem Resistant",
          "Chems only effect you half as long as normal, but your chance to be addicted is only 50% the normal amount. "
          "Robots cannot choose this trait.",
          ALL_RACES - ROBOTS),
    Trait("Night Person",
          "As a night-time person, you are more awake when the sun goes down. Your Intelligence and Perception are "
          "improved at night but are dulled during the day. You get a 1 point penalty to these Statistics from 0601 "
          "to 1800, and a 1 point bonus to these Stats from 1801 to 0600. Robots cannot choose this trait. Note that "
          "the bonus cannot take IN and PE above the character’s racial maximum or below the character’s racial "
          "minimum.",
          # FIX: the description explicitly says robots cannot choose this
          # trait, but the races argument was ALL_RACES; exclude robots.
          ALL_RACES - ROBOTS),
    Trait("Skilled",
          "Since you spend more time improving your skills than a normal person, you gain more skill points. The "
          "tradeoff is that you do not gain as many extra abilities. You will gain a perk at one level higher than "
          "normal. For example, if you normally gained a perk every 4 levels, you would now gain a perk every 5 "
          "levels. You will get an additional 5 skill points per new experience level, and a one-time bonus of +10% "
          "to your skills when you begin the game. Animals and robots cannot choose this trait.",
          ALL_RACES - ANIMALS - ROBOTS),
    Trait("Gifted",
          "You have more innate abilities than most, so you have not spent as much time honing your skills. Your "
          "statistics are better than the average person, but your skills are lacking. All Stats get a 1- point "
          "bonus, but all skills get a 10% penalty and you receive 5 less Skill Points per level. Robots cannot "
          "choose this trait.",
          ALL_RACES - ROBOTS),
    Trait("Sex Appeal",
          "This trait increases your chances of having a good reaction with members of the opposite sex. "
          "Unfortunately, this trait tends to annoy members of your sex. Jealous twits. When interacting with members "
          "of the opposite sex, you gain a 1 point bonus to Charisma for reactions only. When making Speech and Barter "
          "rolls, you gain a 40% bonus for each. When interacting with members of the same sex, you have a 1 point "
          "penalty to Charisma for reactions only and have a 40% penalty to both Speech and Barter rolls. Only humans "
          "can choose this trait.",
          ["Human"]),
    Trait("Glowing One",
          "Extreme radiation exposure has left you glowing in the dark. Your glow eliminates modifiers from light in "
          "combat for both you and your enemies. In addition, you gain a +50% bonus to Radiation Resistance, but "
          "everyone around you takes 10 rads per hour (see Radiation under Damage and Death, below). Only Ghouls "
          "can choose this trait.",
          ["Ghoul"]),
    Trait("Tech Wizard",
          "You spent your formative years hunched over a bench learning all about the way things work. The trouble "
          "is that you’ve ruined your eyes! You get a +15% bonus to Science, Repair, and Lockpick skills, but you "
          "lose 1 Perception. Deathclaws and Dogs cannot choose this trait.",
          ALL_RACES - ANIMALS,
          attr_mod=-1, attr_name="Perception"),
    Trait("Fear the Reaper",
          "You have cheated death! You gain perks as if you were a human, but you are now on death’s short list. "
          "This means that once a month, you must roll against Luck or else drop dead. Only Ghouls can choose this "
          "trait.",
          ["Ghoul"]),
    Trait("Vat Skin",
          "Other people find you hideous to behold and disgusting to smell after your “dip” in the FEV vats. "
          "The good news is that you gain a +10 bonus to your Armor Class thanks to your extra-tough skin. The bad "
          "news is that everyone within ten hexes of your location, friend and foe, suffers a 1-point penalty to "
          "Perception (you are unaffected). Only Mutants can choose this trait.",
          ["Half Mutant", "Super Mutant"],
          attr_mod=-1, attr_name="Perception"),
    Trait("Ham Fisted",
          "Genetic engineering – or dumb luck – has endowed you with huge hands. You get a “free” tag skill in "
          "Unarmed, but you suffer a -20% penalty to Small Guns, First Aid, Doctor, Repair, Science, and Lockpick "
          "Skills (these numbers cannot go below 0%). Only Mutants can choose this trait.",
          ["Half Mutant", "Super Mutant"]),
    Trait("Domesticated",
          "You have undergone extensive house training and have developed an above average Intelligence. Your IN is "
          "raised by 1, and can even go above your racial maximum, but you get a –2 penalty to Melee Damage. Only "
          "Deathclaws and Dogs can choose this trait.",
          ANIMALS,
          attr_mod=1, attr_name="Intelligence"),
    Trait("Rabid",
          "You are a half-crazed, feral killing machine. You are not affected by crippled limbs (blindness still "
          "affects you normally), and every time you kill an opponent in combat, you get 5 more APs that round. "
          "Chems, including stimpaks, have no effect on you. Only Deathclaws and Dogs can choose this trait.",
          ANIMALS),
    Trait("Tight Nuts",
          "This robot was built to take the knocks. You get double the base Damage Resistance to any attack, but "
          "you gain only half the Hit Points back from repairs. Only Robots can choose this trait.",
          ROBOTS),
    Trait("Targeting Computer",
          "You have been programmed with an onboard targeting computer. All attacks cost 1 extra AP to perform, "
          "but you can always add +15% to your chance to-hit. Only Robots can choose this trait.",
          ROBOTS),
    Trait("EMP Shielding",
          "You have a dedicated EMP shielding system. It takes you 2 AP to move 1 hex because of your heavy "
          "equipment, but you have a 30% Resistance to all forms of EMP attack. Only Robots can choose this trait.",
          ROBOTS),
    Trait("Beta Software",
          "You have been equipped with an experimental operating system and peripherals. You get 1 extra Tag Skill, "
          "but whenever using ANY tag skill, you must roll against Luck or suffer an automatic failure and, in "
          "combat, a loss of all APs for that round. Only Robots can choose this trait.",
          ROBOTS)
]
# Perks are not implemented yet; populate with Perk(...) entries when ready.
PERK_LIST = [
    # Perk("Foo", "Bar", "42")
]
| DarthBubi/fallout-pnp-character-creator | character-creator/config.py | Python | mit | 11,633 |
from gdsfactory.port import csv2port
def test_csv2port(data_regression):
    """Regression-check the ports parsed from the straight component's CSV file."""
    import gdsfactory as gf

    component_name = "straight"
    ports_file = gf.CONFIG["gdsdir"] / f"{component_name}.ports"
    parsed_ports = csv2port(ports_file)
    data_regression.check(parsed_ports)
| gdsfactory/gdsfactory | gdsfactory/tests/test_port_from_csv.py | Python | mit | 241 |
import logging
import struct
import threading
import serial
import serial.tools.list_ports
# Maps one-letter commands to the hardware channel byte sent over serial.
# NOTE(review): the letter meanings (F/W/H/C) are not defined in this file —
# confirm against the device firmware's protocol documentation.
COMMAND_TO_CHANNEL = {
    'F': 0x00,
    'W': 0x01,
    'H': 0x02,
    'C': 0x03
}
class SerialPort(object):
    """Thread-safe wrapper around a serial device that sends framed commands.

    Falls back to a dummy serial implementation when the real port cannot be
    opened, so the rest of the application keeps running without hardware.
    """
    def __init__(self, port_name):
        """Open (or fall back for) the serial port matching `port_name`."""
        self.port_name = port_name
        self.serial_port = None
        self.serial_lock = None
        self.log_thread = None
        self.serial_lock = threading.Lock()
        self.initSerialPort()
    def initSerialPort(self):
        """Open the device; on failure substitute a VjDummySerial stand-in."""
        port_device = self.get_serial_port_device()
        logging.info("Initializing port %s", port_device)
        try:
            # Init Serial port
            self.serial_port = serial.Serial(port_device, timeout=1, baudrate=115200)
            self.serial_port.flushInput()
            self.serial_port.flushOutput()
        except (OSError, serial.serialutil.SerialException) as error:
            logging.error("Cannot initialize. Reason: %s", error)
            # Imported lazily so the dummy module is only needed on failure.
            from vjdummyserial import VjDummySerial
            self.serial_port = VjDummySerial(port_device)
            logging.error("Running on dummy serial")
        logging.debug("Serial: %s", self.serial_port)
    def _send_serial_command(self, command, value):
        """Frame and write one command under the lock; no retry on error.

        Frame layout: 0xF6 0x6F <length=0x04> <channel> <value>.
        """
        if command not in COMMAND_TO_CHANNEL:
            logging.error("Unknown command: %s", command)
            return
        message = self.int2bin(0xF6) + self.int2bin(0x6F) + self.int2bin(0x04) + self.int2bin(COMMAND_TO_CHANNEL[command]) + self.int2bin(value)
        if self.serial_port:
            try:
                # Blocking acquire: serialize writers across threads.
                self.serial_lock.acquire(True)
                ret = self.serial_port.write(message)
                logging.debug("Sent %s Bytes, being", ret)
                for x in message:
                    logging.debug("%s", self.bin2int(x))
            finally:
                self.serial_lock.release()
        else:
            logging.error("Not sending %s, %s - no serial port?", command, value)
    def send_serial_command(self, command, value):
        """Public entry point: (re)open the port if needed, retry once on IOError."""
        if not self.serial_port:
            self.initSerialPort()
        if self.serial_port:
            try:
                self._send_serial_command(command, value)
            except IOError:
                # Device likely unplugged/replugged: reinitialize and retry once.
                self.initSerialPort()
                self._send_serial_command(command, value)
    def get_serial_port_device(self):
        """Return the device path of the first port matching port_name, or None."""
        ports = serial.tools.list_ports.grep(self.port_name)
        try:
            return next(ports).device
        except StopIteration:
            return None
    @staticmethod
    def int2bin(value):
        """Pack an int (0-255) into a single big-endian byte."""
        return struct.pack('!B', value)
    @staticmethod
    def bin2int(value):
        """Unpack a single byte to an int; pass ints through unchanged."""
        if isinstance(value, int):
            return value
        return struct.unpack('!B', value)[0]
    def close(self):
        # Close serial port (idempotent: safe to call when already closed).
        logging.info("Close serial port")
        if self.serial_port is not None and self.serial_port.isOpen():
            self.serial_port.close()
            self.serial_port = None
| j-be/vj-control-server | vj_serial.py | Python | mit | 2,458 |
import json
from jaspyx.visitor import BaseVisitor
class Types(BaseVisitor):
    """Emit JSON-compatible source for constant AST nodes."""

    def visit_Num(self, node):
        """Output a numeric constant as its JSON representation."""
        literal = json.dumps(node.n)
        self.output(literal)

    def visit_Str(self, node):
        """Output a string constant as its JSON representation."""
        literal = json.dumps(node.s)
        self.output(literal)

    def visit_List(self, node):
        """Output a list literal: elements comma-separated inside brackets."""
        self.group(node.elts, prefix='[', infix=', ', suffix=']')

    # Tuples render exactly like lists.
    visit_Tuple = visit_List
| iksteen/jaspyx | jaspyx/visitor/types.py | Python | mit | 351 |
import sys
import gc
import pygame
from pygame.locals import *
from input import *
import snd
# Base simulation rate: 25 logic ticks per second.
TICKS_PER_SECOND = 25
# Milliseconds per logic tick (integer division under Python 2).
GAMETICKS = 1000 / TICKS_PER_SECOND
def set_game_speed( slowdown ):
    """Rescale the global tick rate: 25 ticks/sec at slowdown 1.0."""
    global TICKS_PER_SECOND, GAMETICKS
    TICKS_PER_SECOND = int(25 * slowdown)
    GAMETICKS = 1000 / TICKS_PER_SECOND
class Game:
    """Fixed-timestep pygame game loop (Python 2 codebase).

    Subclasses implement do_tick() and render(); before_gameloop() /
    after_gameloop() are optional hooks.
    """
    def __init__( self, name, configuration ):
        # `name` becomes the window caption; `configuration` supplies
        # resolution and fullscreen settings.
        self.config = configuration
        self.name = name
    def init_pygame( self ):
        """Initialize audio, display, and input. Order matters: sound
        pre-init must happen before pygame.init()."""
        snd.pre_init()
        # Init the display
        pygame.init()
        self.userinput = UserInput()
        if not self.config.is_fullscreen:
            pygame.display.set_mode( self.config.resolution )
        else:
            pygame.display.set_mode( self.config.resolution, pygame.FULLSCREEN )
        pygame.display.set_caption( self.name )
        # Init the input
        pygame.mouse.set_visible( False )
        pygame.event.set_grab( False )
        snd.init()
    def deinit_pygame( self ):
        """Shut down audio and pygame."""
        snd.deinit()
        pygame.quit()
    def before_gameloop( self ):
        # Hook for subclasses: called once before the main loop starts.
        pass
    def after_gameloop( self ):
        # Hook for subclasses: called once after the main loop exits cleanly.
        pass
    def run( self ):
        """Run the main loop: fixed-rate ticks, interpolated rendering,
        FPS counted over half-second windows. Always deinitializes pygame,
        even on error."""
        try:
            self.init_pygame()
            self.before_gameloop()
            self.fps = 0
            frame_count = 0
            next_game_tick = pygame.time.get_ticks()
            next_half_second = pygame.time.get_ticks()
            # main loop
            self.game_is_done = False
            while not self.game_is_done:
                # events
                self.handle_events()
                # game tick: catch up missed ticks, but at most 4 per frame
                loop_count = 0
                while pygame.time.get_ticks() > next_game_tick and loop_count < 4:
                    x, y = pygame.mouse.get_pos()
                    self.userinput.mouse.feed_pos( Vec2D(x, y) )
                    self.do_tick( self.userinput )
                    self.userinput.update()
                    next_game_tick += GAMETICKS
                    loop_count += 1
##                    gc.collect()
                if loop_count >= 4: # don't overdo the ticks
                    next_game_tick = pygame.time.get_ticks()
                # render with interpolation factor in [0, 1] between ticks
                time_sec = pygame.time.get_ticks() * 0.001
                interpol = 1 - ((next_game_tick - pygame.time.get_ticks()) / float(GAMETICKS))
                self.render(pygame.display.get_surface(), interpol, time_sec )
                pygame.display.flip()
                frame_count += 1
                if pygame.time.get_ticks() > next_half_second:
                    self.fps = 2 * frame_count
                    frame_count = 0
                    next_half_second += 500
            self.after_gameloop()
            self.deinit_pygame()
        except:
            # Ensure the display/audio are torn down before re-raising.
            self.deinit_pygame()
            print "Unexpected error:", sys.exc_info()[0]
            raise
    def handle_events( self ):
        """Drain the pygame event queue into self.userinput."""
        for event in pygame.event.get():
            if event.type == QUIT:
                self.game_is_done = True
            elif event.type == KEYDOWN:
                self.userinput.key.feed_down( event.key )
                self.userinput.key.feed_char( event.unicode )
            elif event.type == KEYUP:
                self.userinput.key.feed_up( event.key )
            elif event.type == MOUSEBUTTONDOWN:
                self.userinput.mouse.feed_down( event.button )
                # NOTE(review): self.state is never set in this class —
                # presumably assigned by a subclass before run(); verify.
                self.state.mouse_down( event.button )
            elif event.type == MOUSEBUTTONUP:
                self.userinput.mouse.feed_up( event.button )
            elif event.type == JOYBUTTONDOWN:
                self.userinput.joys[event.joy].feed_down( event.button )
            elif event.type == JOYBUTTONUP:
                self.userinput.joys[event.joy].feed_up( event.button )
    def draw_fps( self, surface ):
        """Blit the current FPS counter in the top-left corner of surface."""
        font = pygame.font.Font( None, 20 )
        render_text = font.render( str(self.fps), 0, (255,255,255) )
        surface.blit( render_text, (10,10) )
| koonsolo/MysticMine | monorail/koon/app.py | Python | mit | 4,111 |
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - Imported_Wallet: imported address, no keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import os
import threading
import random
import time
import json
import copy
import errno
import traceback
from functools import partial
from collections import defaultdict
from numbers import Number
from decimal import Decimal
import sys
from .i18n import _
from .util import (NotEnoughFunds, PrintError, UserCancelled, profiler,
format_satoshis, NoDynamicFeeEstimates)
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore
from .storage import multisig_type, STO_EV_PLAINTEXT, STO_EV_USER_PW, STO_EV_XPUB_PW
from . import transaction
from .transaction import Transaction
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV
from . import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .paymentrequest import InvoiceStore
from .contacts import Contacts
# Human-readable transaction status strings, indexed by status code.
TX_STATUS = [
    _('Replaceable'),
    _('Unconfirmed parent'),
    _('Unconfirmed'),
    _('Not Verified'),
    _('Local only'),
]
# Sentinel "heights" for transactions that are not in a block yet.
TX_HEIGHT_LOCAL = -2           # known only locally, not seen on the network
TX_HEIGHT_UNCONF_PARENT = -1   # unconfirmed, and has an unconfirmed parent
TX_HEIGHT_UNCONFIRMED = 0      # unconfirmed (in the mempool)
def relayfee(network):
    """Return the relay fee rate to use, capped at MAX_RELAY_FEE.

    Uses the network's advertised relay fee when available, otherwise the
    default from simple_config.
    """
    from .simple_config import FEERATE_DEFAULT_RELAY
    MAX_RELAY_FEE = 50000
    if network and network.relay_fee:
        rate = network.relay_fee
    else:
        rate = FEERATE_DEFAULT_RELAY
    return min(rate, MAX_RELAY_FEE)
def dust_threshold(network):
    """Return the dust limit in satoshis.

    Change <= dust threshold is added to the tx fee.
    """
    # 182: size in bytes of a typical spendable output+input; the x3 factor
    # mirrors the standard dust rule relative to the relay fee per kB.
    rate = relayfee(network)
    return 182 * 3 * rate / 1000
def append_utxos_to_inputs(inputs, network, pubkey, txin_type, imax):
    """Query the server for UTXOs controlled by *pubkey* and append them,
    formatted as wallet txins, to *inputs* (until it holds *imax* entries)."""
    if txin_type == 'p2pk':
        # pay-to-pubkey outputs have no address form; hash the script itself
        script = bitcoin.public_key_to_p2pk_script(pubkey)
        sh = bitcoin.script_to_scripthash(script)
        address = '(pubkey)'
    else:
        address = bitcoin.pubkey_to_address(txin_type, pubkey)
        sh = bitcoin.address_to_scripthash(address)
    unspent = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
    for utxo in unspent:
        if len(inputs) >= imax:
            break
        utxo.update({
            'address': address,
            'type': txin_type,
            'prevout_hash': utxo['tx_hash'],
            'prevout_n': utxo['tx_pos'],
            'pubkeys': [pubkey],
            'x_pubkeys': [pubkey],
            'signatures': [None],
            'num_sig': 1,
        })
        inputs.append(utxo)
def sweep_preparations(privkeys, network, imax=100):
    """Find all UTXOs spendable by *privkeys* (serialized private keys).

    Returns (inputs, keypairs): inputs is a list of wallet-format txins,
    keypairs maps pubkey -> (privkey, compressed).  Raises when no
    (confirmed) inputs are found.
    """
    def find_utxos_for_privkey(txin_type, privkey, compressed):
        # derive the pubkey and collect its UTXOs into the shared lists
        pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
        append_utxos_to_inputs(inputs, network, pubkey, txin_type, imax)
        keypairs[pubkey] = privkey, compressed
    inputs = []
    keypairs = {}
    for sec in privkeys:
        txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
        find_utxos_for_privkey(txin_type, privkey, compressed)
        # do other lookups to increase support coverage
        if is_minikey(sec):
            # minikeys don't have a compressed byte
            # we lookup both compressed and uncompressed pubkeys
            find_utxos_for_privkey(txin_type, privkey, not compressed)
        elif txin_type == 'p2pkh':
            # WIF serialization does not distinguish p2pkh and p2pk
            # we also search for pay-to-pubkey outputs
            find_utxos_for_privkey('p2pk', privkey, compressed)
    if not inputs:
        raise BaseException(_('No inputs found. (Note that inputs need to be confirmed)'))
    return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100):
    """Build and sign a transaction sweeping all funds controlled by
    *privkeys* to *recipient*.

    When *fee* is None it is estimated from the tx size.  Raises when the
    swept total cannot cover the fee or would leave only dust.
    """
    inputs, keypairs = sweep_preparations(privkeys, network, imax)
    total = sum(i.get('value') for i in inputs)
    if fee is None:
        # build a throwaway tx just to estimate the size-based fee
        outputs = [(TYPE_ADDRESS, recipient, total)]
        tx = Transaction.from_io(inputs, outputs)
        fee = config.estimate_fee(tx.estimated_size())
    if total - fee < 0:
        raise BaseException(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
    if total - fee < dust_threshold(network):
        raise BaseException(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
    outputs = [(TYPE_ADDRESS, recipient, total - fee)]
    # anti-fee-sniping: timelock to the current height
    locktime = network.get_local_height()
    tx = Transaction.from_io(inputs, outputs, locktime=locktime)
    tx.BIP_LI01_sort()
    tx.set_rbf(True)
    tx.sign(keypairs)
    return tx
class AddTransactionException(Exception):
    """Base class for errors raised while adding a tx to the wallet history."""
    pass
class UnrelatedTransactionException(AddTransactionException):
    """Raised when a tx touches neither wallet inputs nor wallet outputs."""
    def __str__(self):
        return _("Transaction is unrelated to this wallet.")
class NotIsMineTransactionException(AddTransactionException):
    """Raised when a local (unbroadcast) tx spends no wallet inputs."""
    def __str__(self):
        return _("Only transactions with inputs owned by the wallet can be added.")
class Abstract_Wallet(PrintError):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
    def __init__(self, storage):
        """Load all persistent wallet state from *storage* and initialise
        runtime state.  Network-dependent objects (verifier, synchronizer)
        are attached later in start_threads()."""
        self.electrum_version = ELECTRUM_VERSION
        self.storage = storage
        self.network = None
        # verifier (SPV) and synchronizer are started in start_threads
        self.synchronizer = None
        self.verifier = None
        self.gap_limit_for_change = 6 # constant
        # saved fields
        self.use_change = storage.get('use_change', True)
        self.multiple_change = storage.get('multiple_change', False)
        self.labels = storage.get('labels', {})
        self.frozen_addresses = set(storage.get('frozen_addresses',[]))
        self.history = storage.get('addr_history',{})  # address -> list(txid, height)
        self.fiat_value = storage.get('fiat_value', {})
        self.load_keystore()
        self.load_addresses()
        self.load_transactions()
        self.build_spent_outpoints()
        # load requests
        self.receive_requests = self.storage.get('payment_requests', {})
        # Transactions pending verification. A map from tx hash to transaction
        # height. Access is not contended so no lock is needed.
        self.unverified_tx = defaultdict(int)
        # Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
        self.verified_tx = storage.get('verified_tx3', {})
        # there is a difference between wallet.up_to_date and interface.is_up_to_date()
        # interface.is_up_to_date() returns true when all requests have been answered and processed
        # wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
        self.up_to_date = False
        # locks: if you need to take multiple ones, acquire them in the order they are defined here!
        self.lock = threading.RLock()
        self.transaction_lock = threading.RLock()
        self.check_history()
        # save wallet type the first time
        if self.storage.get('wallet_type') is None:
            self.storage.put('wallet_type', self.wallet_type)
        # invoices and contacts
        self.invoices = InvoiceStore(self.storage)
        self.contacts = Contacts(self.storage)
    def diagnostic_name(self):
        """Name used by PrintError-style debug output: the wallet file name."""
        return self.basename()
    def __str__(self):
        """A wallet displays as its file basename."""
        return self.basename()
    def get_master_public_key(self):
        """Base implementation: this wallet type has no master public key."""
        return None
    @profiler
    def load_transactions(self):
        """Load tx metadata (txi/txo/fees/pruned outpoints) and the raw
        transactions from storage; drop any stored transaction that is no
        longer referenced by txi, txo or pruned_txo."""
        self.txi = self.storage.get('txi', {})
        self.txo = self.storage.get('txo', {})
        self.tx_fees = self.storage.get('tx_fees', {})
        self.pruned_txo = self.storage.get('pruned_txo', {})
        tx_list = self.storage.get('transactions', {})
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None \
                    and (tx_hash not in self.pruned_txo.values()):
                self.print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)
    @profiler
    def save_transactions(self, write=False):
        """Serialize all tx state into storage; flush to disk when *write*."""
        with self.transaction_lock:
            tx = {}
            for k,v in self.transactions.items():
                # store raw hex, not Transaction objects
                tx[k] = str(v)
            self.storage.put('transactions', tx)
            self.storage.put('txi', self.txi)
            self.storage.put('txo', self.txo)
            self.storage.put('tx_fees', self.tx_fees)
            self.storage.put('pruned_txo', self.pruned_txo)
            self.storage.put('addr_history', self.history)
            if write:
                self.storage.write()
    def clear_history(self):
        """Wipe all tx/history state (in lock order: lock, then
        transaction_lock) and persist the now-empty maps."""
        with self.lock:
            with self.transaction_lock:
                self.txi = {}
                self.txo = {}
                self.tx_fees = {}
                self.pruned_txo = {}
                self.spent_outpoints = {}
                self.history = {}
                self.verified_tx = {}
                self.transactions = {}
                self.save_transactions()
@profiler
def build_spent_outpoints(self):
self.spent_outpoints = {}
for txid, items in self.txi.items():
for addr, l in items.items():
for ser, v in l:
self.spent_outpoints[ser] = txid
    @profiler
    def check_history(self):
        """Sanity-check loaded history: drop entries for addresses that are
        no longer ours, and re-add transactions whose txi/txo entries are
        missing.  Persists only if something changed."""
        save = False
        hist_addrs_mine = list(filter(lambda k: self.is_mine(k), self.history.keys()))
        hist_addrs_not_mine = list(filter(lambda k: not self.is_mine(k), self.history.keys()))
        for addr in hist_addrs_not_mine:
            self.history.pop(addr)
            save = True
        for addr in hist_addrs_mine:
            hist = self.history[addr]
            for tx_hash, tx_height in hist:
                # skip txns already accounted for in txi/txo/pruned_txo
                if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
                    continue
                tx = self.transactions.get(tx_hash)
                if tx is not None:
                    self.add_transaction(tx_hash, tx)
                    save = True
        if save:
            self.save_transactions()
    def basename(self):
        """Return the wallet file's name without its directory."""
        return os.path.basename(self.storage.path)
    def save_addresses(self):
        """Persist the receiving and change address lists."""
        self.storage.put('addresses', {'receiving':self.receiving_addresses, 'change':self.change_addresses})
def load_addresses(self):
d = self.storage.get('addresses', {})
if type(d) != dict: d={}
self.receiving_addresses = d.get('receiving', [])
self.change_addresses = d.get('change', [])
    def synchronize(self):
        """No-op here; subclasses may generate new addresses on demand."""
        pass
    def is_deterministic(self):
        """Delegate to the keystore: True for seed/xprv-derived wallets."""
        return self.keystore.is_deterministic()
    def set_up_to_date(self, up_to_date):
        """Record the synchronization flag; flush state to disk on True."""
        with self.lock:
            self.up_to_date = up_to_date
        if up_to_date:
            self.save_transactions(write=True)
    def is_up_to_date(self):
        """Return the synchronization flag (under lock)."""
        with self.lock: return self.up_to_date
def set_label(self, name, text = None):
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
    def set_fiat_value(self, txid, ccy, text):
        """Store (or clear, when *text* is falsy) a user-entered fiat value
        for *txid* in currency *ccy*.  Silently ignores unknown txids and
        text that does not parse as a Decimal."""
        if txid not in self.transactions:
            return
        if not text:
            d = self.fiat_value.get(ccy, {})
            if d and txid in d:
                d.pop(txid)
            else:
                # nothing stored; no need to persist
                return
        else:
            try:
                Decimal(text)
            except:
                return
            if ccy not in self.fiat_value:
                self.fiat_value[ccy] = {}
            self.fiat_value[ccy][txid] = text
        self.storage.put('fiat_value', self.fiat_value)
def get_fiat_value(self, txid, ccy):
fiat_value = self.fiat_value.get(ccy, {}).get(txid)
try:
return Decimal(fiat_value)
except:
return
    def is_mine(self, address):
        """Return True if *address* belongs to this wallet (linear scan)."""
        return address in self.get_addresses()
    def is_change(self, address):
        """Return a truthy value when *address* is on the change branch
        (first component of the address index), False if not ours."""
        if not self.is_mine(address):
            return False
        return self.get_address_index(address)[0]
    def get_address_index(self, address):
        """Abstract: subclasses return the derivation index of *address*."""
        raise NotImplementedError()
    def get_redeem_script(self, address):
        """Base implementation: no redeem script for this wallet type."""
        return None
    def export_private_key(self, address, password):
        """Return (serialized_privkey, redeem_script) for *address*.

        Returns an empty list for watching-only wallets.  *password*
        unlocks the keystore."""
        if self.is_watching_only():
            return []
        index = self.get_address_index(address)
        pk, compressed = self.keystore.get_private_key(index, password)
        txin_type = self.get_txin_type(address)
        redeem_script = self.get_redeem_script(address)
        serialized_privkey = bitcoin.serialize_privkey(pk, compressed, txin_type)
        return serialized_privkey, redeem_script
    def get_public_keys(self, address):
        """Return the pubkeys for *address* (single-key wallets: one)."""
        return [self.get_public_key(address)]
    def add_unverified_tx(self, tx_hash, tx_height):
        """Queue *tx_hash* at *tx_height* for SPV verification.

        If the tx re-enters the mempool (unconfirmed heights) after being
        verified, its verification is revoked first."""
        if tx_height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT) \
                and tx_hash in self.verified_tx:
            self.verified_tx.pop(tx_hash)
            if self.verifier:
                self.verifier.merkle_roots.pop(tx_hash, None)
        # tx will be verified only if height > 0
        if tx_hash not in self.verified_tx:
            self.unverified_tx[tx_hash] = tx_height
    def add_verified_tx(self, tx_hash, info):
        """Promote *tx_hash* from unverified to verified and notify the GUI.

        *info* is the (tx_height, timestamp, pos) tuple proved via SPV."""
        # Remove from the unverified map and add to the verified map
        self.unverified_tx.pop(tx_hash, None)
        with self.lock:
            self.verified_tx[tx_hash] = info  # (tx_height, timestamp, pos)
        height, conf, timestamp = self.get_tx_height(tx_hash)
        self.network.trigger_callback('verified', tx_hash, height, conf, timestamp)
    def get_unverified_txs(self):
        '''Returns a map from tx hash to transaction height.'''
        return self.unverified_tx
    def undo_verifications(self, blockchain, height):
        '''Used by the verifier when a reorg has happened.

        Revokes verification of every tx at or above *height* whose
        recorded timestamp no longer matches the header; returns the set
        of affected tx hashes.'''
        txs = set()
        with self.lock:
            for tx_hash, item in list(self.verified_tx.items()):
                tx_height, timestamp, pos = item
                if tx_height >= height:
                    header = blockchain.read_header(tx_height)
                    # fixme: use block hash, not timestamp
                    if not header or header.get('timestamp') != timestamp:
                        self.verified_tx.pop(tx_hash, None)
                        txs.add(tx_hash)
        return txs
    def get_local_height(self):
        """ return last known height if we are offline """
        return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
    def get_tx_height(self, tx_hash):
        """ Given a transaction, returns (height, conf, timestamp) """
        with self.lock:
            if tx_hash in self.verified_tx:
                height, timestamp, pos = self.verified_tx[tx_hash]
                conf = max(self.get_local_height() - height + 1, 0)
                return height, conf, timestamp
            elif tx_hash in self.unverified_tx:
                height = self.unverified_tx[tx_hash]
                return height, 0, None
            else:
                # local transaction (never seen on the network)
                return TX_HEIGHT_LOCAL, 0, None
    def get_txpos(self, tx_hash):
        "return position, even if the tx is unverified"
        with self.lock:
            if tx_hash in self.verified_tx:
                height, timestamp, pos = self.verified_tx[tx_hash]
                return height, pos
            elif tx_hash in self.unverified_tx:
                height = self.unverified_tx[tx_hash]
                # unconfirmed heights (<= 0) sort after all real heights via
                # the 1e9 offset
                return (height, 0) if height > 0 else ((1e9 - height), 0)
            else:
                # unknown txns sort last
                return (1e9+1, 0)
def is_found(self):
return self.history.values() != [[]] * len(self.history)
    def get_num_tx(self, address):
        """ return number of transactions where address is involved """
        return len(self.history.get(address, []))
def get_tx_delta(self, tx_hash, address):
"effect of tx on address"
# pruned
if tx_hash in self.pruned_txo.values():
return None
delta = 0
# substract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
def get_tx_value(self, txid):
" effect of tx on the entire domain"
delta = 0
for addr, d in self.txi.get(txid, {}).items():
for n, v in d:
delta -= v
for addr, d in self.txo.get(txid, {}).items():
for n, v, cb in d:
delta += v
return delta
    def get_wallet_delta(self, tx):
        """Return (is_relevant, is_mine, v, fee) describing *tx*'s effect
        on the wallet.

        is_relevant -- the wallet owns at least one input or output
        is_mine     -- the wallet owns at least one input
        v           -- net satoshi change for the wallet
        fee         -- tx fee, or None when it cannot be computed (pruned
                       or partially-owned inputs)
        """
        addresses = self.get_addresses()
        is_relevant = False
        is_mine = False
        is_pruned = False
        is_partial = False
        v_in = v_out = v_out_mine = 0
        for item in tx.inputs():
            addr = item.get('address')
            if addr in addresses:
                is_mine = True
                is_relevant = True
                # look up the value of the spent output in our txo map
                d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
                for n, v, cb in d:
                    if n == item['prevout_n']:
                        value = v
                        break
                else:
                    value = None
                if value is None:
                    is_pruned = True
                else:
                    v_in += value
            else:
                is_partial = True
        if not is_mine:
            is_partial = False
        for addr, value in tx.get_outputs():
            v_out += value
            if addr in addresses:
                v_out_mine += value
                is_relevant = True
        if is_pruned:
            # some inputs are mine:
            fee = None
            if is_mine:
                v = v_out_mine - v_out
            else:
                # no input is mine
                v = v_out_mine
        else:
            v = v_out_mine - v_in
            if is_partial:
                # some inputs are mine, but not all
                fee = None
            else:
                # all inputs are mine
                fee = v_in - v_out
        if not is_mine:
            fee = None
        return is_relevant, is_mine, v, fee
    def get_tx_info(self, tx):
        """Summarize *tx* for display.

        Returns (tx_hash, status, label, can_broadcast, can_bump, amount,
        fee, height, conf, timestamp, exp_n)."""
        is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
        exp_n = None
        can_broadcast = False
        can_bump = False
        label = ''
        height = conf = timestamp = None
        tx_hash = tx.txid()
        if tx.is_complete():
            if tx_hash in self.transactions.keys():
                label = self.get_label(tx_hash)
                height, conf, timestamp = self.get_tx_height(tx_hash)
                if height > 0:
                    if conf:
                        status = _("{} confirmations").format(conf)
                    else:
                        status = _('Not verified')
                elif height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED):
                    status = _('Unconfirmed')
                    if fee is None:
                        fee = self.tx_fees.get(tx_hash)
                    if fee and self.network.config.has_fee_etas():
                        # estimate confirmation ETA from the fee rate
                        size = tx.estimated_size()
                        fee_per_kb = fee * 1000 / size
                        exp_n = self.network.config.fee_to_eta(fee_per_kb)
                    can_bump = is_mine and not tx.is_final()
                else:
                    status = _('Local')
                    can_broadcast = self.network is not None
            else:
                status = _("Signed")
                can_broadcast = self.network is not None
        else:
            s, r = tx.signature_count()
            status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
        if is_relevant:
            if is_mine:
                if fee is not None:
                    amount = v + fee
                else:
                    amount = v
            else:
                amount = v
        else:
            amount = None
        return tx_hash, status, label, can_broadcast, can_bump, amount, fee, height, conf, timestamp, exp_n
    def get_addr_io(self, address):
        """Return (received, sent) for *address*.

        received: outpoint "txid:n" -> (height, value, is_coinbase)
        sent:     outpoint "txid:n" -> height of the spending tx
        """
        h = self.get_address_history(address)
        received = {}
        sent = {}
        for tx_hash, height in h:
            l = self.txo.get(tx_hash, {}).get(address, [])
            for n, v, is_cb in l:
                received[tx_hash + ':%d'%n] = (height, v, is_cb)
        for tx_hash, height in h:
            l = self.txi.get(tx_hash, {}).get(address, [])
            for txi, v in l:
                sent[txi] = height
        return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
    # return the balance of a bitcoin address: confirmed and matured, unconfirmed, unmatured
    def get_addr_balance(self, address):
        """Return (confirmed, unconfirmed, unmatured) satoshi totals.

        Spends are subtracted from the bucket matching the height of the
        spending tx (confirmed vs unconfirmed)."""
        received, sent = self.get_addr_io(address)
        c = u = x = 0
        for txo, (tx_height, v, is_cb) in received.items():
            if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
                # immature coinbase output
                x += v
            elif tx_height > 0:
                c += v
            else:
                u += v
            if txo in sent:
                if sent[txo] > 0:
                    c -= v
                else:
                    u -= v
        return c, u, x
    def get_spendable_coins(self, domain, config):
        """Return spendable UTXOs over *domain*, honoring the
        'confirmed_only' config flag and excluding frozen addresses."""
        confirmed_only = config.get('confirmed_only', False)
        return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False):
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
for x in utxos.values():
if confirmed_only and x['height'] <= 0:
continue
if mature and x['coinbase'] and x['height'] + COINBASE_MATURITY > self.get_local_height():
continue
coins.append(x)
continue
return coins
    def dummy_address(self):
        """Return a placeholder address (the first receiving address)."""
        return self.get_receiving_addresses()[0]
def get_addresses(self):
out = []
out += self.get_receiving_addresses()
out += self.get_change_addresses()
return out
    def get_frozen_balance(self):
        """Return the (c, u, x) balance held on frozen addresses."""
        return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
if domain is None:
domain = self.get_addresses()
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
    def get_address_history(self, addr):
        """Return [(tx_hash, height)] for every wallet tx touching *addr*,
        derived from the txi/txo maps (not from self.history)."""
        h = []
        # we need self.transaction_lock but get_tx_height will take self.lock
        # so we need to take that too here, to enforce order of locks
        with self.lock, self.transaction_lock:
            for tx_hash in self.transactions:
                if addr in self.txi.get(tx_hash, []) or addr in self.txo.get(tx_hash, []):
                    tx_height = self.get_tx_height(tx_hash)[0]
                    h.append((tx_hash, tx_height))
        return h
    def get_txin_address(self, txi):
        """Best-effort address for input *txi*.

        Pay-to-pubkey inputs carry the placeholder '(pubkey)'; for those we
        try to recover the address from our txo records of the funding tx.
        Implicitly returns None when nothing matches."""
        addr = txi.get('address')
        if addr != "(pubkey)":
            return addr
        prevout_hash = txi.get('prevout_hash')
        prevout_n = txi.get('prevout_n')
        dd = self.txo.get(prevout_hash, {})
        for addr, l in dd.items():
            for n, v, is_cb in l:
                if n == prevout_n:
                    self.print_error("found pay-to-pubkey address:", addr)
                    return addr
def get_txout_address(self, txo):
_type, x, v = txo
if _type == TYPE_ADDRESS:
addr = x
elif _type == TYPE_PUBKEY:
addr = bitcoin.public_key_to_p2pkh(bfh(x))
else:
addr = None
return addr
    def get_conflicting_transactions(self, tx):
        """Returns a set of transaction hashes from the wallet history that are
        directly conflicting with tx, i.e. they have common outpoints being
        spent with tx. If the tx is already in wallet history, that will not be
        reported as a conflict.
        """
        conflicting_txns = set()
        with self.transaction_lock:
            for txi in tx.inputs():
                ser = Transaction.get_outpoint_from_txin(txi)
                if ser is None:
                    continue
                spending_tx_hash = self.spent_outpoints.get(ser, None)
                if spending_tx_hash is None:
                    continue
                # this outpoint (ser) has already been spent, by spending_tx
                assert spending_tx_hash in self.transactions
                conflicting_txns |= {spending_tx_hash}
            txid = tx.txid()
            if txid in conflicting_txns:
                # this tx is already in history, so it conflicts with itself
                if len(conflicting_txns) > 1:
                    raise Exception('Found conflicting transactions already in wallet history.')
                conflicting_txns -= {txid}
            return conflicting_txns
    def add_transaction(self, tx_hash, tx):
        """Add *tx* to wallet history, updating txi/txo/spent_outpoints and
        evicting any conflicting transactions.

        Returns True when the tx was stored, False when it was dropped in
        favour of an existing conflict.  Raises NotIsMineTransactionException
        or UnrelatedTransactionException for txns we must not keep."""
        # we need self.transaction_lock but get_tx_height will take self.lock
        # so we need to take that too here, to enforce order of locks
        with self.lock, self.transaction_lock:
            # NOTE: returning if tx in self.transactions might seem like a good idea
            # BUT we track is_mine inputs in a txn, and during subsequent calls
            # of add_transaction tx, we might learn of more-and-more inputs of
            # being is_mine, as we roll the gap_limit forward
            is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
            tx_height = self.get_tx_height(tx_hash)[0]
            is_mine = any([self.is_mine(txin['address']) for txin in tx.inputs()])
            # do not save if tx is local and not mine
            if tx_height == TX_HEIGHT_LOCAL and not is_mine:
                # FIXME the test here should be for "not all is_mine"; cannot detect conflict in some cases
                raise NotIsMineTransactionException()
            # raise exception if unrelated to wallet
            is_for_me = any([self.is_mine(self.get_txout_address(txo)) for txo in tx.outputs()])
            if not is_mine and not is_for_me:
                raise UnrelatedTransactionException()
            # Find all conflicting transactions.
            # In case of a conflict,
            #     1. confirmed > mempool > local
            #     2. this new txn has priority over existing ones
            # When this method exits, there must NOT be any conflict, so
            # either keep this txn and remove all conflicting (along with dependencies)
            #     or drop this txn
            conflicting_txns = self.get_conflicting_transactions(tx)
            if conflicting_txns:
                existing_mempool_txn = any(
                    self.get_tx_height(tx_hash2)[0] in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT)
                    for tx_hash2 in conflicting_txns)
                existing_confirmed_txn = any(
                    self.get_tx_height(tx_hash2)[0] > 0
                    for tx_hash2 in conflicting_txns)
                if existing_confirmed_txn and tx_height <= 0:
                    # this is a non-confirmed tx that conflicts with confirmed txns; drop.
                    return False
                if existing_mempool_txn and tx_height == TX_HEIGHT_LOCAL:
                    # this is a local tx that conflicts with non-local txns; drop.
                    return False
                # keep this txn and remove all conflicting
                to_remove = set()
                to_remove |= conflicting_txns
                for conflicting_tx_hash in conflicting_txns:
                    to_remove |= self.get_depending_transactions(conflicting_tx_hash)
                for tx_hash2 in to_remove:
                    self.remove_transaction(tx_hash2)
            # add inputs
            self.txi[tx_hash] = d = {}
            for txi in tx.inputs():
                addr = self.get_txin_address(txi)
                if txi['type'] != 'coinbase':
                    prevout_hash = txi['prevout_hash']
                    prevout_n = txi['prevout_n']
                    ser = prevout_hash + ':%d'%prevout_n
                # find value from prev output
                if addr and self.is_mine(addr):
                    dd = self.txo.get(prevout_hash, {})
                    for n, v, is_cb in dd.get(addr, []):
                        if n == prevout_n:
                            if d.get(addr) is None:
                                d[addr] = []
                            d[addr].append((ser, v))
                            # we only track is_mine spends
                            self.spent_outpoints[ser] = tx_hash
                            break
                    else:
                        # funding output unknown: remember the spend so the
                        # value can be filled in when the parent tx arrives
                        self.pruned_txo[ser] = tx_hash
            # add outputs
            self.txo[tx_hash] = d = {}
            for n, txo in enumerate(tx.outputs()):
                v = txo[2]
                ser = tx_hash + ':%d'%n
                addr = self.get_txout_address(txo)
                if addr and self.is_mine(addr):
                    if d.get(addr) is None:
                        d[addr] = []
                    d[addr].append((n, v, is_coinbase))
                # give v to txi that spends me
                next_tx = self.pruned_txo.get(ser)
                if next_tx is not None:
                    self.pruned_txo.pop(ser)
                    dd = self.txi.get(next_tx, {})
                    if dd.get(addr) is None:
                        dd[addr] = []
                    dd[addr].append((ser, v))
            # save
            self.transactions[tx_hash] = tx
            return True
    def remove_transaction(self, tx_hash):
        """Remove *tx_hash* from history, undoing its txi/txo bookkeeping
        and re-marking any child spends of its outputs as pruned."""
        def undo_spend(outpoint_to_txid_map):
            # forget that this tx spent its input outpoints
            for addr, l in self.txi[tx_hash].items():
                for ser, v in l:
                    outpoint_to_txid_map.pop(ser, None)
        with self.transaction_lock:
            self.print_error("removing tx from history", tx_hash)
            self.transactions.pop(tx_hash, None)
            undo_spend(self.pruned_txo)
            undo_spend(self.spent_outpoints)
            # add tx to pruned_txo, and undo the txi addition
            for next_tx, dd in self.txi.items():
                for addr, l in list(dd.items()):
                    ll = l[:]
                    for item in ll:
                        ser, v = item
                        prev_hash, prev_n = ser.split(':')
                        if prev_hash == tx_hash:
                            # a child spent one of our outputs; it no longer
                            # knows the value -> back to pruned
                            l.remove(item)
                            self.pruned_txo[ser] = next_tx
                    if l == []:
                        dd.pop(addr)
                    else:
                        dd[addr] = l
            try:
                self.txi.pop(tx_hash)
                self.txo.pop(tx_hash)
            except KeyError:
                self.print_error("tx was not in history", tx_hash)
    def receive_tx_callback(self, tx_hash, tx, tx_height):
        """Network callback: a tx arrived; queue it for verification and
        record it in history."""
        self.add_unverified_tx(tx_hash, tx_height)
        self.add_transaction(tx_hash, tx)
    def receive_history_callback(self, addr, hist, tx_fees):
        """Network callback: the server reported *hist* for *addr*.

        Reconciles the stored history with the server's view, demoting (or
        removing) txns that dropped out and queueing new ones."""
        with self.lock:
            old_hist = self.get_address_history(addr)
            for tx_hash, height in old_hist:
                if (tx_hash, height) not in hist:
                    # make tx local
                    self.unverified_tx.pop(tx_hash, None)
                    self.verified_tx.pop(tx_hash, None)
                    self.verifier.merkle_roots.pop(tx_hash, None)
                    # but remove completely if not is_mine
                    if self.txi[tx_hash] == {}:
                        # FIXME the test here should be for "not all is_mine"; cannot detect conflict in some cases
                        self.remove_transaction(tx_hash)
            self.history[addr] = hist
        for tx_hash, tx_height in hist:
            # add it in case it was previously unconfirmed
            self.add_unverified_tx(tx_hash, tx_height)
            # if addr is new, we have to recompute txi and txo
            tx = self.transactions.get(tx_hash)
            if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
                self.add_transaction(tx_hash, tx)
        # Store fees
        self.tx_fees.update(tx_fees)
    def get_history(self, domain=None):
        """Return the wallet history over *domain* as a list of
        (tx_hash, height, conf, timestamp, delta, balance) tuples, sorted
        oldest first, with a running balance."""
        # get domain
        if domain is None:
            domain = self.get_addresses()
        # 1. Get the history of each address in the domain, maintain the
        #    delta of a tx as the sum of its deltas on domain addresses
        tx_deltas = defaultdict(int)
        for addr in domain:
            h = self.get_address_history(addr)
            for tx_hash, height in h:
                delta = self.get_tx_delta(tx_hash, addr)
                if delta is None or tx_deltas[tx_hash] is None:
                    # a pruned delta poisons the whole tx's delta
                    tx_deltas[tx_hash] = None
                else:
                    tx_deltas[tx_hash] += delta
        # 2. create sorted history
        history = []
        for tx_hash in tx_deltas:
            delta = tx_deltas[tx_hash]
            height, conf, timestamp = self.get_tx_height(tx_hash)
            history.append((tx_hash, height, conf, timestamp, delta))
        history.sort(key = lambda x: self.get_txpos(x[0]))
        history.reverse()
        # 3. add balance (walking newest -> oldest, subtracting deltas)
        c, u, x = self.get_balance(domain)
        balance = c + u + x
        h2 = []
        for tx_hash, height, conf, timestamp, delta in history:
            h2.append((tx_hash, height, conf, timestamp, delta, balance))
            if balance is None or delta is None:
                balance = None
            else:
                balance -= delta
        h2.reverse()
        # fixme: this may happen if history is incomplete
        if balance not in [None, 0]:
            self.print_error("Error: history not synchronized")
            return []
        return h2
def balance_at_timestamp(self, domain, target_timestamp):
h = self.get_history(domain)
for tx_hash, height, conf, timestamp, value, balance in h:
if timestamp > target_timestamp:
return balance - value
# return last balance
return balance
    @profiler
    def get_full_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None, show_addresses=False):
        """Return {'transactions': [...], 'summary': {...}} describing the
        wallet history, optionally restricted to a timestamp window,
        annotated with addresses and/or fiat values.

        fx -- exchange-rate provider; enables fiat values, capital gains
              and income totals in the output."""
        from .util import timestamp_to_datetime, Satoshis, Fiat
        out = []
        capital_gains = 0
        fiat_income = 0
        h = self.get_history(domain)
        for tx_hash, height, conf, timestamp, value, balance in h:
            if from_timestamp and timestamp < from_timestamp:
                continue
            if to_timestamp and timestamp >= to_timestamp:
                continue
            item = {
                'txid':tx_hash,
                'height':height,
                'confirmations':conf,
                'timestamp':timestamp,
                'value': Satoshis(value),
                'balance': Satoshis(balance)
            }
            item['date'] = timestamp_to_datetime(timestamp)
            item['label'] = self.get_label(tx_hash)
            if show_addresses:
                tx = self.transactions.get(tx_hash)
                tx.deserialize()
                input_addresses = []
                output_addresses = []
                for x in tx.inputs():
                    if x['type'] == 'coinbase': continue
                    addr = self.get_txin_address(x)
                    if addr is None:
                        continue
                    input_addresses.append(addr)
                for addr, v in tx.get_outputs():
                    output_addresses.append(addr)
                item['input_addresses'] = input_addresses
                item['output_addresses'] = output_addresses
            if fx is not None:
                # unconfirmed txns are valued at "now"
                date = timestamp_to_datetime(time.time() if conf <= 0 else timestamp)
                fiat_value = self.get_fiat_value(tx_hash, fx.ccy)
                if fiat_value is None:
                    fiat_value = fx.historical_value(value, date)
                    fiat_default = True
                else:
                    fiat_default = False
                item['fiat_value'] = Fiat(fiat_value, fx.ccy)
                item['fiat_default'] = fiat_default
                if value is not None and value < 0:
                    # outgoing tx: record realized capital gain
                    ap, lp = self.capital_gain(tx_hash, fx.timestamp_rate, fx.ccy)
                    cg = lp - ap
                    item['acquisition_price'] = Fiat(ap, fx.ccy)
                    item['capital_gain'] = Fiat(cg, fx.ccy)
                    capital_gains += cg
                else:
                    if fiat_value is not None:
                        fiat_income += fiat_value
            out.append(item)
        # add summary
        if out:
            b, v = out[0]['balance'].value, out[0]['value'].value
            start_balance = None if b is None or v is None else b - v
            end_balance = out[-1]['balance'].value
            if from_timestamp is not None and to_timestamp is not None:
                start_date = timestamp_to_datetime(from_timestamp)
                end_date = timestamp_to_datetime(to_timestamp)
            else:
                start_date = out[0]['date']
                end_date = out[-1]['date']
            summary = {
                'start_date': start_date,
                'end_date': end_date,
                'start_balance': Satoshis(start_balance),
                'end_balance': Satoshis(end_balance)
            }
            if fx:
                unrealized = self.unrealized_gains(domain, fx.timestamp_rate, fx.ccy)
                summary['capital_gains'] = Fiat(capital_gains, fx.ccy)
                summary['fiat_income'] = Fiat(fiat_income, fx.ccy)
                summary['unrealized_gains'] = Fiat(unrealized, fx.ccy)
                if start_date:
                    summary['start_fiat_balance'] = Fiat(fx.historical_value(start_balance, start_date), fx.ccy)
                if end_date:
                    summary['end_fiat_balance'] = Fiat(fx.historical_value(end_balance, end_date), fx.ccy)
        else:
            summary = {}
        return {
            'transactions': out,
            'summary': summary
        }
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if label is '':
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if self.txi.get(tx_hash) == {}:
d = self.txo.get(tx_hash, {})
labels = []
for addr in d.keys():
label = self.labels.get(addr)
if label:
labels.append(label)
return ', '.join(labels)
return ''
    def get_tx_status(self, tx_hash, height, conf, timestamp):
        """Return (status, status_str) for display.

        status is an int: 0-4 index into TX_STATUS for non-timestamped
        states, >= 5 for confirmed txns (then status_str is the time)."""
        from .util import format_time
        exp_n = False
        if conf == 0:
            tx = self.transactions.get(tx_hash)
            if not tx:
                return 2, 'unknown'
            is_final = tx and tx.is_final()
            fee = self.tx_fees.get(tx_hash)
            if fee and self.network and self.network.config.has_fee_mempool():
                # estimate mempool depth from the fee rate
                size = tx.estimated_size()
                fee_per_kb = fee * 1000 / size
                exp_n = self.network.config.fee_to_depth(fee_per_kb//1000)
            if height == TX_HEIGHT_LOCAL:
                status = 4
            elif height == TX_HEIGHT_UNCONF_PARENT:
                status = 1
            elif height == TX_HEIGHT_UNCONFIRMED and not is_final:
                # replaceable (RBF) transaction
                status = 0
            elif height == TX_HEIGHT_UNCONFIRMED:
                status = 2
            else:
                status = 3
        else:
            status = 4 + min(conf, 6)
        time_str = format_time(timestamp) if timestamp else _("unknown")
        status_str = TX_STATUS[status] if status < 5 else time_str
        if exp_n:
            status_str += ' [%d sat/b, %.2f MB]'%(fee_per_kb//1000, exp_n/1000000)
        return status, status_str
    def relayfee(self):
        """Module-level relayfee() bound to this wallet's network."""
        return relayfee(self.network)
    def dust_threshold(self):
        """Module-level dust_threshold() bound to this wallet's network."""
        return dust_threshold(self.network)
    def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None,
                                  change_addr=None, is_sweep=False):
        """Build an unsigned tx paying *outputs* from *inputs*.

        One output's value may be '!' to mean "spend max".  fixed_fee may
        be None (dynamic estimate), a number, or a callable(size)->fee.
        Raises NotEnoughFunds / NoDynamicFeeEstimates as appropriate."""
        # check outputs
        i_max = None
        for i, o in enumerate(outputs):
            _type, data, value = o
            if _type == TYPE_ADDRESS:
                if not is_address(data):
                    raise BaseException("Invalid bitcoin address:" + data)
            if value == '!':
                if i_max is not None:
                    raise BaseException("More than one output set to spend max")
                i_max = i
        # Avoid index-out-of-range with inputs[0] below
        if not inputs:
            raise NotEnoughFunds()
        if fixed_fee is None and config.fee_per_kb() is None:
            raise NoDynamicFeeEstimates()
        if not is_sweep:
            for item in inputs:
                self.add_input_info(item)
        # change address
        if change_addr:
            change_addrs = [change_addr]
        else:
            addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
            if self.use_change and addrs:
                # New change addresses are created only after a few
                # confirmations. Select the unused addresses within the
                # gap limit; if none take one at random
                change_addrs = [addr for addr in addrs if
                                self.get_num_tx(addr) == 0]
                if not change_addrs:
                    change_addrs = [random.choice(addrs)]
            else:
                # coin_chooser will set change address
                change_addrs = []
        # Fee estimator
        if fixed_fee is None:
            fee_estimator = config.estimate_fee
        elif isinstance(fixed_fee, Number):
            fee_estimator = lambda size: fixed_fee
        elif callable(fixed_fee):
            fee_estimator = fixed_fee
        else:
            raise BaseException('Invalid argument fixed_fee: %s' % fixed_fee)
        if i_max is None:
            # Let the coin chooser select the coins to spend
            max_change = self.max_change_outputs if self.multiple_change else 1
            coin_chooser = coinchooser.get_coin_chooser(config)
            tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
                                      fee_estimator, self.dust_threshold())
        else:
            # "spend max": give the max output everything left after fees
            # FIXME?? this might spend inputs with negative effective value...
            sendable = sum(map(lambda x:x['value'], inputs))
            _type, data, value = outputs[i_max]
            outputs[i_max] = (_type, data, 0)
            tx = Transaction.from_io(inputs, outputs[:])
            fee = fee_estimator(tx.estimated_size())
            amount = max(0, sendable - tx.output_value() - fee)
            outputs[i_max] = (_type, data, amount)
            tx = Transaction.from_io(inputs, outputs[:])
        # Sort the inputs and outputs deterministically
        tx.BIP_LI01_sort()
        # Timelock tx to current height.
        tx.locktime = self.get_local_height()
        run_hook('make_unsigned_transaction', self, tx)
        return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None):
    """Build, sign and return a complete transaction paying `outputs`.

    Convenience wrapper: selects spendable coins (optionally restricted
    to `domain`), builds an unsigned tx, then signs it with `password`.
    """
    coins = self.get_spendable_coins(domain, config)
    tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr)
    self.sign_transaction(tx, password)
    return tx
def is_frozen(self, addr):
    # "Frozen" addresses are excluded from coin selection by the user.
    return addr in self.frozen_addresses
def set_frozen_state(self, addrs, freeze):
    """Freeze (or unfreeze) `addrs`; returns False if any address is not ours."""
    if not all(self.is_mine(a) for a in addrs):
        return False
    requested = set(addrs)
    if freeze:
        self.frozen_addresses |= requested
    else:
        self.frozen_addresses -= requested
    # Persist so the frozen set survives restarts.
    self.storage.put('frozen_addresses', list(self.frozen_addresses))
    return True
def prepare_for_verifier(self):
    """Queue every historical tx for (re-)verification by the SPV verifier."""
    # review transactions that are in the history
    for addr, hist in self.history.items():
        for tx_hash, tx_height in hist:
            # add it in case it was previously unconfirmed
            self.add_unverified_tx(tx_hash, tx_height)
def start_threads(self, network):
    """Attach the wallet to `network`, spawning SPV verifier and synchronizer jobs."""
    self.network = network
    if self.network is not None:
        self.prepare_for_verifier()
        self.verifier = SPV(self.network, self)
        self.synchronizer = Synchronizer(self, network)
        network.add_jobs([self.verifier, self.synchronizer])
    else:
        # Offline mode: no background jobs.
        self.verifier = None
        self.synchronizer = None
def stop_threads(self):
    """Detach from the network and persist wallet state to storage."""
    if self.network:
        self.network.remove_jobs([self.synchronizer, self.verifier])
        self.synchronizer.release()
        self.synchronizer = None
        self.verifier = None
        # Now no references to the syncronizer or verifier
        # remain so they will be GC-ed
        self.storage.put('stored_height', self.get_local_height())
    self.save_transactions()
    self.storage.put('verified_tx3', self.verified_tx)
    self.storage.write()
def wait_until_synchronized(self, callback=None):
    """Block until the wallet has caught up with the network.

    `callback(msg)` is polled with a human-readable progress string.
    """
    def wait_for_wallet():
        self.set_up_to_date(False)
        while not self.is_up_to_date():
            if callback:
                msg = "%s\n%s %d"%(
                    _("Please wait..."),
                    _("Addresses generated:"),
                    len(self.addresses(True)))
                callback(msg)
            time.sleep(0.1)
    def wait_for_network():
        while not self.network.is_connected():
            if callback:
                msg = "%s \n" % (_("Connecting..."))
                callback(msg)
            time.sleep(0.1)
    # wait until we are connected, because the user
    # might have selected another server
    if self.network:
        wait_for_network()
        wait_for_wallet()
    else:
        self.synchronize()
def can_export(self):
    # Private keys can only be exported from signing (non-watching) wallets.
    return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
    """True if `address` has transaction history AND a zero balance.

    i.e. the address was used in the past and is now emptied out.
    """
    # Idiom fix: truthiness test instead of len(...) == 0 on a default list.
    if not self.history.get(address):
        return False
    c, u, x = self.get_addr_balance(address)
    # confirmed + unconfirmed + unmatured all spent away.
    return c + u + x == 0
def is_empty(self, address):
    # True when the address currently holds no funds at all.
    c, u, x = self.get_addr_balance(address)
    return c+u+x == 0
def address_is_old(self, address, age_limit=2):
    """Return True when the oldest tx touching `address` has more than
    `age_limit` confirmations."""
    oldest = -1
    for _tx_hash, tx_height in self.history.get(address, []):
        # Unconfirmed / local txs (height <= 0) count as age 0.
        confs = 0 if tx_height <= 0 else self.get_local_height() - tx_height + 1
        oldest = max(oldest, confs)
    return oldest > age_limit
def bump_fee(self, tx, delta):
    """Return a copy of `tx` with its fee increased by `delta` satoshis.

    The extra fee is taken from our own outputs when possible (smallest
    first, to sweep dust); raises when the outputs cannot cover `delta`.
    """
    if tx.is_final():
        raise BaseException(_("Cannot bump fee: transaction is final"))
    inputs = copy.deepcopy(tx.inputs())
    outputs = copy.deepcopy(tx.outputs())
    for txin in inputs:
        # Drop existing signatures: the outputs (hence sighash) change.
        txin['signatures'] = [None] * len(txin['signatures'])
        self.add_input_info(txin)
    # use own outputs
    s = list(filter(lambda x: self.is_mine(x[1]), outputs))
    # ... unless there is none
    if not s:
        s = outputs
    x_fee = run_hook('get_tx_extra_fee', self, tx)
    if x_fee:
        # Never shave the plugin's extra-fee output.
        x_fee_address, x_fee_amount = x_fee
        s = filter(lambda x: x[1]!=x_fee_address, s)
    # prioritize low value outputs, to get rid of dust
    s = sorted(s, key=lambda x: x[2])
    for o in s:
        i = outputs.index(o)
        otype, address, value = o
        if value - delta >= self.dust_threshold():
            outputs[i] = otype, address, value - delta
            delta = 0
            break
        else:
            del outputs[i]
            delta -= value
            # NOTE(review): this `continue` is redundant (the loop advances
            # anyway), and when delta goes negative the surplus simply adds
            # to the fee rather than being redistributed -- confirm intended.
            if delta > 0:
                continue
    if delta > 0:
        raise BaseException(_('Cannot bump fee: could not find suitable outputs'))
    locktime = self.get_local_height()
    tx_new = Transaction.from_io(inputs, outputs, locktime=locktime)
    tx_new.BIP_LI01_sort()
    return tx_new
def cpfp(self, tx, fee):
    """Build a child-pays-for-parent tx spending our own output of `tx`.

    Returns None when `tx` pays nothing to us or the output is unspendable.
    """
    txid = tx.txid()
    for i, o in enumerate(tx.outputs()):
        otype, address, value = o
        if otype == TYPE_ADDRESS and self.is_mine(address):
            break
    else:
        # No output of ours to spend from.
        return
    coins = self.get_addr_utxo(address)
    item = coins.get(txid+':%d'%i)
    if not item:
        return
    self.add_input_info(item)
    inputs = [item]
    outputs = [(TYPE_ADDRESS, address, value - fee)]
    locktime = self.get_local_height()
    # note: no need to call tx.BIP_LI01_sort() here - single input/output
    return Transaction.from_io(inputs, outputs, locktime=locktime)
def add_input_info(self, txin):
    """Annotate `txin` in place with script type, value and signing info."""
    address = txin['address']
    if self.is_mine(address):
        txin['type'] = self.get_txin_type(address)
        # segwit needs value to sign
        if txin.get('value') is None and Transaction.is_segwit_input(txin):
            received, spent = self.get_addr_io(address)
            item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
            tx_height, value, is_cb = item
            txin['value'] = value
        self.add_input_sig_info(txin, address)
def can_sign(self, tx):
    """Whether any of our keystores can add a signature to `tx`."""
    if tx.is_complete():
        return False
    return any(ks.can_sign(tx) for ks in self.get_keystores())
def get_input_tx(self, tx_hash):
    """Fetch a previous transaction, from the wallet or else the network."""
    # First look up an input transaction in the wallet where it
    # will likely be. If co-signing a transaction it may not have
    # all the input txs, in which case we ask the network.
    tx = self.transactions.get(tx_hash)
    if not tx and self.network:
        request = ('blockchain.transaction.get', [tx_hash])
        tx = Transaction(self.network.synchronous_get(request))
    return tx
def add_hw_info(self, tx):
    """Attach data hardware wallets need: previous txs and change-output metadata."""
    # add previous tx for hw wallets
    for txin in tx.inputs():
        tx_hash = txin['prevout_hash']
        txin['prev_tx'] = self.get_input_tx(tx_hash)
    # add output info for hw wallets
    info = {}
    xpubs = self.get_master_public_keys()
    for txout in tx.outputs():
        _type, addr, amount = txout
        if self.is_change(addr):
            index = self.get_address_index(addr)
            pubkeys = self.get_public_keys(addr)
            # sort xpubs using the order of pubkeys
            sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
            info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None
    tx.output_info = info
def sign_transaction(self, tx, password):
    """Have every capable keystore sign `tx` in place (no-op if watch-only)."""
    if self.is_watching_only():
        return
    # hardware wallets require extra info
    if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
        self.add_hw_info(tx)
    # sign
    for k in self.get_keystores():
        try:
            if k.can_sign(tx):
                k.sign_transaction(tx, password)
        except UserCancelled:
            # User aborted on a device; let the other keystores proceed.
            continue
def get_unused_addresses(self):
    """Receiving addresses with no history and no pending payment request."""
    # fixme: use slots from expired requests
    requested = self.receive_requests.keys()
    unused = []
    for addr in self.get_receiving_addresses():
        if not self.history.get(addr) and addr not in requested:
            unused.append(addr)
    return unused
def get_unused_address(self):
    """First unused receiving address, or None when all are taken."""
    candidates = self.get_unused_addresses()
    return candidates[0] if candidates else None
def get_receiving_address(self):
    """Always return a receiving address (None only if the wallet has none).

    Preference order: unused & unrequested, then unused-but-requested,
    then the first address of the chain.
    """
    domain = self.get_receiving_addresses()
    if not domain:
        return
    fallback = domain[0]
    for addr in domain:
        if not self.history.get(addr):
            if addr not in self.receive_requests.keys():
                return addr
            # Unused but already tied to a payment request: keep as fallback.
            fallback = addr
    return fallback
def get_payment_status(self, address, amount):
    """Check whether `address` has received at least `amount` satoshis.

    Returns (True, conf) where conf is the confirmation count of the
    least-confirmed output needed to reach `amount`, else (False, None).
    """
    local_height = self.get_local_height()
    received, sent = self.get_addr_io(address)
    l = []
    for txo, x in received.items():
        h, v, is_cb = x
        txid, n = txo.split(':')
        info = self.verified_tx.get(txid)
        if info:
            tx_height, timestamp, pos = info
            conf = local_height - tx_height
        else:
            # Unverified outputs count as zero confirmations.
            conf = 0
        l.append((conf, v))
    vsum = 0
    # Consume the most-confirmed outputs first.
    for conf, v in reversed(sorted(l)):
        vsum += v
        if vsum >= amount:
            return True, conf
    return False, None
def get_payment_request(self, addr, config):
    """Return the stored payment request for `addr`, enriched with a BIP21
    URI, live status/confirmations, and (if configured) BIP70 URLs."""
    r = self.receive_requests.get(addr)
    if not r:
        return
    out = copy.copy(r)
    out['URI'] = 'bitcoin:' + addr + '?amount=' + format_satoshis(out.get('amount'))
    status, conf = self.get_request_status(addr)
    out['status'] = status
    if conf is not None:
        out['confirmations'] = conf
    # check if bip70 file exists
    rdir = config.get('requests_dir')
    if rdir:
        key = out.get('id', addr)
        # Requests are sharded into subdirectories by the key's first chars.
        path = os.path.join(rdir, 'req', key[0], key[1], key)
        if os.path.exists(path):
            baseurl = 'file://' + rdir
            rewrite = config.get('url_rewrite')
            if rewrite:
                baseurl = baseurl.replace(*rewrite)
            out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
            out['URI'] += '&r=' + out['request_url']
            out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
            websocket_server_announce = config.get('websocket_server_announce')
            if websocket_server_announce:
                out['websocket_server'] = websocket_server_announce
            else:
                out['websocket_server'] = config.get('websocket_server', 'localhost')
            websocket_port_announce = config.get('websocket_port_announce')
            if websocket_port_announce:
                out['websocket_port'] = websocket_port_announce
            else:
                out['websocket_port'] = config.get('websocket_port', 9999)
    return out
def get_request_status(self, key):
    """Return (status, conf) for the stored payment request `key`.

    status is one of the PR_* constants; conf is the confirmation count
    of the paying transaction, or None when unknown / not applicable.
    """
    r = self.receive_requests.get(key)
    if r is None:
        # Bug fix: this path used to return a bare PR_UNKNOWN while every
        # other path returns a 2-tuple; callers unpack `status, conf`.
        return PR_UNKNOWN, None
    address = r['address']
    amount = r.get('amount')
    timestamp = r.get('time', 0)
    # isinstance instead of type() comparison; also neutralizes corrupt
    # falsy non-int values that previously leaked into the arithmetic below.
    if not isinstance(timestamp, int):
        timestamp = 0
    expiration = r.get('exp')
    if expiration is not None and not isinstance(expiration, int):
        expiration = 0
    conf = None
    if amount:
        if self.up_to_date:
            paid, conf = self.get_payment_status(address, amount)
            status = PR_PAID if paid else PR_UNPAID
            # exp of 0 / None means "never expires" (previously exp == 0
            # made a request expire immediately).
            if status == PR_UNPAID and expiration and time.time() > timestamp + expiration:
                status = PR_EXPIRED
        else:
            # Cannot check payment while not synchronized.
            status = PR_UNKNOWN
    else:
        status = PR_UNKNOWN
    return status, conf
def make_payment_request(self, addr, amount, message, expiration):
    """Assemble (but do not store) a payment-request dict for `addr`."""
    now = int(time.time())
    # Request id: first 10 hex chars of the hash of address + timestamp.
    request_id = bh2u(Hash(addr + "%d" % now))[0:10]
    return {
        'time': now,
        'amount': amount,
        'exp': expiration,
        'address': addr,
        'memo': message,
        'id': request_id,
    }
def sign_payment_request(self, key, alias, alias_addr, password):
    """BIP70-sign the request stored under `key` with the key of `alias_addr`."""
    req = self.receive_requests.get(key)
    alias_privkey = self.export_private_key(alias_addr, password)[0]
    pr = paymentrequest.make_unsigned_request(req)
    paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
    req['name'] = pr.pki_data
    req['sig'] = bh2u(pr.signature)
    self.receive_requests[key] = req
    self.storage.put('payment_requests', self.receive_requests)
def add_payment_request(self, req, config):
    """Store `req`, label its address, and (optionally) write BIP70 files."""
    addr = req['address']
    amount = req.get('amount')
    message = req.get('memo')
    self.receive_requests[addr] = req
    self.storage.put('payment_requests', self.receive_requests)
    self.set_label(addr, message) # should be a default label
    rdir = config.get('requests_dir')
    if rdir and amount is not None:
        key = req.get('id', addr)
        pr = paymentrequest.make_request(config, req)
        path = os.path.join(rdir, 'req', key[0], key[1], key)
        if not os.path.exists(path):
            try:
                os.makedirs(path)
            except OSError as exc:
                # Tolerate a concurrent mkdir of the same path.
                if exc.errno != errno.EEXIST:
                    raise
        with open(os.path.join(path, key), 'wb') as f:
            f.write(pr.SerializeToString())
        # reload
        req = self.get_payment_request(addr, config)
        with open(os.path.join(path, key + '.json'), 'w') as f:
            f.write(json.dumps(req))
    return req
def remove_payment_request(self, addr, config):
    """Delete the request for `addr` (and its BIP70 files); False if absent."""
    if addr not in self.receive_requests:
        return False
    r = self.receive_requests.pop(addr)
    rdir = config.get('requests_dir')
    if rdir:
        key = r.get('id', addr)
        for s in ['.json', '']:
            n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
            if os.path.exists(n):
                os.unlink(n)
    self.storage.put('payment_requests', self.receive_requests)
    return True
def get_sorted_requests(self, config):
    """Return all payment requests, sorted by address derivation index
    (falling back to the address string when no index is derivable)."""
    def sort_key(req):
        addr = req.get('address')
        try:
            return self.get_address_index(addr) or addr
        except Exception:
            # Bug fix: the original used a bare `except:` and, when the
            # failure happened before `addr` was bound, raised NameError
            # from inside the handler. Exception is narrowed and `addr`
            # is always bound before the try block.
            return addr
    requests = [self.get_payment_request(a, config) for a in self.receive_requests.keys()]
    return sorted(requests, key=sort_key)
def get_fingerprint(self):
    # Subclasses return a string identifying the wallet's key material.
    raise NotImplementedError()
def can_import_privkey(self):
    # Overridden by wallet types that accept imported private keys.
    return False
def can_import_address(self):
    # Overridden by wallet types that accept watch-only addresses.
    return False
def can_delete_address(self):
    # Overridden by wallet types whose addresses can be removed.
    return False
def add_address(self, address):
    """Start tracking `address`: give it an empty history and subscribe."""
    if address not in self.history:
        self.history[address] = []
    if self.synchronizer:
        self.synchronizer.add(address)
def has_password(self):
    # Password-protected in any sense: keystore and/or on-disk encryption.
    return self.has_keystore_encryption() or self.has_storage_encryption()
def can_have_keystore_encryption(self):
    # Watch-only keystores cannot be password protected.
    return self.keystore and self.keystore.may_have_password()
def get_available_storage_encryption_version(self):
    """Returns the type of storage encryption offered to the user.

    A wallet file (storage) is either encrypted with this version
    or is stored in plaintext.
    """
    if isinstance(self.keystore, Hardware_KeyStore):
        return STO_EV_XPUB_PW
    else:
        return STO_EV_USER_PW
def has_keystore_encryption(self):
    """Returns whether encryption is enabled for the keystore.

    If True, e.g. signing a transaction will require a password.
    """
    if self.can_have_keystore_encryption():
        return self.storage.get('use_encryption', False)
    return False
def has_storage_encryption(self):
    """Returns whether encryption is enabled for the wallet file on disk."""
    return self.storage.is_encrypted()
@classmethod
def may_have_password(cls):
    # Default: wallet types are password-capable unless overridden.
    return True
def check_password(self, password):
    """Raise InvalidPassword unless `password` unlocks keystore and storage."""
    if self.has_keystore_encryption():
        self.keystore.check_password(password)
    self.storage.check_password(password)
def update_password(self, old_pw, new_pw, encrypt_storage=False):
    """Change the wallet password, re-encrypting keystore and storage."""
    if old_pw is None and self.has_password():
        raise InvalidPassword()
    self.check_password(old_pw)
    if encrypt_storage:
        enc_version = self.get_available_storage_encryption_version()
    else:
        enc_version = STO_EV_PLAINTEXT
    self.storage.set_password(new_pw, enc_version)
    # note: Encrypting storage with a hw device is currently only
    # allowed for non-multisig wallets. Further,
    # Hardware_KeyStore.may_have_password() == False.
    # If these were not the case,
    # extra care would need to be taken when encrypting keystores.
    self._update_password_for_keystore(old_pw, new_pw)
    encrypt_keystore = self.can_have_keystore_encryption()
    self.storage.set_keystore_encryption(bool(new_pw) and encrypt_keystore)
    self.storage.write()
def sign_message(self, address, message, password):
    """Sign `message` with the key at `address` (delegates to the keystore)."""
    index = self.get_address_index(address)
    return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
    """Decrypt `message` encrypted to `pubkey` (delegates to the keystore)."""
    addr = self.pubkeys_to_address(pubkey)
    index = self.get_address_index(addr)
    return self.keystore.decrypt_message(index, message, password)
def get_depending_transactions(self, tx_hash):
    """Returns all (grand-)children of tx_hash in this wallet."""
    children = set()
    for other_hash, tx in self.transactions.items():
        # A child spends at least one output of tx_hash.
        if any(txin["prevout_hash"] == tx_hash for txin in tx.inputs()):
            children.add(other_hash)
            children |= self.get_depending_transactions(other_hash)
    return children
def txin_value(self, txin):
    """Satoshi value of `txin`, looked up in our per-tx output index."""
    txid = txin['prevout_hash']
    prev_n = txin['prevout_n']
    for address, d in self.txo[txid].items():
        for n, v, cb in d:
            if n == prev_n:
                return v
    raise BaseException('unknown txin value')
def price_at_timestamp(self, txid, price_func):
    # Fiat price at the tx's timestamp; unverified txs fall back to "now".
    height, conf, timestamp = self.get_tx_height(txid)
    return price_func(timestamp if timestamp else time.time())
def unrealized_gains(self, domain, price_func, ccy):
    """Current fiat value of unspent coins minus their acquisition cost."""
    coins = self.get_utxos(domain)
    now = time.time()
    p = price_func(now)
    # ap: aggregate acquisition price; lp: liquidation price at today's rate.
    ap = sum(self.coin_price(coin['prevout_hash'], price_func, ccy, self.txin_value(coin)) for coin in coins)
    lp = sum([coin['value'] for coin in coins]) * p / Decimal(COIN)
    return lp - ap
def capital_gain(self, txid, price_func, ccy):
    """Return (acquisition_price, liquidation_price) in fiat for the coins
    leaving the wallet in transaction `txid`.

    The capital gain is the difference of the two.
    price_func: function that returns the fiat price given a timestamp
    """
    out_value = - self.get_tx_value(txid)/Decimal(COIN)
    fiat_value = self.get_fiat_value(txid, ccy)
    # Prefer a user-recorded fiat value; else value at the tx timestamp.
    liquidation_price = - fiat_value if fiat_value else out_value * self.price_at_timestamp(txid, price_func)
    acquisition_price = out_value * self.average_price(txid, price_func, ccy)
    return acquisition_price, liquidation_price
def average_price(self, txid, price_func, ccy):
    """ Average acquisition price of the inputs of a transaction """
    input_value = 0
    total_price = 0
    for addr, d in self.txi.get(txid, {}).items():
        for ser, v in d:
            input_value += v
            total_price += self.coin_price(ser.split(':')[0], price_func, ccy, v)
    # NOTE(review): divides by zero when the tx has no inputs of ours;
    # the only caller (coin_price) guards by checking self.txi first.
    return total_price / (input_value/Decimal(COIN))
def coin_price(self, txid, price_func, ccy, txin_value):
    """
    Acquisition price of a coin.
    This assumes that either all inputs are mine, or no input is mine.
    """
    if self.txi.get(txid, {}) != {}:
        # We funded this tx: price the coin at the inputs' average price.
        return self.average_price(txid, price_func, ccy) * txin_value/Decimal(COIN)
    else:
        # External funding: use recorded fiat value, else historic price.
        fiat_value = self.get_fiat_value(txid, ccy)
        if fiat_value is not None:
            return fiat_value
        else:
            p = self.price_at_timestamp(txid, price_func)
            return p * txin_value/Decimal(COIN)
class Simple_Wallet(Abstract_Wallet):
    """Wallet backed by a single keystore."""

    def get_keystore(self):
        return self.keystore

    def get_keystores(self):
        return [self.keystore]

    def is_watching_only(self):
        return self.keystore.is_watching_only()

    def _update_password_for_keystore(self, old_pw, new_pw):
        # Re-encrypt the (single) keystore and persist it.
        if self.keystore and self.keystore.may_have_password():
            self.keystore.update_password(old_pw, new_pw)
            self.save_keystore()

    def save_keystore(self):
        self.storage.put('keystore', self.keystore.dump())
class Imported_Wallet(Simple_Wallet):
    """Wallet made of individually imported addresses (and, optionally,
    their private keys).

    Non-deterministic: every address is tracked separately in
    `self.addresses`, a dict of address -> {type, pubkey, redeem_script}.
    """
    wallet_type = 'imported'
    txin_type = 'address'  # refined per-address by get_txin_type()

    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)

    def is_watching_only(self):
        # Watch-only unless private keys were imported into a keystore.
        return self.keystore is None

    def get_keystores(self):
        return [self.keystore] if self.keystore else []

    def can_import_privkey(self):
        return bool(self.keystore)

    def load_keystore(self):
        self.keystore = load_keystore(self.storage, 'keystore') if self.storage.get('keystore') else None

    def save_keystore(self):
        self.storage.put('keystore', self.keystore.dump())

    def load_addresses(self):
        self.addresses = self.storage.get('addresses', {})
        # fixme: a reference to addresses is needed
        if self.keystore:
            self.keystore.addresses = self.addresses

    def save_addresses(self):
        self.storage.put('addresses', self.addresses)

    def can_import_address(self):
        # Only watch-only imported wallets accept bare addresses.
        return self.is_watching_only()

    def can_delete_address(self):
        return True

    def has_seed(self):
        return False

    def is_deterministic(self):
        return False

    def is_change(self, address):
        # Imported wallets have no change chain.
        return False

    def get_master_public_keys(self):
        return []

    def is_beyond_limit(self, address):
        return False

    def is_mine(self, address):
        return address in self.addresses

    def get_fingerprint(self):
        return ''

    def get_addresses(self, include_change=False):
        return sorted(self.addresses.keys())

    def get_receiving_addresses(self):
        return self.get_addresses()

    def get_change_addresses(self):
        return []

    def import_address(self, address):
        """Track a new watch-only address; '' when invalid or already known."""
        if not bitcoin.is_address(address):
            return ''
        if address in self.addresses:
            return ''
        self.addresses[address] = {}
        self.storage.put('addresses', self.addresses)
        self.storage.write()
        self.add_address(address)
        return address

    def delete_address(self, address):
        """Forget `address`, pruning txs that only it referenced."""
        if address not in self.addresses:
            return
        transactions_to_remove = set()  # only referred to by this address
        transactions_new = set()  # txs that are not only referred to by address
        with self.lock:
            for addr, details in self.history.items():
                if addr == address:
                    for tx_hash, height in details:
                        transactions_to_remove.add(tx_hash)
                else:
                    for tx_hash, height in details:
                        transactions_new.add(tx_hash)
            transactions_to_remove -= transactions_new
            self.history.pop(address, None)
            for tx_hash in transactions_to_remove:
                self.remove_transaction(tx_hash)
                self.tx_fees.pop(tx_hash, None)
                self.verified_tx.pop(tx_hash, None)
                self.unverified_tx.pop(tx_hash, None)
                self.transactions.pop(tx_hash, None)
            # FIXME: what about pruned_txo?
            self.storage.put('verified_tx3', self.verified_tx)
        self.save_transactions()
        self.set_label(address, None)
        self.remove_payment_request(address, {})
        self.set_frozen_state([address], False)
        pubkey = self.get_public_key(address)
        self.addresses.pop(address)
        if pubkey:
            # Also drop the matching imported private key.
            self.keystore.delete_imported_key(pubkey)
            self.save_keystore()
        self.storage.put('addresses', self.addresses)
        self.storage.write()

    def get_address_index(self, address):
        # For imported wallets the "index" is the pubkey (or None).
        return self.get_public_key(address)

    def get_public_key(self, address):
        return self.addresses[address].get('pubkey')

    def import_private_key(self, sec, pw, redeem_script=None):
        """Import WIF key `sec` (encrypted with `pw`); returns its address."""
        try:
            txin_type, pubkey = self.keystore.import_privkey(sec, pw)
        except Exception:
            raise BaseException('Invalid private key', sec)
        if txin_type in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            if redeem_script is not None:
                raise BaseException('Cannot use redeem script with', txin_type, sec)
            addr = bitcoin.pubkey_to_address(txin_type, pubkey)
        elif txin_type in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
            if redeem_script is None:
                raise BaseException('Redeem script required for', txin_type, sec)
            addr = bitcoin.redeem_script_to_address(txin_type, redeem_script)
        else:
            raise NotImplementedError(txin_type)
        self.addresses[addr] = {'type':txin_type, 'pubkey':pubkey, 'redeem_script':redeem_script}
        self.save_keystore()
        self.save_addresses()
        self.storage.write()
        self.add_address(addr)
        return addr

    def get_redeem_script(self, address):
        d = self.addresses[address]
        redeem_script = d['redeem_script']
        return redeem_script

    def get_txin_type(self, address):
        return self.addresses[address].get('type', 'address')

    def add_input_sig_info(self, txin, address):
        """Fill in the signing slots of `txin` for `address`."""
        if self.is_watching_only():
            # 'fd'-prefixed x_pubkey encodes a bare script (no key known).
            x_pubkey = 'fd' + address_to_script(address)
            txin['x_pubkeys'] = [x_pubkey]
            txin['signatures'] = [None]
            return
        if txin['type'] in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            pubkey = self.addresses[address]['pubkey']
            txin['num_sig'] = 1
            txin['x_pubkeys'] = [pubkey]
            txin['signatures'] = [None]
        else:
            redeem_script = self.addresses[address]['redeem_script']
            # NOTE(review): multisig parameters are hard-coded to 2-of-3
            # here -- confirm imported script addresses are always 2-of-3.
            num_sig = 2
            num_keys = 3
            txin['num_sig'] = num_sig
            txin['redeem_script'] = redeem_script
            txin['signatures'] = [None] * num_keys

    def pubkeys_to_address(self, pubkey):
        # Reverse lookup: the imported address owning `pubkey`, or None.
        for addr, v in self.addresses.items():
            if v.get('pubkey') == pubkey:
                return addr
class Deterministic_Wallet(Abstract_Wallet):
    """Wallet whose addresses are derived from a seed/xpub in two chains
    (receiving and change), extended on demand up to a gap limit."""

    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
        self.gap_limit = storage.get('gap_limit', 20)

    def has_seed(self):
        return self.keystore.has_seed()

    def get_receiving_addresses(self):
        return self.receiving_addresses

    def get_change_addresses(self):
        return self.change_addresses

    def get_seed(self, password):
        return self.keystore.get_seed(password)

    def add_seed(self, seed, pw):
        self.keystore.add_seed(seed, pw)

    def change_gap_limit(self, value):
        '''This method is not called in the code, it is kept for console use'''
        if value >= self.gap_limit:
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit)
            return True
        elif value >= self.min_acceptable_gap():
            # Shrinking: trim unused trailing receiving addresses.
            addresses = self.get_receiving_addresses()
            k = self.num_unused_trailing_addresses(addresses)
            n = len(addresses) - k + value
            self.receiving_addresses = self.receiving_addresses[0:n]
            self.gap_limit = value
            self.storage.put('gap_limit', self.gap_limit)
            self.save_addresses()
            return True
        else:
            return False

    def num_unused_trailing_addresses(self, addresses):
        # Count history-less addresses at the tail of the chain.
        k = 0
        for a in addresses[::-1]:
            if self.history.get(a):break
            k = k + 1
        return k

    def min_acceptable_gap(self):
        # Largest run of consecutive unused addresses before the tail, + 1.
        # fixme: this assumes wallet is synchronized
        n = 0
        nmax = 0
        addresses = self.get_receiving_addresses()
        k = self.num_unused_trailing_addresses(addresses)
        for a in addresses[0:-k]:
            if self.history.get(a):
                n = 0
            else:
                n += 1
                if n > nmax: nmax = n
        return nmax + 1

    def load_addresses(self):
        super().load_addresses()
        # Cache address -> (is_change, index) for O(1) is_mine lookups.
        self._addr_to_addr_index = {}  # key: address, value: (is_change, index)
        for i, addr in enumerate(self.receiving_addresses):
            self._addr_to_addr_index[addr] = (False, i)
        for i, addr in enumerate(self.change_addresses):
            self._addr_to_addr_index[addr] = (True, i)

    def create_new_address(self, for_change=False):
        """Derive and register the next address of the chosen chain."""
        assert type(for_change) is bool
        with self.lock:
            addr_list = self.change_addresses if for_change else self.receiving_addresses
            n = len(addr_list)
            x = self.derive_pubkeys(for_change, n)
            address = self.pubkeys_to_address(x)
            addr_list.append(address)
            self._addr_to_addr_index[address] = (for_change, n)
            self.save_addresses()
            self.add_address(address)
            return address

    def synchronize_sequence(self, for_change):
        # Generate addresses until the last `limit` ones are all unused.
        limit = self.gap_limit_for_change if for_change else self.gap_limit
        while True:
            addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
            if len(addresses) < limit:
                self.create_new_address(for_change)
                continue
            if list(map(lambda a: self.address_is_old(a), addresses[-limit:] )) == limit*[False]:
                break
            else:
                self.create_new_address(for_change)

    def synchronize(self):
        with self.lock:
            self.synchronize_sequence(False)
            self.synchronize_sequence(True)

    def is_beyond_limit(self, address):
        # True when every address in the preceding gap window is unused.
        is_change, i = self.get_address_index(address)
        addr_list = self.get_change_addresses() if is_change else self.get_receiving_addresses()
        limit = self.gap_limit_for_change if is_change else self.gap_limit
        if i < limit:
            return False
        prev_addresses = addr_list[max(0, i - limit):max(0, i)]
        for addr in prev_addresses:
            if self.history.get(addr):
                return False
        return True

    def is_mine(self, address):
        return address in self._addr_to_addr_index

    def get_address_index(self, address):
        return self._addr_to_addr_index[address]

    def get_master_public_keys(self):
        return [self.get_master_public_key()]

    def get_fingerprint(self):
        return self.get_master_public_key()

    def get_txin_type(self, address):
        return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
    """ Deterministic Wallet with a single pubkey per address """

    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)

    def get_public_key(self, address):
        sequence = self.get_address_index(address)
        pubkey = self.get_pubkey(*sequence)
        return pubkey

    def load_keystore(self):
        self.keystore = load_keystore(self.storage, 'keystore')
        try:
            xtype = bitcoin.xpub_type(self.keystore.xpub)
        except:
            # Legacy keystores without an xpub type default to standard.
            xtype = 'standard'
        self.txin_type = 'p2pkh' if xtype == 'standard' else xtype

    def get_pubkey(self, c, i):
        return self.derive_pubkeys(c, i)

    def add_input_sig_info(self, txin, address):
        # Single-key wallet: exactly one extended pubkey / signature slot.
        derivation = self.get_address_index(address)
        x_pubkey = self.keystore.get_xpubkey(*derivation)
        txin['x_pubkeys'] = [x_pubkey]
        txin['signatures'] = [None]
        txin['num_sig'] = 1

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def derive_pubkeys(self, c, i):
        return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
    """Default single-signature deterministic wallet."""
    wallet_type = 'standard'

    def pubkeys_to_address(self, pubkey):
        return bitcoin.pubkey_to_address(self.txin_type, pubkey)
class Multisig_Wallet(Deterministic_Wallet):
    # generic m of n
    gap_limit = 20

    def __init__(self, storage):
        self.wallet_type = storage.get('wallet_type')
        # e.g. '2of3' -> (m=2, n=3)
        self.m, self.n = multisig_type(self.wallet_type)
        Deterministic_Wallet.__init__(self, storage)

    def get_pubkeys(self, c, i):
        return self.derive_pubkeys(c, i)

    def get_public_keys(self, address):
        sequence = self.get_address_index(address)
        return self.get_pubkeys(*sequence)

    def pubkeys_to_address(self, pubkeys):
        redeem_script = self.pubkeys_to_redeem_script(pubkeys)
        return bitcoin.redeem_script_to_address(self.txin_type, redeem_script)

    def pubkeys_to_redeem_script(self, pubkeys):
        # Pubkeys are sorted so all cosigners derive the same script.
        return transaction.multisig_script(sorted(pubkeys), self.m)

    def get_redeem_script(self, address):
        pubkeys = self.get_public_keys(address)
        redeem_script = self.pubkeys_to_redeem_script(pubkeys)
        return redeem_script

    def derive_pubkeys(self, c, i):
        return [k.derive_pubkey(c, i) for k in self.get_keystores()]

    def load_keystore(self):
        # Cosigner keystores are stored under 'x1/', 'x2/', ... 'xn/'.
        self.keystores = {}
        for i in range(self.n):
            name = 'x%d/'%(i+1)
            self.keystores[name] = load_keystore(self.storage, name)
        self.keystore = self.keystores['x1/']
        xtype = bitcoin.xpub_type(self.keystore.xpub)
        self.txin_type = 'p2sh' if xtype == 'standard' else xtype

    def save_keystore(self):
        for name, k in self.keystores.items():
            self.storage.put(name, k.dump())

    def get_keystore(self):
        return self.keystores.get('x1/')

    def get_keystores(self):
        return [self.keystores[i] for i in sorted(self.keystores.keys())]

    def can_have_keystore_encryption(self):
        return any([k.may_have_password() for k in self.get_keystores()])

    def _update_password_for_keystore(self, old_pw, new_pw):
        for name, keystore in self.keystores.items():
            if keystore.may_have_password():
                keystore.update_password(old_pw, new_pw)
                self.storage.put(name, keystore.dump())

    def check_password(self, password):
        for name, keystore in self.keystores.items():
            if keystore.may_have_password():
                keystore.check_password(password)
        self.storage.check_password(password)

    def get_available_storage_encryption_version(self):
        # multisig wallets are not offered hw device encryption
        return STO_EV_USER_PW

    def has_seed(self):
        return self.keystore.has_seed()

    def is_watching_only(self):
        # Watching-only iff every cosigner keystore is watching-only.
        return not any([not k.is_watching_only() for k in self.get_keystores()])

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def get_master_public_keys(self):
        return [k.get_master_public_key() for k in self.get_keystores()]

    def get_fingerprint(self):
        return ''.join(sorted(self.get_master_public_keys()))

    def add_input_sig_info(self, txin, address):
        # x_pubkeys are not sorted here because it would be too slow
        # they are sorted in transaction.get_sorted_pubkeys
        # pubkeys is set to None to signal that x_pubkeys are unsorted
        derivation = self.get_address_index(address)
        txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
        txin['pubkeys'] = None
        # we need n place holders
        txin['signatures'] = [None] * self.n
        txin['num_sig'] = self.m
# Registry of known wallet types and their constructors; plugins extend
# both via the register_* hooks below.
wallet_types = ['standard', 'multisig', 'imported']

def register_wallet_type(category):
    # Called by plugins to advertise an additional wallet type.
    wallet_types.append(category)

wallet_constructors = {
    'standard': Standard_Wallet,
    'old': Standard_Wallet,
    'xpub': Standard_Wallet,
    'imported': Imported_Wallet
}

def register_constructor(wallet_type, constructor):
    # Called by plugins to map their wallet type to a wallet class.
    wallet_constructors[wallet_type] = constructor
# former WalletFactory
class Wallet(object):
    """The main wallet "entry point".

    This class is actually a factory that will return a wallet of the correct
    type when passed a WalletStorage instance."""

    def __new__(self, storage):
        wallet_type = storage.get('wallet_type')
        WalletClass = Wallet.wallet_class(wallet_type)
        wallet = WalletClass(storage)
        # Convert hardware wallets restored with older versions of
        # Electrum to BIP44 wallets. A hardware wallet does not have
        # a seed and plugins do not need to handle having one.
        rwc = getattr(wallet, 'restore_wallet_class', None)
        if rwc and storage.get('seed', ''):
            storage.print_error("converting wallet type to " + rwc.wallet_type)
            storage.put('wallet_type', rwc.wallet_type)
            wallet = rwc(storage)
        return wallet

    @staticmethod
    def wallet_class(wallet_type):
        # Multisig types ('2of3', ...) all map to Multisig_Wallet.
        if multisig_type(wallet_type):
            return Multisig_Wallet
        if wallet_type in wallet_constructors:
            return wallet_constructors[wallet_type]
        raise RuntimeError("Unknown wallet type: " + wallet_type)
| romanz/electrum | lib/wallet.py | Python | mit | 85,717 |
import asyncio
import aiohttp
import async_timeout
import atexit
import re
import json
from .. import exception
from ..api import _methodurl, _which_pool, _fileurl, _guess_filename
_loop = asyncio.get_event_loop()
# Reusable HTTP connection pools, keyed by name; 'default' caps at 10
# concurrent connections.
# NOTE(review): creating a ClientSession at import time, outside a running
# event loop, is deprecated in newer aiohttp/asyncio -- confirm the
# supported versions for this module.
_pools = {
    'default': aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(limit=10),
        loop=_loop)
}
_timeout = 30  # default per-request HTTP timeout, in seconds
_proxy = None  # (url, (username, password))
def set_proxy(url, basic_auth=None):
    """Configure (or clear, with a falsy `url`) the module-wide HTTP proxy."""
    global _proxy
    if not url:
        _proxy = None
    elif basic_auth:
        _proxy = (url, basic_auth)
    else:
        _proxy = (url,)
def _proxy_kwargs():
    """Translate the module-level proxy setting into aiohttp post() kwargs."""
    if not _proxy:
        return {}
    if len(_proxy) == 1:
        return {'proxy': _proxy[0]}
    if len(_proxy) == 2:
        return {'proxy': _proxy[0], 'proxy_auth': aiohttp.BasicAuth(*_proxy[1])}
    raise RuntimeError("_proxy has invalid length")
async def _close_pools():
    """Close every pooled ClientSession; scheduled to run at interpreter exit."""
    global _pools
    for s in _pools.values():
        await s.close()

atexit.register(lambda: _loop.create_task(_close_pools()))  # have to wrap async function
def _create_onetime_pool():
    # Session for a single request: one connection, closed right after use.
    return aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(limit=1, force_close=True),
        loop=_loop)
def _default_timeout(req, **user_kw):
    # `req` is accepted for interface symmetry with other timeout policies.
    return _timeout
def _compose_timeout(req, **user_kw):
    """Pick the HTTP timeout (seconds, or None for no limit) for `req`."""
    token, method, params, files = req
    if method == 'getUpdates' and params and 'timeout' in params:
        # Ensure HTTP timeout is longer than getUpdates timeout
        return params['timeout'] + _default_timeout(req, **user_kw)
    if files:
        # Disable timeout if uploading files. For some reason, the larger the file,
        # the longer it takes for the server to respond (after upload is finished).
        # It is unclear how long timeout should be.
        return None
    return _default_timeout(req, **user_kw)
def _compose_data(req, **user_kw):
    """Build the multipart form body (params + file attachments)."""
    token, method, params, files = req
    form = aiohttp.FormData()
    if params:
        for name, value in params.items():
            form.add_field(name, str(value))
    if files:
        for name, f in files.items():
            if not isinstance(f, tuple):
                # Bare file object: derive a filename, fall back to the key.
                filename, fileobj = _guess_filename(f) or name, f
            elif len(f) == 2:
                filename, fileobj = f
            else:
                raise ValueError('Tuple must have exactly 2 elements: filename, fileobj')
            form.add_field(name, fileobj, filename=filename)
    return form
def _transform(req, **user_kw):
    """Turn a request 4-tuple into (callable, args, kwargs, timeout, cleanup)."""
    timeout = _compose_timeout(req, **user_kw)
    payload = _compose_data(req, **user_kw)
    url = _methodurl(req, **user_kw)
    pool_name = _which_pool(req, **user_kw)
    if pool_name is None:
        # One-shot session: the returned cleanup closes it afterwards.
        session = _create_onetime_pool()
        cleanup = session.close
    else:
        # Pooled session is shared; never closed per-request.
        session = _pools[pool_name]
        cleanup = None
    # user_kw may deliberately override 'data'.
    kwargs = {'data': payload, **user_kw}
    return session.post, (url,), kwargs, timeout, cleanup
async def _parse(response):
    """Decode a Bot API response: return 'result' or raise a TelegramError.

    Non-JSON bodies raise BadHTTPResponse; API-level failures are matched
    against TelegramError subclasses by their DESCRIPTION_PATTERNS.
    """
    try:
        data = await response.json()
        if data is None:
            raise ValueError()
    except (ValueError, json.JSONDecodeError, aiohttp.ClientResponseError):
        text = await response.text()
        raise exception.BadHTTPResponse(response.status, text, response)
    if not data['ok']:
        description, error_code = data['description'], data['error_code']
        # Look for a specific error subclass first ...
        for err_cls in exception.TelegramError.__subclasses__():
            if any(re.search(pattern, description, re.IGNORECASE)
                   for pattern in err_cls.DESCRIPTION_PATTERNS):
                raise err_cls(description, error_code, data)
        # ... or fall back to the generic error.
        raise exception.TelegramError(description, error_code, data)
    return data['result']
async def request(req, **user_kw):
    """Perform a Bot API request and return the parsed 'result'.

    A timeout becomes TelegramError(504); a connection failure becomes
    TelegramError(400). One-time sessions are always cleaned up.
    """
    fn, args, kwargs, timeout, cleanup = _transform(req, **user_kw)
    kwargs.update(_proxy_kwargs())
    try:
        if timeout is None:
            # No timeout (e.g. file uploads — see _compose_timeout).
            async with fn(*args, **kwargs) as r:
                return await _parse(r)
        else:
            try:
                with async_timeout.timeout(timeout):
                    async with fn(*args, **kwargs) as r:
                        return await _parse(r)
            except asyncio.TimeoutError:
                raise exception.TelegramError('Response timeout', 504, {})
    except aiohttp.ClientConnectionError:
        raise exception.TelegramError('Connection Error', 400, {})
    finally:
        if cleanup:  # e.g. closing one-time session
            if asyncio.iscoroutinefunction(cleanup):
                await cleanup()
            else:
                cleanup()
def download(req):
    """Start a file download; return (session, response context manager).

    Caller should close the session after the download is complete.
    """
    session = _create_onetime_pool()
    kwargs = dict(_proxy_kwargs())
    return session, session.get(_fileurl(req), timeout=_timeout, **kwargs)
| nickoala/telepot | telepot/aio/api.py | Python | mit | 5,062 |
import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from perf.errsim import *
def plot_x_vs_pmf(params, show=True, fpath=None):
    """Plot the symbol-error PMF p_X(x) for each parameter set.

    Each entry in ``params`` is a dict of errpmf() keyword arguments;
    pb=None selects the BSC model (pb falls back to pe).
    """
    def draw(ax, xs, param, **plotargs):
        if param['pb'] is None:
            # BSC: bit errors are independent, so pb degenerates to pe.
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        plotargs.setdefault('label', label)
        ax.plot(xs, pmf[xs], **plotargs)

    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    xs = np.arange(11)
    for param in params:
        # Copy so draw() can mutate pb without touching the caller's dict.
        draw(ax, xs, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_ylim(1e-25, 1e-1)
    ax.set_ylabel('PMF, $p_X(x)$')
    ax.set_yscale('log')
    ax.grid(True)
    ax.set_xticks(xs)
    ax.set_xlabel('Number of Symbols, $x$')
    ax.set_title('Symbol Error PMF (Prob. of x errors in n digits)')
    ax.legend(fontsize=12)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_x_vs_pndc(params, show=True, fpath=None):
    """Plot the probability of not-decoding-correctly vs. symbols corrected."""
    def draw(ax, xs, param, **plotargs):
        if param['pb'] is None:
            # BSC: bit errors are independent, so pb degenerates to pe.
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pndc = prob_ndc(errpmf(**param))
        plotargs.setdefault('label', label)
        ax.plot(xs, pndc[xs], **plotargs)

    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    xs = np.arange(11)
    for param in params:
        draw(ax, xs, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_ylim(1e-25, 1e-1)
    ax.set_ylabel('$P_{ndc}(t)$')
    ax.set_yscale('log')
    ax.grid(True)
    ax.set_xticks(xs)
    ax.set_xlabel('Number of Symbols, $x$')
    ax.set_title('Probability of not-decoding-correctly')
    ax.legend(fontsize=12)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_t_vs_ober(params, show=True, fpath=None):
    """Plot output BER as a function of the number of symbols corrected, t."""
    def draw(ax, ts, param, **plotargs):
        if param['pb'] is None:
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        ober = ber_out(param['pe'], param['pb'], pmf)
        plotargs.setdefault('label', label)
        ax.plot(ts, ober[ts], **plotargs)

    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    ts = np.arange(11)
    for param in params:
        draw(ax, ts, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_ylim(1e-25, 1e-5)
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_yscale('log')
    ax.grid(True)
    ax.set_xticks(ts)
    ax.set_xlabel('Number of Symbols corrected, $t$')
    ax.set_title('Number of Symbols Corrected vs. Output BER')
    ax.legend(fontsize=12)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_r_vs_ober(params, show=True, fpath=None):
    """Plot output BER vs. corrected-symbol fraction and vs. coding rate."""
    def draw(axes, ts, param, **plotargs):
        if param['pb'] is None:
            param['pb'] = param['pe']
            label = 'BSC pe={pe} m={m} n={n}'.format(**param)
        else:
            label = 'GBMM pe={pe} pb={pb} m={m} n={n}'.format(**param)
        pmf = errpmf(**param)
        ober = ber_out(param['pe'], param['pb'], pmf)
        plotargs.setdefault('label', label)
        n = param['n']
        frac_t = 100 * ts / n        # corrected symbols as % of block length
        r = (n - 2 * ts) / n         # coding rate R = k/n with k = n - 2t
        axes[0].plot(frac_t, ober[ts], **plotargs)
        axes[1].plot(r, ober[ts], **plotargs)

    plt.close('all')
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=plt.figaspect(1/2))
    ts = np.arange(16)
    for param in params:
        draw(axes, ts, param.copy(), lw=1.5)
    # Shared cosmetics for both subplots.
    for ax in axes:
        ax.axhline(1e-15, color='black', linestyle='dashed')
        ax.set_ylim(1e-25, 1e-5)
        ax.set_ylabel('Output BER, $BER_o$')
        ax.set_yscale('log')
        ax.grid(True)
    axes[0].set_xlim(0, 10)
    axes[0].set_xlabel('Fraction of Symbols corrected, $t/n$ [%]')
    axes[0].set_title('Fraction of Symbols corrected vs. Output BER')
    axes[0].legend(loc='upper right', fontsize=12)
    axes[1].set_xlim(0.8, 1.0)
    axes[1].set_xlabel('Coding Rate, $R = k/n = (n - 2t)/n$')
    axes[1].set_title('Coding Rate vs. Output BER')
    axes[1].legend(loc='upper left', fontsize=12)
    plt.tight_layout()
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_pe_vs_ober(params, show=True, fpath=None):
    """Plot output BER vs. input BER for fixed (m, n, t) code parameters."""
    def draw(ax, pe, param, **plotargs):
        if param['pb'] is None:
            # BSC label; pb stays None — pe_vs_ober handles that case.
            label = 'BSC m={m} n={n} t={t}'.format(**param)
        else:
            label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
        ober = pe_vs_ober(pe, **param)
        plotargs.setdefault('label', label)
        ax.plot(pe, ober, **plotargs)

    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    pe = 10.0 ** np.arange(-15, -0.5, 0.5)
    for param in params:
        draw(ax, pe, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlim(pe[0], pe[-1])
    ax.set_ylim(1e-25, 1e-1)
    ax.set_xlabel('Input BER, $BER_i$')
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_title('Input vs. Output BER')
    ax.legend(loc='upper left', fontsize=12)
    ax.grid(True)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
def plot_ebn0_vs_ober(params, show=True, fpath=None):
    """Plot output BER vs. Eb/N0, with uncoded reference curves."""
    def draw(ax, ebn0, param, **plotargs):
        if param['pb'] is None:
            label = 'BSC m={m} n={n} t={t}'.format(**param)
        else:
            label = 'GBMM pb={pb} m={m} n={n} t={t}'.format(**param)
        n = param['n']
        t = param['t']
        R = (n - 2 * t) / n
        # Es/N0 = Eb/N0 + 10*log10(R): account for the coding-rate penalty.
        esn0 = ebn0 + dB(R)
        pe = esn02pe(esn0)
        ober = pe_vs_ober(pe, **param)
        plotargs.setdefault('label', label)
        ax.plot(ebn0, ober, **plotargs)

    plt.close('all')
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=plt.figaspect(1/2))
    ebn0 = np.arange(5, 20.5, 0.5)
    # Uncoded (FEC input) curves for reference.
    pe = esn02pe(ebn0)
    iber = ber_in(pe=pe, pb=0.5)
    ax.plot(ebn0, pe, lw=1.5, color='black', label='Uncoded BSC')
    ax.plot(ebn0, iber, lw=1.5, color='black', linestyle='dashed',
            label='Uncoded GBMM(pb=0.5)')
    for param in params:
        draw(ax, ebn0, param.copy(), lw=1.5)
    ax.axhline(1e-15, color='black', linestyle='dashed')
    ax.set_yscale('log')
    ax.set_xlim(ebn0[0], ebn0[-1])
    ax.set_xticks(ebn0[::2])
    ax.set_ylim(1e-25, 1e-1)
    ax.set_xlabel('$E_b/N_0 [dB]$')
    ax.set_ylabel('Output BER, $BER_o$')
    ax.set_title('Eb/N0 vs. Output BER')
    ax.legend(fontsize=10)
    ax.grid(True)
    if fpath:
        fig.savefig(fpath)
    if show:
        plt.show()
if __name__ == '__main__':
    # CLI entry point: generate the full set of performance plots into DIR.
    argp = argparse.ArgumentParser(description='Create code performance plots.')
    argp.add_argument('dir', metavar='DIR', help='plots directory')
    argp.add_argument('--no-show', dest='show', action='store_false',
                      help='Don\'t show, just save to file.')
    argns = argp.parse_args()
    dirpath = os.path.abspath(argns.dir)
    os.makedirs(dirpath, exist_ok=True)
    # pe vs ober
    params = [
        # GBMM
        dict(pb=0.5, m=8, n=124, t=4),
        dict(pb=0.5, m=8, n=124, t=6),
        dict(pb=0.5, m=8, n=124, t=8),
        dict(pb=0.5, m=8, n=248, t=4),
        dict(pb=0.5, m=8, n=248, t=6),
        dict(pb=0.5, m=8, n=248, t=8),
        dict(pb=0.5, m=10, n=528, t=7),
        # BSC
        dict(pb=None, m=8, n=124, t=4),
        dict(pb=None, m=8, n=248, t=4)]
    plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, 'pe-vs-ober.png'))
    plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, 'ebn0-vs-ober.png'))
    # 240-bit blocks with varying t
    params = [
        # GBMM
        dict(pb=0.5, m=8, n=240//8, t=1),
        dict(pb=0.5, m=8, n=240//8, t=2),
        dict(pb=0.5, m=8, n=240//8, t=3),
        # BSC
        dict(pb=None, m=8, n=240//8, t=1),
        dict(pb=None, m=8, n=240//8, t=2),
        dict(pb=None, m=8, n=240//8, t=3)]
    plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-pe-vs-ober.png'))
    plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-ebn0-vs-ober.png'))
    # 120-bit blocks with varying t
    params = [
        # GBMM
        dict(pb=0.5, m=8, n=120//8, t=1),
        dict(pb=0.5, m=8, n=120//8, t=2),
        dict(pb=0.5, m=8, n=120//8, t=3),
        # BSC
        dict(pb=None, m=8, n=120//8, t=1),
        dict(pb=None, m=8, n=120//8, t=2),
        dict(pb=None, m=8, n=120//8, t=3)]
    plot_pe_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-pe-vs-ober.png'))
    plot_ebn0_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-ebn0-vs-ober.png'))
    #sys.exit()
    # Short codes
    params = [
        # GBMM
        dict(pe=1e-12, pb=0.5, m=5, n=240//5),
        dict(pe=1e-12, pb=0.5, m=8, n=240//8),
        # BSC
        dict(pe=1e-12, pb=None, m=5, n=240//5),
        dict(pe=1e-12, pb=None, m=8, n=240//8)]
    plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, '240bits-x-vs-pmf.png'))
    plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, '240bits-x-vs-pndc.png'))
    plot_t_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-t-vs-ober.png'))
    plot_r_vs_ober(params, argns.show, os.path.join(dirpath, '240bits-r-vs-ober.png'))
    # Very short codes
    params = [
        # GBMM
        dict(pe=1e-12, pb=0.5, m=5, n=120//5),
        dict(pe=1e-12, pb=0.5, m=8, n=120//8),
        # BSC
        dict(pe=1e-12, pb=None, m=5, n=120//5),
        dict(pe=1e-12, pb=None, m=8, n=120//8)]
    plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, '120bits-x-vs-pmf.png'))
    plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, '120bits-x-vs-pndc.png'))
    plot_t_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-t-vs-ober.png'))
    plot_r_vs_ober(params, argns.show, os.path.join(dirpath, '120bits-r-vs-ober.png'))
    # Practical codes
    params = [
        # GBMM
        dict(pe=1e-6, pb=0.5, m=8, n=124),
        dict(pe=1e-6, pb=0.5, m=8, n=248),
        dict(pe=1e-6, pb=0.5, m=10, n=264),
        dict(pe=1e-6, pb=0.5, m=10, n=528),
        # BSC
        dict(pe=1e-6, pb=None, m=8, n=124),
        dict(pe=1e-6, pb=None, m=8, n=248)]
    plot_x_vs_pmf(params, argns.show, os.path.join(dirpath, 'x-vs-pmf.png'))
    plot_x_vs_pndc(params, argns.show, os.path.join(dirpath, 'x-vs-pndc.png'))
    plot_t_vs_ober(params, argns.show, os.path.join(dirpath, 't-vs-ober.png'))
    plot_r_vs_ober(params, argns.show, os.path.join(dirpath, 'r-vs-ober.png'))
| r-rathi/error-control-coding | perf/perf_plots.py | Python | mit | 11,256 |
#!/usr/bin/env python3
"""
Unit tests for the Grammar class and for the GrammarAnalyzer class.
Tests each grammar in the "grammars" folder against a variety of strings.
"""
import unittest
from grammar import Grammar
from grammaranalyzer import GrammarAnalyzer
class TestGrammar(unittest.TestCase):
    """Unit tests for Grammar loading, productions and rule lookup."""

    def test_nonexistent_file(self):
        # Loading a missing file must not raise; it yields an empty grammar.
        grammar = Grammar("nonexistent.json")
        self.assertEqual(grammar.get_desc(), "")

    def test_grammar_load(self):
        grammar = Grammar("grammars/grammar1.json")
        self.assertEqual(grammar.get_desc(), "{a^n # b^n | n > 0}")

    def test_grammar_productions(self):
        grammar = Grammar("grammars/grammar1.json")
        # Start variable and intermediate variable productions.
        self.assertEqual(grammar.produces("S"), ["aAb"])
        self.assertEqual(grammar.produces("A"), ["aAb", "#"])
        # An unknown variable produces nothing.
        self.assertFalse(grammar.produces("N"))

    def test_grammar_rules(self):
        grammar = Grammar("grammars/grammar1.json")
        # The correct rule is selected per (variable, input symbol).
        self.assertEqual(grammar.get_rule("S", "a"), "aAb")
        self.assertEqual(grammar.get_rule("A", "#"), "#")
        # Unknown input symbol / unknown variable yield nothing.
        self.assertFalse(grammar.get_rule("S", "k"))
        self.assertFalse(grammar.get_rule("N", "a"))
class TestGrammarAnalyzer(unittest.TestCase):
    """Tests each grammar file against accepted and rejected strings."""

    def _check(self, grammar_file, accepted, rejected):
        # Shared driver: every string in `accepted` must be recognized,
        # every string in `rejected` must not be.
        analyzer = GrammarAnalyzer(Grammar(grammar_file))
        for s in accepted:
            self.assertTrue(analyzer.test_string(s))
        for s in rejected:
            self.assertFalse(analyzer.test_string(s))

    def test_grammar1(self):
        # {a^n # b^n | n > 0}
        accepted = ['a' * k + '#' + 'b' * k for k in range(1, 11)]
        rejected = ["xxx", "", "#", "a", "aa#b", "a#bb", "asdf",
                    "aaaa#bbbbbb"]
        self._check("grammars/grammar1.json", accepted, rejected)

    def test_grammar2(self):
        # {w # reverse(w)} over {0,1}
        accepted = ["#", "0#0", "1#1", "01#10", "10#01", "010#010",
                    "1111#1111", "010001#100010", "0100011#1100010",
                    "01000101#10100010"]
        rejected = ["xxx", "", "0", "0#1", "1#10", "01#01", "11#111",
                    "111#11", "111#110", "0111#110"]
        self._check("grammars/grammar2.json", accepted, rejected)

    def test_grammar3(self):
        # a^i # b^i # c^j #
        accepted = ["a#b#c#", "a#b#cc#", "a#b#ccc#", "a#b#cccc#",
                    "a#b#ccccc#", "aa#bb#c#", "aa#bb#cc#", "aa#bb#ccc#",
                    "aa#bb#cccc#", "aa#bb#ccccc#", "aaaa#bbbb#c#",
                    "aaaaa#bbbbb#c#", "aaaaa#bbbbb#cc#", "aaaaa#bbbbb#ccc#",
                    "aaaaa#bbbbb#cccc#", "aaaaa#bbbbb#ccccc#"]
        rejected = ["xxx", "", "a", "a#b", "a#b#c", "####", "abcd",
                    "aaaaa#bbb#c#", "aaaaa##ccccc#", "aaaa##ccccc#",
                    "aaa##ccccc#"]
        self._check("grammars/grammar3.json", accepted, rejected)

    def test_grammar4(self):
        # a^i # b^i # c^j # d^j
        accepted = ["a#b#c#d", "aa#bb#c#d", "a#b#cc#dd", "aaa#bbb#c#d",
                    "a#b#ccc#ddd", "aaaa#bbbb#c#d", "a#b#cccc#dddd",
                    "aa#bb#cccc#dddd", "aaa#bbb#cccc#dddd",
                    "aaaa#bbbb#ccccc#ddddd", "a#b#cccccc#dddddd",
                    "aaaaaaa#bbbbbbb#c#d"]
        rejected = ["xxx", "", "#", "a#b#c#", "#b#c#d", "a#bb#c#d",
                    "a#b#c#dd", "a#bb#c#dd", "aa#bb#cc#dd#",
                    "aaa#bbb#ccc#dddd", "aaa#bbb#ccc#dddd##"]
        self._check("grammars/grammar4.json", accepted, rejected)
| JVMartin/grammar-analyzer | tests.py | Python | mit | 7,593 |
import re
import datetime
from pymongo import MongoClient
from bson import ObjectId
from .exception import RecorderException, StructureException
__all__ = ['get_database', 'Recorder', 'Structure']
def get_database(db_name, host, port=27017):
    """Return the named database from a MongoClient at host:port."""
    client = MongoClient(host, port)
    return client[db_name]
class Structure(dict):
    """Schema/default-value container used as ``Recorder.struct``.

    A dict whose items are also readable as attributes: ``__init__`` binds
    ``self.__dict__`` to the mapping itself, so ``s.field`` and
    ``s['field']`` share the same storage. Subclasses may override
    ``_validate()`` to check fields at construction time (e.g. raise
    StructureException).
    """
    # NOTE: removed the unused private class attribute ``__store = {}`` —
    # nothing in this file ever read or wrote it.

    def __init__(self, *args, **kwargs):
        super(Structure, self).__init__(*args, **kwargs)
        # Alias attribute storage to the mapping itself: attribute access
        # and item access now hit the same data.
        self.__dict__ = self
        self._validate()

    def _validate(self):
        """Validation hook for subclasses; the default accepts anything."""
        pass

    def to_dict(self):
        """Return the underlying mapping (the instance itself)."""
        return self.__dict__
class Recorder:
    """Lightweight ActiveRecord-style wrapper over a MongoDB collection.

    Subclasses must set ``struct`` to a Structure instance describing the
    document's fields and defaults, and assign a pymongo database to
    ``Meta.database``. Field values live in a private DataStore and are
    exposed as attributes via __getattr__/__setattr__.
    """

    struct = None       # Structure instance: field name -> default value
    __store = None      # per-instance DataStore, created in __init__

    class Meta:
        # Subclasses point this at a pymongo database (see get_database()).
        database = None

    class DataStore:
        """Plain attribute bag holding one document's field values."""

        def get(self, key):
            return self.__dict__.get(key)

        def set(self, key, value):
            self.__dict__[key] = value

        def to_dict(self):
            return self.__dict__

    def __init__(self, key, data=None):
        self._key = key
        self.__store = self.DataStore()
        self._init_from_dict(data)

    def _init_from_dict(self, data):
        """Populate the store from ``data``, falling back to struct defaults."""
        if not isinstance(self.struct, Structure):
            raise RecorderException("{0} struct is not a defined".format(self.__class__.__name__))
        if not isinstance(data, dict):
            data = dict()
        for field, default in self.struct.to_dict().items():
            # BUGFIX: use the stored value whenever the field is present,
            # so legitimate falsy values (0, '', False, None) survive a
            # reload instead of being silently reset to the default.
            self.__store.set(field, data.get(field, default))

    def key(self):
        """Document key as a string (None before the first insert)."""
        return self._key

    def pk(self):
        """Document key as a bson ObjectId."""
        return ObjectId(self.key())

    def __str__(self):
        # BUGFIX: instances have no ``__name__``; the old code raised an
        # AttributeError through __getattr__. Use the class name instead.
        return type(self).__name__

    def __getattr__(self, key):
        # Only consulted when normal lookup fails: struct fields are
        # served from the data store, everything else is a real error.
        if key in list(self.struct.keys()):
            return self.__store.get(key)
        # BUGFIX: object has no __getattr__, so the old super() call
        # itself raised a confusing AttributeError; raise a clear one.
        raise AttributeError(key)

    def __setattr__(self, key, value):
        if key in list(self.struct.keys()):
            self.__store.set(key, value)
        else:
            super(Recorder, self).__setattr__(key, value)

    @classmethod
    def colname(cls):
        """Collection name: CamelCase class name -> snake_case."""
        return re.sub('(?!^)([A-Z]+)', r'_\1', cls.__name__).lower()

    @classmethod
    def collection(cls):
        """The pymongo collection backing this recorder."""
        return cls.Meta.database[cls.colname()]

    @classmethod
    def new(cls, data=None):
        """Build an unsaved instance (no key yet)."""
        return cls(None, data)

    @classmethod
    def create(cls, data):
        """Build an instance from a raw document, extracting its _id as key."""
        key = None
        if '_id' in data:
            key = data['_id']
            if isinstance(key, ObjectId):
                key = str(key)
        return cls(key, data)

    @classmethod
    def get(cls, key, *args, **kwargs):
        """Fetch by ObjectId string; return None when absent."""
        data = cls.collection().find_one({'_id': ObjectId(key)}, *args, **kwargs)
        if not data:
            return None
        return cls(key, data)

    @classmethod
    def get_by(cls, key, value, *args, **kwargs):
        """Fetch the first document where ``key == value``; None when absent."""
        data = cls.collection().find_one({key: value}, *args, **kwargs)
        if not data:
            return None
        return cls.create(data)

    @classmethod
    def find(cls, *args, **kwargs):
        """Return a list of instances matching a pymongo find() query."""
        return [cls.create(doc) for doc in cls.collection().find(*args, **kwargs)]

    def save(self):
        """Insert when new, otherwise update. Returns True."""
        if not self.key():
            return self.insert()
        return self.update()

    def insert(self):
        """Insert the document and remember its new _id."""
        result = self.collection().insert_one(self.to_mongo())
        self._key = str(result.inserted_id)
        self.__store.set('_id', self.key())
        return True

    def update(self, upsert=False):
        """$set all fields; falls back to insert() for unsaved instances."""
        if not self.key():
            return self.insert()
        self.collection().update_one({'_id': self.pk()}, {'$set': self.to_mongo()}, upsert=upsert)
        return True

    def delete(self):
        """Remove the document; False when the instance was never saved."""
        if not self.key():
            return False
        self.collection().delete_one({'_id': self.pk()})
        return True

    @classmethod
    def exists(cls, key, value):
        """True if any document has ``key == value``."""
        return len(cls.find(filter={key: value}, limit=1)) > 0

    def to_dict(self):
        """The live field mapping (mutating it affects this instance)."""
        return self.__store.to_dict()

    def to_mongo(self):
        """Prepare the store for persistence.

        NOTE: mutates the live store — stamps created_at (first save only)
        and modified_at, and strips _id so Mongo keeps ownership of it.
        """
        store = self.to_dict()
        now = datetime.datetime.now()
        if 'created_at' not in store:
            store['created_at'] = now
        store['modified_at'] = now
        store.pop('_id', None)
        return store
| teitei-tk/ice-pick | icePick/recorder.py | Python | mit | 4,310 |
import graphene
from graphene_django.types import DjangoObjectType
from django.contrib.auth import (get_user_model, authenticate,
login as auth_login, logout as auth_logout,
update_session_auth_hash)
from django.contrib.auth.forms import (UserCreationForm, AuthenticationForm,
PasswordChangeForm)
from cryptographer.models import Cryptographer
from cryptographer.schema import CryptographerType
from helpers.schema import FormError, list_errors
class LoginUser(graphene.Mutation):
    """GraphQL mutation: authenticate a user with username/password.

    Fails (with form errors) when the requester is already logged in or
    when Django's AuthenticationForm rejects the credentials.
    """

    class Arguments:
        username = graphene.String()
        password = graphene.String()

    # Mutation payload: the logged-in cryptographer (or None), a success
    # flag, and a list of form errors.
    user = graphene.Field(CryptographerType)
    success = graphene.Boolean()
    errors = graphene.List(FormError)

    def mutate(self, info, username, password):
        # Reject a second login on an already-authenticated session.
        if info.context.user.is_authenticated:
            return LoginUser(
                user=None,
                success=False,
                errors=list_errors({ "__all__": ['Cannot login when already logged in']})
            )
        # AuthenticationForm takes the request as its first argument.
        form = AuthenticationForm(info.context, { "username": username, "password": password })
        if form.is_valid():
            # (removed dead local ``success = True`` — it was never read)
            user = form.get_user()
            auth_login(info.context, user)
            return LoginUser(user=user.cryptographer, success=True, errors=[])
        else:
            return LoginUser(user=None, success=False, errors=list_errors(form.errors))
class SignupUser(graphene.Mutation):
    """GraphQL mutation: register a new user and log them in.

    Creates the Django user via UserCreationForm, attaches a Cryptographer
    profile, then authenticates and starts the session.
    """

    class Arguments:
        username = graphene.String()
        password1 = graphene.String()
        password2 = graphene.String()

    # Mutation payload.
    user = graphene.Field(CryptographerType)
    success = graphene.Boolean()
    errors = graphene.List(FormError)

    def mutate(self, info, username, password1, password2):
        # A logged-in session cannot create another account.
        if info.context.user.is_authenticated:
            return SignupUser(
                user=None,
                success=False,
                errors=list_errors({ "__all__": ['Cannot signup when already logged in']})
            )
        form = UserCreationForm({
            "username": username,
            "password1": password1,
            "password2": password2
        })
        if form.is_valid():
            form.save()
            user = authenticate(username=username, password=password1)
            # create a Cryptographer and link it to the user
            c = Cryptographer(user=user)
            c.save()
            auth_login(info.context, user)
            return SignupUser(user=c, success=True, errors=[])
        else:
            return SignupUser(user=None, success=False, errors=list_errors(form.errors))
class LogoutUser(graphene.Mutation):
    """GraphQL mutation: end the current session (always succeeds)."""

    success = graphene.Boolean()
    user = graphene.Field(CryptographerType)

    def mutate(self, info):
        # Django's logout is a no-op for anonymous users, so no guard needed.
        auth_logout(info.context)
        return LogoutUser(success=True, user=None)
class ChangePassword(graphene.Mutation):
    """GraphQL mutation: change the logged-in user's password."""

    class Arguments:
        old_password = graphene.String()
        new_password1 = graphene.String()
        new_password2 = graphene.String()

    success = graphene.Boolean()
    errors = graphene.List(FormError)

    def mutate(self, info, old_password, new_password1, new_password2):
        form = PasswordChangeForm(info.context.user, data={
            "old_password": old_password,
            "new_password1": new_password1,
            "new_password2": new_password2
        })
        if form.is_valid():
            form.save()
            # Keep the session valid after the password hash changes.
            update_session_auth_hash(info.context, form.user)
            return ChangePassword(success=True, errors=[])
        else:
            return ChangePassword(success=False, errors=list_errors(form.errors))
class Mutation(object):
    """Mixin exposing the auth mutations on the project's root schema."""
    login_user = LoginUser.Field()
    signup_user = SignupUser.Field()
    logout_user = LogoutUser.Field()
    change_password = ChangePassword.Field()
| pshrmn/cryptonite | cryptonite/user_auth/schema.py | Python | mit | 3,906 |
#!/usr/bin/env python3
# This script prints a new "servers.json" to stdout.
# It prunes the offline servers from the existing list (note: run with Tor proxy to keep .onions),
# and adds new servers from provided file(s) of candidate servers.
# A file of new candidate servers can be created via e.g.:
# $ ./electrum_ltc/scripts/servers.py > reply.txt
import asyncio
import sys
import json
from electrum_ltc.network import Network
from electrum_ltc.util import create_and_start_event_loop, log_exceptions
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc import constants
# Parse positional arguments by hand; print usage and exit on any failure.
try:
    fname1 = sys.argv[1]
    fname2 = sys.argv[2] if len(sys.argv) > 2 else None
except Exception:
    print("usage: update_default_servers.py <file1> [<file2>]")
    print(" - the file(s) should contain json hostmaps for new servers to be added")
    print(" - if two files are provided, their intersection is used (peers found in both).\n"
          " file1 should have the newer data.")
    sys.exit(1)
def get_newly_added_servers(fname1, fname2=None):
    """Load the candidate-server hostmap(s) from JSON file(s).

    With one file, its hostmap is returned as-is. With two files, only
    hosts present in both are kept, taking the entries from fname1
    (which should hold the newer data).
    """
    with open(fname1) as f:
        hostmap = json.load(f)
    if fname2 is not None:
        with open(fname2) as f:
            other = json.load(f)
        keep = set(hostmap) & set(other)
        hostmap = {host: v for host, v in hostmap.items() if host in keep}
    return hostmap
# testnet?
#constants.set_testnet()
config = SimpleConfig({'testnet': False})
# Network needs a running asyncio event loop in a background thread.
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network(config)
network.start()
@log_exceptions
async def f():
    """Prune dead default servers, merge in new candidates, print JSON."""
    try:
        # prune existing servers
        old_servers_all = constants.net.DEFAULT_SERVERS
        old_servers_online = await network.prune_offline_servers(constants.net.DEFAULT_SERVERS)
        # add new servers
        newly_added_servers = get_newly_added_servers(fname1, fname2)
        res_servers = {**old_servers_online, **newly_added_servers}
        # The merged hostmap goes to stdout; progress stats go to stderr.
        print(json.dumps(res_servers, indent=4, sort_keys=True))
        print(f"got reply from {len(old_servers_online)}/{len(old_servers_all)} old servers", file=sys.stderr)
        print(f"len(newly_added_servers)={len(newly_added_servers)}. total: {len(res_servers)}", file=sys.stderr)
    finally:
        # Always unblock the main thread's event-loop shutdown.
        stopping_fut.set_result(1)

asyncio.run_coroutine_threadsafe(f(), loop)
| pooler/electrum-ltc | electrum_ltc/scripts/update_default_servers.py | Python | mit | 2,380 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Frederic Branczyk fbranczyk@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### END LICENSE
from indicator import Indicator
from gi.repository import Gtk
def main():
    """Create the indicator, then hand control to the GTK main loop."""
    # Keep a local reference so the indicator isn't garbage-collected
    # while Gtk.main() runs.
    indicator = Indicator()
    Gtk.main()
| flower-pot/xf-indicator | xf_indicator/__init__.py | Python | mit | 1,347 |
from django.http import JsonResponse
from user.models import User
from django.db.models import Q
from django.apps import apps
def user_filter(request, *args, **kwargs):
    """Return JSON listing users tagged with any of the requester's followed tags.

    Response shape: {'filtered_users': [{'username': ..., 'img_url': ...}, ...]}
    """
    # TODO: eventually accept POST so the caller can choose which tags to
    # include in the result.
    query = Q()
    user = request.user
    tags = user.follow_tag.all()
    if len(tags) > 0:
        Tag = apps.get_model('tag', 'Tag')
        tag_query = None
        # OR together one Q(tag=...) condition per followed tag.
        for t in tags:
            tag = Tag.objects.get(pk=t)
            if tag_query is None:
                tag_query = Q(tag=tag)
            else:
                tag_query = tag_query | Q(tag=tag)
        query = query & tag_query
    # Ideally this would be ordered by a relevance/score field, but no
    # such field exists yet, so fall back to id order. TODO: add one.
    users = User.objects.all() \
        .filter(query) \
        .order_by('-id') \
        .distinct()
    res = {'filtered_users': []}
    for u in users:
        res['filtered_users'].append({
            'username': u.username,
            'img_url': u.get_image_url()
        })
    return JsonResponse(res)
| internship2016/sovolo | app/user/api.py | Python | mit | 1,184 |