| repo_name (string, lengths 5–92) | path (string, lengths 4–221) | copies (string, 19 classes) | size (string, lengths 4–6) | content (string, lengths 766–896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| electrumalt/electrum-ixc | gui/qt/util.py | 1 | 6755 |
from electrum_ixc.i18n import _
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import os.path
import time
import traceback
import sys
import threading
import platform
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
class WaitingDialog(QThread):
def __init__(self, parent, message, run_task, on_success=None, on_complete=None):
QThread.__init__(self)
self.parent = parent
self.d = QDialog(parent)
self.d.setWindowTitle('Please wait')
l = QLabel(message)
vbox = QVBoxLayout(self.d)
vbox.addWidget(l)
self.run_task = run_task
self.on_success = on_success
self.on_complete = on_complete
self.d.connect(self.d, SIGNAL('done'), self.close)
self.d.show()
def run(self):
self.error = None
try:
self.result = self.run_task()
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.error = str(e)
self.d.emit(SIGNAL('done'))
def close(self):
self.d.accept()
if self.error:
QMessageBox.warning(self.parent, _('Error'), self.error, _('OK'))
else:
if self.on_success:
if type(self.result) is not tuple:
self.result = (self.result,)
self.on_success(*self.result)
if self.on_complete:
self.on_complete()
class Timer(QThread):
def run(self):
while True:
self.emit(SIGNAL('timersignal'))
time.sleep(0.5)
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
apply(self.func,())
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(20)
self.alt = None
self.clicked.connect(self.onclick)
def set_alt(self, func):
self.alt = func
def onclick(self):
if self.alt:
apply(self.alt)
else:
QMessageBox.information(self, 'Help', self.help_text, 'OK')
def close_button(dialog, label=None):
hbox = QHBoxLayout()
hbox.addStretch(1)
b = QPushButton(label or _("Close"))
hbox.addWidget(b)
b.clicked.connect(dialog.close)
b.setDefault(True)
return hbox
def ok_cancel_buttons2(dialog, ok_label=None, cancel_label=None):
hbox = QHBoxLayout()
hbox.addStretch(1)
b = QPushButton(cancel_label or _('Cancel'))
hbox.addWidget(b)
b.clicked.connect(dialog.reject)
b = QPushButton(ok_label or _("OK"))
hbox.addWidget(b)
b.clicked.connect(dialog.accept)
b.setDefault(True)
return hbox, b
def ok_cancel_buttons(dialog, ok_label=None, cancel_label=None):
hbox, b = ok_cancel_buttons2(dialog, ok_label, cancel_label)
return hbox
def line_dialog(parent, title, label, ok_label, default=None):
dialog = QDialog(parent)
dialog.setMinimumWidth(500)
dialog.setWindowTitle(title)
dialog.setModal(1)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(ok_cancel_buttons(dialog, ok_label))
if dialog.exec_():
return unicode(txt.text())
def text_dialog(parent, title, label, ok_label, default=None):
from qrtextedit import ScanQRTextEdit
dialog = QDialog(parent)
dialog.setMinimumWidth(500)
dialog.setWindowTitle(title)
dialog.setModal(1)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = ScanQRTextEdit(parent)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(ok_cancel_buttons(dialog, ok_label))
if dialog.exec_():
return unicode(txt.toPlainText())
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses:
address_e.setText(addresses[0])
def func():
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', unicode(os.path.expanduser('~')))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = unicode(filename_e.text())
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p = unicode( QFileDialog.getSaveFileName(None, select_msg, text, _filter))
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = unicode(filename_e.text())
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class MyTreeWidget(QTreeWidget):
def __init__(self, parent):
QTreeWidget.__init__(self, parent)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.itemActivated.connect(self.on_activated)
def on_activated(self, item):
if not item: return
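# Probe the viewport in 5-pixel steps to find where the activated item starts
# on screen, then scan down to its bottom edge and re-emit the context-menu
# signal just inside it, so the menu opens next to the activated item.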
for i in range(0,self.viewport().height()/5):
if self.itemAt(QPoint(0,i*5)) == item:
break
else:
return
for j in range(0,30):
if self.itemAt(QPoint(0,i*5 + j)) != item:
break
self.emit(SIGNAL('customContextMenuRequested(const QPoint&)'), QPoint(50, i*5 + j - 1))
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done", _('OK')))
t.start()
app.exec_()
| gpl-3.0 | -9,210,945,300,034,786,000 | 26.684426 | 133 | 0.610067 | false | 3.518229 | false | false | false |
| kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/internals/construction.py | 1 | 26379 |
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from collections import OrderedDict, abc
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
import pandas.compat as compat
from pandas.compat import PY36, raise_with_traceback
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_extension_type,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndexClass,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core.dtypes.missing import isna
from pandas.core import algorithms, common as com
from pandas.core.arrays import Categorical, ExtensionArray, period_array
from pandas.core.index import (
Index,
_get_objs_combined_axis,
_union_indexes,
ensure_index,
)
from pandas.core.indexes import base as ibase
from pandas.core.internals import (
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
from pandas.core.internals.arrays import extract_array
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [ensure_index(columns), index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def masked_rec_array_to_mgr(data, index, columns, dtype, copy):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy()
return mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def init_ndarray(values, index, columns, dtype=None, copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype(
dtype
):
if not hasattr(values, "dtype"):
values = prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
elif is_extension_array_dtype(values):
# GH#19157
if columns is None:
columns = [0]
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = prep_ndarray(values, copy=copy)
if dtype is not None:
if not is_dtype_equal(values.dtype, dtype):
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError(
"failed to cast to '{dtype}' (Exception "
"was: {orig})".format(dtype=dtype, orig=orig)
)
raise_with_traceback(e)
index, columns = _get_axes(*values.shape, index=index, columns=columns)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
if values.ndim == 2 and values.shape[0] != 1:
# transpose and separate blocks
dvals_list = [maybe_infer_to_datetimelike(row) for row in values]
for n in range(len(dvals_list)):
if isinstance(dvals_list[n], np.ndarray):
dvals_list[n] = dvals_list[n].reshape(1, -1)
from pandas.core.internals.blocks import make_block
# TODO: What about re-joining object columns?
block_values = [
make_block(dvals_list[n], placement=[n]) for n in range(len(dvals_list))
]
else:
datelike_vals = maybe_infer_to_datetimelike(values)
block_values = [datelike_vals]
else:
block_values = [values]
return create_block_manager_from_blocks(block_values, [columns, index])
def init_dict(data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
from pandas.core.series import Series
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isnull()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or np.issubdtype(dtype, np.flexible):
# GH#1783
nan_dtype = object
else:
nan_dtype = dtype
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
else:
keys = com.dict_keys_to_ordered_list(data)
columns = data_names = Index(keys)
arrays = (com.maybe_iterable_to_list(data[k]) for k in keys)
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
arrays = [
arr if not isinstance(arr, ABCIndexClass) else arr._data for arr in arrays
]
arrays = [
arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
]
return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
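# A minimal sketch, not part of the upstream pandas module: init_dict() is the
# path behind DataFrame-from-dict construction, and the alignment it performs
# can be seen through the public API (assuming pandas 0.25.x). The helper is
# only defined here, never called at import time.
def _demo_init_dict_alignment():
    import pandas as pd
    a = pd.Series([1, 2], index=["x", "y"])
    b = pd.Series([3, 4], index=["y", "z"])
    # The union of the Series indexes becomes the frame index; positions
    # missing from either Series are filled with NaN during _homogenize.
    print(pd.DataFrame({"a": a, "b": b}))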
# ---------------------------------------------------------------------
def prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, ABCSeries, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], "len"):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except (ValueError, TypeError):
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError("Must pass 2-d input")
return values
def _homogenize(data, index, dtype=None):
oindex = None
homogenized = []
for val in data:
if isinstance(val, ABCSeries):
if dtype is not None:
val = val.astype(dtype)
if val.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
val = val.reindex(index, copy=False)
else:
if isinstance(val, dict):
if oindex is None:
oindex = index.astype("O")
if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
val = com.dict_compat(val)
else:
val = dict(val)
val = lib.fast_multiget(val, oindex.values, default=np.nan)
val = sanitize_array(
val, index, dtype=dtype, copy=False, raise_cast_failure=False
)
homogenized.append(val)
return homogenized
def extract_index(data):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
have_ordered = False
for val in data:
if isinstance(val, ABCSeries):
have_series = True
indexes.append(val.index)
elif isinstance(val, dict):
have_dicts = True
if isinstance(val, OrderedDict):
have_ordered = True
indexes.append(list(val.keys()))
elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(val))
if not indexes and not raw_lengths:
raise ValueError("If using all scalar values, you must pass an index")
if have_series:
index = _union_indexes(indexes)
elif have_dicts:
index = _union_indexes(indexes, sort=not (compat.PY36 or have_ordered))
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError("arrays must all be same length")
if have_dicts:
raise ValueError(
"Mixing dicts with non-Series may lead to ambiguous ordering."
)
if have_series:
if lengths[0] != len(index):
msg = (
"array length {length} does not match index "
"length {idx_len}".format(length=lengths[0], idx_len=len(index))
)
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
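# A minimal sketch, not part of the upstream pandas module: the behaviours
# extract_index() enforces, seen through the public DataFrame constructor
# (assuming pandas 0.25.x). Only defined here; call it manually to run.
def _demo_extract_index():
    import pandas as pd
    try:
        pd.DataFrame({"a": 1, "b": 2})  # all scalars and no index
    except ValueError as err:
        print(err)  # "If using all scalar values, you must pass an index"
    # With an explicit index, the scalars broadcast instead of raising.
    print(pd.DataFrame({"a": 1, "b": 2}, index=[0, 1]))
    try:
        pd.DataFrame({"a": [1, 2], "b": [3, 4, 5]})
    except ValueError as err:
        print(err)  # "arrays must all be same length"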
def reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (
columns is not None
and len(columns)
and arr_columns is not None
and len(arr_columns)
):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def get_names_from_index(data):
has_some_name = any(getattr(s, "name", None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index = list(range(len(data)))
count = 0
for i, s in enumerate(data):
n = getattr(s, "name", None)
if n is not None:
index[i] = n
else:
index[i] = "Unnamed {count}".format(count=count)
count += 1
return index
def _get_axes(N, K, index, columns):
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns.
"""
if isinstance(data, ABCDataFrame):
if columns is not None:
arrays = [
data._ixs(i, axis=1).values
for i, col in enumerate(data.columns)
if col in columns
]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], abc.Mapping):
return _list_of_dict_to_arrays(
data, columns, coerce_float=coerce_float, dtype=dtype
)
elif isinstance(data[0], ABCSeries):
return _list_of_series_to_arrays(
data, columns, coerce_float=coerce_float, dtype=dtype
)
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif (
isinstance(data, (np.ndarray, ABCSeries, Index))
and data.dtype.names is not None
):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = [tuple(x) for x in data]
return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
# gh-26429 do not raise user-facing AssertionError
try:
result = _convert_object_array(
content, columns, dtype=dtype, coerce_float=coerce_float
)
except AssertionError as e:
raise ValueError(e) from e
return result
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _get_objs_combined_axis(data, sort=False)
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, "index", None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = com.values_from_object(s)
aligned_values.append(algorithms.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(
content, columns, dtype=dtype, coerce_float=coerce_float
)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
"""Convert list of dicts to numpy arrays
if `columns` is not passed, column names are inferred from the records
- for OrderedDict and (on Python>=3.6) dicts, the column names match
the key insertion-order from the first record to the last.
- For other kinds of dict-likes, the keys are lexically sorted.
Parameters
----------
data : iterable
collection of records (OrderedDict, dict)
columns: iterables or None
coerce_float : bool
dtype : np.dtype
Returns
-------
tuple
arrays, columns
"""
if columns is None:
gen = (list(x.keys()) for x in data)
types = (dict, OrderedDict) if PY36 else OrderedDict
sort = not any(isinstance(d, types) for d in data)
columns = lib.fast_unique_multiple_list_gen(gen, sort=sort)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(
content, columns, dtype=dtype, coerce_float=coerce_float
)
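# A minimal sketch, not part of the upstream pandas module: the column-name
# inference described in the docstring above, seen through the public
# constructor (assuming pandas 0.25.x on Python 3.6+, where plain dicts keep
# insertion order). Only defined here; call it manually to run.
def _demo_list_of_dict_columns():
    import pandas as pd
    records = [{"b": 1, "a": 2}, {"c": 3, "a": 4}]
    # Keys keep first-seen insertion order across records -> ['b', 'a', 'c'].
    print(list(pd.DataFrame(records).columns))
    # An explicit columns argument bypasses the inference entirely.
    print(list(pd.DataFrame(records, columns=["a", "b"]).columns))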
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = ibase.default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
"{col:d} columns passed, passed data had "
"{con} columns".format(col=len(columns), con=len(content))
)
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays, columns
# ---------------------------------------------------------------------
# Series-Based
def sanitize_index(data, index, copy=False):
"""
Sanitize an index type to return an ndarray of the underlying, pass
through a non-Index.
"""
if index is None:
return data
if len(data) != len(index):
raise ValueError("Length of values does not match length of index")
if isinstance(data, ABCIndexClass) and not copy:
pass
elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
data = data._values
if copy:
data = data.copy()
elif isinstance(data, np.ndarray):
# coerce datetimelike types
if data.dtype.kind in ["M", "m"]:
data = sanitize_array(data, index, copy=copy)
return data
def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False):
"""
Sanitize input data to an ndarray, copy if specified, coerce to the
dtype if specified.
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
if isinstance(data, ma.MaskedArray):
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
# extract ndarray or ExtensionArray, ensure we have no PandasArray
data = extract_array(data, extract_numpy=True)
# GH#846
if isinstance(data, np.ndarray):
if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
# possibility of nan -> garbage
try:
subarr = _try_cast(data, dtype, copy, True)
except ValueError:
if copy:
subarr = data.copy()
else:
subarr = np.array(data, copy=False)
else:
# we will try to copy by definition here
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
elif isinstance(data, ExtensionArray):
# it is already ensured above this is not a PandasArray
subarr = data
if dtype is not None:
subarr = subarr.astype(dtype, copy=copy)
elif copy:
subarr = subarr.copy()
return subarr
elif isinstance(data, (list, tuple)) and len(data) > 0:
if dtype is not None:
try:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
except Exception:
if raise_cast_failure: # pragma: no cover
raise
subarr = np.array(data, dtype=object, copy=copy)
subarr = lib.maybe_convert_objects(subarr)
else:
subarr = maybe_convert_platform(data)
subarr = maybe_cast_to_datetime(subarr, dtype)
elif isinstance(data, range):
# GH#16804
arr = np.arange(data.start, data.stop, data.step, dtype="int64")
subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
else:
subarr = _try_cast(data, dtype, copy, raise_cast_failure)
# scalar like, GH
if getattr(subarr, "ndim", 0) == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# figure out the dtype from the value (upcast if necessary)
if dtype is None:
dtype, value = infer_dtype_from_scalar(value)
else:
# need to possibly convert the value here
value = maybe_cast_to_datetime(value, dtype)
subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
else:
return subarr.item()
# the result that we want
elif subarr.ndim == 1:
if index is not None:
# a 1-element ndarray
if len(subarr) != len(index) and len(subarr) == 1:
subarr = construct_1d_arraylike_from_scalar(
subarr[0], len(index), subarr.dtype
)
elif subarr.ndim > 1:
if isinstance(data, np.ndarray):
raise Exception("Data must be 1-dimensional")
else:
subarr = com.asarray_tuplesafe(data, dtype=dtype)
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, str):
# GH#16605
# If not empty convert the data to dtype
# GH#19853: If data is a scalar, subarr has already the result
if not lib.is_scalar(data):
if not np.all(isna(data)):
data = np.array(data, dtype=dtype, copy=False)
subarr = np.array(data, dtype=object, copy=copy)
if (
not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype))
and is_object_dtype(subarr.dtype)
and not is_object_dtype(dtype)
):
inferred = lib.infer_dtype(subarr, skipna=False)
if inferred == "period":
try:
subarr = period_array(subarr)
except IncompatibleFrequency:
pass
return subarr
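# A minimal sketch, not part of the upstream pandas module: sanitize_array()
# backs the Series constructor, and its scalar-broadcast, range and
# dtype-coercion paths show up through the public API (assuming pandas
# 0.25.x). Only defined here; call it manually to run.
def _demo_sanitize_array_paths():
    import pandas as pd
    # A scalar plus an index broadcasts via construct_1d_arraylike_from_scalar.
    print(pd.Series(5, index=["a", "b", "c"]))
    # A range is materialised as an int64 ndarray (the GH#16804 branch above).
    print(pd.Series(range(3)).dtype)
    # A requested dtype is applied through _try_cast.
    print(pd.Series([1, 2, 3], dtype="float64").dtype)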
def _try_cast(arr, dtype, copy, raise_cast_failure):
"""
Convert input to numpy ndarray and optionally cast to a given dtype.
Parameters
----------
arr : array-like
dtype : np.dtype, ExtensionDtype or None
copy : bool
If False, don't copy the data if not needed.
raise_cast_failure : bool
If True, and if a dtype is specified, raise errors during casting.
Otherwise an object array is returned.
"""
# perf shortcut as this is the most common case
if isinstance(arr, np.ndarray):
if maybe_castable(arr) and not copy and dtype is None:
return arr
try:
# GH#15832: Check if we are requesting a numeric dtype and
# that we can convert the data to the requested dtype.
if is_integer_dtype(dtype):
subarr = maybe_cast_to_integer_array(arr, dtype)
subarr = maybe_cast_to_datetime(arr, dtype)
# Take care in creating object arrays (but iterators are not
# supported):
if is_object_dtype(dtype) and (
is_list_like(subarr)
and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
):
subarr = construct_1d_object_array_from_listlike(subarr)
elif not is_extension_type(subarr):
subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
except OutOfBoundsDatetime:
# in case of out of bound datetime64 -> always raise
raise
except (ValueError, TypeError):
if is_categorical_dtype(dtype):
# We *do* allow casting to categorical, since we know
# that Categorical is the only array type for 'category'.
subarr = Categorical(arr, dtype.categories, ordered=dtype._ordered)
elif is_extension_array_dtype(dtype):
# create an extension array from its dtype
array_type = dtype.construct_array_type()._from_sequence
subarr = array_type(arr, dtype=dtype, copy=copy)
elif dtype is not None and raise_cast_failure:
raise
else:
subarr = np.array(arr, dtype=object, copy=copy)
return subarr
| apache-2.0 | -9,052,426,239,437,025,000 | 31.566667 | 88 | 0.585731 | false | 3.995002 | false | false | false |
| michaelkuty/mbot | mbot/backends/slack.py | 1 | 3363 |
import logging
from mbot.event import Event
from mbot.state import User
from mbot.utils.packages import install_package
from .base import Dispatcher
LOG = logging.getLogger(__name__)
class Slack(Dispatcher):
"""Yet another simple bot
Uses slack RTM and calls middlewares for all messages
:param slack_token: slack token
"""
@property
def sc(self):
"""Returns slack client"""
if not hasattr(self, "client"):
try:
from slackclient import SlackClient
except ImportError:
install_package("slackclient")
from slackclient import SlackClient
self.client = SlackClient(self.conf['token'])
return self.client
def connect(self):
"""Returns True if connection is successfull"""
try:
return self.sc.rtm_connect()
except Exception as e:
LOG.exception(e)
def upload(self, data, initial_comment=None, channel=None):
if isinstance(data, list):
results = []
for datum in data:
results.append(self.sc.api_call(
"files.upload",
channel=channel,
**datum))
return results
response = self.sc.api_call(
"files.upload",
channel=channel,
attachments=data)
LOG.debug(response)
return response
def read(self):
try:
events = self.sc.rtm_read()
except:
self.connect()
try:
events = self.sc.rtm_read()
except Exception as e:
LOG.exception(e)
events = []
return self.process_events(events)
def send(self, *args, **kwargs):
return self.sc.rtm_send_message(*args, **kwargs)
def reply(self, message, text, attachments=None, *args, **kwargs):
"""Reply to a message"""
if 'channel' not in message.body:
LOG.error("Cannot reply on message %s" % message)
return
if attachments:
return self.upload(attachments, text, message.body['channel'])
return self.send(message.body['channel'], text, *args, **kwargs)
@property
def author_id(self):
if not hasattr(self, "_author_id"):
self._author_id = self.sc.api_call("auth.test")['user_id']
return self._author_id
def process_events(self, events):
"""Returns new events
"""
_events = []
for event in events:
# skip own events
if 'user' in event and event['user'] == self.author_id:
continue
# skip event types
msg_type = event.get("type", None)
if msg_type and self.bot.process_types:
if msg_type not in self.process_types:
continue
_events.append(Event(self.bot, self, event))
return _events
def get_users(self):
"""Returns dictionary of users"""
try:
members = self.sc.api_call("users.list")['members']
except:
members = []
return {u['id']: User(**{
'name': u['name'],
'real_name': u['real_name'],
'is_bot': u['is_bot'],
'id': u['id'],
}) for u in members}
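# A minimal sketch, not part of the original module: a rough polling loop for
# the dispatcher above. The Dispatcher base class is not shown here, so the
# constructor call (a bot object plus a conf dict with a 'token' key) is an
# assumption inferred from how self.bot and self.conf are used; treat it as a
# hypothetical signature. Only defined here; call it manually to run.
def _demo_slack_poll(bot, token):
    import time
    slack = Slack(bot, {'token': token})  # hypothetical constructor arguments
    if not slack.connect():
        LOG.error("Could not connect to Slack RTM")
        return
    while True:
        for event in slack.read():
            LOG.info("new event: %s", event)
        time.sleep(1)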
| mit | -8,143,825,461,480,550,000 | 25.480315 | 74 | 0.531966 | false | 4.311538 | false | false | false |
| hiconversion/spark-ec2 | spark_ec2.py | 1 | 65557 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division, print_function, with_statement
import codecs
import hashlib
import itertools
import logging
import os
import os.path
import pipes
import random
import shutil
import string
from stat import S_IRUSR
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
if sys.version < "3":
from urllib2 import urlopen, Request, HTTPError
else:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
raw_input = input
xrange = range
SPARK_EC2_VERSION = "2.1.1"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
VALID_SPARK_VERSIONS = set([
"0.7.3",
"0.8.0",
"0.8.1",
"0.9.0",
"0.9.1",
"0.9.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.1.0",
"1.1.1",
"1.2.0",
"1.2.1",
"1.3.0",
"1.3.1",
"1.4.0",
"1.4.1",
"1.5.0",
"1.5.1",
"1.5.2",
"1.6.0",
"1.6.1",
"1.6.2",
"1.6.3",
"2.0.0-preview",
"2.0.0",
"2.0.1",
"2.0.2",
"2.1.0",
"2.1.1",
"2.2.0",
"2.2.1",
"2.3.0",
"2.3.1",
"2.3.2",
"2.4.0",
"2.4.1",
"2.4.2",
"2.4.3",
"2.4.4",
"2.4.5",
"2.4.6",
"2.4.7",
"3.0.0",
"3.0.1",
"3.0.2",
"3.1.1"
])
SPARK_TACHYON_MAP = {
"1.0.0": "0.4.1",
"1.0.1": "0.4.1",
"1.0.2": "0.4.1",
"1.1.0": "0.5.0",
"1.1.1": "0.5.0",
"1.2.0": "0.5.0",
"1.2.1": "0.5.0",
"1.3.0": "0.5.0",
"1.3.1": "0.5.0",
"1.4.0": "0.6.4",
"1.4.1": "0.6.4",
"1.5.0": "0.7.1",
"1.5.1": "0.7.1",
"1.5.2": "0.7.1",
"1.6.0": "0.8.2",
"1.6.1": "0.8.2",
"1.6.2": "0.8.2",
"2.0.0-preview": ""
}
DEFAULT_SPARK_VERSION = SPARK_EC2_VERSION
DEFAULT_SPARK_GITHUB_REPO = "https://github.com/apache/spark"
# Default location to get the spark-ec2 scripts (and ami-list) from
DEFAULT_SPARK_EC2_GITHUB_REPO = "https://github.com/amplab/spark-ec2"
DEFAULT_SPARK_EC2_BRANCH = "branch-2.0"
def setup_external_libs(libs):
"""
Download external libraries from PyPI to SPARK_EC2_DIR/lib/ and prepend them to sys.path.
"""
PYPI_URL_PREFIX = "https://pypi.python.org/packages/source"
SPARK_EC2_LIB_DIR = os.path.join(SPARK_EC2_DIR, "lib")
if not os.path.exists(SPARK_EC2_LIB_DIR):
print("Downloading external libraries that spark-ec2 needs from PyPI to {path}...".format(
path=SPARK_EC2_LIB_DIR
))
print("This should be a one-time operation.")
os.mkdir(SPARK_EC2_LIB_DIR)
for lib in libs:
versioned_lib_name = "{n}-{v}".format(n=lib["name"], v=lib["version"])
lib_dir = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name)
if not os.path.isdir(lib_dir):
tgz_file_path = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name + ".tar.gz")
print(" - Downloading {lib}...".format(lib=lib["name"]))
lib_url = lib.get("url",
"{prefix}/{first_letter}/{lib_name}/{lib_name}-{lib_version}.tar.gz".format(
prefix=PYPI_URL_PREFIX,
first_letter=lib["name"][:1],
lib_name=lib["name"],
lib_version=lib["version"])
)
print(lib_url)
download_stream = urlopen(lib_url)
with open(tgz_file_path, "wb") as tgz_file:
tgz_file.write(download_stream.read())
with open(tgz_file_path, "rb") as tar:
if hashlib.md5(tar.read()).hexdigest() != lib["md5"]:
print("ERROR: Got wrong md5sum for {lib}.".format(lib=lib["name"]), file=stderr)
sys.exit(1)
tar = tarfile.open(tgz_file_path)
tar.extractall(path=SPARK_EC2_LIB_DIR)
tar.close()
os.remove(tgz_file_path)
print(" - Finished downloading {lib}.".format(lib=lib["name"]))
sys.path.insert(1, lib_dir)
# Only PyPI libraries are supported.
external_libs = [
{
"name": "boto",
"version": "2.47.0",
"url": "https://pypi.python.org/packages/bc/ee/e674c01b10972765511705dc77b824b550646a30994cbc428087c4910ac3/boto-2.47.0.tar.gz#md5=c7ed986a6f369fe93f04ec62d16299ac",
"md5": "c7ed986a6f369fe93f04ec62d16299ac"
}
]
setup_external_libs(external_libs)
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
pass
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(
prog="spark-ec2",
version="%prog {v}".format(v=SPARK_EC2_VERSION),
usage="%prog [options] <action> <cluster_name>\n\n"
+ "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves")
parser.add_option(
"-s", "--slaves", type="int", default=1,
help="Number of slaves to launch (default: %default)")
parser.add_option(
"-w", "--wait", type="int",
help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
parser.add_option(
"-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option(
"-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option(
"-p", "--profile", default=None,
help="If you have multiple profiles (AWS or boto config), you can configure " +
"additional, named profiles by using this option (default: %default)")
parser.add_option(
"-t", "--instance-type", default="m1.large",
help="Type of instance to launch (default: %default). " +
"WARNING: must be 64-bit; small instances won't work")
parser.add_option(
"-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option(
"-r", "--region", default="us-east-1",
help="EC2 region used to launch instances in, or to find them in (default: %default)")
parser.add_option(
"-z", "--zone", default="",
help="Availability zone to launch instances in, or 'all' to spread " +
"slaves across multiple (an additional $0.01/Gb for bandwidth" +
"between zones applies) (default: a single zone chosen at random)")
parser.add_option(
"-a", "--ami",
help="Amazon Machine Image ID to use")
parser.add_option(
"-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
parser.add_option(
"--spark-git-repo",
default=DEFAULT_SPARK_GITHUB_REPO,
help="Github repo from which to checkout supplied commit hash (default: %default)")
parser.add_option(
"--spark-ec2-git-repo",
default=DEFAULT_SPARK_EC2_GITHUB_REPO,
help="Github repo from which to checkout spark-ec2 (default: %default)")
parser.add_option(
"--spark-ec2-git-branch",
default=DEFAULT_SPARK_EC2_BRANCH,
help="Github repo branch of spark-ec2 to use (default: %default)")
parser.add_option(
"--deploy-root-dir",
default=None,
help="A directory to copy into / on the first master. " +
"Must be absolute. Note that a trailing slash is handled as per rsync: " +
"If you omit it, the last directory of the --deploy-root-dir path will be created " +
"in / before copying its contents. If you append the trailing slash, " +
"the directory is not created and its contents are copied directly into /. " +
"(default: %default).")
parser.add_option(
"--hadoop-major-version", default="yarn",
help="Major version of Hadoop. Valid options are 1 (Hadoop 1.0.4), 2 (CDH 4.2.0), yarn " +
"(Hadoop 2.4.0) (default: %default)")
parser.add_option(
"-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option(
"--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option(
"--ebs-vol-size", metavar="SIZE", type="int", default=200,
help="Size (in GB) of each EBS volume.")
parser.add_option(
"--ebs-vol-type", default="gp2",
help="EBS volume type (e.g. 'gp2', 'standard').")
parser.add_option(
"--ebs-vol-num", type="int", default=1,
help="Number of EBS volumes to attach to each node as /vol[x]. " +
"The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs. " +
"EBS volumes are only attached if --ebs-vol-size > 0. " +
"Only support up to 8 EBS volumes.")
parser.add_option(
"--placement-group", type="string", default=None,
help="Which placement group to try and launch " +
"instances into. Assumes placement group is already " +
"created.")
parser.add_option(
"--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: %default)")
parser.add_option(
"--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option(
"--ganglia", action="store_true", default=True,
help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
"the Ganglia page will be publicly accessible")
parser.add_option(
"--no-ganglia", action="store_false", dest="ganglia",
help="Disable Ganglia monitoring for the cluster")
parser.add_option(
"-u", "--user", default="root",
help="The SSH user you want to connect as (default: %default)")
parser.add_option(
"--delete-groups", action="store_true", default=False,
help="When destroying a cluster, delete the security groups that were created")
parser.add_option(
"--use-existing-master", action="store_true", default=False,
help="Launch fresh slaves, but use an existing stopped master if possible")
parser.add_option(
"--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES. Not used if YARN " +
"is used as Hadoop major version (default: %default)")
parser.add_option(
"--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
"(e.g -Dspark.worker.timeout=180)")
parser.add_option(
"--user-data", type="string", default="",
help="Path to a user-data file (most AMIs interpret this as an initialization script)")
parser.add_option(
"--authorized-address", type="string", default="0.0.0.0/0",
help="Address to authorize on created security groups (default: %default)")
parser.add_option(
"--additional-security-group", type="string", default="",
help="Additional security group to place the machines in")
parser.add_option(
"--additional-tags", type="string", default="",
help="Additional tags to set on the machines; tags are comma-separated, while name and " +
"value are colon separated; ex: \"Task:MySparkProject,Env:production\"")
parser.add_option(
"--tag-volumes", action="store_true", default=False,
help="Apply the tags given in --additional-tags to any EBS volumes " +
"attached to master and slave instances.")
parser.add_option(
"--copy-aws-credentials", action="store_true", default=False,
help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
parser.add_option(
"--subnet-id", default=None,
help="VPC subnet to launch instances in")
parser.add_option(
"--vpc-id", default=None,
help="VPC to launch instances in")
parser.add_option(
"--private-ips", action="store_true", default=False,
help="Use private IPs for instances rather than public if VPC/subnet " +
"requires that.")
parser.add_option(
"--instance-initiated-shutdown-behavior", default="stop",
choices=["stop", "terminate"],
help="Whether instances should terminate when shut down or just stop")
parser.add_option(
"--instance-profile-name", default=None,
help="IAM profile name to launch instances under")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
# Boto config check
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html
home_dir = os.getenv('HOME')
if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
if not os.path.isfile('/etc/boto.cfg'):
# If there is no boto config, check aws credentials
if not os.path.isfile(home_dir + '/.aws/credentials'):
if os.getenv('AWS_ACCESS_KEY_ID') is None:
print("ERROR: The environment variable AWS_ACCESS_KEY_ID must be set",
file=stderr)
sys.exit(1)
if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print("ERROR: The environment variable AWS_SECRET_ACCESS_KEY must be set",
file=stderr)
sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name, vpc_id):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print("Creating security group " + name)
return conn.create_security_group(name, "Spark EC2 group", vpc_id)
def validate_spark_hadoop_version(spark_version, hadoop_version):
if "." in spark_version:
parts = spark_version.split(".")
if parts[0].isdigit():
spark_major_version = float(parts[0])
if spark_major_version > 1.0 and hadoop_version != "yarn":
print("Spark version: {v}, does not support Hadoop version: {hv}".
format(v=spark_version, hv=hadoop_version), file=stderr)
sys.exit(1)
else:
print("Invalid Spark version: {v}".format(v=spark_version), file=stderr)
sys.exit(1)
def get_validate_spark_version(version, repo):
if "." in version:
# Remove leading v to handle inputs like v1.5.0
version = version.lstrip("v")
if version not in VALID_SPARK_VERSIONS:
print("Don't know about Spark version: {v}".format(v=version), file=stderr)
sys.exit(1)
return version
else:
github_commit_url = "{repo}/commit/{commit_hash}".format(repo=repo, commit_hash=version)
request = Request(github_commit_url)
request.get_method = lambda: 'HEAD'
try:
response = urlopen(request)
except HTTPError as e:
print("Couldn't validate Spark commit: {url}".format(url=github_commit_url),
file=stderr)
print("Received HTTP response code of {code}.".format(code=e.code), file=stderr)
sys.exit(1)
return version
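# A minimal sketch, not part of the original script: get_validate_spark_version()
# accepts either a release string (optionally prefixed with "v") or a git commit
# hash. This checks the release path only, since the hash path needs a round
# trip to GitHub. Only defined here; call it manually to run.
def _demo_validate_release_version():
    assert get_validate_spark_version("v1.5.0", DEFAULT_SPARK_GITHUB_REPO) == "1.5.0"
    assert get_validate_spark_version("2.4.7", DEFAULT_SPARK_GITHUB_REPO) == "2.4.7"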
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
EC2_INSTANCE_TYPES = {
"c1.medium": "pvm",
"c1.xlarge": "pvm",
"c3.large": "hvm",
"c3.xlarge": "hvm",
"c3.2xlarge": "hvm",
"c3.4xlarge": "hvm",
"c3.8xlarge": "hvm",
"c4.large": "hvm",
"c4.xlarge": "hvm",
"c4.2xlarge": "hvm",
"c4.4xlarge": "hvm",
"c4.8xlarge": "hvm",
"cc1.4xlarge": "hvm",
"cc2.8xlarge": "hvm",
"cg1.4xlarge": "hvm",
"cr1.8xlarge": "hvm",
"d2.xlarge": "hvm",
"d2.2xlarge": "hvm",
"d2.4xlarge": "hvm",
"d2.8xlarge": "hvm",
"g2.2xlarge": "hvm",
"g2.8xlarge": "hvm",
"hi1.4xlarge": "pvm",
"hs1.8xlarge": "pvm",
"i2.xlarge": "hvm",
"i2.2xlarge": "hvm",
"i2.4xlarge": "hvm",
"i2.8xlarge": "hvm",
"m1.small": "pvm",
"m1.medium": "pvm",
"m1.large": "pvm",
"m1.xlarge": "pvm",
"m2.xlarge": "pvm",
"m2.2xlarge": "pvm",
"m2.4xlarge": "pvm",
"m3.medium": "hvm",
"m3.large": "hvm",
"m3.xlarge": "hvm",
"m3.2xlarge": "hvm",
"m4.large": "hvm",
"m4.xlarge": "hvm",
"m4.2xlarge": "hvm",
"m4.4xlarge": "hvm",
"m4.10xlarge": "hvm",
"r3.large": "hvm",
"r3.xlarge": "hvm",
"r3.2xlarge": "hvm",
"r3.4xlarge": "hvm",
"r3.8xlarge": "hvm",
"r4.large": "hvm",
"r4.xlarge": "hvm",
"r4.2xlarge": "hvm",
"r4.4xlarge": "hvm",
"r4.8xlarge": "hvm",
"r4.16xlarge": "hvm",
"r5.large": "hvm",
"r5.xlarge": "hvm",
"r5.2xlarge": "hvm",
"r5.4xlarge": "hvm",
"r5.8xlarge": "hvm",
"r5.12xlarge": "hvm",
"r5.24xlarge": "hvm",
"r5a.large": "hvm",
"r5a.xlarge": "hvm",
"r5a.2xlarge": "hvm",
"r5a.4xlarge": "hvm",
"r5a.12xlarge": "hvm",
"r5a.24xlarge": "hvm",
"t1.micro": "pvm",
"t2.micro": "hvm",
"t2.small": "hvm",
"t2.medium": "hvm",
"t2.large": "hvm",
"x1.16xlarge": "hvm",
"x1.32xlarge": "hvm"
}
def get_tachyon_version(spark_version):
return SPARK_TACHYON_MAP.get(spark_version, "")
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
def get_spark_ami(opts):
if opts.instance_type in EC2_INSTANCE_TYPES:
instance_type = EC2_INSTANCE_TYPES[opts.instance_type]
else:
instance_type = "pvm"
print("Don't recognize %s, assuming type is pvm" % opts.instance_type, file=stderr)
# URL prefix from which to fetch AMI information
ami_prefix = "{r}/{b}/ami-list".format(
r=opts.spark_ec2_git_repo.replace("https://github.com", "https://raw.github.com", 1),
b=opts.spark_ec2_git_branch)
ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
reader = codecs.getreader("ascii")
try:
ami = reader(urlopen(ami_path)).read().strip()
except:
print("Could not resolve AMI at: " + ami_path, file=stderr)
sys.exit(1)
print("Spark AMI: " + ami)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there are already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print("ERROR: Must provide an identity file (-i) for ssh connections.", file=stderr)
sys.exit(1)
if opts.key_pair is None:
print("ERROR: Must provide a key pair name (-k) to use on instances.", file=stderr)
sys.exit(1)
user_data_content = None
if opts.user_data:
with open(opts.user_data) as user_data_file:
user_data_content = user_data_file.read()
print("Setting up security groups... customized")
master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
authorized_address = opts.authorized_address
ssh_strict_group_name = 'ssh-strict-sg'
ssh_strict_group = get_or_make_group(conn, ssh_strict_group_name, opts.vpc_id)
if master_group.rules == []: # Group was just now created
if opts.vpc_id is None:
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
else:
# master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=master_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=master_group)
# master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=master_group)
# master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=slave_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=slave_group)
# master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=slave_group)
master_group.authorize('tcp', 8080, 8081, authorized_address) # spark master,worker ui
master_group.authorize('tcp', 18080, 18080, authorized_address) # spark history ui
# master_group.authorize('tcp', 19999, 19999, authorized_address) # tachyon
# master_group.authorize('tcp', 50030, 50030, authorized_address) # mapred jobtracker
# master_group.authorize('tcp', 50070, 50070, authorized_address) # hdfs / dfs health
# master_group.authorize('tcp', 60070, 60070, authorized_address) # ???
master_group.authorize('tcp', 4040, 4045, authorized_address) # ??? spark running job/application ui
## Rstudio (GUI for R) needs port 8787 for web access
# master_group.authorize('tcp', 8787, 8787, authorized_address)
## HDFS NFS gateway requires 111,2049,4242 for tcp & udp
# master_group.authorize('tcp', 111, 111, authorized_address)
# master_group.authorize('udp', 111, 111, authorized_address)
# master_group.authorize('tcp', 2049, 2049, authorized_address)
# master_group.authorize('udp', 2049, 2049, authorized_address)
# master_group.authorize('tcp', 4242, 4242, authorized_address)
# master_group.authorize('udp', 4242, 4242, authorized_address)
# RM in YARN mode uses 8088
# master_group.authorize('tcp', 8088, 8088, authorized_address) # hadoop cluster ui
# if opts.ganglia:
# master_group.authorize('tcp', 5080, 5080, authorized_address)
if slave_group.rules == []: # Group was just now created
if opts.vpc_id is None:
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
else:
# slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=master_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=master_group)
# slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=master_group)
# slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
# src_group=slave_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=slave_group)
# slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
# src_group=slave_group)
# slave_group.authorize('tcp', 8080, 8081, authorized_address)
# slave_group.authorize('tcp', 50060, 50060, authorized_address)
# slave_group.authorize('tcp', 50075, 50075, authorized_address)
# slave_group.authorize('tcp', 60060, 60060, authorized_address)
# slave_group.authorize('tcp', 60075, 60075, authorized_address)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print("ERROR: There are already instances running in group %s or %s" %
(master_group.name, slave_group.name), file=stderr)
sys.exit(1)
# Figure out Spark AMI
if opts.ami is None:
opts.ami = get_spark_ami(opts)
# we use group ids to work around https://github.com/boto/boto/issues/350
additional_group_ids = []
if opts.additional_security_group:
additional_group_ids = [sg.id
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
# our custom security group
print("Adding custom security group...")
additional_group_ids.append(ssh_strict_group.id)
print("Launching instances...")
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print("Could not find AMI " + opts.ami, file=stderr)
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.ascii_letters[i + 1]
block_map[name] = dev
# Launch slaves
if opts.spot_price is not None:
# Launch spot instances with the requested price
print("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_profile_name=opts.instance_profile_name)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print("Waiting for spot instances to be granted...")
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print("All %d slaves granted" % opts.slaves)
reservations = conn.get_all_reservations(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print("%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves))
except:
print("Canceling spot instance requests")
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print(("WARNING: %d instances are still running" % running), file=stderr)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
slave_nodes += slave_res.instances
print("Launched {s} slave{plural_s} in {z}, regid = {r}".format(
s=num_slaves_this_zone,
plural_s=('' if num_slaves_this_zone == 1 else 's'),
z=zone,
r=slave_res.id))
i += 1
# Launch or resume masters
if existing_masters:
print("Starting master...")
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(
key_name=opts.key_pair,
security_group_ids=[master_group.id] + additional_group_ids,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
master_nodes = master_res.instances
print("Launched master in %s, regid = %s" % (zone, master_res.id))
# This wait time corresponds to SPARK-4983
print("Waiting for AWS to propagate instance metadata...")
time.sleep(15)
# Give the instances descriptive names and set additional tags
additional_tags = {}
if opts.additional_tags.strip():
additional_tags = dict(
map(str.strip, tag.split(':', 1)) for tag in opts.additional_tags.split(',')
)
print('Applying tags to master nodes')
for master in master_nodes:
master.add_tags(
dict(additional_tags, Name='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
)
print('Applying tags to slave nodes')
for slave in slave_nodes:
slave.add_tags(
dict(additional_tags, Name='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
)
if opts.tag_volumes:
if len(additional_tags) > 0:
print('Applying tags to volumes')
all_instance_ids = [x.id for x in master_nodes + slave_nodes]
volumes = conn.get_all_volumes(filters={'attachment.instance-id': all_instance_ids})
for v in volumes:
v.add_tags(additional_tags)
else:
print('--tag-volumes has no effect without --additional-tags')
# Return all the instances
return (master_nodes, slave_nodes)
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
"""
Get the EC2 instances in an existing cluster if available.
Returns a tuple of lists of EC2 instance objects for the masters and slaves.
"""
print("Searching for existing cluster {c} in region {r}...".format(
c=cluster_name, r=opts.region))
def get_instances(group_names):
"""
Get all non-terminated instances that belong to any of the provided security groups.
EC2 reservation filters and instance states are documented here:
http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
"""
reservations = conn.get_all_reservations(
filters={"instance.group-name": group_names})
instances = itertools.chain.from_iterable(r.instances for r in reservations)
return [i for i in instances if i.state not in ["shutting-down", "terminated"]]
master_instances = get_instances([cluster_name + "-master"])
slave_instances = get_instances([cluster_name + "-slaves"])
if any((master_instances, slave_instances)):
print("Found {m} master{plural_m}, {s} slave{plural_s}.".format(
m=len(master_instances),
plural_m=('' if len(master_instances) == 1 else 's'),
s=len(slave_instances),
plural_s=('' if len(slave_instances) == 1 else 's')))
if not master_instances and die_on_error:
print("ERROR: Could not find a master for cluster {c} in region {r}.".format(
c=cluster_name, r=opts.region), file=sys.stderr)
sys.exit(1)
return (master_instances, slave_instances)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
master = get_dns_name(master_nodes[0], opts.private_ips)
if deploy_ssh_key:
print("Generating cluster's SSH key on master...")
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master, opts, key_setup)
dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
print("Transferring cluster's SSH key to slaves...")
for slave in slave_nodes:
slave_address = get_dns_name(slave, opts.private_ips)
print(slave_address)
ssh_write(slave_address, opts, ['tar', 'x'], dot_ssh_tar)
    # original full set of modules
# modules = ['spark', 'ephemeral-hdfs', 'persistent-hdfs',
# 'mapreduce', 'spark-standalone', 'tachyon', 'rstudio']
# install minimal set of modules
modules = ['spark', 'ephemeral-hdfs', 'spark-standalone']
if opts.hadoop_major_version == "1":
modules = list(filter(lambda x: x != "mapreduce", modules))
if opts.ganglia:
modules.append('ganglia')
# Clear SPARK_WORKER_INSTANCES if running on YARN
if opts.hadoop_major_version == "yarn":
opts.worker_instances = ""
# NOTE: We should clone the repository before running deploy_files to
# prevent ec2-variables.sh from being overwritten
print("Cloning spark-ec2 scripts from {r}/tree/{b} on master...".format(
r=opts.spark_ec2_git_repo, b=opts.spark_ec2_git_branch))
ssh(
host=master,
opts=opts,
command="rm -rf spark-ec2"
+ " && "
+ "git clone {r} -b {b} spark-ec2".format(r=opts.spark_ec2_git_repo,
b=opts.spark_ec2_git_branch)
)
print("Deploying files to master...")
deploy_files(
conn=conn,
root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
opts=opts,
master_nodes=master_nodes,
slave_nodes=slave_nodes,
modules=modules
)
if opts.deploy_root_dir is not None:
print("Deploying {s} to master...".format(s=opts.deploy_root_dir))
deploy_user_files(
root_dir=opts.deploy_root_dir,
opts=opts,
master_nodes=master_nodes
)
print("Running setup on master...")
setup_spark_cluster(master, opts)
print("Done!")
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
print("Spark standalone cluster started at http://%s:8080" % master)
if opts.ganglia:
print("Ganglia started at http://%s:5080/ganglia" % master)
def is_ssh_available(host, opts, print_ssh_output=True):
"""
Check if SSH is available on a host.
"""
s = subprocess.Popen(
ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
'%s@%s' % (opts.user, host), stringify_command('true')],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT # we pipe stderr through stdout to preserve output order
)
cmd_output = s.communicate()[0] # [1] is stderr, which we redirected to stdout
if s.returncode != 0 and print_ssh_output:
# extra leading newline is for spacing in wait_for_cluster_state()
print(textwrap.dedent("""\n
Warning: SSH connection error. (This could be temporary.)
Host: {h}
SSH return code: {r}
SSH output: {o}
""").format(
h=host,
r=s.returncode,
o=cmd_output.strip()
))
return s.returncode == 0
def is_cluster_ssh_available(cluster_instances, opts):
"""
Check if SSH is available on all the instances in a cluster.
"""
for i in cluster_instances:
dns_name = get_dns_name(i, opts.private_ips)
if not is_ssh_available(host=dns_name, opts=opts):
return False
else:
return True
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
"""
Wait for all the instances in the cluster to reach a designated state.
cluster_instances: a list of boto.ec2.instance.Instance
cluster_state: a string representing the desired state of all the instances in the cluster
value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
'running', 'terminated', etc.
(would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
"""
sys.stdout.write(
"Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
)
sys.stdout.flush()
start_time = datetime.now()
num_attempts = 0
while True:
time.sleep(5 * num_attempts) # seconds
for i in cluster_instances:
i.update()
max_batch = 100
statuses = []
for j in xrange(0, len(cluster_instances), max_batch):
batch = [i.id for i in cluster_instances[j:j + max_batch]]
statuses.extend(conn.get_all_instance_status(instance_ids=batch))
if cluster_state == 'ssh-ready':
if all(i.state == 'running' for i in cluster_instances) and \
all(s.system_status.status == 'ok' for s in statuses) and \
all(s.instance_status.status == 'ok' for s in statuses) and \
is_cluster_ssh_available(cluster_instances, opts):
break
else:
if all(i.state == cluster_state for i in cluster_instances):
break
num_attempts += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
end_time = datetime.now()
print("Cluster is now in '{s}' state. Waited {t} seconds.".format(
s=cluster_state,
t=(end_time - start_time).seconds
))
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
disks_by_instance = {
"c1.medium": 1,
"c1.xlarge": 4,
"c3.large": 2,
"c3.xlarge": 2,
"c3.2xlarge": 2,
"c3.4xlarge": 2,
"c3.8xlarge": 2,
"c4.large": 0,
"c4.xlarge": 0,
"c4.2xlarge": 0,
"c4.4xlarge": 0,
"c4.8xlarge": 0,
"cc1.4xlarge": 2,
"cc2.8xlarge": 4,
"cg1.4xlarge": 2,
"cr1.8xlarge": 2,
"d2.xlarge": 3,
"d2.2xlarge": 6,
"d2.4xlarge": 12,
"d2.8xlarge": 24,
"g2.2xlarge": 1,
"g2.8xlarge": 2,
"hi1.4xlarge": 2,
"hs1.8xlarge": 24,
"i2.xlarge": 1,
"i2.2xlarge": 2,
"i2.4xlarge": 4,
"i2.8xlarge": 8,
"m1.small": 1,
"m1.medium": 1,
"m1.large": 2,
"m1.xlarge": 4,
"m2.xlarge": 1,
"m2.2xlarge": 1,
"m2.4xlarge": 2,
"m3.medium": 1,
"m3.large": 1,
"m3.xlarge": 2,
"m3.2xlarge": 2,
"m4.large": 0,
"m4.xlarge": 0,
"m4.2xlarge": 0,
"m4.4xlarge": 0,
"m4.10xlarge": 0,
"r3.large": 1,
"r3.xlarge": 1,
"r3.2xlarge": 1,
"r3.4xlarge": 1,
"r3.8xlarge": 2,
"r4.large": 0,
"r4.xlarge": 0,
"r4.2xlarge": 0,
"r4.4xlarge": 0,
"r4.8xlarge": 0,
"r4.16xlarge": 0,
"r5.large": 0,
"r5.xlarge": 0,
"r5.2xlarge": 0,
"r5.4xlarge": 0,
"r5.8xlarge": 0,
"r5.12xlarge": 0,
"r5.24xlarge": 0,
"r5a.large": 0,
"r5a.xlarge": 0,
"r5a.2xlarge": 0,
"r5a.4xlarge": 0,
"r5a.12xlarge": 0,
"r5a.24xlarge": 0,
"t1.micro": 0,
"t2.micro": 0,
"t2.small": 0,
"t2.medium": 0,
"t2.large": 0,
"x1.16xlarge": 1,
"x1.32xlarge": 2,
}
if instance_type in disks_by_instance:
return disks_by_instance[instance_type]
else:
print("WARNING: Don't know number of disks on instance type %s; assuming 1"
% instance_type, file=stderr)
return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
active_master = get_dns_name(master_nodes[0], opts.private_ips)
num_disks = get_num_disks(opts.instance_type)
hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
mapred_local_dirs = "/mnt/hadoop/mrlocal"
spark_local_dirs = "/mnt/spark"
if num_disks > 1:
for i in range(2, num_disks + 1):
hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
spark_local_dirs += ",/mnt%d/spark" % i
cluster_url = "%s:7077" % active_master
if "." in opts.spark_version:
# Pre-built Spark deploy
spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
validate_spark_hadoop_version(spark_v, opts.hadoop_major_version)
tachyon_v = get_tachyon_version(spark_v)
else:
# Spark-only custom deploy
spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
tachyon_v = ""
if tachyon_v == "":
print("No valid Tachyon version found; Tachyon won't be set up")
try:
modules.remove("tachyon")
except ValueError:
pass # ignore
master_addresses = [get_dns_name(i, opts.private_ips) for i in master_nodes]
slave_addresses = [get_dns_name(i, opts.private_ips) for i in slave_nodes]
worker_instances_str = "%d" % opts.worker_instances if opts.worker_instances else ""
template_vars = {
"master_list": '\n'.join(master_addresses),
"active_master": active_master,
"slave_list": '\n'.join(slave_addresses),
"cluster_url": cluster_url,
"hdfs_data_dirs": hdfs_data_dirs,
"mapred_local_dirs": mapred_local_dirs,
"spark_local_dirs": spark_local_dirs,
"swap": str(opts.swap),
"modules": '\n'.join(modules),
"spark_version": spark_v,
"tachyon_version": tachyon_v,
"hadoop_major_version": opts.hadoop_major_version,
"spark_worker_instances": worker_instances_str,
"spark_master_opts": opts.master_opts
}
if opts.copy_aws_credentials:
template_vars["aws_access_key_id"] = conn.aws_access_key_id
template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
else:
template_vars["aws_access_key_id"] = ""
template_vars["aws_secret_access_key"] = ""
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
tmp_dir = tempfile.mkdtemp()
for path, dirs, files in os.walk(root_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(root_dir):])
local_dir = tmp_dir + dest_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
local_file = tmp_dir + dest_file
with open(os.path.join(path, filename)) as src:
with open(local_file, "w") as dest:
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key])
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
# Deploy a given local directory to a cluster, WITHOUT parameter substitution.
# Note that unlike deploy_files, this works for binary files.
# Also, it is up to the user to add (or not) the trailing slash in root_dir.
# Files are only deployed to the first master instance in the cluster.
#
# root_dir should be an absolute path.
def deploy_user_files(root_dir, opts, master_nodes):
active_master = get_dns_name(master_nodes[0], opts.private_ips)
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s" % root_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
def stringify_command(parts):
if isinstance(parts, str):
return parts
else:
return ' '.join(map(pipes.quote, parts))
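# For example, stringify_command(['echo', 'hello world']) returns "echo 'hello world'",
# so arguments containing spaces stay safely quoted when the command runs in a shell.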
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no']
parts += ['-o', 'UserKnownHostsFile=/dev/null']
if opts.identity_file is not None:
parts += ['-i', opts.identity_file]
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
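# With an identity file configured, ssh_command(opts) evaluates to something like
# ['ssh', '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', '-i', <identity file>].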
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
stringify_command(command)])
except subprocess.CalledProcessError as e:
if tries > 5:
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError(
"Failed to SSH to remote host {0}.\n"
"Please check that you have provided the correct --identity-file and "
"--key-pair parameters and try again.".format(host))
else:
raise e
print("Error executing remote command, retrying after 30 seconds: {0}".format(e),
file=stderr)
time.sleep(30)
tries = tries + 1
# Backported from Python 2.7 for compatibility with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
return _check_output(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)])
def ssh_write(host, opts, command, arguments):
tries = 0
while True:
proc = subprocess.Popen(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
stdin=subprocess.PIPE)
proc.stdin.write(arguments)
proc.stdin.close()
status = proc.wait()
if status == 0:
break
elif tries > 5:
raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
else:
print("Error {0} while executing remote command, retrying after 30 seconds".
format(status), file=stderr)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
if opts.zone == 'all':
zones = [z.name for z in conn.get_all_zones()]
else:
zones = [opts.zone]
return zones
# Gets the number of items in a partition
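# For example, get_partition(10, 3, 0) returns 4 while get_partition(10, 3, 1) and
# get_partition(10, 3, 2) both return 3, so ten slaves split 4/3/3 across three zones.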
def get_partition(total, num_partitions, current_partitions):
num_slaves_this_zone = total // num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
# Gets the IP address, taking into account the --private-ips flag
def get_ip_address(instance, private_ips=False):
ip = instance.ip_address if not private_ips else \
instance.private_ip_address
return ip
# Gets the DNS name, taking into account the --private-ips flag
def get_dns_name(instance, private_ips=False):
dns = instance.public_dns_name if not private_ips else \
instance.private_ip_address
if not dns:
raise UsageError("Failed to determine hostname of {0}.\n"
"Please check that you provided --private-ips if "
"necessary".format(instance))
return dns
def real_main():
(opts, action, cluster_name) = parse_args()
# Input parameter validation
spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
validate_spark_hadoop_version(spark_v, opts.hadoop_major_version)
if opts.wait is not None:
# NOTE: DeprecationWarnings are silent in 2.7+ by default.
# To show them, run Python with the -Wdefault switch.
# See: https://docs.python.org/3.5/whatsnew/2.7.html
warnings.warn(
"This option is deprecated and has no effect. "
"spark-ec2 automatically waits as long as necessary for clusters to start up.",
DeprecationWarning
)
if opts.identity_file is not None:
if not os.path.exists(opts.identity_file):
print("ERROR: The identity file '{f}' doesn't exist.".format(f=opts.identity_file),
file=stderr)
sys.exit(1)
file_mode = os.stat(opts.identity_file).st_mode
if not (file_mode & S_IRUSR) or not oct(file_mode)[-2:] == '00':
print("ERROR: The identity file must be accessible only by you.", file=stderr)
print('You can fix this with: chmod 400 "{f}"'.format(f=opts.identity_file),
file=stderr)
sys.exit(1)
if opts.instance_type not in EC2_INSTANCE_TYPES:
print("Warning: Unrecognized EC2 instance type for instance-type: {t}".format(
t=opts.instance_type), file=stderr)
if opts.master_instance_type != "":
if opts.master_instance_type not in EC2_INSTANCE_TYPES:
print("Warning: Unrecognized EC2 instance type for master-instance-type: {t}".format(
t=opts.master_instance_type), file=stderr)
# Since we try instance types even if we can't resolve them, we check if they resolve first
# and, if they do, see if they resolve to the same virtualization type.
if opts.instance_type in EC2_INSTANCE_TYPES and \
opts.master_instance_type in EC2_INSTANCE_TYPES:
if EC2_INSTANCE_TYPES[opts.instance_type] != \
EC2_INSTANCE_TYPES[opts.master_instance_type]:
print("Error: spark-ec2 currently does not support having a master and slaves "
"with different AMI virtualization types.", file=stderr)
print("master instance virtualization type: {t}".format(
t=EC2_INSTANCE_TYPES[opts.master_instance_type]), file=stderr)
print("slave instance virtualization type: {t}".format(
t=EC2_INSTANCE_TYPES[opts.instance_type]), file=stderr)
sys.exit(1)
if opts.ebs_vol_num > 8:
print("ebs-vol-num cannot be greater than 8", file=stderr)
sys.exit(1)
# Prevent breaking ami_prefix (/, .git and startswith checks)
# Prevent forks with non spark-ec2 names for now.
if opts.spark_ec2_git_repo.endswith("/") or \
opts.spark_ec2_git_repo.endswith(".git") or \
not opts.spark_ec2_git_repo.startswith("https://github.com") or \
not opts.spark_ec2_git_repo.endswith("spark-ec2"):
print("spark-ec2-git-repo must be a github repo and it must not have a trailing / or .git. "
"Furthermore, we currently only support forks named spark-ec2.", file=stderr)
sys.exit(1)
if not (opts.deploy_root_dir is None or
(os.path.isabs(opts.deploy_root_dir) and
os.path.isdir(opts.deploy_root_dir) and
os.path.exists(opts.deploy_root_dir))):
print("--deploy-root-dir must be an absolute path to a directory that exists "
"on the local file system", file=stderr)
sys.exit(1)
try:
if opts.profile is None:
conn = ec2.connect_to_region(opts.region)
else:
conn = ec2.connect_to_region(opts.region, profile_name=opts.profile)
except Exception as e:
print((e), file=stderr)
sys.exit(1)
# Select an AZ at random if it was not specified.
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if action == "launch":
if opts.slaves <= 0:
print("ERROR: You have to start at least 1 slave", file=sys.stderr)
sys.exit(1)
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
(master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
elif action == "destroy":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
if any(master_nodes + slave_nodes):
print("The following instances will be terminated:")
for inst in master_nodes + slave_nodes:
print("> %s" % get_dns_name(inst, opts.private_ips))
print("ALL DATA ON ALL NODES WILL BE LOST!!")
msg = "Are you sure you want to destroy the cluster {c}? (y/N) ".format(c=cluster_name)
response = raw_input(msg)
if response == "y":
print("Terminating master...")
for inst in master_nodes:
inst.terminate()
print("Terminating slaves...")
for inst in slave_nodes:
inst.terminate()
# Delete security groups as well
if opts.delete_groups:
group_names = [cluster_name + "-master", cluster_name + "-slaves"]
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='terminated'
)
print("Deleting security groups (this will take some time)...")
attempt = 1
while attempt <= 3:
print("Attempt %d" % attempt)
groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
success = True
# Delete individual rules in all groups before deleting groups to
# remove dependencies between them
for group in groups:
print("Deleting rules in security group " + group.name)
for rule in group.rules:
for grant in rule.grants:
success &= group.revoke(ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group=grant)
# Sleep for AWS eventual-consistency to catch up, and for instances
# to terminate
time.sleep(30) # Yes, it does have to be this long :-(
for group in groups:
try:
                                # Use group_id here so that deletion also works for VPC security groups
conn.delete_security_group(group_id=group.id)
print("Deleted security group %s" % group.name)
except boto.exception.EC2ResponseError:
success = False
print("Failed to delete security group %s" % group.name)
# Unfortunately, group.revoke() returns True even if a rule was not
# deleted, so this needs to be rerun if something fails
if success:
break
attempt += 1
if not success:
print("Failed to delete all security groups after 3 tries.")
print("Try re-running in a few minutes.")
elif action == "login":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
if not master_nodes[0].public_dns_name and not opts.private_ips:
print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
else:
master = get_dns_name(master_nodes[0], opts.private_ips)
print("Logging into master " + master + "...")
proxy_opt = []
if opts.proxy_port is not None:
proxy_opt = ['-D', opts.proxy_port]
subprocess.check_call(
ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
elif action == "reboot-slaves":
response = raw_input(
"Are you sure you want to reboot the cluster " +
cluster_name + " slaves?\n" +
"Reboot cluster slaves " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Rebooting slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
print("Rebooting " + inst.id)
inst.reboot()
elif action == "get-master":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
if not master_nodes[0].public_dns_name and not opts.private_ips:
print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
else:
print(get_dns_name(master_nodes[0], opts.private_ips))
elif action == "stop":
response = raw_input(
"Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Stopping master...")
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print("Stopping slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
elif action == "start":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print("Starting slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print("Starting master...")
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
# Determine types of running instances
existing_master_type = master_nodes[0].instance_type
existing_slave_type = slave_nodes[0].instance_type
# Setting opts.master_instance_type to the empty string indicates we
# have the same instance type for the master and the slaves
if existing_master_type == existing_slave_type:
existing_master_type = ""
opts.master_instance_type = existing_master_type
opts.instance_type = existing_slave_type
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
else:
print("Invalid action: %s" % action, file=stderr)
sys.exit(1)
def main():
try:
real_main()
except UsageError as e:
print("\nError:\n", e, file=stderr)
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
|
apache-2.0
| 808,082,597,537,682,300
| 38.779733
| 173
| 0.574553
| false
| 3.625539
| false
| false
| false
|
grembo/ice
|
python/test/Ice/objects/TestI.py
|
1
|
5256
|
# **********************************************************************
#
# Copyright (c) 2003-2017 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import Ice, Test
class BI(Test.B):
def __init__(self):
self.preMarshalInvoked = False
self.postUnmarshalInvoked = False
def ice_preMarshal(self):
self.preMarshalInvoked = True
def ice_postUnmarshal(self):
self.postUnmarshalInvoked = True
class CI(Test.C):
def __init__(self):
self.preMarshalInvoked = False
self.postUnmarshalInvoked = False
def ice_preMarshal(self):
self.preMarshalInvoked = True
def ice_postUnmarshal(self):
self.postUnmarshalInvoked = True
class DI(Test.D):
def __init__(self):
self.preMarshalInvoked = False
self.postUnmarshalInvoked = False
def ice_preMarshal(self):
self.preMarshalInvoked = True
def ice_postUnmarshal(self):
self.postUnmarshalInvoked = True
class EI(Test.E):
def __init__(self):
Test.E.__init__(self, 1, "hello")
def checkValues(self, current=None):
return self._i == 1 and self._s == "hello"
class FI(Test.F):
def __init__(self, e=None):
Test.F.__init__(self, e, e)
def checkValues(self, current=None):
        return self._e1 != None and self._e1 == self._e2
class II(Ice.InterfaceByValue):
def __init__(self):
Ice.InterfaceByValue.__init__(self, "::Test::I")
class JI(Ice.InterfaceByValue):
def __init__(self):
Ice.InterfaceByValue.__init__(self, "::Test::J")
class HI(Test.H):
pass
class InitialI(Test.Initial):
def __init__(self, adapter):
self._adapter = adapter
self._b1 = BI()
self._b2 = BI()
self._c = CI()
self._d = DI()
self._e = EI()
self._f = FI(self._e)
self._b1.theA = self._b2 # Cyclic reference to another B
self._b1.theB = self._b1 # Self reference.
self._b1.theC = None # Null reference.
self._b2.theA = self._b2 # Self reference, using base.
self._b2.theB = self._b1 # Cyclic reference to another B
self._b2.theC = self._c # Cyclic reference to a C.
self._c.theB = self._b2 # Cyclic reference to a B.
self._d.theA = self._b1 # Reference to a B.
self._d.theB = self._b2 # Reference to a B.
self._d.theC = None # Reference to a C.
def shutdown(self, current=None):
self._adapter.getCommunicator().shutdown()
def getB1(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
return self._b1
def getB2(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
return self._b2
def getC(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
return self._c
def getD(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
self._d.preMarshalInvoked = False
return self._d
def getE(self, current=None):
return self._e
def getF(self, current=None):
return self._f
def setRecursive(self, r, current):
pass
def supportsClassGraphDepthMax(self, current):
return True
def getMB(self, current):
return Test.Initial.GetMBMarshaledResult(self._b1, current)
def getAMDMB(self, current):
return Ice.Future.completed(Test.Initial.GetAMDMBMarshaledResult(self._b1, current))
def getAll(self, current=None):
self._b1.preMarshalInvoked = False
self._b2.preMarshalInvoked = False
self._c.preMarshalInvoked = False
self._d.preMarshalInvoked = False
return (self._b1, self._b2, self._c, self._d)
def getI(self, current=None):
return II()
def getJ(self, current=None):
return JI()
def getH(self, current=None):
return HI()
def getD1(self, d1, current=None):
return d1
def throwEDerived(self, current=None):
raise Test.EDerived(Test.A1("a1"), Test.A1("a2"), Test.A1("a3"), Test.A1("a4"))
def setI(self, i, current=None):
pass
def opBaseSeq(self, inSeq, current=None):
return (inSeq, inSeq)
def getCompact(self, current=None):
return Test.CompactExt()
def getInnerA(self, current=None):
return Test.Inner.A(self._b1)
def getInnerSubA(self, current=None):
return Test.Inner.Sub.A(Test.Inner.A(self._b1))
def throwInnerEx(self, current=None):
raise Test.Inner.Ex("Inner::Ex")
def throwInnerSubEx(self, current=None):
raise Test.Inner.Sub.Ex("Inner::Sub::Ex")
class UnexpectedObjectExceptionTestI(Test.UnexpectedObjectExceptionTest):
def op(self, current=None):
return Test.AlsoEmpty()
|
gpl-2.0
| 8,795,476,075,585,207,000
| 27.410811
| 92
| 0.599125
| false
| 3.364917
| true
| false
| false
|
kapteyn-astro/kapteyn
|
doc/source/EXAMPLES/kmpfit_compare_wei_unwei.py
|
1
|
3993
|
#!/usr/bin/env python
#------------------------------------------------------------
# Purpose: Demonstrate quality improvement weighted vs
# unweighted fit for Wolberg data. Wolberg's
# best fit parameters for a weighted fit is not
# accurate (a,b) = (1.8926, 4.9982)
# Improved values are derived from the analytical
# solutions and kmpfit: (a,b) = (1.8705, 5.0290)
#
# Vog, 01 Jan 2012
#------------------------------------------------------------
import numpy
from numpy.random import normal
from kapteyn import kmpfit
def model(p, x):
a, b = p
return a + b*x
def residuals(p, my_arrays):
x, y, err = my_arrays
a, b = p
return (y-model(p,x))/err
x = numpy.array([1.0, 2, 3, 4, 5, 6, 7])
y = numpy.array([6.9, 11.95, 16.8, 22.5, 26.2, 33.5, 41.0])
N = len(y)
err = numpy.ones(N)
errw = numpy.array([0.05, 0.1, 0.2, 0.5, 0.8, 1.5, 4.0])
print("Data x:", x)
print("Data y:", y)
print("Errors:", errw)
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, err))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit unit weighting wi=1.0:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Covariance matrix:")
print(fitobj.covar)
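# The next fit scales every error by the same constant (10). This leaves the best-fit
# parameters and the unit-/relative-weighted errors (stderr) unchanged; only chi^2 and
# the covariance-based errors (xerror) change, which is the point being demonstrated.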
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, 10*err))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with (scaled) equal weights wi=10*1.0:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Covariance matrix:")
print(fitobj.covar)
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errw))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with weights:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Minimum reduced chi^2: ", fitobj.rchi2_min)
print("Covariance matrix:")
print(fitobj.covar)
rchi2 = fitobj.rchi2_min # Store for future scaling purposes
errw10 = errw * 10.0
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errw10))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with scaled individual errors (factor=10):")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Minimum reduced chi^2: ", fitobj.rchi2_min)
print("Covariance matrix:")
print(fitobj.covar)
scaled_errw = errw * numpy.sqrt(rchi2)
print("""\n\nNew array with measurement errors, scaled with factor %g to give
a reduced chi-squared of 1.0:"""%rchi2)
print(scaled_errw)
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, scaled_errw))
fitobj.fit(params0=[1,1])
print("\n-- Results kmpfit with scaled individual errors to force red_chi2=1:")
print("Best-fit parameters: ", fitobj.params)
print("Parameter errors using measurement uncertainties: ", fitobj.xerror)
print("Parameter errors unit-/relative weighted fit: ", fitobj.stderr)
print("Minimum chi^2: ", fitobj.chi2_min)
print("Minimum reduced chi^2: ", fitobj.rchi2_min)
print("Covariance matrix:")
print(fitobj.covar)
|
bsd-3-clause
| 6,116,116,169,382,425,000
| 41.489362
| 79
| 0.611821
| false
| 3.233198
| false
| false
| false
|
dleecefft/pcapstats
|
pbin/pcapsessionslicer.py
|
1
|
4562
|
#!/usr/bin/env python
from scapy.all import *
import re, sys, getopt, shutil
def pcapsessions(pfile):
pssn = rdpcap(pfile)
return pssn
def fullstrsplit(ipportstr):
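    # Turns a session key such as "TCP 1.2.3.4:80 > 5.6.7.8:1234" into
    # ['TCP', '1.2.3.4', '80', '5.6.7.8', '1234'].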
retlist=[]
sssnlist = ipportstr.split()
# stack up the list and split out the port values
retlist.append(sssnlist[0])
tmpip = sssnlist[1].split(':')
retlist.append(tmpip[0])
retlist.append(tmpip[1])
tmpip = sssnlist[3].split(':')
retlist.append(tmpip[0])
retlist.append(tmpip[1])
return retlist
def partstrsplit(ipportstr):
retlist=[]
sssnlist = ipportstr.split()
# stack up the list and split out the port values
retlist.append(sssnlist[0])
retlist.append(sssnlist[1])
retlist.append('')
retlist.append(sssnlist[3])
retlist.append('')
return retlist
def writesessioncsv(fileline,wfile):
try:
with open(wfile,'a') as wfh:
wfh.write(fileline + "\n")
except Exception as e:
print(e)
pass
return
def sessionparse(ssnobj,include,pktgrep):
sessions = ssnobj.sessions()
for k, v in sessions.iteritems():
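        # Scapy session keys look like "TCP 10.0.0.1:1234 > 10.0.0.2:80"; capture both
        # ip:port endpoints so they can be compared against pktgrep below.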
rxparse = re.match(r'^\w+\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}).*\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})',k)
if include and rxparse is not None:
# looking for a match
if rxparse.group(1) == pktgrep or rxparse.group(2) == pktgrep :
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
print kline
#print k,str(len(v))
elif rxparse is not None:
if rxparse.group(1) != pktgrep and rxparse.group(2) != pktgrep :
#print k,str(len(v))
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
print kline
elif not include and rxparse is None:
ksplit = partstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
print kline
#print k,str(len(v))
return
def sessionparsewrite(ssnobj,include,pktgrep,csvoutfile):
sessions = ssnobj.sessions()
for k, v in sessions.iteritems():
rxparse = re.match(r'^\w+\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}).*\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5})',k)
if include and rxparse is not None:
# looking for a match
if rxparse.group(1) == pktgrep or rxparse.group(2) == pktgrep :
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
writesessioncsv(kline,csvoutfile)
elif rxparse is not None:
if rxparse.group(1) != pktgrep and rxparse.group(2) != pktgrep :
#print k,str(len(v))
ksplit = fullstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
writesessioncsv(kline,csvoutfile)
elif not include and rxparse is None:
ksplit = partstrsplit(k)
kline = ','.join(map(str,ksplit))
kline = kline +"," + str(len(str(v)))
writesessioncsv(kline,csvoutfile)
#print k,str(len(v))
return
if __name__ == "__main__":
if len(sys.argv) > 3 :
action=''
outcsv=False
# Use getopt to avoid param order errors
opts, args = getopt.getopt(sys.argv[1:],"f:m:o:t:h:")
for o, a in opts:
if o == '-f':
capfile=a
elif o == '-m':
strmatch=a
elif o == '-o':
outfile=a
outcsv=True
elif o == '-t':
action=a
else:
print("Usage: %s -f file.pcap -m ip:port_string -o [outputfile] -t [exclude] <- ignore these sessions " % sys.argv[0])
exit()
else:
print("Usage: %s -f file.pcap -m ip:port_string -o [outputfile] -t [exclude] <- ignore these sessions " % sys.argv[0])
exit()
# default action is search for string provided vs exclude
if action == "exclude":
action=False
else:
action=True
# grab sessions from pcap
thisssnobj = pcapsessions(capfile)
if outcsv:
sessionparsewrite(thisssnobj,action,strmatch,outfile)
else:
sessionparse(thisssnobj,action,strmatch)
|
apache-2.0
| 6,310,344,041,747,523,000
| 31.126761
| 134
| 0.531565
| false
| 3.258571
| false
| false
| false
|
NicoLugil/Yafa
|
python/TimedActions.py
|
1
|
2411
|
# Copyright 2014 Nico Lugil <nico at lugil dot be>
#
# This file is part of Yafa!
#
# Yafa! is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yafa! is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Yafa. If not, see <http://www.gnu.org/licenses/>.
import sys
import string
import time
"""
Class used to space actions in time by at least interval seconds.
The first time enough_time_passed is checked it will return True;
after that it returns True only if interval seconds have passed since the last True.
"""
class IntervalTimer:
def __init__(self,interval):
self.iv=interval;
self.last_time=time.time() # seconds since epoch
self.did_run=False
def set_interval(self,interval):
self.iv=interval;
def reset_timer(self):
self.last_time=time.time()
def get_remaining_time(self):
time_passed = (time.time()-self.last_time)
if time_passed >= self.iv:
return 0
else:
return (self.iv-time_passed)
def enough_time_passed(self):
if not self.did_run:
self.did_run=True
self.reset_timer()
return True
else:
if (time.time()-self.last_time)>self.iv:
self.reset_timer()
return True
else:
return False
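# Illustrative use (hypothetical names): rate-limit an action to once every 5 seconds.
#   poll_timer = IntervalTimer(5)
#   while running:
#       if poll_timer.enough_time_passed():
#           do_action()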
"""
Single countdown:
start (re)starts it
"""
class CountDownTimer:
def __init__(self,interval):
self.iv=interval
self.start_time=time.time()
def set_interval(self,interval):
self.iv=interval;
def start(self):
self.start_time=time.time()
def get_remaining_time(self):
time_passed = (time.time()-self.start_time)
if time_passed >= self.iv:
return 0
else:
return (self.iv-time_passed)
def end(self):
self.iv=0
def is_time_passed(self):
if(self.get_remaining_time()==0):
return True
else:
return False
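# Illustrative use (hypothetical names): allow an operation at most 10 seconds.
#   deadline = CountDownTimer(10)
#   deadline.start()
#   while not deadline.is_time_passed():
#       do_work()  # or call deadline.end() to cut the countdown short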
|
gpl-3.0
| 1,210,667,763,575,287,800
| 29.1375
| 70
| 0.625052
| false
| 3.808847
| false
| false
| false
|
nsynapse/edison_cat
|
catserver/control/control_system.py
|
1
|
1326
|
#-*- coding:utf-8 -*-
from models import DBSystemInfo as DB
import psutil
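# psutil.net_if_addrs() maps interface names to lists of addresses; update() below uses
# the first address of the interface selected in the request as the server address.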
class Control_System(object):
def __init__(self, request):
self.request = request
def __del__(self):
pass
def update(self):
if self.request.method == 'POST':
try:
_net_if = self.request.POST.get('net_if','')
if DB.objects.exists():
_db = DB.objects.latest('id')
_db.net_if = _net_if
_nets = psutil.net_if_addrs()
_db.net_address = _nets[_net_if][0].address
_db.websocket_port = self.request.POST.get('websocket_port',9002)
_db.save()
else:
_new_db = DB()
_new_db.net_if = self.request.POST.get('net_if','')
_new_db.websocket_port = self.request.POST.get('websocket_port',9002)
_nets = psutil.net_if_addrs()
_new_db.net_address = _nets[_net_if][0].address
_new_db.save()
return True
except Exception, e:
print "Exception(Control_System update) : ", e
return False
|
mit
| -5,306,238,310,272,985,000
| 32.175
| 89
| 0.435143
| false
| 4.277419
| false
| false
| false
|
helixyte/TheLMA
|
thelma/tools/libcreation/base.py
|
1
|
10770
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Base classes and constants for library creation ticket.
AAB
"""
from thelma.tools.iso.poolcreation.base import StockSampleCreationLayout
from thelma.tools.utils.base import round_up
from thelma.tools.utils.converters import BaseLayoutConverter
from thelma.tools.utils.layouts import ParameterSet
from thelma.tools.utils.layouts import WorkingLayout
from thelma.tools.utils.layouts import WorkingPosition
from thelma.entities.moleculetype import MOLECULE_TYPE_IDS
__docformat__ = 'reStructuredText en'
__all__ = ['NUMBER_SECTORS',
'NUMBER_MOLECULE_DESIGNS',
'MOLECULE_DESIGN_TRANSFER_VOLUME',
'POOL_STOCK_RACK_CONCENTRATION',
'PREPARATION_PLATE_CONCENTRATION',
'ALIQUOT_PLATE_CONCENTRATION',
'ALIQUOT_PLATE_VOLUME',
'STARTING_NUMBER_ALIQUOTS',
'get_stock_pool_buffer_volume',
'get_source_plate_transfer_volume',
'LibraryBaseLayoutParameters',
'LibraryBaseLayoutPosition',
'LibraryBaseLayout',
'LibraryBaseLayoutConverter',
'LibraryLayout']
#: The number of rack sectors (96-to-384 plate transition).
NUMBER_SECTORS = 4
#: The molecule type ID for the library.
MOLECULE_TYPE = MOLECULE_TYPE_IDS.SIRNA
#: The number of molecule designs per pool.
NUMBER_MOLECULE_DESIGNS = 3
#: The transfer volume of each molecule design in the pool (from single
#: molecule design stock to pool) in ul.
MOLECULE_DESIGN_TRANSFER_VOLUME = 3
#: The volume of the pool stock racks in ul.
POOL_STOCK_RACK_VOLUME = 45
#: The concentration of the pool stock racks in nM.
POOL_STOCK_RACK_CONCENTRATION = 10000 # 10 uM
#: The concentration of the preparation plate in nM.
PREPARATION_PLATE_CONCENTRATION = 1270 # 1270 nM
#: The sample volume (after dilution, before aliquot plate creation) in the
#: preparation plate in ul.
PREPARATION_PLATE_VOLUME = 43.3 # 43.3 ul
#: The concentration of the library plate in nM.
ALIQUOT_PLATE_CONCENTRATION = 1270 # 1270 nM
#: The final sample volume in the library aliquot plate in ul.
ALIQUOT_PLATE_VOLUME = 4
#: The number of aliquot plates generated for each layout.
STARTING_NUMBER_ALIQUOTS = 8
OPTIMEM_DILUTION_FACTOR = 3
def get_stock_pool_buffer_volume():
"""
Returns the buffer volume required to generate the pool stock samples.
"""
total_transfer_volume = NUMBER_MOLECULE_DESIGNS \
* MOLECULE_DESIGN_TRANSFER_VOLUME
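    # With the constants above: 45 ul - (3 designs * 3 ul each) = 36 ul of buffer per pool.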
return POOL_STOCK_RACK_VOLUME - total_transfer_volume
def get_source_plate_transfer_volume():
"""
Returns the volume that is transferred from a pool stock rack to a
library source (preparation) plate in ul.
"""
dilution_factor = float(POOL_STOCK_RACK_CONCENTRATION) \
/ PREPARATION_PLATE_CONCENTRATION
vol = PREPARATION_PLATE_VOLUME / dilution_factor
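    # With the constants above: 43.3 ul / (10000 nM / 1270 nM) is roughly 5.5 ul
    # (assuming round_up rounds up to the next 0.1 ul).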
return round_up(vol)
class LibraryBaseLayoutParameters(ParameterSet):
"""
This layout defines which positions in a library will contain samples.
"""
DOMAIN = 'library_base_layout'
#: If *True* the position in a library plate will contain a library sample.
IS_SAMPLE_POS = 'is_sample_position'
REQUIRED = [IS_SAMPLE_POS]
ALL = [IS_SAMPLE_POS]
ALIAS_MAP = {IS_SAMPLE_POS : []}
DOMAIN_MAP = {IS_SAMPLE_POS : DOMAIN}
class LibraryBaseLayoutPosition(WorkingPosition):
"""
There is actually only one value for a position in a library base layout
and this is the availability for library samples.
**Equality condition**: equal :attr:`rack_position` and
:attr:`is_sample_pos`
"""
PARAMETER_SET = LibraryBaseLayoutParameters
def __init__(self, rack_position, is_sample_position=True):
"""
Constructor:
:param rack_position: The rack position.
:type rack_position: :class:`thelma.entities.rack.RackPosition`.
:param is_sample_position: Is this position available for samples?
:type is_sample_position: :class:`bool`
"""
WorkingPosition.__init__(self, rack_position)
if not isinstance(is_sample_position, bool):
msg = 'The "sample position" flag must be a bool (obtained: %s).' \
% (is_sample_position.__class__.__name__)
raise TypeError(msg)
#: Is this position available for samples?
self.is_sample_position = is_sample_position
def _get_parameter_values_map(self):
"""
Returns a map with key = parameter name, value = associated attribute.
"""
return {self.PARAMETER_SET.IS_SAMPLE_POS : self.is_sample_position}
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.rack_position == self.rack_position and \
other.is_sample_position == self.is_sample_position
def __repr__(self):
str_format = '<%s rack position: %s, is sample position: %s>'
params = (self.__class__.__name__, self.rack_position,
self.is_sample_position)
return str_format % params
class LibraryBaseLayout(WorkingLayout):
"""
Defines which position in a library may contain library samples.
"""
WORKING_POSITION_CLASS = LibraryBaseLayoutPosition
def __init__(self, shape):
"""
Constructor:
:param shape: The rack shape.
:type shape: :class:`thelma.entities.rack.RackShape`
"""
WorkingLayout.__init__(self, shape)
#: You cannot add new positions to a closed layout.
self.is_closed = False
def add_position(self, working_position):
"""
Adds a :class:`Working_position` to the layout.
:param working_position: The working position to be added.
:type working_position: :class:`LibraryBaseLayoutPosition`
:raises ValueError: If the added position is not a
:attr:`WORKING_POSITION_CLASS` object.
:raises AttributeError: If the layout is closed.
:raises TypeError: if the position has the wrong type
"""
if not self.is_closed:
WorkingLayout.add_position(self, working_position)
else:
raise AttributeError('The layout is closed!')
def close(self):
"""
Removes all positions that may not contain samples.
"""
if not self.is_closed:
del_positions = []
for rack_pos, libbase_pos in self._position_map.iteritems():
if not libbase_pos.is_sample_position:
del_positions.append(rack_pos)
for rack_pos in del_positions: del self._position_map[rack_pos]
self.is_closed = True
def create_rack_layout(self):
"""
The layout is closed before rack layout creation.
"""
self.close()
return WorkingLayout.create_rack_layout(self)
class LibraryBaseLayoutConverter(BaseLayoutConverter):
"""
Converts a :class:`thelma.entities.racklayout.RackLayout` into a
:class:`LibraryBaseLayout`.
"""
NAME = 'Library Base Layout Converter'
PARAMETER_SET = LibraryBaseLayoutParameters
LAYOUT_CLS = LibraryBaseLayout
POSITION_CLS = LibraryBaseLayoutPosition
def __init__(self, rack_layout, parent=None):
BaseLayoutConverter.__init__(self, rack_layout, parent=parent)
# intermediate storage of invalid rack positions
self.__invalid_flag = None
def reset(self):
BaseLayoutConverter.reset(self)
self.__invalid_flag = []
def _get_position_init_values(self, parameter_map, rack_pos):
"""
Derives a working position from a parameter map (including validity
checks).
"""
is_sample_pos_str = parameter_map[self.PARAMETER_SET.IS_SAMPLE_POS]
pos_label = rack_pos.label
if is_sample_pos_str is None: return None
values = {str(True) : True, str(False) : False}
if not values.has_key(is_sample_pos_str):
info = '%s (%s)' % (pos_label, is_sample_pos_str)
self.__invalid_flag.append(info)
else:
return dict(is_sample_position=values[is_sample_pos_str])
def _record_errors(self):
BaseLayoutConverter._record_errors(self)
if len(self.__invalid_flag) > 0:
msg = 'The "sample position" flag must be a boolean. The values ' \
'for some positions are invalid. Details: %s.' \
% (', '.join(sorted(self.__invalid_flag)))
self.add_error(msg)
def _perform_layout_validity_checks(self, working_layout):
"""
We do not check anything but we close the layout.
"""
working_layout.close()
class LibraryLayout(StockSampleCreationLayout):
"""
    A special :class:`StockSampleCreationLayout` for a plate involved
in library generation (either :class:`IsoAliquotPlate` (rack shape 16x24)
or :class:`IsoSectorPreparationPlate` (rack shape 8x12)).
"""
def __init__(self, shape):
"""
Constructor:
:param shape: The rack shape.
:type shape: :class:`thelma.entities.rack.RackShape`
"""
StockSampleCreationLayout.__init__(self, shape)
        #: Allows validation of new positions (only set if the layout is
        #: initialised via :func:`from_base_layout`).
self.base_layout_positions = None
@classmethod
def from_base_layout(cls, base_layout):
"""
Creates a new library layout which will only accept positions that
are part of the base layout.
"""
base_layout.close()
layout = LibraryLayout(shape=base_layout.shape)
layout.base_layout_positions = base_layout.get_positions()
return layout
def add_position(self, working_position):
"""
Adds a :class:`Working_position` to the layout.
:param working_position: The transfer position to be added.
:type working_position: :class:`LibraryPosition`
:raise ValueError: If the rack position is not allowed by the
base layout.
:raises TypeError: If the added position is not a
:class:`TransferPosition` object.
"""
rack_pos = working_position.rack_position
if not self.base_layout_positions is None and \
not rack_pos in self.base_layout_positions:
msg = 'Position %s is not part of the base layout. It must not ' \
'take up samples.' % (rack_pos)
raise ValueError(msg)
WorkingLayout.add_position(self, working_position)
|
mit
| 6,090,208,923,717,987,000
| 33.408946
| 80
| 0.644475
| false
| 3.893709
| false
| false
| false
|
ReddyLab/1000Genomes
|
make-intron-retention-slurms.py
|
1
|
1769
|
#!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Copyright (C)2017 William H. Majoros (martiandna@gmail.com).
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
from SlurmWriter import SlurmWriter
THOUSAND="/home/bmajoros/1000G/assembly"
GEUVADIS=THOUSAND+"/geuvadis.txt"
SLURM_DIR=THOUSAND+"/intron-slurms"
JOB_NAME="INTRON"
MAX_PARALLEL=1000
NICE=500
MEMORY=0
THREADS=0
#=========================================================================
# main()
#=========================================================================
dirs=[]
with open(GEUVADIS,"rt") as IN:
for line in IN:
id=line.rstrip()
dir=THOUSAND+"/combined/"+id
dirs.append(dir)
writer=SlurmWriter()
for dir in dirs:
writer.addCommand("cd "+dir+"/RNA3\n"+
THOUSAND+"/src/get-intron-retentions.py "+
"../1.gff ../2.gff ../1.lengths ../2.lengths "+
"depth.txt.gz > IR.txt\n"
)
writer.setQueue("new,all")
writer.nice(NICE)
if(MEMORY): writer.mem(MEMORY)
if(THREADS): writer.threads(THREADS)
writer.writeArrayScript(SLURM_DIR,JOB_NAME,MAX_PARALLEL)
|
gpl-2.0
| 681,284,877,154,889,300
| 35.854167
| 74
| 0.560204
| false
| 3.693111
| false
| false
| false
|
luisfg30/Webserver
|
tests/table (1).py
|
1
|
1978
|
from tkinter import *
class ExampleApp(Tk):
def __init__(self):
Tk.__init__(self)
list2=["Data","Página","Tipo de Resuisição"]
t = SimpleTable(self,list2)
t.pack(side="top", fill="x")
list=["abc","def","ghi"]
t.insert_row(list)
t.insert_row(list)
t.set_cell(1,0,"hello world")
#print(t._widgets)
t.set_row(1,list2)
class SimpleTable(Canvas):
def __init__(self, parent,value_names):
self.rows=0
self.columns=len(value_names)
# use black background so it "peeks through" to
# form grid lines
Canvas.__init__(self, parent, background="black")
self._widgets = []
self.current_row=0
for j in range(self.columns):
self.grid_columnconfigure(j, weight=1)
#add first line
new_row = []
for j in range(self.columns):
label = Label(self, text=value_names[j],font=("Verdana 9 bold"),borderwidth=0, width=len(value_names[j]))
label.grid(row=self.current_row, column=j, sticky="nsew", padx=1, pady=1)
new_row.append(label)
self._widgets.append(new_row)
self.current_row+=1
self.rows+=1
def set_cell(self, i, j, value):
widget = self._widgets[i][j]
widget.configure(text=value)
def set_row(self,i,values):
for j in range(len(values)):
widget = self._widgets[i][j]
widget.configure(text=values[j])
def insert_row(self,values):
self.current_row+=1
self.rows+=1
new_row = []
for j in range(len(values)):
label = Label(self, text=values[j],borderwidth=0, width=10)
label.grid(row=self.current_row, column=j, sticky="nsew", padx=1, pady=1)
new_row.append(label)
self._widgets.append(new_row)
if __name__ == "__main__":
app = ExampleApp()
app.mainloop()
|
gpl-2.0
| -3,464,466,802,696,428,000
| 29.875
| 117
| 0.547342
| false
| 3.440767
| false
| false
| false
|
eubr-bigsea/tahiti
|
migrations/versions/185a7d89aa72_remove_field_from_form_85.py
|
1
|
3078
|
"""Remove Field from Form 85
Revision ID: 185a7d89aa72
Revises: 2eaeb4b0c43f
Create Date: 2018-04-17 14:28:35.098385
"""
import json
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '185a7d89aa72'
down_revision = '2eaeb4b0c43f'
branch_labels = None
depends_on = None
X_FORMAT_ID = 307
LEGEND_ID = 310
FORM_ID = 85
def upgrade():
op.execute('DELETE FROM operation_form_field WHERE id={}'.format(X_FORMAT_ID))
op.execute('DELETE FROM operation_form_field_translation WHERE id={}'.format(X_FORMAT_ID))
op.execute('DELETE FROM operation_form_field WHERE id={}'.format(LEGEND_ID))
op.execute('DELETE FROM operation_form_field_translation WHERE id={}'.format(LEGEND_ID))
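# downgrade() below re-inserts the two removed fields and their translations with the
# original ids, so the migration can be reverted.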
def downgrade():
tb = table(
'operation_form_field',
column('id', Integer),
column('name', String),
column('type', String),
column('required', Integer),
column('order', Integer),
column('default', Text),
column('suggested_widget', String),
column('values_url', String),
column('values', String),
column('scope', String),
column('form_id', Integer),
)
columns = [c.name for c in tb.columns]
supported_formats = [
{"key": "%Y-%m-%dT%H:%M:%S.%LZ",
"value": "%Y-%m-%dT%H:%M:%S.%LZ"},
{"key": "%m-%d", "value": "%m-%d"},
{"key": "%d-%", "value": "%d-%m"},
{"key": "%Y-%m-%d", "value": "%Y-%m-%d"},
{"key": "%m-%Y-%d", "value": "%m-%Y-%d"},
{"key": "%m-%Y-%d", "value": "%m-%Y-%d"},
{"key": "%m-%Y-%d %H:%M",
"value": "%m-%Y-%d %H:%M"},
{"key": "%m-%Y-%d %H:%M",
"value": "%m-%Y-%d %H:%M"},
{"key": "%m-%Y-%d %H:%M:%S", "value": "%m-%Y-%d %H:%M:%S"},
{"key": "%m-%Y-%d %H:%M:%S",
"value": "%m-%Y-%d %H:%M:%S"},
{"key": "%H:%M", "value": "%H:%M"},
{"key": "%H:%M:%S", "value": "%H:%M:%S"},
{"key": ".2", "value": ".2"},
{"key": ".4", "value": ".4"},
{"key": "%", "value": "%"},
{"key": "p", "value": "p"},
{"key": "d", "value": "d"}
]
data = [
[X_FORMAT_ID, 'x_format', 'TEXT', 0, 8, None, 'select2', None, json.dumps(supported_formats), 'EXECUTION', FORM_ID],
[LEGEND_ID, 'legend', 'INTEGER', 0, 5, 1, 'checkbox', None, None, 'EXECUTION', FORM_ID],
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
tb = table(
'operation_form_field_translation',
column('id', Integer),
column('locale', String),
column('label', String),
column('help', String), )
columns = [c.name for c in tb.columns]
data = [
[X_FORMAT_ID, 'en', 'X-axis format', 'X-axis format'],
[X_FORMAT_ID, 'pt', 'Formato para eixo X', 'Formato para eixo X'],
[LEGEND_ID, 'en', 'Display Legend', 'Display Legend'],
[LEGEND_ID, 'pt', 'Exibir Legenda', 'Exibir Legenda'],
]
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
|
apache-2.0
| 9,139,908,008,826,830,000
| 30.408163
| 120
| 0.536387
| false
| 2.937023
| false
| false
| false
|
clach04/Stache
|
__init__.py
|
1
|
17723
|
from __future__ import generators
import sys
from cgi import escape
try:
raise ImportError
import itertools
itertools_takewhile = itertools.takewhile
except ImportError:
# fake it
def takewhile(predicate, iterable):
# takewhile(lambda x: x<5, [1,4,6,4,1]) --> 1 4
for x in iterable:
if predicate(x):
yield x
else:
break
itertools_takewhile = takewhile
try:
from sys import intern
except ImportError:
pass
string_func = unicode
TOKEN_RAW = intern('raw')
TOKEN_TAGOPEN = intern('tagopen')
TOKEN_TAGINVERT = intern('taginvert')
TOKEN_TAGCLOSE = intern('tagclose')
TOKEN_TAGCOMMENT = intern('tagcomment')
TOKEN_TAGDELIM = intern('tagdelim')
TOKEN_TAG = intern('tag')
TOKEN_PARTIAL = intern('partial')
TOKEN_PUSH = intern('push')
TOKEN_BOOL = intern('bool')
BOOTSRAP_PRE = """
(function(data){
var isArray = Array.isArray || function(obj) {
return toString.call(obj) == '[object Array]';
},
each = function(obj, iterator, context) {
if (obj == null) return;
if (Array.prototype.forEach && obj.forEach === Array.prototype.forEach) {
obj.forEach(iterator, context);
} else if (obj.length === +obj.length) {
for (var i = 0, l = obj.length; i < l; i++) {
if (i in obj && iterator.call(context, obj[i], i, obj) === breaker) return;
}
} else {
for (var key in obj) {
if (obj.hasOwnProperty(key)) {
if (iterator.call(context, obj[key], key, obj) === breaker) return;
}
}
}
},
map = function(obj, iterator, context) {
var results = [];
if (obj == null) return results;
if (Array.prototype.map && obj.map === Array.prototype.map) return obj.map(iterator, context);
each(obj, function(value, index, list) {
results[results.length] = iterator.call(context, value, index, list);
});
if (obj.length === +obj.length) results.length = obj.length;
return results;
},
htmlEncode = function(str) {
return String(str)
.replace(/&/g, '&')
.replace(/"/g, '"')
.replace(/'/g, ''')
.replace(/</g, '<')
.replace(/>/g, '>');
},
lookup = function (data, datum) {
var i = 0,
l = data ? data.length : 0;
for (; i < l; i += 1) {
if (datum === '.') {
return data[i]
} else if (data[i] !== void 0 && data[i][datum] !== void 0 && data[i][datum] !== false) {
if (toString.call(data[i][datum]) == '[object Function]') {
return data[i][datum](data)
} else {
return data[i][datum]
}
}
}
return '';
},
section = function(data, tagvalue, callback, invert){
invert = invert || false;
if (isArray(tagvalue)) {
if (!invert && tagvalue.length > 0) {
return map(tagvalue, function(v) { return callback([v].concat(data))}).join('')
} else if (invert && tagvalue.length == 0) {
return callback(data);
}
} else {
if((!invert && tagvalue) || (invert && !tagvalue)) {
if (tagvalue !== void 0 || tagvalue !== true) {
return callback([tagvalue].concat(data));
} else {
return callback(data);
}
}
}
};
"""
BOOTSRAP_POST = """
})
"""
def _checkprefix(tag, prefix):
if tag and tag[0] == prefix:
return tag[1:].strip()
else:
return None
def _lookup(data, datum):
for scope in data:
if datum == '.':
return string_func(scope)
elif datum in scope:
return scope[datum]
elif hasattr(scope, datum):
return getattr(scope, datum)
return None
def _renderjsfunction(parts, prefix = "", postfix = "", params="data, tag"):
return "function({params}) {{{prefix} return {content} {postfix} }}".format(
content=_renderjsjoin(*parts),
prefix=prefix,
postfix=postfix,
params=params)
def _renderjsjoin(*args):
return "[{0}].join('');".format(','.join(args))
def render(template, data):
return Stache().render(template, data)
def render_js(template):
return Stache().render_js(template)
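# A minimal usage sketch (hypothetical template and data, not taken from this
# module's tests; the output shown assumes the Python 2 unicode semantics used here):
#   >>> render('Hello {{name}}! {{#items}}{{.}} {{/items}}', {'name': 'world', 'items': [1, 2, 3]})
#   u'Hello world! 1 2 3 '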
class Stache(object):
def __init__(self):
self.otag = '{{'
self.ctag = '}}'
self.templates = {}
self.hoist = {}
self.hoist_data = {}
self.section_counter = 0
def copy(self):
copy = Stache()
copy.templates = self.templates
return copy
def add_template(self, name, template):
self.templates[name] = list(self._tokenize(template))
def render(self, template, data={}):
self.otag = '{{'
self.ctag = '}}'
return ''.join(self._parse(self._tokenize(template), data))
def render_iter(self, template, data={}):
copy = self.copy()
return copy._parse(copy._tokenize(template), data)
def render_template(self, template_name, data={}):
self.otag = '{{'
self.ctag = '}}'
return ''.join(self._parse(iter(list(self.templates[template_name])), data))
def render_template_iter(self, template_name, data={}):
copy = self.copy()
return copy._parse(iter(list(copy.templates[template_name])), data)
def _js_hoisted(self, bare=True):
hoist = ''
if self.templates:
hoist += "\n var templates = {};\n"
for name in self.templates:
render_function = list(self._jsparse(iter(list(self.templates[name]))))
newparams = "data"
prefix = ""
if not bare and self.hoist_data:
hoisted = map(lambda x: '"{0}": {1}, '.format(x, self.hoist_data[x], "baseData"), self.hoist_data.keys())
prefix = ' var data = [dat2, {{{0}}}];'.format(', '.join(hoisted))
self.hoist_data = {}
newparams = 'dat2';
hoist += ' templates["{0}"] = {1};\n'.format(name, _renderjsfunction(render_function, prefix=prefix, params=newparams))
if self.hoist:
for name in self.hoist:
hoist += ' var {0} = {1};\n'.format(name, self.hoist[name])
if bare:
if self.hoist_data:
for name in self.hoist_data:
hoist += ' {2}["{0}"] = {1};\n'.format(name, self.hoist_data[name], "data")
return hoist
def render_js(self, template):
copy = self.copy()
renderedjs = _renderjsjoin(*list(copy._jsparse(copy._tokenize(template))))
hoist = copy._js_hoisted()
jstemplate = "{0}\n {1}\n data = [data];\n return {2};\n{3}"
return jstemplate.format(BOOTSRAP_PRE, hoist, renderedjs, BOOTSRAP_POST)
def render_js_template(self, template_name):
copy = self.copy()
hoist = copy._js_hoisted(bare=False)
jstemplate = "{0}\n {1}\n return templates['{2}']([data]);\n{3}"
return jstemplate.format(BOOTSRAP_PRE, hoist, template_name, BOOTSRAP_POST)
def render_all_js(self):
copy = self.copy()
hoist = copy._js_hoisted(bare=False)
jstemplate = "{0}\n var baseData={{}};\n {1}\n return templates;\n{2}"
return jstemplate.format(BOOTSRAP_PRE, hoist, BOOTSRAP_POST)
def _tokenize(self, template):
rest = template
scope = []
while rest and len(rest) > 0:
pre_section = rest.split(self.otag, 1)
if len(pre_section) == 2:
pre, rest = pre_section
else:
pre, rest = (pre_section[0], None)
if rest:
taglabel, rest = rest.split(self.ctag, 1)
else:
taglabel, rest = (None, None)
if taglabel:
taglabel = taglabel.strip()
else:
taglabel = ''
open_tag = _checkprefix(taglabel, '#')
if not open_tag:
invert_tag = _checkprefix(taglabel, '^')
else:
invert_tag = None
if not invert_tag:
close_tag = _checkprefix(taglabel, '/')
else:
close_tag = None
comment_tag = None
partial_tag = None
push_tag = None
bool_tag = None
booltern_tag = None
unescape_tag = None
if not close_tag:
comment_tag = _checkprefix(taglabel, '!')
if not comment_tag:
partial_tag = _checkprefix(taglabel, '>')
if not partial_tag:
push_tag = _checkprefix(taglabel, '<')
if not push_tag:
bool_tag = _checkprefix(taglabel, '?')
if not bool_tag:
booltern_tag = _checkprefix(taglabel, ':')
if not booltern_tag:
unescape_tag = _checkprefix(taglabel, '{')
if unescape_tag:
rest = rest[1:]
else:
rest = rest # FIXME seems like a NOOP
if not booltern_tag:
unescape_tag = (unescape_tag or _checkprefix(taglabel, '&'))
else:
unescape_tag = None
if not unescape_tag and len(taglabel) >= 2 and taglabel[0] == '=' and taglabel[-1] == '=':
delim_tag = taglabel[1:-1]
else:
delim_tag = None
if delim_tag:
delim_tag = delim_tag.split(' ', 1)
else:
delim_tag = None
if delim_tag and len(delim_tag) == 2:
delim_tag = delim_tag
else:
delim_tag = None
if push_tag:
pre = pre.rstrip()
rest = rest.lstrip()
if pre:
yield TOKEN_RAW, pre, len(scope)
if open_tag:
scope.append(open_tag)
yield TOKEN_TAGOPEN, open_tag, len(scope)
elif bool_tag:
scope.append(bool_tag)
yield TOKEN_BOOL, bool_tag, len(scope)
elif invert_tag:
scope.append(invert_tag)
yield TOKEN_TAGINVERT, invert_tag, len(scope)
elif close_tag is not None:
current_scope = scope.pop()
if close_tag:
assert (current_scope == close_tag), 'Mismatch open/close blocks'
yield TOKEN_TAGCLOSE, current_scope, len(scope)+1
elif booltern_tag:
scope.append(booltern_tag)
yield TOKEN_TAG, booltern_tag, 0
yield TOKEN_TAGINVERT, booltern_tag, len(scope)
elif comment_tag:
yield TOKEN_TAGCOMMENT, comment_tag, 0
elif partial_tag:
yield TOKEN_PARTIAL, partial_tag, 0
elif push_tag:
scope.append(push_tag)
yield TOKEN_PUSH, push_tag, len(scope)
elif delim_tag:
yield TOKEN_TAGDELIM, delim_tag, 0
elif unescape_tag:
yield TOKEN_TAG, unescape_tag, True
else:
yield TOKEN_TAG, taglabel, False
def _parse(self, tokens, *data):
for token in tokens:
#print ' token:' + string_func(token)
tag, content, scope = token
if tag == TOKEN_RAW:
yield string_func(content)
elif tag == TOKEN_TAG:
tagvalue = _lookup(data, content)
#cant use if tagvalue because we need to render tagvalue if it's 0
#testing if tagvalue == 0, doesnt work since False == 0
if tagvalue is not None and tagvalue is not False:
try:
if len(tagvalue) > 0:
if scope:
yield string_func(tagvalue)
else:
yield escape(string_func(tagvalue))
except TypeError:
if scope:
yield string_func(tagvalue)
else:
yield escape(string_func(tagvalue))
elif tag == TOKEN_TAGOPEN or tag == TOKEN_TAGINVERT:
tagvalue = _lookup(data, content)
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
if (tag == TOKEN_TAGOPEN and tagvalue) or (tag == TOKEN_TAGINVERT and not tagvalue):
if hasattr(tagvalue, 'items'):
#print ' its a dict!', tagvalue, untilclose
for part in self._parse(untilclose, tagvalue, *data):
yield part
else:
try:
iterlist = list(iter(tagvalue))
if len(iterlist) == 0:
raise TypeError
#print ' its a list!', list(rest)
#from http://docs.python.org/library/itertools.html#itertools.tee
#In general, if one iterator uses most or all of the data before
#another iterator starts, it is faster to use list() instead of tee().
rest = list(untilclose)
for listitem in iterlist:
for part in self._parse(iter(rest), listitem, *data):
yield part
except TypeError:
#print ' its a bool!'
for part in self._parse(untilclose, *data):
yield part
else:
for ignore in untilclose:
pass
elif tag == TOKEN_BOOL:
tagvalue = _lookup(data, content)
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
if tagvalue:
for part in self._parse(untilclose, *data):
yield part
else:
for part in untilclose:
pass
elif tag == TOKEN_PARTIAL:
if content in self.templates:
for part in self._parse(iter(list(self.templates[content])), *data):
yield part
elif tag == TOKEN_PUSH:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
data[-1][content] = ''.join(self._parse(untilclose, *data))
elif tag == TOKEN_TAGDELIM:
self.otag, self.ctag = content
def _jsparse(self, tokens):
self.otag = '{{'
self.ctag = '}}'
for token in tokens:
tag, content, scope = token
if tag == TOKEN_RAW:
yield "'{0}'".format(string_func(content))
elif tag == TOKEN_TAG:
if content != '':
if scope:
yield "lookup(data, '{0}')".format(content)
else:
yield "htmlEncode(lookup(data, '{0}'))".format(content)
elif tag == TOKEN_TAGOPEN or tag == TOKEN_TAGINVERT or tag == TOKEN_BOOL:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
inside = self._jsparse(untilclose)
if tag == TOKEN_TAGOPEN:
pre = "return section(data, lookup(data, tag), function (data) {"
post = "});"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_TAGINVERT:
pre = "return section(data, lookup(data, tag), function (data) {"
post = "}, true);"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_BOOL:
pre = "var tagvalue = lookup(data, tag); if ((!isArray(tagvalue) && tagvalue) || (isArray(tagvalue)) && tagvalue.length > 0){"
post = "}"
self.hoist["__section{0}".format(len(self.hoist))] = _renderjsfunction(inside, pre, post)
yield "__section{1}(data, '{0}')".format(content, len(self.hoist)-1)
elif tag == TOKEN_PARTIAL:
yield "templates['{0}'](data)".format(content)
elif tag == TOKEN_PUSH:
untilclose = itertools_takewhile(lambda x: x != (TOKEN_TAGCLOSE, content, scope), tokens)
self.hoist_data[content] = _renderjsfunction(self._jsparse(untilclose), params="data")
elif tag == TOKEN_TAGDELIM:
self.otag, self.ctag = content
|
mit
| 7,108,883,387,215,044,000
| 37.444685
| 146
| 0.486092
| false
| 4.115885
| false
| false
| false
|
acrosby/get-git-hash
|
git.py
|
1
|
1455
|
#
# Copyright 2013 A.Crosby
# See LICENSE for license information
#
import subprocess, os
def subs(cmd):
p = subprocess.Popen(cmd, shell=True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
out = p.communicate()
if len(out[1])>0:
raise ValueError("Error using git through subprocess: %s" % (out[1],))
else:
return out[0]
def check(repo):
cmd = "cd %s && git status | grep 'modified:'" % (repo,)
modified = subs(cmd)
cmd = "cd %s && git status | grep 'new file:'" % (repo,)
new = subs(cmd)
if len(modified) > 0 or len(new) > 0:
raise ValueError("Please commit the changes to the repository '%s'" % (repo,))
def current_hash(repo):
check(repo)
cmd = "cd %s && git log | head -n 1" % (repo,)
out = subs(cmd)
out = out.strip("commit").strip(" ").strip("\n")
return out
def current_branch(repo):
check(repo)
cmd = "cd %s && git status | grep 'On branch'" % (repo,)
out = subs(cmd)
out = out.strip("# On branch ").strip(" ").strip("\n")
return out
def unique(repo):
check(repo)
branch = current_branch(repo)
hash = current_hash(repo)
return branch + "-" + hash
def prepend_unique(repo, filename):
path = os.path.abspath(filename)
fnames = os.path.split(path)
this = unique(repo)
return os.path.join(fnames[0], this+"_"+fnames[1])
unique_path = prepend_unique
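# A minimal usage sketch (the repository path and hash shown are hypothetical):
#   >>> current_hash('/path/to/repo')
#   '0123abcd...'
#   >>> unique('/path/to/repo')                        # '<branch>-<hash>'
#   >>> prepend_unique('/path/to/repo', 'results/output.nc')
#   # -> '/abs/path/to/results/<branch>-<hash>_output.nc'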
|
mit
| -6,748,326,175,159,556,000
| 27.529412
| 86
| 0.580069
| false
| 3.352535
| false
| false
| false
|
schwertfjo/PowerEnergyIC_CS5461_python2Driver
|
CS5461.py
|
1
|
4234
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import spidev
import time
class cs5461:
# define command bytes
sync0 = 254
sync1 = 255
reset = 128
compu = 232
# default settings
default_mode = 2
default_speed = 100000
default_inverted = True # due to optocouplers
def __init__(self, mode = default_mode, speed = default_speed, inverted = default_inverted):
self.spi = spidev.SpiDev()
self.spi_mode = mode
self.spi_speed = speed
self.inverted = inverted
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)
self.Init()
def rw(self, bytes):
send_bytes = []
ret = -1
if type(bytes) is int:
send_bytes = [bytes] + [self.sync0] * 3
elif type(bytes) is list:
send_bytes = bytes + [self.sync0] * (4 - len(bytes))
self.spi.open(0,0)
self.spi.mode = self.spi_mode
self.spi.max_speed_hz = self.spi_speed
if self.inverted:
r = self.spi.xfer2( map(lambda x: x ^ 0xFF, send_bytes) )
ret = map(lambda x: x ^ 0xFF, r)
else:
ret = self.spi.xfer2( send_bytes )
self.spi.close()
return ret
def Reset(self):
self.rw(self.reset)
def Sync(self):
self.rw([self.sync1]*3 + [self.sync0])
def Init(self):
# chip reset cycle via gpio25
GPIO.output(25, True)
time.sleep(1)
GPIO.output(25, False)
time.sleep(1)
self.Sync()
self.Reset()
self.Sync()
wrReg00 = 0x40 # Config
wrReg01 = 0x42 # Current Offset
wrReg02 = 0x44 # Current Gain
wrReg03 = 0x46 # Voltage Offset
wrReg04 = 0x48 # Voltage Gain
wrReg13 = 0x5A # Timebase Calibration
wrReg14 = 0x5C # Power Offset Calibration
wrReg16 = 0x60 # Current Channel AC Offset
wrReg17 = 0x62 # Voltage Channel AC Offset
# good working calibration data for energenie power meter lan (determined by trial)
self.rw([wrReg00, 0b1, 0b0, 0b1])
self.rw([wrReg01, 0xFF, 0xB5, 0x62])
self.rw([wrReg02, 0x54, 0xFE, 0xFF])
self.rw([wrReg03, 0x15, 0x8C, 0x71])
self.rw([wrReg04, 0x3D, 0xE0, 0xEF])
self.rw([wrReg13, 0x83, 0x12, 0x6E])
self.rw([wrReg14, 0xFF, 0xCF, 0xC3])
self.rw([wrReg16, 0x00, 0x01, 0x4A])
self.rw([wrReg17, 0x00, 0x44, 0xCA])
# Perform continuous computation cycles
self.rw(self.compu)
time.sleep(2) # wait until values becomes good
def readregister(self, register):
if register > 31 or register < 0: #just check range
return -1
self.Sync()
received = self.rw(register << 1)
return received[1]*256*256 + received[2]*256 + received[3]
def getregister(self, register):
Expotential= [ 0, -23, -22, -23, -22, -0, -5, -23, -23, -23, # 0:9 decimal point position
-23, -24, -24, -23, -23, 0, -24, -24, -5, -16, # 10:19
0, 0, -22, -23, 0, 0, 0, 0, 0, 0, 0, 0 ] # 20:31
Binary = [0, 15, 26, 28] # binary registers
twosComplement =[1, 7, 8, 9, 10, 14, 19, 23] # two's complement registers
if register > 31 or register < 0: # just check range
return -1
value = self.readregister(register)
if register in Binary:
return bin(value)
elif register in twosComplement:
if value > 2**23:
value = ((value ^ 0xFFFFFF) + 1) * -1 # convert to host two's complement system
return value * 2**Expotential[register]
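        # Worked example (hypothetical raw reading, not a measured value): a raw
        # 24-bit word of 0xFFFFFF read from a two's complement register such as
        # register 1 is above 2**23, so it becomes ((0xFFFFFF ^ 0xFFFFFF) + 1) * -1
        # = -1, and scaling by 2**-23 yields roughly -1.19e-07.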
def main():
Ugain = 400
Igain = 10
Egain = 4000
device = cs5461()
# for i in range(32):
# print i, device.getregister(i)
while True:
Irms = device.getregister(11)
Urms = device.getregister(12)
Erms = device.getregister(10)
I = round(Irms*Igain, 3)
U = round(Urms*Ugain, 1)
E = round(Erms*Egain, 1)
print( "voltage = %.1fV current = %.3fA power = %.1fW" % (U, I, E) )
time.sleep(1)
if __name__ == '__main__':
main()
|
gpl-2.0
| 1,260,076,423,460,905,200
| 31.821705
| 101
| 0.549835
| false
| 3.150298
| false
| false
| false
|
tentangdata/ig
|
helpers.py
|
1
|
1557
|
import os
import yaml
class AppConfig(object):
DB_URL_TEMPLATE = "{}://{}:{}@{}:{}/{}"
def __init__(self, db_type,
db_host, db_port, db_name,
db_username, db_password,
file_in_dir, file_out_dir,
posts_dir):
self.db_type = db_type
self.db_host = db_host
self.db_port = db_port
self.db_name = db_name
self.db_username = db_username
self.db_password = db_password
self.file_in_dir = file_in_dir
self.file_out_dir = file_out_dir
self.posts_dir = posts_dir
def get_db_url(self):
return AppConfig.DB_URL_TEMPLATE.format(
self.db_type,
self.db_username,
self.db_password,
self.db_host,
self.db_port,
self.db_name
)
class AppConfigParser(object):
""" IG App Config Parser
only accept yml format
"""
def __init__(self):
self._config_file_path = os.getenv(
'IG_CONF_PATH',
'config.yml'
)
def parse(self):
_config = yaml.load(
open(self._config_file_path, 'r')
)
return AppConfig(**_config)
if __name__ == '__main__':
""" for running simple tests """
app_conf_parser = AppConfigParser()
app_conf = app_conf_parser.parse()
assert app_conf.db_host == 'localhost'
assert app_conf.db_type == 'postgresql'
assert app_conf.get_db_url() \
== 'postgresql://postgres:postgres@localhost:5432/ig'
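# A sketch of the YAML layout config.yml is expected to have (keys inferred from
# AppConfig.__init__; the db values mirror the asserts above, the directory
# paths are placeholders):
#
#   db_type: postgresql
#   db_host: localhost
#   db_port: 5432
#   db_name: ig
#   db_username: postgres
#   db_password: postgres
#   file_in_dir: /tmp/in
#   file_out_dir: /tmp/out
#   posts_dir: /tmp/posts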
|
mit
| 8,782,905,886,537,452,000
| 24.966667
| 64
| 0.526012
| false
| 3.571101
| true
| false
| false
|
NationalSecurityAgency/ghidra
|
Ghidra/Extensions/SleighDevTools/pcodetest/build.py
|
1
|
10519
|
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import shutil
import subprocess
import sys
import pwd
import grp
import re
class BuildUtil(object):
def __init__(self):
self.log = False
self.name = False
self.num_errors = 0
self.num_warnings = 0
def run(self, cmd, stdout=False, stderr=False, verbose=True):
if isinstance(cmd, basestring):
if stdout and stderr:
cmd += ' 1>%s 2>%s' % (stdout, stderr)
elif stdout and not stderr:
cmd += ' 1>%s 2>&1' % (stdout)
elif not stdout and stderr:
cmd += ' 2>%s' % (stderr)
if verbose: self.log_info(cmd)
os.system(cmd)
else:
str = ' '.join(cmd);
if stdout:
f = file(stdout, 'w+')
str += ' 1>%s 2>&1' % (stdout)
else:
f = subprocess.PIPE
if verbose: self.log_info(str)
try:
sp = subprocess.Popen(cmd, stdout=f, stderr=subprocess.PIPE)
except OSError as e:
self.log_err("Command: " + str)
self.log_err(e.message)
return 0,e.message#raise
if stdout: f.close()
out, err = sp.communicate()
# print 'run returned %d bytes stdout and %d bytes stderr' % (len(out) if out else 0, len(err) if err else 0)
return out, err
def isdir(self, dname):
return os.path.isdir(dname)
def getcwd(self):
return os.getcwd()
def basename(self, fname):
return os.path.basename(fname)
def dirname(self, fname):
return os.path.dirname(fname)
def getmtime(self, fname):
return os.path.getmtime(fname)
def isfile(self, fname):
return os.path.isfile(fname)
def getenv(self, var, dflt):
return os.getenv(var, dflt)
def pw_name(self, fname):
return pwd.getpwuid(os.stat(fname).st_uid).pw_name
def gr_name(self, fname):
return grp.getgrgid(os.stat(fname).st_gid).gr_name
def isatty(self):
return os.isatty(sys.stdin.fileno())
def is_readable_file(self, fname):
if not self.isfile(fname):
self.log_warn('%s does not exist' % fname)
return False
if os.stat(fname).st_size == 0:
self.log_warn('%s is empty' % fname)
return False
if os.access(fname, os.R_OK) == 0:
self.log_warn('%s is not readable' % fname)
return False
return True
def is_executable_file(self, fname):
if not self.is_readable_file(fname): return False
if os.access(fname, os.X_OK) == 0:
self.log_warn('%s is not executable' % fname)
return False
return True
# export a file to a directory
def export_file(self, fname, dname,):
try:
if not os.path.isdir(dname):
self.makedirs(dname)
if os.path.isfile(fname):
self.copy(fname, dname, verbose=True)
elif os.path.isdir(fname):
self.copy(fname, dname, dir=True, verbose=True)
except IOError as e:
self.log_err('Error occurred exporting %s to %s' % (fname, dname))
self.log_err("Unexpected error: %s" % str(e))
def rmtree(self, dir, verbose=True):
if verbose: self.log_info('rm -r %s' % dir)
shutil.rmtree(dir)
def makedirs(self, dir, verbose=True):
if verbose: self.log_info('mkdir -p %s' % dir)
try: os.makedirs(dir)
except: pass
# copy a file to a directory
def copy(self, fname, dname, verbose=True, dir=False):
if not dir:
if verbose: self.log_info('cp -av %s %s' % (fname, dname))
shutil.copy(fname, dname)
else:
if verbose: self.log_info('cp -avr %s %s' % (fname, dname))
if os.path.exists(dname):
shutil.rmtree(dname)
shutil.copytree(fname, dname)
def chdir(self, dir, verbose=True):
if verbose: self.log_info('cd %s' % dir)
os.chdir(dir)
def remove(self, fname, verbose=True):
if verbose: self.log_info('rm -f %s' % fname)
try: os.remove(fname)
except: pass
def environment(self, var, val, verbose=True):
if verbose: self.log_info('%s=%s' % (var, val))
os.environ[var] = val
def unlink(self, targ, verbose=True):
if verbose: self.log_info('unlink %s' % targ)
os.unlink(targ)
def symlink(self, src, targ, verbose=True):
if verbose: self.log_info('ln -s %s %s' % (src, targ))
if os.path.islink(targ):
os.unlink(targ)
os.symlink(src, targ)
def build_dir(self, root, kind, what):
return root + "/" + re.sub(r'[^a-zA-Z0-9_-]+', '_', 'build-%s-%s' % (kind, what))
def log_prefix(self, kind, what):
return kind.upper() + ' ' + what
def open_log(self, root, kind, what, chdir=False):
build_dir = self.build_dir(root, kind, what)
# Get the name of the log file
logFile = '%s/log.txt' % build_dir
self.log_info('%s LOGFILE %s' % (self.log_prefix(kind, what), logFile))
try: self.rmtree(build_dir, verbose=False)
except: pass
self.makedirs(build_dir, verbose=False)
self.log_open(logFile)
if chdir: self.chdir(build_dir)
def log_open(self, name):
if self.log: self.log_close()
self.log = open(name, 'w')
self.name = name
def log_close(self):
if self.log:
if self.num_errors > 0:
print '# ERROR: There were errors, see %s' % self.name
elif self.num_warnings > 0:
print '# WARNING: There were warnings, see %s' % self.name
self.log.close()
self.log = False
self.name = False
self.num_errors = 0
self.num_warnings = 0
def log_pr(self, prefix, what):
if isinstance(what, basestring):
log_string = prefix + what
else:
log_string = prefix + repr(what)
if self.log:
self.log.write(log_string + '\n')
self.log.flush()
else:
print log_string
sys.stdout.flush()
def log_err(self, what):
self.log_pr('# ERROR: ', what)
self.num_errors += 1
def log_warn(self, what):
self.log_pr('# WARNING: ', what)
self.num_warnings += 1
def log_info(self, what):
self.log_pr('# INFO: ', what)
# create a file with size, type, and symbol info
# the function is here because it is useful and has no dependencies
def mkinfo(self, fname):
ifdefs = { 'i8':'HAS_LONGLONG', 'u8':'HAS_LONGLONG', 'f4':'HAS_FLOAT', 'f8':'HAS_DOUBLE' }
sizes = [
'char', 'signed char', 'unsigned char',
'short', 'signed short', 'unsigned short',
'int', 'signed int', 'unsigned int',
'long', 'signed long', 'unsigned long',
'long long', 'signed long long', 'unsigned long long',
'float', 'double', 'float', 'long double',
'i1', 'i2', 'i4', 'u1', 'u2', 'u4', 'i8', 'u8', 'f4', 'f8']
syms = [
'__AVR32__', '__AVR_ARCH__', 'dsPIC30', '__GNUC__', '__has_feature', 'INT4_IS_LONG',
'__INT64_TYPE__', '__INT8_TYPE__', '__llvm__', '_M_ARM_FP', '__MSP430__', '_MSV_VER',
'__SDCC', '__SIZEOF_DOUBLE__', '__SIZEOF_FLOAT__', '__SIZEOF_SIZE_T__', '__TI_COMPILER_VERSION__',
'__INT8_TYPE__', '__INT16_TYPE__', '__INT32_TYPE__', '__INT64_TYPE__', '__UINT8_TYPE__',
'__UINT16_TYPE__', '__UINT32_TYPE__', '__UINT64_TYPE__', 'HAS_FLOAT', 'HAS_DOUBLE',
'HAS_LONGLONG', 'HAS_FLOAT_OVERRIDE', 'HAS_DOUBLE_OVERRIDE', 'HAS_LONGLONG_OVERRIDE']
typedefs = { 'i1':1, 'i2':2, 'i4':4, 'u1':1, 'u2':2, 'u4':4, 'i8':8, 'u8':8, 'f4':4, 'f8':8 }
f = open(fname, 'w')
f.write('#include "types.h"\n\n')
i = 0
for s in sizes:
i += 1
d = 'INFO sizeof(%s) = ' % s
x = list(d)
x = "', '".join(x)
x = "'%s', '0'+sizeof(%s), '\\n'" % (x, s)
l = 'char size_info_%d[] = {%s};\n' % (i, x)
if s in ifdefs: f.write('#ifdef %s\n' % ifdefs[s])
f.write(l)
if s in ifdefs: f.write('#endif\n')
for s in typedefs:
if s in ifdefs: f.write('#ifdef %s\n' % ifdefs[s])
f.write('_Static_assert(sizeof(%s) == %d, "INFO %s should have size %d, is not correct\\n");\n' % (s, typedefs[s], s, typedefs[s]))
if s in ifdefs: f.write('#endif\n')
for s in syms:
i += 1
f.write('#ifdef %s\n' % s)
f.write('char sym_info_%d[] = "INFO %s is defined\\n\";\n' % (i, s))
f.write('#else\n')
f.write('char sym_info_%d[] = "INFO %s is not defined\\n\";\n' % (i, s))
f.write('#endif\n')
f.close()
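    # For illustration only: for the first size entry ('char') the loop above
    # writes a line of roughly this shape (assuming no #ifdef guard applies):
    #   char size_info_1[] = {'I', 'N', 'F', 'O', ' ', 's', ..., '0'+sizeof(char), '\n'};
    # so the compiled object carries the type sizes as readable "INFO ..." strings.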
class Config(object):
def __init__(self, *obj):
for o in obj:
if isinstance(o, dict): self.__dict__.update(o)
else: self.__dict__.update(o.__dict__)
def format(self, val):
if isinstance(val, basestring) and '%' in val:
return val % self.__dict__
elif isinstance(val, dict):
return dict(map(lambda (k,v): (k,self.format(v)), val.iteritems()))
else: return val
def __getattr__(self, attr):
return ''
def expand(self):
for k,v in self.__dict__.iteritems():
self.__dict__[k] = self.format(v)
def dump(self):
ret = ''
for k,v in sorted(self.__dict__.iteritems()):
if isinstance(v, basestring): vv = "'" + v + "'"
else: vv = str(v)
ret += ' '.ljust(10) + k.ljust(20) + vv + '\n'
return ret
|
apache-2.0
| -260,404,642,648,897,020
| 32.823151
| 143
| 0.523624
| false
| 3.431974
| false
| false
| false
|
DirectlineDev/django-hitcounter
|
django_hitcounter/models.py
|
1
|
2032
|
# -*- coding: utf-8 -*-
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models, transaction
from django.db.models import F
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from .managers import CounterManager
__all__ = ['Counter', ]
@python_2_unicode_compatible
class Counter(models.Model):
""" Hits counter per date
"""
# Content-object field
content_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = GenericForeignKey(ct_field="content_type", fk_field="object_pk")
date = models.DateField(default=timezone.now, verbose_name=_('date'))
hits = models.PositiveIntegerField(default=0, verbose_name=_('hits count'))
# Manager
objects = CounterManager()
class Meta:
verbose_name = _('counter')
verbose_name_plural = _('counters')
unique_together = (('content_type', 'object_pk', 'date'), )
def __str__(self):
return '{date}: {hits}'.format(
date=self.date.strftime('%d-%m-%Y'),
hits=self.hits
)
@classmethod
@transaction.atomic()
def hit(cls, obj, amount=1, date=None):
""" Increase hits counter for particular object on date (now() by default)
:param obj: model object
:param amount: increase amount (1 by default)
:return: None
"""
ct = ContentType.objects.get_for_model(obj)
date = date or timezone.now()
obj, _ = cls.objects.get_or_create(content_type=ct, object_pk=obj._get_pk_val(), date=date,
defaults={'hits': 0})
cls.objects.filter(pk=obj.pk).update(hits=F('hits')+amount)
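    # A minimal usage sketch (``article`` and ``another_day`` are placeholders,
    # not objects defined in this app):
    #   Counter.hit(article)                    # +1 hit for today
    #   Counter.hit(article, amount=5)          # +5 hits for today
    #   Counter.hit(article, date=another_day)  # credit hits to a specific date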
|
apache-2.0
| -8,971,828,384,236,141,000
| 34.649123
| 99
| 0.622047
| false
| 4.096774
| false
| false
| false
|
z/xonotic-map-manager
|
xmm/util.py
|
1
|
7782
|
import configparser
import os
import sys
import json
import time
import hashlib
import subprocess
import urllib.request
from datetime import datetime
from shutil import copyfile
def convert_size(number):
"""
    Convert an integer to a human-readable B/KB/MB/GB/TB string.
:param number:
integer to be converted to readable string
:type number: ``int``
:returns: `str`
"""
for x in ['B', 'KB', 'MB', 'GB']:
if number < 1024.0:
string = "%3.1d%s" % (number, x)
return string.strip()
number /= 1024.0
string = "%3.1f%s" % (number, 'TB')
return string.strip()
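# Rough behaviour sketch (note the integer-style "%3.1d" formatting used below
# the TB range, so fractional parts are dropped there):
#   convert_size(512)          -> '512B'
#   convert_size(2048)         -> '2KB'
#   convert_size(5 * 1024**3)  -> '5GB'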
def reporthook(count, block_size, total_size):
"""
Pretty progress for urllib downloads.
>>> import urllib.request
>>> urllib.request.urlretrieve(url, filename, reporthook)
https://github.com/yahoo/caffe/blob/master/scripts/download_model_binary.py
"""
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed. " %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def download_file(filename_with_path, url, use_curl=False, overwrite=False):
"""
downloads a file from any URL
:param filename_with_path:
filename with path to download file to
:type filename_with_path: ``str``
:param url:
URL to download map from
:type url: ``str``
:param use_curl:
Whether or not to use curl to download the file, default ``False``
:param overwrite:
Whether or not to overwrite the existing file, default ``False``
:type use_curl: ``bool``
"""
if not os.path.exists(filename_with_path) or overwrite:
if not use_curl:
urllib.request.urlretrieve(url, os.path.expanduser(filename_with_path), reporthook)
else:
subprocess.call(['curl', '-o', filename_with_path, url])
print("{}Done.{}".format(zcolors.INFO, zcolors.ENDC))
else:
print("{}file already exists, please remove first.{}".format(zcolors.FAIL, zcolors.ENDC))
return False
def parse_config(config_file):
"""
    parses an INI-style config file and returns its ``xmm`` section
:param config_file:
filename with path to config file
:type config_file: ``str``
:returns: ``dict``
"""
if not os.path.isfile(config_file):
print("{}{} not found, please create one.{}".format(zcolors.WARNING, config_file, zcolors.ENDC))
        raise Exception('Config not found.')
conf = configparser.ConfigParser()
conf.read(config_file)
return conf['xmm']
def check_if_not_create(file, template):
"""
    Checks for a file; if it doesn't exist, it will be created from a template.
:param file:
filename with path to file
:type file: ``str``
:param template:
filename with path to template file
:type template: ``str``
"""
if not os.path.isfile(file):
os.makedirs(os.path.dirname(file), exist_ok=True)
copyfile(template, file)
def create_if_not_exists(file, contents):
"""
    Checks for a file; if it doesn't exist, it will be created with the given contents.
:param file:
filename with path to file
:type file: ``str``
:param contents:
string contents of the file being created
:type contents: ``str``
"""
if not os.path.isfile(file):
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, 'w') as f:
f.write(contents)
def file_is_empty(filename):
"""
Checks to see if a file is empty
:param filename:
string filename
:type filename: ``str``
:returns: ``bool``
"""
return os.stat(filename).st_size == 0
def replace_last(string, old, new):
"""
Replace the last occurrence of a pattern in a string
:param string:
string
:type string: ``str``
:param old:
string to find
:type old: ``str``
:param new:
string to replace
:type new: ``str``
:returns: ``str``
"""
return string[::-1].replace(old[::-1], new[::-1], 1)[::-1]
def hash_file(filename):
"""
Returns the SHA-1 hash of the file passed into it
:param filename:
string filename
:type filename: ``str``
:returns: ``str``
"""
# make a hash object
h = hashlib.sha1()
# open file for reading in binary mode
with open(filename, 'rb') as file:
# loop till the end of the file
chunk = 0
while chunk != b'':
# read only 1024 bytes at a time
chunk = file.read(1024)
h.update(chunk)
# return the hex representation of digest
return h.hexdigest()
# http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
def query_yes_no(question, default="yes"):
"""
Ask a yes/no question via raw_input() and return their answer.
:param question:
a string that is presented to the user.
:type question: ``str``
:param default:
is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
:type default: ``str``
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
# http://stackoverflow.com/a/24030569
class ObjectEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that leverages an object's `__json__()` method,
if available, to obtain its default JSON representation.
"""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
if hasattr(obj, '__json__'):
return obj.__json__()
return json.JSONEncoder.default(self, obj)
class zcolors:
"""
Terminal formatting.
Options:
* HEADER
* INFO
* SUCCESS
* WARNING
* FAIL
* ENDC (end color)
* BOLD
* UNDERLINE
>>> "{}eggs{}: {}spam{}".format(zcolors.INFO, zcolors.ENDC, zcolors.UNDERLINE, zcolors.ENDC)
"""
HEADER = '\033[95m'
INFO = '\033[94m'
SUCCESS = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def cprint(string, style='INFO'):
"""
Terminal formatting convenience function.
:param string:
A string to print.
:type string: ``str``
:param style:
A style to print.
Options:
* HEADER
* INFO
* SUCCESS
* WARNING
* FAIL
* ENDC (end color)
* BOLD
* UNDERLINE
:type style: ``str``
>>> cprint("Success", style='SUCCESS')
"""
color = getattr(zcolors, style)
print('{}{}{}'.format(color, string, zcolors.ENDC))
|
mit
| 841,026,930,794,236,800
| 23.626582
| 104
| 0.577101
| false
| 3.785019
| true
| false
| false
|
Panda3D-google-code-repositories/naith
|
game/plugins/dirlight/dirlight.py
|
1
|
2667
|
# -*- coding: utf-8 -*-
# Copyright Tom SF Haines, Aaron Snoswell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from panda3d.core import NodePath, VBase4, BitMask32
from panda3d.core import DirectionalLight as PDirectionalLight
class DirLight:
"""Creates a simple directional light"""
def __init__(self,manager,xml):
self.light = PDirectionalLight('dlight')
self.lightNode = NodePath(self.light)
self.lightNode.setCompass()
if hasattr(self.lightNode.node(), "setCameraMask"):
self.lightNode.node().setCameraMask(BitMask32.bit(3))
self.reload(manager,xml)
def reload(self,manager,xml):
color = xml.find('color')
if color!=None:
self.light.setColor(VBase4(float(color.get('r')), float(color.get('g')), float(color.get('b')), 1.0))
pos = xml.find('pos')
if pos!=None:
self.lightNode.setPos(float(pos.get('x')), float(pos.get('y')), float(pos.get('z')))
else:
self.lightNode.setPos(0, 0, 0)
lookAt = xml.find('lookAt')
if lookAt!=None:
self.lightNode.lookAt(float(lookAt.get('x')), float(lookAt.get('y')), float(lookAt.get('z')))
lens = xml.find('lens')
if lens!=None and hasattr(self.lightNode.node(), 'getLens'):
if bool(int(lens.get('auto'))):
self.lightNode.reparentTo(base.camera)
else:
self.lightNode.reparentTo(render)
lobj = self.lightNode.node().getLens()
lobj.setNearFar(float(lens.get('near', 1.0)), float(lens.get('far', 100000.0)))
lobj.setFilmSize(float(lens.get('width', 1.0)), float(lens.get('height', 1.0)))
lobj.setFilmOffset(float(lens.get('x', 0.0)), float(lens.get('y', 0.0)))
if hasattr(self.lightNode.node(), 'setShadowCaster'):
shadows = xml.find('shadows')
if shadows!=None:
self.lightNode.node().setShadowCaster(True, int(shadows.get('width', 512)), int(shadows.get('height', 512)), int(shadows.get('sort', -10)))
#self.lightNode.node().setPushBias(float(shadows.get('bias', 0.5)))
else:
self.lightNode.node().setShadowCaster(False)
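  # A sketch of the XML element this plugin reads (attribute names taken from the
  # reload() lookups above; the tag name and values are made up for illustration):
  #   <dirlight>
  #     <color r='1.0' g='0.95' b='0.9'/>
  #     <pos x='0' y='0' z='100'/>
  #     <lookAt x='0' y='0' z='0'/>
  #     <lens auto='0' near='1' far='1000' width='50' height='50'/>
  #     <shadows width='1024' height='1024' sort='-10'/>
  #   </dirlight>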
def start(self):
render.setLight(self.lightNode)
def stop(self):
render.clearLight(self.lightNode)
|
apache-2.0
| -1,823,117,428,801,273,600
| 37.1
| 147
| 0.673416
| false
| 3.194012
| false
| false
| false
|
johjeff/Python-Wesleyen
|
Week2/ProblemSet2.py
|
1
|
11302
|
# ProblemSet2.py  -*- coding: utf-8 -*-
"""
Each problem will be a function to write.
Remember that you can execute just the code between the #%% signs by clicking
somewhere in that space and then using Ctrl-Enter (Cmd-Enter on Mac). An
alternative is to use the second toolbar green triangle or Menu>Run>Run cell.
On loops especially, you can make an error that causes the program to run
forever. If you don't get an immediate response, then this is probably happening.
In that case, try Ctrl-C. If that doesn't stop it, close your IPython console
and open a new one. Look over your code and see why the termination
condition can't be met and fix it. Then run again.
"""
"""
Problem 2_1:
Write a function 'problem2_1()' that sets a variable lis = list(range(20,30)) and
does all of the following, each on a separate line:
(a) print the element of lis with the index 3
(b) print lis itself
(c) write a 'for' loop that prints out every element of lis. Recall that
len() will give you the length of such a data collection if you need that.
Use end=" " to put one space between the elements of the list lis. Allow
the extra space at the end of the list to stand, don't make a special case
of it.
"""
#%%
def problem2_1():
lis = list(range(20,30))
print(lis[3])
print(lis)
for item in range(0,len(lis)):
print(lis[item],end=" ")
#%%
"""
Test run:
problem2_1()
23
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
20 21 22 23 24 25 26 27 28 29
"""
"""
Problem 2_2:
Write a function 'problem2_2()' that takes a list and does the following to it.
Actually, I've started the function for you below. Your function should do all
of the following, each on a separate line. Recall that lists start numbering
with 0.
0) print the whole list (this doesn't require a while or for loop)
1) print the item with index 0
2) print the last item in the list
3) print the items with indexes 3 through 5 but not including 5
4) print the items up to the one with index 3 but not including item 3
5) print the items starting at index 3 and going through the end.
6) print the length of the list ( use len() )
7) Use the append() method of a list to append the letter "z" onto a list.
Print the list with z appended.
Make sure that your function also works with blist below. For this to work,
you cannot use alist as a variable inside your function.
"""
#%%
alist = ["a","e","i","o","u","y"]
blist = ["alpha", "beta", "gamma", "delta", "epsilon", "eta", "theta"]
def problem2_2(my_list):
print(my_list)
print(my_list[0])
lislen = int(len(my_list))-1
print(my_list[lislen])
print(my_list[3:5:1])
print(my_list[:3])
print(my_list[3:])
print(len(my_list))
my_list.append("z")
print(my_list)
#%%
"""
Test run, two of them. The same function should work with either list. The
grader function will use different lists.
problem2_2(alist)
['a', 'e', 'i', 'o', 'u', 'y']
a
y
['o', 'u']
['a', 'e', 'i']
['o', 'u', 'y']
6
['a', 'e', 'i', 'o', 'u', 'y', 'z']
problem2_2(blist)
['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'eta', 'theta']
alpha
theta
['delta', 'epsilon']
['alpha', 'beta', 'gamma']
['delta', 'epsilon', 'eta', 'theta']
7
['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'eta', 'theta', 'z']
"""
"""
Problem 2_3:
Write a function problem2_3() that should have a 'for' loop that steps
through the list below and prints the name of the state and the number of
letters in the state's name. You may use the len() function.
Here is the output from mine:
In [70]: problem2_3(newEngland)
Maine has 5 letters.
New Hampshire has 13 letters.
Vermont has 7 letters.
Rhode Island has 12 letters.
Massachusetts has 13 letters.
Connecticut has 11 letters.
The function is started for you. The grader will not use the list newEngland
so don't use the variable newEngland inside your function.
"""
"""
newEngland = ["Maine","New Hampshire","Vermont", "Rhode Island",
"Massachusetts","Connecticut"]
"""
#%%
def problem2_3(st):
ind = 0
for state in st:
print(st[ind],"has",len(st[ind]),"letters.")
ind = ind + 1
"""
Problem 2_4:
random.random() generates pseudo-random real numbers between 0 and 1. But what
if you needed other random reals? Write a program to use only random.random()
to generate a list of random reals between 30 and 35. This is a simple matter
of multiplication and addition. By multiplying you can spread the random numbers
out to cover the range 0 to 5. By adding you can shift these numbers up to the
required range from 30 to 35. Set the seed in this function to 70 so that
everyone generates the same random numbers and will agree with the grader's
list of random numbers. Print out the list (in list form).
"""
#%%
import random
def problem2_4():
""" Make a list of 10 random reals between 30 and 35 """
random.seed(70)
num1 = []
#num1 = [random.random() * 5 + 30 for _ in range(10)] # same as loop below
for num in range(0,10):
num1.append(random.random() * 5 + 30)
print(num1)
#%%
"""
COMMENT: Note that this uses a pseudorandom number generator. That means
that the list will be different for each person. We issue the command
random.seed(70) inside the function problem2_4() to ensure that we generate the
same numbers that the grader expects. If you do this problem correctly, you
should get the list of random numbers below.
Test run:
problem2_4()
[34.54884618961936, 31.470395203793395, 32.297169396656095, 30.681793552717807,
34.97530360173135, 30.773219981037737, 33.36969776732032, 32.990127772708405,
33.57311858494461, 32.052629620057274]
""""""
Problem 2_5:
Let's do a small simulation. Suppose that you rolled a die repeatedly. Each
time that you roll the die you get an integer from 1 to 6, the number of pips
on the die. Use random.randint(a,b) to simulate rolling a die 10 times and
print out the 10 outcomes. The function random.randint(a,b) will
generate an integer (whole number) between the integers a and b inclusive.
Remember each outcome is 1, 2, 3, 4, 5, or 6, so make sure that you can get
all of these outcomes and none other. Print the list, one item to a line so that
there are 10 lines as in the example run. Make sure that it has 10 items
and they are all in the range 1 through 6. Here is one of my runs. In
the problem below I ask you to set the seed to 171 for the benefit of the
auto-grader. In this example, that wasn't done and so your numbers will be
different. Note that the seed must be set BEFORE randint is used.
problem2_5()
4
5
3
1
4
3
5
1
6
3
"""
"""
Problem 2_5:
"""
import random
def problem2_5():
""" Simulates rolling a die 10 times."""
# Setting the seed makes the random numbers always the same
# This is to make the auto-grader's job easier.
random.seed(171) # don't remove when you submit for grading
#die = [random.randint(1,6) for _ in range(10)]
pip = []
for val in range(10):
#die = random.randint(1,6)
pip.append(random.randint(1,6))
print(pip[val])
#%%
"""
Problem 2_6:
Let's continue with our simulation of dice by rolling two of them. This time
each die can come up with a number from 1 to 6, but you have two of them. The
result or outcome is taken to be the sum of the pips on the two dice. Write a
program that will roll 2 dice and produce the outcome. This time let's roll
the two dice 100 times. Print the outcomes one outcome per line.
"""
#%%
import random
def problem2_6():
""" Simulates rolling 2 dice 100 times """
# Setting the seed makes the random numbers always the same
# This is to make the auto-grader's job easier.
random.seed(431) # don't remove when you submit for grading
for val in range(100):
tot = random.randint(1,6) + random.randint(1,6)
print(tot)
#%%
"""
Test run with seed 82, but make sure that you submit with the seed 431:
problem2_6()
6
8
4
9
3
8
6
5
7
5
7
6
5
6
3
9
4
8
11
'
'
'
9
6
7
10
4
"""
"""
Problem 2_7:
Heron's formula for computing the area of a triangle with sides a, b, and c is
as follows. Let s = .5(a + b + c) --- that is, 1/2 of the perimeter of the
triangle. Then the area is the square root of s(s-a)(s-b)(s-c). You can compute
the square root of x by x**.5 (raise x to the 1/2 power). Use an input
statement to get the length of the sides. Don't forget to convert this input
to a real number using float(). Adjust your output to be just like what you
see below. Here is a run of my program:
problem2_7()
Enter length of side one: 9
Enter length of side two: 12
Enter length of side three: 15
Area of a triangle with sides 9.0 12.0 15.0 is 54.0
"""
#%%
def problem2_7():
""" computes area of triangle using Heron's formula. """
side1 = float(input("Enter length of side one: "))
if side1 <= 0.0:
print("Value must be 1 or more")
exit()
side2 = float(input("Enter length of side two: "))
if side2 <= 0.0:
print("Value must be 1 or more")
exit()
side3 = float(input("Enter length of side three: "))
if side3 <= 0.0:
print("Value must be 1 or more")
exit()
s = float((side1 + side2 + side3)/2)
x = s * (s - side1) * (s - side2) * (s - side3)
area = x**.5
print("Area of a triangle with sides",side1,side2,side3,"is",area)
#%%
"""
Problem 2_8:
The following list gives the hourly temperature during a 24 hour day. Please
write a function that will take such a list and compute 3 things: average
temperature, high (maximum temperature), and low (minimum temperature) for the
day. I will test with a different set of temperatures, so don't pick out
the low or the high and code it into your program. This should work for
other hourly_temp lists as well. This can be done by looping (interating)
through the list. I suggest you not write it all at once. You might write
a function that computes just one of these, say average, then improve it
to handle another, say maximum, etc. Note that there are Python functions
called max() and min() that could also be used to do part of the jobs.
"""
#%%
hourly_temp = [40.0, 39.0, 37.0, 34.0, 33.0, 34.0, 36.0, 37.0, 38.0, 39.0, \
40.0, 41.0, 44.0, 45.0, 47.0, 48.0, 45.0, 42.0, 39.0, 37.0, \
36.0, 35.0, 33.0, 32.0]
#%%
def problem2_8(temp_list):
average = sum(temp_list)/len(temp_list)
print("Average:",average)
high = max(temp_list)
print("High:",high)
low = min(temp_list)
print("Low:",low)
#%%
"""
Sample run using the list hourly_temp. Note that the grader will use a
different hourly list. Be sure that your function works on this list and test
it on at least one other list of your own construction.
Note also, that the list the grader uses may not have the same number of items
as this one.
problem2_8(hourly_temp)
Average: 38.791666666666664
High: 48.0
Low: 32.0
"""
|
bsd-2-clause
| -8,545,179,733,091,308,000
| 28.711957
| 81
| 0.656432
| false
| 3.133352
| true
| false
| false
|
kernsuite-debian/lofar
|
CEP/Pipeline/recipes/sip/nodes/setupsourcedb.py
|
1
|
3286
|
# LOFAR IMAGING PIPELINE
#
# setupsourcedb nodes recipe
# Marcel Loose, 2012
# loose@astron.nl
# ------------------------------------------------------------------------------
from subprocess import CalledProcessError
import errno
import os
import tempfile
import shutil
import sys
from lofarpipe.support.lofarnode import LOFARnodeTCP
from lofarpipe.support.utilities import log_time
from lofarpipe.support.pipelinelogging import CatchLog4CPlus
from lofarpipe.support.utilities import catch_segfaults
class setupsourcedb(LOFARnodeTCP):
"""
Create the sourcedb at the supplied location
1. Create output directory if it does not yet exist.
2. Create sourcedb
3. validate performance, cleanup
"""
def run(self, executable, catalogue, skydb, dbtype):
"""
Contains all functionality
"""
with log_time(self.logger):
# ****************************************************************
# 1. Create output directory if it does not yet exist.
skydb_dir = os.path.dirname(skydb)
try:
os.makedirs(skydb_dir)
self.logger.debug("Created output directory %s" % skydb_dir)
except FileExistsError:
pass
# ****************************************************************
# 2 Remove any old sky database
# Create the sourcedb
shutil.rmtree(skydb, ignore_errors=True)
self.logger.info("Creating skymodel: %s" % (skydb))
scratch_dir = tempfile.mkdtemp(suffix=".%s" % (os.path.basename(__file__),))
try:
cmd = [executable,
"in=%s" % catalogue,
"out=%s" % skydb,
"outtype=%s" % dbtype,
"format=<",
"append=false"
]
with CatchLog4CPlus(
scratch_dir,
self.logger.name + "." + os.path.basename(skydb),
os.path.basename(executable)
) as logger:
catch_segfaults(cmd, scratch_dir, None, logger)
# *****************************************************************
# 3. Validate performance and cleanup temp files
except CalledProcessError as err:
                # CalledProcessError isn't properly propagated by IPython
# Temporary workaround...
self.logger.error(str(err))
return 1
finally:
shutil.rmtree(scratch_dir)
return 0
if __name__ == "__main__":
# If invoked directly, parse command line arguments for logger information
# and pass the rest to the run() method defined above
# --------------------------------------------------------------------------
jobid, jobhost, jobport = sys.argv[1:4]
sys.exit(setupsourcedb(jobid, jobhost, jobport).run_with_stored_arguments())
|
gpl-3.0
| -1,931,678,355,029,964,500
| 38.590361
| 88
| 0.460743
| false
| 5.070988
| false
| false
| false
|
Princessgladys/googleresourcefinder
|
lib/feedlib/geo.py
|
1
|
2777
|
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Geographical functions. All measurements are in metres."""
from math import asin, cos, pi, sin, sqrt
EARTH_RADIUS = 6371009
def hav(theta):
"""Computes the haversine of an angle given in radians."""
return sin(theta/2)**2
def central_angle((phi_s, lam_s), (phi_f, lam_f)):
"""Returns the central angle between two points on a sphere, whose
locations are given as (latitude, longitude) pairs in radians."""
d_phi = phi_s - phi_f
d_lam = lam_s - lam_f
return 2*asin(sqrt(hav(d_phi) + cos(phi_s)*cos(phi_f)*hav(d_lam)))
def distance(start, finish):
"""Approximates the distance in metres between two points on the Earth,
which are given as {'lat':y, 'lon':x} objects in degrees."""
start_rad = (start['lat']*pi/180, start['lon']*pi/180)
finish_rad = (finish['lat']*pi/180, finish['lon']*pi/180)
return central_angle(start_rad, finish_rad)*EARTH_RADIUS
def point_inside_polygon(point, poly):
"""Returns true if the given point is inside the given polygon.
point is given as an {'lat':y, 'lon':x} object in degrees
poly is given as a list of (longitude, latitude) tuples. The last vertex
is assumed to be the same as the first vertex.
TODO(shakusa): poly should probably be expressed in a less-confusing way"""
lat = point['lat']
lon = point['lon']
n = len(poly)
inside = False
# Count the parity of intersections of a horizontal eastward ray starting
# at (lon, lat). If even, point is outside, odd, point is inside
lon1, lat1 = poly[0]
for i in range(n + 1):
lon2, lat2 = poly[i % n]
# if our ray falls within the vertical coords of the edge
if min(lat1, lat2) < lat <= max(lat1, lat2):
# if our (eastward) ray starts before the edge and the edge is not
# horizontal
if lon <= max(lon1, lon2) and lat1 != lat2:
lon_inters = lon1 + (lat - lat1) * (lon2 - lon1) / (lat2 - lat1)
# if the intersection is beyond the start of the ray,
# we've crossed it
if lon <= lon_inters:
inside = not inside
lon1, lat1 = lon2, lat2
return inside
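# A minimal usage sketch (coordinates are invented; poly is a list of
# (longitude, latitude) tuples with the first vertex repeated at the end):
#   square = [(0.0, 0.0), (0.0, 10.0), (10.0, 10.0), (10.0, 0.0), (0.0, 0.0)]
#   point_inside_polygon({'lat': 5.0, 'lon': 5.0}, square)    # True
#   point_inside_polygon({'lat': 15.0, 'lon': 5.0}, square)   # False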
|
apache-2.0
| 188,185,627,319,149,760
| 41.075758
| 80
| 0.649982
| false
| 3.551151
| false
| false
| false
|
voxpupuli/puppetboard
|
puppetboard/utils.py
|
1
|
3597
|
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import os.path
from distutils.util import strtobool
from flask import abort, request, url_for
from jinja2.utils import contextfunction
from pypuppetdb.errors import EmptyResponseError
from requests.exceptions import ConnectionError, HTTPError
log = logging.getLogger(__name__)
@contextfunction
def url_static_offline(context, value):
request_parts = os.path.split(os.path.dirname(context.name))
static_path = '/'.join(request_parts[1:])
return url_for('static', filename="%s/%s" % (static_path, value))
def url_for_field(field, value):
args = request.view_args.copy()
args.update(request.args.copy())
args[field] = value
return url_for(request.endpoint, **args)
def jsonprint(value):
return json.dumps(value, indent=2, separators=(',', ': '))
def get_db_version(puppetdb):
'''
    Get the version of puppetdb. From version 3.2 the query
    interface is slightly different on mbeans.
'''
ver = ()
try:
version = puppetdb.current_version()
(major, minor, build) = [int(x) for x in version.split('.')]
ver = (major, minor, build)
log.info("PuppetDB Version %d.%d.%d" % (major, minor, build))
except ValueError as e:
log.error("Unable to determine version from string: '%s'" % version)
ver = (4, 2, 0)
except HTTPError as e:
log.error(str(e))
except ConnectionError as e:
log.error(str(e))
except EmptyResponseError as e:
log.error(str(e))
return ver
def formatvalue(value):
if isinstance(value, str):
return value
elif isinstance(value, list):
return ", ".join(map(formatvalue, value))
elif isinstance(value, dict):
ret = ""
for k in value:
ret += k + " => " + formatvalue(value[k]) + ",<br/>"
return ret
else:
return str(value)
def prettyprint(value):
html = '<table class="ui basic fixed sortable table"><thead><tr>'
# Get keys
for k in value[0]:
html += "<th>" + k + "</th>"
html += "</tr></thead><tbody>"
for e in value:
html += "<tr>"
for k in e:
html += "<td>" + formatvalue(e[k]) + "</td>"
html += "</tr>"
html += "</tbody></table>"
return (html)
def get_or_abort(func, *args, **kwargs):
"""Execute the function with its arguments and handle the possible
errors that might occur.
In this case, if we get an exception we simply abort the request.
"""
try:
return func(*args, **kwargs)
except HTTPError as e:
log.error(str(e))
abort(e.response.status_code)
except ConnectionError as e:
log.error(str(e))
abort(500)
except EmptyResponseError as e:
log.error(str(e))
abort(204)
except Exception as e:
log.error(str(e))
abort(500)
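# A minimal usage sketch (``puppetdb`` stands for the pypuppetdb connection the
# caller already holds; it is not created in this module):
#   nodes = get_or_abort(puppetdb.nodes)
#   facts = get_or_abort(puppetdb.facts, 'osfamily')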
def yield_or_stop(generator):
"""Similar in intent to get_or_abort this helper will iterate over our
generators and handle certain errors.
Since this is also used in streaming responses where we can't just abort
a request we raise StopIteration.
"""
while True:
try:
yield next(generator)
except (EmptyResponseError, ConnectionError, HTTPError, StopIteration):
return
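# Illustrative usage (not part of the original module): get_or_abort() passes the
# callable and its arguments straight through on success and turns known PuppetDB/HTTP
# errors into Flask aborts; yield_or_stop() plays the same role for streamed generators.
# Inside a view one might write (the 'puppetdb' client and its methods are assumed):
#     node = get_or_abort(puppetdb.node, node_name)
#     for fact in yield_or_stop(node.facts()):
#         ...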
def is_bool(b):
try:
bool(strtobool(b))
return True
except ValueError:
return False
except TypeError:
return False
except AttributeError:
return False
|
apache-2.0
| -6,206,864,817,678,152,000
| 25.065217
| 79
| 0.615791
| false
| 3.863588
| false
| false
| false
|
CrazyBBer/Python-Learn-Sample
|
Modules/modules_BuiltIn.py
|
1
|
1445
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'eamon'
'Modules Built-In'
from datetime import datetime
now = datetime.now()
print(now)
print(type(now))
dt=datetime(2015,10,5,20,1,20)
print(dt)
print(dt.timestamp())
t=1444046480.0
print(datetime.fromtimestamp(t))
print(datetime.utcfromtimestamp(t))
cday=datetime.strptime('2015-10-05 20:07:59','%Y-%m-%d %H:%M:%S')
print(cday)
now=datetime.now()
print(now.strftime('%a,%b,%d %H:%M'))
from datetime import timedelta
now = datetime.now()
print(now)
# datetime.datetime(2015,10,05,20,12,58,10054)
print(now+timedelta(hours=10))
from datetime import timezone
tz_utc_8 = timezone(timedelta(hours=8))
now= datetime.now()
print(now)
dt=now.replace(tzinfo=tz_utc_8)
print(dt)
print('------------------------')
utc_dt=datetime.utcnow().replace(tzinfo=timezone.utc)
print(utc_dt)
bjtm=utc_dt.astimezone(timezone(timedelta(hours=8)))
print(bjtm)
tokyo_tm=bjtm.astimezone(timezone(timedelta(hours=9)))
print('------------------------')
print(tokyo_tm)
import re
def to_timestamp(dt_str,tz_str):
    tz_fmt_str = r'^UTC([+-]\d{1,2}):\d{2}$'
tm_fmt=re.match(tz_fmt_str,tz_str)
if tm_fmt:
tz_hours=int(tm_fmt.group(1))
cur_datetime=datetime.strptime(dt_str,'%Y-%m-%d %H:%M:%S')
return cur_datetime.replace(tzinfo=timezone(timedelta(hours=tz_hours))).timestamp()
t1 = to_timestamp('2015-6-1 08:10:30', 'UTC+7:00')
assert t1 == 1433121030.0, t1
print('Pass')
|
mit
| -5,883,119,671,976,176,000
| 15.802326
| 85
| 0.666436
| false
| 2.5
| false
| false
| false
|
nansencenter/sea_ice_drift
|
sea_ice_drift/seaicedrift.py
|
1
|
3709
|
# Name: seaicedrift.py
# Purpose: Container of SeaIceDrift class
# Authors: Anton Korosov, Stefan Muckenhuber
# Created: 21.09.2016
# Copyright: (c) NERSC 2016
# Licence:
# This file is part of SeaIceDrift.
# SeaIceDrift is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
# http://www.gnu.org/licenses/gpl-3.0.html
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
from __future__ import absolute_import
import numpy as np
from sea_ice_drift.lib import get_n, get_drift_vectors
from sea_ice_drift.ftlib import feature_tracking
from sea_ice_drift.pmlib import pattern_matching
class SeaIceDrift(object):
''' Retrieve Sea Ice Drift using Feature Tracking and Pattern Matching'''
def __init__(self, filename1, filename2, **kwargs):
''' Initialize from two file names:
Open files with Nansat
Read data from sigma0_HV or other band and convert to UInt8
Parameters
----------
filename1 : str, file name of the first Sentinel-1 image
filename2 : str, file name of the second Sentinel-1 image
'''
self.filename1 = filename1
self.filename2 = filename2
# get Nansat
self.n1 = get_n(self.filename1, **kwargs)
self.n2 = get_n(self.filename2, **kwargs)
def get_drift_FT(self, **kwargs):
''' Get sea ice drift using Feature Tracking
Parameters
----------
**kwargs : parameters for
feature_tracking
get_drift_vectors
Returns
-------
u : 1D vector - eastward ice drift speed
v : 1D vector - northward ice drift speed
lon1 : 1D vector - longitudes of source points
lat1 : 1D vector - latitudes of source points
lon2 : 1D vector - longitudes of destination points
lat2 : 1D vector - latitudes of destination points
'''
x1, y1, x2, y2 = feature_tracking(self.n1, self.n2, **kwargs)
return get_drift_vectors(self.n1, x1, y1,
self.n2, x2, y2, **kwargs)
def get_drift_PM(self, lons, lats, lon1, lat1, lon2, lat2, **kwargs):
''' Get sea ice drift using Pattern Matching
Parameters
----------
lons : 1D vector, longitude of result vectors on image 1
lats : 1D vector, latitude of result vectors on image 1
lon1 : 1D vector, longitude of keypoints on image1
lat1 : 1D vector, latitude of keypoints on image1
lon2 : 1D vector, longitude of keypoints on image2
lat2 : 1D vector, latitude of keypoints on image2
**kwargs : parameters for
feature_tracking
get_drift_vectors
Returns
-------
u : 1D vector, eastward ice drift speed, m/s
        v : 1D vector, northward ice drift speed, m/s
        a : 1D vector, angle that gives the highest MCC
r : 1D vector, MCC
h : 1D vector, Hessian of CC matrix and MCC point
lon2_dst : 1D vector, longitude of results on image 2
lat2_dst : 1D vector, latitude of results on image 2
'''
x1, y1 = self.n1.transform_points(lon1, lat1, 1)
x2, y2 = self.n2.transform_points(lon2, lat2, 1)
return pattern_matching(lons, lats, self.n1, x1, y1,
self.n2, x2, y2, **kwargs)
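def _example_drift_workflow(filename1, filename2, lons_grid, lats_grid):
    ''' Illustrative sketch only (not part of the original module): the two-step workflow
    described above -- feature tracking first, then pattern matching on a user-supplied
    grid of output points. Argument names are hypothetical.'''
    sid = SeaIceDrift(filename1, filename2)
    # initial drift estimate and matched keypoints from feature tracking
    u_ft, v_ft, lon1, lat1, lon2, lat2 = sid.get_drift_FT()
    # refined drift on the output grid from pattern matching
    return sid.get_drift_PM(lons_grid, lats_grid, lon1, lat1, lon2, lat2)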
|
gpl-3.0
| -6,663,547,883,015,826,000
| 41.147727
| 77
| 0.605284
| false
| 3.727638
| false
| false
| false
|
revcoin/revcoin
|
contrib/pyminer/pyminer.py
|
1
|
6434
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
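# Illustrative note (not part of the original miner): bufreverse() followed by
# wordreverse() on the 32-byte digest amounts to reversing its byte order, so the
# proof-of-work test in Miner.work() below is effectively the check sketched here
# (header bytes and target are dummy arguments):
def _double_sha256_below_target(header_bytes, target):
    import binascii
    digest = hashlib.sha256(hashlib.sha256(header_bytes).digest()).digest()
    return int(binascii.hexlify(digest[::-1]), 16) < target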
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7572
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mit
| -6,053,348,634,752,949,000
| 24.531746
| 84
| 0.648896
| false
| 2.83062
| false
| false
| false
|
DamnWidget/txorm
|
txorm/_compat/python2_.py
|
1
|
1170
|
# Copyright (c) 2014 Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for details
import re
import sys
import urlparse
try:
import cPickle as pickle
except ImportError:
import pickle
try:
import cStringIO as StringIO
except ImportError:
from StringIO import StringIO
_queryprog = None
def __splitquery(url):
"""splitquery('/path?query') --> '/path', 'query'."""
global _queryprog
if _queryprog is None:
_queryprog = re.compile('^(.*)\?([^?]*)$')
match = _queryprog.match(url)
if match:
return match.group(1, 2)
return url, None
urlparse.splitquery = __splitquery
# urlparse prior to Python 2.7.6 has a bug in parsing the port; fix it
if sys.version_info < (2, 7, 6):
def port(self):
netloc = self.netloc.split('@')[-1].split(']')[-1]
if ':' in netloc:
port = netloc.split(':')[1]
if port:
port = int(port, 10)
# verify legal port
if (0 <= port <= 65535):
return port
return None
urlparse.ResultMixin.port = property(port)
__all__ = ['pickle', 'StringIO', 'urlparse']
|
lgpl-3.0
| -8,705,973,143,589,217,000
| 21.5
| 71
| 0.582051
| false
| 3.726115
| false
| false
| false
|
imclab/confer
|
scripts/chi2013/prepare_paper_paper_graph_data.py
|
1
|
4179
|
#!/usr/bin/python
import sys, os, operator, numpy, MySQLdb, json
import matplotlib.pyplot as plt
from db import entity
from db import session
from collections import defaultdict
'''
@author: anant bhardwaj
@date: Feb 12, 2013
script for preparing the paper-paper graph data (nodes/links JSON)
'''
entities = entity.Entity().entities
sessions = session.Session().sessions
connection = MySQLdb.connect(host="mysql.csail.mit.edu",
user="cobi",
passwd="su4Biha",
db="cobi")
nodes = {}
edges = defaultdict(dict)
likes = defaultdict(set)
scon = {}
papers_count = []
def load_data():
for p in entities:
nodes[p] = {'title': entities[p]['title'], 'session': entities[p]['session'], 'award': entities[p]['award'], 'hm': entities[p]['hm']}
cursor = connection.cursor()
cursor.execute("SELECT auth_no, likes, email1 FROM pcs_authors where likes!= 'NULL' and likes !='[]';")
data = cursor.fetchall()
for row in data:
papers = json.loads(row[1])
papers_count.append(len(papers))
for p in papers:
likes[p].add(row[0])
for p1 in entities:
for p2 in entities:
edges[p1][p2] = -1
if(p1 != p2):
common_likes = likes[p1].intersection(likes[p2])
edges[p1][p2] = len(common_likes)
for s in sessions:
num_edges = 0
papers = sessions[s]['submissions']
for p1 in papers:
for p2 in papers:
try:
if(p1 != p2 and edges[p1][p2] > 0):
num_edges += 1
except:
pass
if(len(sessions[s]['submissions']) > 0):
scon[s] = float(num_edges)/len(sessions[s]['submissions'])
def main():
load_data()
nodesArray = []
linksArray = []
print numpy.mean(scon.values()), numpy.std(scon.values()), numpy.median(scon.values()), numpy.min(scon.values()), numpy.max(scon.values())
'''
plt.hist([awards_count, non_awards_count], bins=100, histtype='bar', stacked=True, color=['yellow', 'green'],
label=['Award Papers', 'Non-Award Papers'])
plt.title('Number of People Starred vs. Number of Papers')
plt.xlabel('Number of People Starred')
plt.ylabel('Number of Papers')
plt.legend()
plt.show()
awards_count = []
non_awards_count = []
likes_count = [len(v) for k,v in likes.iteritems()]
for k,v in likes.iteritems():
if k in nodes and (nodes[k]['award'] or nodes[k]['hm']):
awards_count.append(len(v))
else:
non_awards_count.append(len(v))
#print numpy.mean(papers_count), numpy.std(papers_count), numpy.median(papers_count), numpy.min(papers_count), numpy.max(papers_count)
print numpy.mean(likes_count), numpy.std(likes_count), numpy.median(likes_count), numpy.min(likes_count), numpy.max(likes_count)
print numpy.mean(awards_count), numpy.std(awards_count), numpy.median(awards_count), numpy.min(awards_count), numpy.max(awards_count)
plt.hist([awards_count, non_awards_count], bins=100, histtype='bar', stacked=True, color=['yellow', 'green'],
label=['Award Papers', 'Non-Award Papers'])
plt.title('Number of People Starred vs. Number of Papers')
plt.xlabel('Number of People Starred')
plt.ylabel('Number of Papers')
plt.legend()
plt.show()
plt.hist(papers_count, bins=20, color="cyan")
plt.title('Number of Papers vs. Number of People')
plt.xlabel('Number of Likes')
plt.ylabel('Number of People')
plt.show()
'''
k = 0
for node in nodes:
nodes[node]['id']= k
nodesArray.append({'title' : nodes[node]['title'], 'session': nodes[node]['session'], 'weight': len(likes[node])})
k = k+1
edgesToRemove = set()
'''
for edge in edges:
links = edges[edge]
for l in links:
weight = edges[edge][l]
if(weight > 14):
edgesToRemove.add(nodes[edge]['id'])
edgesToRemove.add(nodes[l]['id'])
'''
for edge in edges:
links = edges[edge]
for l in links:
weight = edges[edge][l]
if(weight > 0 and (nodes[edge]['id'] not in edgesToRemove) and (nodes[l]['id'] not in edgesToRemove)):
linksArray.append({'source' : nodes[edge]['id'], 'target' : nodes[l]['id'], 'weight': weight})
p = open('/Volumes/Workspace/www/data.json','w')
p.write(json.dumps({"nodes": nodesArray, "links": linksArray}))
if __name__ == '__main__':
main()
|
mit
| -679,769,721,634,642,000
| 26.675497
| 139
| 0.639627
| false
| 2.910167
| false
| false
| false
|
leiferikb/bitpop
|
src/tools/perf/benchmarks/sunspider.py
|
1
|
2047
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import json
import os
from metrics import power
from telemetry import test
from telemetry.page import page_measurement
from telemetry.page import page_set
_URL = 'http://www.webkit.org/perf/sunspider-1.0.2/sunspider-1.0.2/driver.html'
class _SunspiderMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_SunspiderMeasurement, self).__init__()
self._power_metric = power.PowerMetric()
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def MeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression(
'window.location.pathname.indexOf("results.html") >= 0'
'&& typeof(output) != "undefined"', 300)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
js_get_results = 'JSON.stringify(output);'
js_results = json.loads(tab.EvaluateJavaScript(js_get_results))
r = collections.defaultdict(list)
totals = []
# js_results is: [{'foo': v1, 'bar': v2},
# {'foo': v3, 'bar': v4},
# ...]
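    # e.g. two runs {'foo': 1, 'bar': 2} and {'foo': 3, 'bar': 4} aggregate below into
    # r == {'foo': [1, 3], 'bar': [2, 4]} and totals == [3, 7] (illustrative values only)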
for result in js_results:
total = 0
for key, value in result.iteritems():
r[key].append(value)
total += value
totals.append(total)
for key, values in r.iteritems():
results.Add(key, 'ms', values, data_type='unimportant')
results.Add('Total', 'ms', totals)
class Sunspider(test.Test):
"""Apple's SunSpider JavaScript benchmark."""
test = _SunspiderMeasurement
def CreatePageSet(self, options):
ps = page_set.PageSet(
archive_data_file='../page_sets/data/sunspider.json',
make_javascript_deterministic=False,
file_path=os.path.abspath(__file__))
ps.AddPageWithDefaultRunNavigate(_URL)
return ps
|
gpl-3.0
| -1,330,169,115,319,398,100
| 30.984375
| 79
| 0.674646
| false
| 3.623009
| false
| false
| false
|
arnaudcoquelet/myAvstServer
|
node_modules/soap/node_modules/node-expat/build/c4che/Release.cache.py
|
1
|
1498
|
AR = '/usr/bin/ar'
ARFLAGS = 'rcs'
CCFLAGS = ['-g']
CCFLAGS_MACBUNDLE = ['-fPIC']
CCFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CC_VERSION = ('4', '4', '5')
COMPILER_CXX = 'g++'
CPP = '/usr/bin/cpp'
CPPFLAGS_NODE = ['-D_GNU_SOURCE']
CPPPATH_EXPAT.H = ['/usr/include', '/usr/local/include']
CPPPATH_NODE = '/usr/local/include/node'
CPPPATH_ST = '-I%s'
CXX = ['/usr/bin/g++']
CXXDEFINES_ST = '-D%s'
CXXFLAGS = ['-g']
CXXFLAGS_DEBUG = ['-g']
CXXFLAGS_NODE = ['-D_LARGEFILE_SOURCE', '-D_FILE_OFFSET_BITS=64']
CXXFLAGS_RELEASE = ['-O2']
CXXLNK_SRC_F = ''
CXXLNK_TGT_F = ['-o', '']
CXX_NAME = 'gcc'
CXX_SRC_F = ''
CXX_TGT_F = ['-c', '-o', '']
DEST_BINFMT = 'elf'
DEST_CPU = 'x86'
DEST_OS = 'linux'
FULLSTATIC_MARKER = '-static'
HAVE_EXPAT_H = 1
LIBDIR = '/root/.node_libraries'
LIBPATH_NODE = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_EXPAT = ['expat']
LIB_ST = '-l%s'
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINK_CXX = ['/usr/bin/g++']
NODE_PATH = '/root/.node_libraries'
PREFIX = '/usr/local'
PREFIX_NODE = '/usr/local'
RANLIB = '/usr/bin/ranlib'
RPATH_ST = '-Wl,-rpath,%s'
SHLIB_MARKER = '-Wl,-Bdynamic'
SONAME_ST = '-Wl,-h,%s'
STATICLIBPATH_ST = '-L%s'
STATICLIB_MARKER = '-Wl,-Bstatic'
STATICLIB_ST = '-l%s'
defines = {'HAVE_EXPAT_H': 1}
macbundle_PATTERN = '%s.bundle'
program_PATTERN = '%s'
shlib_CXXFLAGS = ['-fPIC', '-DPIC']
shlib_LINKFLAGS = ['-shared']
shlib_PATTERN = 'lib%s.so'
staticlib_LINKFLAGS = ['-Wl,-Bstatic']
staticlib_PATTERN = 'lib%s.a'
|
gpl-2.0
| -36,677,115,917,842,780
| 27.264151
| 65
| 0.619493
| false
| 2.439739
| false
| true
| false
|
eile/ITK
|
Modules/ThirdParty/pygccxml/src/pygccxml/declarations/mdecl_wrapper.py
|
1
|
3186
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
defines class L{mdecl_wrapper_t}, which allows working on a set of declarations
as if it were a single declaration.
The L{class<mdecl_wrapper_t>} lets the user avoid writing "for" loops in the code.
"""
import os
class call_redirector_t( object ):
"""Internal class used to call some function of objects"""
def __init__( self, name, decls ):
"""creates call_redirector_t instance.
@param name: name of method, to be called on every object in C{decls} list
@param decls: list of objects
"""
object.__init__( self )
self.name = name
self.decls = decls
def __call__( self, *arguments, **keywords ):
"""calls method C{self.name} on every object within C{self.decls} list"""
for d in self.decls:
callable_ = getattr(d, self.name)
callable_( *arguments, **keywords )
class mdecl_wrapper_t( object ):
"""Multiple declarations wrapper.
    The main purpose of this class is to allow a user to work on many
    declarations as if they were a single declaration.
Example:
mb = module_builder_t( ... )
#lets say we want to exclude all member functions, that returns reference to int:
mb.member_functions( return_type='int &' ).exclude()
"exclude" function will be called on every function that match the criteria.
"""
def __init__( self, decls ):
"""@param decls: list of declarations to operate on.
@type decls: list of L{declaration wrappers<decl_wrapper_t>}
"""
object.__init__( self )
self.__dict__['declarations'] = decls
def __nonzero__( self ):
return bool( self.declarations )
def __len__( self ):
"""returns the number of declarations"""
return len( self.declarations )
def __getitem__( self, index ):
"""provides access to declaration"""
return self.declarations[index]
def __iter__( self ):
return iter(self.declarations)
def __ensure_attribute( self, name ):
invalid_decls = filter( lambda d: not hasattr( d, name ), self.declarations )
sep = os.linesep + ' '
if invalid_decls:
raise RuntimeError( "Next declarations don't have '%s' attribute: %s"
% ( name, sep.join( map( str, invalid_decls ) ) ) )
def __setattr__( self, name, value ):
"""Updates the value of attribute on all declarations.
@param name: name of attribute
@param value: new value of attribute
"""
self.__ensure_attribute( name )
for d in self.declarations:
setattr( d, name, value )
def __getattr__( self, name ):
"""@param name: name of method
"""
return call_redirector_t( name, self.declarations )
def __contains__( self, item ):
return item in self.declarations
def to_list(self):
l = []
for d in self.declarations:
l.append( d )
return l
|
apache-2.0
| 5,379,909,319,253,769,000
| 32.197917
| 85
| 0.603264
| false
| 4.121604
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/maintenance/azure-mgmt-maintenance/azure/mgmt/maintenance/aio/operations/_apply_updates_operations.py
|
1
|
19789
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplyUpdatesOperations:
"""ApplyUpdatesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.maintenance.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get_parent(
self,
resource_group_name: str,
resource_parent_type: str,
resource_parent_name: str,
provider_name: str,
resource_type: str,
resource_name: str,
apply_update_name: str,
**kwargs
) -> "_models.ApplyUpdate":
"""Track Updates to resource with parent.
Track maintenance updates to resource with parent.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param resource_parent_type: Resource parent type.
:type resource_parent_type: str
:param resource_parent_name: Resource parent identifier.
:type resource_parent_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:param apply_update_name: applyUpdate Id.
:type apply_update_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplyUpdate, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get_parent.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceParentType': self._serialize.url("resource_parent_type", resource_parent_type, 'str'),
'resourceParentName': self._serialize.url("resource_parent_name", resource_parent_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'applyUpdateName': self._serialize.url("apply_update_name", apply_update_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplyUpdate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_parent.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/{applyUpdateName}'} # type: ignore
async def get(
self,
resource_group_name: str,
provider_name: str,
resource_type: str,
resource_name: str,
apply_update_name: str,
**kwargs
) -> "_models.ApplyUpdate":
"""Track Updates to resource.
Track maintenance updates to resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:param apply_update_name: applyUpdate Id.
:type apply_update_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplyUpdate, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'applyUpdateName': self._serialize.url("apply_update_name", apply_update_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplyUpdate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/{applyUpdateName}'} # type: ignore
async def create_or_update_parent(
self,
resource_group_name: str,
provider_name: str,
resource_parent_type: str,
resource_parent_name: str,
resource_type: str,
resource_name: str,
**kwargs
) -> "_models.ApplyUpdate":
"""Apply Updates to resource with parent.
Apply maintenance updates to resource with parent.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_parent_type: Resource parent type.
:type resource_parent_type: str
:param resource_parent_name: Resource parent identifier.
:type resource_parent_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplyUpdate, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.create_or_update_parent.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceParentType': self._serialize.url("resource_parent_type", resource_parent_type, 'str'),
'resourceParentName': self._serialize.url("resource_parent_name", resource_parent_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplyUpdate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_parent.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceParentType}/{resourceParentName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
provider_name: str,
resource_type: str,
resource_name: str,
**kwargs
) -> "_models.ApplyUpdate":
"""Apply Updates to resource.
Apply maintenance updates to resource.
:param resource_group_name: Resource group name.
:type resource_group_name: str
:param provider_name: Resource provider name.
:type provider_name: str
:param resource_type: Resource type.
:type resource_type: str
:param resource_name: Resource identifier.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApplyUpdate, or the result of cls(response)
:rtype: ~azure.mgmt.maintenance.models.ApplyUpdate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplyUpdate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'providerName': self._serialize.url("provider_name", provider_name, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ApplyUpdate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{providerName}/{resourceType}/{resourceName}/providers/Microsoft.Maintenance/applyUpdates/default'} # type: ignore
def list(
self,
**kwargs
) -> AsyncIterable["_models.ListApplyUpdate"]:
"""Get Configuration records within a subscription.
Get Configuration records within a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListApplyUpdate or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.maintenance.models.ListApplyUpdate]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListApplyUpdate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListApplyUpdate', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.MaintenanceError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Maintenance/applyUpdates'} # type: ignore
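# Illustrative usage sketch (not part of the generated code): these operations are
# normally reached through the generated maintenance management client, which attaches
# an instance of this class as an attribute (client class and attribute names below are
# assumed); the credential, subscription id and resource names are placeholders.
#     async with MaintenanceManagementClient(credential, subscription_id) as client:
#         update = await client.apply_updates.create_or_update(
#             'my-rg', 'Microsoft.Compute', 'virtualMachines', 'vm-01')
#         async for item in client.apply_updates.list():
#             print(item)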
|
mit
| -6,968,445,616,262,737,000
| 47.148418
| 282
| 0.648188
| false
| 4.369397
| true
| false
| false
|
OpenBfS/dokpool-plone
|
Plone/src/docpool.users/docpool/users/browser/personalpreferences.py
|
1
|
1961
|
# -*- coding: utf-8 -*-
from docpool.users import DocpoolMessageFactory as _
from docpool.users.interfaces import IDocPoolUsersLayer
from plone.app.users.browser.account import AccountPanelSchemaAdapter
from plone.app.users.browser.personalpreferences import PersonalPreferencesPanel
from plone.autoform import directives
from plone.supermodel import model
from plone.z3cform.fieldsets import extensible
from z3c.form import field
from z3c.form.browser.checkbox import CheckBoxFieldWidget
from zope import schema
from zope.component import adapter
from zope.interface import Interface
class IEnhancedPersonalPreferences(model.Schema):
""" Use all the fields from the default user data schema, and add various
extra fields.
"""
apps = schema.List(
title=_(u'label_user_apps', default=u'Applications'),
description=_(u'description_user_apps', default=u''),
required=False,
value_type=schema.Choice(
source="docpool.base.vocabularies.AvailableApps"),
)
directives.widget(apps=CheckBoxFieldWidget)
class EnhancedPersonalPreferencesAdapter(AccountPanelSchemaAdapter):
schema = IEnhancedPersonalPreferences
def get_apps(self):
return self.context.getProperty('apps', [])
def set_apps(self, value):
return self.context.setMemberProperties({'apps': value})
    apps = property(get_apps, set_apps)
@adapter(Interface, IDocPoolUsersLayer, PersonalPreferencesPanel)
class PersonalPreferencesPanelExtender(extensible.FormExtender):
def update(self):
fields = field.Fields(IEnhancedPersonalPreferences)
self.add(fields)
# remove not needed fields
self.remove('wysiwyg_editor')
self.remove('language')
self.remove('timezone')
# little monkey patch
def updateWidgets(self):
super(PersonalPreferencesPanel, self).updateWidgets()
# skip the other fields
PersonalPreferencesPanel.updateWidgets = updateWidgets
|
gpl-3.0
| 1,532,422,157,962,041,600
| 31.147541
| 80
| 0.748598
| false
| 4.043299
| false
| false
| false
|
Kortemme-Lab/klab
|
klab/bio/pymolmod/scaffold_model_design.py
|
1
|
9771
|
#!/usr/bin/python
# encoding: utf-8
"""
scaffold_model_design.py
A PSE builder for scaffold/model/design structures.
Created by Shane O'Connor 2014.
The PyMOL commands are adapted from scripts developed and written by Roland A. Pache, Ph.D., Copyright (C) 2012, 2013.
"""
from klab.fs.fsio import write_file
from klab import colortext
from .psebuilder import PyMOLSessionBuilder, create_pymol_selection_from_PDB_residue_ids
# Notes:
#
# The select or cmd.select commands create the selection objects e.g. '(ExpStructure_mutations_s)' in the right pane. These
# are just selection sets so clicking on the name in the pane only results in a selection.
#
# The create or cmd.create commands create an object e.g. ExpStructure_mutations in the right pane. Clicking on this name
# toggles whether this selection is shown or not. To set up a default view, follow the create command with a show command
# e.g. show sticks, Scaffold_mutations.
#
# However, if you want the selection to be hidden when the PSE is loaded, you need to use the disable command, *not the hide command*
# e.g. disable spheres_Scaffold_HETATMs.
#
# There is another subtle behavioral difference between loading a PSE file and pasting commands into the terminal of a PyMOL window.
# If you write e.g.
# select Scaffold_mutations, [some selection string]
# create Scaffold_mutations, [some selection string]
# into the terminal, two objects are created in the right pane. However, if you save the PSE and reload it, only one of these
# objects works as expected. Therefore, if you need both, use two separately named objects. Below, I instead write the equivalent of:
# select Scaffold_mutations_s, [some selection string]
# create Scaffold_mutations, [some selection string]
# to create two distinct objects. The '_s' is just my arbitrary convention to denote that the object came from a select command.
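# For illustration, the same convention written with the cmd API (the selection string
# here is hypothetical; the generated script below emits commands of this shape):
# cmd.select('Scaffold_mutations_s', 'Scaffold and chain A and resi 122')
# cmd.create('Scaffold_mutations', 'Scaffold and chain A and resi 122')
# cmd.show('sticks', 'Scaffold_mutations')
# cmd.disable('spheres_Scaffold_HETATMs') # hidden by default when the PSE is loaded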
class ScaffoldModelDesignBuilder(PyMOLSessionBuilder):
def __init__(self, pdb_containers, settings = {}, rootdir = '/tmp'):
super(ScaffoldModelDesignBuilder, self).__init__(pdb_containers, settings, rootdir)
self.Scaffold = pdb_containers.get('Scaffold')
self.Model = pdb_containers['Model']
self.ExpStructure = pdb_containers.get('ExpStructure')
def _create_input_files(self):
#colortext.message('self.outdir: ' + self.outdir)
if self.Scaffold:
write_file(self._filepath('scaffold.pdb'), self.Scaffold.pdb_contents)
write_file(self._filepath('model.pdb'), self.Model.pdb_contents)
if self.ExpStructure:
write_file(self._filepath('design.pdb'), self.ExpStructure.pdb_contents)
def _add_preamble(self):
self.script.append("cd %(outdir)s" % self.__dict__)
def _add_load_section(self):
self.script.append("### Load the structures")
if self.ExpStructure:
self.script.append("load design.pdb, ExpStructure")
self.script.append("load model.pdb, RosettaModel")
self.script.append("load scaffold.pdb, Scaffold")
def _add_view_settings_section(self):
self.script.append('''
# Set general view options and hide waters
viewport 1200,800
hide eve
remove resn hoh
bg_color %(global.background-color)s
''' % self.color_scheme)
def _add_generic_chain_settings_section(self):
self.script.append('''
# Set generic chain and HETATM view options
show cartoon
util.cbc
# Hide selenomethionines and selenocysteines
hide sticks, resn CSE+SEC+MSE
util.cnc
set cartoon_side_chain_helper
set cartoon_rect_length, 0.9
set cartoon_oval_length, 0.9
set stick_radius, 0.2
''')
def _add_specific_chain_settings_section(self):
self.script.append('''
# Scaffold display
color %(Scaffold.bb)s, Scaffold
# RosettaModel display
show car, RosettaModel
color %(RosettaModel.bb)s, RosettaModel
''' % self.color_scheme)
if self.ExpStructure:
self.script.append('''
# ExpStructure display
show car, ExpStructure
color %(ExpStructure.bb)s, ExpStructure
''' % self.color_scheme)
def _add_superimposition_section(self):
self.script.append('''
# Superimpose the structures
super Scaffold, RosettaModel''')
if self.ExpStructure:
self.script.append("super ExpStructure, RosettaModel")
def _add_orient_view_section(self):
pass
def _add_scaffold_view_section(self):
self.script.append('''
# Scaffold view options
hide lines, Scaffold
hide ribbon, Scaffold
show car, Scaffold
util.cbc Scaffold''')
if self.ExpStructure:
# Hide the scaffold if there is an experimental structure
self.script.append('''
disable Scaffold''')
def _add_residue_highlighting_section(self):
if self.Scaffold:
scaffold_selection = 'Scaffold and (%s)' % (create_pymol_selection_from_PDB_residue_ids(self.Scaffold.residues_of_interest))
self.script.append('''
### Scaffold objects ###
# Scaffold mutations
has_mutations = cmd.count_atoms('%(scaffold_selection)s') > 0
if has_mutations: cmd.select('Scaffold_mutations_s', '%(scaffold_selection)s');
if has_mutations: cmd.create('Scaffold_mutations', '%(scaffold_selection)s');
if has_mutations: cmd.show('sticks', 'Scaffold_mutations')
''' % vars())
self.script.append('''
if has_mutations: cmd.color('%(Scaffold.mutations)s', 'Scaffold_mutations')
# Scaffold HETATMs - create
has_hetatms = cmd.count_atoms('Scaffold and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('Scaffold_HETATMs', 'Scaffold and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('sticks', 'Scaffold_HETATMs')
if has_hetatms: cmd.disable('Scaffold_HETATMs')
if has_hetatms: cmd.create('spheres_Scaffold_HETATMs', 'Scaffold and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_Scaffold_HETATMs')
if has_hetatms: cmd.disable('spheres_Scaffold_HETATMs')
''' % self.color_scheme)
#self.script.append('set label_color, black')
#self.script.append('label n. CA and Scaffold and chain A and i. 122, "A122" ')
model_selection = 'RosettaModel and (%s)' % (create_pymol_selection_from_PDB_residue_ids(self.Model.residues_of_interest))
self.script.append('''
### Rosetta model objects ###
# Rosetta model mutations
has_mutations = cmd.count_atoms('%(model_selection)s') > 0
if has_mutations: cmd.select('RosettaModel_mutations_s', '%(model_selection)s');
if has_mutations: cmd.create('RosettaModel_mutations', '%(model_selection)s');
if has_mutations: cmd.show('sticks', 'RosettaModel_mutations')
''' % vars())
self.script.append('''
if has_mutations: cmd.color('%(RosettaModel.mutations)s', 'RosettaModel_mutations')
# Rosetta model HETATMs - create and display
has_hetatms = cmd.count_atoms('RosettaModel and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('RosettaModel_HETATMs', 'RosettaModel and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('sticks', 'RosettaModel_HETATMs')
if has_hetatms: cmd.create('spheres_RosettaModel_HETATMs', 'RosettaModel and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_RosettaModel_HETATMs')
if has_hetatms: cmd.disable('spheres_RosettaModel_HETATMs')
''' % self.color_scheme)
if self.ExpStructure:
exp_structure_selection = 'ExpStructure and (%s)' % (create_pymol_selection_from_PDB_residue_ids(self.ExpStructure.residues_of_interest))
self.script.append('''
### ExpStructure objects ###
# ExpStructure mutations
has_mutations = cmd.count_atoms('%(exp_structure_selection)s') > 0
if has_mutations: cmd.select('ExpStructure_mutations_s', '%(exp_structure_selection)s');
if has_mutations: cmd.create('ExpStructure_mutations', '%(exp_structure_selection)s');
if has_mutations: cmd.show('sticks', 'ExpStructure_mutations')
''' % vars())
self.script.append('''if has_mutations: cmd.color('%(ExpStructure.mutations)s', 'ExpStructure_mutations')
# ExpStructure HETATMs - create and display
has_hetatms = cmd.count_atoms('ExpStructure and het and !(resn CSE+SEC+MSE)') > 0
if has_hetatms: cmd.create('ExpStructure_HETATMs', 'ExpStructure and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('sticks', 'ExpStructure_HETATMs')
if has_hetatms: cmd.create('spheres_ExpStructure_HETATMs', 'ExpStructure and het and !(resn CSE+SEC+MSE)');
if has_hetatms: cmd.show('spheres', 'spheres_ExpStructure_HETATMs')
if has_hetatms: cmd.disable('spheres_ExpStructure_HETATMs')
#ExpStructure and het and !(resn CSE+SEC+MSE)')
''' % self.color_scheme)
def _add_raytracing_section(self):
self.script.append('''
# Atom coloring
select none
util.cnc
# Set lighting
set two_sided_lighting, on
''')
def _add_postamble(self):
self.script.append('''
# Show only polar hydrogens
hide (hydro)
# Set zoom
zoom
# Re-order the objects in the right pane
order *,yes
order Scaffold_mutations_s, location=bottom
order RosettaModel_mutations_s, location=bottom
order ExpStructure_mutations_s, location=bottom
order spheres_Scaffold_HETATMs, location=bottom
order spheres_RosettaModel_HETATMs, location=bottom
order spheres_ExpStructure_HETATMs, location=bottom
save session.pse
quit
''')
def _create_script(self):
self.script = []
self._add_preamble()
self._add_load_section()
self._add_view_settings_section()
self._add_generic_chain_settings_section()
self._add_specific_chain_settings_section()
self._add_superimposition_section()
self._add_orient_view_section()
self._add_scaffold_view_section()
self._add_residue_highlighting_section()
self._add_raytracing_section()
self._add_postamble()
self.script = '\n'.join(self.script)
|
mit
| -6,901,978,881,668,865,000
| 37.624506
| 149
| 0.707911
| false
| 3.203607
| false
| false
| false
|
tchellomello/home-assistant
|
homeassistant/components/kodi/device_trigger.py
|
1
|
2824
|
"""Provides device automations for Kodi."""
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, Event, HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN, EVENT_TURN_OFF, EVENT_TURN_ON
TRIGGER_TYPES = {"turn_on", "turn_off"}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Kodi devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain == "media_player":
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_on",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turn_off",
}
)
return triggers
@callback
def _attach_trigger(
hass: HomeAssistant, config: ConfigType, action: AutomationActionType, event_type
):
@callback
def _handle_event(event: Event):
if event.data[ATTR_ENTITY_ID] == config[CONF_ENTITY_ID]:
hass.async_run_job(
action,
{"trigger": {**config, "description": event_type}},
event.context,
)
return hass.bus.async_listen(event_type, _handle_event)
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] == "turn_on":
return _attach_trigger(hass, config, action, EVENT_TURN_ON)
if config[CONF_TYPE] == "turn_off":
return _attach_trigger(hass, config, action, EVENT_TURN_OFF)
return lambda: None
|
apache-2.0
| 1,542,951,620,193,615,400
| 29.365591
| 85
| 0.61296
| false
| 3.994342
| true
| false
| false
|
Jonwing/morphling
|
morphling/renderer.py
|
1
|
4057
|
# -*- coding: utf-8 -*-
import re
class Renderer(object):
'''
the default renderer for parser
'''
_escape_pattern = re.compile(r'&(?!#?\w+;)')
_not_allowed_schemes = ['javascript:', 'vbscript:']
# HTML tags
_p = 'p'
_tr = 'tr'
def __init__(self, **kwargs):
self._escape = kwargs.get('escape', True)
@property
def p(self):
'''
<p>
'''
return self._p
@property
def close_p(self):
return '</%s>' % self._p
@property
def placeholder(self):
return ''
@property
def hr(self):
return '<hr>\n'
@property
def line_break(self):
return '<br>\n'
def escape(self, content, quote=False, smart_amp=True):
if smart_amp:
content = self._escape_pattern.sub('&', content)
else:
content = content.replace('&', '&')
content = content.replace('<', '<').replace('>', '>')
if quote:
content = content.replace('"', '"').replace("'", ''')
return content
def escape_link(self, link):
lower_url = link.lower().strip('\x00\x1a \n\r\t')
for scheme in self._not_allowed_schemes:
if lower_url.startswith(scheme):
return ''
return self.escape(link, quote=True, smart_amp=False)
def open_tag(self, tag, **kwargs):
extras = ['%s=%s' % (k, v) for k, v in kwargs.items() if v]
tag = getattr(self, ''.join(['_', tag]), tag)
return '<{tag} {attrs}>'.format(tag=tag, attrs=' '.join(extras))
def close_tag(self, tag, breakline=False):
tag = getattr(self, ''.join(['_', tag]), tag)
if breakline:
return '</%s>\n' % tag
return '</%s>' % tag
def block_html(self, tag, content, breakline=True, **kwargs):
fmt = '{open_t}{cnt}{close_t}'
return fmt.format(
open_t=self.open_tag(tag, **kwargs),
cnt=content,
close_t=self.close_tag(tag, breakline=breakline)
)
def tr(self, content, **kwargs):
return self.block_html('tr', content, **kwargs)
def table(self, header, body):
return (
'<table>\n<thead>%s</thead>\n'
'<tbody>\n%s</tbody>\n</table>\n'
) % (header, body)
def code(self, content):
return self.block_html('code', content, False)
def emphasis(self, content):
return self.block_html('em', content, False)
def double_emphasis(self, content):
return self.block_html('strong', content, False)
def strikethrough(self, content):
return self.block_html('del', content, False)
def footnote_ref(self, ref_key, index):
hyperlink = self.block_html(
'a', index, breakline=False, **{'class': 'footnote', 'href': '#fn:%s' % ref_key})
return self.block_html('sup', hyperlink, False)
# return '<sup><a class=footnote href=#fn:%s>%s</a></sup>' % (ref_key, index)
def link(self, addr, text):
return self.block_html('a', text, breakline=False, href=addr)
# return '<a href={addr}>{text}<a>'.format(addr=addr, text=text)
def img(self, src, alt=None, title=None):
seg = '<img src=%s' % (self.escape_link(src) if self._escape else src)
if alt:
            seg += ' alt=%s' % (self.escape(alt) if self._escape else alt)
if title:
            seg += ' title=%s' % (self.escape(title) if self._escape else title)
return seg + '>'
def fence(self, code, language=None, escape=True):
if escape:
code = self.escape(code, quote=True, smart_amp=False)
lang = 'class=lang-%s' % language if language else ''
return '<pre><code {cls}>{code}\n</code></pre>'.format(cls=lang, code=code)
def link_definition(self, key, link, **kwargs):
fmt = '{open_p}[{key}] : {link}{close_p}'
return fmt.format(open_p=self.open_tag(self.p, **kwargs),
key=key, link=link, close_p=self.close_tag(self.p))
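# Illustrative usage (added for clarity, not part of the original module):
# a quick check of how the renderer composes tags; the expected output of
# each call is noted in the trailing comment.
if __name__ == '__main__':
    r = Renderer()
    print(r.open_tag('p', id='intro'))           # -> '<p id=intro>'
    print(r.block_html('em', 'hi', False))       # -> '<em >hi</em>'
    print(r.escape_link('javascript:alert(1)'))  # -> '' (scheme not allowed)
    print(r.link('http://example.com', 'home'))  # -> '<a href=http://example.com>home</a>'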
|
mit
| 8,642,026,942,259,503,000
| 31.198413
| 93
| 0.539808
| false
| 3.46456
| false
| false
| false
|
knittledan/imageResizer
|
imageResizer.py
|
1
|
6167
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# imageUtility.py
#-------------------------------------------------------------------------------
# Software License
# The Python Imaging Library (PIL) is
#
# Copyright © 1997-2011 by Secret Labs AB
# Copyright © 1995-2011 by Fredrik Lundh
#-------------------------------------------------------------------------------
import os
from PIL import Image
class Parameters(object):
    # where to save thumbnails and images
thumbNailPath = r'C:\Users\codingCobra\Desktop\backgrounds'
imagePath = r'C:\Users\codingCobra\Desktop\backgrounds'
# default parameters
maxImageWidth = 900
thumbHalfRez = 200
thumbWidth = 110
thumbHeight = 90
class ImageResizer(object):
"""
Utilities to Resize and Crop an image based on parameters.
Supply a path to the image that needs processing.
"""
WIDTH = 'width'
HEIGHT = 'height'
RESIZE = 'resize'
THUMB_NAIL = 'thumbNail'
def __init__(self, imagePath, parameters):
self.originalImage = self.__openImage(imagePath)
self.image = self.originalImage
self.mode = self.image.mode
self.format = self.image.format
self.width = self.image.size[0]
self.height = self.image.size[1]
self.name = self.__fileName(self.image)
self.savePrefix = 'resized_'
self.thumbPrefix = 'thumbnail_'
self.parameters = parameters
def __getattr__(self, key):
print 'ImageResizer has no attribute %s' % key
def __delattr__(self, key):
print 'You are not allowed to delete attributes.'
#---------------------------------------------------------------------------
# Methods
#---------------------------------------------------------------------------
def resizeImage(self, scaleBy=None, size=None):
"""
        Uniformly resize an image by height or width.
:param scaleBy: width or height
:param size: pixels count
:return:
"""
sizeDefault = int(self.parameters.maxImageWidth)
scaleBy = self.WIDTH if scaleBy is None else scaleBy
size = sizeDefault if size is None else size
self.__downRezImage(scaleBy, size)
self.__saveImage(self.RESIZE)
def createThumbNail(self):
"""
Resize image to smaller size then crop based on parameters
thumbWidth and thumbHeight
:return:
"""
halfRezWidth = int(self.parameters.thumbHalfRez)
newWidth = int(self.parameters.thumbWidth)
newHeight = int(self.parameters.thumbHeight)
if self.width > halfRezWidth:
self.__downRezImage(self.WIDTH, halfRezWidth)
left = (self.width - newWidth) /2
upper = (self.height - newHeight)/2
right = (self.width + newWidth) /2
lower = (self.height + newHeight)/2
box = (left, upper, right, lower)
self.image = self.image.crop(box)
self.__saveImage(self.THUMB_NAIL)
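    # Illustrative note (added for clarity): with the default Parameters and a
    # 200x150 source that is already at thumbHalfRez width, the crop box above
    # works out to (left, upper, right, lower) = (45, 30, 155, 120), i.e. a
    # centred 110x90 window.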
#---------------------------------------------------------------------------
# Helpers
#---------------------------------------------------------------------------
def __saveImage(self, saveType):
"""
Save processed image as thumbNail or resize.
:param saveType: resize or thumbNail
:return: boolean
"""
if saveType == self.RESIZE:
newName = str(self.savePrefix) + str(self.name)
savePath = self.parameters.imagePath
        elif saveType == self.THUMB_NAIL:
            newName = str(self.thumbPrefix) + str(self.name)
            savePath = self.parameters.thumbNailPath
        else:
            raise ValueError('Unknown save type: %s' % saveType)
        imagePath = os.path.join(savePath, newName)
try:
self.image.save(imagePath, "JPEG")
return True
except IOError, e:
raise IOError('Unable to save new image: %s' % str(e))
def __downRezImage(self, region, size):
"""
Resize image into memory before cropping.
:param region: width or height
:param size: pixels count
:return:
"""
if region == self.WIDTH:
ratio = float(size)/float(self.width)
newWidth = int(size)
newHeight = int(self.height*ratio)
if region == self.HEIGHT:
ratio = float(size)/float(self.height)
newHeight = int(size)
newWidth = int(self.width*ratio)
self.image = self.image.resize((newWidth, newHeight), Image.ANTIALIAS)
self.width = newWidth
self.height = newHeight
#---------------------------------------------------------------------------
# Statics
#---------------------------------------------------------------------------
@staticmethod
def __openImage(image):
"""
Open image using the PIL.
:param image: path to image
:return: PIL image obj
"""
if os.path.isfile(image):
try:
return Image.open(image)
except IOError:
raise
else:
            message = 'This is not a file'
            raise IOError(message)
@staticmethod
def __fileName(image):
"""
Get the name of the image without the path.
:param image: path to image
:return: imageName.ext
"""
return os.path.split(image.filename)[-1]
# example usages
path = r'C:\Users\codingCobra\Desktop\backgrounds\7YMpZvD.jpg'
image = ImageResizer(path, Parameters())
image.savePrefix = 'resized-1_'
image.thumbPrefix = 'thumb-1_'
image.resizeImage(scaleBy='width', size=700)
image.createThumbNail()
image = ImageResizer(path, Parameters())
image.savePrefix = 'resized-2_'
image.thumbPrefix = 'thumb-2_'
image.resizeImage(scaleBy='height', size=600)
image.createThumbNail()
image = ImageResizer(path, Parameters())
image.savePrefix = 'resized-3_'
image.thumbPrefix = 'thumb-3_'
image.resizeImage()
image.createThumbNail()
|
mit
| -2,618,635,322,029,660,700
| 31.967914
| 80
| 0.525061
| false
| 4.168357
| false
| false
| false
|
emmanvg/cti-stix-elevator
|
stix2elevator/stix_stepper.py
|
1
|
3114
|
import io
import json
import sys
from six import text_type
from stix2.pattern_visitor import create_pattern_object
def step_cyber_observable(obj):
type_name20 = obj["type"]
if type_name20 == "file":
obj.pop("is_encrypted", None)
obj.pop("encryption_algorithm", None)
obj.pop("decryption_key", None)
if "extensions" in obj:
exts = obj["extensions"]
if "archive-ext" in exts:
exts["archive-ext"].pop("version", None)
if "raster-image-ext" in exts:
exts["raster-image-ext"].pop("image_compression_algorithm", None)
elif type_name20 == "network-traffic":
if "extensions" in obj:
exts = obj["extensions"]
if "socket-ext" in exts:
exts["socket-ext"].pop("protocol_family", None)
elif type_name20 == "process":
obj.pop("name", None)
obj.pop("arguments", None)
if "binary_ref" in obj:
obj["image_ref"] = obj["binary_ref"]
obj.pop("binary_ref", None)
elif type_name20 == "user-account":
if "password_last_changed" in obj:
obj["credential_last_changed"] = obj["password_last_changed"]
obj.pop("password_last_changed", None)
def step_observable_data(object):
for key, obj in object["objects"].items():
step_cyber_observable(obj)
def step_pattern(pattern):
pattern_obj = create_pattern_object(pattern, module_suffix="Elevator", module_name="stix2elevator.convert_pattern")
return text_type(pattern_obj.toSTIX21())
def step_object(object):
object["spec_version"] = "2.1"
if (object["type"] == "indicator" or object["type"] == "malware" or
object["type"] == "report" or object["type"] == "threat-actor" or
object["type"] == "tool"):
if "labels" in object:
types_property_name = object["type"].replace("-", "_") + "_types"
object[types_property_name] = object["labels"]
object.pop("labels")
if object["type"] == "indicator":
object["pattern"] = step_pattern(object["pattern"])
elif object["type"] == "observed-data":
step_observable_data(object)
# update "in place"
def step_bundle(bundle):
for o in bundle["objects"]:
step_object(o)
bundle.pop("spec_version", None)
return bundle
def step_file(fn, encoding="utf-8"):
sys.setrecursionlimit(5000)
with io.open(fn, "r", encoding=encoding) as json_data:
json_content = json.load(json_data)
if 'spec_version' in json_content and "type" in json_content and json_content["type"] == "bundle":
json_string = json.dumps(step_bundle(json_content),
ensure_ascii=False,
indent=4,
separators=(',', ': '),
sort_keys=True)
print(json_string)
return json_string
else:
print("stix_stepper only converts STIX 2.0 to STIX 2.1")
return
if __name__ == '__main__':
step_file(sys.argv[1])
|
bsd-3-clause
| -5,713,456,181,478,497,000
| 33.21978
| 119
| 0.571291
| false
| 3.72043
| false
| false
| false
|
matejc/searx
|
searx/engines/bing_images.py
|
1
|
3090
|
"""
Bing (Images)
@website https://www.bing.com/images
@provide-api yes (http://datamarket.azure.com/dataset/bing/search),
max. 5000 query/month
@using-api no (because of query limit)
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, img_src
 @todo        currently up to 35 images are received per page,
              because bing does not honour count=10;
              the response is limited to 10 images
"""
from urllib import urlencode
from lxml import html
from json import loads
import re
from searx.engines.bing import _fetch_supported_languages, supported_languages_url
# engine dependent config
categories = ['images']
paging = True
safesearch = True
time_range_support = True
# search-url
base_url = 'https://www.bing.com/'
search_string = 'images/search?{query}&count=10&first={offset}'
time_range_string = '&qft=+filterui:age-lt{interval}'
thumb_url = "https://www.bing.com/th?id={ihk}"
time_range_dict = {'day': '1440',
'week': '10080',
'month': '43200',
'year': '525600'}
# safesearch definitions
safesearch_types = {2: 'STRICT',
1: 'DEMOTE',
0: 'OFF'}
_quote_keys_regex = re.compile('({|,)([a-z][a-z0-9]*):(")', re.I | re.U)
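# Illustrative note (added for clarity): the 'm' attribute parsed in response()
# holds JSON-like data with unquoted keys, e.g. {surl:"http://x",imgurl:"http://y"}.
# The substitution quotes each key and adds a space so the string becomes valid JSON:
#   _quote_keys_regex.sub(r'\1"\2": \3', '{surl:"http://x"}') == '{"surl": "http://x"}'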
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
# required for cookie
if params['language'] == 'all':
language = 'en-US'
else:
language = params['language']
search_path = search_string.format(
query=urlencode({'q': query}),
offset=offset)
params['cookies']['SRCHHPGUSR'] = \
'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0] +\
'&ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
params['url'] = base_url + search_path
if params['time_range'] in time_range_dict:
params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
# parse results
for result in dom.xpath('//div[@class="dg_u"]/div'):
link = result.xpath('./a')[0]
        # parse json-data (the keys must be quoted first so it parses as valid JSON)
json_data = loads(_quote_keys_regex.sub(r'\1"\2": \3', link.attrib.get('m')))
title = link.attrib.get('t1')
ihk = link.attrib.get('ihk')
# url = 'http://' + link.attrib.get('t3')
url = json_data.get('surl')
img_src = json_data.get('imgurl')
# append result
results.append({'template': 'images.html',
'url': url,
'title': title,
'content': '',
'thumbnail_src': thumb_url.format(ihk=ihk),
'img_src': img_src})
# TODO stop parsing if 10 images are found
if len(results) >= 10:
break
# return results
return results
|
agpl-3.0
| 4,278,168,107,756,276,700
| 27.878505
| 97
| 0.572492
| false
| 3.523375
| false
| false
| false
|
kcleong/rover_challenge
|
tests/test_grid.py
|
1
|
1349
|
# -*- coding: utf-8 -*-
from challenge.grid import Grid
from challenge.rover import Rover
import unittest
class GridTestSuite(unittest.TestCase):
"""Advanced test cases."""
def test_instantion(self):
""" Test if we can instantiate a grid object """
grid = Grid(5, 5)
self.assertEqual(type(grid), Grid)
def test_invalid_instantation(self):
""" Test if we can instantiate a grid object with invalid values """
with self.assertRaises(ValueError):
Grid('a', None) # This should give a value error
def test_xy_max(self):
""" Test if a given coords are set in the grid """
max_x = 7
max_y = 9
grid = Grid(max_x, max_y)
self.assertEqual(grid.max_x, max_x)
self.assertEqual(grid.max_y, max_y)
def test_turn(self):
""" Test a turn movement in the grid """
starting_pos = '12N'
turn = 'L'
grid = Grid(5, 5)
output = grid.move(starting_pos, turn)
self.assertEqual(output, '12W')
def test_movement(self):
""" Test a forward movement in the grid """
starting_pos = '12W'
movement = 'M'
grid = Grid(5, 5)
output = grid.move(starting_pos, movement)
self.assertEqual(output, '02W')
if __name__ == '__main__':
unittest.main()
|
mit
| 6,842,073,942,312,527,000
| 24.471698
| 76
| 0.576723
| false
| 3.706044
| true
| false
| false
|
bixbydev/Bixby
|
util/sqltocsv.py
|
1
|
1559
|
#!/usr/bin/python
#------------------------------------------------------------------------------
# Copyright (C) 2013 Bradley Hilton <bradleyhilton@bradleyhilton.com>
#
# Distributed under the terms of the GNU GENERAL PUBLIC LICENSE V3.
#______________________________________________________________________________
# There is stuff below you may need to change. Specifically in the Oracle, MySQL, And Google Provisioning API Stuff sections.
# Filename: sqltocsv.py
import csv
def csv_from_sql(query, outputfile, dbcursor, supress_header=False):
f = open(outputfile, 'wb')
dbcursor.execute(query)
queryresults = dbcursor.fetchall()
csvwriter = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
if not supress_header:
        csvwriter.writerow([i[0] for i in dbcursor.description])
for row in queryresults:
csvwriter.writerow(row)
print row
f.close()
def csv_to_sql(csvfile, db_table, dbcursor=None):
"""Opens a CSV file. Reads the row headers
and generates an INSERT statement and inserts
rows into file. Row headers must match column names
in the insert table."""
with open(csvfile, 'rU') as f:
reader = csv.reader(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
headers = reader.next()
print headers
data = []
insert = 'INSERT INTO %s \n(' %db_table
columns = ', '.join(headers) +') \n'
values = 'VALUES ('+'%s, ' *(len(headers) - 1) +'%s)'
query = insert + columns + values
for row in reader:
if dbcursor:
dbcursor.execute(query, row)
print query %tuple(row)
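# Illustrative note (added for clarity, not part of the original script): for a
# CSV whose header row is "id,name,email" and db_table='users', csv_to_sql
# builds the parameterised statement
#   INSERT INTO users
#   (id, name, email)
#   VALUES (%s, %s, %s)
# and executes it once per data row when a dbcursor is supplied.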
|
gpl-3.0
| -5,794,217,589,997,273,000
| 31.479167
| 125
| 0.627967
| false
| 3.352688
| false
| false
| false
|
GitAcrown/Kreybot
|
Krey/cogs/economy.py
|
1
|
27235
|
import discord
from discord.ext import commands
from cogs.utils.dataIO import dataIO, fileIO
from collections import namedtuple, defaultdict
from datetime import datetime
from random import randint
from copy import deepcopy
from .utils import checks
from __main__ import send_cmd_help
import os
import time
import logging
import asyncio
#Modifié
default_settings = {"BOOST" : 1, "PAYDAY_TIME" : 86400, "PAYDAY_CREDITS" : 150, "SLOT_MIN" : 5, "SLOT_MAX" : 500, "SLOT_TIME" : 120}
slot_payouts = """Gains possibles dans la machine:
:two: :two: :six: Offre * 5000
:four_leaf_clover: :four_leaf_clover: :four_leaf_clover: +1000
:cherries: :cherries: :cherries: +800
:two: :six: Offre * 4
:cherries: :cherries: Offre * 3
Trois symboles: +500
Deux symboles: Offre * 2"""
class BankError(Exception):
pass
class AccountAlreadyExists(BankError):
pass
class NoAccount(BankError):
pass
class InsufficientBalance(BankError):
pass
class NegativeValue(BankError):
pass
class SameSenderAndReceiver(BankError):
pass
class Bank:
def __init__(self, bot, file_path):
self.accounts = dataIO.load_json(file_path)
self.bot = bot
def create_account(self, user):
server = user.server
if not self.account_exists(user):
if server.id not in self.accounts:
self.accounts[server.id] = {}
if user.id in self.accounts: # Legacy account
balance = self.accounts[user.id]["balance"]
else:
balance = 0
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
account = {"name" : user.name, "balance" : balance,
"created_at" : timestamp}
self.accounts[server.id][user.id] = account
self._save_bank()
return self.get_account(user)
else:
raise AccountAlreadyExists()
def account_exists(self, user):
try:
self._get_account(user)
except NoAccount:
return False
return True
def withdraw_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
if account["balance"] >= amount:
account["balance"] -= amount
self.accounts[server.id][user.id] = account
self._save_bank()
else:
raise InsufficientBalance()
def deposit_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
account["balance"] += amount
self.accounts[server.id][user.id] = account
self._save_bank()
def set_credits(self, user, amount):
server = user.server
if amount < 0:
raise NegativeValue()
account = self._get_account(user)
account["balance"] = amount
self.accounts[server.id][user.id] = account
self._save_bank()
def transfer_credits(self, sender, receiver, amount):
server = sender.server
if amount < 0:
raise NegativeValue()
if sender is receiver:
raise SameSenderAndReceiver()
if self.account_exists(sender) and self.account_exists(receiver):
sender_acc = self._get_account(sender)
if sender_acc["balance"] < amount:
raise InsufficientBalance()
self.withdraw_credits(sender, amount)
self.deposit_credits(receiver, amount)
else:
raise NoAccount()
def can_spend(self, user, amount):
account = self._get_account(user)
if account["balance"] >= amount:
return True
else:
return False
def wipe_bank(self, server):
self.accounts[server.id] = {}
self._save_bank()
def get_server_accounts(self, server):
if server.id in self.accounts:
raw_server_accounts = deepcopy(self.accounts[server.id])
accounts = []
for k, v in raw_server_accounts.items():
v["id"] = k
v["server"] = server
acc = self._create_account_obj(v)
accounts.append(acc)
return accounts
else:
return []
def get_all_accounts(self):
accounts = []
for server_id, v in self.accounts.items():
server = self.bot.get_server(server_id)
if server is None:# Servers that have since been left will be ignored
continue # Same for users_id from the old bank format
raw_server_accounts = deepcopy(self.accounts[server.id])
for k, v in raw_server_accounts.items():
v["id"] = k
v["server"] = server
acc = self._create_account_obj(v)
accounts.append(acc)
return accounts
def get_balance(self, user):
account = self._get_account(user)
return account["balance"]
def get_account(self, user):
acc = self._get_account(user)
acc["id"] = user.id
acc["server"] = user.server
return self._create_account_obj(acc)
def _create_account_obj(self, account):
account["member"] = account["server"].get_member(account["id"])
account["created_at"] = datetime.strptime(account["created_at"],
"%Y-%m-%d %H:%M:%S")
Account = namedtuple("Account", "id name balance "
"created_at server member")
return Account(**account)
def _save_bank(self):
dataIO.save_json("data/economy/bank.json", self.accounts)
def _get_account(self, user):
server = user.server
try:
return deepcopy(self.accounts[server.id][user.id])
except KeyError:
raise NoAccount()
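# Illustrative note (added for clarity): bank.json ends up shaped roughly like
#   {"<server_id>": {"<user_id>": {"name": "...", "balance": 0,
#                                  "created_at": "2016-01-01 12:00:00"}}},
# which is what Bank._get_account looks up and deepcopies.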
class Economy:
"""Soyez riche virtuellement !"""
def __init__(self, bot):
global default_settings
self.bot = bot
self.bank = Bank(bot, "data/economy/bank.json")
self.settings = fileIO("data/economy/settings.json", "load")
if "PAYDAY_TIME" in self.settings: #old format
default_settings = self.settings
self.settings = {}
self.settings = defaultdict(lambda: default_settings, self.settings)
self.payday_register = defaultdict(dict)
self.slot_register = defaultdict(dict)
@commands.group(name="bank", pass_context=True)
async def _bank(self, ctx):
"""Opérations bancaires"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@_bank.command(pass_context=True, no_pm=True, hidden=True) #Inutile depuis MAJ "auto_register"
async def register(self, ctx):
"""Enregistre un compte dans Bank"""
user = ctx.message.author
try:
account = self.bank.create_account(user)
await self.bot.say("{} Compte ouvert. Vous avez: {}§".format(user.mention,
account.balance))
except AccountAlreadyExists:
await self.bot.say("{} Tu as déjà un compte Bank.".format(user.mention))
async def auto_register(self, message): #Enregistre automatiquement
user = message.author
server = message.server
if server != None:
try:
account = self.bank.create_account(user)
except AccountAlreadyExists:
pass
else:
pass
@_bank.command(pass_context=True)
async def balance(self, ctx, user : discord.Member=None):
"""Montre l'argent possédé par quelqu'un.
Par défaut, son argent."""
if not user:
user = ctx.message.author
try:
await self.bot.say("{} Vous avez: {}§".format(user.mention, self.bank.get_balance(user)))
except NoAccount:
await self.bot.say("{} Vous n'avez pas de compte chez Bank. Tapez {}bank register pour en ouvrir un.".format(user.mention, ctx.prefix))
else:
try:
await self.bot.say("{} possède {}§".format(user.name, self.bank.get_balance(user)))
except NoAccount:
await self.bot.say("Cet utilisateur ne possède pas de compte Bank.")
@_bank.command(pass_context=True)
async def transfer(self, ctx, user : discord.Member, sum : int):
"""Transfert des crédits d'un utilisateur à un autre. (Taxe de 4%)"""
author = ctx.message.author
mult = sum * 0.96
sum = round(mult)
try:
self.bank.transfer_credits(author, user, sum)
logger.info("{}({}) transferred {} credits to {}({})".format(
author.name, author.id, sum, user.name, user.id))
await self.bot.say("{} crédits ont été transférés au compte de {}. (Taxe de 4%)".format(sum, user.name))
except NegativeValue:
await self.bot.say("Vous avez besoin de transférer au moins 1 crédit.")
except SameSenderAndReceiver:
await self.bot.say("Vous ne pouvez pas transférer des crédits à vous-même.")
except InsufficientBalance:
await self.bot.say("Vous n'avez pas cette somme dans votre compte.")
except NoAccount:
await self.bot.say("Cet utilisateur ne possède pas de compte.")
@_bank.command(name="set", pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def _set(self, ctx, user : discord.Member, sum : int):
"""Change la valeur d'un compte
Admin/Proprio seulement."""
author = ctx.message.author
try:
self.bank.set_credits(user, sum)
logger.info("{}({}) set {} credits to {} ({})".format(author.name, author.id, str(sum), user.name, user.id))
await self.bot.say("{} possède maintenant {}".format(user.name, str(sum)))
except NoAccount:
await self.bot.say("Cet utilisateur ne possède pas de compte.")
@commands.command(pass_context=True, no_pm=True)
async def rjd(self, ctx): # TODO
"""Pour avoir quelques crédits"""
author = ctx.message.author
server = author.server
id = author.id
sum = self.settings[server.id]["PAYDAY_CREDITS"] * self.settings[server.id]["BOOST"]
if self.bank.account_exists(author):
if id in self.payday_register[server.id]:
seconds = abs(self.payday_register[server.id][id] - int(time.perf_counter()))
if seconds >= self.settings[server.id]["PAYDAY_TIME"]:
self.bank.deposit_credits(author, sum)
self.payday_register[server.id][id] = int(time.perf_counter())
await self.bot.say("{} Voilà quelques crédits ! (+{}§)".format(author.mention, str(sum)))
else:
await self.bot.say("{} Trop tôt, il faudra attendre {}.".format(author.mention, self.display_time(self.settings[server.id]["PAYDAY_TIME"] - seconds)))
else:
self.payday_register[server.id][id] = int(time.perf_counter())
self.bank.deposit_credits(author, sum)
await self.bot.say("{} Voilà quelques crédits. (+{}§)".format(author.mention, str(sum)))
else:
await self.bot.say("{} Vous avez besoin d'un compte. tapez {}bank register pour en ouvrir un.".format(author.mention, ctx.prefix))
@commands.group(pass_context=True)
async def leaderboard(self, ctx):
"""Top par serveur ou global
Par défaut le serveur"""
if ctx.invoked_subcommand is None:
await ctx.invoke(self._server_leaderboard)
@leaderboard.command(name="server", pass_context=True)
async def _server_leaderboard(self, ctx, top : int=10):
"""Poste un top des personnes les plus riche
par défaut top 10""" #Originally coded by Airenkun - edited by irdumb
server = ctx.message.server
if top < 1:
top = 10
bank_sorted = sorted(self.bank.get_server_accounts(server),
key=lambda x: x.balance, reverse=True)
if len(bank_sorted) < top:
top = len(bank_sorted)
topten = bank_sorted[:top]
highscore = ""
place = 1
for acc in topten:
highscore += str(place).ljust(len(str(top))+1)
highscore += (acc.name+" ").ljust(23-len(str(acc.balance)))
highscore += str(acc.balance) + "\n"
place += 1
if highscore:
if len(highscore) < 1985:
await self.bot.say("```py\n"+highscore+"```")
else:
await self.bot.say("Trop gros pour être affiché.")
else:
await self.bot.say("Aucun compte à afficher.")
@leaderboard.command(name="global")
async def _global_leaderboard(self, top : int=10):
"""Affiche le top global mutli-serveur"""
if top < 1:
top = 10
bank_sorted = sorted(self.bank.get_all_accounts(),
key=lambda x: x.balance, reverse=True)
unique_accounts = []
for acc in bank_sorted:
if not self.already_in_list(unique_accounts, acc):
unique_accounts.append(acc)
if len(unique_accounts) < top:
top = len(unique_accounts)
topten = unique_accounts[:top]
highscore = ""
place = 1
for acc in topten:
highscore += str(place).ljust(len(str(top))+1)
highscore += ("{} |{}| ".format(acc.name, acc.server.name)).ljust(23-len(str(acc.balance)))
highscore += str(acc.balance) + "\n"
place += 1
if highscore:
if len(highscore) < 1985:
await self.bot.say("```py\n"+highscore+"```")
else:
await self.bot.say("Trop gros pour être affiché.")
else:
await self.bot.say("Aucun compte à afficher.")
def already_in_list(self, accounts, user):
for acc in accounts:
if user.id == acc.id:
return True
return False
@commands.command()
async def payouts(self):
"""Montre les gains possibles"""
await self.bot.whisper(slot_payouts)
@commands.command(pass_context=True, no_pm=True)
async def slot(self, ctx, bid : int):
"""Joue à la machine à sous"""
author = ctx.message.author
server = author.server
if not self.bank.account_exists(author):
await self.bot.say("{} Tu as besoin d'un compte pour y jouer. Tape {}bank register pour en ouvrir un.".format(author.mention, ctx.prefix))
return
if self.bank.can_spend(author, bid):
if bid >= self.settings[server.id]["SLOT_MIN"] and bid <= self.settings[server.id]["SLOT_MAX"]:
if author.id in self.slot_register:
if abs(self.slot_register[author.id] - int(time.perf_counter())) >= self.settings[server.id]["SLOT_TIME"]:
self.slot_register[author.id] = int(time.perf_counter())
await self.slot_machine(ctx.message, bid)
else:
await self.bot.say("La machine n'est pas encore disponible ! Attendez {} secondes entre chaque utilisation".format(self.settings[server.id]["SLOT_TIME"]))
else:
self.slot_register[author.id] = int(time.perf_counter())
await self.slot_machine(ctx.message, bid)
else:
await self.bot.say("{0} L'offre doit être entre {1} et {2}.".format(author.mention, self.settings[server.id]["SLOT_MIN"], self.settings[server.id]["SLOT_MAX"]))
else:
await self.bot.say("{0} Tu as besoin d'un compte avec assez de fonds pour y jouer.".format(author.mention))
async def slot_machine(self, message, bid):
reel_pattern = [":cherries:", ":cookie:", ":two:", ":four_leaf_clover:", ":cyclone:", ":sunflower:", ":six:", ":mushroom:", ":heart:", ":snowflake:"]
padding_before = [":mushroom:", ":heart:", ":snowflake:"] # padding prevents index errors
padding_after = [":cherries:", ":cookie:", ":two:"]
reel = padding_before + reel_pattern + padding_after
reels = []
for i in range(0, 3):
n = randint(3,12)
reels.append([reel[n - 1], reel[n], reel[n + 1]])
line = [reels[0][1], reels[1][1], reels[2][1]]
display_reels = "\n " + reels[0][0] + " " + reels[1][0] + " " + reels[2][0] + "\n"
display_reels += ">" + reels[0][1] + " " + reels[1][1] + " " + reels[2][1] + "\n"
display_reels += " " + reels[0][2] + " " + reels[1][2] + " " + reels[2][2] + "\n"
if line[0] == ":two:" and line[1] == ":two:" and line[2] == ":six:":
bid = bid * 5000
await self.bot.send_message(message.channel, "{}{} 226 ! Offre * 5000! {}! ".format(display_reels, message.author.mention, str(bid)))
elif line[0] == ":four_leaf_clover:" and line[1] == ":four_leaf_clover:" and line[2] == ":four_leaf_clover:":
bid += 1000
await self.bot.send_message(message.channel, "{}{} Trois trèfles ! +1000! ".format(display_reels, message.author.mention))
elif line[0] == ":cherries:" and line[1] == ":cherries:" and line[2] == ":cherries:":
bid += 800
await self.bot.send_message(message.channel, "{}{} Trois cerises ! +800! ".format(display_reels, message.author.mention))
elif line[0] == line[1] == line[2]:
bid += 500
await self.bot.send_message(message.channel, "{}{} Trois symboles ! +500! ".format(display_reels, message.author.mention))
elif line[0] == ":two:" and line[1] == ":six:" or line[1] == ":two:" and line[2] == ":six:":
bid = bid * 4
await self.bot.send_message(message.channel, "{}{} 26 ! Offre * 4! {}! ".format(display_reels, message.author.mention, str(bid)))
elif line[0] == ":cherries:" and line[1] == ":cherries:" or line[1] == ":cherries:" and line[2] == ":cherries:":
bid = bid * 3
await self.bot.send_message(message.channel, "{}{} Deux cerises ! Offre * 3! {}! ".format(display_reels, message.author.mention, str(bid)))
elif line[0] == line[1] or line[1] == line[2]:
bid = bid * 2
            await self.bot.send_message(message.channel, "{}{} Deux symboles ! Offre * 2! {}! ".format(display_reels, message.author.mention, str(bid)))
else:
await self.bot.send_message(message.channel, "{}{} Rien ! Offre perdue. ".format(display_reels, message.author.mention))
self.bank.withdraw_credits(message.author, bid)
await self.bot.send_message(message.channel, "Crédits restant: {}".format(self.bank.get_balance(message.author)))
return True
self.bank.deposit_credits(message.author, bid)
await self.bot.send_message(message.channel, "Crédits restant: {}".format(self.bank.get_balance(message.author)))
@commands.command(name="playrole", pass_context=True)
async def play_role(self, ctx):
"""Vous donne le rôle @Play pour être notifié au début de chaque partie d'un jeu lié à l'économie.
Si le rôle n'existe pas sur le serveur, il sera créé automatiquement."""
server = ctx.message.server
user = ctx.message.author
# Regarde si le rôle existe
if 'Play' not in [r.name for r in server.roles]:
await self.bot.say("Le rôle n'existe pas. Je vais donc le créer...")
try:
perms = discord.Permissions.none()
# Active les permissions voulues (si nécéssaire)
await self.bot.create_role(server, name="Play", permissions=perms)
await self.bot.say("Rôle crée ! Refaites la commande pour obtenir le rôle !")
try:
for c in server.channels:
if c.type.name == 'text':
perms = discord.PermissionOverwrite()
perms.send_messages = False
r = discord.utils.get(ctx.message.server.roles, name="Play")
await self.bot.edit_channel_permissions(c, r, perms)
await asyncio.sleep(1.5)
except discord.Forbidden:
await self.bot.say("Une erreur est apparue.")
except discord.Forbidden:
await self.bot.say("Je ne peux pas créer le rôle.")
else:
server = ctx.message.server
if user.id == self.bot.user.id:
await self.bot.say("Je ne peux pas obtenir ce rôle...")
r = discord.utils.get(ctx.message.server.roles, name="Play")
if 'Play' not in [r.name for r in user.roles]:
await self.bot.add_roles(user, r)
                await self.bot.say("{} Vous avez maintenant le rôle *Play*".format(user.name))
else:
await self.bot.remove_roles(user, r)
await self.bot.say("{} Vous n'avez plus le rôle *Play*".format(user.name))
@commands.group(pass_context=True, no_pm=True)
@checks.admin_or_permissions(manage_server=True)
async def economyset(self, ctx):
"""Change les paramètres du module économie"""
server = ctx.message.server
settings = self.settings[server.id]
if ctx.invoked_subcommand is None:
msg = "```"
for k, v in settings.items():
msg += "{}: {}\n".format(k, v)
msg += "```"
await send_cmd_help(ctx)
await self.bot.say(msg)
@economyset.command(pass_context=True)
async def wipe(self, ctx):
"""Efface entièrement Bank. N'efface pas les données des autres modules."""
server = ctx.message.server
self.bank.wipe_bank(server)
await self.bot.say("Banque effacée.")
@economyset.command(pass_context=True)
async def boost(self, ctx, multiplicateur : int):
"""Active le boost et définit le multiplicateur"""
        server = ctx.message.server
        if multiplicateur <= 0:
            await self.bot.say("Le boost ne peut pas être inférieur ou égal à 0")
            return
        self.settings[server.id]["BOOST"] = multiplicateur
        await self.bot.say("Le boost est maintenant de " + str(multiplicateur))
        fileIO("data/economy/settings.json", "save", self.settings)
@economyset.command(pass_context=True)
async def slotmin(self, ctx, bid : int):
"""Minimum slot machine bid"""
server = ctx.message.server
self.settings[server.id]["SLOT_MIN"] = bid
await self.bot.say("Minimum bid is now " + str(bid) + " credits.")
fileIO("data/economy/settings.json", "save", self.settings)
@economyset.command(pass_context=True)
async def slotmax(self, ctx, bid : int):
"""Maximum slot machine bid"""
server = ctx.message.server
self.settings[server.id]["SLOT_MAX"] = bid
await self.bot.say("Maximum bid is now " + str(bid) + " credits.")
fileIO("data/economy/settings.json", "save", self.settings)
@economyset.command(pass_context=True)
async def slottime(self, ctx, seconds : int):
"""Seconds between each slots use"""
server = ctx.message.server
self.settings[server.id]["SLOT_TIME"] = seconds
await self.bot.say("Cooldown is now " + str(seconds) + " seconds.")
fileIO("data/economy/settings.json", "save", self.settings)
@economyset.command(pass_context=True)
async def paydaytime(self, ctx, seconds : int):
"""Seconds between each payday"""
server = ctx.message.server
self.settings[server.id]["PAYDAY_TIME"] = seconds
await self.bot.say("Value modified. At least " + str(seconds) + " seconds must pass between each payday.")
fileIO("data/economy/settings.json", "save", self.settings)
@economyset.command(pass_context=True)
async def paydaycredits(self, ctx, credits : int):
"""Credits earned each payday"""
server = ctx.message.server
self.settings[server.id]["PAYDAY_CREDITS"] = credits
await self.bot.say("Every payday will now give " + str(credits) + " credits.")
fileIO("data/economy/settings.json", "save", self.settings)
def display_time(self, seconds, granularity=2): # What would I ever do without stackoverflow?
intervals = ( # Source: http://stackoverflow.com/a/24542445
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append("{} {}".format(value, name))
return ', '.join(result[:granularity])
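    # Illustrative note (added for clarity): with the default granularity of 2,
    # display_time(90061) returns "1 day, 1 hour" (90061 s = 1 day + 1 hour +
    # 1 minute + 1 second, truncated to the two largest intervals).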
def check_folders():
if not os.path.exists("data/economy"):
print("Creating data/economy folder...")
os.makedirs("data/economy")
def check_files():
f = "data/economy/settings.json"
if not fileIO(f, "check"):
print("Creating default economy's settings.json...")
fileIO(f, "save", {})
f = "data/economy/bank.json"
if not fileIO(f, "check"):
print("Creating empty bank.json...")
fileIO(f, "save", {})
def setup(bot):
global logger
check_folders()
check_files()
logger = logging.getLogger("red.economy")
n = Economy(bot)
bot.add_listener(n.auto_register, "on_message")
if logger.level == 0: # Prevents the logger from being loaded again in case of module reload
logger.setLevel(logging.INFO)
handler = logging.FileHandler(filename='data/economy/economy.log', encoding='utf-8', mode='a')
handler.setFormatter(logging.Formatter('%(asctime)s %(message)s', datefmt="[%d/%m/%Y %H:%M]"))
logger.addHandler(handler)
bot.add_cog(n)
|
mit
| 8,481,619,921,465,153,000
| 42.147967
| 178
| 0.562226
| false
| 3.675014
| false
| false
| false
|
shundread/pyweek24
|
gamelib/main.py
|
1
|
3587
|
'''Game main module.
Contains the entry point used by the run_game.py script.
Feel free to put all your game code here, or in other modules in this "gamelib"
package.
'''
import json
import pygame
import data
import engine
class Game(object):
'''A class that delegates its engine functionalities to a hot-swappable
module'''
FPS = 60.0
def __init__(self):
self.running = False
self.data = { "gamestate": "newtitle" }
# Swapping state
self.swapped = False
# Error states
        self.input_handling_error = None
self.simulate_error = None
self.render_error = None
def run(self):
engine.init()
clock = pygame.time.Clock()
self.running = True
dt = 0
frames = 0
while self.running:
self.handle_input()
if self.swapped:
self.swapped = False
continue
self.simulate(dt)
self.render()
dt = clock.tick(self.FPS)
frames += 1
# Report framerate on exit
ticks = pygame.time.get_ticks()
framerate = frames / (ticks / 1000.0)
print("Framerate was {0}".format(framerate))
def handle_input(self):
try:
engine.handle_input(self, self.data)
self.input_handling_error = None
except Exception as error:
if self.input_handling_error != error.message:
print("Unable to handle input, reason:")
print(error)
self.input_handling_error = error.message
def simulate(self, dt):
try:
engine.simulate(self, self.data, dt)
self.simulate_error = None
except Exception as error:
if self.simulate_error != error.message:
print("Unable to render, reason:")
print(error)
self.simulate_error = error.message
def render(self):
try:
engine.render(self.data)
self.render_error = None
except Exception as error:
if self.render_error != error.message:
print("Unable to render, reason:")
print(error)
self.render_error = error.message
def quit(self):
self.dump_data()
self.running = False
def request_swap(self):
try:
print("Attempting to swap engine...")
reload(engine)
print("Engine swapped. Reinitializing engine...")
            engine.init()
            self.swapped = True  # make the main loop skip one frame after a swap
            print("Engine reinitialized\n")
except Exception as error:
print("Errors were thrown in the engine swap:")
print(error)
def dump_data(self):
print("Saving the gamestate...")
try:
with open("gamestate.json", "wt") as fout:
json_data = json.dumps(self.data, indent=4)
print(json_data)
fout.write(json_data)
print("Gamestate saved\n")
except Exception as error:
print("Unable to dump the data, reason:")
print(error)
def load_data(self):
print("Restoring the gamestate...")
try:
with open("gamestate.json", "rt") as fin:
new_data = json.load(fin)
self.data = new_data
print("Gamestate restored")
except Exception as error:
print("Unable to load the data, reason:")
print(error)
def main():
game = Game()
# game.load_data()
game.run()
|
gpl-3.0
| -8,235,789,348,464,003,000
| 27.927419
| 79
| 0.539448
| false
| 4.385086
| false
| false
| false
|
zardus/idalink
|
idalink/server.py
|
1
|
1521
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013- Yan Shoshitaishvili aka. zardus
# Ruoyu Wang aka. fish
# Audrey Dutcher aka. rhelmot
# Kevin Borgolte aka. cao
from __future__ import print_function
# idc is just within IDA, so make pylint stop complaining
import idc # pylint: disable=F0401
import threading
from rpyc.core import SlaveService
from rpyc.utils.server import OneShotServer, ThreadedServer
def main_thread(port):
srv = ThreadedServer(SlaveService, port=port)
srv.start()
def main():
port = int(idc.ARGV[1]) if idc.ARGV[1:] else 18861
thread_mode = idc.ARGV[2] == 'threaded' if idc.ARGV[2:] else False
print('Received arguments: port=%s, thread_mode=%s' % (port, thread_mode))
# :note: For speed, we don't want to idc.Wait() here,
# but you might want to call it in your code
# to make sure that autoanalysis has finished.
if thread_mode:
        thread = threading.Thread(target=main_thread, args=(port,))
thread.daemon = True
thread.start()
else:
srv = OneShotServer(SlaveService, port=port)
# OneShotServer is a LIE so we have to do some shit
# this is copied from https://github.com/tomerfiliba/rpyc/blob/master/rpyc/utils/server.py
# specifically, the start method. if stuff breaks look here!
srv._listen()
srv._register()
srv.accept()
idc.Exit(0)
if __name__ == '__main__':
main()
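# Illustrative note (added for clarity, not part of the original file): with the
# server running inside IDA, a client can reach it through an rpyc "classic"
# connection, roughly:
#   import rpyc
#   conn = rpyc.classic.connect('localhost', 18861)
#   remote_idc = conn.modules.idc   # proxy to the idc module inside IDA
# The exact client helper idalink ships may differ; this is only a sketch.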
|
bsd-2-clause
| 7,068,586,202,755,159,000
| 32.065217
| 98
| 0.627876
| false
| 3.328228
| false
| false
| false
|
egbertbouman/tribler-g
|
Tribler/Category/TestCategory.py
|
1
|
4853
|
# Written by Yuan Yuan
# see LICENSE.txt for license information
import sys, os
execpath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '..', '..')
sys.path.append(execpath)
#print sys.path
from Utility.utility import getMetainfo
from Tribler.Category.Category import Category
DEBUG = False
def testFilter(catfilename, torrentpath):
readCategorisationFile(catfilename)
#print 'Install_dir is %s' % execpath
c = Category.getInstance(execpath, None)
total = porn = fn = fp = 0
for tfilename,isporn in tdict.items():
torrent = getMetainfo(os.path.join(torrentpath,tfilename))
name = torrent['info']['name']
cat = c.calculateCategory(torrent, name)
fporn = (cat == ['xxx'])
total+= 1
porn += int(isporn)
if isporn == fporn:
if DEBUG:
print (isporn, fporn), 'good', name
elif isporn and not fporn:
fn+=1
print 'FALSE NEGATIVE'
showTorrent(os.path.join(torrentpath,tfilename))
elif not isporn and fporn:
fp +=1
print 'FALSE POSITIVE'
showTorrent(os.path.join(torrentpath,tfilename))
print """
Total torrents: %(total)d
XXX torrents: %(porn)d
Correct filtered: %(good)d
False negatives: %(fn)d
False positives: %(fp)d
""" % {'total':total, 'porn':porn, 'fn':fn,'fp':fp,'good':total-fn-fp}
def readCategorisationFile(filename):
global tdict
tdict = {}
try:
f = file(filename, 'r')
lines = f.read().splitlines()
for line in lines:
if line:
parts = line.split('\t')
tdict[parts[0]] = bool(int(parts[1]))
f.close()
except IOError:
print 'No file %s found, starting with empty file' % filename
def getTorrentData(path, max_num=-1):
torrents= []
i = 0
for fname in os.listdir(path):
if fname.endswith('.torrent'):
torrents.append(os.path.join(path,fname))
if i%1000 == 0 and i:
print 'Loaded: %d torrents' % i
if i == int(max_num):
break
i+=1
print 'Loaded %d torrents' % len(torrents)
return torrents
def showTorrent(path):
torrent = getMetainfo(os.path.join(path))
name = torrent['info']['name']
print '------------------------------'
print '\tfiles :'
files_list = []
__size_change = 1024
try:
# the multi-files mode
for ifiles in torrent['info']["files"]:
files_list.append((ifiles['path'][-1], ifiles['length'] / float(__size_change)))
except KeyError:
# single mode
files_list.append((torrent['info']["name"],torrent['info']['length'] / float(__size_change)))
for fname, fsize in files_list:
        print '\t\t%s\t%d kb' % (fname, fsize)
print 'Torrent name: %s' % name
print '\ttracker:%s' % torrent['announce']
print '------------------------------'
def createTorrentDataSet(filename, torrentpath):
initSaveFile(filename)
f_out = file(filename, 'a')
torrents = getTorrentData(torrentpath)
for torrent in torrents:
if os.path.split(torrent)[-1] in tset: # already done
continue
showTorrent(torrent)
ans = None
while ans not in ['q', 'y','n']:
print 'Is this torrent porn? (y/n/q)'
ans = sys.stdin.readline()[:-1].lower()
if ans == 'q':
break
else:
saveTorrent(f_out, torrent, (ans=='y'))
f_out.close()
def saveTorrent(f_out, torrent, boolean):
if torrent in tset:
return
tfilename = os.path.split(torrent)[-1]
assert tfilename
f_out.write('%s\t%d\n' % (tfilename, int(boolean)))
f_out.flush()
tset.add(torrent)
def initSaveFile(filename):
global tset
tset = set()
try:
f = file(filename, 'r')
lines = f.read().splitlines()
for line in lines:
tset.add(line.split('\t')[0])
f.close()
except IOError:
print 'No file %s found, starting with empty file' % filename
def main(args):
if len(args) != 4 or args[1] not in ['categorise', 'test']:
print 'Usage 1: %s categorise [torrent-dir] [torrent-data-file]' % args[0]
print 'Usage 2: %s test [torrent-dir] [torrent-data-file]' % args[0]
sys.exit(1)
if args[1] == 'categorise':
createTorrentDataSet(args[3], args[2])
elif args[1] == 'test':
testFilter(args[3], args[2])
print 'ready'
if __name__ == '__main__':
main(sys.argv)
|
lgpl-2.1
| 2,244,882,841,439,192,600
| 30.790541
| 101
| 0.535957
| false
| 3.632485
| false
| false
| false
|
TargetHolding/pyspark-elastic
|
python/setup.py
|
1
|
1149
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
basedir = os.path.dirname(os.path.abspath(__file__))
os.chdir(basedir)
def f(*path):
return open(os.path.join(basedir, *path))
setup(
name='pyspark_elastic',
maintainer='Frens Jan Rumph',
maintainer_email='frens.jan.rumph@target-holding.nl',
version='0.3.1',
    description='Utilities to assist in working with Elasticsearch and PySpark.',
long_description=f('../README.md').read(),
url='https://github.com/TargetHolding/pyspark-elastic',
license='Apache License 2.0',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Other Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
'Topic :: Software Development :: Libraries',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Utilities',
]
)
|
apache-2.0
| 3,498,440,906,200,967,700
| 26.357143
| 80
| 0.700609
| false
| 3.399408
| false
| false
| false
|
Mirantis/pumphouse
|
pumphouse/_vendor/fuelclient/cli/actions/role.py
|
1
|
1906
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .base import Action
from .. import arguments as Args
from ..formatting import format_table
from ...objects.release import Release
class RoleAction(Action):
"""List all roles for specific release
"""
action_name = "role"
def __init__(self):
super(RoleAction, self).__init__()
self.args = [
Args.get_list_arg("List all roles for specific release"),
Args.get_release_arg("Release id", required=True)
]
self.flag_func_map = (
(None, self.list),
)
def list(self, params):
"""Print all available roles and their
conflicts for some release with id=1:
fuel role --rel 1
"""
release = Release(params.release, params=params)
data = release.get_fresh_data()
acceptable_keys = ("name", "conflicts")
roles = [
{
"name": role_name,
"conflicts": ", ".join(
metadata.get("conflicts", ["-"])
)
} for role_name, metadata in data["roles_metadata"].iteritems()]
self.serializer.print_to_output(
roles,
format_table(
roles,
acceptable_keys=acceptable_keys
)
)
|
apache-2.0
| 6,900,483,627,728,180,000
| 31.862069
| 78
| 0.58447
| false
| 4.351598
| false
| false
| false
|
alexey-ernest/ml-for-trading
|
cumulative_returns.py
|
1
|
1740
|
import os
import pandas as pd
import matplotlib.pyplot as plt
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',
parse_dates=True, usecols=['Date', 'Adj Close'],
na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset=['SPY'])
return df
def plot_data(df, title="Stock prices", xlabel="Date", ylabel="Price"):
"""Plot stock prices with a custom title and meaningful axis labels."""
ax = df.plot(title=title, fontsize=12)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
def compute_cumulative_returns(df):
"""Compute and return the cumulative return values."""
cum_returns = (df/df.ix[0]) - 1
return cum_returns
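# Illustrative note (added for clarity): for a price series 100, 110, 121 the
# cumulative returns are (100/100)-1 = 0.0, (110/100)-1 = 0.10 and
# (121/100)-1 = 0.21, i.e. every value is measured against the first row.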
def test_run():
# Define a date range
dates = pd.date_range('2015-11-23', '2016-11-18')
# Choose stock symbols to read
symbols = ['SPY', 'XOM']
# Get stock data
df = get_data(symbols, dates)
#plot_data(df)
    # Compute cumulative returns
    cumulative_returns = compute_cumulative_returns(df)
    plot_data(cumulative_returns, title="Cumulative returns", ylabel="Cumulative returns")
if __name__ == "__main__":
test_run()
|
mit
| -1,047,409,870,784,264,300
| 29.526316
| 85
| 0.613793
| false
| 3.486974
| false
| false
| false
|
better-dem/geo_feedback
|
survey/migrations/0001_initial.py
|
1
|
3777
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-10-14 21:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FeedbackGoal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('description', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('polygon_coords', models.CharField(max_length=500)),
('feedback_goals', models.ManyToManyField(to='survey.FeedbackGoal')),
],
),
migrations.CreateModel(
name='ProjectResponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creation_time', models.DateTimeField(auto_now_add=True)),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Project')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='QuestionResponse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='TMCQ',
fields=[
('question_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='survey.Question')),
('option1', models.CharField(max_length=30)),
('option2', models.CharField(max_length=30)),
('option3', models.CharField(max_length=30)),
('option4', models.CharField(max_length=30)),
('option5', models.CharField(max_length=30)),
],
bases=('survey.question',),
),
migrations.CreateModel(
name='TMCQResponse',
fields=[
('questionresponse_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='survey.QuestionResponse')),
('option_index', models.IntegerField()),
],
bases=('survey.questionresponse',),
),
migrations.AddField(
model_name='questionresponse',
name='project_response',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.ProjectResponse'),
),
migrations.AddField(
model_name='questionresponse',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.Question'),
),
migrations.AddField(
model_name='question',
name='feedback_goal',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.FeedbackGoal'),
),
]
|
agpl-3.0
| -8,674,803,448,750,544,000
| 40.966667
| 210
| 0.568705
| false
| 4.459268
| false
| false
| false
|
DirkHoffmann/indico
|
indico/modules/events/requests/base.py
|
1
|
7309
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask_pluginengine import plugin_context
from wtforms.fields import SubmitField, TextAreaField
from indico.core.config import config
from indico.core.db import db
from indico.modules.events.requests.notifications import (notify_accepted_request, notify_new_modified_request,
notify_rejected_request, notify_withdrawn_request)
from indico.modules.events.requests.views import WPRequestsEventManagement
from indico.util.date_time import now_utc
from indico.util.i18n import _
from indico.web.flask.templating import get_overridable_template_name, get_template_module
from indico.web.forms.base import FormDefaults, IndicoForm
class RequestFormBase(IndicoForm):
def __init__(self, *args, **kwargs):
self.event = kwargs.pop('event')
self.request = kwargs.pop('request')
super().__init__(*args, **kwargs)
class RequestManagerForm(IndicoForm):
action_buttons = {'action_save', 'action_accept', 'action_reject'}
comment = TextAreaField(_('Comment'),
description=_('The comment will be shown only if the request is accepted or rejected.'))
action_save = SubmitField(_('Save'))
action_accept = SubmitField(_('Accept'))
action_reject = SubmitField(_('Reject'))
class RequestDefinitionBase:
"""A service request which can be sent by event managers."""
#: the plugin containing this request definition - assigned automatically
plugin = None
#: the unique internal name of the request type
name = None
#: the title of the request type as shown to users
title = None
#: the :class:`IndicoForm` to use for the request form
form = None
#: the :class:`IndicoForm` to use for the request manager form
manager_form = RequestManagerForm
#: default values to use if there's no existing request
form_defaults = {}
@classmethod
def render_form(cls, event, **kwargs):
"""Render the request form.
:param event: the event the request is for
:param kwargs: arguments passed to the template
"""
tpl = get_overridable_template_name('event_request_details.html', cls.plugin, 'events/requests/')
return WPRequestsEventManagement.render_template(tpl, event, **kwargs)
@classmethod
def create_form(cls, event, existing_request=None):
"""Create the request form.
:param event: the event the request is for
:param existing_request: the :class:`Request` if there's an existing request of this type
:return: an instance of an :class:`IndicoForm` subclass
"""
defaults = FormDefaults(existing_request.data if existing_request else cls.form_defaults)
with plugin_context(cls.plugin):
return cls.form(prefix='request-', obj=defaults, event=event, request=existing_request)
@classmethod
def create_manager_form(cls, req):
"""Create the request management form.
:param req: the :class:`Request` of the request
:return: an instance of an :class:`IndicoForm` subclass
"""
defaults = FormDefaults(req, **req.data)
with plugin_context(cls.plugin):
return cls.manager_form(prefix='request-manage-', obj=defaults)
@classmethod
def get_notification_template(cls, name, **context):
"""Get the template module for a notification email.
:param name: the template name
:param context: data passed to the template
"""
tpl = get_overridable_template_name(name, cls.plugin, 'events/requests/emails/', 'emails/')
return get_template_module(tpl, **context)
@classmethod
def can_be_managed(cls, user):
"""Check whether the user is allowed to manage this request type.
:param user: a :class:`.User`
"""
raise NotImplementedError
@classmethod
def get_manager_notification_emails(cls):
"""Return the email addresses of users who manage requests of this type.
The email addresses are used only for notifications.
It usually makes sense to return the email addresses of the users who
pass the :method:`can_be_managed` check.
:return: set of email addresses
"""
return set()
@classmethod
def get_notification_reply_email(cls):
"""Return the *Reply-To* e-mail address for notifications."""
return config.SUPPORT_EMAIL
@classmethod
def send(cls, req, data):
"""Send a new/modified request.
:param req: the :class:`Request` of the request
:param data: the form data from the request form
"""
req.data = dict(req.data or {}, **data)
is_new = req.id is None
if is_new:
db.session.add(req)
db.session.flush() # we need the creation dt for the notification
notify_new_modified_request(req, is_new)
@classmethod
def withdraw(cls, req, notify_event_managers=True):
"""Withdraw the request.
:param req: the :class:`Request` of the request
:param notify_event_managers: if event managers should be notified
"""
from indico.modules.events.requests.models.requests import RequestState
req.state = RequestState.withdrawn
notify_withdrawn_request(req, notify_event_managers)
@classmethod
def accept(cls, req, data, user):
"""Accept the request.
To ensure that additional data is saved, this method should
call :method:`manager_save`.
:param req: the :class:`Request` of the request
:param data: the form data from the management form
:param user: the user processing the request
"""
from indico.modules.events.requests.models.requests import RequestState
cls.manager_save(req, data)
req.state = RequestState.accepted
req.processed_by_user = user
req.processed_dt = now_utc()
notify_accepted_request(req)
@classmethod
def reject(cls, req, data, user):
"""Reject the request.
To ensure that additional data is saved, this method should
call :meth:`manager_save`.
:param req: the :class:`Request` of the request
:param data: the form data from the management form
:param user: the user processing the request
"""
from indico.modules.events.requests.models.requests import RequestState
cls.manager_save(req, data)
req.state = RequestState.rejected
req.processed_by_user = user
req.processed_dt = now_utc()
notify_rejected_request(req)
@classmethod
def manager_save(cls, req, data):
"""Save management-specific data.
This method is called when the management form is submitted without
accepting/rejecting the request (which is guaranteed to be already
accepted or rejected).
:param req: the :class:`Request` of the request
:param data: the form data from the management form
"""
req.comment = data['comment']
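# Example (hypothetical): a plugin could subclass this request definition
# roughly as sketched below. The base class name, the form classes and the
# `is_admin` check are assumptions for illustration only.
#
#     class AVRequest(RequestDefinitionBase):
#         form = AVRequestForm
#         manager_form = AVRequestManagerForm
#         form_defaults = {'contact_email': ''}
#
#         @classmethod
#         def can_be_managed(cls, user):
#             return user.is_admin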
|
gpl-3.0
| 3,155,499,741,189,574,000
| 36.290816
| 116
| 0.658093
| false
| 4.301942
| false
| false
| false
|
tholewebgods/jenkins-scripts
|
jobs/syncgit.py
|
1
|
10188
|
import json
import re
import datetime
import time
import os
import os.path
import dulwich.repo
import jenkinscli
import xml.etree.ElementTree as ET
import sys
import argparse
import textwrap
# This script will create jobs for each remote branch found in the repository
# in the current directory. It will also remove the jobs if the branches are
# no longer existing.
#
# - The branch name pattern can be configured
# - The template job name can be configured
# - A branch is being ignored if the last commit is older than a configurable
# amount of days
#
# Requirements:
# - Python 2.6 (2.7 should work too)
# - dulwich (install it using # pip install dulwich)
# - py-jenkins-cli (https://github.com/tholewebgods/py-jenkins-cli)
#
BINARY_NAME="syncgit"
VERSION="0.1"
# Default for max. commit age of a branch
DEFAULT_MAX_COMMIT_AGE=30
class Jenkins(object):
"""
Jenkins job management.
- job_tpl -- the exact job name used as a template (this job might/should be disabled)
- job_name_tpl -- the resulting job name, has to contain one "%s" placeholder that will be replaced with the sanitized branch name
"""
def __init__(self, host, cli_jar, ssh_key, job_tpl, job_name_tpl):
self._jenkins = jenkinscli.JenkinsCli(host, cli_jar, ssh_key)
self._job_template = job_tpl
self._job_name_tpl = job_name_tpl
"""
Create Job for Git ref name
"""
def create_job(self, ref_name):
# load template and replace placeholder in config
config_template = self._jenkins.get_job(self._job_template)
# deserialize
root = ET.fromstring(config_template)
xpath = ".//scm/branches/hudson.plugins.git.BranchSpec/name"
# get branch name config node
name_element = root.findall(xpath)
# check if a "name" node has been selected
if len(name_element) > 0:
# set branch name config
name_element[0].text = ref_name
else:
raise Exception("Missing Git branch spec config in config template (xpath: %s)" % (xpath))
# serialize DOM
config = ET.tostring(root)
# replace slashes in ref name to get clean job name and build job name
filtered_ref_name = ref_name.replace("origin/", "")
# Python 2.6 does not support flags=..., using (?i)
filtered_ref_name = re.sub("(?i)[^a-z0-9_-]+", "-", filtered_ref_name)
job_name = self._job_name_tpl % filtered_ref_name
print "Creating and enabling job '%s' for branch %s" % (job_name, ref_name)
self._jenkins.create_job(job_name, config)
self._jenkins.enable_job(job_name)
"""
Remove Job by Git ref name
"""
def remove_job(self, ref_name):
# replace slashes in ref name to get clean job name and build job name
filtered_ref_name = ref_name.replace("origin/", "")
# Python 2.6 does not support flags=..., using (?i)
filtered_ref_name = re.sub("(?i)[^a-z0-9_-]+", "-", filtered_ref_name)
job_name = self._job_name_tpl % filtered_ref_name
print "Removing job '%s' for branch '%s'" % (job_name, ref_name)
self._jenkins.delete_job(job_name)
# get branch from one Job's config
def _get_branch_from_config(self, config):
root = ET.fromstring(config)
name_element = root.findall(".//scm/branches/hudson.plugins.git.BranchSpec/name")
if len(name_element) == 1:
return name_element[0].text
else:
return None
"""
Get all branches that are configured by Jobs.
Examines each Job in the list for their branch names
"""
def get_currently_configured_branches(self):
jobs = self._jenkins.get_joblist()
branches = []
for job in jobs:
if re.match("^" + (self._job_name_tpl % ""), job):
config = self._jenkins.get_job(job)
branch_name = self._get_branch_from_config(config)
if not re.match("^refs/remotes/", branch_name):
branch_name = "refs/remotes/" + branch_name
branches.append(branch_name)
return branches
"""
Represents branches in Git
"""
class GitBranches(object):
"""
Git branch management.
repo -- Repository location (relative or absolute paths)
ref_matcher -- A regular expression that matches branch names to create jobs for
max_commit_age -- Max days the last commit was made to a branch
"""
def __init__(self, repo, ref_matcher, max_commit_age):
self._repo = dulwich.repo.Repo(repo)
self._ref_matcher = ref_matcher
self._max_commit_age = max_commit_age
def get_branches(self):
_refs = []
# iterate over branches (refs) and their SHA1
for ref, sha1 in self._repo.get_refs().iteritems():
# ref matches the configured matcher
if re.match(self._ref_matcher, ref):
obj = self._repo.get_object(sha1)
_refs.append([ref, sha1, obj.commit_time])
# filter (ref, SHA1, commit time) tupel for outdated branches
refs = filter(lambda x: self._within_days(x[2], self._max_commit_age), _refs)
# extract ref
refs = set([x[0] for x in refs])
return refs
# Return True if the Unix timestamp is within the timerange now - days
def _within_days(self, timestamp, days):
return datetime.datetime.fromtimestamp(timestamp) >= (datetime.datetime.now() + datetime.timedelta(days=-days))
class GitJenkinsSync(object):
def __init__(self, host, cli_jar, ssh_key, job_tpl, job_name_tpl, repo, ref_matcher, max_commit_age):
self._jenkins = Jenkins(host, cli_jar, ssh_key, job_tpl, job_name_tpl)
self._git = GitBranches(repo, ref_matcher, max_commit_age)
"""Do the actual sync. Query both sides, do diff/intersection and create/remove jobs"""
def sync(self):
git_branches = self._git.get_branches()
job_branches = set(self._jenkins.get_currently_configured_branches())
print "Found these branches in the repository:\n %s" % "\n ".join(git_branches)
print "Found these branches configured in Jenkins:\n %s" % "\n ".join(job_branches)
to_remove = job_branches - git_branches
if len(to_remove) > 0:
print "Remove these:\n %s" % "\n ".join(to_remove)
for ref in to_remove:
self._jenkins.remove_job(ref.replace("refs/remotes/", ""))
else:
print "No branch jobs to remove."
to_create = git_branches - job_branches
if len(to_create) > 0:
print "Create these:\n %s" % "\n ".join(to_create)
for ref in to_create:
self._jenkins.create_job(ref.replace("refs/remotes/", ""))
else:
print "No branch jobs to create."
class CustomParser(argparse.ArgumentParser):
# extend help screen to print more
def print_help(self):
super(CustomParser, self).print_help()
print "example usage:"
print """
Create a job named "Build Project XYZ TEMPLATE" and set "BBBBB" in the Git
config section for the branch name.
%s --host http://localhost:8080/ --key /home/jenkins/.ssh/id_rsa_local \\
--jar /tmp/jenkins_cli.jar --tpl-job "Build Project XYZ TEMPLATE" \\
--job-name-tpl "Build Project XYZ %%s" --git-repo /tmp/sync-checkout \\
--ref-regex "^refs/remotes/origin/((dev|bugfix)/ACME-[0-9]+|int/[0-9]+)" \\
--max-commit-age 14
This will create jobs named like "Build Project XYZ dev-ACME-123-name"
""" % (BINARY_NAME)
# Validating store action for --max-commit-age
class MaxAgeSwitchAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values > 1000 or values < 1:
raise Exception("Max commit age %d exceeds 1 - 1000" % values)
setattr(namespace, self.dest, values)
# Internal exception
class ArgumentValidationException(Exception):
def __init__(self, msg):
super(ArgumentValidationException, self).__init__(msg)
def _validate_arguments(parsed):
if not os.path.exists(parsed.ssh_key):
raise ArgumentValidationException("SSH Key does not exist: " + parsed.ssh_key)
if not os.path.exists(parsed.jar):
raise ArgumentValidationException("Jenkins CLI .jar does not exist: " + parsed.jar)
if parsed.jobname_tpl.count("%s") != 1:
raise ArgumentValidationException("Expected one \"%s\" placeholder in the job name template.")
if not os.path.exists(parsed.git_repo_path):
raise ArgumentValidationException("Git directory does not exist: " + parsed.git_repo_path)
try:
re.match(parsed.ref_regex, "")
except Exception as e:
raise ArgumentValidationException("Malformed regular expression '" + parsed.ref_regex + "': " + str(e))
def main(args):
# add_help=False,
parser = CustomParser(
prog=BINARY_NAME,
description="Sync Git branches by branch name pattern with corresponding jobs in Jenkins"
)
parser.add_argument( '-V','--version', action='version', version='%(prog)s ' + VERSION)
parser.add_argument(
'-J', '--host', dest="jenkins_host", action='store', metavar="URL", required=True,
help="URL to Jenkins in form <protocol>://<host>[:port][<path>]/"
)
parser.add_argument(
'-S', '--key', dest="ssh_key", action='store', metavar="PATH", required=True,
help="Path to the SSH key used for authentication"
)
parser.add_argument(
'-j', '--jar', dest="jar", action='store', metavar="PATH", required=True,
help="Path to the Jenkins CLI .jar"
)
parser.add_argument(
'-G', '--git-repo', dest="git_repo_path", action='store', metavar="PATH", required=True,
help="Path to the Git repository"
)
parser.add_argument(
'-T', '--tpl-job', dest="tpl_job", action='store', metavar="JOBNAME", required=True,
help="Name of the job used as template"
)
parser.add_argument(
'-n', '--job-name-tpl', dest="jobname_tpl", action='store', metavar="NAME", required=True,
help="Name template for the jobs being created, should contain \"%%s\" as placeholder for the branch name"
)
parser.add_argument(
'-R', '--ref-regex', dest="ref_regex", action='store', metavar="REGEX", required=True,
help="Regular expression matching the branch names to create jobs for"
)
parser.add_argument(
'-a', '--max-commit-age', dest="max_commit_age", action=MaxAgeSwitchAction, type=int, metavar="DAYS", required=False, default=DEFAULT_MAX_COMMIT_AGE,
help="Max days the last commit was made on a branch. Defaults to %d" % DEFAULT_MAX_COMMIT_AGE
)
parsed = parser.parse_args(args)
_validate_arguments(parsed)
sync = GitJenkinsSync(
parsed.jenkins_host, parsed.jar, parsed.ssh_key,
parsed.tpl_job, parsed.jobname_tpl,
parsed.git_repo_path, parsed.ref_regex, parsed.max_commit_age
)
sync.sync()
if __name__ == "__main__":
try:
main(sys.argv[1:])
except Exception as e:
print "Error occured: %s" % str(e)
|
mit
| 2,865,741,185,627,415,000
| 31.037736
| 132
| 0.688457
| false
| 3.171856
| true
| false
| false
|
agacek/camkes-tool
|
camkes/runner/NameMangling.py
|
1
|
12529
|
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''This code manages the name mangling (and reversal of such) that needs to
happen in the templates and follow-on logic in the runner. E.g. based on the
name of a component instance, we need to construct a name of the control TCB.
The logic for performing that translation and (if necessary) reversing it later
is encapsulated here so it can more easily be modified.
Callers should only import and use the Perspective class. When instantiating
one of these, generally as much information as is known should be provided to
give Perspective the opportunity to spot internal inconsistencies. See the
comments in the class itself for further information.'''
from camkes.internal.dictutils import get_fields
import re
class Deriver(object):
'''Logic for constructing one symbol from one or more other symbols. This
class itself is never intended to be directly instantiated and is probably
best understood by looking at its inherited children.'''
def inputs(self):
raise NotImplementedError
def output(self):
raise NotImplementedError
def derive(self, perspective):
raise NotImplementedError
class ForwardDeriver(Deriver):
'''Logic for deriving one symbol from several other symbols by way of
concatenation, interspersed with other static text.'''
def __init__(self, format, out):
self.format = format
self.out = out
def inputs(self):
return get_fields(self.format)
def output(self):
return self.out
def derive(self, perspective):
return self.format % perspective
class BackwardDeriver(Deriver):
'''Logic for deriving one symbol from one other symbol by pulling out a
substring of the input.'''
def __init__(self, regex, input, out):
self.regex = re.compile(regex)
self.input = input
self.out = out
def inputs(self):
return set([self.input])
def output(self):
return self.out
def derive(self, perspective):
m = self.regex.match(perspective[self.input])
if m is None:
return None
return m.group(1)
# The remaining derivers are for specific symbols (or qualities) that are not
# strings. These each need slightly inflected logic.
class ControlDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'control'
def derive(self, perspective):
return self.regex.match(perspective[self.input]) is not None
class PoolDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'pool'
def derive(self, perspective):
return self.regex.match(perspective[self.input]) is not None
class PoolIndexDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'pool_index'
def derive(self, perspective):
m = self.regex.match(perspective[self.input])
if m is None:
return None
return int(m.group(1))
class FromControlDeriver(ForwardDeriver):
def derive(self, perspective):
if not perspective.get('control', False):
return None
return self.format % perspective
class DMAFrameIndexDeriver(Deriver):
def __init__(self, regex, input):
self.regex = re.compile(regex)
self.input = input
def inputs(self):
return set([self.input])
def output(self):
return 'dma_frame_index'
def derive(self, perspective):
m = self.regex.match(perspective[self.input])
if m is None:
return None
return int(m.group(1))
# Phases.
RUNNER, TEMPLATES, FILTERS = range(3)
# Instantiate the derivers to describe how name mangling happens in CAmkES. If
# you want to modify the name mangling scheme, this is the place to do it.
DERIVATIONS = {
RUNNER:[
ForwardDeriver('pd_%(group)s_group_bin', 'pd'),
ForwardDeriver('pd_%(elf_name)s', 'pd'),
BackwardDeriver(r'^pd_(.+)$', 'pd', 'elf_name'),
BackwardDeriver(r'^pd_(.+)_group_bin$', 'pd', 'group'),
ForwardDeriver('cnode_%(group)s', 'cnode'),
BackwardDeriver(r'^cnode_(.+)$', 'cnode', 'group'),
], TEMPLATES:[
ForwardDeriver('dma_frame_%(dma_frame_index)s', 'dma_frame_symbol'),
DMAFrameIndexDeriver(r'^dma_frame_([0-9]+)$', 'dma_frame_symbol'),
ForwardDeriver('_camkes_ipc_buffer_%(instance)s_%(interface)s', 'ipc_buffer_symbol'),
FromControlDeriver('_camkes_ipc_buffer_%(instance)s__control', 'ipc_buffer_symbol'),
ControlDeriver(r'^_camkes_ipc_buffer_.+__control$', 'ipc_buffer_symbol'),
ForwardDeriver('_camkes_stack_%(instance)s_%(interface)s', 'stack_symbol'),
FromControlDeriver('_camkes_stack_%(instance)s__control', 'stack_symbol'),
ControlDeriver(r'^_camkes_stack_.+__control$', 'stack_symbol'),
ForwardDeriver('%(dataport)s_data', 'dataport_symbol'),
BackwardDeriver(r'^([^ ]+)_data$', 'dataport_symbol', 'dataport'),
ForwardDeriver('%(to_interface)s_attributes', 'hardware_attribute'),
BackwardDeriver(r'^(.+)_attributes', 'hardware_attribute', 'to_interface'),
ForwardDeriver('%(group)s_group_bin', 'elf_name'),
BackwardDeriver(r'^(.+)_group_bin', 'elf_name', 'group'),
ForwardDeriver('%(instance)s_main', 'entry_symbol'),
BackwardDeriver(r'^(.+)_main$', 'entry_symbol', 'instance'),
ForwardDeriver('%(instance)s_tls_setup', 'tls_symbol'),
BackwardDeriver(r'^(.+)_tls_setup$', 'tls_symbol', 'instance'),
ForwardDeriver('camkes_dma_pool', 'dma_pool_symbol'),
], FILTERS:[
ForwardDeriver('%(instance)s_tcb_%(interface)s', 'tcb'),
FromControlDeriver('%(instance)s_tcb__control', 'tcb'),
BackwardDeriver(r'^(.+)_tcb_.+$', 'tcb', 'instance'),
BackwardDeriver(r'^.+_tcb_([^_].*)$', 'tcb', 'interface'),
ControlDeriver(r'^.+_tcb__control$', 'tcb'),
ForwardDeriver('_camkes_ipc_buffer_%(instance)s_%(interface)s', 'ipc_buffer_symbol'),
FromControlDeriver('_camkes_ipc_buffer_%(instance)s__control', 'ipc_buffer_symbol'),
ControlDeriver(r'^_camkes_ipc_buffer_.+__control$', 'ipc_buffer_symbol'),
ForwardDeriver('_camkes_stack_%(instance)s_%(interface)s', 'stack_symbol'),
FromControlDeriver('_camkes_stack_%(instance)s__control', 'stack_symbol'),
ControlDeriver(r'^_camkes_stack_.+__control$', 'stack_symbol'),
ForwardDeriver('camkes %(instance)s_main', 'entry_symbol'),
BackwardDeriver(r'^camkes (.+)_main$', 'entry_symbol', 'instance'),
ForwardDeriver('camkes %(instance)s_tls_setup', 'tls_symbol'),
BackwardDeriver(r'^camkes (.+)_tls_setup$', 'tls_symbol', 'instance'),
ForwardDeriver('%(group)s_group_bin', 'elf_name'),
BackwardDeriver(r'^(.+)_group_bin', 'elf_name', 'group'),
PoolDeriver(r'.+_tcb_pool_[0-9]+$', 'tcb'),
PoolIndexDeriver(r'.+_tcb_pool_([0-9]+)$', 'tcb'),
ForwardDeriver('pd_%(group)s_group_bin', 'pd'),
ForwardDeriver('pd_%(elf_name)s', 'pd'),
BackwardDeriver(r'^pd_(.+)$', 'pd', 'elf_name'),
BackwardDeriver(r'^pd_(.+)_group_bin$', 'pd', 'group'),
ForwardDeriver('camkes %(instance)s %(dataport)s data', 'dataport_symbol'),
BackwardDeriver(r'^camkes ([^ ]+) [^ ]+ data$', 'dataport_symbol', 'instance'),
BackwardDeriver(r'^camkes [^ ]+ ([^ ]+) data$', 'dataport_symbol', 'dataport'),
ForwardDeriver('%(to_interface)s_attributes', 'hardware_attribute'),
BackwardDeriver(r'^(.+)_attributes', 'hardware_attribute', 'to_interface'),
ForwardDeriver('camkes %(instance)s_dma_pool', 'dma_pool_symbol'),
BackwardDeriver(r'^camkes (.+)_dma_pool$', 'dma_pool_symbol', 'instance'),
ForwardDeriver('%(instance)s_dma_frame_%(dma_frame_index)s', 'dma_frame_symbol'),
BackwardDeriver(r'^(.+)_dma_frame_[0-9]+$', 'dma_frame_symbol', 'instance'),
DMAFrameIndexDeriver(r'^.+_dma_frame_([0-9]+)$', 'dma_frame_symbol'),
ControlDeriver(r'^_control_priority$', 'priority_attribute'),
FromControlDeriver('_control_priority', 'priority_attribute'),
ForwardDeriver('%(interface)s_priority', 'priority_attribute'),
BackwardDeriver(r'^([^_].*)_priority$', 'priority_attribute', 'interface'),
ControlDeriver(r'^_control_domain$', 'domain_attribute'),
FromControlDeriver('_control_domain', 'domain_attribute'),
ForwardDeriver('%(interface)s_domain', 'domain_attribute'),
BackwardDeriver(r'^([^_].*)_domain$', 'domain_attribute', 'interface'),
ForwardDeriver('cnode_%(group)s', 'cnode'),
BackwardDeriver(r'^cnode_(.+)$', 'cnode', 'group'),
],
}
class Perspective(object):
'''A partial state from which to mangle symbols. That may make no sense,
but consider this as a collection of *some* of the symbols we need from
which *all* the symbols we need can be derived. You need to pass some
initial symbols in to the constructor. These may not be sufficient to
derive all other known symbols, but they must be sufficient to derive any
you need. The known symbols can be updated at any point via __setitem__. A
more appropriate name for this class would be 'context', but I didn't want
to cause confusion by introducing yet another 'context' into this code
base.'''
def __init__(self, phase=FILTERS, **kwargs):
self.kwargs = kwargs
self.derivations = DERIVATIONS[phase]
if __debug__:
# When optimisations are not enabled, infer everything possible
# upfront (not lazily). This can catch some internal
# inconsistencies though we will probably end up inferring things
# we don't need.
self._infer()
def _infer(self, limit=None):
'''Infer some or all possible unknown symbols. If the limit argument is
given, inference stops when we know that symbol.'''
prev_keys = set(self.kwargs.keys())
while limit is None or limit not in prev_keys:
for d in self.derivations:
if d.inputs() <= set(self.kwargs.keys()):
# We have enough information to use this derivation.
v = d.derive(self.kwargs)
if v is None:
# We could not derive this value.
continue
k = d.output()
if k in self.kwargs:
# We already knew this symbol. It had better have been
# the same as what we just derived for consistency.
assert self.kwargs[k] == v, \
'perspective is internally inconsistent: %s' % self.kwargs
else:
self.kwargs[k] = v
next_keys = set(self.kwargs.keys())
if prev_keys == next_keys:
# We didn't learn anything new this time around.
break
prev_keys = next_keys
def __setitem__(self, key, value):
assert key not in self.kwargs or self.kwargs[key] == value
# The following assertion is conservative. In the future, it may make
# sense to set some 'core' strings that we cannot infer.
assert key in map(lambda x: x.output(), self.derivations), \
'setting \'%s\' that is not inferrable' % key
self.kwargs[key] = value
if __debug__:
self._infer()
def __getitem__(self, key):
# As for the assertion in __setitem__, this is conservative.
assert key in map(lambda x: x.output(), self.derivations), \
'getting \'%s\' that is not inferrable' % key
if key not in self.kwargs:
self._infer(key)
if key not in self.kwargs:
raise Exception('not enough information to infer attribute, %s' % key)
return self.kwargs[key]
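# A minimal self-check of the mangling round trip (a sketch using only the
# FILTERS derivations defined above; the instance/interface names are made up).
if __name__ == '__main__':
    p = Perspective(phase=FILTERS, instance='comp', interface='iface')
    assert p['tcb'] == 'comp_tcb_iface'
    # The mapping can also be reversed, recovering the instance from a symbol.
    q = Perspective(phase=FILTERS, tcb='comp_tcb_iface')
    assert q['instance'] == 'comp'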
|
bsd-2-clause
| -1,779,961,257,078,954,200
| 45.576208
| 93
| 0.621758
| false
| 3.759076
| false
| false
| false
|
JavaCardOS/pyResMan
|
pyResMan/Dialogs/pyResManCommandDialog_MifareAuthentication.py
|
1
|
3904
|
# -*- coding: utf-8 -*-
'''
Modified on 2017-03-28
@author: javacardos@gmail.com
@organization: https://www.javacardos.com/
@copyright: JavaCardOS Technologies. All rights reserved.
'''
from pyResMan.BaseDialogs.pyResManCommandDialogBase_MifareAuthentication import CommandDialogBase_MifareAuthentication
from pyResMan.Util import IDOK, IDCANCEL
from pyResMan.Util import HexValidator, Util
###########################################################################
## Class CommandDialog_MifareAuthentication
###########################################################################
MODE_IDLE = 0
MODE_PARSING = 1
MODE_BUILDING = 2
class CommandDialog_MifareAuthentication ( CommandDialogBase_MifareAuthentication ):
def __init__( self, parent, bytesCount = 1 ):
CommandDialogBase_MifareAuthentication.__init__ ( self, parent )
self.__mode = MODE_IDLE
self._textctrlCommandValue.SetMaxLength(bytesCount * 2)
# Set validator;
self._textctrlCommandValue.SetValidator(HexValidator())
self._textctrlUID.SetValue('00000000')
for i in range(256):
self._choiceBlockNumber.Append('%d' %(i))
def _buttonOKOnButtonClick(self, event):
self.EndModal(IDOK)
def _buttonCancelOnButtonClick(self, event):
self.EndModal(IDCANCEL)
def getCommandName(self):
return self._statictextCommandName.GetLabelText()
def getCommandValue(self):
return self._textctrlCommandValue.GetValue()
def setCommandName(self, name):
self._statictextCommandName.SetLabelText(name)
self.SetTitle(name)
def setCommandValue(self, value):
self._textctrlCommandValue.SetValue(value)
self.parseCommandValue()
def parseCommandValue(self):
if self.__mode == MODE_IDLE:
self.__mode = MODE_PARSING
commandValue = Util.s2vl(self._textctrlCommandValue.GetValue())
self._choiceMode.SetSelection(0 if commandValue[0] == 0x60 else 1)
self._choiceBlockNumber.SetSelection(commandValue[1])
self._textctrlKey.SetValue(Util.vl2s(commandValue[2 : 8], ''))
if len(commandValue) >= 12:
self._textctrlUID.SetValue(Util.vl2s(commandValue[8 : ], ''))
self.__mode = MODE_IDLE
else:
pass
def buildCommandValue(self):
if self.__mode == MODE_IDLE:
self.__mode = MODE_BUILDING
commandValue = []
# Mode;
commandValue.append(0x60 if (self._choiceMode.GetSelection() == 0) else 0x61)
# Sector number;
commandValue.append(self._choiceBlockNumber.GetSelection())
# Key data;
keyData= [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
try:
keyData = Util.s2vl(self._textctrlKey.GetValue())
except:
pass
for kd in keyData:
commandValue.append(kd)
# UID;
UID = [0x65, 0xE0, 0x5E, 0x1E]
try:
UID = Util.s2vl(self._textctrlUID.GetValue())
except:
pass
for id in UID:
commandValue.append(id)
#
self._textctrlCommandValue.SetValue(Util.vl2s(commandValue, ''))
self.__mode = MODE_IDLE
else:
pass
def _choiceModeOnChoice( self, event ):
self.buildCommandValue()
def _choiceBlockNumberOnChoice(self, event):
self.buildCommandValue()
def _textctrlKeyOnText( self, event ):
self.buildCommandValue()
def _textctrlUIDOnText(self, event):
self.buildCommandValue()
def _textctrlCommandValueOnText( self, event ):
self.parseCommandValue()
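# For reference, the value assembled by buildCommandValue() above is laid out
# as [key type: 0x60 = key A / 0x61 = key B][block number][6-byte key]
# [4-byte UID]; with the defaults this yields, e.g. for key A on block 4:
#     60 04 FF FF FF FF FF FF 65 E0 5E 1E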
|
gpl-2.0
| -8,872,457,769,474,260,000
| 32.367521
| 118
| 0.574795
| false
| 4.162047
| false
| false
| false
|
fofix/fretwork
|
fretwork/midi/constants.py
|
1
|
6316
|
# -*- coding: utf-8 -*-
"""
A collection of constants from the midi spec.
"""
###################################################
## Midi channel events (The most usual events)
## also called "Channel Voice Messages"
NOTE_OFF = 0x80
# 1000cccc 0nnnnnnn 0vvvvvvv (channel, note, velocity)
NOTE_ON = 0x90
# 1001cccc 0nnnnnnn 0vvvvvvv (channel, note, velocity)
AFTERTOUCH = 0xA0
# 1010cccc 0nnnnnnn 0vvvvvvv (channel, note, velocity)
CONTINUOUS_CONTROLLER = 0xB0 # see Channel Mode Messages!!!
# 1011cccc 0ccccccc 0vvvvvvv (channel, controller, value)
PATCH_CHANGE = 0xC0
# 1100cccc 0ppppppp (channel, program)
CHANNEL_PRESSURE = 0xD0
# 1101cccc 0ppppppp (channel, pressure)
PITCH_BEND = 0xE0
# 1110cccc 0vvvvvvv 0wwwwwww (channel, value-lo, value-hi)
###################################################
## Channel Mode Messages (Continuous Controller)
## They share a status byte.
## The controller makes the difference here
# High resolution continuous controllers (MSB)
BANK_SELECT = 0x00
MODULATION_WHEEL = 0x01
BREATH_CONTROLLER = 0x02
FOOT_CONTROLLER = 0x04
PORTAMENTO_TIME = 0x05
DATA_ENTRY = 0x06
CHANNEL_VOLUME = 0x07
BALANCE = 0x08
PAN = 0x0A
EXPRESSION_CONTROLLER = 0x0B
EFFECT_CONTROL_1 = 0x0C
EFFECT_CONTROL_2 = 0x0D
GEN_PURPOSE_CONTROLLER_1 = 0x10
GEN_PURPOSE_CONTROLLER_2 = 0x11
GEN_PURPOSE_CONTROLLER_3 = 0x12
GEN_PURPOSE_CONTROLLER_4 = 0x13
# High resolution continuous controllers (LSB)
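# NOTE: the LSB names below rebind the MSB constants defined above, so only
# the LSB values (0x20-0x33) survive at import time.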
BANK_SELECT = 0x20
MODULATION_WHEEL = 0x21
BREATH_CONTROLLER = 0x22
FOOT_CONTROLLER = 0x24
PORTAMENTO_TIME = 0x25
DATA_ENTRY = 0x26
CHANNEL_VOLUME = 0x27
BALANCE = 0x28
PAN = 0x2A
EXPRESSION_CONTROLLER = 0x2B
EFFECT_CONTROL_1 = 0x2C
EFFECT_CONTROL_2 = 0x2D
GENERAL_PURPOSE_CONTROLLER_1 = 0x30
GENERAL_PURPOSE_CONTROLLER_2 = 0x31
GENERAL_PURPOSE_CONTROLLER_3 = 0x32
GENERAL_PURPOSE_CONTROLLER_4 = 0x33
# Switches
SUSTAIN_ONOFF = 0x40
PORTAMENTO_ONOFF = 0x41
SOSTENUTO_ONOFF = 0x42
SOFT_PEDAL_ONOFF = 0x43
LEGATO_ONOFF = 0x44
HOLD_2_ONOFF = 0x45
# Low resolution continuous controllers
SOUND_CONTROLLER_1 = 0x46 # (TG: Sound Variation; FX: Exciter On/Off)
SOUND_CONTROLLER_2 = 0x47 # (TG: Harmonic Content; FX: Compressor On/Off)
SOUND_CONTROLLER_3 = 0x48 # (TG: Release Time; FX: Distortion On/Off)
SOUND_CONTROLLER_4 = 0x49 # (TG: Attack Time; FX: EQ On/Off)
SOUND_CONTROLLER_5 = 0x4A # (TG: Brightness; FX: Expander On/Off)
SOUND_CONTROLLER_6 = 0x4B # (TG: Undefined; FX: Reverb OnOff)
SOUND_CONTROLLER_7 = 0x4C # (TG: Undefined; FX: Delay OnOff)
SOUND_CONTROLLER_8 = 0x4D # (TG: Undefined; FX: Pitch Transpose OnOff)
SOUND_CONTROLLER_9 = 0x4E # (TG: Undefined; FX: Flange/Chorus OnOff)
SOUND_CONTROLLER_10 = 0x4F # (TG: Undefined; FX: Special Effects OnOff)
GENERAL_PURPOSE_CONTROLLER_5 = 0x50
GENERAL_PURPOSE_CONTROLLER_6 = 0x51
GENERAL_PURPOSE_CONTROLLER_7 = 0x52
GENERAL_PURPOSE_CONTROLLER_8 = 0x53
PORTAMENTO_CONTROL = 0x54 # (PTC) (0vvvvvvv is the source Note number) (Detail)
EFFECTS_1 = 0x5B # (Ext. Effects Depth)
EFFECTS_2 = 0x5C # (Tremelo Depth)
EFFECTS_3 = 0x5D # (Chorus Depth)
EFFECTS_4 = 0x5E # (Celeste Depth)
EFFECTS_5 = 0x5F # (Phaser Depth)
DATA_INCREMENT = 0x60 # (0vvvvvvv is n/a; use 0)
DATA_DECREMENT = 0x61 # (0vvvvvvv is n/a; use 0)
NON_REGISTERED_PARAMETER_NUMBER = 0x62 # (LSB)
NON_REGISTERED_PARAMETER_NUMBER = 0x63 # (MSB)
REGISTERED_PARAMETER_NUMBER = 0x64 # (LSB)
REGISTERED_PARAMETER_NUMBER = 0x65 # (MSB)
# Channel Mode messages - (Detail)
ALL_SOUND_OFF = 0x78
RESET_ALL_CONTROLLERS = 0x79
LOCAL_CONTROL_ONOFF = 0x7A
ALL_NOTES_OFF = 0x7B
OMNI_MODE_OFF = 0x7C # (also causes ANO)
OMNI_MODE_ON = 0x7D # (also causes ANO)
MONO_MODE_ON = 0x7E # (Poly Off; also causes ANO)
POLY_MODE_ON = 0x7F # (Mono Off; also causes ANO)
###################################################
## System Common Messages, for all channels
SYSTEM_EXCLUSIVE = 0xF0
# 11110000 0iiiiiii 0ddddddd ... 11110111
MTC = 0xF1 # MIDI Time Code Quarter Frame
# 11110001
SONG_POSITION_POINTER = 0xF2
# 11110010 0vvvvvvv 0wwwwwww (lo-position, hi-position)
SONG_SELECT = 0xF3
# 11110011 0sssssss (songnumber)
#UNDEFINED = 0xF4
## 11110100
#UNDEFINED = 0xF5
## 11110101
TUNING_REQUEST = 0xF6
# 11110110
END_OFF_EXCLUSIVE = 0xF7 # terminator
# 11110111 # End of system exclusive
###################################################
## Midifile meta-events
SEQUENCE_NUMBER = 0x00 # 00 02 ss ss (seq-number)
TEXT = 0x01 # 01 len text...
COPYRIGHT = 0x02 # 02 len text...
SEQUENCE_NAME = 0x03 # 03 len text...
INSTRUMENT_NAME = 0x04 # 04 len text...
LYRIC = 0x05 # 05 len text...
MARKER = 0x06 # 06 len text...
CUEPOINT = 0x07 # 07 len text...
PROGRAM_NAME = 0x08 # 08 len text...
DEVICE_NAME = 0x09 # 09 len text...
MIDI_CH_PREFIX = 0x20 # MIDI channel prefix assignment (unofficial)
MIDI_PORT = 0x21 # 21 01 port, legacy stuff but still used
END_OF_TRACK = 0x2F # 2f 00
TEMPO = 0x51 # 51 03 tt tt tt (tempo in us/quarternote)
SMTP_OFFSET = 0x54 # 54 05 hh mm ss ff xx
TIME_SIGNATURE = 0x58 # 58 04 nn dd cc bb
KEY_SIGNATURE = 0x59 # ??? len text...
SPECIFIC = 0x7F # Sequencer specific event
FILE_HEADER = 'MThd'
TRACK_HEADER = 'MTrk'
###################################################
## System Realtime messages
## I don't supose these are to be found in midi files?!
TIMING_CLOCK = 0xF8
# undefined = 0xF9
SONG_START = 0xFA
SONG_CONTINUE = 0xFB
SONG_STOP = 0xFC
# undefined = 0xFD
ACTIVE_SENSING = 0xFE
SYSTEM_RESET = 0xFF
###################################################
## META EVENT, it is used only in midi files.
## In transmitted data it means system reset!!!
META_EVENT = 0xFF
# 11111111
###################################################
## Helper functions
def is_status(byte):
return (byte & 0x80) == 0x80 # 1000 0000
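# Small helper sketched here for illustration: a channel voice status byte is
# the event constant OR-ed with the 0-15 channel number.
def to_status(event, channel):
    return (event & 0xF0) | (channel & 0x0F)
# e.g. to_status(NOTE_ON, 2) == 0x92, and is_status(0x92) is True.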
|
gpl-2.0
| 6,677,587,943,104,313,000
| 29.365385
| 144
| 0.611463
| false
| 2.817128
| false
| false
| false
|
COSMOGRAIL/COSMOULINE
|
pipe/modules/readandreplace_fct.py
|
1
|
2009
|
def justreplace(inputstring, repdict):
template = inputstring
for key, value in repdict.iteritems():
template = template.replace(key, value)
return template
def justread(inputfilename):
import sys
import os
infile = open(inputfilename, 'r')
content = infile.read()
infile.close()
return content
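# Typical (hypothetical) usage: read a template and substitute placeholders;
# the file name and keys are made up for the example.
#
#     template = justread("deconv_in.txt")
#     settings = justreplace(template, {"@NBIMG@": "12", "@NITER@": "50"})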
#Try to use readmancat in variousfct, it's better
#def readmancoords(mancatfile): # reads a man cat with format "id x y flux" and comments + blank lines
#
# import sys
# import os
#
# print "WARNING THIS FUNCTION IS DEPRECATED"
#
# myfile = open(mancatfile, "r")
# lines = myfile.readlines()
# myfile.close
# table=[]
# for line in lines:
# if line[0] == '#' or len(line) < 4:
# continue
# elements = line.split()
# if len(elements) != 4:
# print "Wrong format :", mancatfile
# sys.exit()
# starid = elements[0]
# xpos = float(elements[1])
# ypos = float(elements[2])
# flux = float(elements[3])
# table.append([starid, xpos, ypos, flux])
#
# print "I've read", len(table), "stars from", mancatfile
# return table
def readouttxt(outtxtfile, nbimg): # function to read the out.txt written by deconv.exe
import sys
import os
infile = open(outtxtfile, 'r')
content = infile.readlines()
nblines = len(content)
print "Number of lines :", nblines
infile.close()
i = 0
intpostable = []
while i < nblines:
line = content[i]
if line.find("Nombre d")>=0:
nbiter = line.split()[-1]
if nbiter[0] == ":":
nbiter = nbiter[1:]
nbiter = int(nbiter)
print "Number of iterations :", nbiter
if line.find(" - Num")>=0:
table = []
for j in range(i+1, i+1+nbimg):
values = map(float, content[j].split())
table.append(values)
intpostable.append(table)
i = i+nbimg
if line.find("* Valeurs finales de z1, z2, delta1 et delta2 :")>=0:
zdeltatable = []
for j in range(i+1, i+1+nbimg):
values = map(float, content[j].split())
zdeltatable.append(values)
i = i+nbimg
i = i+1
return intpostable, zdeltatable
|
gpl-3.0
| 1,196,499,792,120,505,900
| 21.829545
| 102
| 0.649079
| false
| 2.703903
| false
| false
| false
|
Eksmo/calibre
|
src/calibre/gui2/dialogs/conversion_error_ui.py
|
1
|
2280
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/dialogs/conversion_error.ui'
#
# Created: Thu Jul 19 23:32:30 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ConversionErrorDialog(object):
def setupUi(self, ConversionErrorDialog):
ConversionErrorDialog.setObjectName(_fromUtf8("ConversionErrorDialog"))
ConversionErrorDialog.resize(658, 515)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("lt.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
ConversionErrorDialog.setWindowIcon(icon)
self.gridlayout = QtGui.QGridLayout(ConversionErrorDialog)
self.gridlayout.setObjectName(_fromUtf8("gridlayout"))
self.label = QtGui.QLabel(ConversionErrorDialog)
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8(I("dialog_error.png"))))
self.label.setObjectName(_fromUtf8("label"))
self.gridlayout.addWidget(self.label, 0, 0, 1, 1)
self.text = QtGui.QTextBrowser(ConversionErrorDialog)
self.text.setObjectName(_fromUtf8("text"))
self.gridlayout.addWidget(self.text, 0, 1, 2, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridlayout.addItem(spacerItem, 1, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(ConversionErrorDialog)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridlayout.addWidget(self.buttonBox, 2, 1, 1, 1)
self.retranslateUi(ConversionErrorDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), ConversionErrorDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), ConversionErrorDialog.reject)
QtCore.QMetaObject.connectSlotsByName(ConversionErrorDialog)
def retranslateUi(self, ConversionErrorDialog):
ConversionErrorDialog.setWindowTitle(_("ERROR"))
|
gpl-3.0
| 6,277,705,584,022,447,000
| 45.530612
| 120
| 0.716228
| false
| 3.68932
| false
| false
| false
|
research-team/NEUCOGAR
|
NEST/cube/integration/excitement/synapses.py
|
1
|
3707
|
from keys import *
from simulation_params import *
import nest
import numpy.random as random
# Neuron parameters
hh_neuronparams = {'E_L': -70., # Resting membrane potential in mV
'V_T': -63., # Voltage offset that controls dynamics.
# -63mV results in a threshold around -50mV.
'C_m': 2., # Capacity of the membrane in pF
't_ref': 2., # Duration of refractory period (V_m = V_reset) in ms
'tau_syn_ex': 5., # Time constant of postsynaptic excitatory currents in ms
'tau_syn_in': 10. # Time constant of postsynaptic inhibitory currents in ms
}
# Synapse common parameters
STDP_synapseparams = {
'alpha': random.normal(0.5, 5.0), # Asymmetry parameter (scales depressing increments as alpha*lambda)
'lambda': 0.5 # Step size
}
# Glutamate synapse
STDP_synparams_Glu = dict({'delay': random.uniform(low=1.0, high=1.3), # Distribution of delay values for connections
'weight': w_Glu, # Weight (power) of synapse
'Wmax': 20.}, **STDP_synapseparams) # Maximum allowed weight
# GABA synapse
STDP_synparams_GABA = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_GABA,
'Wmax': -20.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_ACh,
'Wmax': 20.}, **STDP_synapseparams)
# Noradrenaline excitatory synapse
NORA_synparams_ex = dict({'delay': 1.,
'weight': w_NA_ex,
'Wmax': 100.})
# Noradrenaline inhibitory synapse
NORA_synparams_in = dict({'delay': 1.,
'weight': w_NA_in,
'Wmax': -100.})
# Dopamine excitatory synapse
DOPA_synparams_ex = dict({'delay': 1.,
'weight': w_DA_ex,
'Wmax': 100.})
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'delay': 1.,
'weight': w_DA_in,
'Wmax': -100.})
# Serotonin excitatory synapse
SERO_synparams_ex = dict({'delay': 1.,
'weight': w_SERO_ex,
'Wmax': 100.})
# Serotonin inhibitory synapse
SERO_synparams_in = dict({'delay': 1.,
'weight': w_SERO_in,
'Wmax': -100.})
# Dictionary of synapses with keys and their parameters
synapses = {GABA: (gaba_synapse, w_GABA ),
Glu: (glu_synapse, w_Glu ),
ACh: (ach_synapse, w_ACh ),
NA_ex: (nora_synapse_ex, w_NA_ex),
NA_in: (nora_synapse_in, w_NA_in),
DA_ex: (dopa_synapse_ex, w_DA_ex),
DA_in: (dopa_synapse_in, w_DA_in),
SERO_ex: (sero_synapse_ex, w_SERO_ex),
SERO_in: (sero_synapse_in, w_SERO_in),
}
# Parameters for generator
static_syn = {
'weight': w_Glu * 5,
'delay': pg_delay
}
# Device parameters
multimeter_param = {'to_memory': True,
'to_file': False,
'withtime': True,
'interval': 0.1,
'record_from': ['V_m'],
'withgid': True}
detector_param = {'label': 'spikes',
'withtime': True,
'withgid': True,
'to_file': False,
'to_memory': True,
'scientific': True}
|
gpl-2.0
| -8,908,638,224,511,350,000
| 38.021053
| 118
| 0.488535
| false
| 3.394689
| false
| false
| false
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/theano/ifelse.py
|
1
|
30209
|
"""
IfElse introduces lazy evaluation in Theano (coupled with the CVM/VM
linkers). It resembles the if clause of any programming language, that
has a `then` and `else` branch, and executes either one or the other
according to the condition provided.
This op differs from the already existent `switch` op, that evaluates both
branches of the clause and afterwards picks (according to the condition)
which value to report. Note also that `switch` is an elemwise operation (so
it picks each entry of a matrix according to the condition) while `ifelse`
is a global operation with a scalar condition.
"""
from __future__ import absolute_import, print_function, division
from copy import deepcopy
from theano.compat import izip
import logging
import numpy
import theano.tensor
from theano.tensor import TensorType
from theano import gof
from theano.gof import Op, Apply
from six import iteritems
from six.moves import xrange
from theano.compile import optdb
from theano.tensor import opt
from theano.scan_module.scan_utils import find_up
from theano.scan_module.scan_utils import clone
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"James Bergstra "
"Dumitru Erhan "
"David Warde-Farley")
__copyright__ = "(c) 2010, Universite de Montreal"
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
_logger = logging.getLogger('theano.ifelse')
class IfElse(Op):
"""
Op that provides conditional graph evaluation if used with the CVM/VM
linkers. Note that there exists a helpful function `ifelse` that should
be used to instantiate the op!
According to a scalar condition `condition` the op evaluates and then
returns all the tensors provided on the `then` branch, otherwise it
evaluates and returns the tensors provided on the `else` branch. The op
supports multiple tensors on each branch, with the condition that the same
number of tensors are on the `then` as on the `else` and there is a one
to one correspondence between them (shape and dtype wise).
The `then` branch is defined as the first N tensors (after the
condition), while the `else` branch is defined as the last N tensors.
Example usage:
``rval = ifelse(condition, rval_if_true1, .., rval_if_trueN,
rval_if_false1, rval_if_false2, .., rval_if_falseN)``
:note:
Other linkers than CVM and VM are INCOMPATIBLE with this Op, and
will ignore its lazy characteristic, computing both the True and
False branch before picking one.
"""
def __init__(self, n_outs, as_view=False, gpu=False, name=None):
if as_view:
# check destroyhandler and others to ensure that a view_map with
# multiple inputs can work
view_map = {}
for idx in xrange(n_outs):
view_map[idx] = [idx + 1]
self.view_map = view_map
self.as_view = as_view
self.gpu = gpu
self.n_outs = n_outs
self.name = name
def __eq__(self, other):
if not type(self) == type(other):
return False
if not self.as_view == other.as_view:
return False
if not self.gpu == other.gpu:
return False
if not self.n_outs == other.n_outs:
return False
return True
def __hash__(self):
rval = (hash(type(self)) ^
hash(self.as_view) ^
hash(self.gpu) ^
hash(self.n_outs))
return rval
def __str__(self):
args = []
if self.name is not None:
args.append(self.name)
if self.as_view:
args.append('inplace')
if self.gpu:
args.append('gpu')
return 'if{%s}' % ','.join(args)
def infer_shape(self, node, inputs_shapes):
# By construction, corresponding then/else pairs have the same number
# of dimensions
ts_shapes = inputs_shapes[1:][:self.n_outs]
fs_shapes = inputs_shapes[1:][self.n_outs:]
# All elements of all shape tuples for the true and false outputs are
# unpacked into the inputs of a separate ifelse, and then the outputs
# of that ifelse are packed back into shape tuples.
new_ts_inputs = []
for ts_shape in ts_shapes:
if isinstance(ts_shape, (list, tuple)):
new_ts_inputs += list(ts_shape)
else:
# It can be None for generic objects
return [None] * self.n_outs
new_fs_inputs = []
for fs_shape in fs_shapes:
if isinstance(fs_shape, (list, tuple)):
new_fs_inputs += list(fs_shape)
else:
# It can be None for generic objects
return [None] * self.n_outs
assert len(new_ts_inputs) == len(new_fs_inputs)
if len(new_ts_inputs + new_fs_inputs) > 0:
name_tokens = ['shape']
if self.name is not None:
name_tokens.append(self.name)
new_ifelse = IfElse(
n_outs=len(new_ts_inputs),
as_view=False,
gpu=False,
name='_'.join(name_tokens))
new_outs = new_ifelse(node.inputs[0],
*(new_ts_inputs + new_fs_inputs),
**dict(return_list=True))
else:
new_outs = []
# generate pairs of shapes
out_shapes = []
for out in node.outputs:
out_shapes.append(tuple(new_outs[:out.ndim]))
new_outs = new_outs[out.ndim:]
# new_outs should be an empty list after last iteration
assert len(new_outs) == 0
return out_shapes
def make_node(self, c, *args):
assert len(args) == 2 * self.n_outs, (
"Wrong number of arguments to make_node: "
"expected %d, got %d" % (2 * self.n_outs, len(args))
)
c = theano.tensor.as_tensor_variable(c)
if not self.gpu:
# When gpu is true, we are given only cuda ndarrays, and we want
# to keep them as cuda ndarrays
nw_args = []
for x in args:
if hasattr(x, '_as_TensorVariable'):
nw_args.append(x._as_TensorVariable())
elif isinstance(x, theano.Variable):
nw_args.append(x)
else:
nw_args.append(theano.tensor.as_tensor_variable(x))
args = nw_args
ts = args[:self.n_outs]
fs = args[self.n_outs:]
for t, f in izip(ts, fs):
if t.type != f.type:
raise TypeError(('IfElse requires same types for true and '
'false return values'), t, f, t.type, f.type)
if c.ndim > 0:
raise TypeError(('Condition given to the op has to be a scalar '
'with 0 standing for False, anything else '
'for True'))
return Apply(self, [c] + list(args), [t.type() for t in ts])
def R_op(self, inputs, eval_points):
return self(inputs[0], *eval_points[1:], **dict(return_list=True))
def grad(self, ins, grads):
ts = ins[1:][:self.n_outs]
fs = ins[1:][self.n_outs:]
if self.name is not None:
nw_name_t = self.name + '_grad_t'
nw_name_f = self.name + '_grad_f'
else:
nw_name_t = None
nw_name_f = None
if_true_op = IfElse(n_outs=self.n_outs,
as_view=self.as_view,
gpu=self.gpu,
name=nw_name_t)
if_false_op = IfElse(n_outs=self.n_outs,
as_view=self.as_view,
gpu=self.gpu,
name=nw_name_f)
# The grads can have a different dtype than the inputs.
# As inputs true/false pair must have the same dtype,
# we must cast the zeros to the corresponding grad dtype
# and not the input dtype.
if_true = ([ins[0]] +
grads +
[theano.tensor.zeros_like(t, dtype=grads[i].dtype)
for i, t in enumerate(ts)])
if_false = ([ins[0]] +
[theano.tensor.zeros_like(f, dtype=grads[i].dtype)
for i, f in enumerate(fs)] +
grads)
condition = ins[0]
# condition does affect the elements of the output so it is connected.
# For the sake of making the gradient convenient we assume that
# condition + epsilon always triggers the same branch as condition
condition_grad = condition.zeros_like().astype(theano.config.floatX)
return ([condition_grad] +
if_true_op(*if_true, **dict(return_list=True)) +
if_false_op(*if_false, **dict(return_list=True)))
def make_thunk(self, node, storage_map, compute_map, no_recycling, impl=None):
cond = node.inputs[0]
ts = node.inputs[1:][:self.n_outs]
fs = node.inputs[1:][self.n_outs:]
outputs = node.outputs
def thunk():
if not compute_map[cond][0]:
return [0]
else:
truthval = storage_map[cond][0]
if truthval != 0:
ls = [idx + 1 for idx in xrange(self.n_outs)
if not compute_map[ts[idx]][0]]
if len(ls) > 0:
return ls
else:
for out, t in izip(outputs, ts):
compute_map[out][0] = 1
val = storage_map[t][0]
if self.as_view:
storage_map[out][0] = val
# Work around broken numpy deepcopy
elif type(val) in (numpy.ndarray, numpy.memmap):
storage_map[out][0] = val.copy()
else:
storage_map[out][0] = deepcopy(val)
return []
else:
ls = [1 + idx + self.n_outs for idx in xrange(self.n_outs)
if not compute_map[fs[idx]][0]]
if len(ls) > 0:
return ls
else:
for out, f in izip(outputs, fs):
compute_map[out][0] = 1
# can't view both outputs unless destroyhandler
# improves
# Work around broken numpy deepcopy
val = storage_map[f][0]
if type(val) in (numpy.ndarray, numpy.memmap):
storage_map[out][0] = val.copy()
else:
storage_map[out][0] = deepcopy(val)
return []
thunk.lazy = True
thunk.inputs = [storage_map[v] for v in node.inputs]
thunk.outputs = [storage_map[v] for v in node.outputs]
return thunk
def ifelse(condition, then_branch, else_branch, name=None):
"""
This function corresponds to an if statement, returning (and evaluating)
inputs in the ``then_branch`` if ``condition`` evaluates to True or
inputs in the ``else_branch`` if ``condition`` evaluates to False.
:type condition: scalar like
:param condition:
``condition`` should be a tensor scalar representing the condition.
If it evaluates to 0 it corresponds to False, anything else stands
for True.
:type then_branch: list of theano expressions/ theano expression
:param then_branch:
A single theano variable or a list of theano variables that the
function should return as the output if ``condition`` evaluates to
true. The number of variables should match those in the
``else_branch``, and there should be a one to one correspondence
(type wise) with the tensors provided in the else branch
:type else_branch: list of theano expressions/ theano expressions
:param else_branch:
A single theano variable or a list of theano variables that the
function should return as the output if ``condition`` evaluates to
false. The number of variables should match those in the then branch,
and there should be a one to one correspondence (type wise) with the
tensors provided in the then branch.
:return:
A list of theano variables or a single variable (depending on the
nature of the ``then_branch`` and ``else_branch``). More exactly, if
``then_branch`` and ``else_branch`` are single tensors, then
the return value is just a single variable, otherwise a
list. The values returned correspond either to the values in the
``then_branch`` or in the ``else_branch``, depending on the value of
``cond``.
"""
rval_type = None
if type(then_branch) is list:
rval_type = list
elif type(then_branch) is tuple:
rval_type = tuple
if type(then_branch) not in (list, tuple):
then_branch = [then_branch]
if type(else_branch) not in (list, tuple):
else_branch = [else_branch]
# Some of the elements might be converted into another type,
# we will store them in these new_... lists.
new_then_branch = []
new_else_branch = []
for then_branch_elem, else_branch_elem in izip(then_branch, else_branch):
if not isinstance(then_branch_elem, theano.Variable):
then_branch_elem = theano.tensor.as_tensor_variable(
then_branch_elem)
if not isinstance(else_branch_elem, theano.Variable):
else_branch_elem = theano.tensor.as_tensor_variable(
else_branch_elem)
if then_branch_elem.type != else_branch_elem.type:
# If one of them is a TensorType, and the other one can be
# converted into one, then we try to do that.
# This case happens when one of the elements has a GPU type,
# for instance a shared variable that was silently moved to GPU.
if (isinstance(then_branch_elem.type, TensorType) and not
isinstance(else_branch_elem.type, TensorType)):
else_branch_elem = then_branch_elem.type.filter_variable(
else_branch_elem)
elif (isinstance(else_branch_elem.type, TensorType) and not
isinstance(then_branch_elem.type, TensorType)):
then_branch_elem = else_branch_elem.type.filter_variable(
then_branch_elem)
if then_branch_elem.type != else_branch_elem.type:
# If the types still don't match, there is a problem.
raise TypeError(
'The two branches should have identical types, but '
'they are %s and %s respectively. This error could be '
'raised if for example you provided a one element '
'list on the `then` branch but a tensor on the `else` '
'branch.' %
(then_branch_elem.type, else_branch_elem.type))
new_then_branch.append(then_branch_elem)
new_else_branch.append(else_branch_elem)
if len(then_branch) != len(else_branch):
raise ValueError(('The number of values on the `then` branch'
' should have the same number of variables as '
'the `else` branch : (variables on `then` '
'%d' % len(then_branch) + ', variables on `else` '
'%d' % len(else_branch) + ')'))
new_ifelse = IfElse(n_outs=len(then_branch),
as_view=False,
gpu=False,
name=name)
ins = [condition] + list(new_then_branch) + list(new_else_branch)
rval = new_ifelse(*ins, **dict(return_list=True))
if rval_type is None:
return rval[0]
elif rval_type is list:
return list(rval)
else:
return tuple(rval)
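# Example usage (a sketch; the laziness only takes effect when the function is
# compiled with a lazy linker such as cvm/vm):
#
#     import theano, theano.tensor as T
#     from theano.ifelse import ifelse
#
#     a, b = T.scalars('a', 'b')
#     x, y = T.matrices('x', 'y')
#     z = ifelse(T.lt(a, b), T.mean(x), T.mean(y))
#     f = theano.function([a, b, x, y], z, mode=theano.Mode(linker='vm'))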
@gof.local_optimizer([IfElse])
def cond_make_inplace(node):
op = node.op
if (isinstance(op, IfElse) and
not op.as_view and
# For big graph, do not make inplace scalar to speed up
# optimization.
(len(node.fgraph.apply_nodes) < 500 or
not all([getattr(o.type, 'ndim', -1) == 0
for o in node.outputs]))):
return IfElse(n_outs=op.n_outs,
as_view=True,
gpu=op.gpu,
name=op.name)(*node.inputs, **dict(return_list=True))
return False
optdb.register('cond_make_inplace', opt.in2out(cond_make_inplace,
ignore_newtrees=True), 95, 'fast_run', 'inplace')
# XXX: Optimizations commented pending further debugging (certain optimizations
# make computation less lazy than it should be currently).
#
# ifelse_equilibrium = gof.EquilibriumDB()
# ifelse_seqopt = gof.SequenceDB()
# ifelse_equilibrium.register('seq_ifelse', ifelse_seqopt, 'fast_run',
# 'ifelse')
''' Comments:
I've written these comments to explain how the optimization of the ifelse
function works (for future developers that need to parse this part of the
code). Please try to keep these comments in sync with whatever changes you
add to the code.
ifelse optimizations are registered before canonicalize!
The optimizations are called in sequence as follows:
* equilibrium shell (runs until no change):
* ifelse_lift
* ifelse_merge_ifs
* ifelse_merge_nodes
* ifelse_remove_identical_inside
* ifelse_sameCondTrue_inside
* ifelse_sameCondFalse_inside
* merge_nodes_1
* ifelse_sameCondTrue
* ifelse_sameCondFalse
* ifelse_removeIdentical
where each of the optimizations does the following:
`ifelse_lift` (def cond_lift_single_if):
'''
# optdb.register('ifelse_equilibriumOpt', ifelse_equilibrium, .5, 'fast_run',
# 'ifelse')
acceptable_ops = (theano.tensor.basic.Dot,
theano.tensor.basic.Reshape,
theano.tensor.basic.Shape,
theano.tensor.SpecifyShape,
theano.tensor.basic.MaxAndArgmax,
theano.tensor.Subtensor,
theano.tensor.IncSubtensor,
theano.tensor.basic.Rebroadcast,
theano.tensor.basic.Alloc,
theano.tensor.elemwise.Elemwise,
theano.tensor.elemwise.DimShuffle)
@gof.local_optimizer(acceptable_ops)
def ifelse_lift_single_if_through_acceptable_ops(main_node):
"""This optimization lifts up certain ifelse instances.
op(ifelse(c, x, y)) -> ifelse(c, op(x), op(y))
if `op` is in the `acceptable_ops` list, and there is no other if as
input to that specific `op`, and the if has no other clients !?
"""
if not (isinstance(main_node.op, acceptable_ops)):
return False
all_inp_nodes = set()
for inp in main_node.inputs:
all_inp_nodes.add(inp.owner)
ifnodes = [x for x in list(all_inp_nodes)
if x and isinstance(x.op, IfElse)]
# if we have multiple ifs as inputs .. it all becomes quite complicated
# :)
if len(ifnodes) != 1:
return False
node = ifnodes[0]
op = node.op
ts = node.inputs[1:][:op.n_outs]
fs = node.inputs[1:][op.n_outs:]
# outs = main_node.outputs
mop = main_node.op
true_ins = []
false_ins = []
for x in main_node.inputs:
if x in node.outputs:
idx = node.outputs.index(x)
true_ins.append(ts[idx])
false_ins.append(fs[idx])
else:
true_ins.append(x)
false_ins.append(x)
true_eval = mop(*true_ins, **dict(return_list=True))
false_eval = mop(*false_ins, **dict(return_list=True))
# true_eval = clone(outs, replace = dict(zip(node.outputs, ts)))
# false_eval = clone(outs, replace = dict(zip(node.outputs, fs)))
nw_outs = ifelse(node.inputs[0], true_eval, false_eval, return_list=True)
return nw_outs
@gof.local_optimizer([IfElse])
def cond_merge_ifs_true(node):
op = node.op
if not isinstance(op, IfElse):
return False
t_ins = node.inputs[1:][:op.n_outs]
replace = {}
for idx, tval in enumerate(t_ins):
if (tval.owner and isinstance(tval.owner.op, IfElse) and
tval.owner.inputs[0] == node.inputs[0]):
ins_op = tval.owner.op
ins_t = tval.owner.inputs[1:][:ins_op.n_outs]
replace[idx + 1] = ins_t[tval.owner.outputs.index(tval)]
if len(replace) == 0:
return False
old_ins = list(node.inputs)
for pos, var in iteritems(replace):
old_ins[pos] = var
return op(*old_ins, **dict(return_list=True))
@gof.local_optimizer([IfElse])
def cond_merge_ifs_false(node):
op = node.op
if not isinstance(op, IfElse):
return False
f_ins = node.inputs[1:][op.n_outs:]
replace = {}
for idx, fval in enumerate(f_ins):
if (fval.owner and isinstance(fval.owner.op, IfElse) and
fval.owner.inputs[0] == node.inputs[0]):
ins_op = fval.owner.op
ins_t = fval.owner.inputs[1:][ins_op.n_outs:]
replace[idx + 1 + op.n_outs] = \
ins_t[fval.owner.outputs.index(fval)]
if len(replace) == 0:
return False
old_ins = list(node.inputs)
for pos, var in iteritems(replace):
old_ins[pos] = var
return op(*old_ins, **dict(return_list=True))
class CondMerge(gof.Optimizer):
""" Graph Optimizer that merges different cond ops """
def add_requirements(self, fgraph):
fgraph.add_feature(gof.toolbox.ReplaceValidate())
def apply(self, fgraph):
nodelist = list(fgraph.toposort())
cond_nodes = [s for s in nodelist if isinstance(s.op, IfElse)]
if len(cond_nodes) < 2:
return False
merging_node = cond_nodes[0]
for proposal in cond_nodes[1:]:
if (proposal.inputs[0] == merging_node.inputs[0] and
not find_up(proposal, merging_node)):
# Create a list of replacements for proposal
mn_ts = merging_node.inputs[1:][:merging_node.op.n_outs]
mn_fs = merging_node.inputs[1:][merging_node.op.n_outs:]
pl_ts = proposal.inputs[1:][:proposal.op.n_outs]
pl_fs = proposal.inputs[1:][proposal.op.n_outs:]
new_ins = ([merging_node.inputs[0]] +
mn_ts + pl_ts + mn_fs + pl_fs)
mn_name = '?'
if merging_node.op.name:
mn_name = merging_node.op.name
pl_name = '?'
# mn_n_ts = len(mn_ts)
# mn_n_fs = len(mn_fs)
if proposal.op.name:
pl_name = proposal.op.name
new_ifelse = IfElse(
n_outs=len(mn_ts + pl_ts),
as_view=False,
gpu=False,
name=mn_name + '&' + pl_name)
print('here')
new_outs = new_ifelse(*new_ins, **dict(return_list=True))
new_outs = [clone(x) for x in new_outs]
old_outs = []
if type(merging_node.outputs) not in (list, tuple):
old_outs += [merging_node.outputs]
else:
old_outs += merging_node.outputs
if type(proposal.outputs) not in (list, tuple):
old_outs += [proposal.outputs]
else:
old_outs += proposal.outputs
pairs = list(zip(old_outs, new_outs))
fgraph.replace_all_validate(pairs, reason='cond_merge')
@gof.local_optimizer([IfElse])
def cond_remove_identical(node):
op = node.op
if not isinstance(op, IfElse):
return False
ts = node.inputs[1:][:op.n_outs]
fs = node.inputs[1:][op.n_outs:]
# sync outs
out_map = {}
for idx in xrange(len(node.outputs)):
if idx not in out_map:
for jdx in xrange(idx + 1, len(node.outputs)):
if (ts[idx] == ts[jdx] and
fs[idx] == fs[jdx] and
jdx not in out_map):
out_map[jdx] = idx
if len(out_map) == 0:
return False
nw_ts = []
nw_fs = []
inv_map = {}
pos = 0
for idx in xrange(len(node.outputs)):
if idx not in out_map:
inv_map[idx] = pos
pos = pos + 1
nw_ts.append(ts[idx])
nw_fs.append(fs[idx])
new_ifelse = IfElse(n_outs=len(nw_ts),
as_view=op.as_view,
gpu=op.gpu,
name=op.name)
new_ins = [node.inputs[0]] + nw_ts + nw_fs
new_outs = new_ifelse(*new_ins, **dict(return_list=True))
rval = []
for idx in xrange(len(node.outputs)):
if idx in out_map:
rval += [new_outs[inv_map[out_map[idx]]]]
else:
rval += [new_outs[inv_map[idx]]]
return rval
@gof.local_optimizer([IfElse])
def cond_merge_random_op(main_node):
if isinstance(main_node.op, IfElse):
return False
all_inp_nodes = set()
for inp in main_node.inputs:
all_inp_nodes.add(inp.owner)
cond_nodes = [x for x in list(all_inp_nodes)
if x and isinstance(x.op, IfElse)]
if len(cond_nodes) < 2:
return False
merging_node = cond_nodes[0]
for proposal in cond_nodes[1:]:
if (proposal.inputs[0] == merging_node.inputs[0] and
not find_up(proposal, merging_node) and
not find_up(merging_node, proposal)):
# Create a list of replacements for proposal
mn_ts = merging_node.inputs[1:][:merging_node.op.n_outs]
mn_fs = merging_node.inputs[1:][merging_node.op.n_outs:]
pl_ts = proposal.inputs[1:][:proposal.op.n_outs]
pl_fs = proposal.inputs[1:][proposal.op.n_outs:]
new_ins = ([merging_node.inputs[0]] +
mn_ts + pl_ts + mn_fs + pl_fs)
mn_name = '?'
if merging_node.op.name:
mn_name = merging_node.op.name
pl_name = '?'
# mn_n_ts = len(mn_ts)
# mn_n_fs = len(mn_fs)
if proposal.op.name:
pl_name = proposal.op.name
new_ifelse = IfElse(
n_outs=len(mn_ts + pl_ts),
as_view=False,
gpu=False,
name=mn_name + '&' + pl_name)
new_outs = new_ifelse(*new_ins, **dict(return_list=True))
old_outs = []
if type(merging_node.outputs) not in (list, tuple):
old_outs += [merging_node.outputs]
else:
old_outs += merging_node.outputs
if type(proposal.outputs) not in (list, tuple):
old_outs += [proposal.outputs]
else:
old_outs += proposal.outputs
pairs = list(zip(old_outs, new_outs))
main_outs = clone(main_node.outputs, replace=pairs)
return main_outs
# XXX: Optimizations commented pending further debugging (certain optimizations
# make computation less lazy than it should be currently).
#
# pushout_equilibrium = gof.EquilibriumDB()
#
# XXX: This optimization doesn't seem to exist anymore?
# pushout_equilibrium.register("cond_lift_single_if",
# opt.in2out(cond_lift_single_if,
# ignore_newtrees=True),
# 'fast_run', 'ifelse')
#
# pushout_equilibrium.register("cond_merge_random_op",
# opt.in2out(cond_merge_random_op,
# ignore_newtrees=True),
# 'fast_run', 'ifelse')
#
#
# pushout_equilibrium.register("ifelse_merge",
# gof.MergeOptimizer(skip_const_merge=False),
# 'fast_run', 'ifelse')
#
# pushout_equilibrium.register("ifelse_remove_identical_inside",
# opt.in2out(cond_remove_identical,
# ignore_newtrees=True),
# 'fast_run', 'ifelse')
#
# pushout_equilibrium.register('ifelse_sameCondTrue_inside',
# opt.in2out(cond_merge_ifs_true,
# ignore_newtrees=True),
# 'fast_run', 'ifelse')
#
# pushout_equilibrium.register('ifelse_sameCondFalse_inside',
# opt.in2out(cond_merge_ifs_false,
# ignore_newtrees=True),
# 'fast_run', 'ifelse')
#
# ifelse_seqopt.register('ifelse_condPushOut_equilibrium',
# pushout_equilibrium,
# 1, 'fast_run', 'ifelse')
#
# ifelse_seqopt.register('merge_nodes_1',
# gof.MergeOptimizer(skip_const_merge=False),
# 2, 'fast_run', 'ifelse')
#
#
# ifelse_seqopt.register('ifelse_sameCondTrue',
# opt.in2out(cond_merge_ifs_true,
# ignore_newtrees=True),
# 3, 'fast_run', 'ifelse')
#
#
# ifelse_seqopt.register('ifelse_sameCondFalse',
# opt.in2out(cond_merge_ifs_false,
# ignore_newtrees=True),
# 4, 'fast_run', 'ifelse')
#
#
# ifelse_seqopt.register('ifelse_removeIdenetical',
# opt.in2out(cond_remove_identical,
# ignore_newtrees=True),
# 7, 'fast_run', 'ifelse')
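# Illustrative sketch (not part of the original module): how the lazy IfElse op
# that the optimizations above rewrite is normally built from user code, assuming
# a standard Theano installation exposing `theano.ifelse.ifelse`.
def _example_lazy_ifelse():
    import theano
    import theano.tensor as T
    from theano.ifelse import ifelse as lazy_ifelse
    a, b = T.scalars('a', 'b')
    x, y = T.matrices('x', 'y')
    # Only the branch selected by the condition is evaluated at run time; the
    # merging optimizers above try to preserve that laziness while collapsing
    # IfElse nodes that share the same condition.
    z = lazy_ifelse(T.lt(a, b), T.mean(x), T.mean(y))
    return theano.function([a, b, x, y], z)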
|
agpl-3.0
| -1,317,404,698,973,674,000
| 37.581098
| 82
| 0.543646
| false
| 3.826831
| false
| false
| false
|
ReactiveX/RxPY
|
rx/core/operators/exclusive.py
|
1
|
2032
|
from typing import Callable
import rx
from rx.core import Observable
from rx.disposable import CompositeDisposable, SingleAssignmentDisposable
from rx.internal.utils import is_future
def _exclusive() -> Callable[[Observable], Observable]:
"""Performs a exclusive waiting for the first to finish before
subscribing to another observable. Observables that come in between
subscriptions will be dropped on the floor.
Returns:
An exclusive observable with only the results that
happen when subscribed.
"""
def exclusive(source: Observable) -> Observable:
def subscribe(observer, scheduler=None):
has_current = [False]
is_stopped = [False]
m = SingleAssignmentDisposable()
g = CompositeDisposable()
g.add(m)
def on_next(inner_source):
if not has_current[0]:
has_current[0] = True
inner_source = rx.from_future(inner_source) if is_future(inner_source) else inner_source
inner_subscription = SingleAssignmentDisposable()
g.add(inner_subscription)
def on_completed_inner():
g.remove(inner_subscription)
has_current[0] = False
if is_stopped[0] and len(g) == 1:
observer.on_completed()
inner_subscription.disposable = inner_source.subscribe_(
observer.on_next,
observer.on_error,
on_completed_inner,
scheduler
)
def on_completed():
is_stopped[0] = True
if not has_current[0] and len(g) == 1:
observer.on_completed()
m.disposable = source.subscribe_(on_next, observer.on_error, on_completed, scheduler)
return g
return Observable(subscribe)
return exclusive
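# Illustrative sketch (not part of the original module), assuming an RxPY 3.x
# install where this operator is exposed as `rx.operators.exclusive` and Subject
# lives in `rx.subject`.
def _example_exclusive():
    import rx
    from rx import operators as ops
    from rx.subject import Subject
    outer = Subject()
    outer.pipe(ops.exclusive()).subscribe(
        on_next=print,
        on_completed=lambda: print('done'))
    outer.on_next(rx.of(1, 2, 3))   # subscribed to immediately
    outer.on_next(rx.of(4, 5, 6))   # dropped if the first inner source is still active
    outer.on_completed()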
|
mit
| -6,735,326,453,861,305,000
| 34.034483
| 108
| 0.557579
| false
| 4.747664
| false
| false
| false
|
rmcgibbo/nebterpolator
|
nebterpolator/smoothing.py
|
1
|
6493
|
"""Smoothing a 1d signal
"""
##############################################################################
# Imports
##############################################################################
# library imports
import numpy as np
from scipy.optimize import leastsq
from scipy.signal import lfilter, lfilter_zi, filtfilt, butter
##############################################################################
# Globals
##############################################################################
__all__ = ['polynomial_smooth', 'window_smooth', 'buttersworth_smooth']
##############################################################################
# Functions
##############################################################################
def polynomial_smooth(y, x=None, order=2, end_weight=1):
"""Smooth a dataset by fitting it to a polynomial
Parameters
----------
y : np.ndarray
The signal
x : np.ndarray, optional
The x coordinate of each point. If left unsupplied, we'll
take the x range to be just the ints 0 through len(y)-1
order : int
The order of the polynomial
Returns
-------
smoothed : np.ndarray
The value of the fitted polynomial at each point x
"""
if x is None:
x = np.arange(len(y))
weights = np.r_[end_weight, np.ones(len(x)-2), end_weight]
def func(p):
return (np.polyval(p, x) - y) * weights
# need 1 more for the constant, so that order 2 is quadratic
# (even though it's 3 params)
#popt, pcov = curve_fit(func, x, y, p0=np.ones(order+1), sigma=1.0/weights)
popt, covp, info, msg, ier = leastsq(func, x0=np.zeros(order+1),
full_output=True)
return np.polyval(popt, x)
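# Illustrative sketch (not part of the original module): fitting a quadratic
# through a noisy parabola with the helper above.
def _example_polynomial_smooth():
    x = np.linspace(0, 1, 100)
    y = 3 * x ** 2 - x + 0.05 * np.random.randn(100)
    return polynomial_smooth(y, x=x, order=2)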
def window_smooth(signal, window_len=11, window='hanning'):
"""Smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
    This code is copied from the scipy cookbook, with stylistic improvements.
http://www.scipy.org/Cookbook/SignalSmooth
Parameters
----------
signal : np.ndarray, ndim=1
The input signal
window_len: int
The dimension of the smoothing window; should be an odd integer
window: {'flat', 'hanning', 'hamming', 'bartlett', 'blackman'}
Which type of window to use? Flat will produce a moving average
        smoothing
Returns
-------
output : np.ndarray, ndim=1
The smoothed signal
"""
if signal.ndim != 1:
raise TypeError('I only smooth 1d arrays')
if signal.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len % 2 != 1:
raise ValueError('window_len must be an odd integer')
if window_len < 3:
return signal
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', "
"'bartlett', 'blackman'")
# this does a mirroring padding
padded = np.r_[2*signal[0] - signal[window_len-1: 0: -1],
signal,
2*signal[-1] - signal[-2: -window_len-1: -1]]
if window == 'flat':
w = np.ones(window_len, 'd')
else:
w = getattr(np, window)(window_len)
output = np.convolve(w / w.sum(), padded, mode='valid')
    return output[(window_len // 2):-(window_len // 2)]
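# Illustrative sketch (not part of the original module): smoothing a noisy sine
# with the window smoother above (window_len must be odd).
def _example_window_smooth():
    t = np.linspace(0, 4 * np.pi, 500)
    noisy = np.sin(t) + 0.3 * np.random.randn(len(t))
    return window_smooth(noisy, window_len=21, window='hanning')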
def buttersworth_smooth(signal, width=11, order=3):
"""Smooth the data using zero-delay buttersworth filter
    This code is copied from the scipy cookbook, with stylistic improvements.
http://www.scipy.org/Cookbook/FiltFilt
Parameters
----------
signal : np.ndarray, ndim=1
The input signal
width : float
This acts very similar to the window_len in the window smoother. In
the implementation, the frequency of the low-pass filter is taken to
        be two over this width, so it's like "half the period" of the sinusoid
where the filter starts to kick in.
order : int, optional
The order of the filter. A small odd number is recommended. Higher
order filters cutoff more quickly, but have worse numerical
properties.
Returns
-------
output : np.ndarray, ndim=1
The smoothed signal
"""
if width < 2.0:
return signal
# first pad the signal on the ends
pad = int(np.ceil((width + 1)/2)*2 - 1) # nearest odd integer
padded = np.r_[signal[pad - 1: 0: -1], signal, signal[-1: -pad: -1]]
#padded = np.r_[[signal[0]]*pad, signal, [signal[-1]]*pad]
b, a = butter(order, 2.0 / width)
    # Apply the filter to the padded signal. Use lfilter_zi to choose the
# initial condition of the filter.
zi = lfilter_zi(b, a)
z, _ = lfilter(b, a, padded, zi=zi*padded[0])
# Apply the filter again, to have a result filtered at an order
# the same as filtfilt.
z2, _ = lfilter(b, a, z, zi=zi*z[0])
# Use filtfilt to apply the filter.
output = filtfilt(b, a, padded)
return output[(pad-1): -(pad-1)]
def angular_smooth(signal, smoothing_func=buttersworth_smooth, **kwargs):
"""Smooth an signal which represents an angle by filtering its
sine and cosine components separately.
Parameters
----------
signal : np.ndarray, ndim=1
The input signal
smoothing_func : callable
A function that takes the signal as its first argument and smoothes
it.
All other parameters (**kwargs) will be passed through to smoothing_func.
Returns
-------
    smoothed_signal : np.ndarray, ndim=1
The smoothed version of the function.
"""
sin = smoothing_func(np.sin(signal), **kwargs)
cos = smoothing_func(np.cos(signal), **kwargs)
return np.arctan2(sin, cos)
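# Illustrative sketch (not part of the original module): smoothing a heading
# that wraps around +/- pi without introducing artificial jumps.
def _example_angular_smooth():
    raw = np.pi + 0.2 * np.random.randn(300)
    wrapped = np.arctan2(np.sin(raw), np.cos(raw))   # values near the +/- pi seam
    return angular_smooth(wrapped, smoothing_func=buttersworth_smooth, width=15)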
def main():
"test code"
import matplotlib.pyplot as pp
N = 1000
sigma = 0.25
x = np.cumsum(sigma * np.random.randn(N))
y = np.cumsum(sigma * np.random.randn(N))
signal = np.arctan2(x, y)
pp.plot(signal)
    pp.plot(np.arctan2(buttersworth_smooth(np.sin(signal), width=21),
                       buttersworth_smooth(np.cos(signal), width=21)))
pp.show()
if __name__ == '__main__':
main()
|
gpl-3.0
| 7,068,469,299,697,415,000
| 31.465
| 79
| 0.574003
| false
| 3.869487
| false
| false
| false
|
DYFeng/pyaiml
|
aiml/Kernel.py
|
1
|
46819
|
# -*- coding: latin-1 -*-
"""This file contains the public interface to the aiml module."""
import AimlParser
import DefaultSubs
import Utils
from PatternMgr import PatternMgr
from WordSub import WordSub
from ConfigParser import ConfigParser
import copy
import glob
import os
import random
import re
import string
import sys
import time
import threading
import xml.sax
class Kernel:
# module constants
_globalSessionID = "_global" # key of the global session (duh)
_maxHistorySize = 10 # maximum length of the _inputs and _responses lists
_maxRecursionDepth = 100 # maximum number of recursive <srai>/<sr> tags before the response is aborted.
# special predicate keys
_inputHistory = "_inputHistory" # keys to a queue (list) of recent user input
_outputHistory = "_outputHistory" # keys to a queue (list) of recent responses.
_inputStack = "_inputStack" # Should always be empty in between calls to respond()
def __init__(self):
self._verboseMode = True
self._version = "PyAIML 0.8.6"
self._brain = PatternMgr()
self._respondLock = threading.RLock()
self._textEncoding = "utf-8"
# set up the sessions
self._sessions = {}
self._addSession(self._globalSessionID)
# Set up the bot predicates
self._botPredicates = {}
self.setBotPredicate("name", "Nameless")
# set up the word substitutors (subbers):
self._subbers = {}
self._subbers['gender'] = WordSub(DefaultSubs.defaultGender)
self._subbers['person'] = WordSub(DefaultSubs.defaultPerson)
self._subbers['person2'] = WordSub(DefaultSubs.defaultPerson2)
self._subbers['normal'] = WordSub(DefaultSubs.defaultNormal)
# set up the element processors
self._elementProcessors = {
"bot": self._processBot,
"condition": self._processCondition,
"date": self._processDate,
"formal": self._processFormal,
"gender": self._processGender,
"get": self._processGet,
"gossip": self._processGossip,
"id": self._processId,
"input": self._processInput,
"javascript": self._processJavascript,
"learn": self._processLearn,
"li": self._processLi,
"lowercase": self._processLowercase,
"person": self._processPerson,
"person2": self._processPerson2,
"random": self._processRandom,
"text": self._processText,
"sentence": self._processSentence,
"set": self._processSet,
"size": self._processSize,
"sr": self._processSr,
"srai": self._processSrai,
"star": self._processStar,
"system": self._processSystem,
"template": self._processTemplate,
"that": self._processThat,
"thatstar": self._processThatstar,
"think": self._processThink,
"topicstar": self._processTopicstar,
"uppercase": self._processUppercase,
"version": self._processVersion,
}
def bootstrap(self, brainFile = None, learnFiles = [], commands = []):
"""Prepare a Kernel object for use.
If a brainFile argument is provided, the Kernel attempts to
load the brain at the specified filename.
If learnFiles is provided, the Kernel attempts to load the
specified AIML files.
Finally, each of the input strings in the commands list is
passed to respond().
"""
start = time.clock()
if brainFile:
self.loadBrain(brainFile)
# learnFiles might be a string, in which case it should be
# turned into a single-element list.
learns = learnFiles
try: learns = [ learnFiles + "" ]
except: pass
for file in learns:
self.learn(file)
# ditto for commands
cmds = commands
try: cmds = [ commands + "" ]
except: pass
for cmd in cmds:
print self._respond(cmd, self._globalSessionID)
if self._verboseMode:
print "Kernel bootstrap completed in %.2f seconds" % (time.clock() - start)
def verbose(self, isVerbose = True):
"""Enable/disable verbose output mode."""
self._verboseMode = isVerbose
def version(self):
"""Return the Kernel's version string."""
return self._version
def numCategories(self):
"""Return the number of categories the Kernel has learned."""
# there's a one-to-one mapping between templates and categories
return self._brain.numTemplates()
def resetBrain(self):
"""Reset the brain to its initial state.
        This is essentially equivalent to:
del(kern)
kern = aiml.Kernel()
"""
del(self._brain)
self.__init__()
def loadBrain(self, filename):
"""Attempt to load a previously-saved 'brain' from the
specified filename.
NOTE: the current contents of the 'brain' will be discarded!
"""
if self._verboseMode: print "Loading brain from %s..." % filename,
start = time.clock()
self._brain.restore(filename)
if self._verboseMode:
end = time.clock() - start
print "done (%d categories in %.2f seconds)" % (self._brain.numTemplates(), end)
def saveBrain(self, filename):
"""Dump the contents of the bot's brain to a file on disk."""
if self._verboseMode: print "Saving brain to %s..." % filename,
start = time.clock()
self._brain.save(filename)
if self._verboseMode:
print "done (%.2f seconds)" % (time.clock() - start)
def getPredicate(self, name, sessionID = _globalSessionID):
"""Retrieve the current value of the predicate 'name' from the
specified session.
If name is not a valid predicate in the session, the empty
string is returned.
"""
try: return self._sessions[sessionID][name]
except KeyError: return ""
def setPredicate(self, name, value, sessionID = _globalSessionID):
"""Set the value of the predicate 'name' in the specified
session.
If sessionID is not a valid session, it will be created. If
name is not a valid predicate in the session, it will be
created.
"""
self._addSession(sessionID) # add the session, if it doesn't already exist.
self._sessions[sessionID][name] = value
def getBotPredicate(self, name):
"""Retrieve the value of the specified bot predicate.
If name is not a valid bot predicate, the empty string is returned.
"""
try: return self._botPredicates[name]
except KeyError: return ""
def setBotPredicate(self, name, value):
"""Set the value of the specified bot predicate.
If name is not a valid bot predicate, it will be created.
"""
self._botPredicates[name] = value
# Clumsy hack: if updating the bot name, we must update the
# name in the brain as well
if name == "name":
self._brain.setBotName(self.getBotPredicate("name"))
def setTextEncoding(self, encoding):
"""Set the text encoding used when loading AIML files (Latin-1, UTF-8, etc.)."""
self._textEncoding = encoding
def loadSubs(self, filename):
"""Load a substitutions file.
The file must be in the Windows-style INI format (see the
standard ConfigParser module docs for information on this
format). Each section of the file is loaded into its own
substituter.
"""
inFile = file(filename)
parser = ConfigParser()
parser.readfp(inFile, filename)
inFile.close()
for s in parser.sections():
# Add a new WordSub instance for this section. If one already
# exists, delete it.
if self._subbers.has_key(s):
del(self._subbers[s])
self._subbers[s] = WordSub()
# iterate over the key,value pairs and add them to the subber
for k,v in parser.items(s):
self._subbers[s][k] = v
def _addSession(self, sessionID):
"""Create a new session with the specified ID string."""
if self._sessions.has_key(sessionID):
return
# Create the session.
self._sessions[sessionID] = {
# Initialize the special reserved predicates
self._inputHistory: [],
self._outputHistory: [],
self._inputStack: []
}
def _deleteSession(self, sessionID):
"""Delete the specified session."""
if self._sessions.has_key(sessionID):
self._sessions.pop(sessionID)
def getSessionData(self, sessionID = None):
"""Return a copy of the session data dictionary for the
specified session.
If no sessionID is specified, return a dictionary containing
*all* of the individual session dictionaries.
"""
s = None
if sessionID is not None:
try: s = self._sessions[sessionID]
except KeyError: s = {}
else:
s = self._sessions
return copy.deepcopy(s)
def learn(self, filename):
"""Load and learn the contents of the specified AIML file.
If filename includes wildcard characters, all matching files
will be loaded and learned.
"""
for f in glob.glob(filename):
if self._verboseMode: print "Loading %s..." % f,
start = time.clock()
# Load and parse the AIML file.
parser = AimlParser.create_parser()
handler = parser.getContentHandler()
handler.setEncoding(self._textEncoding)
try: parser.parse(f)
except xml.sax.SAXParseException, msg:
err = "\nFATAL PARSE ERROR in file %s:\n%s\n" % (f,msg)
sys.stderr.write(err)
continue
# store the pattern/template pairs in the PatternMgr.
for key,tem in handler.categories.items():
self._brain.add(key,tem)
# Parsing was successful.
if self._verboseMode:
print "done (%.2f seconds)" % (time.clock() - start)
def respond(self, input, sessionID = _globalSessionID):
"""Return the Kernel's response to the input string."""
if len(input) == 0:
return ""
#ensure that input is a unicode string
try: input = input.decode(self._textEncoding, 'replace')
except UnicodeError: pass
except AttributeError: pass
# prevent other threads from stomping all over us.
self._respondLock.acquire()
# Add the session, if it doesn't already exist
self._addSession(sessionID)
# split the input into discrete sentences
sentences = Utils.sentences(input)
finalResponse = ""
for s in sentences:
# Add the input to the history list before fetching the
# response, so that <input/> tags work properly.
inputHistory = self.getPredicate(self._inputHistory, sessionID)
inputHistory.append(s)
while len(inputHistory) > self._maxHistorySize:
inputHistory.pop(0)
self.setPredicate(self._inputHistory, inputHistory, sessionID)
# Fetch the response
response = self._respond(s, sessionID)
# add the data from this exchange to the history lists
outputHistory = self.getPredicate(self._outputHistory, sessionID)
outputHistory.append(response)
while len(outputHistory) > self._maxHistorySize:
outputHistory.pop(0)
self.setPredicate(self._outputHistory, outputHistory, sessionID)
# append this response to the final response.
finalResponse += (response + " ")
finalResponse = finalResponse.strip()
assert(len(self.getPredicate(self._inputStack, sessionID)) == 0)
# release the lock and return
self._respondLock.release()
try: return finalResponse.encode(self._textEncoding)
except UnicodeError: return finalResponse
# This version of _respond() just fetches the response for some input.
# It does not mess with the input and output histories. Recursive calls
# to respond() spawned from tags like <srai> should call this function
# instead of respond().
def _respond(self, input, sessionID):
"""Private version of respond(), does the real work."""
if len(input) == 0:
return ""
# guard against infinite recursion
inputStack = self.getPredicate(self._inputStack, sessionID)
if len(inputStack) > self._maxRecursionDepth:
if self._verboseMode:
err = "WARNING: maximum recursion depth exceeded (input='%s')" % input.encode(self._textEncoding, 'replace')
sys.stderr.write(err)
return ""
# push the input onto the input stack
inputStack = self.getPredicate(self._inputStack, sessionID)
inputStack.append(input)
self.setPredicate(self._inputStack, inputStack, sessionID)
# run the input through the 'normal' subber
subbedInput = self._subbers['normal'].sub(input)
# fetch the bot's previous response, to pass to the match()
# function as 'that'.
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try: that = outputHistory[-1]
except IndexError: that = ""
subbedThat = self._subbers['normal'].sub(that)
# fetch the current topic
topic = self.getPredicate("topic", sessionID)
subbedTopic = self._subbers['normal'].sub(topic)
# Determine the final response.
response = ""
elem = self._brain.match(subbedInput, subbedThat, subbedTopic)
if elem is None:
if self._verboseMode:
err = "WARNING: No match found for input: %s\n" % input.encode(self._textEncoding)
sys.stderr.write(err)
else:
# Process the element into a response string.
response += self._processElement(elem, sessionID).strip()
response += " "
response = response.strip()
# pop the top entry off the input stack.
inputStack = self.getPredicate(self._inputStack, sessionID)
inputStack.pop()
self.setPredicate(self._inputStack, inputStack, sessionID)
return response
def _processElement(self,elem, sessionID):
"""Process an AIML element.
The first item of the elem list is the name of the element's
XML tag. The second item is a dictionary containing any
attributes passed to that tag, and their values. Any further
items in the list are the elements enclosed by the current
element's begin and end tags; they are handled by each
element's handler function.
"""
try:
handlerFunc = self._elementProcessors[elem[0]]
except:
# Oops -- there's no handler function for this element
# type!
if self._verboseMode:
err = "WARNING: No handler found for <%s> element\n" % elem[0].encode(self._textEncoding, 'replace')
sys.stderr.write(err)
return ""
return handlerFunc(elem, sessionID)
######################################################
### Individual element-processing functions follow ###
######################################################
# <bot>
def _processBot(self, elem, sessionID):
"""Process a <bot> AIML element.
Required element attributes:
name: The name of the bot predicate to retrieve.
<bot> elements are used to fetch the value of global,
read-only "bot predicates." These predicates cannot be set
from within AIML; you must use the setBotPredicate() function.
"""
attrName = elem[1]['name']
return self.getBotPredicate(attrName)
# <condition>
def _processCondition(self, elem, sessionID):
"""Process a <condition> AIML element.
Optional element attributes:
name: The name of a predicate to test.
value: The value to test the predicate for.
<condition> elements come in three flavors. Each has different
attributes, and each handles their contents differently.
The simplest case is when the <condition> tag has both a 'name'
and a 'value' attribute. In this case, if the predicate
'name' has the value 'value', then the contents of the element
are processed and returned.
If the <condition> element has only a 'name' attribute, then
its contents are a series of <li> elements, each of which has
a 'value' attribute. The list is scanned from top to bottom
until a match is found. Optionally, the last <li> element can
have no 'value' attribute, in which case it is processed and
returned if no other match is found.
If the <condition> element has neither a 'name' nor a 'value'
attribute, then it behaves almost exactly like the previous
case, except that each <li> subelement (except the optional
last entry) must now include both 'name' and 'value'
attributes.
"""
attr = None
response = ""
attr = elem[1]
# Case #1: test the value of a specific predicate for a
# specific value.
if attr.has_key('name') and attr.has_key('value'):
val = self.getPredicate(attr['name'], sessionID)
if val == attr['value']:
for e in elem[2:]:
response += self._processElement(e,sessionID)
return response
else:
# Case #2 and #3: Cycle through <li> contents, testing a
# name and value pair for each one.
try:
name = None
if attr.has_key('name'):
name = attr['name']
                # Get the list of <li> elements
listitems = []
for e in elem[2:]:
if e[0] == 'li':
listitems.append(e)
# if listitems is empty, return the empty string
if len(listitems) == 0:
return ""
# iterate through the list looking for a condition that
# matches.
foundMatch = False
for li in listitems:
try:
liAttr = li[1]
# if this is the last list item, it's allowed
# to have no attributes. We just skip it for now.
if len(liAttr.keys()) == 0 and li == listitems[-1]:
continue
# get the name of the predicate to test
liName = name
if liName == None:
liName = liAttr['name']
# get the value to check against
liValue = liAttr['value']
# do the test
if self.getPredicate(liName, sessionID) == liValue:
foundMatch = True
response += self._processElement(li,sessionID)
break
except:
# No attributes, no name/value attributes, no
# such predicate/session, or processing error.
if self._verboseMode: print "Something amiss -- skipping listitem", li
raise
if not foundMatch:
# Check the last element of listitems. If it has
# no 'name' or 'value' attribute, process it.
try:
li = listitems[-1]
liAttr = li[1]
if not (liAttr.has_key('name') or liAttr.has_key('value')):
response += self._processElement(li, sessionID)
except:
# listitems was empty, no attributes, missing
# name/value attributes, or processing error.
if self._verboseMode: print "error in default listitem"
raise
except:
# Some other catastrophic cataclysm
if self._verboseMode: print "catastrophic condition failure"
raise
return response
# <date>
def _processDate(self, elem, sessionID):
"""Process a <date> AIML element.
<date> elements resolve to the current date and time. The
AIML specification doesn't require any particular format for
this information, so I go with whatever's simplest.
"""
return time.asctime()
# <formal>
def _processFormal(self, elem, sessionID):
"""Process a <formal> AIML element.
<formal> elements process their contents recursively, and then
capitalize the first letter of each word of the result.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return string.capwords(response)
# <gender>
def _processGender(self,elem, sessionID):
"""Process a <gender> AIML element.
<gender> elements process their contents, and then swap the
gender of any third-person singular pronouns in the result.
        This substitution is handled by the aiml.WordSub module.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return self._subbers['gender'].sub(response)
# <get>
def _processGet(self, elem, sessionID):
"""Process a <get> AIML element.
Required element attributes:
name: The name of the predicate whose value should be
retrieved from the specified session and returned. If the
predicate doesn't exist, the empty string is returned.
<get> elements return the value of a predicate from the
specified session.
"""
return self.getPredicate(elem[1]['name'], sessionID)
# <gossip>
def _processGossip(self, elem, sessionID):
"""Process a <gossip> AIML element.
<gossip> elements are used to capture and store user input in
an implementation-defined manner, theoretically allowing the
bot to learn from the people it chats with. I haven't
        decided how to define my implementation, so right now
<gossip> behaves identically to <think>.
"""
return self._processThink(elem, sessionID)
# <id>
def _processId(self, elem, sessionID):
""" Process an <id> AIML element.
<id> elements return a unique "user id" for a specific
conversation. In PyAIML, the user id is the name of the
current session.
"""
return sessionID
# <input>
def _processInput(self, elem, sessionID):
"""Process an <input> AIML element.
Optional attribute elements:
index: The index of the element from the history list to
return. 1 means the most recent item, 2 means the one
before that, and so on.
<input> elements return an entry from the input history for
the current session.
"""
inputHistory = self.getPredicate(self._inputHistory, sessionID)
try: index = int(elem[1]['index'])
except: index = 1
try: return inputHistory[-index]
except IndexError:
if self._verboseMode:
err = "No such index %d while processing <input> element.\n" % index
sys.stderr.write(err)
return ""
# <javascript>
def _processJavascript(self, elem, sessionID):
"""Process a <javascript> AIML element.
<javascript> elements process their contents recursively, and
then run the results through a server-side Javascript
interpreter to compute the final response. Implementations
are not required to provide an actual Javascript interpreter,
        and right now PyAIML doesn't; <javascript> elements behave
exactly like <think> elements.
"""
return self._processThink(elem, sessionID)
# <learn>
def _processLearn(self, elem, sessionID):
"""Process a <learn> AIML element.
<learn> elements process their contents recursively, and then
treat the result as an AIML file to open and learn.
"""
filename = ""
for e in elem[2:]:
filename += self._processElement(e, sessionID)
self.learn(filename)
return ""
# <li>
def _processLi(self,elem, sessionID):
"""Process an <li> AIML element.
Optional attribute elements:
name: the name of a predicate to query.
value: the value to check that predicate for.
<li> elements process their contents recursively and return
the results. They can only appear inside <condition> and
<random> elements. See _processCondition() and
_processRandom() for details of their usage.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response
# <lowercase>
def _processLowercase(self,elem, sessionID):
"""Process a <lowercase> AIML element.
<lowercase> elements process their contents recursively, and
then convert the results to all-lowercase.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return string.lower(response)
# <person>
def _processPerson(self,elem, sessionID):
"""Process a <person> AIML element.
<person> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 2nd
        person, and vice versa. This substitution is handled by the
aiml.WordSub module.
If the <person> tag is used atomically (e.g. <person/>), it is
a shortcut for <person><star/></person>.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
if len(elem[2:]) == 0: # atomic <person/> = <person><star/></person>
response = self._processElement(['star',{}], sessionID)
return self._subbers['person'].sub(response)
# <person2>
def _processPerson2(self,elem, sessionID):
"""Process a <person2> AIML element.
<person2> elements process their contents recursively, and then
convert all pronouns in the results from 1st person to 3rd
        person, and vice versa. This substitution is handled by the
aiml.WordSub module.
If the <person2> tag is used atomically (e.g. <person2/>), it is
a shortcut for <person2><star/></person2>.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
if len(elem[2:]) == 0: # atomic <person2/> = <person2><star/></person2>
response = self._processElement(['star',{}], sessionID)
return self._subbers['person2'].sub(response)
# <random>
def _processRandom(self, elem, sessionID):
"""Process a <random> AIML element.
<random> elements contain zero or more <li> elements. If
none, the empty string is returned. If one or more <li>
elements are present, one of them is selected randomly to be
processed recursively and have its results returned. Only the
chosen <li> element's contents are processed. Any non-<li> contents are
ignored.
"""
listitems = []
for e in elem[2:]:
if e[0] == 'li':
listitems.append(e)
if len(listitems) == 0:
return ""
# select and process a random listitem.
random.shuffle(listitems)
return self._processElement(listitems[0], sessionID)
# <sentence>
def _processSentence(self,elem, sessionID):
"""Process a <sentence> AIML element.
<sentence> elements process their contents recursively, and
then capitalize the first letter of the results.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
try:
response = response.strip()
words = string.split(response, " ", 1)
words[0] = string.capitalize(words[0])
response = string.join(words)
return response
except IndexError: # response was empty
return ""
# <set>
def _processSet(self, elem, sessionID):
"""Process a <set> AIML element.
Required element attributes:
name: The name of the predicate to set.
<set> elements process their contents recursively, and assign the results to a predicate
(given by their 'name' attribute) in the current session. The contents of the element
are also returned.
"""
value = ""
for e in elem[2:]:
value += self._processElement(e, sessionID)
self.setPredicate(elem[1]['name'], value, sessionID)
return value
# <size>
def _processSize(self,elem, sessionID):
"""Process a <size> AIML element.
<size> elements return the number of AIML categories currently
in the bot's brain.
"""
return str(self.numCategories())
# <sr>
def _processSr(self,elem,sessionID):
"""Process an <sr> AIML element.
<sr> elements are shortcuts for <srai><star/></srai>.
"""
star = self._processElement(['star',{}], sessionID)
response = self._respond(star, sessionID)
return response
# <srai>
def _processSrai(self,elem, sessionID):
"""Process a <srai> AIML element.
<srai> elements recursively process their contents, and then
pass the results right back into the AIML interpreter as a new
piece of input. The results of this new input string are
returned.
"""
newInput = ""
for e in elem[2:]:
newInput += self._processElement(e, sessionID)
return self._respond(newInput, sessionID)
# <star>
def _processStar(self, elem, sessionID):
"""Process a <star> AIML element.
Optional attribute elements:
index: Which "*" character in the current pattern should
be matched?
<star> elements return the text fragment matched by the "*"
character in the current input pattern. For example, if the
input "Hello Tom Smith, how are you?" matched the pattern
"HELLO * HOW ARE YOU", then a <star> element in the template
would evaluate to "Tom Smith".
"""
try: index = int(elem[1]['index'])
except KeyError: index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try: that = self._subbers['normal'].sub(outputHistory[-1])
except: that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("star", input, that, topic, index)
return response
# <system>
def _processSystem(self,elem, sessionID):
"""Process a <system> AIML element.
<system> elements process their contents recursively, and then
attempt to execute the results as a shell command on the
server. The AIML interpreter blocks until the command is
complete, and then returns the command's output.
For cross-platform compatibility, any file paths inside
<system> tags should use Unix-style forward slashes ("/") as a
directory separator.
"""
# build up the command string
command = ""
for e in elem[2:]:
command += self._processElement(e, sessionID)
# normalize the path to the command. Under Windows, this
# switches forward-slashes to back-slashes; all system
# elements should use unix-style paths for cross-platform
# compatibility.
#executable,args = command.split(" ", 1)
#executable = os.path.normpath(executable)
#command = executable + " " + args
command = os.path.normpath(command)
# execute the command.
response = ""
try:
out = os.popen(command)
except RuntimeError, msg:
if self._verboseMode:
err = "WARNING: RuntimeError while processing \"system\" element:\n%s\n" % msg.encode(self._textEncoding, 'replace')
sys.stderr.write(err)
return "There was an error while computing my response. Please inform my botmaster."
time.sleep(0.01) # I'm told this works around a potential IOError exception.
for line in out:
response += line + "\n"
response = string.join(response.splitlines()).strip()
return response
# <template>
def _processTemplate(self,elem, sessionID):
"""Process a <template> AIML element.
<template> elements recursively process their contents, and
return the results. <template> is the root node of any AIML
response tree.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return response
# text
def _processText(self,elem, sessionID):
"""Process a raw text element.
Raw text elements aren't really AIML tags. Text elements cannot contain
other elements; instead, the third item of the 'elem' list is a text
string, which is immediately returned. They have a single attribute,
automatically inserted by the parser, which indicates whether whitespace
in the text should be preserved or not.
"""
try: elem[2] + ""
except TypeError: raise TypeError, "Text element contents are not text"
        # If the whitespace behavior for this element is "default",
# we reduce all stretches of >1 whitespace characters to a single
# space. To improve performance, we do this only once for each
# text element encountered, and save the results for the future.
if elem[1]["xml:space"] == "default":
elem[2] = re.sub("\s+", " ", elem[2])
elem[1]["xml:space"] = "preserve"
return elem[2]
# <that>
def _processThat(self,elem, sessionID):
"""Process a <that> AIML element.
Optional element attributes:
index: Specifies which element from the output history to
return. 1 is the most recent response, 2 is the next most
recent, and so on.
<that> elements (when they appear inside <template> elements)
        are the output equivalent of <input> elements; they return one
of the Kernel's previous responses.
"""
outputHistory = self.getPredicate(self._outputHistory, sessionID)
index = 1
try:
# According to the AIML spec, the optional index attribute
# can either have the form "x" or "x,y". x refers to how
# far back in the output history to go. y refers to which
# sentence of the specified response to return.
index = int(elem[1]['index'].split(',')[0])
except:
pass
try: return outputHistory[-index]
except IndexError:
if self._verboseMode:
err = "No such index %d while processing <that> element.\n" % index
sys.stderr.write(err)
return ""
# <thatstar>
def _processThatstar(self, elem, sessionID):
"""Process a <thatstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <that> pattern to match.
<thatstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <thatstar/> returns
the portion of the previous input string that was matched by a
"*" in the current category's <that> pattern.
"""
try: index = int(elem[1]['index'])
except KeyError: index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try: that = self._subbers['normal'].sub(outputHistory[-1])
except: that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("thatstar", input, that, topic, index)
return response
# <think>
def _processThink(self,elem, sessionID):
"""Process a <think> AIML element.
<think> elements process their contents recursively, and then
discard the results and return the empty string. They're
useful for setting predicates and learning AIML files without
generating any output.
"""
for e in elem[2:]:
self._processElement(e, sessionID)
return ""
# <topicstar>
def _processTopicstar(self, elem, sessionID):
"""Process a <topicstar> AIML element.
Optional element attributes:
index: Specifies which "*" in the <topic> pattern to match.
<topicstar> elements are similar to <star> elements, except
that where <star/> returns the portion of the input string
matched by a "*" character in the pattern, <topicstar/>
returns the portion of current topic string that was matched
by a "*" in the current category's <topic> pattern.
"""
try: index = int(elem[1]['index'])
except KeyError: index = 1
# fetch the user's last input
inputStack = self.getPredicate(self._inputStack, sessionID)
input = self._subbers['normal'].sub(inputStack[-1])
# fetch the Kernel's last response (for 'that' context)
outputHistory = self.getPredicate(self._outputHistory, sessionID)
try: that = self._subbers['normal'].sub(outputHistory[-1])
except: that = "" # there might not be any output yet
topic = self.getPredicate("topic", sessionID)
response = self._brain.star("topicstar", input, that, topic, index)
return response
# <uppercase>
def _processUppercase(self,elem, sessionID):
"""Process an <uppercase> AIML element.
<uppercase> elements process their contents recursively, and
return the results with all lower-case characters converted to
upper-case.
"""
response = ""
for e in elem[2:]:
response += self._processElement(e, sessionID)
return string.upper(response)
# <version>
def _processVersion(self,elem, sessionID):
"""Process a <version> AIML element.
<version> elements return the version number of the AIML
interpreter.
"""
return self.version()
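# Illustrative sketch (not part of the original module): the minimal public
# workflow for the Kernel class above -- create it, learn an AIML file, and ask
# for a response.  "std-startup.xml" is a placeholder filename.
def _example_kernel_usage():
    k = Kernel()
    k.setBotPredicate("name", "Demo")
    k.learn("std-startup.xml")  # hypothetical AIML file on disk
    return k.respond("HELLO")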
##################################################
### Self-test functions follow ###
##################################################
def _testTag(kern, tag, input, outputList):
"""Tests 'tag' by feeding the Kernel 'input'. If the result
matches any of the strings in 'outputList', the test passes.
"""
global _numTests, _numPassed
_numTests += 1
print "Testing <" + tag + ">:",
response = kern.respond(input).decode(kern._textEncoding)
if response in outputList:
print "PASSED"
_numPassed += 1
return True
else:
print "FAILED (response: '%s')" % response.encode(kern._textEncoding, 'replace')
return False
if __name__ == "__main__":
# Run some self-tests
k = Kernel()
k.bootstrap(learnFiles="self-test.aiml")
global _numTests, _numPassed
_numTests = 0
_numPassed = 0
_testTag(k, 'bot', 'test bot', ["My name is Nameless"])
k.setPredicate('gender', 'male')
_testTag(k, 'condition test #1', 'test condition name value', ['You are handsome'])
k.setPredicate('gender', 'female')
_testTag(k, 'condition test #2', 'test condition name value', [''])
_testTag(k, 'condition test #3', 'test condition name', ['You are beautiful'])
k.setPredicate('gender', 'robot')
_testTag(k, 'condition test #4', 'test condition name', ['You are genderless'])
_testTag(k, 'condition test #5', 'test condition', ['You are genderless'])
k.setPredicate('gender', 'male')
_testTag(k, 'condition test #6', 'test condition', ['You are handsome'])
# the date test will occasionally fail if the original and "test"
# times cross a second boundary. There's no good way to avoid
# this problem and still do a meaningful test, so we simply
# provide a friendly message to be printed if the test fails.
date_warning = """
NOTE: the <date> test will occasionally report failure even if it
succeeds. So long as the response looks like a date/time string,
there's nothing to worry about.
"""
if not _testTag(k, 'date', 'test date', ["The date is %s" % time.asctime()]):
print date_warning
_testTag(k, 'formal', 'test formal', ["Formal Test Passed"])
_testTag(k, 'gender', 'test gender', ["He'd told her he heard that her hernia is history"])
_testTag(k, 'get/set', 'test get and set', ["I like cheese. My favorite food is cheese"])
_testTag(k, 'gossip', 'test gossip', ["Gossip is not yet implemented"])
_testTag(k, 'id', 'test id', ["Your id is _global"])
_testTag(k, 'input', 'test input', ['You just said: test input'])
_testTag(k, 'javascript', 'test javascript', ["Javascript is not yet implemented"])
_testTag(k, 'lowercase', 'test lowercase', ["The Last Word Should Be lowercase"])
_testTag(k, 'person', 'test person', ['HE think i knows that my actions threaten him and his.'])
_testTag(k, 'person2', 'test person2', ['YOU think me know that my actions threaten you and yours.'])
_testTag(k, 'person2 (no contents)', 'test person2 I Love Lucy', ['YOU Love Lucy'])
_testTag(k, 'random', 'test random', ["response #1", "response #2", "response #3"])
_testTag(k, 'random empty', 'test random empty', ["Nothing here!"])
_testTag(k, 'sentence', "test sentence", ["My first letter should be capitalized."])
_testTag(k, 'size', "test size", ["I've learned %d categories" % k.numCategories()])
_testTag(k, 'sr', "test sr test srai", ["srai results: srai test passed"])
_testTag(k, 'sr nested', "test nested sr test srai", ["srai results: srai test passed"])
_testTag(k, 'srai', "test srai", ["srai test passed"])
_testTag(k, 'srai infinite', "test srai infinite", [""])
_testTag(k, 'star test #1', 'You should test star begin', ['Begin star matched: You should'])
_testTag(k, 'star test #2', 'test star creamy goodness middle', ['Middle star matched: creamy goodness'])
_testTag(k, 'star test #3', 'test star end the credits roll', ['End star matched: the credits roll'])
_testTag(k, 'star test #4', 'test star having multiple stars in a pattern makes me extremely happy',
['Multiple stars matched: having, stars in a pattern, extremely happy'])
_testTag(k, 'system', "test system", ["The system says hello!"])
_testTag(k, 'that test #1', "test that", ["I just said: The system says hello!"])
_testTag(k, 'that test #2', "test that", ["I have already answered this question"])
_testTag(k, 'thatstar test #1', "test thatstar", ["I say beans"])
_testTag(k, 'thatstar test #2', "test thatstar", ["I just said \"beans\""])
_testTag(k, 'thatstar test #3', "test thatstar multiple", ['I say beans and franks for everybody'])
_testTag(k, 'thatstar test #4', "test thatstar multiple", ['Yes, beans and franks for all!'])
_testTag(k, 'think', "test think", [""])
k.setPredicate("topic", "fruit")
_testTag(k, 'topic', "test topic", ["We were discussing apples and oranges"])
k.setPredicate("topic", "Soylent Green")
_testTag(k, 'topicstar test #1', 'test topicstar', ["Solyent Green is made of people!"])
k.setPredicate("topic", "Soylent Ham and Cheese")
_testTag(k, 'topicstar test #2', 'test topicstar multiple', ["Both Soylents Ham and Cheese are made of people!"])
_testTag(k, 'unicode support', u"ÔÇÉϺÃ", [u"Hey, you speak Chinese! ÔÇÉϺÃ"])
_testTag(k, 'uppercase', 'test uppercase', ["The Last Word Should Be UPPERCASE"])
_testTag(k, 'version', 'test version', ["PyAIML is version %s" % k.version()])
_testTag(k, 'whitespace preservation', 'test whitespace', ["Extra Spaces\n Rule! (but not in here!) But Here They Do!"])
# Report test results
print "--------------------"
if _numTests == _numPassed:
print "%d of %d tests passed!" % (_numPassed, _numTests)
else:
print "%d of %d tests passed (see above for detailed errors)" % (_numPassed, _numTests)
# Run an interactive interpreter
#print "\nEntering interactive mode (ctrl-c to exit)"
#while True: print k.respond(raw_input("> "))
|
bsd-2-clause
| -5,386,696,871,265,022,000
| 38.543074
| 139
| 0.589312
| false
| 4.289418
| true
| false
| false
|
jsa4000/OpenGL-Python
|
zero/core/engine.py
|
1
|
4721
|
import time
from .base import Thread
from .controllers import DisplayController, DeviceController
from ..system import InputManager, SceneManager, RenderManager
__all__ = ['CoreEngine']
class CoreEngine(Thread):
""" Core Engine Class
This class is the main loop of the process that will manage all
the scene like inputs, updates, rendering, physics, etc..
"""
@property
def display(self):
""" Return display controller
"""
return self._display
@property
def device(self):
""" Return device controller
"""
return self._device
@property
def render(self):
""" Return render controller
"""
return self._render
@property
def scene(self):
""" Get current Scene Graph
"""
return self._scene
@scene.setter
def scene(self, value):
""" This will set the new Scene Graph to render.
"""
self._scene = value
def __init__(self, display, device, render, scene, fps=60):
""" Contructor for the class
This class is the main loop for the Engine. In this class all the
Managers and workers will be created.
        Devices or controllers that will be used for the engine. They
will take the Scene Graph and perform the work that corresponds
i.e. input, update, physics, render etc.
Controllers are used for the cases where more devices or drivers are
        used, for example in cases of displays or devices, where it can be used
more than one device at the same time. Also it can be used for rendering
where depending on the type of rendering it could be used one or more
rendering types, like opengl, directx, ray casting, etc..
Also the engine will initialize the Display and do the calls to
the display driver so the Scene could be rendered properly.
Parameters:
        display: controller that will be used to display. The admitted
classes will be :DisplayController or Display
device: controller or device that will be used to interact with the
        user by the Human User Devices (HUD). The admitted classes are:
DeviceController or any of the devices associated with it that allows
get_events operation, like KeyboardDevice, MouseDevice, etc..
        render: controller that will be used for the engine. The render controller
will manage all the interface between the engine and the drivers being
used.
scene: This object will contain the whole scene with all the entities
        and components. The catalogueManager.Instance() is storing all this
information in the creation and bindings between entities and components.
fps: frames-per-second the engine will use.
"""
super(CoreEngine,self).__init__()
        # Initialize parameters
self._display = display
self._device = device
self._render = render
self._scene = scene
self._fps = fps
# Initialize the variables for the Managers
self._input_manager = None
self._scene_manager = None
self._render_manager = None
def __del__(self):
""" Clean up the memory
"""
# Call threadBase __del__
super(CoreEngine,self).__del__()
def init(self):
""" Initialize all the Managers at start
"""
self._input_manager = InputManager(self).init()
self._scene_manager = SceneManager(self).init()
self._render_manager = RenderManager(self).init()
# Return itself for Cascade
return self
# Override
def _process(self):
""" Main process running the engine
Basically the overal loop will be: Input, Update and Render
"""
# Display must be created in the same context (thread) as OpenGL
self.display.init()
# Start the Main loop for the program
while self.running:
# Process Inputs from the user
self._input_manager.run(False)
# Update Scene, Physics, Logic and solvers
self._scene_manager.run()
# Finally render the scene
self._render_manager.run()
            # Throttle the loop to the configured frames-per-second
            time.sleep(1.0 / self._fps)
# Update the display
self.display.update()
# Set running to false
self._running = False
def stop(self, close=False):
"""This method force to Stops the engine and close the window
"""
super(CoreEngine,self).stop()
        # Close all the windows and dispose
self.display.close(True)
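# Illustrative sketch (not part of the original module): wiring the engine up with
# the controllers imported above.  The controller/scene arguments and the start()
# call are assumptions -- their real construction and the Thread base interface
# live elsewhere in this package.
def _example_boot(display, device, render, scene):
    engine = CoreEngine(display, device, render, scene, fps=60).init()
    engine.start()  # assuming the custom Thread base class exposes start()
    return engine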
|
apache-2.0
| -1,644,156,547,459,216,400
| 31.115646
| 81
| 0.61703
| false
| 4.75428
| false
| false
| false
|
sky111144/nicoBookworld
|
server/app/blueprint/home.py
|
1
|
12289
|
#!/usr/bin/python
#coding=utf-8
from flask import Blueprint,render_template,make_response,redirect,request,g,jsonify
from flask import session as flaskSession
from sqlalchemy import distinct,desc,or_
from app.model.base import User,Site,Novel,Shelf,Comment,Message
from novelSpider.task import createDownloader
def object_to_dict(data, flag):
if flag == 'shelf':
result = {
'status': 'success',
'data': []
}
for novel in data:
result['data'].append({
'id': novel.id,
'name': novel.novelName,
'author': novel.novelAuthor,
'img': novel.novelImg,
'intro': novel.novelIntro,
'lastUpdate': novel.lastUpdate,
})
elif flag == 'novel':
charpts = data['charpts']
info = data['info']
result = {
'info': {
'id': info.id,
'name': info.novelName,
'author': info.novelAuthor,
'img': info.novelImg,
'lastUpdate': info.lastUpdate.charptName,
'type': info.novelType,
'intro': info.novelIntro
},
'charpts': []
}
for charpt in charpts:
result['charpts'].append({
'id': charpt.id,
'name': charpt.charptName
})
elif flag == 'charpt':
result = {
'id': data.id,
'name': data.charptName,
'content': data.charptContent
}
elif flag == 'search':
result = []
for novel in data:
result.append({
'id': novel.id,
'name': novel.novelName,
'author': novel.novelAuthor,
'img': novel.novelImg,
'intro': novel.novelIntro,
'lastUpdate': novel.lastUpdate
})
elif flag == 'comment':
result = {
'status': 'success',
'data': []
}
for comment in data:
result['data'].append({
'userId': comment[1].id,
'username': comment[1].username,
'novelId': comment[0].novelId,
'comment': comment[0].comment,
'time': comment[0].time
})
elif flag == 'message':
result = {
'status': 'success',
'data': []
}
for message in data:
result['data'].append({
'senderId': message[0].senderId,
'receiverId': message[1].id,
'receiverName': message[1].username,
'message': message[0].message,
'time': message[0].time
})
elif flag == 'userMessage':
result = {
'status': 'success',
'data': []
}
for message in data:
result['data'].append({
'userId': message[1].id,
'username': message[1].username,
'message': message[0].message,
'time': message[0].time
})
elif flag == 'userComment':
result = {
'status': 'success',
'data': []
}
for comment in data:
result['data'].append({
'novelId': comment[1].id,
'novelName': comment[1].novelName,
'comment': comment[0].comment,
'time': comment[0].time
})
return result
homeBlueprint = Blueprint(
'home',
__name__
)
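# Illustrative sketch (not part of the original module): how this blueprint is
# typically mounted on a Flask application elsewhere in the project (the URL
# prefix is an assumption).
def _example_register(app):
    app.register_blueprint(homeBlueprint, url_prefix='/api')
    return app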
@homeBlueprint.route('/novel/list/<int:novelNum>')
def novelList(novelNum):
novel = g.dbSession.query(Site).limit(novelNum)
return jsonify(object_to_dict(novel, 'shelf'))
@homeBlueprint.route('/shelf')
def shelf():
userId = request.cookies.get('userId')
shelf = g.dbSession.query(Site).join(Shelf, Site.id == Shelf.novelId).filter(Shelf.userId == userId).all()
return jsonify(object_to_dict(shelf, 'shelf'))
@homeBlueprint.route('/novel/<int:id>')
def novel(id):
data = {}
charpts = g.dbSession.query(Novel).filter_by(novelId=id).all()
page = request.values.get('page')
size = request.values.get('size')
if page is not None and size is not None:
page = int(request.values.get('page'))
size = int(request.values.get('size'))
data['charpts'] = charpts[(page-1)*size:page*size]
else :
data['charpts'] = charpts
data['info'] = g.dbSession.query(Site).filter_by(id=id).first()
data['info'].lastUpdate = charpts[-1]
return jsonify(object_to_dict(data, 'novel'))
@homeBlueprint.route('/novel/<int:id>/<int:charptId>')
def charpt(id, charptId):
novel = g.dbSession.query(Novel).filter_by(id=charptId, novelId=id).first()
return jsonify(object_to_dict(novel, 'charpt'))
@homeBlueprint.route('/search', methods=['GET'])
def search():
query = request.args.get('query')
if query != '':
novel = g.dbSession.query(Site).filter(
or_(Site.novelName.like('%%%s%%'%query))
).all()
return jsonify(object_to_dict(novel, 'search'))
else:
return jsonify({
'status': 'fail',
'msg': '搜索失败',
'data': []
})
@homeBlueprint.route('/user/<int:userId>', methods=['GET'])
def userInfo(userId):
userInfo = g.dbSession.query(User).filter_by(id=userId).first()
return jsonify({
'status': 'success',
'msg': '获取用户信息成功',
'data': {
'username': userInfo.username,
'id': userInfo.id
}
})
# 查询用户个人评论
@homeBlueprint.route('/user/comment', methods=['GET'])
def userComment():
userId = request.cookies.get('userId')
comments = g.dbSession.query(Comment,Site).join(Site, Comment.novelId==Site.id).filter(
Comment.userId==userId
).all()
return jsonify(object_to_dict(comments,'userComment'))
# 查询用户个人私信
@homeBlueprint.route('/user/message', methods=['GET'])
def userMessage():
userId = request.cookies.get('userId')
messages = g.dbSession.query(Message,User).join(User, Message.receiverId==User.id).filter(
Message.senderId==userId
).all()
return jsonify(object_to_dict(messages,'userMessage'))
@homeBlueprint.route('/message/<int:userId>', methods=['POST', 'GET'])
def message(userId):
senderId = request.cookies.get('userId')
if request.method == 'POST':
message = request.get_json().get('message')
g.dbSession.add(Message(
senderId=senderId,
receiverId=userId,
message=message,
time=g.time
))
g.dbSession.commit()
return jsonify({
'status': 'success',
'msg': '私信成功'
})
elif request.method == 'GET':
messages = g.dbSession.query(Message,User).join(User, User.id==Message.receiverId).filter(
Message.senderId==senderId,
Message.receiverId==userId
).all()
return jsonify(object_to_dict(messages,'message'))
@homeBlueprint.route('/comment/<int:novelId>', methods=['POST', 'GET'])
def comment(novelId):
userId = request.cookies.get('userId')
if request.method == 'POST':
comment = request.get_json().get('comment')
g.dbSession.add(Comment(
userId=userId,
novelId=novelId,
comment=comment,
time=g.time
))
g.dbSession.commit()
return jsonify({
'status': 'success',
'msg': '评论成功'
})
elif request.method == 'GET':
comments = g.dbSession.query(Comment,User).join(User, User.id==Comment.userId).filter(
Comment.novelId==novelId
).all()
return jsonify(object_to_dict(comments,'comment'))
@homeBlueprint.route('/collect/<int:novelId>', methods=['GET'])
def collectNovel(novelId):
userId = request.cookies.get('userId')
if userId is not None:
userId = int(userId)
isCollected = g.dbSession.query(Shelf).filter_by(
novelId=novelId,
userId=userId
).count()
result = jsonify({
'status': 'fail',
'msg': '收藏失败'
})
if isCollected == 0 and userId is not None:
g.dbSession.add(Shelf(
novelId=novelId,
userId=userId
))
g.dbSession.commit()
result = jsonify({
'status': 'success',
'msg': '收藏成功'
})
return result
@homeBlueprint.route('/register', methods=['POST'])
def register():
username = request.get_json().get('username')
password = request.get_json().get('password')
email = request.get_json().get('email')
result = jsonify({
'status': 'fail',
'msg': '注册失败'
})
    if username is None or password is None or email is None:
return result
user = g.dbSession.query(User).filter_by(email=email).all()
    isNewUser = len(user) == 0
    if isNewUser:
g.dbSession.add(User(
username=username,
password=password,
email=email
))
result = jsonify({
'status': 'success',
'msg': '注册成功',
'data': {
'username': username
}
})
flaskSession['username'] = username
g.dbSession.commit()
res = make_response(result)
return res
@homeBlueprint.route('/login', methods=['POST'])
def login():
username = request.get_json().get('username')
password = request.get_json().get('password')
user = g.dbSession.query(User).filter_by(username=username,password=password).all()
isIllegal = len(user) == 1
result = {
'status': 'fail',
'msg': '登录失败'
}
if isIllegal:
flaskSession['username'] = username
result = {
'status': 'success',
'msg': '登录成功',
'data': {
'userId': user[0].id,
'username': username
}
}
res = make_response(jsonify(result))
if isIllegal:
res.set_cookie('isLogin', 'true', expires=g.expires)
res.set_cookie('username', username, expires=g.expires)
res.set_cookie('userId', str(user[0].id), expires=g.expires)
return res
@homeBlueprint.route('/logout', methods=['POST'])
def logout():
if 'username' in flaskSession:
flaskSession['username'] = None
res = make_response(jsonify({
'status': 'success',
'msg': '退出成功'
}))
res.set_cookie('username', '')
res.set_cookie('userId', '')
return res
@homeBlueprint.route('/changePassword', methods=['POST'])
def changePassword():
oldPassword = request.get_json().get('oldPassword')
newPassword = request.get_json().get('newPassword')
username = request.get_json().get('username')
isUserself = g.dbSession.query(User).filter_by(username=username,password=oldPassword).count()
result = {
'status': 'fail',
'msg': '修改失败'
}
if isUserself == 1:
g.dbSession.query(User).filter_by(username=username).update({
User.password: newPassword
})
g.dbSession.commit()
result = {
'status': 'success',
'msg': '修改成功'
}
return jsonify(result)
@homeBlueprint.route('/novel/count')
def novelCount():
count = g.dbSession.query(Site).count()
return jsonify({
'status': 'success',
'data': {
'count': count
}
})
@homeBlueprint.route('/charpt/count')
def charptCount():
count = g.dbSession.query(Novel).count()
return jsonify({
'status': 'success',
'data': {
'count': count
}
})
@homeBlueprint.route('/task/getCharptList', methods=['GET'])
def getCharptList():
downloader = createDownloader()
downloader.getCharptList(1)
return jsonify({
'status': 'success'
})
@homeBlueprint.route('/task/getCharptContent', methods=['GET'])
def getCharptContent():
downloader = createDownloader()
downloader.getCharptContent(charptNum=1)
return jsonify({
'status': 'success'
})
|
apache-2.0
| 5,621,720,612,245,947,000
| 29.982143
| 110
| 0.545492
| false
| 3.60172
| false
| false
| false
|
jepler/linuxcnc-mirror
|
configs/by_machine/plasmac/pmx485.py
|
4
|
9722
|
#!/usr/bin/env python2
'''
pmx485.py
Copyright (C) 2019 2020 Phillip A Carter
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
import sys
import hal
import time
import serial
address = '01'
regRead = '04'
regWrite = '06'
rCurrent = '2094'
rCurrentMax = '209A'
rCurrentMin = '2099'
rFault = '2098'
rMode = '2093'
rPressure = '2096'
rPressureMax = '209D'
rPressureMin = '209C'
validRead = '0402'
started = False
errorCount = 0
# create pmx485 component
pmx485 = hal.component('pmx485')
pmx485.newpin('mode_set', hal.HAL_FLOAT, hal.HAL_IN) #set cutting mode
pmx485.newpin('current_set', hal.HAL_FLOAT, hal.HAL_IN) #set cutting current
pmx485.newpin('pressure_set', hal.HAL_FLOAT, hal.HAL_IN) #set gas pressure
pmx485.newpin('enable', hal.HAL_BIT, hal.HAL_IN) #enabler
pmx485.newpin('mode', hal.HAL_FLOAT, hal.HAL_OUT) #cut mode feedback
pmx485.newpin('current', hal.HAL_FLOAT, hal.HAL_OUT) #cutting current feedback
pmx485.newpin('pressure', hal.HAL_FLOAT, hal.HAL_OUT) #gas pressure feedback
pmx485.newpin('fault', hal.HAL_FLOAT, hal.HAL_OUT) #fault code
pmx485.newpin('status', hal.HAL_BIT, hal.HAL_OUT) #connection status out
pmx485.newpin('current_min', hal.HAL_FLOAT, hal.HAL_OUT) #minimum allowed current
pmx485.newpin('current_max', hal.HAL_FLOAT, hal.HAL_OUT) #maximum allowed current
pmx485.newpin('pressure_min', hal.HAL_FLOAT, hal.HAL_OUT) #minimum allowed gas pressure
pmx485.newpin('pressure_max', hal.HAL_FLOAT, hal.HAL_OUT) #maximum allowed gas pressure
pmx485.ready()
enabled = pmx485.enable
# connection setup
comPort = sys.argv[1]
try:
comms = serial.Serial(comPort,
baudrate = 19200,
bytesize = 8,
parity = 'E',
stopbits = 1,
timeout = 0.1
)
except:
    print('\nCould not open {} for Powermax communications\n'.format(comPort))
raise SystemExit
# get the checksum
def get_lrc(data):
lrc = 0
for i in xrange(0, len(data), 2):
a, b = data[i:i+2]
try:
lrc = (lrc + int(a + b, 16)) & 255
except:
return '00'
lrc = ('{:02X}'.format((((lrc ^ 255) + 1) & 255))).upper()
return lrc
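# For example, the read request for the cutting-current register built by
# read_register() below is '01' + '04' + '2094' + '0001' = '010420940001',
# for which get_lrc('010420940001') returns '46' (two's-complement LRC).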
# write data to register
def write_register(reg, value):
data = '{}{}{}{}'.format(address, regWrite, reg, value)
if len(data) == 12:
lrc = get_lrc(data)
packet = ':{}{}\r\n'.format(data, lrc)
reply = ''
comms.write(packet)
reply = comms.readline()
if reply:
if reply == packet:
return 1
return 0
# read data from register
def read_register(reg):
data = '{}{}{}0001'.format(address, regRead, reg)
if len(data) == 12:
lrc = get_lrc(data)
packet = ':{}{}\r\n'.format(data, lrc)
reply = ''
comms.write(packet)
reply = comms.readline()
if reply:
if len(reply) == 15 and reply[:7] == ':{}{}'.format(address, validRead):
lrc = get_lrc(reply[1:11])
if lrc == reply[11:13]:
return reply[7:11]
return 0
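# Worked example (hypothetical reply): read_register(rCurrent) sends
# ':01042094000146\r\n'. A valid reply such as ':0104020C806D\r\n' passes the
# length and LRC checks above and returns '0C80', i.e. 0x0C80 = 3200, which the
# main loop below converts to 3200 / 64.0 = 50.0 A.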
# set machine to local mode
def close_machine():
mode = write_register(rMode, '{:04X}'.format(0))
current = write_register(rCurrent, '{:04X}'.format(0))
pressure = write_register(rPressure, '{:04X}'.format(0))
# set machine to remote mode
def open_machine():
# set mode
mode = write_register(rMode, '{:04X}'.format(int(pmx485.mode_set)))
# set current
current = write_register(rCurrent, '{:04X}'.format(int(pmx485.current_set * 64.0)))
# set pressure
pressure = write_register(rPressure, '{:04X}'.format(int(pmx485.pressure_set * 128.0)))
if mode and current and pressure:
return True
else:
return False
# get settings limits
def get_limits():
# get minimum current setting
cMin = read_register(rCurrentMin)
if cMin:
pmx485.current_min = round(int(cMin, 16) / 64.0, 1)
# get maximum current setting
cMax = read_register(rCurrentMax)
if cMax:
pmx485.current_max = round(int(cMax, 16) / 64.0, 1)
# get minimum pressure setting
pMin = read_register(rPressureMin)
if pMin:
pmx485.pressure_min = round(int(pMin, 16) / 128.0, 1)
# get maximum pressure setting
pMax = read_register(rPressureMax)
if pMax:
pmx485.pressure_max = round(int(pMax, 16) / 128.0, 1)
if cMin and cMax and pMin and pMax:
# debugging
# print('\nPowermax Settings:')
# print(' Mode Force = {}'.format(int(pmx485.mode_set)))
# print(' Current Force = {}'.format(int(pmx485.current_set)))
# print('Pressure Force = {}'.format(int(pmx485.pressure_set)))
# print(' Current Min = {}'.format(pmx485.current_min))
# print(' Current Max = {}'.format(pmx485.current_max))
# print(' Pressure Min = {}'.format(pmx485.pressure_min))
# print(' Pressure Max = {}\n'.format(pmx485.pressure_max))
return True
else:
return False
# main loop
try:
while 1:
if hal.component_exists('plasmac_run'):
if enabled != pmx485.enable:
enabled = pmx485.enable
if not enabled:
close_machine()
comms.close()
pmx485.status = False
started = False
if enabled:
if not started:
if not comms.isOpen():
comms.open()
if open_machine():
started = True
if started and get_limits():
started = True
else:
started = False
else:
# set mode
if pmx485.mode_set != pmx485.mode:
mode = write_register(rMode, '{:04X}'.format(int(pmx485.mode_set)))
if mode:
pmx485.mode = pmx485.mode_set
get_limits()
# get mode
else:
mode = read_register(rMode)
if mode:
pmx485.mode = int(mode, 16)
# set current
if pmx485.current_set != round(pmx485.current, 1):
current = write_register(rCurrent, '{:04X}'.format(int(pmx485.current_set * 64)))
if current:
pmx485.current = pmx485.current_set
# get current
else:
current = read_register(rCurrent)
if current:
pmx485.current = round(int(current, 16) / 64.0, 1)
# set pressure
if pmx485.pressure_set != round(pmx485.pressure, 1):
pressure = write_register(rPressure, '{:04X}'.format(int(pmx485.pressure_set * 128)))
if pressure:
pmx485.pressure = pmx485.pressure_set
# get pressure
else:
pressure = read_register(rPressure)
if pressure:
pmx485.pressure = round(int(pressure, 16) / 128.0, 1)
# get fault code
fault = read_register(rFault)
if fault:
pmx485.fault = int(fault, 16)
# set status
if mode and current and pressure and fault:
pmx485.status = True
errorCount = 0
else:
errorCount += 1
# debugging
# print('\nPMX485 STATUS ERROR #{}'.format(errorCount))
# if not mode:
# print(' Mode: set={:5.1f} get={:5.1f}'.format(pmx485.mode_set, pmx485.mode))
# if not current:
# print(' Current: set={:5.1f} get={:5.1f}'.format(pmx485.current_set, pmx485.current))
# if not pressure:
# print('Pressure: set={:5.1f} get={:5.1f}'.format(pmx485.pressure_set, pmx485.pressure))
# if not fault:
# print(' Fault: get={:5.1f}'.format(pmx485.fault))
if errorCount > 2:
print('Closing pmx485.py, error count exceeded')
errorCount = 0
comms.close()
pmx485.status = False
started = False
except:
print('Shutting down pmx485 communications')
if started:
if not comms.isOpen():
comms.open()
close_machine()
comms.close()
|
lgpl-2.1
| -3,219,997,986,610,003,500
| 37.275591
| 119
| 0.528595
| false
| 3.724904
| false
| false
| false
|
honzajavorek/python.cz
|
pythoncz/models/meetups.py
|
1
|
2352
|
from functools import lru_cache
from lxml import html
import requests
from slugify import slugify
__all__ = ('get_meetups',)
WIKI_URL = ('https://cs.wikipedia.org/wiki/'
'Seznam_m%C4%9Bst_v_%C4%8Cesku_podle_po%C4%8Dtu_obyvatel')
@lru_cache()
def get_meetups(lang='cs'):
return sort_by_city_size(scrape_meetups(lang))
def scrape_meetups(lang='cs'):
"""
    Ideally, pyvo.cz would have an API where we could get all this info. Let's
    assume that scraping the HTML page is a good enough API for now.
"""
url = 'https://pyvo.cz/en/' if lang == 'en' else 'https://pyvo.cz/'
res = requests.get(url, headers={'Accept-Charset': 'utf-8'})
res.raise_for_status()
root = html.fromstring(res.content.decode('utf-8'))
root.make_links_absolute(res.url)
for event in root.cssselect('#events .event'):
try:
yield {
'name': event.cssselect('h3')[0].text_content().strip(),
'url': event.cssselect('h3 a')[0].get('href'),
}
except IndexError:
continue
@lru_cache()
def scrape_cities():
res = requests.get(WIKI_URL)
res.raise_for_status()
root = html.fromstring(res.text)
rows = root.cssselect('.wikitable tbody tr')
return [row.cssselect('td')[1].text_content().strip() for row in rows[1:]]
def sort_by_city_size(meetups):
"""
Sorts given iterable of meetups by the size of the city. While pyvo.cz
lists the meetups according to when the closest event happens or happened,
this doesn't make sense for python.cz where the meetups are listed just
as a general overview. Also alphabetical sorting is pretty much just
confusing for the visitor. It only makes sense to sort the meetups by the
size of the city. The most populated cities have a larger probability
that the visitor of the page is close to them, thus they deserve to be
higher in the list.
"""
city_slugs = [slugify(city) + '-pyvo' for city in scrape_cities()]
# convert list [city1, city2, ...] into dict {city1: 0, city2: 1, ...}
city_slugs = {city: n for n, city in enumerate(city_slugs)}
city_slugs['hradec-pyvo'] = city_slugs['hradec-kralove-pyvo']
def key_func(meetup):
slug = meetup['url'].rstrip('/').split('/')[-1]
return city_slugs[slug]
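    # e.g. a meetup whose URL ends in 'brno-pyvo' (hypothetical slug) is keyed by
    # Brno's position in the population-ordered city list above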
return sorted(meetups, key=key_func)
|
mit
| -6,805,475,397,628,776,000
| 32.6
| 79
| 0.642007
| false
| 3.253112
| false
| false
| false
|
peoplepower/botlab
|
com.ppc.Microservices/intelligence/dailyreport/location_dailyreport_microservice.py
|
1
|
26353
|
'''
Created on November 20, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
import domain
import json
import utilities.utilities as utilities
import signals.analytics as analytics
# Section weights
WEIGHT_ALERTS = 0
WEIGHT_NOTES = 5
WEIGHT_TASKS = 10
WEIGHT_SLEEP = 15
WEIGHT_ACTIVITIES = 20
WEIGHT_MEALS = 25
WEIGHT_MEDICATION = 30
WEIGHT_BATHROOM = 35
WEIGHT_SOCIAL = 40
WEIGHT_MEMORIES = 45
WEIGHT_SYSTEM = 50
# Section ID's
SECTION_ID_ALERTS = "alerts"
SECTION_ID_NOTES = "notes"
SECTION_ID_TASKS = "tasks"
SECTION_ID_SLEEP = "sleep"
SECTION_ID_ACTIVITIES = "activities"
SECTION_ID_MEALS = "meals"
SECTION_ID_MEDICATION = "medication"
SECTION_ID_BATHROOM = "bathroom"
SECTION_ID_SOCIAL = "social"
SECTION_ID_MEMORIES = "memories"
SECTION_ID_SYSTEM = "system"
# Section Colors
SECTION_COLOR_ALERTS = "D0021B"
SECTION_COLOR_NOTES = "530F8B"
SECTION_COLOR_TASKS = "00AD9D"
SECTION_COLOR_SLEEP = "946C49"
SECTION_COLOR_ACTIVITIES = "27195F"
SECTION_COLOR_MEALS = "C1006E"
SECTION_COLOR_MEDICATION = "1E6601"
SECTION_COLOR_BATHROOM = "17A5F6"
SECTION_COLOR_SOCIAL = "B6B038"
SECTION_COLOR_MEMORIES = "600000"
SECTION_COLOR_SYSTEM = "787F84"
# Reasons why the occupancy status would have changed
REASON_ML = "ML"
REASON_USER = "USER"
# Timer references
TIMER_REFERENCE_ADVANCE_REPORTS = "new"
# State UI content address
DAILY_REPORT_ADDRESS = "dailyreport"
class LocationDailyReportMicroservice(Intelligence):
"""
Create a daily report
"""
def __init__(self, botengine, parent):
"""
Instantiate this object
:param parent: Parent object, either a location or a device object.
"""
Intelligence.__init__(self, botengine, parent)
# Timestamp at which the current report was created
self.current_report_ms = None
# Timestamp at which the home went into SLEEP mode
self.started_sleeping_ms = None
# Last report we emailed
self.last_emailed_report_ms = None
def initialize(self, botengine):
"""
Initialize
:param botengine: BotEngine environment
"""
if not hasattr(self, 'last_emailed_report_ms'):
self.last_emailed_report_ms = None
return
def destroy(self, botengine):
"""
This device or object is getting permanently deleted - it is no longer in the user's account.
:param botengine: BotEngine environment
"""
return
def mode_updated(self, botengine, current_mode):
"""
Mode was updated
:param botengine: BotEngine environment
:param current_mode: Current mode
:param current_timestamp: Current timestamp
"""
return
def occupancy_status_updated(self, botengine, status, reason, last_status, last_reason):
"""
AI Occupancy Status updated
:param botengine: BotEngine
:param status: Current occupancy status
:param reason: Current occupancy reason
:param last_status: Last occupancy status
:param last_reason: Last occupancy reason
"""
if 'SLEEP' in status and REASON_ML in reason and self.started_sleeping_ms is None:
# Started sleeping
self.started_sleeping_ms = botengine.get_timestamp()
if self.parent.get_relative_time_of_day(botengine) > 12.0:
# Went to sleep before midnight - send out the daily report now.
self.last_emailed_report_ms = self.current_report_ms
self.email_report(botengine)
if 'SLEEP' not in status and 'S2H' not in status and self.started_sleeping_ms is not None:
# Stopped sleeping
self.started_sleeping_ms = None
return
def device_measurements_updated(self, botengine, device_object):
"""
Device was updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
return
def device_metadata_updated(self, botengine, device_object):
"""
Evaluate a device that is new or whose goal/scenario was recently updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
return
def device_alert(self, botengine, device_object, alert_type, alert_params):
"""
Device sent an alert.
When a device disconnects, it will send an alert like this: [{u'alertType': u'status', u'params': [{u'name': u'deviceStatus', u'value': u'2'}], u'deviceId': u'eb10e80a006f0d00'}]
When a device reconnects, it will send an alert like this: [{u'alertType': u'on', u'deviceId': u'eb10e80a006f0d00'}]
:param botengine: BotEngine environment
:param device_object: Device object that sent the alert
:param alert_type: Type of alert
"""
return
def device_added(self, botengine, device_object):
"""
A new Device was added to this Location
:param botengine: BotEngine environment
:param device_object: Device object that is getting added
"""
return
def device_deleted(self, botengine, device_object):
"""
Device is getting deleted
:param botengine: BotEngine environment
:param device_object: Device object that is getting deleted
"""
return
def question_answered(self, botengine, question_object):
"""
The user answered a question
:param botengine: BotEngine environment
:param question_object: Question object
"""
return
def datastream_updated(self, botengine, address, content):
"""
Data Stream Message Received
:param botengine: BotEngine environment
:param address: Data Stream address
:param content: Content of the message
"""
if hasattr(self, address):
getattr(self, address)(botengine, content)
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
:param botengine: BotEngine environment
:param schedule_id: Schedule ID that is executing from our list of runtime schedules
"""
return
def timer_fired(self, botengine, argument):
"""
The bot's intelligence timer fired
:param botengine: Current botengine environment
:param argument: Argument applied when setting the timer
"""
return
def file_uploaded(self, botengine, device_object, file_id, filesize_bytes, content_type, file_extension):
"""
A device file has been uploaded
:param botengine: BotEngine environment
:param device_object: Device object that uploaded the file
:param file_id: File ID to reference this file at the server
:param filesize_bytes: The file size in bytes
:param content_type: The content type, for example 'video/mp4'
:param file_extension: The file extension, for example 'mp4'
"""
return
def coordinates_updated(self, botengine, latitude, longitude):
"""
Approximate coordinates of the parent proxy device object have been updated
:param latitude: Latitude
:param longitude: Longitude
"""
return
def user_role_updated(self, botengine, user_id, alert_category, location_access, previous_alert_category, previous_location_access):
"""
A user changed roles
:param botengine: BotEngine environment
:param user_id: User ID that changed roles
:param alert_category: User's current alert/communications category (1=resident; 2=supporter)
:param location_access: User's access to the location and devices. (0=None; 10=read location/device data; 20=control devices and modes; 30=update location info and manage devices)
:param previous_alert_category: User's previous category, if any
:param previous_location_access: User's previous access to the location, if any
"""
return
def midnight_fired(self, botengine, content=None):
"""
Data stream message - Midnight timer fired
:param botengine:
:param content:
:return:
"""
# If we haven't emailed the daily report yet because the person hasn't gone to sleep yet, email it now.
if self.current_report_ms is not None:
if self.last_emailed_report_ms != self.current_report_ms:
self.last_emailed_report_ms = self.current_report_ms
if "SLEEP" not in self.parent.occupancy_status and "VACATION" not in self.parent.occupancy_status:
self.add_entry(botengine, SECTION_ID_SLEEP, comment=_("Hasn't gone to sleep by midnight."), include_timestamp=True)
self.email_report(botengine)
# Create a new report
self.current_report_ms = self._get_todays_timestamp(botengine)
report = {}
name = self._get_resident_name(botengine)
if name is not None:
report['title'] = name.upper()
else:
report['title'] = _("DAILY REPORT")
report['subtitle'] = _("Daily Report for {}").format(self.parent.get_local_datetime(botengine).strftime("%A %B %-d, %Y"))
report['created_ms'] = botengine.get_timestamp()
report['sections'] = []
self.parent.set_location_property_separately(botengine, DAILY_REPORT_ADDRESS, report, overwrite=True, timestamp_ms=self.current_report_ms)
analytics.track(botengine,
self.parent,
"daily_report_initialized",
properties={
"timestamp_ms": self.current_report_ms
})
# Add our first entry if possible.
if self.started_sleeping_ms is not None and "SLEEP" in self.parent.occupancy_status:
self.add_entry(botengine, SECTION_ID_SLEEP, comment=_("Went to sleep."), subtitle=_("Currently sleeping."), include_timestamp=True, timestamp_override_ms=self.started_sleeping_ms)
def daily_report_entry(self, botengine, content):
"""
Data stream message to add content to our daily report
:param botengine: BotEngine environment
:param content: Data Stream Content
:return:
"""
botengine.get_logger().info("location_dailyreport_microservice: 'daily_report_entry' data stream message received.")
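        # Example (hypothetical) data stream content handled by this method:
        #   {"section_id": "sleep", "comment": "Woke up.", "include_timestamp": True}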
if 'section_id' not in content:
botengine.get_logger().error("location_dailyreport_microservice: Section ID not found in data stream message {}".format(content))
return
section_id = content['section_id']
comment = None
subtitle = None
identifier = None
include_timestamp = False
timestamp_override_ms = None
if 'comment' in content:
comment = content['comment']
if 'subtitle' in content:
subtitle = content['subtitle']
if 'identifier' in content:
identifier = content['identifier']
if 'include_timestamp' in content:
include_timestamp = content['include_timestamp']
if 'timestamp_override_ms' in content:
timestamp_override_ms = content['timestamp_override_ms']
self.add_entry(botengine, section_id, comment=comment, subtitle=subtitle, identifier=identifier, include_timestamp=include_timestamp, timestamp_override_ms=timestamp_override_ms)
def add_entry(self, botengine, section_id, comment=None, subtitle=None, identifier=None, include_timestamp=False, timestamp_override_ms=None):
"""
Add a section and bullet point the current daily report
:param botengine: BotEngine environment
:param comment: Comment like "Woke up."
:param subtitle: Subtitle comment like "Consistent sleep schedule and good quality sleep last night."
:param identifier: Optional identifier to come back and edit this entry later.
:param include_timestamp: True to include a timestamp like "7:00 AM - <comment>" (default is False)
:param timestamp_override_ms: Optional timestamp in milliseconds to override the current time when citing the timestamp with include_timestamp=True
"""
botengine.get_logger().info("location_dailyreport_microservice.add_entry(): Current report timestamp is {}".format(self.current_report_ms))
# Make sure our midnight schedule fired properly.
# We added a 1 hour buffer for backwards compatibility, because the self.current_report_ms was previously being set to the current botengine.get_timestamp()
# which was some time after midnight.
if self.current_report_ms is None:
self.midnight_fired(botengine)
if self._get_todays_timestamp(botengine) < (self.current_report_ms - utilities.ONE_HOUR_MS):
self.midnight_fired(botengine)
report = botengine.get_ui_content(DAILY_REPORT_ADDRESS, timestamp_ms=self.current_report_ms)
if report is None:
botengine.get_logger().info("location_dailyreport_microservice: There is currently no active daily report.")
self.midnight_fired(botengine)
report = botengine.get_ui_content(DAILY_REPORT_ADDRESS, self.current_report_ms)
if report is None:
return
else:
botengine.get_logger().info("location_dailyreport_microservice: Successfully created and loaded a new report.")
else:
botengine.get_logger().info("location_dailyreport_microservice: Successfully loaded an existing report.")
focused_section = self._get_section_object(botengine, report, section_id)
if focused_section is None:
botengine.get_logger().info("location_dailyreport_microservice: Need to create a new section for section_id '{}'.".format(section_id))
if section_id == SECTION_ID_ALERTS:
focused_section = {
"weight": WEIGHT_ALERTS,
"id": SECTION_ID_ALERTS,
"title": _("Today's Alerts"),
"icon": "comment-exclamation",
"color": SECTION_COLOR_ALERTS,
"items": []
}
elif section_id == SECTION_ID_NOTES:
focused_section = {
"weight": WEIGHT_NOTES,
"id": SECTION_ID_NOTES,
"title": _("Today's Notes"),
"icon": "clipboard",
"color": SECTION_COLOR_NOTES,
"items": []
}
elif section_id == SECTION_ID_TASKS:
focused_section = {
"weight": WEIGHT_TASKS,
"id": SECTION_ID_TASKS,
"title": _("Today's Tasks"),
"icon": "clipboard-list-check",
"color": SECTION_COLOR_TASKS,
"items": []
}
elif section_id == SECTION_ID_SLEEP:
focused_section = {
"weight": WEIGHT_SLEEP,
"id": SECTION_ID_SLEEP,
"title": _("Sleep"),
"icon": "moon",
"color": SECTION_COLOR_SLEEP,
"items": []
}
elif section_id == SECTION_ID_BATHROOM:
focused_section = {
"weight": WEIGHT_BATHROOM,
"id": SECTION_ID_BATHROOM,
"title": _("Bathroom"),
"icon": "toilet",
"color": SECTION_COLOR_BATHROOM,
"items": []
}
elif section_id == SECTION_ID_ACTIVITIES:
focused_section = {
"weight": WEIGHT_ACTIVITIES,
"id": SECTION_ID_ACTIVITIES,
"title": _("Activities"),
"icon": "walking",
"color": SECTION_COLOR_ACTIVITIES,
"items": []
}
elif section_id == SECTION_ID_MEALS:
focused_section = {
"weight": WEIGHT_MEALS,
"id": SECTION_ID_MEALS,
"title": _("Meals"),
"icon": "utensils",
"color": SECTION_COLOR_MEALS,
"items": []
}
elif section_id == SECTION_ID_MEDICATION:
focused_section = {
"weight": WEIGHT_MEDICATION,
"id": SECTION_ID_MEDICATION,
"title": _("Medication"),
"icon": "pills",
"color": SECTION_COLOR_MEDICATION,
"items": []
}
elif section_id == SECTION_ID_SOCIAL:
focused_section = {
"weight": WEIGHT_SOCIAL,
"id": SECTION_ID_SOCIAL,
"title": _("Social"),
"icon": "user-friends",
"color": SECTION_COLOR_SOCIAL,
"items": []
}
elif section_id == SECTION_ID_MEMORIES:
focused_section = {
"weight": WEIGHT_MEMORIES,
"id": SECTION_ID_MEMORIES,
"title": _("Memories"),
"icon": "camera-retro",
"color": SECTION_COLOR_MEMORIES,
"items": []
}
elif section_id == SECTION_ID_SYSTEM:
focused_section = {
"weight": WEIGHT_SYSTEM,
"id": SECTION_ID_SYSTEM,
"title": _("System Status"),
"icon": "brain",
"color": SECTION_COLOR_SYSTEM,
"items": []
}
else:
botengine.get_logger().error("location_dailyreport_microservice: Unknown section '{}'".format(section_id))
return
if 'sections' not in report:
report['sections'] = []
report['sections'].append(focused_section)
report['sections'] = sorted(report['sections'], key=lambda k: k['weight'])
if comment is not None or identifier is not None:
if include_timestamp and comment is not None:
if timestamp_override_ms is not None:
dt = self.parent.get_local_datetime_from_timestamp(botengine, timestamp_override_ms)
else:
dt = self.parent.get_local_datetime(botengine)
if section_id == SECTION_ID_SLEEP:
# Sleep timestamps include the day
comment = "{} - {}".format(dt.strftime("%-I:%M %p %A"), comment)
else:
# Other timestamps don't include the day
comment = "{} - {}".format(dt.strftime("%-I:%M %p"), comment)
if identifier is None and comment is not None:
ts = botengine.get_timestamp()
if timestamp_override_ms is not None:
ts = timestamp_override_ms
focused_item = {
"timestamp_ms": ts,
"comment": comment
}
focused_section['items'].append(focused_item)
focused_section['items'] = sorted(focused_section['items'], key=lambda k: k['timestamp_ms'])
else:
# Try to overwrite any previous entry with this identifier
focused_item = None
for item in focused_section['items']:
if 'id' in item:
if item['id'] == identifier:
focused_item = item
if focused_item is not None:
# Edit the item in place
if comment is not None:
# Modify the item
ts = botengine.get_timestamp()
if timestamp_override_ms is not None:
ts = timestamp_override_ms
focused_item['timestamp_ms'] = ts
focused_item['comment'] = comment
focused_section['items'] = sorted(focused_section['items'], key=lambda k: k['timestamp_ms'])
else:
# Delete the item
focused_section['items'].remove(focused_item)
focused_section['items'] = sorted(focused_section['items'], key=lambda k: k['timestamp_ms'])
if len(focused_section['items']) == 0:
# Delete the entire section
report['sections'].remove(focused_section)
else:
# Add the item
ts = botengine.get_timestamp()
if timestamp_override_ms is not None:
ts = timestamp_override_ms
focused_item = {
"timestamp_ms": ts,
"comment": comment,
"id": identifier
}
focused_section['items'].append(focused_item)
focused_section['items'] = sorted(focused_section['items'], key=lambda k: k['timestamp_ms'])
if subtitle is not None:
# Manually defined subtitle for this section
focused_section['subtitle'] = subtitle
else:
# Auto-generated subtitles for specific sections that support it
if section_id == SECTION_ID_NOTES:
if len(focused_section['items']) == 0:
focused_section['subtitle'] = _("No notes captured today.")
elif len(focused_section['items']) == 1:
focused_section['subtitle'] = _("Captured one note today.")
elif len(focused_section['items']) > 1:
focused_section['subtitle'] = _("Captured {} notes today.").format(len(focused_section['items']))
elif section_id == SECTION_ID_TASKS:
if len(focused_section['items']) == 0:
focused_section['subtitle'] = _("No tasks updated today.")
elif len(focused_section['items']) == 1:
focused_section['subtitle'] = _("Updated one task today.")
elif len(focused_section['items']) > 1:
focused_section['subtitle'] = _("Updated {} tasks today.").format(len(focused_section['items']))
elif section_id == SECTION_ID_MEDICATION:
if len(focused_section['items']) == 0:
focused_section['subtitle'] = _("No medication accessed today.")
elif len(focused_section['items']) == 1:
focused_section['subtitle'] = _("Accessed medicine once today.")
elif len(focused_section['items']) > 1:
focused_section['subtitle'] = _("Accessed medicine {} times today.").format(len(focused_section['items']))
elif section_id == SECTION_ID_BATHROOM:
if len(focused_section['items']) == 0:
focused_section['subtitle'] = _("No bathroom visits observed today.")
elif len(focused_section['items']) == 1:
focused_section['subtitle'] = _("Visited the bathroom once today.")
elif len(focused_section['items']) > 1:
focused_section['subtitle'] = _("Visited the bathroom {} times today.").format(len(focused_section['items']))
self.parent.set_location_property_separately(botengine, DAILY_REPORT_ADDRESS, report, overwrite=True, timestamp_ms=self.current_report_ms)
def email_report(self, botengine):
"""
Email the current report
:param botengine:
:return:
"""
return
def _get_section_object(self, botengine, report, section_id):
"""
Find and return a section object out of all the sections in the report dictionary that is passed in
:param botengine:
:param report: report dictionary object
:param section_id: section ID to return
:return: section object dictionary, or None if it doesn't exist
"""
if report is not None:
if 'sections' in report:
for section in report['sections']:
if section['id'] == section_id:
return section
return None
def _get_resident_name(self, botengine):
"""
Get the name of the resident in a way that we can use this in a sentence
:param botengine:
:return:
"""
residents = botengine.get_location_user_names(to_residents=True, to_supporters=False, sms_only=False)
name = ""
if len(residents) == 0:
# Nobody lives here, nothing to do
return None
elif len(residents) == 1:
name = "{} {}".format(residents[0]['firstName'], residents[0]['lastName']).strip()
elif len(residents) == 2:
a = _("and")
# a and b
name = "{} {} {}".format(residents[0]['firstName'], a, residents[1]['firstName'])
elif len(residents) > 2:
# So, we only list 3 names max just because we don't want to waste a ton of SMS space.
a = _("and")
# a, b, and c
name = "{}, {}, {} {}".format(residents[0]['firstName'], residents[1]['firstName'], a, residents[2]['firstName'])
return name
def _get_todays_timestamp(self, botengine):
"""
Get the timestamp for midnight last night
:param botengine:
:return:
"""
return self.parent.timezone_aware_datetime_to_unix_timestamp(botengine, self.parent.get_midnight_last_night(botengine))
|
apache-2.0
| 9,092,760,513,962,145,000
| 39.111111
| 191
| 0.567222
| false
| 4.227302
| false
| false
| false
|
miumok98/weblate
|
weblate/billing/admin.py
|
1
|
1644
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from weblate.billing.models import Plan, Billing
class PlanAdmin(admin.ModelAdmin):
list_display = (
'name', 'price', 'limit_strings', 'limit_languages',
'limit_repositories', 'limit_projects',
)
class BillingAdmin(admin.ModelAdmin):
list_display = (
'user', 'plan',
'list_projects',
'count_repositories', 'count_strings', 'count_words',
'count_languages',
'in_limits',
)
list_filter = ('plan', )
search_fields = ('user__username', 'projects__name')
def list_projects(self, obj):
return u','.join(obj.projects.values_list('name', flat=True))
list_projects.short_description = _('Projects')
admin.site.register(Plan, PlanAdmin)
admin.site.register(Billing, BillingAdmin)
|
gpl-3.0
| -1,565,841,120,037,533,000
| 31.176471
| 71
| 0.69287
| false
| 3.68764
| false
| false
| false
|
smithchristian/arcpy-create-base-dataset
|
supportingModules/pln.py
|
1
|
2508
|
# ----------------------------------------------------------------------------
# Name: pln.py (Planning.py)
# Purpose: This module contains variables for the construction
# of a planning dataset. This module is to be used in
# conjunction with create-Base-DataSet/main.py.
# Description
# and Examples: Regulatory planning data: Regional plans, LGA planning
# schemes, Zoning, Strategic Plan data, Growth Management,
# Zone boundaries.
#
# Author: Christian Fletcher Smith
#
# Created: 10/02/2015
# Copyright: (c) smithc5 2015
# Version: 2
# -----------------------------------------------------------------------------
# This is the name for the planning dataset
PLN_GDB_NAME = "Planning.gdb"
'''
The following information outlines the variable structure for each feature
in order to be used correctly within main.py.
NOTE: The * used in the information below is to indicate a user defined
name.
Feature variable structure:
# Layer Name ----------------------------------------------------------
* -- This is the source location of the layer to be clipped.
*_FC_NAME -- This is the .gdb name and feature class name for the layer to
be used. The user only needs to populate text after the '{}\', as
'{}\' is formatted to use the variable ADM_GDB_NAME.
*_ALIAS -- This is the alias name to be displayed within ArcGIS.
*_DIC -- The dictionary is used to store all the features variables which
will be imported into main.py as required.
example:
# Planning Zones -----------------------------------
PLNZONE = r"D:\Planning\PlanningZones.shp"
        PLNZONE_FC_NAME = "{}\Planning_Zones".format(PLN_GDB_NAME)
PLNZONE_ALIAS = "Planning Zones"
PLNZONE_DIC = {"source_location": PLNZONE,
"output_name": PLNZONE_FC_NAME,
"alias": PLNZONE_ALIAS}
'''
# TODO: need to add in layer variables
# ----------------------------------------------------------------------------
# DO NOT ADD LAYER VARIABLES BELOW THIS LINE!
#
# The following list comprehension is designed to compile all the dictionaries
# from the above layers into a single list. This list is imported into main.py
# when required.
# ----------------------------------------------------------------------------
PLN_DIC_LIST = [val for name, val in globals().items() if name.endswith('_DIC')]
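# For example, if the PLNZONE variables from the docstring above were defined in
# this module, PLN_DIC_LIST would evaluate to [PLNZONE_DIC].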
|
mit
| 7,385,860,457,291,931,000
| 33.828571
| 80
| 0.544657
| false
| 4.159204
| false
| false
| false
|
smc170/fam-study-password
|
game_code.py
|
1
|
2177
|
"""
Family Study Password - The Biblical Game
Copyright (C) 2013 Spencer Caesare
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#!/usr/bin/env python3
import sys
from sys import exit
from PySide import QtCore, QtGui
from random import *
from game_gui import Ui_main_window
from game_list import cards
class game_window(QtGui.QWidget, Ui_main_window):
def __init__(self, parent=None):
super(game_window, self).__init__(parent)
self.setupUi(self)
self.loop_count = 0
self.random_word()
def random_word(self):
if self.loop_count >= 2:
self.get_button.clicked.disconnect(self.random_word)
else:
pass
self.card_to_play = choice(cards)
cards.remove(self.card_to_play)
password_label = self.password_label
get_button = self.get_button
self.password_label.setText('Push Button To Receive Word')
self.get_button.setText('Push Me To Get A Word')
self.loop_count += 1
self.get_button.clicked.connect(self.set_labels)
def set_labels(self):
self.password_label.setText(self.card_to_play)
self.get_button.setText('Push To Clear Word')
self.get_button.clicked.disconnect(self.set_labels)
self.get_button.clicked.connect(self.random_word)
if not cards:
self.password_label.setText("Congrats! You've gone through all the words! Press the button to quit.")
self.get_button.setText('Push Me To Quit')
self.get_button.clicked.connect(QtCore.QCoreApplication.instance().quit)
else:
pass
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = game_window()
window.show()
sys.exit(app.exec_())
|
gpl-3.0
| -7,751,935,000,141,383,000
| 23.738636
| 104
| 0.726229
| false
| 3.36476
| false
| false
| false
|
lcy-seso/models
|
fluid/icnet/_ce.py
|
1
|
1308
|
# this file is only used for continuous evaluation test!
import os
import sys
sys.path.append(os.environ['ceroot'])
from kpi import CostKpi, DurationKpi, AccKpi
# NOTE kpi.py should shared in models in some way!!!!
train_cost_kpi = CostKpi('train_cost', 0.05, 0, actived=True)
train_duration_kpi = DurationKpi('train_duration', 0.06, 0, actived=True)
tracking_kpis = [
train_cost_kpi,
train_duration_kpi,
]
def parse_log(log):
'''
This method should be implemented by model developers.
The suggestion:
    each KPI line in the log should contain three tab-separated fields:
    'kpis', the KPI name and its value, for example:
    "
    kpis\ttrain_cost\t1.0
    kpis\ttest_cost\t1.0
    kpis\ttrain_cost\t1.0
    kpis\ttrain_cost\t1.0
    kpis\ttrain_acc\t1.2
"
'''
for line in log.split('\n'):
fs = line.strip().split('\t')
print(fs)
if len(fs) == 3 and fs[0] == 'kpis':
kpi_name = fs[1]
kpi_value = float(fs[2])
yield kpi_name, kpi_value
def log_to_ce(log):
kpi_tracker = {}
for kpi in tracking_kpis:
kpi_tracker[kpi.name] = kpi
for (kpi_name, kpi_value) in parse_log(log):
print(kpi_name, kpi_value)
kpi_tracker[kpi_name].add_record(kpi_value)
kpi_tracker[kpi_name].persist()
if __name__ == '__main__':
log = sys.stdin.read()
log_to_ce(log)
|
apache-2.0
| 771,536,410,682,091,400
| 21.947368
| 73
| 0.603211
| false
| 2.900222
| false
| false
| false
|
dajuno/nmrpy
|
mri0D.py
|
1
|
9879
|
# -*- coding: utf8 -*-
'''
Simulate magnetization of one group of nuclear spins "0D"
solving the Bloch equation within a frame of reference rotating with w_rf
dM/dt = g*(M x B) + relax
M: magnetization
B: applied magnetic field = B_0 + B_RF + B_G
g: gyromagnetic ratio
relax: T1, T2 relaxation terms '''
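# Written out in components (as implemented in rhs() inside bloch() below), with
# relaxation toward the equilibrium magnetization M0 along z:
#   dMx/dt = g*(M x B)_x - Mx/T2
#   dMy/dt = g*(M x B)_y - My/T2
#   dMz/dt = g*(M x B)_z - (Mz - M0)/T1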
# TODO: [ ] spin echo sequence: 90y - TE/2 - 180x - TE - 180x - ..
# [ ] compute MRI signal
# [ ] compare to analytical solution
# [ ] and matrix formulism
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import ode
from utils import progressbar
# import warning
# class spin:
# ''' spin class
# (add some methods later?) '''
# def __init__(self, M0=1, T1=0.200, T2=0.600, Minit=[0, 0, 1]):
# ''' constructor
# M0 equilibrium magnetization
# Minit initial magnetization
# T1 relaxation time of substance
# T2
# '''
# # gyromagnetic ratio of protons (¹H):
# self.gm = 42.6e6 # Hz/Tesla
# self.M0 = 1
# self.T1 = T1
# self.T2 = T2
# self.Minit = Minit
def pulseseq(t, s, params, it):
''' compute contribution to magnetic field `Beff(t)` at time `t`
due to static gradient `Bg`, RF pulse `Brf` and/or gradient pulse `Brfg`
return: B'(t) = Bg + Brf(t) + Brfg(t)
= [Bx, By, Bz]
'''
B1 = params.get('amp')
w0 = params.get('w0')
TR = params.get('TR')
TE = params.get('TE')
pseq = params.get('pseq')
dphi = params.get('dephase') # dephase angle in rad: by how much will
# magnetization be dephased between P1 and P2 ?
if pseq == 'flip90':
tp = np.pi/(2*B1*s['gm'])
dt = TE/2
dB = dphi/s['gm']/dt
if np.mod(t, TR) <= tp: # 90° flip
Bp = B1*np.array([np.cos(w0*t), 0, -dB])
else:
Bp = np.array([0, 0, -dB])
elif pseq == 'continuous':
Bp = B1*np.array([np.cos(w0*t), 0, 0])
elif pseq == 'pulsed':
if np.mod(t, TR) < TE: # echo!
Bp = B1*np.array([np.cos(w0*t), 0, 0])
else:
Bp = np.array([0, 0, 0])
elif pseq == 'spinecho':
''' - one pulse of length tp flips M by pi/2
- magnetization is dephased due to field inhomogeinities
(specify angle in rad!!)
- refocus pulse after \tau -> pi flip
- phase coherence restored after 2\tau
cf. Slichter
'''
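        # Rough timing with the values set in __main__ below (B1 = 1.75e-5 T,
        # gm = 42.6e6): tp = pi/(2*B1*gm) is about 2.1 ms, so the 90-degree pulse
        # is short compared with the TE/2 = 25 ms dephasing window.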
# pulse duration pi flip
tp = np.pi/(2*B1*s['gm'])
dt = TE/2
dB = dphi/s['gm']/dt
if np.mod(t, TR) <= tp: # 90° flip
Bp = B1*np.array([np.cos(w0*t), 0, -dB])
# elif np.mod(t, TR) <= tp + TE/2: # dephase!
elif np.mod(t, TR) <= TE/2: # dephase!
Bp = np.array([0, 0, -dB])
# elif np.mod(t, TR) <= TE/2+3*tp: # 180° flip
elif np.mod(t, TR) <= TE/2+2*tp: # 180° flip
Bp = B1*np.array([np.cos(w0*t), 0, -dB])
else:
Bp = np.array([0, 0, -dB])
else:
Bp = np.array([0, 0, 0])
return Bp
def bloch(s, tend=1, nsteps=1000, backend='vode', pulse_params={},
B0=3, dw_rot=0, dw_rf=0, rtol=1e-6):
''' solve Bloch equations for spin `s` in the ROTATING FRAME OF REFERENCE
rotating with the Larmor frequency plus a shift `dw_rot` (default: 0)
setting dw_rot = None (-> -w0) corresponds to the laboratory frame.
dw_fr: frequency shift for off resonance excitation
'''
w0 = -s['gm']*B0
# RF freq in rotating frame of reference is `w - w_fr`,
# so just the "off resonance" freq (=w_0-w_rf) plus the
# difference in frequency between wf_fr and w_0
if dw_rot is None:
dw_rot = -w0
pulse_params['w0'] = dw_rot + dw_rf
def rhs(t, y, s, pulse_params, B0, w0, dw_rot, it):
B = np.array([0, 0, B0]) # static
B = B + pulseseq(t, s, pulse_params, it) # RF
# rotating frame with w+dw
B = B + np.array([0, 0, (w0+dw_rot)/s['gm']])
# relax
R = np.array([y[0]/s['T2'], y[1]/s['T2'], (y[2]-s['M0'])/s['T1']])
return s['gm']*np.cross(y, B) - R
''' VAR 1 ## automatic step size control '''
it = 1
sol = []
t = []
dt = tend/nsteps
solver = ode(rhs).set_integrator(backend, rtol=rtol)
solver.set_initial_value(s['Minit'], 0)
solver.set_f_params(s, pulse_params, B0, w0, dw_rot, it)
while solver.successful() and solver.t < tend:
# works only with vode!! not recommended:
# solver.integrate(tend, step=True)
solver.integrate(solver.t+dt)
t.append(solver.t)
sol.append(solver.y)
it = it + 1
progressbar(solver.t, tend, 'solve')
return np.array(t), np.array(sol)
def plot_3Dtime(t, M, skip=10):
from mpl_toolkits.mplot3d import Axes3D
import time
plt.ion()
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.axis([-1, 1, -1, 1])
ax.plot([0, 0], [0, 0], [-1, 1], '-.k')
ax.plot([-1, 1], [0, 0], [0, 0], '-.k')
ax.plot([0, 0], [-1, 1], [0, 0], '-.k')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
for i in range(0, len(t), skip):
ax.plot([0, M[i, 0]], [0, M[i, 1]], [0, M[i, 2]],
'-<r')
# print('%i \t t = %g s' % (i, t[i]))
progressbar(t[i], t.max(), s='plot')
plt.draw()
time.sleep(0.05)
def plot_relax(t, M):
plt.ion()
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
Mt = np.sqrt(M[:, 0]**2 + M[:, 1]**2)
ax1.plot(t, Mt)
ax1.set_xlabel('time in ms')
ax1.set_ylabel('$|M|$')
ax1.set_title('T1 relaxation')
ax2.plot(t, M[:, 2])
ax2.set_title('T2 relaxation')
def plot_pulse(t, M, params, s):
plt.ion()
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
# plot magnetization components
ax1.plot(t, M)
ax1.legend(('$M_x$', '$M_y$', '$M_z$'))
ax1.set_xlabel('time in ms')
ax1.set_ylabel('$M$')
ax1.set_title('Magnetization')
plt.draw()
# plot pulse train
pseq = params.get('pseq')
if pseq == 'spinecho' or pseq == 'flip90':
TE = params.get('TE')
TR = params.get('TR')
B1 = params.get('amp')
N = int(np.ceil(t[-1]/TR)) # number of periods
tp = np.pi/(2*B1*s['gm'])
# draw polygone of one period:
if pseq == 'spinecho':
p1 = [0, 1, 1, 0, 0, 1, 1, 0, 0]
tp1 = np.array([0, 0, tp, tp, tp+TE/2, tp+TE/2, TE/2+3*tp,
TE/2+3*tp, TR])
elif pseq == 'flip90':
p1 = [0, 1, 1, 0, 0]
tp1 = np.array([0, 0, tp, tp, TR])
p, tp = [], []
for i in range(N):
tp.extend(tp1+i*TR)
p.extend(p1)
ax2.plot(tp, p)
ax2.set_ylim([-0.2, 1.2])
ax1.set_xlim([0, t.max()])
plt.draw()
if __name__ == '__main__':
B0 = 3
# spin dict
s = {
'M0': 1,
'T1': 0.100,
'T2': 0.600,
'Minit': [0, 0, 1],
'gm': 42.6e6
}
# pulse dict
pulse = {
'TE': 0.050,
'TR': 1.000,
'amp': 1.75e-5, # B1 = 1.75e-5 taken from Yuan1987
'pseq': 'flip90',
'dephase': .1
}
w0 = s['gm']*B0
    nsteps = 1000  # integer, so it can also be used as an array dimension below
# t, M = bloch(s, tend=0.2, backend='dopri5', pulse_params=pulse, dw_rot=0,
# dw_rf=0, rtol=1e-6, nsteps=nsteps, B0=B0)
# Mc = M[:, 0] + 1j*M[:, 1]
# MANY SPINS EXPERIMENT
N = 100
r = 2*np.random.rand(N) - 1
dw_off = r*100 # frequency shift between +-100 Hz
dphi = r*B0*0.5 # max angle (rad) of dephasing during TE/2
var = dphi # dw_off
M = []
i = 0
Mc = np.zeros((nsteps, N), dtype=complex)
for x in var:
print('\nrun %i/%i \t shift %.2f' % (i+1, len(var), x))
pulse['dephase'] = x
t, H = bloch(s, tend=0.2, backend='dopri5', pulse_params=pulse,
dw_rot=0, dw_rf=0, rtol=1e-6, nsteps=nsteps, B0=B0)
M.append(H)
Mc[:, i] = H[:, 0] + 1j*H[:, 1]
i += 1
M = np.array(M)
# integrate Mt to get signal
def plot_cplx(t, Mc):
plt.figure()
plt.ion()
plt.plot(t, np.real(Mc), '-', t, np.imag(Mc), ':')
def plot_signal(t, M):
signal = np.sum(M, 0)[:, 0:2]
fig = plt.figure()
plt.ion()
plt.plot(t, signal)
plt.plot(t, signal[:, 0]+signal[:, 1], ':')
plt.legend(('x', 'y', 'sum'))
ax = fig.gca()
ylim = ax.get_ylim()
TE = pulse['TE']
plt.plot([TE, TE], [ylim[0], ylim[1]], '-.k')
# *** BENCHMARK: COMPARE ODE BACKENDS
# Mloop = []
# for be in ['vode', 'lsoda', 'dopri5', 'dop853']:
# t, M = bloch(s, tend=0.1, backend=be, pulse_params=pulse, dw_rot=0,
# dw_rf=0, rtol=1e-6, nsteps=1e5, B0=B0)
# Mloop.append(M)
# *** EXAMPLE: continuous excitation, M -> 2pi turn
# pulse = {'TE': 20, 'TR': 50, 'amp': 1, 'pseq': 'continuous'}
# t1 = 2*np.pi/s.gm/1
# t, M = bloch(s, tend=t1, backend='vode', pulse_params=pulse, dw_rot=0,
# rtol=1e-6, nsteps=1e3, B0=B0)
# *** EXAMPLE: free precession, relaxed
# pulse = {'pseq': 'none'}
# s = spin(Minit=[0.7, 0, 0.8])
# laboratory frame (insane)
# t, M = bloch(s, backend='dopri5', tend=0.01, nsteps=1e4,
# pulse_params=pulse, dw_rot=None, rtol=1e-3, B0=3)
# rotating reference frame (sensible)
# t, M = bloch(s, backend='vode', nsteps=1e3, pulse_params=pulse,
# dw_rot=100, rtol=1e-6, B0=3)
# ** BENCHMARK ** dopri5 (RK45): 1 loops, best of 3: 346 ms per loop
# vode (ABF): 10 loops, best of 3: 77.1 ms per loop
# command: %timeit %run mri0D.py
# plot_relax(t, y)
# plot3Dtime(t, y)
|
mit
| 2,515,762,664,697,472,000
| 29.760125
| 79
| 0.50081
| false
| 2.657158
| false
| false
| false
|
ctuning/ck-env
|
soft/lib.papi/customize.py
|
1
|
2808
|
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
import os
##############################################################################
# setup environment setup
def setup(i):
"""
Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}
"""
import os
# Get variables
ck=i['ck_kernel']
s=''
iv=i.get('interactive','')
cus=i.get('customize',{})
fp=cus.get('full_path','')
hosd=i['host_os_dict']
tosd=i['target_os_dict']
# Check platform
env=i['env']
src=cus.get('install_env',{}).get('PACKAGE_SUB_DIR','')
ep=cus['env_prefix']
pl=os.path.dirname(fp)
p2=os.path.dirname(pl)
p3=os.path.dirname(p2)
pb=os.path.join(p2,'bin')
pinc=os.path.join(p2,'include')
psrc=''
if src!='':
psrc=os.path.join(p3,src)
cus['path_src']=psrc
env[ep+'_SRC']=psrc
cus['path_bin']=pb
cus['path_lib']=pl
cus['path_include']=pinc
env[ep]=p2
env[ep+'_BIN']=pb
env[ep+'_LIB']=pl
env[ep+'_INCLUDE']=pinc
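    # e.g. with an env_prefix such as 'CK_ENV_LIB_PAPI' (hypothetical), the lines above
    # export CK_ENV_LIB_PAPI, CK_ENV_LIB_PAPI_BIN, CK_ENV_LIB_PAPI_LIB and CK_ENV_LIB_PAPI_INCLUDE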
lb=os.path.basename(fp)
lbs=lb
if lbs.endswith('.so'):
lbs=lbs[:-3]+'.a'
cus['static_lib']=lbs
cus['dynamic_lib']=lb
env[ep+'_STATIC_NAME']=cus.get('static_lib','')
env[ep+'_DYNAMIC_NAME']=cus.get('dynamic_lib','')
r = ck.access({'action': 'lib_path_export_script', 'module_uoa': 'os', 'host_os_dict': hosd,
'lib_path': cus.get('path_lib','')})
if r['return']>0: return r
s += r['script']
return {'return':0, 'bat':s}
|
bsd-3-clause
| -5,265,577,132,017,976,000
| 24.297297
| 97
| 0.503561
| false
| 3.536524
| false
| false
| false
|
CanalTP/navitia
|
source/jormungandr/tests/integration_tests_settings.py
|
1
|
2752
|
# encoding: utf-8
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import
import os
START_MONITORING_THREAD = False
SAVE_STAT = True
DISABLE_DATABASE = True
# for tests we want only 1/2 seconds timeout instead of the normal 10s
INSTANCE_TIMEOUT = int(os.environ.get('CUSTOM_INSTANCE_TIMEOUT', 500))
STAT_CIRCUIT_BREAKER_MAX_FAIL = int(os.getenv('JORMUNGANDR_STAT_CIRCUIT_BREAKER_MAX_FAIL', 1000))
STAT_CIRCUIT_BREAKER_TIMEOUT_S = int(os.getenv('JORMUNGANDR_STAT_CIRCUIT_BREAKER_TIMEOUT_S', 1))
# do not authenticate for tests
PUBLIC = True
LOGGER = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {'format': '[%(asctime)s] [%(levelname)5s] [%(process)5s] [%(name)10s] %(message)s'}
},
'handlers': {'default': {'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'default'}},
'loggers': {
'': {'handlers': ['default'], 'level': 'INFO', 'propagate': True},
'navitiacommon.default_values': {'handlers': ['default'], 'level': 'ERROR', 'propagate': True},
},
}
CACHE_CONFIGURATION = {'CACHE_TYPE': 'null'}
# List of enabled modules
MODULES = {
'v1': { # API v1 of Navitia
'import_path': 'jormungandr.modules.v1_routing.v1_routing',
'class_name': 'V1Routing',
}
}
# circuit breaker parameters, for the tests by default we don't want the circuit breaker
CIRCUIT_BREAKER_MAX_INSTANCE_FAIL = 99999
CIRCUIT_BREAKER_INSTANCE_TIMEOUT_S = 1
GRAPHICAL_ISOCHRONE = True
HEAT_MAP = True
PATCH_WITH_GEVENT_SOCKET = True
GREENLET_POOL_FOR_RIDESHARING_SERVICES = True
|
agpl-3.0
| 7,036,363,262,379,790,000
| 34.74026
| 105
| 0.706759
| false
| 3.323671
| false
| false
| false
|
SymbiFlow/prjxray
|
fuzzers/005-tilegrid/bram_block/top.py
|
1
|
1635
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.db import Database
def gen_sites():
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
for site_name, site_type in gridinfo.sites.items():
if site_type in ['FIFO18E1']:
yield tile_name, site_name
def write_params(params):
pinstr = 'tile,val,site\n'
for tile, (site, val) in sorted(params.items()):
pinstr += '%s,%s,%s\n' % (tile, val, site)
open('params.csv', 'w').write(pinstr)
def run():
print('''
module top();
''')
params = {}
sites = list(gen_sites())
for (tile_name, site_name), isone in zip(sites,
util.gen_fuzz_states(len(sites))):
params[tile_name] = (site_name, isone)
print(
'''
(* KEEP, DONT_TOUCH, LOC = "{site_name}" *)
RAMB18E1 #(
.INIT_00(256'b{isone})
) bram_{site_name} ();'''.format(
site_name=site_name,
isone=isone,
))
print("endmodule")
write_params(params)
if __name__ == '__main__':
run()
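# Hedged usage sketch (the fuzzer harness is assumed to supply SEED as a hex string):
#   SEED=deadbeef python3 top.py > top.v
# which emits one RAMB18E1 instance per FIFO18E1 site, with INIT_00 driven by the fuzz state bit.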
|
isc
| 4,388,151,815,182,200,000
| 24.546875
| 79
| 0.549847
| false
| 3.283133
| false
| false
| false
|
keepokeepo/MITx-6.00.1x-Introduction-to-Computer-Science-and-Programming-Using-Python
|
PSET 4/ps4b.py
|
1
|
6456
|
from ps4a import *
import time
#
#
# Computer chooses a word
#
#
def compChooseWord(hand, wordList, n):
"""
Given a hand and a wordList, find the word that gives
the maximum value score, and return it.
This word should be calculated by considering all the words
in the wordList.
If no words in the wordList can be made from the hand, return None.
hand: dictionary (string -> int)
wordList: list (string)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: string or None
"""
# Create a new variable to store the maximum score seen so far (initially 0)
bestScore = 0
# Create a new variable to store the best word seen so far (initially None)
bestWord = None
# For each word in the wordList
for word in wordList:
# If you can construct the word from your hand
if isValidWord(word, hand, wordList):
# find out how much making that word is worth
score = getWordScore(word, n)
# If the score for that word is higher than your best score
if (score > bestScore):
# update your best score, and best word accordingly
bestScore = score
bestWord = word
# return the best word you found.
return bestWord
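# Hedged usage sketch (wordList, getWordScore and isValidWord come from ps4a; the
# example hand is illustrative only):
#   hand = {'a': 1, 'p': 2, 'l': 1, 'e': 1, 's': 1, 'q': 1}
#   compChooseWord(hand, wordList, n=7)   # -> highest-scoring playable word, or None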
#
# Computer plays a hand
#
def compPlayHand(hand, wordList, n):
"""
Allows the computer to play the given hand, following the same procedure
as playHand, except instead of the user choosing a word, the computer
chooses it.
1) The hand is displayed.
2) The computer chooses a word.
3) After every valid word: the word and the score for that word is
displayed, the remaining letters in the hand are displayed, and the
computer chooses another word.
4) The sum of the word scores is displayed when the hand finishes.
5) The hand finishes when the computer has exhausted its possible
choices (i.e. compChooseWord returns None).
hand: dictionary (string -> int)
wordList: list (string)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
# Keep track of the total score
totalScore = 0
# As long as there are still letters left in the hand:
while (calculateHandlen(hand) > 0) :
# Display the hand
print("Current Hand: ", end=' ')
displayHand(hand)
# computer's word
word = compChooseWord(hand, wordList, n)
        # If compChooseWord could not find a valid word (returned None):
if word == None:
# End the game (break out of the loop)
break
        # Otherwise (the computer found a valid word):
else :
# If the word is not valid:
if (not isValidWord(word, hand, wordList)) :
print('This is a terrible error! I need to check my own code!')
break
# Otherwise (the word is valid):
else :
# Tell the user how many points the word earned, and the updated total score
score = getWordScore(word, n)
totalScore += score
print('"' + word + '" earned ' + str(score) + ' points. Total: ' + str(totalScore) + ' points')
# Update hand and show the updated hand to the user
hand = updateHand(hand, word)
print()
    # Game is over (the computer ran out of valid words), so tell user the total score
print('Total score: ' + str(totalScore) + ' points.')
#
# Problem #6: Playing a game
#
#
def playGame(wordList):
"""
Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'e', immediately exit the game.
* If the user inputs anything that's not 'n', 'r', or 'e', keep asking them again.
2) Asks the user to input a 'u' or a 'c'.
* If the user inputs anything that's not 'c' or 'u', keep asking them again.
3) Switch functionality based on the above choices:
* If the user inputted 'n', play a new (random) hand.
* Else, if the user inputted 'r', play the last hand again.
* If the user inputted 'u', let the user play the game
with the selected hand, using playHand.
* If the user inputted 'c', let the computer play the
game with the selected hand, using compPlayHand.
4) After the computer or user has played the hand, repeat from step 1
wordList: list (string)
"""
#print("playGame not yet implemented.") # <-- Remove this when you code this function
gameCompleted = False
hand = {}
n = HAND_SIZE
while not gameCompleted:
userInput = input('Enter n to deal a new hand, r to replay the last hand, ' + \
'or e to end game: ')
validPlayer = False
if userInput == 'e':
gameCompleted = True
break
elif userInput == 'r':
if hand == {}:
print('You have not played a hand yet. Please play a new hand first!')
else:
while validPlayer == False:
choosePlayer = input('Enter u to have yourself play, c to have the computer play: ')
if choosePlayer == 'u':
validPlayer = True
playHand(hand, wordList, n)
elif choosePlayer == 'c':
validPlayer = True
compPlayHand(hand, wordList, n)
else:
print('Invalid command.')
elif userInput == 'n':
hand = dealHand(n)
while validPlayer == False:
choosePlayer = input('Enter u to have yourself play, c to have the computer play: ')
if choosePlayer == 'u':
validPlayer = True
playHand(hand, wordList, n)
elif choosePlayer == 'c':
validPlayer = True
compPlayHand(hand, wordList, n)
else:
print('Invalid command.')
else:
print('Invalid command.')
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)
|
mit
| 3,707,142,585,639,417,000
| 34.668508
| 125
| 0.568154
| false
| 4.388851
| false
| false
| false
|
3dfxsoftware/cbss-addons
|
hr_payroll_pay_generator/wizard/generator_wizard.py
|
1
|
1675
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class GeneratorWizard(osv.TransientModel):
_name = 'hr.payroll.pay.generator.generator.wizard'
def generator_exectute(self, cr, uid, ids, context=None):
return True
_columns = {
'pay_type_id': fields.many2one('hr.payroll.pay.generator.pay.type', string='Pay Type', required=True),
'payslip_run_id': fields.many2one('hr.payslip.run', string='Payslip Batch', required=True),
'salary_rule_id': fields.many2one('hr.salary.rule', string='Salary Rule', required=True),
'employee_ids': fields.many2many('hr.employee', string='Employees', required=True),
}
|
gpl-2.0
| 7,269,671,319,546,851,000
| 44.297297
| 110
| 0.633433
| false
| 3.913551
| false
| false
| false
|
ccressent/acnav
|
DAT/Block.py
|
1
|
3076
|
"""
Parse DAT file blocks and traverse block chains.
"""
from binascii import hexlify
from struct import unpack_from
from DAT.Header import Header
class Block:
"""
A block making up a chunk of a Directory in a DAT file.
"""
def __init__(self, filename=None, offset=None, size=None, next_block_offset=None, data=None):
self.filename = filename
self.offset = offset
self.size = size
self.next_block_offset = next_block_offset
self.data = data
def parse(self, blob):
"""
Try to parse a block structure out of the given binary blob.
"""
self.data = unpack_from(str(len(blob[4:])) + "s", blob[4:])[0]
self.next_block_offset = unpack_from("I", blob)[0]
@classmethod
def from_blob(cls, blob):
"""
Return a new Block instance initialized with the result of parsing the
given binary blob.
"""
b = cls()
b.parse(blob)
b.size = len(blob)
return b
@classmethod
def from_file(cls, filename, offset):
"""
Return a new Block instance initialized with the result of parsing the
given file at the given offset.
"""
with open(filename, "rb") as fp:
h = Header.from_file(filename)
fp.seek(offset)
blob = fp.read(h.block_size)
b = cls.from_blob(blob)
b.filename = filename
b.offset = offset
return b
def __iter__(self):
return BlockIterator(self)
def __str__(self):
s = "{filename: " + str(self.filename)
s += ", offset: " + str(hex(self.offset))
s += ", size: " + str(hex(self.size))
s += ", next: " + str(hex(self.next_block_offset))
s += ", data: " + hexlify(self.data)
s += "}"
return s
class BlockIterator:
def __init__(self, first_block):
self.current_block = first_block
self.no_more_blocks = False
def __iter__(self):
return self
def next(self):
if self.no_more_blocks:
raise StopIteration()
else:
if self.current_block.next_block_offset == 0x0:
self.no_more_blocks = True
b = self.current_block
filename = self.current_block.filename
next_block_offset = self.current_block.next_block_offset
self.current_block = Block.from_file(filename, next_block_offset)
return b
class BlockChain:
"""
The result of traversing a series of Block starting at the given Block.
The data held by a BlockChain can be parsed into a Directory.
"""
def __init__(self, start_block):
self.size = 0
self.data = ""
for block in iter(start_block):
self.size += block.size
self.data += block.data
def __str__(self):
s = "{size: " + str(self.size)
s += ", data: " + hexlify(self.data)
s += "}"
return s
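# Hedged usage sketch (file name and offset are illustrative, not taken from the module):
#   first = Block.from_file('client_cell_1.dat', 0x400)
#   chain = BlockChain(first)
#   print(chain.size, len(chain.data))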
|
mit
| 8,332,130,015,601,070,000
| 26.221239
| 97
| 0.540637
| false
| 3.913486
| false
| false
| false
|
rjschwei/azure-sdk-for-python
|
azure-mgmt-authorization/azure/mgmt/authorization/models/classic_administrator.py
|
1
|
1424
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClassicAdministrator(Model):
"""Classic Administrators.
:param id: The ID of the administrator.
:type id: str
:param name: The name of the administrator.
:type name: str
:param type: The type of the administrator.
:type type: str
:param properties: Properties for the classic administrator.
:type properties: :class:`ClassicAdministratorProperties
<azure.mgmt.authorization.models.ClassicAdministratorProperties>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ClassicAdministratorProperties'},
}
def __init__(self, id=None, name=None, type=None, properties=None):
self.id = id
self.name = name
self.type = type
self.properties = properties
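# Hedged usage sketch (illustrative values; the resource ID format is assumed):
#   admin = ClassicAdministrator(
#       id='/subscriptions/<sub-id>/providers/Microsoft.Authorization/classicAdministrators/<id>',
#       name='<id>',
#       type='Microsoft.Authorization/classicAdministrators')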
|
mit
| -2,572,937,800,584,470,000
| 34.6
| 86
| 0.589888
| false
| 4.395062
| false
| false
| false
|
bth/stools
|
stools/Machine.py
|
1
|
4555
|
# -*- coding: utf-8 -*-
"""
Module for machine
"""
import paramiko
import re, string
class Machine(object):
"""
Representation of a machine
"""
def __init__(self, name, ip, username, password, gateway="", prompt=None):
"""
Create a new Machine object
:param name: machine name
:param ip: ip address (or hostname)
:param username: username (login) for ssh connection
:param password: password for ssh connection
        :param gateway: machine name of the gateway machine
:param prompt: prompt to wait
:return: Machine instance
:rtype: Machine
"""
self.name = name
self.ip = ip
self.username = username
self.password = password
self.gateway_machine_name = gateway
if prompt == None:
prompt = "[$#]+"
self.prompt = prompt
def set_gateway(self, gateway_machine):
"""
Set gateway to access to this machine
:param gateway_machine: instance of gateway machine
"""
self.gateway = gateway_machine
def create_connection(self):
"""
Create SSH connection with this machine
"""
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(self.ip, username=self.username, password=self.password)
return client
def write_on_terminal(self, terminal, string_to_write, string_of_end):
"""
Write string_to_write on terminal and wait for string_of_end
:param terminal: terminal instance
:param string_to_write: string to write in terminal
:param string_of_end: string of waiting
"""
terminal.send(string_to_write + "\n")
ret = ''
while re.search(string_of_end, ret) == None:
if re.search("Are you sure you want to continue connecting", ret):
terminal.send("yes" + "\n")
fragment = terminal.recv(9999)
ret += fragment
return ret
def create_connection_by_terminal(self):
"""
        Create an SSH connection to this machine through the gateway's terminal
"""
client = self.gateway.create_connection()
terminal = client.invoke_shell()
self.write_on_terminal(terminal, "ssh " + self.username + "@" + self.ip, "password: ")
self.write_on_terminal(terminal, self.password, self.prompt)
return client, terminal
def execute_command(self, command, timeout):
"""
Execute command on this machine
:param command: command to execute
:param timeout: timeout (in seconds) for command execution
:return: return of the command
:rtype: String
"""
if self.gateway == None:
client = self.create_connection()
stdin, stdout, stderr = client.exec_command(command, timeout=timeout)
ret = stdout.readlines()
ret = ''.join(ret)
ret = ret[:string.rfind(ret, '\n')]
else:
client, terminal = self.create_connection_by_terminal()
ret = self.write_on_terminal(terminal, command, self.prompt)
ret = self.clean_output(ret)
return ret
def execute_copy(self, command, machine_target):
"""
Execute copy command on this machine
:param command: command copy to execute
:param machine_target: machine instance of target machine
:return: return of the command
:rtype: String
"""
if self.gateway == None:
client = self.create_connection()
terminal = client.invoke_shell()
else:
client, terminal = self.create_connection_by_terminal()
self.write_on_terminal(terminal, command, "password: ")
ret = self.write_on_terminal(terminal, machine_target.password, self.prompt)
return self.clean_output(ret)
def clean_output(self, output):
"""
Delete useless space of output
:param output: string to clean
:return: cleaned string
:rtype: String
"""
cut_start = 0
last_return_position = string.rfind(output, "\r\n")
first_return_position = string.find(output, "\r\n")
cut_start = first_return_position + 2
output = output[cut_start:last_return_position]
return output
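# Hedged usage sketch (hosts and credentials are made up):
#   gw = Machine('gateway', '10.0.0.1', 'admin', 'secret')
#   gw.set_gateway(None)            # the gateway itself is reached directly
#   target = Machine('web01', '192.168.0.2', 'deploy', 'secret')
#   target.set_gateway(gw)          # web01 is reached through the gateway's shell
#   print(target.execute_command('uname -a', timeout=30))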
|
gpl-2.0
| 7,681,146,573,439,596,000
| 32.992537
| 94
| 0.574973
| false
| 4.527833
| false
| false
| false
|
svn2github/pylucene
|
test3/test_PyLuceneThread.py
|
1
|
3930
|
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import sys, lucene, unittest
import time, threading
from lucene import getVMEnv
from PyLuceneTestCase import PyLuceneTestCase
from java.lang import Thread
from org.apache.lucene.analysis.standard import StandardAnalyzer
from org.apache.lucene.document import Document, Field, TextField
from org.apache.lucene.index import Term
from org.apache.lucene.search import TermQuery
class PyLuceneThreadTestCase(PyLuceneTestCase):
"""
Test using threads in PyLucene with python threads
"""
def setUp(self):
super(PyLuceneThreadTestCase, self).setUp()
self.classLoader = Thread.currentThread().getContextClassLoader()
writer = self.getWriter(analyzer=StandardAnalyzer())
doc1 = Document()
doc2 = Document()
doc3 = Document()
doc4 = Document()
doc1.add(Field("field", "one", TextField.TYPE_STORED))
doc2.add(Field("field", "two", TextField.TYPE_STORED))
doc3.add(Field("field", "three", TextField.TYPE_STORED))
doc4.add(Field("field", "one", TextField.TYPE_STORED))
writer.addDocument(doc1)
writer.addDocument(doc2)
writer.addDocument(doc3)
writer.addDocument(doc4)
writer.commit()
writer.close()
self.testData = [('one',2), ('two',1), ('three', 1), ('five', 0)] * 500
self.lock = threading.Lock()
self.totalQueries = 0
def testWithMainThread(self):
""" warm up test for runSearch in main thread """
self.runSearch(2000, True)
def testWithPyLuceneThread(self):
""" Run 5 threads with 2000 queries each """
threads = []
for i in range(5):
threads.append(threading.Thread(target=self.runSearch,
args=(2000,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# we survived!
# and all queries have ran successfully
self.assertEqual(10000, self.totalQueries)
def runSearch(self, runCount, mainThread=False):
""" search for runCount number of times """
# problem: if there are any assertion errors in the child
# thread, the calling thread is not notified and may still
# consider the test case pass. We are using self.totalQueries
# to double check that work has actually been done.
if not mainThread:
getVMEnv().attachCurrentThread()
time.sleep(0.5)
searcher = self.getSearcher()
try:
for word, count in self.testData[0:runCount]:
query = TermQuery(Term("field", word))
topDocs = searcher.search(query, 50)
self.assertEqual(topDocs.totalHits, count)
self.lock.acquire()
self.totalQueries += 1
self.lock.release()
finally:
del searcher
if __name__ == "__main__":
lucene.initVM(vmargs=['-Djava.awt.headless=true'])
if '-loop' in sys.argv:
sys.argv.remove('-loop')
while True:
try:
unittest.main()
except:
pass
else:
unittest.main()
|
apache-2.0
| -3,566,141,051,861,162,500
| 32.02521
| 79
| 0.597455
| false
| 4.194237
| true
| false
| false
|
NuGrid/NuGridPy
|
nugridpy/astronomy.py
|
1
|
11926
|
"""
============
astronomy.py
============
Useful functions for astronomy & astrophysics
"""
from functools import update_wrapper
import numpy as np
from scipy import integrate
from . import constants as cs
class ReadOnlyConstants:
"""Callable class for attaching constants as read-only property to a function."""
def __init__(self, constants, func):
"""Constructor that defines function and constants in class instance."""
self._constants = constants
self.func = func
def __call__(self, *args, **kwargs):
"""Defines the class as a callable and executes the decorated function."""
return self.func(*args, **kwargs)
@property
def constants(self):
"""Returns constants as private attribute."""
return self._constants
def attach_constants(*args):
"""Decorator receives function constants first, then attaches them through a callable class."""
def attach(func):
function_with_constants = ReadOnlyConstants(args, func)
# inherit docstring and other magic info from original function
return update_wrapper(function_with_constants, func)
return attach
@attach_constants(cs.visc_mol_const)
def visc_mol_sol(T, rho, X):
"""
Molecular plasma viscosity (Spitzer 1962)
Parameters
----------
T : float
temperature in K
rho : float
density in cgs
X : float
H mass fraction
Returns
-------
nu : float
molecular diffusivity in [cm**2/s]
Notes
-----
According to Eq 22 in Schatzman (1977). Assume log Lambda = 15.
(see Table 5.1), a H/He mix (for different mix use Eq. 5.54 in
Spitzer textbook)
"""
visc_mol = cs.visc_mol_const * (1. + (7.*X)) * (T**2.5 / rho)
return visc_mol
@attach_constants(cs.nu_rad_const)
def visc_rad_kap_sc(T, rho, X):
"""
Radiative viscosity (Thomas, 1930) for e- scattering opacity
Parameters
----------
T : float
temperature in K
rho : float
density in cgs
X : float
H mass fraction
Returns
-------
nu : float
radiative diffusivity in [cm**2/s]
Notes
-----
Eqn. 14 in Schatzman, 1977, assume electron scattering opacity
kappa_sc = 0.2*(1+X), Kippenhahn (2nd edn, Eqn 17.2)
"""
kappa = 0.2 * (1.+X)
nu_rad = cs.nu_rad_const * (T**4 / (kappa * rho**2))
return nu_rad
@attach_constants()
def Gamma1_gasrad(beta):
"""
Gamma1 for a mix of ideal gas and radiation
Hansen & Kawaler, page 177, Eqn. 3.110
Parameters
----------
beta : float
Gas pressure fraction Pgas/(Pgas+Prad)
"""
Gamma3minus1 = (2./3.) * (4. - (3.*beta)) / (8. - (7.*beta))
Gamma1 = beta + (4. - (3.*beta)) * Gamma3minus1
return Gamma1
@attach_constants(cs.boltzmann_const, cs.atomic_mass_unit)
def Pgas(rho, T, mmu):
"""
P = R/mu * rho * T
Parameters
----------
rho : float
Density [cgs]
T : float
Temperature [K]
mmu : float
Mean molecular weight
Returns
--------
Gas pressure
"""
R = cs.boltzmann_const / cs.atomic_mass_unit
return (R/mmu) * rho * T
@attach_constants(cs.rad_const)
def Prad(T):
"""
P = cs.rad_const/3 * T**4
Parameters
----------
T : float
Temperature [K]
Returns
--------
Radiation pressure
"""
return (cs.rad_const / 3.) * T**4
@attach_constants(cs.mimf_coeff_6, cs.mimf_coeff_5, cs.mimf_coeff_4,
cs.mimf_coeff_3, cs.mimf_coeff_2, cs.mimf_coeff_1, cs.mimf_coeff_0)
def mimf_ferrario(mi):
""" Curvature MiMf from Ferrario et al. 2005MNRAS.361.1131."""
mf = ((cs.mimf_coeff_6 * (mi**6)) + (cs.mimf_coeff_5 * (mi**5))
- (cs.mimf_coeff_4 * (mi**4)) + (cs.mimf_coeff_3 * (mi**3))
- (cs.mimf_coeff_2 * (mi**2)) + (cs.mimf_coeff_1 * mi) + cs.mimf_coeff_0)
return mf
@attach_constants(cs.core_mass_coeff, cs.core_mass_offset)
def core_mass_L(MH):
"""
Core-mass luminosity relationship from Bloecker (1993)
Parameters
----------
MH : float
Core mass in Msun
Returns
-------
L
Luminosity in Lsun
"""
return cs.core_mass_coeff*(MH - cs.core_mass_offset)
@attach_constants(cs.imf_m1, cs.imf_m2, cs.imf_a1, cs.imf_a2, cs.imf_a3)
def imf(m):
"""
Initial mass function from Kroupa
Parameters
-------
m : float
mass (g)
Returns
-------
N(M)dM
for given mass according to Kroupa IMF
"""
const2 = cs.imf_m1**(-cs.imf_a1) - cs.imf_m1**(-cs.imf_a2)
const3 = cs.imf_m2**(-cs.imf_a2) - cs.imf_m2**(-cs.imf_a3)
if m < cs.imf_m1:
alpha = cs.imf_a1
const = -const2 - const3
elif m < cs.imf_m2:
alpha = cs.imf_a2
const = -const3
else:
alpha = cs.imf_a3
const = 0.
return m**(-alpha) + const
@attach_constants()
def int_imf_dm(m1, m2, m, imf_ar, bywhat='bymass', integral='normal'):
"""
Integrate IMF between m1 and m2
Parameters
----------
m1 : float
Lower mass integration bound
m2 : float
Upper mass integration bound
m : array
Mass array
imf_ar : array
Array of IMF values corresponding to mass array
bywhat : string, optional
'bymass' integrates the mass that goes into stars of
that mass interval; or 'bynumber' which integrates the number
of stars in that mass interval. The default is 'bymass'.
    integral : string, optional
'normal' uses scipy.integrate.trapz; 'cum' returns cumulative
trapezoidal integral. The default is 'normal'.
Returns
---------
Integrated initial mass function for given bounds
"""
ind_m = (m >= min(m1, m2)) & (m <= max(m1, m2))
if integral == 'normal':
int_func = integrate.trapz
elif integral == 'cum':
int_func = integrate.cumtrapz
else:
raise ValueError(
"Error in int_imf_dm: don't know how to integrate (normal or cum)")
if bywhat == 'bymass':
return int_func(m[ind_m] * imf_ar[ind_m], m[ind_m])
elif bywhat == 'bynumber':
return int_func(imf_ar[ind_m], m[ind_m])
raise ValueError(
"Error in int_imf_dm: Need integration type (bymass or bynumber)")
@attach_constants(cs.r_sun, cs.m_sun, cs.grav_const)
def am_orb(m1, m2, a, e):
"""
Orbital angular momentum equation
e.g. Ge et al 2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun
A : float
Separation in Rsun
e : float
Eccentricity
Returns
--------
Orbital angular momentum
"""
a_cm = a * cs.r_sun
m1_g = m1 * cs.m_sun
m2_g = m2 * cs.m_sun
J_orb = np.sqrt(cs.grav_const * a_cm * ((m1_g**2 * m2_g**2) / (m1_g + m2_g))) * (1 - e**2)
return J_orb
@attach_constants(cs.van_loon_1, cs.van_loon_2, cs.van_loon_3)
def mass_loss_loon05(L, Teff):
"""
Mass loss rate from van Loon et al (2005)
Parameters
----------
L : float
L in L_sun
Teff : float
Teff in K
Returns
-------
Mdot
Mdot in Msun/yr
Notes
-----
ref: van Loon etal 2005, A&A 438, 273
"""
Mdot = (cs.van_loon_1 + np.log10(L / 10.**4) -
cs.van_loon_2 * np.log10(Teff / cs.van_loon_3))
return Mdot
@attach_constants(cs.grav_const, cs.m_sun, cs.r_sun)
def energ_orb(m1, m2, r):
"""
Orbital potential energy equation
Parameters
----------
m1, m2 : float
M in Msun
r : float
Distance in Rsun
Returns
-------
Epot
Epot in erg
"""
epo = -cs.grav_const * m1 * m2 * cs.m_sun**2 / (r * cs.r_sun)
return epo
@attach_constants(cs.r_sun, cs.grav_const, cs.m_sun, cs.day_secs)
def period(A, M1, M2):
"""
Calculate binary period from separation.
Parameters
----------
A : float
separation A Rsun
M1, M2 : float
M in Msun
Returns
-------
p
period in days
"""
A *= cs.r_sun
velocity = np.sqrt(cs.grav_const * cs.m_sun * (M1+M2) / A)
p = ((2. * np.pi * A) / velocity) / cs.day_secs
return p
@attach_constants(cs.grav_const, cs.m_sun, cs.r_sun)
def escape_velocity(M, R):
"""
Escape velocity
Parameters
----------
M : float
Mass in solar masses
R : float
Radius in solar radii
Returns
-------
v_escape
in km/s
"""
ve = np.sqrt(2. * cs.grav_const * M * cs.m_sun / (R * cs.r_sun))
ve = ve * 1.e-5
return ve
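# Hedged check: escape_velocity(1., 1.) evaluates to roughly 618 km/s,
# the familiar escape velocity from the solar surface.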
@attach_constants(cs.avogadro_const, cs.boltzmann_const, cs.mass_H_atom)
def Nasv(macs_val, T):
"""
Parameters
----------
macs_val : float
MACS [mb] at T [K]
T : float
        Temperature [K]
Returns
-------
Na*<sigma v>
for MACS [mb] at T [K]
"""
Na = cs.avogadro_const
k = cs.boltzmann_const
vtherm = (2. * k * T / cs.mass_H_atom)**0.5
s = macs_val * 1.e-27
Nasv_val = s * vtherm * Na
return Nasv_val
@attach_constants(cs.avogadro_const, cs.boltzmann_const, cs.mass_H_atom)
def macs(nasv, T):
"""
Parameters
----------
nasv : float
nasv value
T : float
Temperature [K]
Returns
-------
MACS
[mb] at T [K] from Na*<sigma v>
"""
Na = cs.avogadro_const
k = cs.boltzmann_const
vtherm = (2. * k * T / cs.mass_H_atom)**0.5
s = nasv / (vtherm * Na)
macs_val = s * 1.e27
return macs_val
@attach_constants()
def mu_e(X):
"""
Mean molecular weight per free electron, assuming full ionisation, and
approximating mu_i/Z_i ~ 2 for all elements heavier then Helium.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.8)
Parameters
----------
X : float
Mass fraction of H
Returns
-------
mu_el : float
Free electron mean molecular weight
"""
try:
mu_el = 2. / (1.+X)
except TypeError:
X = np.array([X])
mu_el = 2. / (1.+X)
return mu_el
@attach_constants()
def mu(X, Z, A):
"""
Mean molecular weight assuming full ionisation.
(Kippenhahn & Weigert, Ch 13.1, Eq. 13.6)
Parameters
----------
X : float or array
Mass fraction vector
Z : float or array
Charge number vector
A : float or array
Mass number vector
Returns
-------
mmu : float
Mean molecular weight at full ionization
"""
if not isinstance(Z, np.ndarray):
Z = np.array(Z)
if not isinstance(A, np.ndarray):
A = np.array(A)
if not isinstance(X, np.ndarray):
X = np.array(X)
try:
mmu = 1. / sum(X * (1.+Z) / A)
except TypeError:
X = np.array([X])
A = np.array([A])
Z = np.array([Z])
mmu = 1. / sum(X * (1.+Z) / A)
return mmu
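# Hedged check: for fully ionised pure hydrogen, mu(1., 1., 1.) returns 0.5
# and mu_e(1.) returns 1.0, matching the textbook limiting values.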
@attach_constants(cs.idrad_const)
def Trho_idrad(rho, mmu):
"""
T(rho) that separates P_rad from P_gas dominated regions.
Kippenhahn & Weigert, Eq. 16.10
Parameters
----------
rho : float
Density array [cgs]
    mmu : float
Mean molecular weight
Returns
-------
T : float
Temperature at boundary
"""
T = cs.idrad_const * (rho/mmu)**(1./3.)
return T
@attach_constants(cs.iddeg_const)
def Trho_iddeg(rho, mmu, mu_el):
"""
T(rho) that separates ideal gas and degenerate pressure dominated regions.
Kippenhahn & Weigert, Eq. 16.6
Parameters
----------
rho : float
Density array [cgs]
mmu : float
Mean molecular weight
mu_el : float
Mean molecular weight per free electron
Returns
-------
T : float
Temperature at boundary
"""
T = cs.iddeg_const * rho**(2./3.) * mmu / (mu_el**(5./3.))
return T
|
bsd-3-clause
| -490,455,445,624,667,840
| 19.84965
| 99
| 0.551149
| false
| 3.103305
| false
| false
| false
|
thimslugga/apistar
|
apistar/commands/new.py
|
1
|
1588
|
import os
import shutil
import sys
import click
import apistar
from apistar import schema
APISTAR_PACKAGE_DIR = os.path.dirname(apistar.__file__)
LAYOUTS_DIR = os.path.join(APISTAR_PACKAGE_DIR, 'layouts')
LAYOUT_CHOICES = os.listdir(LAYOUTS_DIR)
IGNORED_DIRECTORIES = ['__pycache__']
class TargetDir(schema.String):
pass
class Layout(schema.String):
description = 'Select the project layout to use.'
default = 'standard'
choices = LAYOUT_CHOICES
class Force(schema.Boolean):
description = 'Overwrite any existing project files.'
default = False
def new(target_dir: TargetDir, layout: Layout, force: Force) -> None:
"""
Create a new project in TARGET_DIR.
"""
source_dir = os.path.join(LAYOUTS_DIR, layout)
copy_paths = []
for dir_path, dirs, filenames in os.walk(source_dir):
dirs[:] = [d for d in dirs if d not in IGNORED_DIRECTORIES]
for filename in filenames:
source_path = os.path.join(dir_path, filename)
rel_path = os.path.relpath(source_path, source_dir)
target_path = os.path.join(target_dir, rel_path)
if os.path.exists(target_path) and not force:
click.echo('Project files already exist. Use `-f` to overwrite.')
sys.exit(1)
copy_paths.append((source_path, target_path))
for source_path, target_path in copy_paths:
click.echo(target_path)
parent = os.path.dirname(target_path)
if parent:
os.makedirs(parent, exist_ok=True)
shutil.copy(source_path, target_path)
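# Hedged CLI sketch (assuming this command is exposed through the `apistar`
# entry point, with --layout/--force options derived from the schema types above):
#   apistar new myproject --layout standard --force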
|
bsd-3-clause
| 8,955,669,424,910,434,000
| 28.407407
| 81
| 0.648615
| false
| 3.521064
| false
| false
| false
|
BEugen/AI
|
KTF/evaluation_ktf_test.py
|
1
|
4967
|
# from pip import models
import numpy as np
import sys
import os
import argparse
###################################################################
# Variables #
# When launching project or scripts from Visual Studio, #
# input_dir and output_dir are passed as arguments. #
# Users could set them from the project setting page. #
###################################################################
input_dir = None
output_dir = None
log_dir = None
#################################################################################
# Keras configs. #
# Please refer to https://keras.io/backend . #
#################################################################################
import keras
from keras import backend as K
# K.set_floatx('float32')
# String: 'float16', 'float32', or 'float64'.
# K.set_epsilon(1e-05)
# float. Sets the value of the fuzz factor used in numeric expressions.
# K.set_image_data_format('channels_first')
# data_format: string. 'channels_first' or 'channels_last'.
#################################################################################
# Keras imports. #
#################################################################################
from keras.models import Model
from keras.models import Sequential
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import Layer
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Activation
from keras.layers import Flatten
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras.callbacks import TensorBoard
from keras.utils import np_utils
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
# import matplotlib.pyplot as plt
from time import time
from keras.models import model_from_json
def classification(x):
if x < 0.3:
return 0
if 0.3 <= x < 0.5:
return 1
if x >= 0.5:
return 2
def model_nn(name):
json_file = open(name + '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(name + '.h5')
sgd = SGD(lr=0.001, momentum=0.8, decay=0.0, nesterov=False)
loaded_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return loaded_model
def main():
so = pd.read_csv('data_so.csv', delimiter=';')
so['c8_q'] = so.iloc[:, [1, 2, 3, 4, 5]].sum(axis=1)
so['label_n'] = so.apply(lambda x: classification(x.iloc[15]), axis=1)
so['label_m'] = so.apply(lambda x: classification(x.iloc[16]), axis=1)
so['label_ug'] = so.apply(lambda x: classification(x.iloc[17]), axis=1)
so.drop(so.columns[[0, 1, 2, 3, 4, 9, 11, 12, 15, 16, 17]], inplace=True, axis=1)
so.iloc[:, 0:7] = \
StandardScaler().fit_transform(so.iloc[:, 0:7].as_matrix())
so.to_csv('data_so_all-pr.csv', sep=';')
data = np.random.permutation(so.values)
X = data[:, 0:7].astype(float)
Y_n = data[:, 7]
Y_m = data[:, 8]
Y_u = data[:, 9]
enc = LabelEncoder()
enc_Y = enc.fit_transform(Y_n)
Y_n_f = np_utils.to_categorical(enc_Y)
enc_Y = enc.fit_transform(Y_m)
Y_m_f = np_utils.to_categorical(enc_Y)
enc_Y = enc.fit_transform(Y_u)
Y_u_f = np_utils.to_categorical(enc_Y)
model = model_nn('model_n')
score = model.evaluate(X, Y_n_f, verbose=1)
print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
model = model_nn('model_m')
score = model.evaluate(X, Y_m_f, verbose=1)
print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
model = model_nn('model_ug')
score = model.evaluate(X, Y_u_f, verbose=1)
print("%s: %.2f%%" % (model.metrics_names[1], score[1] * 100))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", type=str,
default=None,
help="Input directory where where training dataset and meta data are saved",
required=False
)
parser.add_argument("--output_dir", type=str,
default=None,
help="Input directory where where logs and models are saved",
required=False
)
args, unknown = parser.parse_known_args()
input_dir = args.input_dir
output_dir = args.output_dir
log_dir = output_dir
main()
|
gpl-3.0
| 7,968,917,967,813,314,000
| 35.240876
| 100
| 0.550856
| false
| 3.716317
| false
| false
| false
|
binho/myservertalks
|
MySTInstruct.py
|
1
|
1607
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
This file is part of MyServerTalks.
MyServerTalks is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
MyServerTalks is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MyServerTalks; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
class MySTInstruct:
def __init__(self, command, parameters=[], contact='', escope=''):
self.command = command
self.parameters = parameters
self.contact = contact
self.escope = escope
'''
    result format sent to the script
'''
def __str__(self):
#return (" '" + self.contact + "' '" + self.escope + "' " + self.command + " '" + ";".join(self.parameters) + "'").strip()
return ( self.contact + ' ' + self.escope + ' ' + self.command + ' ' + ';'.join(self.parameters) )
def setContact(self, contact):
self.contact = contact
def setEscope(self, escope):
self.escope = escope
def getCommand(self):
return self.command
def getParameters(self):
return self.parameters
def getContact(self):
return self.contact
def getEscope(self):
return self.escope
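# Hedged usage sketch (contact/escope values are made up):
#   i = MySTInstruct('restart', ['apache2'], contact='admin', escope='web')
#   str(i)   # -> 'admin web restart apache2'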
|
gpl-2.0
| -1,327,910,839,982,942,700
| 29.320755
| 124
| 0.685128
| false
| 3.652273
| false
| false
| false
|
xdutaotao/ntlmaps
|
lib/www_client.py
|
1
|
4899
|
#--coding:utf-8--
# This file is part of 'NTLM Authorization Proxy Server'
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# NTLM APS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NTLM APS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the sofware; see the file COPYING. If not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
import string, socket, thread, select, time
import logger, http_header, utils
import ntlm_auth, basic_auth
import proxy_client
class www_HTTP_Client(proxy_client.proxy_HTTP_Client):
#-------------------------------------------------
def connect_rserver(self):
""
self.logger.log('*** Connecting to remote server...')
self.first_run = 0
        # we don't have a proxy, so we have to connect to the server ourselves
rs, rsp = self.client_head_obj.get_http_server()
self.logger.log('(%s:%d)...' % (rs, rsp))
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((rs, rsp))
self.rserver_socket = s
self.rserver_socket_closed = 0
self.current_rserver_net_location = '%s:%d' % (rs, rsp)
self.logger.log('Done.\n')
except:
self.rserver_socket_closed = 1
self.logger.log('Failed.\n')
self.exit()
thread.exit()
#-----------------------------------------------------------------------
def fix_client_header(self):
""
self.logger.log('*** Replacing values in client header...')
if self.config.has_key('CLIENT_HEADER'):
for i in self.config['CLIENT_HEADER'].keys():
self.client_head_obj.del_param(i)
self.client_head_obj.add_param_value(i, self.config['CLIENT_HEADER'][i])
self.logger.log('Done.\n')
# self.logger.log('*** New client header:\n=====\n' + self.client_head_obj.__repr__())
else:
self.logger.log('No need.\n*** There is no "CLIENT_HEADER" section in server.cfg.\n')
self.logger.log("*** Working as selfcontained proxy, then have to change client header.\n")
self.logger.log("*** Remake url format in client header...")
self.client_head_obj.make_right_header()
self.logger.log('Done.\n')
self.client_head_obj.del_param('Keep-Alive')
self.logger.log("*** Just killed 'Keep-Alive' value in the header.\n")
# Code which converts 'Proxy-Connection' value to 'Connection'
# I am not sure that it is needed at all
# May be it is just useless activity
self.logger.log("*** Looking for 'Proxy-Connection' in client header...")
pconnection = self.client_head_obj.get_param_values('Proxy-Connection')
if pconnection:
# if we have 'Proxy-Connection'
self.logger.log("there are some.\n")
wconnection = self.client_head_obj.get_param_values('Connection')
if wconnection:
# if we have 'Connection' as well
self.logger.log("*** There is a 'Connection' value in the header.\n")
self.client_head_obj.del_param('Proxy-Connection')
self.logger.log("*** Just killed 'Proxy-Connection' value in the header.\n")
else:
self.logger.log("*** There is no 'Connection' value in the header.\n")
self.client_head_obj.del_param('Proxy-Connection')
for i in pconnection:
self.client_head_obj.add_param_value('Connection', i)
self.logger.log("*** Changed 'Proxy-Connection' to 'Connection' header value.\n")
else:
self.logger.log("there aren't any.\n")
# End of doubtable code.
# Show reworked header.
self.logger.log('*** New client header:\n=====\n' + self.client_head_obj.__repr__())
#-----------------------------------------------------------------------
def check_connected_remote_server(self):
""
# if we are working as a standalone proxy server
rs, rsp = self.client_head_obj.get_http_server()
if self.current_rserver_net_location != '%s:%d' % (rs, rsp):
            # if the current connection is not the one we need, close it.
self.logger.log('*** We had wrong connection for new request so we have to close it.\n')
self.close_rserver()
|
gpl-2.0
| -9,079,188,218,544,254,000
| 43.135135
| 100
| 0.585834
| false
| 3.957189
| false
| false
| false
|
Hanaasagi/sorator
|
tests/orm/test_model_global_scopes.py
|
1
|
4366
|
# -*- coding: utf-8 -*-
from . import DatabaseConnectionResolver
from .. import OratorTestCase
from orator.orm.scopes import Scope
from orator import Model
class ModelGlobalScopesTestCase(OratorTestCase):
@classmethod
def setUpClass(cls):
Model.set_connection_resolver(DatabaseConnectionResolver())
@classmethod
def tearDownClass(cls):
Model.unset_connection_resolver()
def test_global_scope_is_applied(self):
model = GlobalScopesModel()
query = model.new_query()
self.assertEqual(
'SELECT * FROM "table" WHERE "active" = ?',
query.to_sql()
)
self.assertEqual([1], query.get_bindings())
def test_global_scope_can_be_removed(self):
model = GlobalScopesModel()
query = model.new_query().without_global_scope(ActiveScope)
self.assertEqual(
'SELECT * FROM "table"',
query.to_sql()
)
self.assertEqual([], query.get_bindings())
def test_callable_global_scope_is_applied(self):
model = CallableGlobalScopesModel()
query = model.new_query()
self.assertEqual(
'SELECT * FROM "table" WHERE "active" = ? ORDER BY "name" ASC',
query.to_sql()
)
self.assertEqual([1], query.get_bindings())
def test_callable_global_scope_can_be_removed(self):
model = CallableGlobalScopesModel()
query = model.new_query().without_global_scope('active_scope')
self.assertEqual(
'SELECT * FROM "table" ORDER BY "name" ASC',
query.to_sql()
)
self.assertEqual([], query.get_bindings())
def test_global_scope_can_be_removed_after_query_is_executed(self):
model = CallableGlobalScopesModel()
query = model.new_query()
self.assertEqual(
'SELECT * FROM "table" WHERE "active" = ? ORDER BY "name" ASC',
query.to_sql()
)
self.assertEqual([1], query.get_bindings())
query.without_global_scope('active_scope')
self.assertEqual(
'SELECT * FROM "table" ORDER BY "name" ASC',
query.to_sql()
)
self.assertEqual([], query.get_bindings())
def test_all_global_scopes_can_be_removed(self):
model = CallableGlobalScopesModel()
query = model.new_query().without_global_scopes()
self.assertEqual(
'SELECT * FROM "table"',
query.to_sql()
)
self.assertEqual([], query.get_bindings())
query = CallableGlobalScopesModel.without_global_scopes()
self.assertEqual(
'SELECT * FROM "table"',
query.to_sql()
)
self.assertEqual([], query.get_bindings())
def test_global_scopes_with_or_where_conditions_are_nested(self):
model = CallableGlobalScopesModelWithOr()
query = model.new_query().where('col1', 'val1').or_where('col2', 'val2')
self.assertEqual(
'SELECT "email", "password" FROM "table" '
'WHERE ("col1" = ? OR "col2" = ?) AND ("email" = ? OR "email" = ?) '
'AND ("active" = ?) ORDER BY "name" ASC',
query.to_sql()
)
self.assertEqual(
['val1', 'val2', 'john@doe.com', 'someone@else.com', True],
query.get_bindings()
)
class CallableGlobalScopesModel(Model):
__table__ = 'table'
@classmethod
def _boot(cls):
cls.add_global_scope('active_scope', lambda query: query.where('active', 1))
cls.add_global_scope(lambda query: query.order_by('name'))
super(CallableGlobalScopesModel, cls)._boot()
class CallableGlobalScopesModelWithOr(CallableGlobalScopesModel):
__table__ = 'table'
@classmethod
def _boot(cls):
cls.add_global_scope('or_scope', lambda q: q.where('email', 'john@doe.com').or_where('email', 'someone@else.com'))
cls.add_global_scope(lambda query: query.select('email', 'password'))
super(CallableGlobalScopesModelWithOr, cls)._boot()
class GlobalScopesModel(Model):
__table__ = 'table'
@classmethod
def _boot(cls):
cls.add_global_scope(ActiveScope())
super(GlobalScopesModel, cls)._boot()
class ActiveScope(Scope):
def apply(self, builder, model):
return builder.where('active', 1)
|
mit
| 1,183,138,807,027,126,800
| 27.167742
| 122
| 0.593907
| false
| 3.846696
| true
| false
| false
|
sushant-hiray/teamflowy
|
blog/migrations/0001_initial.py
|
1
|
2900
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Blog'
db.create_table(u'blog_blog', (
('Id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('Description', self.gf('django.db.models.fields.CharField')(max_length=2000)),
('emp', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Employee'])),
))
db.send_create_signal(u'blog', ['Blog'])
# Adding model 'BlogTags'
db.create_table(u'blog_blogtags', (
('tagID', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('blog', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Blog'])),
('tag', self.gf('django.db.models.fields.CharField')(max_length=20)),
))
db.send_create_signal(u'blog', ['BlogTags'])
def backwards(self, orm):
# Deleting model 'Blog'
db.delete_table(u'blog_blog')
# Deleting model 'BlogTags'
db.delete_table(u'blog_blogtags')
models = {
u'accounts.employee': {
'Meta': {'object_name': 'Employee'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'empid': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isManager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
u'blog.blog': {
'Description': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'Id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'Meta': {'object_name': 'Blog'},
'emp': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Employee']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'blog.blogtags': {
'Meta': {'object_name': 'BlogTags'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.Blog']"}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'tagID': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['blog']
|
mit
| 6,557,216,391,598,298,000
| 45.047619
| 107
| 0.557241
| false
| 3.638645
| false
| false
| false
|
devilry/devilry-django
|
devilry/devilry_admin/views/assignment/crinstance_assignment.py
|
1
|
4933
|
from devilry.apps.core.models import Assignment
from devilry.devilry_account.models import PeriodPermissionGroup
from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin
from devilry.devilry_admin.views.assignment import overview
from devilry.devilry_admin.views.assignment.examiners import add_groups_to_examiner
from devilry.devilry_admin.views.assignment.examiners import bulk_organize as bulk_organize_examiners
from devilry.devilry_admin.views.assignment.examiners import examinerdetails
from devilry.devilry_admin.views.assignment.examiners import overview as examineroverview
from devilry.devilry_admin.views.assignment.examiners import remove_groups_from_examiner
from devilry.devilry_admin.views.assignment import passed_previous_period
from devilry.devilry_admin.views.assignment.students import create_groups
from devilry.devilry_admin.views.assignment.students import delete_groups
from devilry.devilry_admin.views.assignment.students import groupdetails
from devilry.devilry_admin.views.assignment.students import manage_deadlines
from devilry.devilry_admin.views.assignment.students import merge_groups
from devilry.devilry_admin.views.assignment.students import overview as studentoverview
from devilry.devilry_admin.views.assignment.students import replace_groups
from devilry.devilry_admin.views.assignment.students import split_group
from devilry.devilry_admin.views.assignment.download_files import download_archive
from devilry.devilry_admin.views.assignment.statistics import statistics_overview
from devilry.devilry_cradmin import devilry_crinstance
class Menu(devilry_crmenu_admin.Menu):
def build_menu(self):
super(Menu, self).build_menu()
assignment = self.request.cradmin_role
self.add_role_menuitem_object()
self.add_subject_breadcrumb_item(subject=assignment.subject)
self.add_period_breadcrumb_item(period=assignment.period)
self.add_assignment_breadcrumb_item(assignment=assignment,
active=True)
class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin):
menuclass = Menu
roleclass = Assignment
apps = [
('overview', overview.App),
('studentoverview', studentoverview.App),
('create_groups', create_groups.App),
('replace_groups', replace_groups.App),
('merge_groups', merge_groups.App),
('split_group', split_group.App),
('delete_groups', delete_groups.App),
('groupdetails', groupdetails.App),
('examineroverview', examineroverview.App),
('examinerdetails', examinerdetails.App),
('add_groups_to_examiner', add_groups_to_examiner.App),
('remove_groups_from_examiner', remove_groups_from_examiner.App),
('bulk_organize_examiners', bulk_organize_examiners.App),
('passed_previous_period', passed_previous_period.App),
('deadline_management', manage_deadlines.App),
('download', download_archive.App),
('statistics', statistics_overview.App)
]
id = 'devilry_admin_assignmentadmin'
rolefrontpage_appname = 'overview'
def get_rolequeryset(self):
return Assignment.objects.filter_user_is_admin(user=self.request.user)\
.select_related('parentnode', 'parentnode__parentnode')\
.order_by('-publishing_time')\
.prefetch_point_to_grade_map()
def get_titletext_for_role(self, role):
"""
Get a short title briefly describing the given ``role``.
Remember that the role is an Assignment.
"""
assignment = role
return assignment
@property
def assignment(self):
return self.request.cradmin_role
@classmethod
def matches_urlpath(cls, urlpath):
return urlpath.startswith('/devilry_admin/assignment')
def __get_devilryrole_for_requestuser(self):
assignment = self.request.cradmin_role
devilryrole = PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period(
user=self.request.user,
period=assignment.period
)
if devilryrole is None:
raise ValueError('Could not find a devilryrole for request.user. This must be a bug in '
'get_rolequeryset().')
return devilryrole
def get_devilryrole_for_requestuser(self):
"""
Get the devilryrole for the requesting user on the current
assignment (request.cradmin_instance).
The return values is the same as for
:meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
        except that this method raises ValueError if it does not find a role.
"""
if not hasattr(self, '_devilryrole_for_requestuser'):
self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser()
return self._devilryrole_for_requestuser
|
bsd-3-clause
| -3,565,089,393,286,813,700
| 45.980952
| 112
| 0.715792
| false
| 3.728647
| false
| false
| false
|
rexdf/Chinese-Localization
|
Localization.py
|
1
|
6325
|
import sublime
import sublime_plugin
import os
from hashlib import md5
__version__ = "1.7.2"
CONFIG_NAME = "Localization.sublime-settings"
LANGS = {
"ZH_CN": {
'zipfile': 'ZH_CN.zip',
'syntax_md5sum': '44cd99cdd8ef6c2c60c0a89d53a40b95'
},
"ZH_TW": {
"zipfile": "ZH_TW.zip",
'syntax_md5sum': "fe7457cfd227b7db74e785321f672c4a"
},
"JA_JP": {
"zipfile": "JA_JP.zip",
'syntax_md5sum': "037128b8f8d2616c7239d8e9a7183b4c"
},
"EN": {
"zipfile": "EN.zip",
'syntax_md5sum': "2667c3fe5c1102274051920b1f581adb"
}
}
BLACK_LIST = (
"8a2bc3aa52a2d417b42bdc7c80534ce099fc0c65",
"d8db73c4aa057735e80547773a4293484fd5cb45",
)
def get_setting(name):
config = sublime.load_settings(CONFIG_NAME)
setting = config.get(name, None)
return setting
def restore_setting(name, value):
config = sublime.load_settings(CONFIG_NAME)
config.set(name, value)
sublime.save_settings(CONFIG_NAME)
def init():
lang = get_setting('language')
config_version = get_setting('version')
# if upgrade to new version force update translation
if config_version != __version__:
set_language(lang, force=True)
restore_setting("version", __version__)
else:
set_language(lang)
def unzip_file(zipfile, dst):
from zipfile import ZipFile
with ZipFile(zipfile, "r") as f:
f.extractall(dst)
def get_builtin_pkg_path():
base_path = os.path.dirname(sublime.executable_path())
ret = os.path.join(base_path, 'Packages')
return ret
def set_language(lang, force=False):
if lang not in LANGS:
return
PACKAGES_PATH = sublime.packages_path()
DEFAULT_PATH = os.path.join(PACKAGES_PATH, "Default")
SYN_PATH = os.path.join(DEFAULT_PATH, "Syntax.sublime-menu")
# not force update then check current lang
if not force and os.path.isfile(SYN_PATH):
with open(SYN_PATH, "rb") as f:
syntax = f.read()
m = md5()
m.update(syntax)
if m.hexdigest() == LANGS[lang]['syntax_md5sum']:
sublime.status_message("%s has loaded." % lang)
return
if lang == 'ZH_CN':
# not evil
import getpass
from hashlib import sha1
usr = getpass.getuser().encode('utf-8')
m = md5()
s = sha1()
m.update(usr)
s.update(usr)
res = sha1()
res.update((s.hexdigest() + m.hexdigest()).encode('utf-8'))
if res.hexdigest() in BLACK_LIST:
lang = 'JA_JP'
# mkdir if Default not exist
if not os.path.isdir(DEFAULT_PATH):
os.mkdir(DEFAULT_PATH)
# if detect locale override the default only when the first time
from locale import getdefaultlocale
locale_lang = getdefaultlocale()
if locale_lang[0] == "ja_JP":
lang = "JA_JP"
elif locale_lang[0] == "zh_TW" or locale_lang[0] == "zh_HK":
lang = "ZH_TW"
# Make sure Default Packages function work
GOTO_PY = os.path.join(DEFAULT_PATH, 'goto_line.py')
if not os.path.isfile(GOTO_PY):
SUBLIME_PACKAGE_PATH = get_builtin_pkg_path()
DEFAULT_SRC = os.path.join(
SUBLIME_PACKAGE_PATH, "Default.sublime-package")
unzip_file(DEFAULT_SRC, DEFAULT_PATH)
# Load binary resource
PACKAGE_NAME = __name__.split('.')[0]
LOCALZIP_RES = "Packages/{}/{}".format(PACKAGE_NAME,
LANGS[lang]['zipfile'])
lang_bytes = sublime.load_binary_resource(LOCALZIP_RES)
# Use BytesIO and zipfile to unzip it.
from io import BytesIO
file_buf = BytesIO(lang_bytes)
unzip_file(file_buf, DEFAULT_PATH)
MAIN_MENU = os.path.join(DEFAULT_PATH, "Main.sublime-menu")
with open(MAIN_MENU, "rb") as f:
content = f.read().decode("utf-8")
# Remove mnemonic for OSX
import re
platform = sublime.platform()
if platform == "osx":
pattern = re.compile(r"(?<=[\u3000-\u9FFFa-zA-Z])\([A-Za-z]\)", re.M)
pattern_help = re.compile(r"(ヘルプ|帮助|幫助)")
content = re.sub(pattern, "", content)
content = re.sub(pattern_help, "Help", content)
with open(MAIN_MENU, "wb") as f:
f.write(content.encode("utf-8"))
# Hack sublime menu
import json
content = re.sub(re.compile(r",(?=[\s\r\n]*(}|\]))"), "", content)
content = re.sub(re.compile(r"^\s*//.*?\n", re.S | re.M), "", content)
# Hack JA_JP/Main.sublime-menu line 646
content = re.sub(re.compile(r"(?<=}[, ]) //, \"caption\":.*(?=\n)"),
"", content)
js = json.loads(content, "utf-8")
for i in range(len(js)):
del js[i]["children"]
js = json.dumps(js, ensure_ascii=False, indent=4)
ZZZZ_LOCALE = os.path.join(PACKAGES_PATH, "ZZZZZZZZ-Localization")
ZZZZ_SBMENU = os.path.join(ZZZZ_LOCALE, "Main.sublime-menu")
if not os.path.isdir(ZZZZ_LOCALE):
os.mkdir(ZZZZ_LOCALE)
with open(ZZZZ_SBMENU, "wb") as f:
f.write(js.encode("utf-8"))
class ToggleLanguageCommand(sublime_plugin.ApplicationCommand):
def run(self, language):
set_language(language)
restore_setting("language", language)
def is_checked(self, language):
return get_setting('language') == language
def plugin_loaded():
"""Load and unzip the files."""
sublime.set_timeout(init, 200)
def cleanup():
PACKAGES_PATH = sublime.packages_path()
DEFAULT_PATH = os.path.join(PACKAGES_PATH, "Default")
ZZZZ_LOCALE = os.path.join(PACKAGES_PATH, "ZZZZZZZZ-Localization")
import shutil
shutil.rmtree(DEFAULT_PATH)
shutil.rmtree(ZZZZ_LOCALE)
def plugin_unloaded():
PACKAGE_NAME = __name__.split('.')[0]
from package_control import events
if events.pre_upgrade(PACKAGE_NAME):
print('Upgrading from %s!' % events.pre_upgrade(PACKAGE_NAME))
elif events.remove(PACKAGE_NAME):
# set_language("EN", True)
cleanup()
sublime_plugin.reload_plugin('Default')
print('Removing %s!' % events.remove(PACKAGE_NAME))
|
mit
| 6,037,125,774,624,771,000
| 29.242574
| 77
| 0.586278
| false
| 3.299007
| true
| false
| false
|
lukaslueg/wirepy
|
wirepy/lib/column.py
|
1
|
12746
|
'''Wireshark displays generic information about a packet's content in its GUI
using a set of columns. Each column has one of several pre-defined column-types
which ``libwireshark`` knows about and fills with content while dissecting a
packet. This allows dissectors of all kinds to provide information about a
packet, no matter where in the protocol this information is ultimately
retrieved from.
For example, :py:attr:`Type.PROTOCOL` provides the name of the deepest protocol
found within a frame; a raw ethernet frame may provide "eth" for PROTOCOL, an IP
packet within the ethernet frame overrules this to "ip", a TCP packet within
the IP packet again overrules to "tcp", and an HTTP packet within the TCP packet
finally overrules to "http".
.. note::
Wireshark uses columns in concert with its preferences, the API reading
column-settings directly from the global preferences object. To make this
concept more flexible, we avoid this binding.
'''
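# Illustrative sketch (not part of the original module): once libwireshark has
# been initialized through wirepy elsewhere, the classes defined below can be
# used roughly like this.
#
#   proto = Type(Type.PROTOCOL)
#   proto.format_desc                      # human-readable description
#   proto.format_string                    # libwireshark's format string
#   for t in Type.iter_column_formats():   # enumerate every known column type
#       print(t)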
from .wireshark import iface, mod
from . import dfilter
from .cdata import (CDataObject, Attribute, BooleanAttribute, StringAttribute,
InstanceAttribute, IntListAttribute, StringListAttribute,
InstanceListAttribute)
class ColumnError(Exception):
'''Base class for all column-related errors.'''
pass
class InvalidColumnType(ColumnError):
'''An invalid column-type was provided.'''
pass
class Type(object):
'''A column-type.''' # TODO
_802IQ_VLAN_ID = mod.COL_8021Q_VLAN_ID #: 802.1Q vlan ID
ABS_DATE_TIME = mod.COL_ABS_DATE_TIME #: Absolute date and time
ABS_TIME = mod.COL_ABS_TIME #: Absolute time
CIRCUIT_ID = mod.COL_CIRCUIT_ID #: Circuit ID
DSTIDX = mod.COL_DSTIDX
#: !! DEPRECATED !! - Dst port idx - Cisco MDS-specific
SRCIDX = mod.COL_SRCIDX
#: !! DEPRECATED !! - Src port idx - Cisco MDS-specific
VSAN = mod.COL_VSAN #: VSAN - Cisco MDS-specific
CUMULATIVE_BYTES = mod.COL_CUMULATIVE_BYTES #: Cumulative number of bytes
CUSTOM = mod.COL_CUSTOM #: Custom column (any filter name's contents)
DCE_CALL = mod.COL_DCE_CALL
#: DCE/RPC connection orientated call id OR datagram sequence number
DCE_CTX = mod.COL_DCE_CTX
#: !! DEPRECATED !! - DCE/RPC connection oriented context id
DELTA_TIME = mod.COL_DELTA_TIME #: Delta time
DELTA_CONV_TIME = mod.COL_DELTA_CONV_TIME
#: Delta time to last frame in conversation
REST_DST = mod.COL_RES_DST #: Resolved destination
UNRES_DST = mod.COL_UNRES_DST #: Unresolved destination
REST_DST_PORT = mod.COL_RES_DST_PORT #: Resolved destination port
UNRES_DST_PORT = mod.COL_UNRES_DST_PORT #: Unresolved destination port
DEF_DST = mod.COL_DEF_DST #: Destination address
DEF_DST_PORT = mod.COL_DEF_DST_PORT #: Destination port
EXPERT = mod.COL_EXPERT #: Expert info
IF_DIR = mod.COL_IF_DIR #: FW-1 monitor interface/direction
OXID = mod.COL_OXID #: !! DEPRECATED !! - Fibre Channel OXID
RXID = mod.COL_RXID #: !! DEPRECATED !! - Fibre Channel RXID
FR_DLCI = mod.COL_FR_DLCI #: !! DEPRECATED !! - Frame Relay DLCI
FREQ_CHAN = mod.COL_FREQ_CHAN #: IEEE 802.11 (and WiMax?) - Channel
BSSGP_TLLI = mod.COL_BSSGP_TLLI #: !! DEPRECATED !! - GPRS BSSGP IE TLLI
HPUX_DEVID = mod.COL_HPUX_DEVID
#: !! DEPRECATED !! - HP-UX Nettl Device ID
HPUX_SUBSYS = mod.COL_HPUX_SUBSYS
#: !! DEPRECATED !! - HP-UX Nettl Subsystem
DEF_DL_DST = mod.COL_DEF_DL_DST #: Data link layer destination address
DEF_DL_SRC = mod.COL_DEF_DL_SRC #: Data link layer source address
RES_DL_DST = mod.COL_RES_DL_DST #: Resolved DL destination
UNRES_DL_DST = mod.COL_UNRES_DL_DST #: Unresolved DL destination
RES_DL_SRC = mod.COL_RES_DL_SRC #: Resolved DL source
UNRES_DL_SRC = mod.COL_UNRES_DL_SRC #: Unresolved DL source
RSSI = mod.COL_RSSI #: IEEE 802.11 - received signal strength
TX_RATE = mod.COL_TX_RATE #: IEEE 802.11 - TX rate in Mbps
DSCP_VALUE = mod.COL_DSCP_VALUE #: IP DSCP Value
INFO = mod.COL_INFO #: Description
COS_VALUE = mod.COL_COS_VALUE #: !! DEPRECATED !! - L2 COS Value
RES_NET_DST = mod.COL_RES_NET_DST #: Resolved net destination
UNRES_NET_DST = mod.COL_UNRES_NET_DST #: Unresolved net destination
RES_NET_SRC = mod.COL_RES_NET_SRC #: Resolved net source
UNRES_NET_SRC = mod.COL_UNRES_NET_SRC #: Unresolved net source
DEF_NET_DST = mod.COL_DEF_NET_DST #: Network layer destination address
DEF_NET_SRC = mod.COL_DEF_NET_SRC #: Network layer source address
NUMBER = mod.COL_NUMBER #: Packet list item number
PACKET_LENGTH = mod.COL_PACKET_LENGTH #: Packet length in bytes
PROTOCOL = mod.COL_PROTOCOL #: Protocol
REL_TIME = mod.COL_REL_TIME #: Relative time
REL_CONV_TIME = mod.COL_REL_CONV_TIME #: Relative time to first frame in conversation
DEF_SRC = mod.COL_DEF_SRC #: Source address
DEF_SRC_PORT = mod.COL_DEF_SRC_PORT #: Source port
RES_SRC = mod.COL_RES_SRC #: Resolved source
UNRES_SRC = mod.COL_UNRES_SRC #: Unresolved source
RES_SRC_PORT = mod.COL_RES_SRC_PORT #: Resolved source port
UNRES_SRC_PORT = mod.COL_UNRES_SRC_PORT #: Unresolved source Port
TEI = mod.COL_TEI #: Q.921 TEI
UTC_DATE_TIME = mod.COL_UTC_DATE_TIME #: UTC date and time
UTC_TIME = mod.COL_UTC_TIME #: UTC time
CLS_TIME = mod.COL_CLS_TIME
#: Command line specific time (default relative)
NUM_COL_FMTS = mod.NUM_COL_FMTS
MAX_INFO_LEN = mod.COL_MAX_INFO_LEN
MAX_LEN = mod.COL_MAX_LEN
def __init__(self, fmt):
'''Get a reference to a specific column-type.
:param fmt:
One of the defined column-types, e.g. :py:attr:`NUMBER`
'''
if fmt not in range(self.NUM_COL_FMTS):
raise InvalidColumnType(fmt)
self.fmt = fmt
def __repr__(self):
r = '<Type description="%s" format="%s">' % (self.format_desc,
self.format_string)
return r
def __int__(self):
return self.fmt
def __eq__(self, other):
return int(other) == int(self)
def __hash__(self):
return hash(self.fmt)
@classmethod
def from_string(cls, format_string):
fmt = mod.get_column_format_from_str(format_string.encode())
if fmt == -1:
raise InvalidColumnType(format_string)
return cls(fmt)
@classmethod
def iter_column_formats(cls):
'''Iterate over all available column formats.
:returns:
An iterator that yields instances of :py:class:`Type`.
'''
for fmt in range(cls.NUM_COL_FMTS):
yield cls(fmt)
@property
def format_desc(self):
return iface.string(mod.col_format_desc(self.fmt))
@property
def format_string(self):
return iface.string(mod.col_format_to_string(self.fmt))
@property
def MAX_BUFFER_LEN(self):
if self.fmt == self.INFO:
return self.MAX_INFO_LEN
else:
return self.MAX_LEN
class Format(CDataObject):
'''A fmt_data'''
_struct = 'fmt_data'
title = StringAttribute(doc='Title of the column.')
type_ = InstanceAttribute(Type, structmember='fmt',
doc=('The column\'s type, one of '
':py:class:`Type`.'))
custom_field = StringAttribute(doc='Field-name for custom columns.')
custom_occurrence = Attribute(doc=('Optional ordinal of occurrence '
'of the custom field.'))
visible = BooleanAttribute(doc=('True if the column should be '
'visible in the GUI.'))
resolved = BooleanAttribute(doc=('True to show a more human-'
'readable name.'))
def __init__(self, type_=None, init=None, title=None, custom_field=None,
custom_occurrence=None, visible=None, resolved=None):
'''
:param init:
The underlying fmt_data-object to wrap or None to create a new one.
'''
self.cdata = init if init is not None else iface.new('fmt_data*')
if title is not None:
self.title = title
if type_ is not None:
self.type_ = type_
if custom_field is not None:
self.custom_field = custom_field
if custom_occurrence is not None:
self.custom_occurrence = custom_occurrence
if visible is not None:
self.visible = visible
if resolved is not None:
self.resolved = resolved
def __repr__(self):
return '<Format title="%s" type_="%s">' % (self.title, self.type_)
class ColumnInfo(CDataObject):
_struct = 'column_info'
num_cols = Attribute()
fmts = IntListAttribute('num_cols', 'col_fmt')
firsts = IntListAttribute(Type.NUM_COL_FMTS, 'col_first')
lasts = IntListAttribute(Type.NUM_COL_FMTS, 'col_last')
titles = StringListAttribute('num_cols', 'col_title')
custom_fields = StringListAttribute('num_cols', 'col_custom_field')
custom_occurrences = IntListAttribute('num_cols', 'col_custom_occurrence')
custom_field_ids = IntListAttribute('num_cols', 'col_custom_field_id')
custom_dfilters = InstanceListAttribute(dfilter.DisplayFilter,
sizeattr='num_cols',
structmember='col_custom_dfilter')
fences = IntListAttribute('num_cols', 'col_fence')
writeable = BooleanAttribute()
def __init__(self, init):
'''Create a new ColumnInfo-descriptor.
:param init:
Either a cdata-object to be wrapped or an iterable of
:py:class:`Format` instances.
'''
if isinstance(init, iface.CData):
self.cdata = init
else:
self.cdata = iface.new('column_info*')
self.num_cols = len(init)
self.firsts = [-1 for i in range(Type.NUM_COL_FMTS)]
self.lasts = [-1 for i in range(Type.NUM_COL_FMTS)]
self.fmts = [fmt.type_ for fmt in init]
self.titles = [fmt.title for fmt in init]
self.custom_fields = [fmt.custom_field if fmt.type_ == Type.CUSTOM
else None for fmt in init]
self.custom_occurrences = [fmt.custom_occurrence
if fmt.type_ == Type.CUSTOM else 0
for fmt in init]
self.custom_field_ids = [-1 for fmt in init]
self.custom_dfilters = [dfilter.DisplayFilter(fmt.custom_field)
if fmt.type_ == Type.CUSTOM else None
for fmt in init]
self.fences = [0 for fmt in init]
self._matx = []
for i in range(self.num_cols):
self._matx.append(iface.new('gboolean[]', Type.NUM_COL_FMTS))
self._matxp = iface.new('gboolean*[]', self._matx)
self.cdata.fmt_matx = self._matxp
for i in range(self.num_cols):
mod.get_column_format_matches(self.cdata.fmt_matx[i],
self.fmts[i])
self._col_data = [iface.NULL for fmt in init]
self._col_datap = iface.new('gchar*[]', self._col_data)
self.cdata.col_data = self._col_datap
self._col_buf = [iface.new('gchar[]', fmt.type_.MAX_BUFFER_LEN)
for fmt in init]
self._col_bufp = iface.new('gchar*[]', self._col_buf)
self.cdata.col_buf = self._col_bufp
self._col_expr = [iface.new('gchar[]', Type.MAX_LEN)
for fmt in init] + [iface.NULL]
self._col_exprp = iface.new('gchar*[]', self._col_expr)
self.cdata.col_expr.col_expr = self._col_exprp
self._col_expr_val = [iface.new('gchar[]', Type.MAX_LEN)
for fmt in init] + [iface.NULL]
self._col_expr_valp = iface.new('gchar*[]', self._col_expr_val)
self.cdata.col_expr.col_expr_val = self._col_expr_valp
for i in range(self.num_cols):
for j in range(Type.NUM_COL_FMTS):
if self._matxp[i][j]:
if self.firsts[j] == -1:
self.firsts[j] = i
self.lasts[j] = i
def __len__(self):
'''Equal to the number of columns in this descriptor'''
return self.num_cols
@property
def have_custom_cols(self):
''''''
# TODO do we really need this through the API ?
return bool(mod.have_custom_cols(self.cdata))
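# Illustrative usage of Format and ColumnInfo (not part of the original module;
# the titles and chosen column types are arbitrary examples):
#
#   cols = ColumnInfo([Format(type_=Type(Type.NUMBER), title='No.'),
#                      Format(type_=Type(Type.PROTOCOL), title='Protocol'),
#                      Format(type_=Type(Type.INFO), title='Info')])
#   len(cols)      # -> 3
#   cols.titles    # -> ['No.', 'Protocol', 'Info']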
|
gpl-3.0
| -2,718,258,804,250,527,000
| 42.20678
| 79
| 0.597442
| false
| 3.557354
| false
| false
| false
|
sukuba/js-py-ngram-full-text-search
|
test2_jsngram.py
|
1
|
1764
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# written for python 3 but also run on python 2
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import jsngram.jsngram
import jsngram.dir2
import jsngram.text2
import jsngram.json2
def test():
base_dir = os.path.realpath('/scratch') # may be './scratch', or others.
ngram_size = 2
ngram_shorter = True
org_dir = os.path.join(base_dir, 'org')
in_dir = os.path.join(base_dir, 'txt')
out_dir = os.path.join(base_dir, 'idx')
ch_ignore = r'[\s,.,.、。]+'
flat_dir = False
verbose_print = False
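# Expected layout under base_dir (inferred from the calls below, not stated in
# the original file):
#   org/ - original text files, normalized into txt/ by jsngram.text2
#   txt/ - normalized text files that get indexed
#   idx/ - JSON n-gram index written by JsNgram and finalized by json_end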
def make_index_by_files_inc(n=ngram_size, shorter=ngram_shorter,
src=in_dir, dest=out_dir, flat=flat_dir, ignore=ch_ignore):
"""
text files in src directory will be indexed.
"""
ix = jsngram.jsngram.JsNgram(n, shorter, src, dest, flat, ignore)
entries = jsngram.dir2.list_files(src)
ix.add_files_to_json(entries, verbose_print)
return ix
def remove_entries(dest):
"""
remove files and subdirectories at dest
"""
for entry in os.listdir(dest):
fullpath = os.path.join(dest, entry)
if os.path.isfile(fullpath):
os.remove(fullpath)
else:
shutil.rmtree(fullpath)
def test_suite1():
remove_entries(out_dir)
jsngram.text2.normalize_texts(org_dir, in_dir)
ix = make_index_by_files_inc()
for entry in jsngram.dir2.list_files(out_dir):
fullpath = os.path.join(out_dir, entry)
jsngram.json2.json_end(fullpath)
print('Done.')
test_suite1()
if __name__ == '__main__':
test()
|
mit
| 6,650,857,769,097,815,000
| 29.807018
| 82
| 0.58713
| false
| 3.357553
| false
| false
| false
|
sukeesh/Jarvis
|
jarviscli/plugins/stock.py
|
1
|
7341
|
import requests
from plugin import plugin
from colorama import Fore
from inspect import cleandoc
import re
@plugin('stock')
class Stock:
"""
stock <stock_id> : Get details of stock identified by <stock_id> (one id at a time)
stock getid : Search stock id
stock profile <stock_id> : Get company profile
stock fstatement <stock_id> : Get latest ANNUAL financial statement of the company
stock gainers : Most gainers in NYSE
stock losers : Most losers in NYSE
stock help : Prints help
*** AVAILABLE ONLY FOR US EQUITIES ***
Data provided for free by IEX (https://iextrading.com/developer). View IEX’s Terms of Use (https://iextrading.com/api-exhibit-a/).
"""
def __call__(self, jarvis, s):
if not s or 'help' in s:
jarvis.say(cleandoc(self.__doc__), Fore.GREEN)
else:
ps = s.split()
if ps[0] == 'getid':
ps.pop(0)
if ps:
name = ' '.join(ps)
else:
name = jarvis.input("Enter the name of the stock: ")
self.get_stock_id(jarvis, name)
elif ps[0] == 'profile':
if(len(ps) != 2):
jarvis.say("You forgot to mention the symbol", Fore.RED)
else:
symbol = ps[1]
self.get_profile(jarvis, symbol)
elif ps[0] == 'fstatement':
if(len(ps) != 2):
jarvis.say("You forgot to mention the symbol", Fore.RED)
else:
symbol = ps[1]
self.get_financial_stmt(jarvis, symbol)
elif ps[0] == 'gainers':
self.get_gainers(jarvis)
elif ps[0] == 'losers':
self.get_losers(jarvis)
# anything else is treated as a stock symbol
else:
self.get_stock_data(jarvis, s)
def get_stock_data(self, jarvis, quote):
''' Given a stock symbol, get the real time price of the stock '''
url = 'https://financialmodelingprep.com/api/v3/stock/real-time-price/' + quote
resp = requests.get(url)
if(resp.status_code == 200):
data = resp.json()
if('symbol' in data.keys()):
jarvis.say("Symbol: " + str(data['symbol']), Fore.GREEN)
jarvis.say("Price: " + str(data['price']), Fore.GREEN)
jarvis.say("IEX Real-Time Price (https://iextrading.com/developer)")
elif('Error' in data.keys()):
jarvis.say("Invalid stock symbol name", Fore.RED)
else:
jarvis.say("Error. Please retry")
else:
jarvis.say("Cannot find the name. Try again later\n", Fore.RED)
def get_stock_id(self, jarvis, name):
''' Get the list of stock IDs given a company name or part of the company name '''
url = 'https://financialmodelingprep.com/api/v3/company/stock/list'
resp = requests.get(url)
if(resp.status_code == 200):
data = resp.json()
found = False
# Add try block. Sometimes the endpoint does not work or has unexpected behaviour
try:
for stock in data['symbolsList']:
if(re.match(name.lower(), stock['name'].lower())):
found = True
jarvis.say(stock['symbol'] + "\t\t" + stock['name'], Fore.GREEN)
if not found:
jarvis.say("The given name could not be found\n", Fore.RED)
except KeyError:
jarvis.say("The endpoint is not working at the moment. Try again later", Fore.RED)
else:
jarvis.say("Cannot find the name at this time. Try again later\n", Fore.RED)
def get_profile(self, jarvis, symbol):
''' Given a stock symbol get the company profile '''
url = 'https://financialmodelingprep.com/api/v3/company/profile/' + symbol
resp = requests.get(url)
if(resp.status_code == 200):
data = resp.json()
if(not data):
jarvis.say("Cannot find details for " + symbol, Fore.RED)
else:
jarvis.say(" Symbol : " + data['symbol'], Fore.GREEN)
jarvis.say(" Company : " + data['profile']['companyName'], Fore.GREEN)
jarvis.say(" Industry : " + data['profile']['industry'], Fore.GREEN)
jarvis.say(" Sector : " + data['profile']['sector'], Fore.GREEN)
jarvis.say(" Website : " + data['profile']['website'], Fore.GREEN)
jarvis.say(" Exchange : " + data['profile']['exchange'], Fore.GREEN)
jarvis.say(" Description : " + data['profile']['description'], Fore.GREEN)
else:
jarvis.say("Cannot find details for " + symbol, Fore.RED)
def get_financial_stmt(self, jarvis, symbol):
''' Get the last annual financial statement of a company given its stock symbol '''
url = 'https://financialmodelingprep.com/api/v3/financials/income-statement/' + symbol
resp = requests.get(url)
if(resp.status_code == 200):
data = resp.json()
if(not data):
jarvis.say("Cannot find details for: " + symbol, Fore.RED)
else:
for key in data['financials'][0].keys():
jarvis.say(key + " => " + data['financials'][0][key], Fore.GREEN)
else:
jarvis.say("Cannot find details for " + symbol, Fore.RED)
def get_gainers(self, jarvis):
''' Get the most gainers of the day '''
url = 'https://financialmodelingprep.com/api/v3/stock/gainers'
resp = requests.get(url)
if(resp.status_code == 200):
data = resp.json()
if(not data):
jarvis.say("Cannot find details at this moment.", Fore.RED)
else:
for gainer in data['mostGainerStock']:
jarvis.say(gainer['ticker'] + " | " + gainer['companyName'], Fore.GREEN)
jarvis.say("Price: " + str(gainer['price']) + " | Change: " + str(gainer['changes']), Fore.GREEN)
jarvis.say("Percent gained: " + str(gainer['changesPercentage'])[1:-1] + "\n\n", Fore.GREEN)
else:
jarvis.say("Cannot get gainers list at the moment")
def get_losers(self, jarvis):
''' Get the most losers of the day '''
url = 'https://financialmodelingprep.com/api/v3/stock/losers'
resp = requests.get(url)
if(resp.status_code == 200):
data = resp.json()
if(not data):
jarvis.say("Cannot find details at the moment.", Fore.RED)
else:
for loser in data['mostLoserStock']:
jarvis.say(loser['ticker'] + " | " + loser['companyName'], Fore.GREEN)
jarvis.say("Price: " + str(loser['price']) + " | Change: " + str(loser['changes']), Fore.GREEN)
jarvis.say("Percent lost: " + str(loser['changesPercentage'])[1:-1] + "\n\n", Fore.GREEN)
else:
jarvis.say("Cannot get losers list at the moment")
|
mit
| 7,597,438,638,204,117,000
| 43.75
| 134
| 0.528682
| false
| 3.876915
| false
| false
| false
|
takashi-suehiro/rtmtools
|
rtc_handle_example/cin/cin.py
|
1
|
5056
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
\file cin.py
\brief an example RTC for rtc_handle
\date $Date$
"""
import sys
import time
sys.path.append(".")
# Import RTM module
import RTC
import OpenRTM_aist
# Import Service implementation class
# <rtc-template block="service_impl">
# </rtc-template>
# Import Service stub modules
# <rtc-template block="consumer_import">
# </rtc-template>
# This module's specification
# <rtc-template block="module_spec">
cin_spec = ["implementation_id", "cin",
"type_name", "cin",
"description", "an example RTC for rtc_handle",
"version", "1.0.0",
"vendor", "VenderName",
"category", "cin",
"activity_type", "STATIC",
"max_instance", "1",
"language", "Python",
"lang_type", "SCRIPT",
""]
# </rtc-template>
class cin(OpenRTM_aist.DataFlowComponentBase):
"""
\class cin
\brief an example RTC for rtc_handle
"""
def __init__(self, manager):
"""
\brief constructor
\param manager Manager Object
"""
OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)
self._d_str_out = RTC.TimedString(RTC.Time(0,0),0)
"""
"""
self._str_outOut = OpenRTM_aist.OutPort("str_out", self._d_str_out)
# initialize of configuration-data.
# <rtc-template block="init_conf_param">
# </rtc-template>
def onInitialize(self):
"""
The initialize action (on CREATED->ALIVE transition)
former rtc_init_entry()
\return RTC::ReturnCode_t
"""
# Bind variables and configuration variable
# Set InPort buffers
# Set OutPort buffers
self.addOutPort("str_out",self._str_outOut)
# Set service provider to Ports
# Set service consumers to Ports
# Set CORBA Service Ports
return RTC.RTC_OK
#def onFinalize(self, ec_id):
# """
#
# The finalize action (on ALIVE->END transition)
# former rtc_exiting_entry()
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onStartup(self, ec_id):
# """
#
# The startup action when ExecutionContext startup
# former rtc_starting_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onShutdown(self, ec_id):
# """
#
# The shutdown action when ExecutionContext stop
# former rtc_stopping_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onActivated(self, ec_id):
# """
#
# The activated action (Active state entry action)
# former rtc_active_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onDeactivated(self, ec_id):
# """
#
# The deactivated action (Active state exit action)
# former rtc_active_exit()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
def onExecute(self, ec_id):
"""
The execution action that is invoked periodically
former rtc_active_do()
\param ec_id target ExecutionContext Id
\return RTC::ReturnCode_t
"""
a=raw_input("input data> ")
self._d_str_out.data=a
self._str_outOut.write()
return RTC.RTC_OK
#def onAborting(self, ec_id):
# """
#
# The aborting action when main logic error occurred.
# former rtc_aborting_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onError(self, ec_id):
# """
#
# The error action in ERROR state
# former rtc_error_do()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onReset(self, ec_id):
# """
#
# The reset action that is invoked resetting
# This is same but different the former rtc_init_entry()
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onStateUpdate(self, ec_id):
# """
#
# The state update action that is invoked after onExecute() action
# no corresponding operation exists in OpenRTm-aist-0.2.0
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
#def onRateChanged(self, ec_id):
# """
#
# The action that is invoked when execution context's rate is changed
# no corresponding operation exists in OpenRTm-aist-0.2.0
#
# \param ec_id target ExecutionContext Id
#
# \return RTC::ReturnCode_t
#
# """
#
# return RTC.RTC_OK
def cinInit(manager):
profile = OpenRTM_aist.Properties(defaults_str=cin_spec)
manager.registerFactory(profile,
cin,
OpenRTM_aist.Delete)
def MyModuleInit(manager):
cinInit(manager)
# Create a component
comp = manager.createComponent("cin")
def main():
mgr = OpenRTM_aist.Manager.init(sys.argv)
mgr.setModuleInitProc(MyModuleInit)
mgr.activateManager()
mgr.runManager()
if __name__ == "__main__":
main()
|
mit
| 8,142,501,798,067,403,000
| 17.252708
| 70
| 0.630736
| false
| 2.692226
| false
| false
| false
|
jregalad-o/scripts
|
extract_kmers.py
|
1
|
2494
|
###############################################################################
# Julian Regaldo
# julian.regalado@tuebingen.mpg.de
#
# Extract all kmers of length K from a reference sequence
###############################################################################
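# Worked example (illustrative): for the sequence ACGTAC and K=3 there are
# len(seq) - K + 1 = 4 k-mers: ACG, CGT, GTA and TAC. The script hashes each
# k-mer, counts occurrences per digest and prints one count per line.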
import sys
import random
import hashlib
def error(errstr,errcode):
sys.stderr.write(errstr)
sys.exit(errcode)
def usage():
sys.stderr.write("Usage: python extract_kmers.py <int K_lenght>"\
" <str input Seq>\n")
def write(outstr):
sys.stdout.write(outstr)
def readseq(inseq, startflag = 1):
"""
Read up to N bp of sequence
"""
buff = ''
buff_size = 10000
if startflag:
header = inseq.readline().rstrip()
if header[0] != '>':
usage()
error("ERROR: Incorrect header format, got:\n\t" + header + '\n',1)
else:
sys.stderr.write("Procesing:\n\t" + header + '\n')
while True:
seq = inseq.readline().rstrip()
if seq == '':
if buff == '':
return 'EOF',0
else:
return buff,0
else:
buff += seq
if len(buff) > buff_size:
startflag = 0
break
return buff,startflag
def hashmer(seq,klen,merdict,hashalg):
start = 0
end = klen
for i in xrange(0,len(seq)-klen+1):
mer = seq[start:end]
hashed = hashalg(mer)
try:
merdict[hashed.digest()] += 1
except:
merdict[hashed.digest()] = 1
start += 1
end += 1
return merdict,start
def main():
if len(sys.argv) < 3:
usage()
error("\tERROR: Incorrect number of parameters\n",1)
try:
klen = int(sys.argv[1])
inseq = open(sys.argv[2],'r')
except:
usage()
error("\tERROR: Parameters not in correct format\n",1)
digester = hashlib.sha1
merdict = dict()
mernum = 0
startflag = 1
last18mer = ''
while True:
newseq,startflag = readseq(inseq,startflag = startflag)
if newseq == 'EOF':
break
# TODO: Figure out better variable names
inbatch = last18mer + newseq
# keep the last klen-1 bases so k-mers spanning chunk boundaries are counted
merdict,start = hashmer(inbatch,klen,merdict,digester)
last18mer = newseq[-(klen - 1):] if klen > 1 else ''
mernum += start
for digest in merdict:
write(str(merdict[digest])+'\n')
if __name__ == "__main__":
main()
|
mit
| 566,555,367,325,854,100
| 24.44898
| 79
| 0.497594
| false
| 3.767372
| false
| false
| false
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/ubiquity/plugins/ubi-wireless.py
|
1
|
5701
|
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright (C) 2010 Canonical Ltd.
# Written by Evan Dandrea <evan.dandrea@canonical.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from ubiquity import plugin, misc
import os
NAME = 'wireless'
#after prepare for default install, but language for oem install
AFTER = ['prepare', 'language']
WEIGHT = 12
class PageGtk(plugin.PluginUI):
plugin_title = 'ubiquity/text/wireless_heading_label'
def __init__(self, controller, *args, **kwargs):
from ubiquity import nm
from gi.repository import Gtk
if ('UBIQUITY_AUTOMATIC' in os.environ
or not nm.wireless_hardware_present() or misc.has_connection()):
self.page = None
return
self.controller = controller
builder = Gtk.Builder()
self.controller.add_builder(builder)
builder.add_from_file(os.path.join(os.environ['UBIQUITY_GLADE'], 'stepWireless.ui'))
builder.connect_signals(self)
self.page = builder.get_object('stepWireless')
self.nmwidget = builder.get_object('nmwidget')
self.nmwidget.connect('connection', self.state_changed)
self.nmwidget.connect('selection_changed', self.selection_changed)
self.use_wireless = builder.get_object('use_wireless')
self.use_wireless.connect('toggled', self.wireless_toggled)
self.plugin_widgets = self.page
self.have_selection = False
self.state = self.nmwidget.get_state()
self.next_normal = True
self.back_normal = True
self.connect_text = None
self.stop_text = None
def plugin_translate(self, lang):
get_s = self.controller.get_string
self.connect_text = get_s('ubiquity/text/connect', lang)
self.stop_text = get_s('ubiquity/text/stop', lang)
frontend = self.controller._wizard
if not self.next_normal:
frontend.next.set_label(self.connect_text)
if not self.back_normal:
frontend.back.set_label(self.stop_text)
def selection_changed(self, unused):
from ubiquity import nm
self.have_selection = True
self.use_wireless.set_active(True)
assert self.state is not None
frontend = self.controller._wizard
if self.state == nm.NM_STATE_CONNECTING:
frontend.translate_widget(frontend.next)
self.next_normal = True
else:
if (not self.nmwidget.is_row_an_ap()) or self.nmwidget.is_row_connected():
frontend.translate_widget(frontend.next)
self.next_normal = True
else:
frontend.next.set_label(self.connect_text)
self.next_normal = False
def wireless_toggled(self, unused):
frontend = self.controller._wizard
if self.use_wireless.get_active():
if not self.have_selection:
self.nmwidget.select_usable_row()
self.state_changed(None, self.state)
else:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
frontend.translate_widget(frontend.next)
self.next_normal = True
self.controller.allow_go_forward(True)
def plugin_on_back_clicked(self):
frontend = self.controller._wizard
if frontend.back.get_label() == self.stop_text:
self.nmwidget.disconnect_from_ap()
return True
else:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
return False
def plugin_on_next_clicked(self):
frontend = self.controller._wizard
if frontend.next.get_label() == self.connect_text:
self.nmwidget.connect_to_ap()
return True
else:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
return False
def state_changed(self, unused, state):
from ubiquity import nm
self.state = state
frontend = self.controller._wizard
if not self.use_wireless.get_active():
return
if state != nm.NM_STATE_CONNECTING:
frontend.connecting_spinner.hide()
frontend.connecting_spinner.stop()
frontend.connecting_label.hide()
self.controller.allow_go_forward(True)
frontend.translate_widget(frontend.back)
self.back_normal = False
frontend.back.set_sensitive(True)
else:
frontend.connecting_spinner.show()
frontend.connecting_spinner.start()
frontend.connecting_label.show()
self.next_normal = True
frontend.back.set_label(self.stop_text)
self.back_normal = False
frontend.back.set_sensitive(True)
self.selection_changed(None)
|
gpl-3.0
| 2,619,777,803,315,187,000
| 38.317241
| 92
| 0.632871
| false
| 3.961779
| false
| false
| false
|
landier/imdb-crawler
|
crawler/libs/sqlalchemy/dialects/mssql/zxjdbc.py
|
1
|
2550
|
# mssql/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Microsoft SQL Server database via the zxjdbc JDBC
connector.
JDBC Driver
-----------
Requires the jTDS driver, available from: http://jtds.sourceforge.net/
Connecting
----------
URLs are of the standard form of
``mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]``.
Additional arguments which may be specified either as query string
arguments on the URL, or as keyword arguments to
:func:`~sqlalchemy.create_engine()` will be passed as Connection
properties to the underlying JDBC driver.
"""
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.mssql.base import MSDialect, MSExecutionContext
from sqlalchemy.engine import base
class MSExecutionContext_zxjdbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
super(MSExecutionContext_zxjdbc, self).pre_exec()
# scope_identity after the fact returns null in jTDS so we must
# embed it
if self._select_lastrowid and self.dialect.use_scope_identity:
self._embedded_scope_identity = True
self.statement += "; SELECT scope_identity()"
def post_exec(self):
if self._embedded_scope_identity:
while True:
try:
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error as e:
self.cursor.nextset()
self._lastrowid = int(row[0])
if (self.isinsert or self.isupdate or self.isdelete) and \
self.compiled.returning:
self._result_proxy = base.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
table = self.dialect.identifier_preparer.format_table(
self.compiled.statement.table)
self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table)
class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
jdbc_db_name = 'jtds:sqlserver'
jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver'
execution_ctx_cls = MSExecutionContext_zxjdbc
def _get_server_version_info(self, connection):
return tuple(
int(x)
for x in connection.connection.dbversion.split('.')
)
dialect = MSDialect_zxjdbc
|
gpl-3.0
| 7,793,973,673,749,884,000
| 33
| 84
| 0.658824
| false
| 3.959627
| false
| false
| false
|
rocian/AdventOfCode2016
|
08/solutions.py
|
1
|
5519
|
#!/usr/bin/env python3
"""
https://adventofcode.com/2016
--- Day 8: Two-Factor Authentication ---
You come across a door implementing what you can only assume is an
implementation of two-factor authentication after a long game of requirements
telephone.
To get past the door, you first swipe a keycard (no problem; there was one on a
nearby desk). Then, it displays a code on a little screen, and you type that
code on a keypad. Then, presumably, the door unlocks.
Unfortunately, the screen has been smashed. After a few minutes, you've taken
everything apart and figured out how it works. Now you just have to work out
what the screen would have displayed.
The magnetic strip on the card you swiped encodes a series of instructions for
the screen; these instructions are your puzzle input. The screen is 50 pixels
wide and 6 pixels tall, all of which start off, and is capable of three somewhat
peculiar operations:
rect AxB turns on all of the pixels in a rectangle at the top-left of the
screen which is A wide and B tall.
rotate row y=A by B shifts all of the pixels in row A (0 is the top row)
right by B pixels. Pixels that would fall off the right end appear at the
left end of the row.
rotate column x=A by B shifts all of the pixels in column A (0 is the left
column) down by B pixels. Pixels that would fall off the bottom appear at
the top of the column.
For example, here is a simple sequence on a smaller screen:
rect 3x2 creates a small rectangle in the top-left corner:
###....
###....
.......
rotate column x=1 by 1 rotates the second column down by one pixel:
#.#....
###....
.#.....
rotate row y=0 by 4 rotates the top row right by four pixels:
....#.#
###....
.#.....
rotate column x=1 by 1 again rotates the second column down by one pixel,
causing the bottom pixel to wrap back to the top:
.#..#.#
#.#....
.#.....
As you can see, this display technology is extremely powerful, and will soon
dominate the tiny-code-displaying-screen market. That's what the advertisement
on the back of the display tries to convince you, anyway.
There seems to be an intermediate check of the voltage used by the display:
after you swipe your card, if the screen did work, how many pixels should be
lit?
--- Part Two ---
You notice that the screen is only capable of displaying capital letters; in the
font it uses, each letter is 5 pixels wide and 6 tall.
After you swipe your card, what code is the screen trying to display?
"""
import re
import numpy as np
def read_input():
""" This function read the instruction from the input file and
return a clean list of instruction."""
f = open('input', 'r')
string = f.read()
lstring = string.split("\n")
# we remove the last void instruction
# this could be made in a safer way
lstring = lstring[:-1]
return(lstring)
def array_roll(a, index, by, axis):
"Roll array row/coll by specified amount by."
if axis:
# if move by columns axis = 1, transpose array
a = np.transpose(a)
# roll row of `by` position
a[index] = np.roll(a[index], by)
if axis:
# if move by columns axis = 1, transpose again array
a = np.transpose(a)
return(a)
def process(monitor, instruction):
"""Process the instructions on the monitor and return the final monitor state."""
# create the opportune regex to capture instruction of operation
rect = re.compile(r"(\d+)x(\d+)")
rowr = re.compile(r"y=(\d+) by (\d+)")
colr = re.compile(r"x=(\d+) by (\d+)")
for operation in instruction:
if operation.startswith("rect"):
# fill rect dx x dy with 1
dx, dy = re.findall(rect, operation)[0]
monitor[0:int(dy), 0:int(dx)] = 1
elif operation.startswith("rotate column"):
# roll column `index` by `dy`
index, dy = re.findall(colr, operation)[0]
monitor = array_roll(monitor, int(index), int(dy), 1)
elif operation.startswith("rotate row"):
# roll row `index` by `dx`
index, dx = re.findall(rowr, operation)[0]
monitor = array_roll(monitor, int(index), int(dx), 0)
return(monitor)
def to_shape(monitor, nrow, ncol, by, voidc, fillc):
"Create shape letters from array"
# add 0 filled column to space letters
for c in range(ncol - by, 0, -by):
monitor = np.insert(monitor, c, 0, axis=1)
# replace 0 by `voidc` and 1 by `fillc`
# to iter tranform in a list and then agai in ndarray
monitor = [fillc if i else voidc for i in np.nditer(monitor, op_flags=['readwrite'])]
monitor = np.array(monitor).reshape(nrow, len(monitor) // nrow)
# create a string from array
string = "\n\n\t"
for row in monitor:
string += ''.join(row)
string += "\n\t"
return(string)
# number of rows and columns in monitor
nrow = 6
ncol = 50
# number of columns in a letter block
nby = 5
# chars for void and fill in a letter block
voidc = ' '
fillc = '█' # Unicode FULL BLOCK
# create the monitor as array
monitor = [0] * (nrow * ncol)
monitor = np.array(monitor).reshape(nrow, ncol)
# process instructions
monitor = process(monitor, read_input())
print("Day 8. Solution of part 1: {}".format(sum(sum(monitor))))
print("Day 8. Solution of part 2: {}".format(to_shape(monitor, nrow, ncol,
nby, voidc, fillc)))
|
gpl-3.0
| -1,507,390,072,424,425,200
| 29.65
| 89
| 0.65126
| false
| 3.634387
| false
| false
| false
|
davipeterlini/routeflow_tcc
|
rflib/ipc/MongoIpc.py
|
1
|
11834
|
import rflib.ipc.RFProtocol as RFProtocol
import bson
import threading
import pymongo as mongo
import time
import sys
from rflib.ipc.Ipc import Ipc
from rflib.ipc.MongoUtils import MongoFactory
from rflib.defs import *
FIELD_NAME_ID = "_id"
FIELD_NAME_FROM = "from"
FIELD_NAME_TO = "to"
FIELD_NAME_TYPE = "type"
FIELD_NAME_READ = "read"
FIELD_NAME_CONTENT = "content"
# 1 MB for the capped collection
CC_SIZE = 1048576
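# Shape of a message document stored in the capped collection (derived from
# MongoIpcMessageFactory below; the "content" keys vary with the message type):
#   {"_id": ObjectId(...), "from": "<sender id>", "to": "<recipient id>",
#    "type": <RFProtocol message type>, "read": False,
#    "content": {...type-specific fields...}}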
class MongoIpc(Ipc):
def __init__(self, user_id, channel_id):
self._mf = MongoFactory()
self._producer_connection = self._mf.create_connection()
self._user_id = user_id
self._channel_id = channel_id
self._db_name = MONGO_DB_NAME
db = self._producer_connection[self._db_name]
try:
collection = mongo.collection.Collection(db, self._channel_id, True, capped=True, size=CC_SIZE)
collection.ensure_index([("_id", mongo.ASCENDING)])
collection.ensure_index([(FIELD_NAME_TO, mongo.ASCENDING)])
except:
print "channel already exists"
def listen(self, message_processor):
#self._producer_connection = self._mf.create_connection()
while True:
# tries to get unread messages
for i in xrange(0, MONGO_MAX_RETRIES):
try:
collection = self._producer_connection[self._db_name][self._channel_id]
cursor = collection.find(
{FIELD_NAME_TO: self._user_id, FIELD_NAME_READ: False},
tailable=True
)
#cursor OK, break for
break
except:
if (i + 1) == MONGO_MAX_RETRIES:
print "[ERROR]MongoIPC: Could not get unread messages. Error: (", sys.exc_info(), ")"
return
print "[RECOVERING]MongoIPC: Could not get unread messages. Trying again in ", MONGO_RETRY_INTERVAL, " seconds. [", (i+1), "]"
time.sleep(MONGO_RETRY_INTERVAL)
while cursor.alive:
try:
envelope = next(cursor, None)
if envelope == None:
break;
except StopIteration:
time.sleep(1)
continue
except:
#print "[RECOVERING]MongoIPC: Fail to reach messages. Err:",sys.exc_info()
break;
ipc_message = MongoIpcMessageFactory.fromMongoMessageType(envelope)
message_processor.process(ipc_message);
# tries to mark message as read
for j in xrange(0, MONGO_MAX_RETRIES):
try:
collection = self._producer_connection[self._db_name][self._channel_id]
collection.update({"_id": envelope["_id"]},
{"$set": {FIELD_NAME_READ: True}})
# update done, break for
break
except:
if (j + 1) == MONGO_MAX_RETRIES:
print "[ERROR]MongoIPC: The Message (id: ",
print envelope["_id"],
print ") could not be marked as read. ",
print "Error: (", sys.exc_info, ")"
sys.exit(1)
print "[RECOVERING]MongoIPC: Could not mark message ",
print "as read. Trying again in ",
print MONGO_RETRY_INTERVAL, " seconds. [", (j+1), "]"
time.sleep(MONGO_RETRY_INTERVAL)
print "[OK]MongoIPC: Message (id: ", envelope["_id"], ") was marked as Read."
time.sleep(0.05)
def parallel_listen(self, message_processor):
worker = threading.Thread(target=self.listen, args=(message_processor,))
worker.start()
def send(self, ipc_message):
#self._producer_connection = self._mf.create_connection()
mongo_message = MongoIpcMessageFactory.fromMessageType(ipc_message)
for i in xrange(0, MONGO_MAX_RETRIES):
try:
collection = self._producer_connection[self._db_name][self._channel_id]
collection.insert(mongo_message)
break;
except:
if (i + 1) == MONGO_MAX_RETRIES:
print "[ERROR]MongoIPC: Message could not be sent. ",
print "Error: (", sys.exc_info(), ")"
sys.exit(1)
print "[RECOVERING]MongoIPC: Message not sent. ",
print "Trying again in ", MONGO_RETRY_INTERVAL, " seconds. ",
print "[", (i+1), "]"
time.sleep(MONGO_RETRY_INTERVAL)
print "[OK]MongoIPC: Message sent"
return True
class MongoIpcMessageFactory:
"""This class implements a factory to build a Ipc Message object from Bson Object and vice versa"""
@staticmethod
def fromMongoMessageType(mongo_obj):
"""Receives mongo BSONObj and build an
ipc message object, based on message type"""
#message = bson.BSON.decode(mongo_obj)
message = mongo_obj
message_content = message[FIELD_NAME_CONTENT]
ipc_message = None
if int(message[FIELD_NAME_TYPE]) == RFProtocol.PORT_REGISTER:
ipc_message = RFProtocol.PortRegister()
ipc_message.set_vm_id(message_content["vm_id"])
ipc_message.set_vm_port(message_content["vm_port"])
ipc_message.set_hwaddress(message_content["hwaddress"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.PORT_CONFIG:
ipc_message = RFProtocol.PortConfig()
ipc_message.set_vm_id(message_content["vm_id"])
ipc_message.set_vm_port(message_content["vm_port"])
ipc_message.set_operation_id(message_content["operation_id"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.DATAPATH_PORT_REGISTER:
ipc_message = RFProtocol.DatapathPortRegister()
ipc_message.set_ct_id(message_content["ct_id"])
ipc_message.set_dp_id(message_content["dp_id"])
ipc_message.set_dp_port(message_content["dp_port"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.DATAPATH_DOWN:
ipc_message = RFProtocol.DatapathDown()
ipc_message.set_ct_id(message_content["ct_id"])
ipc_message.set_dp_id(message_content["dp_id"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.VIRTUAL_PLANE_MAP:
ipc_message = RFProtocol.VirtualPlaneMap()
ipc_message.set_vm_id(message_content["vm_id"])
ipc_message.set_vm_port(message_content["vm_port"])
ipc_message.set_vs_id(message_content["vs_id"])
ipc_message.set_vs_port(message_content["vs_port"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.DATA_PLANE_MAP:
ipc_message = RFProtocol.DataPlaneMap()
ipc_message.set_ct_id(message_content["ct_id"])
ipc_message.set_dp_id(message_content["dp_id"])
ipc_message.set_dp_port(message_content["dp_port"])
ipc_message.set_vs_id(message_content["vs_id"])
ipc_message.set_vs_port(message_content["vs_port"])
elif int(message[FIELD_NAME_TYPE]) == RFProtocol.ROUTE_MOD:
ipc_message = RFProtocol.RouteMod()
ipc_message.set_mod(message_content["mod"])
ipc_message.set_id(message_content["id"])
ipc_message.set_matches(message_content["matches"])
ipc_message.set_actions(message_content["actions"])
ipc_message.set_options(message_content["options"])
else:
return None
ipc_message.set_message_id(message[FIELD_NAME_ID])
ipc_message.set_to(message[FIELD_NAME_TO])
ipc_message.set_from(message[FIELD_NAME_FROM])
ipc_message.set_read(message[FIELD_NAME_READ])
return ipc_message
@staticmethod
def fromMessageType(ipc_message):
"""Receives the ipc message object and build a mongo Bson Object,
based on message type"""
mongo_message = {}
mongo_message[FIELD_NAME_ID] = bson.objectid.ObjectId(ipc_message.get_message_id())
mongo_message[FIELD_NAME_TO] = str(ipc_message.get_to())
mongo_message[FIELD_NAME_FROM] = str(ipc_message.get_from())
mongo_message[FIELD_NAME_READ] = ipc_message.is_read()
mongo_message[FIELD_NAME_TYPE] = ipc_message.get_type()
message_content = {}
if int(ipc_message.get_type()) == RFProtocol.PORT_REGISTER:
message_content["vm_id"] = str(ipc_message.get_vm_id())
message_content["vm_port"] = str(ipc_message.get_vm_port())
message_content["hwaddress"] = str(ipc_message.get_hwaddress())
elif int(ipc_message.get_type()) == RFProtocol.PORT_CONFIG:
message_content["vm_id"] = str(ipc_message.get_vm_id())
message_content["vm_port"] = str(ipc_message.get_vm_port())
message_content["operation_id"] = str(ipc_message.get_operation_id())
elif int(ipc_message.get_type()) == RFProtocol.DATAPATH_PORT_REGISTER:
message_content["ct_id"] = str(ipc_message.get_ct_id())
message_content["dp_id"] = str(ipc_message.get_dp_id())
message_content["dp_port"] = str(ipc_message.get_dp_port())
elif int(ipc_message.get_type()) == RFProtocol.DATAPATH_DOWN:
message_content["ct_id"] = str(ipc_message.get_ct_id())
message_content["dp_id"] = str(ipc_message.get_dp_id())
elif int(ipc_message.get_type()) == RFProtocol.VIRTUAL_PLANE_MAP:
message_content["vm_id"] = str(ipc_message.get_vm_id())
message_content["vm_port"] = str(ipc_message.get_vm_port())
message_content["vs_id"] = str(ipc_message.get_vs_id())
message_content["vs_port"] = str(ipc_message.get_vs_port())
elif int(ipc_message.get_type()) == RFProtocol.DATA_PLANE_MAP:
message_content["ct_id"] = str(ipc_message.get_ct_id())
message_content["dp_id"] = str(ipc_message.get_dp_id())
message_content["dp_port"] = str(ipc_message.get_dp_port())
message_content["vs_id"] = str(ipc_message.get_vs_id())
message_content["vs_port"] = str(ipc_message.get_vs_port())
elif int(ipc_message.get_type()) == RFProtocol.ROUTE_MOD:
message_content["mod"] = str(ipc_message.get_mod())
message_content["id"] = str(ipc_message.get_id())
message_content["matches"] = ipc_message.get_matches()
message_content["actions"] = ipc_message.get_actions()
message_content["options"] = ipc_message.get_options()
else:
return None
mongo_message[FIELD_NAME_CONTENT] = message_content
return mongo_message
|
apache-2.0
| -1,613,797,404,470,370,000
| 43.656604
| 164
| 0.532787
| false
| 3.975143
| false
| false
| false
|