hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1cf13468ac305c0b8a287bc9c7f12d5b9c3ea341 | 5,736 | py | Python | main.py | MinistrBob/MikrotikBackup | 9c37b5cd2d06c077dd374f4e3e56c844cc4a5084 | [
"MIT"
] | null | null | null | main.py | MinistrBob/MikrotikBackup | 9c37b5cd2d06c077dd374f4e3e56c844cc4a5084 | [
"MIT"
] | null | null | null | main.py | MinistrBob/MikrotikBackup | 9c37b5cd2d06c077dd374f4e3e56c844cc4a5084 | [
"MIT"
] | null | null | null | import csv
import datetime
import os
import shutil
import sys
import argparse
from mikrotik import Mikrotik
def compare_files(f1, f2):
    """Compare two text files, ignoring comment lines starting with '#'.

    Args:
        f1: Path of the current file.
        f2: Path of the previous file.

    Returns:
        True if the files' non-comment content is identical, otherwise False.
        If f2 (the previous file) does not exist, returns False.
    """
    if not os.path.exists(f2):
        return False

    def _next_content_line(fp):
        # Skip comment lines independently in each file; the original
        # pairwise skip could misalign the comparison when the two files
        # contain a different number of '#' lines.
        while True:
            line = fp.readline()
            if not line.startswith('#'):
                return line

    with open(f1, 'r') as fp1, open(f2, 'r') as fp2:
        while True:
            b1 = _next_content_line(fp1)
            b2 = _next_content_line(fp2)
            if b1 != b2:
                return False
            if not b1:  # EOF reached in both files with no differences
                return True
if __name__ == '__main__':
    start_time = datetime.datetime.now()
    curr_date = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

    # Parse command-line arguments.
    arg_parser = argparse.ArgumentParser(description="Mikrotiks Backup")
    arg_parser.add_argument(
        "-c", dest="command",
        help="Mikrotik terminal command (enclose in double quotes)")
    args = arg_parser.parse_args()

    # With no arguments at all, run the default full-backup behaviour.
    if len(sys.argv) == 1:
        result = backup_mikrotiks()
    if args.command:
        result = execute_command(args.command)

    print_result(result)
    print(f"Program completed")
    print(f"Total time spent: {datetime.datetime.now() - start_time} sec.")
| 35.190184 | 106 | 0.586646 | import csv
import datetime
import os
import shutil
import sys
import argparse
from mikrotik import Mikrotik
def compare_files(f1, f2):
    """Compare two text files, ignoring comment lines starting with '#'.

    Args:
        f1: Path of the current file.
        f2: Path of the previous file.

    Returns:
        True if the files' non-comment content is identical, otherwise False.
        If f2 (the previous file) does not exist, returns False.
    """
    if not os.path.exists(f2):
        return False

    def _next_content_line(fp):
        # Skip comment lines independently in each file; the original
        # pairwise skip could misalign the comparison when the two files
        # contain a different number of '#' lines.
        while True:
            line = fp.readline()
            if not line.startswith('#'):
                return line

    with open(f1, 'r') as fp1, open(f2, 'r') as fp2:
        while True:
            b1 = _next_content_line(fp1)
            b2 = _next_content_line(fp2)
            if b1 != b2:
                return False
            if not b1:  # EOF reached in both files with no differences
                return True
def backup_mikrotik(mk, curr_date, base_path, line):
    """Back up a single Mikrotik router.

    Creates an export (.rsc) backup and compares it with the previous one.
    If there are differences, the export is stored and a binary backup is
    additionally created and stored.
    Binary backup restore: /system backup load name=<file_name>

    Args:
        mk: Connected Mikrotik object.
        curr_date: Timestamp string used in backup file names.
        base_path: Root folder containing per-router backup folders.
        line: Config record; line[0] is the router address.
    """
    mk.backup_export("current.rsc")
    backup_folder = os.path.join(base_path, line[0])
    # Create folder for backup
    if not os.path.exists(backup_folder):
        print(f"Create folder for backup {backup_folder}")
        try:
            os.mkdir(backup_folder)
        except OSError as e:
            print(f"ERROR: Can't create folder {backup_folder}:\n{e}")
    # Keep the previous export around for comparison.
    current_file = os.path.join(backup_folder, "current.rsc")
    previous_file = os.path.join(backup_folder, "previous.rsc")
    if os.path.exists(current_file):
        try:
            os.replace(current_file, previous_file)
        except OSError as e:
            print(f"ERROR: Can't rename current.rsc to previous.rsc:\n{e}")
    # Download current.rsc locally
    mk.download_file("current.rsc", current_file)
    # Compare current and previous files
    if compare_files(current_file, previous_file):
        print(f"No changes. Nothing to work")
    else:
        print(f"There are changes. Making a backup")
        backup_name = f"{line[0]}-{curr_date}"
        # Copy local current.rsc to <backup_name>.rsc
        backup_file = os.path.join(backup_folder, f"{backup_name}.rsc")
        print(f"Copy {current_file} to {backup_file}")
        try:
            shutil.copy(current_file, backup_file)
        # BUG FIX: the original `except Error` referenced an undefined name
        # and would raise NameError if shutil.copy actually failed.
        except (shutil.Error, OSError) as e:
            print(f"ERROR: Can't copy current.rsc:\n{e}")
        # Create binary backup additionally
        mk.backup_backup("current")
        # Download current.backup locally
        binary_backup_file = os.path.join(backup_folder, f"{backup_name}.backup")
        mk.download_file("current.backup", binary_backup_file)
def backup_mikrotiks():
    """Back up every Mikrotik router listed in config.conf.

    Reads the backup root folder from the MIKROTIK_BACKUP_PATH environment
    variable and router records ('address;user;password') from config.conf.

    Returns:
        Dict mapping router address -> "OK" or "ERROR".
    """
    base_path = os.getenv('MIKROTIK_BACKUP_PATH')
    if base_path is None:
        # BUG FIX: the original raised EnvironmentError and then swallowed
        # it, exiting without ever showing the message.
        print("ERROR: Environment variable MIKROTIK_BACKUP_PATH not defined")
        exit(1)
    result = {}
    with open('config.conf', newline='', encoding='utf-8') as f:
        config = csv.reader(f, delimiter=';')
        for line in config:  # ['192.168.2.1', 'admin', '']
            if not line:  # skip empty lines (consistent with execute_command)
                continue
            if line[0].startswith('#'):  # skip comments
                continue
            # Work with mikrotik
            print(f"Work with {line[0]} mikrotik")
            try:
                mk = Mikrotik(line[0], line[1], line[2])
                backup_mikrotik(mk, curr_date, base_path, line)
                result[line[0]] = "OK"
            except Exception as e:
                result[line[0]] = "ERROR"
                print(f"ERROR: {e}")
                continue
            print()
    return result
def execute_command(command):
    """Run a terminal command on every Mikrotik listed in config.conf.

    Args:
        command: Mikrotik terminal command string.

    Returns:
        Dict mapping router address -> "OK" or "ERROR".
    """
    outcome = {}
    with open('config.conf', newline='', encoding='utf-8') as conf_file:
        reader = csv.reader(conf_file, delimiter=';')
        for record in reader:  # ['192.168.2.1', 'admin', '']
            print(record)
            if not record:  # skip empty lines
                continue
            if record[0].startswith('#'):  # skip comments
                continue
            # Work with mikrotik
            print(f"Work with {record[0]} mikrotik")
            try:
                router = Mikrotik(record[0], record[1], record[2])
                router.execute_command(command)
                outcome[record[0]] = "OK"
            except Exception as e:
                outcome[record[0]] = "ERROR"
                print(f"ERROR: {e}")
                continue
            print()
    return outcome
def print_result(result):
    """Print the per-router result dict between two horizontal rules."""
    separator = "-" * 80
    print(separator)
    for host, status in result.items():
        print(f"{host} = {status}")
    print(separator)
if __name__ == '__main__':
    start_time = datetime.datetime.now()
    curr_date = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

    # Parse command-line arguments.
    arg_parser = argparse.ArgumentParser(description="Mikrotiks Backup")
    arg_parser.add_argument(
        "-c", dest="command",
        help="Mikrotik terminal command (enclose in double quotes)")
    args = arg_parser.parse_args()

    # With no arguments at all, run the default full-backup behaviour.
    if len(sys.argv) == 1:
        result = backup_mikrotiks()
    if args.command:
        result = execute_command(args.command)

    print_result(result)
    print(f"Program completed")
    print(f"Total time spent: {datetime.datetime.now() - start_time} sec.")
| 3,976 | 0 | 92 |
6a7ab3f40a7b450d06dfc9fd213a66241c2616ba | 23,685 | py | Python | scope/tfutils.py | guygurari/scope | 0bb10f27ff9f98579873247694fe41f54d0a16df | [
"BSD-3-Clause"
] | 9 | 2018-10-27T00:59:11.000Z | 2021-03-18T22:22:17.000Z | scope/tfutils.py | guygurari/scope | 0bb10f27ff9f98579873247694fe41f54d0a16df | [
"BSD-3-Clause"
] | 1 | 2019-11-11T08:20:18.000Z | 2019-11-14T06:38:41.000Z | scope/tfutils.py | guygurari/scope | 0bb10f27ff9f98579873247694fe41f54d0a16df | [
"BSD-3-Clause"
] | 4 | 2018-12-29T15:08:44.000Z | 2020-06-24T01:11:33.000Z | """TensorFlow utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from time import time
import scope.lanczos as lanczos
KERAS_LEARNING_PHASE_TEST = 0
KERAS_LEARNING_PHASE_TRAIN = 1
class Timer:
"""A simple wallclock timer."""
@property
class NumpyPrintEverything:
"""Tell NumPy to print everything.
Synopsis:
with NumpyPrintEverything():
print(numpy_array)
"""
class NumpyPrintoptions:
"""Temporarily set NumPy printoptions.
Synopsis:
with NumpyPrintoptions(formatter={'float': '{:0.2f}'.format}):
print(numpy_array)
"""
class MiniBatchMaker:
"""Shuffle data and split it into batches."""
def at_start_of_epoch(self):
"""Are we starting a new epoch?"""
return self.i == 0
def create_iid_batch_generator(x, y, steps, batch_size, resample_prob=1):
"""Returns an IID mini-batch generator.
ds = Dataset.from_generator(
create_iid_batch_generator(x, y, batch_size), ...)
Args:
x: Input samples
y: Labels
steps: How many steps to run for
batch_size: Integer size of mini-batch
resample_prob: Probability of resampling a given sample at each step.
If a function, the function should return the current resampling
probability and will be called every time a batch is generated.
"""
N = len(x)
return gen
def keras_feed_dict(model,
x=None,
y=None,
feed_dict={},
learning_phase=KERAS_LEARNING_PHASE_TEST):
"""Return a feed dict with inputs and labels suitable for Keras.
Args:
model: A Keras Model
x: Model inputs, or None if inputs are not fed
y: Model targets (labels), or None if targets are not fed
feed_dict: Additional feed_dict to merge with (if given, updated in
place)
learning_phase: 0 for TEST, 1 for TRAIN
Returns:
The new feed_dict (equal to feed_dict if that was provided).
"""
new_feed_dict = dict(feed_dict)
if x is not None:
new_feed_dict[model.inputs[0]] = x
new_feed_dict[model.sample_weights[0]] = np.ones(x.shape[0])
if y is not None:
new_feed_dict[model.targets[0]] = y
new_feed_dict[K.learning_phase()] = learning_phase # TEST phase
return new_feed_dict
def keras_compute_tensors(model, x, y, tensors, feed_dict={}):
"""Compute the given tensors in Keras."""
new_feed_dict = keras_feed_dict(model, x, y, feed_dict)
return K.get_session().run(tensors, feed_dict=new_feed_dict)
def clone_keras_model_shared_weights(
model, input_tensor, target_tensor):
"""Clone a Keras model.
The new model shares its weights with the old model, but accepts different
inputs and targets. This is useful, for example, for evaluating a model
mid-training.
Args:
model: A compiled Keras model.
input_tensor: Tensor to use as input for the cloned model.
target_tensor: Tensor to be used as targets (labels) for the cloned model.
Returns:
The cloned Keras model.
"""
assert len(model.inputs) == 1
inputs = keras.layers.Input(tensor=input_tensor,
shape=model.inputs[0].shape[1:])
clone = keras.Model(
inputs=inputs,
outputs=model(input_tensor))
clone.compile(
loss=model.loss,
target_tensors=[target_tensor],
optimizer=model.optimizer,
metrics=model.metrics)
return clone
def flatten_array_list(arrays):
    """Flatten and concat a list of numpy arrays into a single rank 1 vector."""
    flat_parts = [np.ravel(a) for a in arrays]
    return np.concatenate(flat_parts, axis=0)
def flatten_tensor_list(tensors):
    """Flatten and concat a list of tensors into a single rank 1 tensor."""
    flat_parts = [tf.reshape(t, [-1]) for t in tensors]
    return tf.concat(flat_parts, axis=0)
def unflatten_tensor_list(flat_tensor, orig_tensors):
    """Reshape a flattened tensor back to a list of tensors with their
    original shapes.

    Args:
      flat_tensor: A tensor that was previously flattened using
          flatten_tensor_list()
      orig_tensors: A list of tensors with the original desired shapes.
    """
    pieces = []
    start = 0
    for ref in orig_tensors:
        count = ref.shape.num_elements()
        chunk = flat_tensor[start:start + count]
        pieces.append(tf.reshape(chunk, ref.shape))
        start += count
    return pieces
def compute_sample_mean_tensor(model, batches, tensors, feed_dict={}):
"""Compute the sample mean of the given tensors.
Args:
model: Keras Model
batches: MiniBatchMaker
tensors: Tensor or list of Tensors to compute the mean of
feed_dict: Used when evaluating tensors
"""
sample_means = None
tensors_is_list = isinstance(tensors, (list, tuple))
tensors = _AsList(tensors)
while True:
x_batch, y_batch = batches.next_batch()
results = keras_compute_tensors(model, x_batch, y_batch, tensors, feed_dict)
for i in range(len(results)):
results[i] *= len(x_batch)
if sample_means is None:
sample_means = results
else:
for i in range(len(results)):
sample_means[i] += results[i]
if batches.at_start_of_epoch():
break
for i in range(len(sample_means)):
sample_means[i] /= batches.N
if tensors_is_list:
return sample_means
else:
assert len(sample_means) == 1
return sample_means[0]
def jacobian(y, x):
"""Compute the Jacobian tensor J_ij = dy_i/dx_j.
From https://github.com/tensorflow/tensorflow/issues/675, which is adapted
from tf.hessiangs().
:param Tensor y: A Tensor
:param Tensor x: A Tensor
:rtype: Tensor
:return: The Jacobian Tensor, whose shape is the concatenation of
the y_flat and x shapes.
"""
y_flat = tf.reshape(y, [-1])
# tf.shape() returns a Tensor, so this supports dynamic sizing
n = tf.shape(y_flat)[0]
loop_vars = [
tf.constant(0, tf.int32),
tf.TensorArray(tf.float32, size=n),
]
_, jacobian = tf.while_loop(
lambda j, _: j < n,
lambda j, result: (j+1,
result.write(j, tf.gradients(y_flat[j], x)[0])),
loop_vars)
jacobian_shape = tf.concat([tf.shape(y), tf.shape(x)], axis=0)
jacobian = tf.reshape(jacobian.stack(), jacobian_shape)
return jacobian
def jacobians(y, xs):
"""Compute the Jacobian tensors J_ij = dy_i/dx_j for each x in xs.
With this implementation, the gradient is computed for all xs in one
call, so if xs includes weights from different layers then back prop
is used.
:param Tensor y: A rank 1 Tensor
:param Tensor xs: A Tensor or list of Tensors
:rtype: list
:return: List of Jacobian tensors J_ij = dy_i/dx_j for each x in xs.
"""
if y.shape.ndims != 1:
raise ValueError('y must be a rank 1 Tensor')
xs = _AsList(xs)
# tf.shape() returns a Tensor, so this supports dynamic sizing
len_y = tf.shape(y)[0]
jacobians = []
# Outer loop runs over elements of y, computes gradients for each
loop_vars = [
tf.constant(0, tf.int32),
[tf.TensorArray(tf.float32, size=len_y) for x in xs]
]
def _compute_single_y_gradient(j, arrays):
"""Compute the gradient for a single y elem."""
grads = tf.gradients(y[j], xs)
for i, g in enumerate(grads):
arrays[i] = arrays[i].write(j, g)
return arrays
_, jacobians = tf.while_loop(
lambda j, _: j < len_y,
lambda j, arrays: (j + 1, _compute_single_y_gradient(j, arrays)),
loop_vars)
jacobians = [a.stack() for a in jacobians]
return jacobians
def hessians(y, xs):
"""The Hessian of y with respect to each x in xs.
:param y Tensor: A scalar Tensor.
:param xs Tensor: A Tensor or list of Tensors. Each Tensor can have any
rank.
:rtype: list
:return: List of Hessians d^2y/dx^2. The shape of a Hessian is
x.shape + y.shape.
"""
xs = _AsList(xs)
hessians = []
for x in xs:
# First derivative and flatten
grad = tf.gradients(y, x)[0]
grad_flat = tf.reshape(grad, [-1])
# Second derivative
n = tf.shape(grad_flat)[0]
loop_vars = [
tf.constant(0, tf.int32),
tf.TensorArray(tf.float32, size=n),
]
_, hessian = tf.while_loop(
lambda j, _: j < n,
lambda j, result: (j+1, result.write(
j, tf.gradients(grad_flat[j], x)[0])),
loop_vars)
hessian = hessian.stack()
x_shape = tf.shape(x)
hessian_shape = tf.concat([x_shape, x_shape], axis=0)
hessians.append(tf.reshape(hessian, hessian_shape))
return hessians
def num_weights(weights):
    """Number of weights in the given list of weight tensors."""
    total = 0
    for w in weights:
        total += w.shape.num_elements()
    return total
def total_num_weights(model):
    """Total number of weights in the given Keras model."""
    trainable = model.trainable_weights
    return num_weights(trainable)
def total_tensor_elements(x):
    """Tensor containing the total number of elements of x.

    :param x Tensor: A tensor.
    :rtype: Tensor
    :return: A scalar Tensor containing the total number of elements.
    """
    dynamic_shape = tf.shape(x)
    return tf.reduce_prod(dynamic_shape)
def hessian_tensor_blocks(y, xs):
"""Compute the tensors that make up the full Hessian (d^2y / dxs dxs).
A full computation of the Hessian would look like this:
blocks = hessian_tensor_blocks(y, xs)
block_results = sess.run(blocks)
hessian = hessian_combine_blocks(block_results)
:param y Tensor: A scalar Tensor.
:param xs Tensor: A Tensor or list of Tensors. Each Tensor can have any
rank.
:rtype: list
:return: List of Tensors that should be evaluated, and the results
should be passed to hessian_combine_blocks() to get the full
gradient.
"""
xs = _AsList(xs)
hess_blocks = []
for i1, x1 in enumerate(xs):
# First derivative and flatten
grad_x1 = tf.gradients(y, x1)[0]
grad_x1_flat = tf.reshape(grad_x1, [-1])
x1_size = total_tensor_elements(x1)
# Second derivative: Only compute upper-triangular blocks
# because Hessian is symmetric
for x2 in xs[i1:]:
x2_size = total_tensor_elements(x2)
loop_vars = [
tf.constant(0, tf.int32),
tf.TensorArray(tf.float32, size=x1_size),
]
_, x1_x2_block = tf.while_loop(
lambda j, _: j < x1_size,
lambda j, result: (j+1, result.write(
j, tf.gradients(grad_x1_flat[j], x2)[0])),
loop_vars)
x1_x2_block = tf.reshape(x1_x2_block.stack(), [x1_size, x2_size])
hess_blocks.append(x1_x2_block)
return hess_blocks
def hessian_combine_blocks(blocks):
    """Combine the pieces obtained by evaluating the Tensors returned
    by hessian_tensor_blocks(), and return the full Hessian matrix.
    """
    # Only upper-triangular blocks are recorded; recover the number of
    # block-rows n by solving len(blocks) = n * (n + 1) / 2.
    num_recorded_blocks = len(blocks)
    blocks_per_row = int((np.sqrt(1 + 8 * num_recorded_blocks) - 1) / 2)
    # The column widths of the first block-row give every block dimension.
    dims = [b.shape[1] for b in blocks[:blocks_per_row]]
    total_dim = sum(dims)
    # Precompute cumulative offsets so each block maps to a slice pair.
    offsets = [sum(dims[:i]) for i in range(len(dims) + 1)]
    H = np.zeros((total_dim, total_dim))
    row, col = 0, 0
    for b in blocks:
        r0, r1 = offsets[row], offsets[row + 1]
        c0, c1 = offsets[col], offsets[col + 1]
        # Fill the block and mirror it to keep H symmetric.
        H[r0:r1, c0:c1] = b
        H[c0:c1, r0:r1] = b.transpose()
        col += 1
        if col >= blocks_per_row:
            row += 1
            col = row
    return H
def trace_hessian(loss, logits, weights):
"""Compute the trace of the Hessian of loss with respect to weights.
We assume that loss = loss(logits(weights)), and that logits is a
piecewise-linear function of the weights (therefore d^2 logits / dw^2 = 0
for any w). This allows for a faster implementation that the naive one.
Note: This computes the Hessian of loss / logits, where logits is indexed
by sample and class, but all elements of the Hessian with two different
classes vanish. So this is still not the most efficient way to do it.
:param loss Tensor: A scalar Tensor
:param logits Tensor: The logits tensor.
:param weights Tensor: A Tensor or list of Tensors of model weights.
:rtype: Tensor
:return: The trace of the Hessian of loss with respect to all the weights.
"""
weights = _AsList(weights)
# Flatten logits with a well-specified dimension. This assumes any
# non-trivial dimension will resolve to 1.
# logits_flat = tf.reshape(logits, [-1])
loss_logits_hessian = hessians(loss, logits)[0]
tr_hessian_pieces = []
for w in weights:
J = jacobian(logits, w)
# Contract along the weight indices
# (first index is the logit index)
weight_axes = list(range(logits.shape.ndims, J.shape.ndims))
JJ = tf.tensordot(J, J, axes=[weight_axes, weight_axes])
# Doesn't work for dynamic shape
# assert loss_logits_hessian.shape == JJ.shape
all_axes = list(range(JJ.shape.ndims))
tr_hessian_pieces.append(
tf.tensordot(loss_logits_hessian, JJ, axes=[all_axes, all_axes]))
return tf.reduce_sum(tr_hessian_pieces)
def trace_hessian_softmax_crossentropy(logits, weights):
"""Compute the trace of the Hessian of loss with respect to weights.
The loss is assumed to be crossentropy(softmax(logits)), which allows
us to compute the loss/logits Hessian analytically, and it factorizes.
We also assume that logits is a piecewise-linear function of the weights
(therefore d^2 logits / dw^2 = 0 for any w).
Here we compute the Hessian of loss / logits analytically, which saves
some time, but only about 10%. It seems most time is spent just computing
the Jacobian.
:param loss Tensor: A scalar Tensor
:param logits Tensor: A Tensor with rank 2, indexed by [sample, class].
:param weights Tensor: A Tensor or list of Tensors of model weights.
:rtype: Tensor
:return: The trace of the Hessian of loss with respect to all the weights.
"""
weights = _AsList(weights)
if logits.shape.ndims != 2:
raise ValueError('logits tensor must have rank 2')
probs = tf.nn.softmax(logits)
tr_hessian_pieces = []
JdotP = tf.gradients(logits, weights, grad_ys=probs)
tf.logging.info('JdotP =', JdotP)
return tf.reduce_sum(tr_hessian_pieces)
def trace_hessian_reference(loss, weights):
"""Compute the whole Hessian for each layer, then take the trace.
This is a straightforward and slow implementation meant for testing.
"""
weights = _AsList(weights)
trace_terms = []
grads = tf.gradients(loss, weights)
for grad, weight in zip(grads, weights):
grad_unstacked = tf.unstack(tf.reshape(grad, [-1]))
for i, g in enumerate(grad_unstacked):
g2 = tf.reshape(tf.gradients(g, weight)[0], [-1])
diag_hessian_term = g2[i]
trace_terms.append(diag_hessian_term)
return tf.reduce_sum(trace_terms)
def hessian_vector_product(loss, weights, v):
    """Compute the tensor of the product H.v, where H is the loss Hessian with
    respect to the weights. v is a vector (a rank 1 Tensor) of the same size as
    the loss gradient. The ordering of elements in v is the same obtained from
    flatten_tensor_list() acting on the gradient. Derivatives of dv/dweights
    should vanish.
    """
    flat_grad = flatten_tensor_list(tf.gradients(loss, weights))
    # Treat v as a constant so the second differentiation only acts on the
    # gradient, yielding H.v rather than extra dv/dw terms.
    inner_product = tf.reduce_sum(flat_grad * tf.stop_gradient(v))
    return flatten_tensor_list(tf.gradients(inner_product, weights))
class TensorStatistics:
"""Collect statistics for a tensor over different mini-batches."""
def add_minibatch(self, value):
"""Add mean value over minibatch."""
self.running_sum += value
self.running_sum_of_squares += value * value
self.n += 1
@property
def mean(self):
"""The mean"""
return self.running_sum / self.n
@property
def var(self):
"""Variance of each tensor element"""
return self.running_sum_of_squares / self.n - self.mean**2
@property
def std(self):
"""Standard deviation of each tensor element"""
return np.sqrt(self.var)
@property
def norm_of_mean(self):
"""Norm of the mean"""
return np.linalg.norm(self.mean)
@property
def norm_of_std(self):
"""Norm of vector of standard deviations"""
return np.linalg.norm(self.std)
class TensorListStatistics(list):
"""Collect statistics for a list of tensors over different mini-batches.
Behaves as list where each element is a TensorStatistics object.
"""
def __init__(self, tensors):
"""tensors: list of Tensors"""
super().__init__([TensorStatistics(t) for t in tensors])
@property
def means(self):
"""List of tensor means"""
return [s.mean for s in self]
@property
def vars(self):
"""List of tensor variances"""
return [s.var for s in self]
@property
def stds(self):
"""List of tensor standard devs"""
return [s.std for s in self]
@property
def norm_of_mean(self):
"""The norm of the concatenated list of tensor means."""
norms = np.array([np.linalg.norm(s.mean) for s in self])
return np.sqrt(np.sum(norms * norms))
@property
def norm_of_std(self):
"""The norm of the concatenated list of tensor stds."""
norms = np.array([np.linalg.norm(s.std) for s in self])
return np.sqrt(np.sum(norms * norms))
class KerasHessianSpectrum:
"""Computes the partial Hessian spectrum of a Keras model using Lanczos."""
def __init__(self, model, x, y, batch_size=1024, stochastic=False,
weights=None, loss=None):
"""model is a keras sequential model.
Args:
model: A Keras Model
x: Training samples
y: Training labels
batch_size: Batch size for computing the Hessian. Does not affect the
result, only affects time and memory performance.
stochastic: If True, approximate the Hessian using batch_size samples,
chosen at random with each call to compute_spectrum().
weights: Weights with respect to which to compute the Hessian.
Can be a weight tensor or a list of tensors. If None,
all model weights are used.
loss: Can be specified separately for unit testing purposes.
"""
self.model = model
if weights is None:
self.weights = model.trainable_weights
else:
self.weights = _AsList(weights)
self.num_weights = num_weights(self.weights)
self.v = tf.placeholder(tf.float32, shape=(self.num_weights,))
# Delay looking at model because it may not be compiled yet
self._loss = loss
self._Hv = None
self.train_batches = MiniBatchMaker(x, y, batch_size)
self.batch_size = batch_size
self.stochastic = stochastic
@property
def loss(self):
"""The loss.
Evaluated lazily in case the model is not compiled at
first.
"""
if self._loss is None:
return self.model.total_loss
else:
return self._loss
@property
def Hv(self):
"""The Hessian-vector product tensor"""
if self._Hv is None:
self._Hv = hessian_vector_product(self.loss, self.weights, self.v)
return self._Hv
def compute_spectrum(self, k, v0=None, show_progress=False):
"""Compute k leading eigenvalues and eigenvectors.
Args:
v0: If specified, use as initial vector for Lanczos.
"""
timer = Timer()
if self.stochastic:
results = self._compute_stochastic_spectrum(k, v0, show_progress)
else:
results = self._compute_full_batch_spectrum(k, v0, show_progress)
self.lanczos_secs = timer.secs
if show_progress:
tf.logging.info('')
return results
def _compute_stochastic_spectrum(self, k, v0=None, show_progress=False):
"""Compute k leading eigenvalues and eigenvectors.
Each time this method is called, a new batch is selected to approximate
the Hessian.
"""
self.lanczos_iterations = 0
x_batch, y_batch = self.train_batches.next_batch()
if len(x_batch) < self.batch_size:
assert self.train_batches.at_start_of_epoch()
x_batch, y_batch = self.train_batches.next_batch()
if len(x_batch) < self.batch_size:
raise ValueError("Getting batches that are too small: {} < {}".format(
len(x_batch), self.batch_size))
evals, evecs = lanczos.eigsh(
n=self.num_weights, dtype=np.float32, matvec=compute_Hv, k=k, v0=v0)
return evals, evecs
| 29.168719 | 80 | 0.672535 | """TensorFlow utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.backend as K
from time import time
import scope.lanczos as lanczos
KERAS_LEARNING_PHASE_TEST = 0
KERAS_LEARNING_PHASE_TRAIN = 1
class Timer:
    """A simple wallclock timer."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Restart the timer from the current wallclock time."""
        self.start = time()

    @property
    def secs(self):
        """Seconds elapsed since construction or the last reset()."""
        now = time()
        return now - self.start
class NumpyPrintEverything:
    """Tell NumPy to print everything.

    Synopsis:
      with NumpyPrintEverything():
        print(numpy_array)
    """

    def __init__(self):
        pass

    def __enter__(self):
        # Remember the current threshold so it can be restored on exit.
        self.saved_threshold = np.get_printoptions()['threshold']
        np.set_printoptions(threshold=np.inf)

    def __exit__(self, exc_type, exc_value, tb):
        np.set_printoptions(threshold=self.saved_threshold)
class NumpyPrintoptions:
    """Temporarily set NumPy printoptions.

    Synopsis:
      with NumpyPrintoptions(formatter={'float': '{:0.2f}'.format}):
        print(numpy_array)
    """

    def __init__(self, **kwargs):
        self.options = kwargs

    def __enter__(self):
        # Snapshot all current options so __exit__ can restore them.
        self.saved_options = np.get_printoptions()
        np.set_printoptions(**self.options)

    def __exit__(self, exc_type, exc_value, tb):
        np.set_printoptions(**self.saved_options)
class MiniBatchMaker:
    """Shuffle data and split it into batches."""

    def __init__(self, x, y, batch_size):
        assert len(x) == len(y)
        self.x = x
        self.y = y
        self.N = len(x)
        self.batch_size = batch_size
        # Ceiling division: the last batch of an epoch may be smaller.
        self.steps_per_epoch = -(-self.N // self.batch_size)
        self.batches_per_epoch = self.steps_per_epoch
        self.shuffle()
        self.i = 0
        self.epochs_completed = 0
        self.step = 0

    def shuffle(self):
        """Reshuffle the data with a fresh random permutation."""
        order = np.random.permutation(self.N)
        self.shuffled_x = self.x[order]
        self.shuffled_y = self.y[order]

    def next_batch(self):
        """Return the next (x, y) mini-batch, reshuffling at epoch end."""
        self.step += 1
        end = min(self.i + self.batch_size, self.N)
        # Fancy indexing (not a slice) so batches are independent copies.
        idx = np.arange(self.i, end)
        batch_x = self.shuffled_x[idx]
        batch_y = self.shuffled_y[idx]
        self.i = end % self.N
        if self.i == 0:
            self.epochs_completed += 1
            self.shuffle()
        return batch_x, batch_y

    def at_start_of_epoch(self):
        """Are we starting a new epoch?"""
        return self.i == 0
def create_iid_batch_generator(x, y, steps, batch_size, resample_prob=1):
    """Returns an IID mini-batch generator.

    ds = Dataset.from_generator(
        create_iid_batch_generator(x, y, batch_size), ...)

    Args:
      x: Input samples
      y: Labels
      steps: How many steps to run for
      batch_size: Integer size of mini-batch
      resample_prob: Probability of resampling a given sample at each step.
          If a function, the function should return the current resampling
          probability and will be called every time a batch is generated.
    """
    N = len(x)

    def gen():
        idx = np.random.choice(N, batch_size, replace=True)
        for _ in range(steps):
            yield (x[idx], y[idx])
            # resample_prob may be a callable returning the current
            # probability, or a plain number.
            try:
                prob = resample_prob()
            except TypeError:
                prob = resample_prob
            replace_mask = np.random.random((batch_size,)) < prob
            fresh = np.random.choice(N, replace_mask.sum(), replace=True)
            idx[replace_mask] = fresh

    return gen
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def keras_feed_dict(model,
                    x=None,
                    y=None,
                    feed_dict={},
                    learning_phase=KERAS_LEARNING_PHASE_TEST):
    """Return a feed dict with inputs and labels suitable for Keras.

    Args:
        model: A Keras Model
        x: Model inputs, or None if inputs are not fed
        y: Model targets (labels), or None if targets are not fed
        feed_dict: Additional entries to merge in; copied, not modified.
        learning_phase: 0 for TEST, 1 for TRAIN

    Returns:
        A new feed_dict containing the merged entries.
    """
    merged = dict(feed_dict)
    if x is not None:
        merged[model.inputs[0]] = x
        # Uniform sample weights matching the batch size.
        merged[model.sample_weights[0]] = np.ones(x.shape[0])
    if y is not None:
        merged[model.targets[0]] = y
    merged[K.learning_phase()] = learning_phase
    return merged
def keras_compute_tensors(model, x, y, tensors, feed_dict={}):
"""Compute the given tensors in Keras."""
new_feed_dict = keras_feed_dict(model, x, y, feed_dict)
return K.get_session().run(tensors, feed_dict=new_feed_dict)
def clone_keras_model_shared_weights(
model, input_tensor, target_tensor):
"""Clone a Keras model.
The new model shares its weights with the old model, but accepts different
inputs and targets. This is useful, for example, for evaluating a model
mid-training.
Args:
model: A compiled Keras model.
input_tensor: Tensor to use as input for the cloned model.
target_tensor: Tensor to be used as targets (labels) for the cloned model.
Returns:
The cloned Keras model.
"""
assert len(model.inputs) == 1
inputs = keras.layers.Input(tensor=input_tensor,
shape=model.inputs[0].shape[1:])
clone = keras.Model(
inputs=inputs,
outputs=model(input_tensor))
clone.compile(
loss=model.loss,
target_tensors=[target_tensor],
optimizer=model.optimizer,
metrics=model.metrics)
return clone
def flatten_array_list(arrays):
"""Flatten and concat a list of numpy arrays into a single rank 1 vector."""
return np.concatenate([np.reshape(a, [-1]) for a in arrays], axis=0)
def flatten_tensor_list(tensors):
    """Flatten and concatenate a list of tensors into one rank-1 tensor."""
    flat_pieces = [tf.reshape(tensor, [-1]) for tensor in tensors]
    return tf.concat(flat_pieces, axis=0)
def unflatten_tensor_list(flat_tensor, orig_tensors):
    """Reshape a flattened tensor back to a list of tensors with their
    original shapes.

    Args:
        flat_tensor: A tensor that was previously flattened using
            flatten_tensor_list()
        orig_tensors: A list of tensors with the original desired shapes.

    Returns:
        A list of tensors, one per entry of orig_tensors, carved out of
        flat_tensor in order and reshaped to match.
    """
    unflattened = []
    offset = 0
    for t in orig_tensors:
        # Consume exactly as many elements as t holds, then restore t's shape.
        num_elems = t.shape.num_elements()
        unflattened.append(
            tf.reshape(flat_tensor[offset:offset + num_elems], t.shape))
        offset += num_elems
    return unflattened
def compute_sample_mean_tensor(model, batches, tensors, feed_dict=None):
    """Compute the per-sample mean of the given tensors over one full epoch.

    Args:
        model: Keras Model
        batches: MiniBatchMaker cycling through the dataset
        tensors: Tensor or list of Tensors to compute the mean of
        feed_dict: Extra feed values used when evaluating the tensors.
            Never mutated (default None replaces the old mutable {} default).

    Returns:
        The per-sample mean of each tensor; a list if `tensors` was a
        list/tuple, otherwise a single value.
    """
    feed_dict = {} if feed_dict is None else feed_dict
    sample_means = None
    tensors_is_list = isinstance(tensors, (list, tuple))
    tensors = _AsList(tensors)
    while True:
        x_batch, y_batch = batches.next_batch()
        results = keras_compute_tensors(model, x_batch, y_batch, tensors, feed_dict)
        # Weight each batch result by its size so unequal (tail) batches
        # average correctly when divided by the dataset size below.
        for i in range(len(results)):
            results[i] *= len(x_batch)
        if sample_means is None:
            sample_means = results
        else:
            for i in range(len(results)):
                sample_means[i] += results[i]
        if batches.at_start_of_epoch():
            break
    for i in range(len(sample_means)):
        sample_means[i] /= batches.N
    if tensors_is_list:
        return sample_means
    assert len(sample_means) == 1
    return sample_means[0]
def jacobian(y, x):
    """Compute the Jacobian tensor J_ij = dy_i/dx_j.

    From https://github.com/tensorflow/tensorflow/issues/675, which is adapted
    from tf.hessians().

    :param Tensor y: A Tensor
    :param Tensor x: A Tensor
    :rtype: Tensor
    :return: The Jacobian Tensor, whose shape is the concatenation of
        the y and x shapes.
    """
    y_flat = tf.reshape(y, [-1])
    # tf.shape() returns a Tensor, so this supports dynamic sizing
    n = tf.shape(y_flat)[0]
    loop_vars = [
        tf.constant(0, tf.int32),
        tf.TensorArray(tf.float32, size=n),
    ]
    # Row j of the result holds the gradient of the j-th flattened output
    # element with respect to x.
    _, jacobian = tf.while_loop(
        lambda j, _: j < n,
        lambda j, result: (j+1,
                           result.write(j, tf.gradients(y_flat[j], x)[0])),
        loop_vars)
    # Restore y's original shape in the leading dimensions.
    jacobian_shape = tf.concat([tf.shape(y), tf.shape(x)], axis=0)
    jacobian = tf.reshape(jacobian.stack(), jacobian_shape)
    return jacobian
def jacobians(y, xs):
    """Compute the Jacobian tensors J_ij = dy_i/dx_j for each x in xs.

    With this implementation, the gradient is computed for all xs in one
    call, so if xs includes weights from different layers then back prop
    is used.

    :param Tensor y: A rank 1 Tensor
    :param Tensor xs: A Tensor or list of Tensors
    :rtype: list
    :return: List of Jacobian tensors J_ij = dy_i/dx_j for each x in xs.
    """
    if y.shape.ndims != 1:
        raise ValueError('y must be a rank 1 Tensor')
    xs = _AsList(xs)
    # tf.shape() returns a Tensor, so this supports dynamic sizing
    len_y = tf.shape(y)[0]
    jacobians = []
    # Outer loop runs over elements of y, computes gradients for each.
    # One TensorArray per x accumulates the per-element gradient rows.
    loop_vars = [
        tf.constant(0, tf.int32),
        [tf.TensorArray(tf.float32, size=len_y) for x in xs]
    ]
    def _compute_single_y_gradient(j, arrays):
        """Compute the gradient for a single y elem."""
        # One backward pass gives the gradient of y[j] w.r.t. every x.
        grads = tf.gradients(y[j], xs)
        for i, g in enumerate(grads):
            arrays[i] = arrays[i].write(j, g)
        return arrays
    _, jacobians = tf.while_loop(
        lambda j, _: j < len_y,
        lambda j, arrays: (j + 1, _compute_single_y_gradient(j, arrays)),
        loop_vars)
    # Stack each TensorArray into a single tensor (leading dim = len(y)).
    jacobians = [a.stack() for a in jacobians]
    return jacobians
def hessians(y, xs):
    """The Hessian of y with respect to each x in xs.

    :param y Tensor: A scalar Tensor.
    :param xs Tensor: A Tensor or list of Tensors. Each Tensor can have any
        rank.
    :rtype: list
    :return: List of Hessians d^2y/dx^2. The shape of each Hessian is
        x.shape + x.shape (the code concatenates x's shape with itself).
    """
    xs = _AsList(xs)
    hessians = []  # NOTE: local list shadows the function name
    for x in xs:
        # First derivative and flatten
        grad = tf.gradients(y, x)[0]
        grad_flat = tf.reshape(grad, [-1])
        # Second derivative: row j is the gradient of the j-th flattened
        # gradient component with respect to x.
        n = tf.shape(grad_flat)[0]
        loop_vars = [
            tf.constant(0, tf.int32),
            tf.TensorArray(tf.float32, size=n),
        ]
        _, hessian = tf.while_loop(
            lambda j, _: j < n,
            lambda j, result: (j+1, result.write(
                j, tf.gradients(grad_flat[j], x)[0])),
            loop_vars)
        hessian = hessian.stack()
        # Reshape the (n, x.shape) stack into shape x.shape + x.shape.
        x_shape = tf.shape(x)
        hessian_shape = tf.concat([x_shape, x_shape], axis=0)
        hessians.append(tf.reshape(hessian, hessian_shape))
    return hessians
def num_weights(weights):
    """Total number of scalar parameters across the given weight tensors."""
    element_counts = (w.shape.num_elements() for w in weights)
    return sum(element_counts)
def total_num_weights(model):
    """Total number of trainable weights in the given Keras model."""
    trainable = model.trainable_weights
    return num_weights(trainable)
def total_tensor_elements(x):
    """Tensor containing the total number of elements of x.

    :param x Tensor: A tensor.
    :rtype: Tensor
    :return: A scalar Tensor containing the total number of elements.
    """
    dynamic_shape = tf.shape(x)
    return tf.reduce_prod(dynamic_shape)
def hessian_tensor_blocks(y, xs):
    """Compute the tensors that make up the full Hessian (d^2y / dxs dxs).

    A full computation of the Hessian would look like this:

        blocks = hessian_tensor_blocks(y, xs)
        block_results = sess.run(blocks)
        hessian = hessian_combine_blocks(block_results)

    :param y Tensor: A scalar Tensor.
    :param xs Tensor: A Tensor or list of Tensors. Each Tensor can have any
        rank.
    :rtype: list
    :return: List of Tensors that should be evaluated, and the results
        should be passed to hessian_combine_blocks() to get the full
        gradient.
    """
    xs = _AsList(xs)
    hess_blocks = []
    for i1, x1 in enumerate(xs):
        # First derivative and flatten
        grad_x1 = tf.gradients(y, x1)[0]
        grad_x1_flat = tf.reshape(grad_x1, [-1])
        x1_size = total_tensor_elements(x1)
        # Second derivative: Only compute upper-triangular blocks
        # because Hessian is symmetric
        for x2 in xs[i1:]:
            x2_size = total_tensor_elements(x2)
            loop_vars = [
                tf.constant(0, tf.int32),
                tf.TensorArray(tf.float32, size=x1_size),
            ]
            # Row j of the block is d(grad_x1_flat[j]) / d(x2).
            _, x1_x2_block = tf.while_loop(
                lambda j, _: j < x1_size,
                lambda j, result: (j+1, result.write(
                    j, tf.gradients(grad_x1_flat[j], x2)[0])),
                loop_vars)
            x1_x2_block = tf.reshape(x1_x2_block.stack(), [x1_size, x2_size])
            hess_blocks.append(x1_x2_block)
    return hess_blocks
def hessian_combine_blocks(blocks):
    """Assemble the full symmetric Hessian matrix from the upper-triangular
    blocks produced by hessian_tensor_blocks().
    """
    # Only the upper triangle was stored: len(blocks) = n * (n + 1) / 2,
    # so solve the quadratic for n, the number of block-rows.
    n = int((np.sqrt(1 + 8 * len(blocks)) - 1) / 2)
    # Column widths of the first block-row give every block dimension.
    dims = [blk.shape[1] for blk in blocks[:n]]
    # Precompute each block row/column's start offset in the full matrix.
    starts = [0]
    for d in dims:
        starts.append(starts[-1] + d)
    total_dim = starts[-1]
    H = np.zeros((total_dim, total_dim))
    idx = 0
    for row in range(n):
        for col in range(row, n):
            blk = blocks[idx]
            idx += 1
            r0, r1 = starts[row], starts[row + 1]
            c0, c1 = starts[col], starts[col + 1]
            H[r0:r1, c0:c1] = blk
            # Mirror into the lower triangle (the Hessian is symmetric).
            H[c0:c1, r0:r1] = blk.transpose()
    return H
def trace_hessian(loss, logits, weights):
    """Compute the trace of the Hessian of loss with respect to weights.

    We assume that loss = loss(logits(weights)), and that logits is a
    piecewise-linear function of the weights (therefore d^2 logits / dw^2 = 0
    for any w). This allows for a faster implementation than the naive one.

    Note: This computes the Hessian of loss / logits, where logits is indexed
    by sample and class, but all elements of the Hessian with two different
    classes vanish. So this is still not the most efficient way to do it.

    :param loss Tensor: A scalar Tensor
    :param logits Tensor: The logits tensor.
    :param weights Tensor: A Tensor or list of Tensors of model weights.
    :rtype: Tensor
    :return: The trace of the Hessian of loss with respect to all the weights.
    """
    weights = _AsList(weights)
    # Flatten logits with a well-specified dimension. This assumes any
    # non-trivial dimension will resolve to 1.
    # logits_flat = tf.reshape(logits, [-1])
    loss_logits_hessian = hessians(loss, logits)[0]
    tr_hessian_pieces = []
    for w in weights:
        # Chain rule: tr(H_w) = H_logits : (J J^T) when logits are
        # piecewise-linear in w.
        J = jacobian(logits, w)
        # Contract along the weight indices
        # (first index is the logit index)
        weight_axes = list(range(logits.shape.ndims, J.shape.ndims))
        JJ = tf.tensordot(J, J, axes=[weight_axes, weight_axes])
        # Doesn't work for dynamic shape
        # assert loss_logits_hessian.shape == JJ.shape
        # Full contraction of the two (logit x logit) tensors gives a scalar.
        all_axes = list(range(JJ.shape.ndims))
        tr_hessian_pieces.append(
            tf.tensordot(loss_logits_hessian, JJ, axes=[all_axes, all_axes]))
    return tf.reduce_sum(tr_hessian_pieces)
def trace_hessian_softmax_crossentropy(logits, weights):
    """Compute the trace of the Hessian of loss with respect to weights.

    The loss is assumed to be crossentropy(softmax(logits)), which allows
    us to compute the loss/logits Hessian analytically, and it factorizes.
    We also assume that logits is a piecewise-linear function of the weights
    (therefore d^2 logits / dw^2 = 0 for any w).

    Here we compute the Hessian of loss / logits analytically, which saves
    some time, but only about 10%. It seems most time is spent just computing
    the Jacobian.

    NOTE(review): this function appears unfinished — tr_hessian_pieces is
    never populated, so it always returns 0. JdotP is computed but unused,
    and tf.logging.info is called with print-style varargs (it expects a
    format string). Verify against the intended derivation before use.

    :param logits Tensor: A Tensor with rank 2, indexed by [sample, class].
    :param weights Tensor: A Tensor or list of Tensors of model weights.
    :rtype: Tensor
    :return: The trace of the Hessian of loss with respect to all the weights.
    """
    weights = _AsList(weights)
    if logits.shape.ndims != 2:
        raise ValueError('logits tensor must have rank 2')
    probs = tf.nn.softmax(logits)
    tr_hessian_pieces = []
    # J.p contracted in a single backward pass (grad_ys weights the logits).
    JdotP = tf.gradients(logits, weights, grad_ys=probs)
    tf.logging.info('JdotP =', JdotP)
    return tf.reduce_sum(tr_hessian_pieces)
def trace_hessian_reference(loss, weights):
    """Compute the whole Hessian for each layer, then take the trace.

    This is a straightforward and slow implementation meant for testing.
    """
    weights = _AsList(weights)
    trace_terms = []
    grads = tf.gradients(loss, weights)
    for grad, weight in zip(grads, weights):
        # Differentiate each flattened gradient element again; the i-th
        # component of that second gradient is a diagonal Hessian entry.
        grad_unstacked = tf.unstack(tf.reshape(grad, [-1]))
        for i, g in enumerate(grad_unstacked):
            g2 = tf.reshape(tf.gradients(g, weight)[0], [-1])
            diag_hessian_term = g2[i]
            trace_terms.append(diag_hessian_term)
    return tf.reduce_sum(trace_terms)
def hessian_vector_product(loss, weights, v):
    """Tensor for the product H.v, where H is the loss Hessian with respect
    to the weights.

    v is a rank-1 tensor with the same size and element ordering as the
    flattened gradient (the ordering produced by flatten_tensor_list()).
    Derivatives of dv/dweights should vanish.
    """
    # First-order gradient, flattened to match v's layout.
    grad_flat = flatten_tensor_list(tf.gradients(loss, weights))
    # Inner product g.v; stop_gradient keeps v out of the second derivative.
    grad_dot_v = tf.reduce_sum(grad_flat * tf.stop_gradient(v))
    # Differentiating g.v once more yields H.v (Pearlmutter's trick).
    return flatten_tensor_list(tf.gradients(grad_dot_v, weights))
class TensorStatistics:
    """Accumulate running statistics for one tensor across mini-batches."""

    def __init__(self, tensor):
        self.tensor = tensor
        dims = tensor.shape.as_list()
        # Running first and second moments, accumulated per add_minibatch().
        self.running_sum = np.zeros(dims, dtype=np.float32)
        self.running_sum_of_squares = np.zeros(dims, dtype=np.float32)
        self.n = 0

    def add_minibatch(self, value):
        """Record the mean value of one minibatch."""
        self.running_sum += value
        self.running_sum_of_squares += value * value
        self.n += 1

    @property
    def mean(self):
        """The mean"""
        return self.running_sum / self.n

    @property
    def var(self):
        """Variance of each tensor element"""
        m = self.mean
        return self.running_sum_of_squares / self.n - m * m

    @property
    def std(self):
        """Standard deviation of each tensor element"""
        return np.sqrt(self.var)

    @property
    def norm_of_mean(self):
        """Norm of the mean"""
        return np.linalg.norm(self.mean)

    @property
    def norm_of_std(self):
        """Norm of the vector of standard deviations"""
        return np.linalg.norm(self.std)
class TensorListStatistics(list):
    """Per-tensor statistics for a list of tensors across mini-batches.

    Behaves as a list whose elements are TensorStatistics objects.
    """

    def __init__(self, tensors):
        """tensors: list of Tensors"""
        super().__init__(TensorStatistics(t) for t in tensors)

    def add_minibatch(self, values):
        """Record one minibatch of values, one entry per tracked tensor."""
        for stats, value in zip(self, values):
            stats.add_minibatch(value)

    @property
    def means(self):
        """List of tensor means"""
        return [stats.mean for stats in self]

    @property
    def vars(self):
        """List of tensor variances"""
        return [stats.var for stats in self]

    @property
    def stds(self):
        """List of tensor standard deviations"""
        return [stats.std for stats in self]

    @property
    def norm_of_mean(self):
        """The norm of the concatenated list of tensor means."""
        per_tensor_norms = np.array([np.linalg.norm(stats.mean) for stats in self])
        return np.sqrt(np.sum(per_tensor_norms * per_tensor_norms))

    @property
    def norm_of_std(self):
        """The norm of the concatenated list of tensor stds."""
        per_tensor_norms = np.array([np.linalg.norm(stats.std) for stats in self])
        return np.sqrt(np.sum(per_tensor_norms * per_tensor_norms))
class KerasHessianSpectrum:
    """Computes the partial Hessian spectrum of a Keras model using Lanczos."""
    def __init__(self, model, x, y, batch_size=1024, stochastic=False,
                 weights=None, loss=None):
        """model is a keras sequential model.

        Args:
            model: A Keras Model
            x: Training samples
            y: Training labels
            batch_size: Batch size for computing the Hessian. Does not affect the
                result, only affects time and memory performance.
            stochastic: If True, approximate the Hessian using batch_size samples,
                chosen at random with each call to compute_spectrum().
            weights: Weights with respect to which to compute the Hessian.
                Can be a weight tensor or a list of tensors. If None,
                all model weights are used.
            loss: Can be specified separately for unit testing purposes.
        """
        self.model = model
        if weights is None:
            self.weights = model.trainable_weights
        else:
            self.weights = _AsList(weights)
        self.num_weights = num_weights(self.weights)
        # Placeholder for the vector v in the Hessian-vector product H.v.
        self.v = tf.placeholder(tf.float32, shape=(self.num_weights,))
        # Delay looking at model because it may not be compiled yet
        self._loss = loss
        self._Hv = None
        self.train_batches = MiniBatchMaker(x, y, batch_size)
        self.batch_size = batch_size
        self.stochastic = stochastic
    @property
    def loss(self):
        """The loss.

        Evaluated lazily in case the model is not compiled at first.
        """
        if self._loss is None:
            return self.model.total_loss
        else:
            return self._loss
    @property
    def Hv(self):
        """The Hessian-vector product tensor (built lazily and cached)."""
        if self._Hv is None:
            self._Hv = hessian_vector_product(self.loss, self.weights, self.v)
        return self._Hv
    def compute_spectrum(self, k, v0=None, show_progress=False):
        """Compute k leading eigenvalues and eigenvectors.

        Args:
            k: Number of leading eigenpairs to compute.
            v0: If specified, use as initial vector for Lanczos.
            show_progress: If True, print a dot per Hessian-vector product.

        Returns:
            The (eigenvalues, eigenvectors) pair returned by lanczos.eigsh.
        """
        timer = Timer()
        if self.stochastic:
            results = self._compute_stochastic_spectrum(k, v0, show_progress)
        else:
            results = self._compute_full_batch_spectrum(k, v0, show_progress)
        # Record how long the Lanczos run took, for later reporting.
        self.lanczos_secs = timer.secs
        if show_progress:
            tf.logging.info('')
        return results
    def _compute_full_batch_spectrum(self, k, v0=None, show_progress=False):
        """Lanczos using the exact (full-dataset) Hessian-vector product."""
        self.lanczos_iterations = 0
        def compute_Hv(v):
            # Matrix-vector callback for Lanczos: sample-mean of H.v over
            # the entire training set.
            if show_progress:
                print('.', end='')
                sys.stdout.flush()
            self.lanczos_iterations += 1
            return compute_sample_mean_tensor(self.model, self.train_batches, self.Hv,
                                              {self.v: v})
        evals, evecs = lanczos.eigsh(
            n=self.num_weights, dtype=np.float32, matvec=compute_Hv, k=k, v0=v0)
        return evals, evecs
    def _compute_stochastic_spectrum(self, k, v0=None, show_progress=False):
        """Compute k leading eigenvalues and eigenvectors.

        Each time this method is called, a new batch is selected to approximate
        the Hessian.
        """
        self.lanczos_iterations = 0
        x_batch, y_batch = self.train_batches.next_batch()
        # A short tail-of-epoch batch is discarded and a full one redrawn.
        if len(x_batch) < self.batch_size:
            assert self.train_batches.at_start_of_epoch()
            x_batch, y_batch = self.train_batches.next_batch()
            if len(x_batch) < self.batch_size:
                raise ValueError("Getting batches that are too small: {} < {}".format(
                    len(x_batch), self.batch_size))
        def compute_Hv(v):
            # Matrix-vector callback: H.v estimated on the fixed sampled batch.
            if show_progress:
                print('.', end='')
                sys.stdout.flush()
            self.lanczos_iterations += 1
            return keras_compute_tensors(self.model, x_batch, y_batch, self.Hv,
                                         {self.v: v})
        evals, evecs = lanczos.eigsh(
            n=self.num_weights, dtype=np.float32, matvec=compute_Hv, k=k, v0=v0)
        return evals, evecs
| 2,742 | 0 | 450 |
beb3bdc9c2a1fb99b293a9ab70d6ff50bc7e3d3a | 43 | py | Python | papel/tipo_papel.py | dusannemec/stocktracker | fd83862ce47dae1615c445a1bed1a39d3a769e80 | [
"MIT"
] | null | null | null | papel/tipo_papel.py | dusannemec/stocktracker | fd83862ce47dae1615c445a1bed1a39d3a769e80 | [
"MIT"
] | null | null | null | papel/tipo_papel.py | dusannemec/stocktracker | fd83862ce47dae1615c445a1bed1a39d3a769e80 | [
"MIT"
] | null | null | null | tipo = [
'Ação',
'FII',
'ETF'
] | 8.6 | 11 | 0.325581 | tipo = [
'Ação',
'FII',
'ETF'
] | 0 | 0 | 0 |
ea0427ce42e1c22f4f5ed27b3041b6617662f463 | 795 | py | Python | anmEasy6-test-env.py | anushaihalapathirana/RL-Gym-ANM-tool | 2dee2da9be26f512179d313c985832718a34042b | [
"MIT"
] | null | null | null | anmEasy6-test-env.py | anushaihalapathirana/RL-Gym-ANM-tool | 2dee2da9be26f512179d313c985832718a34042b | [
"MIT"
] | null | null | null | anmEasy6-test-env.py | anushaihalapathirana/RL-Gym-ANM-tool | 2dee2da9be26f512179d313c985832718a34042b | [
"MIT"
] | null | null | null |
import gym
import time
if __name__ == '__main__':
run()
| 30.576923 | 136 | 0.641509 |
import gym
import time
def run():
    """Run a random agent in the gym-ANM 'ANM6Easy-v0' environment,
    rendering each of 10 timesteps."""
    env = gym.make('gym_anm:ANM6Easy-v0')
    o = env.reset()  # create initial observation
    for i in range(10):
        a = env.action_space.sample()  # sample a random action from the ANM6Easy-v0 action space each timestep
        o, r, done, info = env.step(a)
        env.render()
        time.sleep(0.5)  # otherwise the rendering is too fast for the human eye
        # A terminal state is reached if no solution to the power flow equations
        # is found: the power grid has collapsed, often due to a voltage collapse.
        if done:
            # Every time a terminal state is reached, the environment gets reset.
            o = env.reset()
    env.close()
if __name__ == '__main__':
run()
| 708 | 0 | 23 |
f1822bd63d1f41f4af248bb046dc7db2672e62a6 | 917 | py | Python | jiminy/gym/envs/mujoco/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | 3 | 2020-03-16T13:50:40.000Z | 2021-06-09T05:26:13.000Z | jiminy/gym/envs/mujoco/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | jiminy/gym/envs/mujoco/__init__.py | sibeshkar/jiminy | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | [
"MIT"
] | null | null | null | from jiminy.gym.envs.mujoco.mujoco_env import MujocoEnv
# ^^^^^ so that user gets the correct error
# message if mujoco is not installed correctly
from jiminy.gym.envs.mujoco.ant import AntEnv
from jiminy.gym.envs.mujoco.half_cheetah import HalfCheetahEnv
from jiminy.gym.envs.mujoco.hopper import HopperEnv
from jiminy.gym.envs.mujoco.walker2d import Walker2dEnv
from jiminy.gym.envs.mujoco.humanoid import HumanoidEnv
from jiminy.gym.envs.mujoco.inverted_pendulum import InvertedPendulumEnv
from jiminy.gym.envs.mujoco.inverted_double_pendulum import InvertedDoublePendulumEnv
from jiminy.gym.envs.mujoco.reacher import ReacherEnv
from jiminy.gym.envs.mujoco.swimmer import SwimmerEnv
from jiminy.gym.envs.mujoco.humanoidstandup import HumanoidStandupEnv
from jiminy.gym.envs.mujoco.pusher import PusherEnv
from jiminy.gym.envs.mujoco.thrower import ThrowerEnv
from jiminy.gym.envs.mujoco.striker import StrikerEnv
| 53.941176 | 85 | 0.85169 | from jiminy.gym.envs.mujoco.mujoco_env import MujocoEnv
# ^^^^^ so that user gets the correct error
# message if mujoco is not installed correctly
from jiminy.gym.envs.mujoco.ant import AntEnv
from jiminy.gym.envs.mujoco.half_cheetah import HalfCheetahEnv
from jiminy.gym.envs.mujoco.hopper import HopperEnv
from jiminy.gym.envs.mujoco.walker2d import Walker2dEnv
from jiminy.gym.envs.mujoco.humanoid import HumanoidEnv
from jiminy.gym.envs.mujoco.inverted_pendulum import InvertedPendulumEnv
from jiminy.gym.envs.mujoco.inverted_double_pendulum import InvertedDoublePendulumEnv
from jiminy.gym.envs.mujoco.reacher import ReacherEnv
from jiminy.gym.envs.mujoco.swimmer import SwimmerEnv
from jiminy.gym.envs.mujoco.humanoidstandup import HumanoidStandupEnv
from jiminy.gym.envs.mujoco.pusher import PusherEnv
from jiminy.gym.envs.mujoco.thrower import ThrowerEnv
from jiminy.gym.envs.mujoco.striker import StrikerEnv
| 0 | 0 | 0 |
151e11417e838cef74f2e569580080f73607b95b | 609 | py | Python | carpool/views.py | huudi001/uber-like | 8af92c224ab4d8644d793aac9e77d7b3647712f8 | [
"MIT"
] | null | null | null | carpool/views.py | huudi001/uber-like | 8af92c224ab4d8644d793aac9e77d7b3647712f8 | [
"MIT"
] | null | null | null | carpool/views.py | huudi001/uber-like | 8af92c224ab4d8644d793aac9e77d7b3647712f8 | [
"MIT"
] | null | null | null | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
| 29 | 57 | 0.747126 | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
class TestPage(LoginRequiredMixin,TemplateView):
    """Renders test.html; LoginRequiredMixin restricts it to logged-in users."""
    template_name = 'test.html'
class ThanksPage(TemplateView):
    """Static page rendering thanks.html (presumably post-logout — confirm in urls)."""
    template_name = 'thanks.html'
class HomePage(TemplateView):
    """Public landing page; authenticated users are redirected to the "test" URL."""
    template_name = "index.html"

    def get(self, request, *args, **kwargs):
        # NOTE(review): is_authenticated is *called* here, which only works on
        # pre-1.10 Django (consistent with the django.core.urlresolvers import);
        # on modern Django it is a property and must not be called.
        if request.user.is_authenticated():
            return HttpResponseRedirect(reverse("test"))
        return super().get(request, *args, **kwargs)
| 173 | 171 | 69 |
4a22fe3dc836ded77f9e576984da0d11042601e3 | 2,420 | py | Python | crypto/xnuca-2020/imposter/task.py | BadMonkey7/funny-ctf-challenge | 827caed5ab54f1da9217dfa25b034b9b398b11ef | [
"MIT"
] | 2 | 2020-10-22T08:13:52.000Z | 2021-01-16T06:56:24.000Z | crypto/xnuca-2020/imposter/task.py | BadMonkey7/funny-ctf-challenge | 827caed5ab54f1da9217dfa25b034b9b398b11ef | [
"MIT"
] | null | null | null | crypto/xnuca-2020/imposter/task.py | BadMonkey7/funny-ctf-challenge | 827caed5ab54f1da9217dfa25b034b9b398b11ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import random
import string
from hashlib import sha256
from Toy_AE import Toy_AE
from secret import FLAG
if __name__ == "__main__":
ae = Toy_AE()
if not proof_of_work():
exit(-1)
for _ in range(4):
try:
menu()
except:
exit(-1) | 28.470588 | 103 | 0.572314 | #!/usr/bin/env python3
import os
import random
import string
from hashlib import sha256
from Toy_AE import Toy_AE
from secret import FLAG
def proof_of_work():
    """Interactive proof-of-work gate.

    Generates a random 20-character alphanumeric token, prints
    sha256(XXXX + suffix) where XXXX is the hidden 4-character prefix,
    and asks the client to supply XXXX. Returns True only when the
    supplied prefix reproduces the digest.
    """
    random.seed(os.urandom(8))
    proof = b''.join([random.choice(string.ascii_letters + string.digits).encode() for _ in range(20)])
    digest = sha256(proof).hexdigest().encode()
    print("sha256(XXXX+%s) == %s" % (proof[4:],digest))
    print("Give me XXXX:")
    x = input().encode()
    return False if len(x) != 4 or sha256(x + proof[4:]).hexdigest().encode() != digest else True
def pack(uid, uname, token, cmd, appendix):
    """Serialize ticket fields into the 0xff-delimited wire format."""
    fields = [
        b'Uid=%d' % uid,
        b'UserName=%s' % uname,
        b'T=%s' % token,
        b'Cmd=%s' % cmd,
    ]
    # Each field is terminated by \xff; the appendix is appended raw.
    return b'\xff'.join(fields) + b'\xff' + appendix
def unpack(r):
    """Parse a 0xff-delimited ticket back into (uid, uname, token, cmd, appendix)."""
    parts = r.split(b"\xff")
    # Strip each field's key prefix ("Uid=", "UserName=", "T=", "Cmd=").
    uid = int(parts[0][len(b"Uid="):])
    uname = parts[1][len(b"UserName="):]
    token = parts[2][len(b"T="):]
    cmd = parts[3][len(b"Cmd="):]
    appendix = parts[4]
    return (uid, uname, token, cmd, appendix)
def apply_ticket():
    """Issue an encrypted ticket for a user-chosen identity and command.

    Username "Administrator" and command "Give_Me_Flag" are rejected up
    front; forging a ticket that decrypts to them is the challenge goal.
    Prints the ciphertext and its authentication tag in hex.
    """
    uid = int(input("Set up your user id:")[:5])
    uname = input("Your username:").encode("ascii")[:16]
    if uname == b"Administrator":
        print("Sorry, preserved username.")
        return
    # Token length varies with uid: between 8 and 15 hex chars of sha256(uname).
    token = sha256(uname).hexdigest()[:max(8, uid % 16)].encode("ascii")
    cmd = input("Your command:").encode("ascii")[:16]
    if cmd == b"Give_Me_Flag":
        print("Not allowed!")
        return
    appendix = input("Any Appendix?").encode("ascii")[:16]
    msg = pack(uid, uname, token, cmd, appendix)
    ct, te = ae.encrypt(msg)
    print("Your ticket:%s" % ct.hex())
    print("With my Auth:%s" % te.hex())
def check_ticket():
    """Decrypt and authenticate a ticket; print FLAG for a valid ticket
    whose username is Administrator and command is Give_Me_Flag."""
    ct = bytes.fromhex(input("Ticket:"))
    te = bytes.fromhex(input("Auth:"))
    msg = ae.decrypt(ct, te)
    # decrypt apparently returns a falsy value on authentication failure —
    # the assert aborts this menu round in that case.
    assert msg
    uid, uname, token, cmd, appendix = unpack(msg)
    if uname == b"Administrator" and cmd == b"Give_Me_Flag":
        print(FLAG)
        exit(0)
    else:
        print("Nothing happend.")
def menu():
    """Print the menu and dispatch one user-selected operation.

    An option outside 1..3 raises AssertionError, which the caller's
    except clause turns into program exit.
    """
    print("Menu:")
    print("[1] Apply Ticket")
    print("[2] Check Ticket")
    print("[3] Exit")
    op = int(input("Your option:"))
    assert op in range(1, 4)
    if op == 1:
        apply_ticket()
    elif op == 2:
        check_ticket()
    else:
        print("Bye!")
        exit(0)
if __name__ == "__main__":
ae = Toy_AE()
if not proof_of_work():
exit(-1)
for _ in range(4):
try:
menu()
except:
exit(-1) | 1,961 | 0 | 138 |
0640274aa1b43080f82d46e3bd92f4ec37445449 | 2,296 | py | Python | adjutant_moc/actions/users.py | CCI-MOC/adjutant-moc | 015de325dced135f56867c2ca8e07814cc950e36 | [
"Apache-2.0"
] | 1 | 2021-01-22T18:21:42.000Z | 2021-01-22T18:21:42.000Z | adjutant_moc/actions/users.py | CCI-MOC/adjutant-moc | 015de325dced135f56867c2ca8e07814cc950e36 | [
"Apache-2.0"
] | 14 | 2020-05-06T13:39:21.000Z | 2022-02-22T16:27:01.000Z | adjutant_moc/actions/users.py | CCI-MOC/adjutant-moc | 015de325dced135f56867c2ca8e07814cc950e36 | [
"Apache-2.0"
] | 3 | 2019-01-26T20:10:10.000Z | 2019-11-04T16:39:46.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from confspirator import groups as config_groups
from confspirator import fields as config_fields
from adjutant_moc.actions import base, operations
from adjutant_moc.actions import serializers
| 33.764706 | 74 | 0.682491 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from confspirator import groups as config_groups
from confspirator import fields as config_fields
from adjutant_moc.actions import base, operations
from adjutant_moc.actions import serializers
class MocInviteUserAction(base.MocBaseAction):
    """Action that invites a user (by email) to a project with given roles.

    The invitation is auto-approved but requires the invitee to confirm
    via an emailed token before the role grant is submitted.
    """
    # Fields that must be present in the action data.
    required = [
        'email',
        'project_id',
        'roles',
    ]
    serializer = serializers.MocInviteUserSerializer

    config_group = config_groups.DynamicNameConfigGroup(
        children=[config_fields.StrConfig(
            "user_domain_id",
            help_text="Domain to create projects in.",
            default="default",
            sample_default="Default")]
    )

    def _get_email(self):
        """This is the email where the invitation will be sent."""
        return self.email

    def _prepare(self):
        # Validate first; on failure leave the action in its initial state.
        if not self._validate():
            self.add_note('Validation failed at _prepare')
            return
        self.action.auto_approve = True
        self.action.state = "pending"
        # The token response must carry a confirmation flag and the user id.
        self.action.need_token = True
        self.set_token_fields(["confirm", "user"])

    def validate_token(self, token_data):
        """Accept the token only when the action is valid and the invitee
        explicitly confirmed."""
        if not self.valid or not token_data.get('confirm'):
            self.add_note('Invitation not valid or not accepted.')
            return False
        return True

    def write_to_approve_journal(self):
        # Nothing happens at approval time; the work is queued at submit.
        pass

    def write_to_submit_journal(self, token_data):
        """Queue the operation adding the confirmed user to the project."""
        project = self.leader_identity.get_project(self.project_id)
        project_ref = {'name': project.name,
                       'domain_id': project.domain_id}
        services = self.find_services_for_project(project)
        self.submit_journal.append(operations.AddUserToProjectOperation(
            services, token_data['user'], project_ref, self.roles))
| 885 | 649 | 23 |
2c2b0fd701f5472cf7ba3aedcf86fcf586ae2654 | 13,123 | py | Python | clovis_points/Flint.py | shadoobie/cairn | ff76720627be6dc332219e46c31ce1ac98adfddd | [
"BSD-3-Clause"
] | 1 | 2020-06-10T07:50:57.000Z | 2020-06-10T07:50:57.000Z | clovis_points/Flint.py | shadoobie/cairn | ff76720627be6dc332219e46c31ce1ac98adfddd | [
"BSD-3-Clause"
] | null | null | null | clovis_points/Flint.py | shadoobie/cairn | ff76720627be6dc332219e46c31ce1ac98adfddd | [
"BSD-3-Clause"
] | null | null | null | import uuid
from numpy import random
from clovis_points import ActivationFunctions as af
from clovis_points import TruthTables as tt
from clovis_points.Flutes import Flutes
from testing.TestUtilities import TestUtilities
from the_historical_record.Block import Block
from the_historical_record.BlockChain import BlockChain
| 56.321888 | 123 | 0.597424 | import uuid
from numpy import random
from clovis_points import ActivationFunctions as af
from clovis_points import TruthTables as tt
from clovis_points.Flutes import Flutes
from testing.TestUtilities import TestUtilities
from the_historical_record.Block import Block
from the_historical_record.BlockChain import BlockChain
class Flint:
learning_rate = 0 # (Lithic reduction)
bias = 0
weights = None
truth_table = None
operation = None
training_ledger = None
utils = None
ledger_item_template = None
learning_record_template = None
data_header = None
log = None
    def __init__(self, learning_rate, bias, operation, log):
        """Create a perceptron that learns the given logical operation.

        Args:
            learning_rate: step size for the delta-rule weight updates
                ("lithic reduction").
            bias: constant bias input fed alongside the data inputs.
            operation: one of 'and'/'&', 'or'/'|', 'not'/'-'.
            log: logger the perceptron writes progress to.
        """
        self.utils = TestUtilities()
        self.learning_rate = learning_rate
        self.bias = bias
        # Order matters: __init_truth_table__ sets self.operation, which
        # __init_log uses to build the log component name.
        self.truth_table = self.__init_truth_table__(operation)
        self.__init_log(log)
        self.weights = list()
        for k in range(3):
            self.weights.append(random.random())  # Assigning random weights
        self.training_ledger = BlockChain()
        self.__init_historical_data__()
def __init_log(self, log):
'''this needs to be called after __determine_truth_table__ (so not functional, ik, ik)'''
component = "Flint_perceptron_learning_logical_" + self.operation + "_operation"
self.log = log
print("Flint is appending to the log file with component: " + component)
def __init_truth_table__(self, operation):
#TODO: should probably put the truth table that this determines into the data header
if operation.lower() in ['and', '&']:
self.operation = 'and'
return tt.TruthTables.and_truth_table()
elif operation.lower() in ['or', '|']:
self.operation = 'or'
return tt.TruthTables.or_truth_table()
elif operation.lower() in ['not', '-']:
self.operation = 'not'
return tt.TruthTables.not_truth_table()
    def __init_historical_data__(self):
        """Build the data header describing this perceptron's configuration.

        Populates self.ledger_item_template and self.learning_record_template
        from the JSON snapshot, then fills self.data_header with identifying
        metadata, the activation/weight-update formulas and starting weights.
        """
        self.ledger_item_template = self.__init_ledger_item__()
        some_array_yo = self.ledger_item_template["learning_history"]
        self.learning_record_template = some_array_yo[0]
        self.data_header = self.__create_the_data_header__()
        self.data_header['id'] = str(uuid.uuid4())
        self.data_header['name'] = 'Flint Perceptron'
        self.data_header['nn_class'] = self.__class__.__name__
        self.data_header['operation'] = self.operation
        self.data_header['bias'] = self.bias
        self.data_header['activation_function_name'] = "Sigmoid function"
        self.data_header['activation_function'] = "s(x) = 1 / 1 + e^-x = e^x / e^x + 1"
        if self.operation in ['and', 'or']:
            self.data_header['weight_modification_functions'] = ["w0 = w0 + error * input1 * learning_rate",
                                                                 "w1 = w1 + error * input2 * learning_rate",
                                                                 "w2 = w2 + error * bias * learning_rate"]
            self.data_header['weight_initialization_functions'] = ["random", "random", "random"]
            self.data_header['starting_weights'] = [self.weights[0], self.weights[1], self.weights[2]]
        elif self.operation in ['not']:
            # NOTE(review): this records updates to w0 and "w1", but
            # modify_weights_training_for_not actually updates weights[0]
            # and weights[2] — confirm which indexing is intended.
            self.data_header['weight_modification_functions'] = ["w0 = w0 + error * input1 * learning_rate",
                                                                 "w1 = w1 + error * bias * learning_rate"]
            self.data_header['weight_initialization_functions'] = ["random", "random"]
            self.data_header['starting_weights'] = [self.weights[0], self.weights[1]]
        self.data_header['learning_history'] = []  # this might be redundant and or the best way to do this?
    def __init_ledger_item__(self):
        """Load the ledger-item template from the JSON snapshot resource.

        Returns the parsed template dict (or None if loading assigned
        nothing — the open() call will raise if the file is missing).
        """
        a_ledger_item = None
        # Relative path: assumes the process runs from a sibling directory
        # of resources/ — TODO confirm working-directory assumption.
        item_data_structure_location = "..//resources//nn_learning_snapshot.json"
        #TODO: need to bring the schema in with it and validate it before proceeding.
        with open(item_data_structure_location) as item_data_structure:
            a_ledger_item = self.utils.load_json_file(item_data_structure)
            self.log.info('successfully loaded: ' + item_data_structure_location)
        return a_ledger_item
def __create_the_data_header__(self):
header = self.ledger_item_template.copy()
learning_history = header["learning_history"]
# clear out the learning history
del learning_history[0]
return header
def create_a_learning_record(self):
return self.learning_record_template.copy()
def calculate_error_for_two_inputs_one_output_nn(self, input1, input2, expected_output):
actual_output = af.ActivationFunctions.sigmoid_function_dual_inputs(input1,
input2,
self.bias,
self.weights)
error = expected_output - actual_output
flute = Flutes(error, actual_output)
return flute
def calculate_error_for_one_input_one_output_nn(self, input1, expected_output):
actual_output = af.ActivationFunctions.sigmoid_function_single_input(input1,
self.bias,
self.weights)
error = expected_output - actual_output
flute = Flutes(error, actual_output)
return flute
def calculate_error_and_modify_weights_for_case(self, case):
results = self.calculate_error_for_two_inputs_one_output_nn(self.truth_table[case].get('input1'),
self.truth_table[case].get('input2'),
self.truth_table[case].get('expected_output'))
self.modify_weights_training(results.get_error(),
self.truth_table[case].get('input1'),
self.truth_table[case].get('input2'))
return results
def calculate_error_and_modify_weights_for_not(self, case):
results = self.calculate_error_for_one_input_one_output_nn(self.truth_table[case].get('input1'),
self.truth_table[case].get('expected_output'))
self.modify_weights_training_for_not(results.get_error(),
self.truth_table[case].get('input1'))
return results
def modify_weights_training(self, error, input1, input2):
self.weights[0] += error * input1 * self.learning_rate
self.weights[1] += error * input2 * self.learning_rate
self.weights[2] += error * self.bias * self.learning_rate
    def modify_weights_training_for_not(self, error, input1):
        # Delta-rule update for the single-input (NOT) perceptron.
        # NOTE(review): this updates weights[0] and weights[2], but the NOT
        # data header documents only w0/w1 and records starting_weights[0:2];
        # weights[2] here may be an off-by-one for weights[1] -- confirm
        # against sigmoid_function_single_input before changing.
        self.weights[0] += error * input1 * self.learning_rate
        self.weights[2] += error * self.bias * self.learning_rate
#TODO: this whole thing sheesh man aww jeeze wow man i mean this is like a whole nother evening or somethign you know?
def determine_if_perceptron_perceives_correctly_enough(self, mastery_criteria, i, iterations):
mastered = "false"
if (i > iterations - 3):
mastered = "true"
return mastered
    #TODO: validate json before commiting it to a block and adding the block to the blockchain
    def train_2_inputs_1_output(self, iterations):
        """Train on the four two-input truth-table cases for `iterations` epochs.

        Each epoch appends one learning record per case to the data header and
        mines the full (growing) header into a new block on the training ledger.
        """
        for i in range(iterations):
            for n in range(4):
                # Truth-table rows are keyed 'case1'..'case4'.
                case = 'case' + str(n + 1)
                results = self.calculate_error_and_modify_weights_for_case(case)
                a_learning_record = self.populate_a_learning_record(self.create_a_learning_record(),
                                                                    results,
                                                                    case,
                                                                    i,
                                                                    iterations)
                self.data_header['learning_history'].append(a_learning_record)
            # One block per epoch, carrying a string dump of the whole header.
            a_ledger_item_or_block = Block(self.data_header.get('name') + str(self.data_header))
            self.training_ledger.mine(a_ledger_item_or_block)
            self.log.info("Block created for iteration: " + str(i) + " the block's hash: " + a_ledger_item_or_block.data )
            self.log.info("the block's number: " + str(a_ledger_item_or_block.blockNo))
            self.log.info("the block's head: " + str(a_ledger_item_or_block.head))
            self.log.info("the block's next: " + str(a_ledger_item_or_block.next))
            self.log.info("the block's data: " + str(a_ledger_item_or_block.data))
        # NOTE(review): this walk advances training_ledger.head until None,
        # i.e. it destructively consumes the chain while logging it -- confirm
        # that is intentional.
        while self.training_ledger.head is not None:
            self.log.info(self.training_ledger.head)
            self.training_ledger.head = self.training_ledger.head.next
    #TODO: validate json before commiting it to a block and adding the block to the blockchain
    def train_1_input_to_1_output(self, iterations):
        """Train the NOT perceptron on its two truth-table cases for `iterations` epochs.

        Mirrors train_2_inputs_1_output but for the single-input operation;
        one block is mined per epoch from the growing data header.
        """
        for i in range(iterations):
            for n in range(2):
                # Truth-table rows are keyed 'case1'..'case2'.
                case = 'case' + str(n + 1)
                results = self.calculate_error_and_modify_weights_for_not(case)
                a_learning_record = self.populate_a_learning_record(self.create_a_learning_record(),
                                                                    results,
                                                                    case,
                                                                    i, #TODO: figure out what's go'n on with iterations
                                                                    iterations)
                self.data_header['learning_history'].append(a_learning_record)
            a_ledger_item_or_block = Block(self.data_header.get('name') + str(self.data_header))
            self.training_ledger.mine(a_ledger_item_or_block)
            self.log.info("Block created for iteration: " + str(i) + " the block's hash: " + a_ledger_item_or_block.data )
            self.log.info("the block's number: " + str(a_ledger_item_or_block.blockNo))
            self.log.info("the block's head: " + str(a_ledger_item_or_block.head))
            self.log.info("the block's next: " + str(a_ledger_item_or_block.next))
            self.log.info("the block's data: " + str(a_ledger_item_or_block.data))
def populate_a_learning_record(self, a_learning_record, training_results, case, i, iterations):
self.log.info('about to populate a learning record with case:' + case)
a_learning_record['id'] = case + ':' + self.data_header['id'] + ':' + str(uuid.uuid4())
a_learning_record['iteration'] = i
a_learning_record['metrics']['id'] = a_learning_record['id'] + ':' + str(uuid.uuid4())
a_learning_record['metrics']['weights'] = self.weights
if self.data_header['operation'] in ['and', 'or'] and \
self.operation in ['and', 'or'] and \
self.truth_table['operation'] in ['and', 'or']:
inputs = [self.truth_table[case].get('input1'), self.truth_table[case].get('input2')]
a_learning_record['metrics']['inputs'] = inputs
elif self.data_header['operation'] in ['not'] and \
self.operation in ['not'] and \
self.truth_table['operation'] in ['not']:
inputs = [self.truth_table[case].get('input1')]
a_learning_record['metrics']['inputs'] = inputs
a_learning_record['metrics']['expected_output'] = [self.truth_table[case].get('expected_output')]
a_learning_record['metrics']['actual_output'] = [training_results.get_actual()]
a_learning_record['metrics']['error'] = [training_results.get_error()]
a_learning_record['metrics']['mastered'] = False
a_learning_record['metrics']['mastery_criteria'] = "who cares, for now when the iterations are done."
a_learning_record['metrics']['mastered'] = self.determine_if_perceptron_perceives_correctly_enough(
a_learning_record['metrics']['mastery_criteria'], i, iterations)
return a_learning_record
def use_perceptron_with_two_inputs_and_one_output(self):
for x, y in [(0, 0), (1, 0), (0, 1), (1, 1)]:
output = af.ActivationFunctions.sigmoid_function_dual_inputs(x, y, self.bias, self.weights)
self.log.info(str(x) + " " + self.operation + " " + str(y) + " yields: " + str(output))
print(str(x) + " " + self.operation + " " + str(y) + " yields: " + str(output))
def use_perceptron_with_one_input_and_one_output(self):
for x in [1, 0]:
output = af.ActivationFunctions.sigmoid_function_single_input(x, self.bias, self.weights)
self.log.info(str(x) + " " + self.operation + " yields: " + str(output))
print(str(x) + " " + self.operation + " yields: " + str(output)) | 11,393 | 1,382 | 23 |
02ad1aadd9a0594ba1cc584f230b170611e9c889 | 6,773 | py | Python | modules/sfp_tool_onesixtyone.py | khiemtq-cyber/spiderfoot | 66e671918853b0334931fd2fbabad0096d506726 | [
"MIT"
] | null | null | null | modules/sfp_tool_onesixtyone.py | khiemtq-cyber/spiderfoot | 66e671918853b0334931fd2fbabad0096d506726 | [
"MIT"
] | null | null | null | modules/sfp_tool_onesixtyone.py | khiemtq-cyber/spiderfoot | 66e671918853b0334931fd2fbabad0096d506726 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tool_onesixtyone
# Purpose: SpiderFoot plug-in for using the onesixtyone tool.
# Tool: https://github.com/trailofbits/onesixtyone
#
# Author: <steve@binarypool.com>
#
# Created: 2022-04-02
# Copyright: (c) Steve Micallef 2022
# Licence: MIT
# -------------------------------------------------------------------------------
import sys
import os.path
import tempfile
from netaddr import IPNetwork
from subprocess import PIPE, Popen
from spiderfoot import SpiderFootPlugin, SpiderFootEvent, SpiderFootHelpers
# End of sfp_tool_onesixtyone class
| 35.835979 | 351 | 0.565776 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tool_onesixtyone
# Purpose: SpiderFoot plug-in for using the onesixtyone tool.
# Tool: https://github.com/trailofbits/onesixtyone
#
# Author: <steve@binarypool.com>
#
# Created: 2022-04-02
# Copyright: (c) Steve Micallef 2022
# Licence: MIT
# -------------------------------------------------------------------------------
import sys
import os.path
import tempfile
from netaddr import IPNetwork
from subprocess import PIPE, Popen
from spiderfoot import SpiderFootPlugin, SpiderFootEvent, SpiderFootHelpers
class sfp_tool_onesixtyone(SpiderFootPlugin):
    """SpiderFoot plugin wrapping the 'onesixtyone' SNMP community scanner.

    Bug fix: setup() previously discarded the open OS file descriptor returned
    by tempfile.mkstemp() (leaking one fd per setup) and used a bare
    open()/close() pair; the descriptor is now wrapped with os.fdopen inside a
    with-block so it is always closed.
    """

    meta = {
        "name": "Tool - onesixtyone",
        "summary": "Fast scanner to find publicly exposed SNMP services.",
        "flags": ["tool"],
        "useCases": ["Footprint", "Investigate"],
        "categories": ["Crawling and Scanning"],
        "toolDetails": {
            "name": "onesixtyone",
            "description": "onesixtyone is a fast scanner for finding publicly exposed SNMP services.",
            "website": "https://github.com/trailofbits/onesixtyone",
            "repository": "https://github.com/trailofbits/onesixtyone"
        }
    }

    opts = {
        'onesixtyone_path': '',
        'communities': '1234,2read,4changes,CISCO,IBM,OrigEquipMfr,SNMP,SUN,access,admin,agent,all,cisco,community,default,enable,field,guest,hello,ibm,manager,mngt,monitor,netman,network,none,openview,pass,password,private,proxy,public,read,read-only,read-write,root,router,secret,security,snmp,snmpd,solaris,sun,switch,system,tech,test,world,write',
        'netblockscan': True,
        'netblockscanmax': 24
    }

    optdescs = {
        'onesixtyone_path': "The path to your onesixtyone binary. Must be set.",
        'communities': "Comma-separated list of SNMP communities to try.",
        'netblockscan': "Scan all IPs within identified owned netblocks?",
        'netblockscanmax': "Maximum netblock/subnet size to scan IPs within (CIDR value, 24 = /24, 16 = /16, etc.)"
    }

    # Per-scan state; initialised in setup().
    results = None
    errorState = False
    communitiesFile = None

    def setup(self, sfc, userOpts=dict()):
        """Initialise plugin state and write the communities word list to a temp file.

        userOpts is only read here, so the mutable default is harmless.
        """
        self.sf = sfc
        self.results = dict()
        self.errorState = False
        self.__dataSource__ = "Target Website"
        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]
        # Write communities to file for use later on.
        try:
            fd, self.communitiesFile = tempfile.mkstemp("communities")
            # os.fdopen adopts the descriptor; the with-block closes it.
            with os.fdopen(fd, "w") as f:
                for community in self.opts['communities'].split(","):
                    f.write(community.strip() + "\n")
        except BaseException as e:
            self.error(f"Unable to write communities file ({self.communitiesFile}): {e}")
            self.errorState = True

    def watchedEvents(self):
        """Event types this module consumes."""
        return ['IP_ADDRESS', 'NETBLOCK_OWNER']

    def producedEvents(self):
        """Event types this module can emit."""
        return [
            'UDP_PORT_OPEN_INFO',
            'UDP_PORT_OPEN',
            'IP_ADDRESS'
        ]

    def handleEvent(self, event):
        """Scan the event's IP (or every host of an owned netblock) with onesixtyone."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        self.debug(f"Received event, {eventName}, from {srcModuleName}")
        if self.errorState:
            return
        if srcModuleName == "sfp_tool_onesixtyone":
            self.debug("Skipping event from myself.")
            return
        if not self.opts['onesixtyone_path']:
            self.error("You enabled sfp_tool_onesixtyone but did not set a path to the tool!")
            self.errorState = True
            return
        exe = self.opts['onesixtyone_path']
        if self.opts['onesixtyone_path'].endswith('/'):
            exe = f"{exe}onesixtyone"
        if not os.path.isfile(exe):
            self.error(f"File does not exist: {exe}")
            self.errorState = True
            return
        if not SpiderFootHelpers.sanitiseInput(eventData, extra=['/']):
            self.debug("Invalid input, skipping.")
            return
        targets = []
        try:
            if eventName == "NETBLOCK_OWNER" and self.opts['netblockscan']:
                net = IPNetwork(eventData)
                # Smaller prefixlen == larger network; refuse anything larger
                # than the configured maximum.
                if net.prefixlen < self.opts['netblockscanmax']:
                    self.debug(f"Skipping scanning of {eventData}, too big.")
                    return
                for addr in net.iter_hosts():
                    targets.append(str(addr))
        except BaseException as e:
            self.error(f"Strange netblock identified, unable to parse: {eventData} ({e})")
            return
        # Don't look up stuff twice, check IP == IP here
        if eventData in self.results:
            self.debug(f"Skipping {eventData} as already scanned.")
            return
        else:
            # Might be a subnet within a subnet or IP within a subnet
            for addr in self.results:
                if IPNetwork(eventData) in IPNetwork(addr):
                    self.debug(f"Skipping {eventData} as already within a scanned range.")
                    return
        self.results[eventData] = True
        # If we weren't passed a netblock, this will be empty
        if not targets:
            targets.append(eventData)
        for target in targets:
            args = [
                exe,
                "-c",
                self.communitiesFile,
                target
            ]
            try:
                p = Popen(args, stdout=PIPE, stderr=PIPE)
                out, stderr = p.communicate(input=None)
                # NOTE(review): decoding with sys.stdin.encoding looks odd for
                # subprocess stdout -- confirm intent before changing.
                stdout = out.decode(sys.stdin.encoding)
            except Exception as e:
                self.error(f"Unable to run onesixtyone: {e}")
                continue
            if p.returncode != 0:
                self.error(f"Unable to read onesixtyone output\nstderr: {stderr}\nstdout: {stdout}")
                continue
            if not stdout:
                self.debug(f"onesixtyone returned no output for {target}")
                continue
            for result in stdout.split("\n"):
                srcevent = event
                if target not in result:
                    continue
                if target != eventData:
                    # Individual host inside a netblock: emit it first so the
                    # port events can hang off it.
                    srcevent = SpiderFootEvent("IP_ADDRESS", target, self.__name__, event)
                    self.notifyListeners(srcevent)
                e = SpiderFootEvent('UDP_PORT_OPEN', f"{target}:161", self.__name__, srcevent)
                self.notifyListeners(e)
                e = SpiderFootEvent("UDP_PORT_OPEN_INFO", result, self.__name__, e)
                self.notifyListeners(e)
# End of sfp_tool_onesixtyone class
| 4,437 | 1,610 | 23 |
1b8e3c325a97cccaf1c8c5f4654e6b07c027e9f1 | 626 | py | Python | pvw-dependencies/pv-flow/flow/plugins/__init__.py | psavery/HPCCloud | ffc18ab662b7ff0562cde84cefa0cc8312ab3993 | [
"Apache-2.0"
] | 45 | 2015-12-09T05:00:48.000Z | 2022-01-25T20:51:30.000Z | pvw-dependencies/pv-flow/flow/plugins/__init__.py | psavery/HPCCloud | ffc18ab662b7ff0562cde84cefa0cc8312ab3993 | [
"Apache-2.0"
] | 501 | 2015-09-09T15:41:29.000Z | 2022-03-01T23:38:34.000Z | pvw-dependencies/pv-flow/flow/plugins/__init__.py | dealenx/hpccloud-kemsu | 42fc44b06385c6eb25a979477dcea53fe66cfbfa | [
"Apache-2.0"
] | 27 | 2016-05-05T06:45:56.000Z | 2022-03-09T16:42:54.000Z | import os
from paraview import simple
# -----------------------------------------------------------------------------
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
PLUGINS = [
'parflow.py'
]
FULL_PATHS = [
'/Applications/ParaView-5.6.0-1626-g52acf2f741.app/Contents/Plugins/ParFlow.so',
]
# -----------------------------------------------------------------------------
# Load the plugins
# -----------------------------------------------------------------------------
for plugin in PLUGINS:
simple.LoadPlugin(os.path.join(MODULE_PATH, plugin))
for plugin in FULL_PATHS:
simple.LoadPlugin(plugin)
| 25.04 | 84 | 0.456869 | import os
from paraview import simple
# -----------------------------------------------------------------------------
MODULE_PATH = os.path.dirname(os.path.abspath(__file__))
PLUGINS = [
'parflow.py'
]
FULL_PATHS = [
'/Applications/ParaView-5.6.0-1626-g52acf2f741.app/Contents/Plugins/ParFlow.so',
]
# -----------------------------------------------------------------------------
# Load the plugins
# -----------------------------------------------------------------------------
for plugin in PLUGINS:
simple.LoadPlugin(os.path.join(MODULE_PATH, plugin))
for plugin in FULL_PATHS:
simple.LoadPlugin(plugin)
| 0 | 0 | 0 |
d27099910cc2d1f1fe63334226e8d2e78994fa89 | 368 | py | Python | pontos-turisticos/comentarios/models.py | LucasVarela42/PontosTuristicos | 96d8a20739dbd9f56ae26bda069ae1862b89e35d | [
"BSD-3-Clause"
] | null | null | null | pontos-turisticos/comentarios/models.py | LucasVarela42/PontosTuristicos | 96d8a20739dbd9f56ae26bda069ae1862b89e35d | [
"BSD-3-Clause"
] | null | null | null | pontos-turisticos/comentarios/models.py | LucasVarela42/PontosTuristicos | 96d8a20739dbd9f56ae26bda069ae1862b89e35d | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth.models import User
from django.db import models
| 28.307692 | 63 | 0.75 | from django.contrib.auth.models import User
from django.db import models
class Comentario(models.Model):
    # Comment left by a user; deleting the user cascades to their comments.
    usuario = models.ForeignKey(User, on_delete=models.CASCADE)
    # Free-form comment body.
    comentario = models.TextField()
    # Creation timestamp, set automatically on insert.
    data = models.DateTimeField(auto_now_add=True)
    # Moderation flag; new comments are approved by default.
    aprovado = models.BooleanField(default=True)
    def __str__(self):
        # Display the author's username in admin listings.
        return self.usuario.username
| 34 | 237 | 23 |
fb51e76c32c581e7839960230756b8338e546393 | 418 | py | Python | InventorySystem/skin/migrations/0003_skinmodel_factory.py | guyueming/PythonWeb | e8a38fc26c06ec78e1de61d65055dcfc480ef8f1 | [
"MIT"
] | null | null | null | InventorySystem/skin/migrations/0003_skinmodel_factory.py | guyueming/PythonWeb | e8a38fc26c06ec78e1de61d65055dcfc480ef8f1 | [
"MIT"
] | null | null | null | InventorySystem/skin/migrations/0003_skinmodel_factory.py | guyueming/PythonWeb | e8a38fc26c06ec78e1de61d65055dcfc480ef8f1 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-06-17 23:09
from django.db import migrations, models
| 22 | 81 | 0.605263 | # Generated by Django 3.2.3 on 2021-06-17 23:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the 'factory' text field to SkinModel.
    dependencies = [
        ('skin', '0002_alter_skinmodel_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='skinmodel',
            name='factory',
            # verbose_name '厂家' is the Chinese display label ("manufacturer").
            field=models.TextField(default='', max_length=64, verbose_name='厂家'),
        ),
    ]
| 0 | 308 | 23 |
49918cf0e012844bbc237dbf805743d7bcf91358 | 2,054 | py | Python | libraries/stc-1.2.9/Selected_Track_Control/MIDI.py | lushfuture/Liveduino | c55a44a397996d77bbf14d4f883595668520690a | [
"MIT"
] | 2 | 2016-06-30T04:31:58.000Z | 2017-10-11T15:59:35.000Z | libraries/stc-1.2.9/Selected_Track_Control/MIDI.py | lushfuture/Liveduino | c55a44a397996d77bbf14d4f883595668520690a | [
"MIT"
] | null | null | null | libraries/stc-1.2.9/Selected_Track_Control/MIDI.py | lushfuture/Liveduino | c55a44a397996d77bbf14d4f883595668520690a | [
"MIT"
] | null | null | null | # this file stores some constants regarding MIDI-handling, etc.
# for settings which MIDI-notes trigger what functionality see settings.py
import Live
#from Live import MidiMap
#from Live.MidiMap import MapMode
DEFAULT_CHANNEL = 0
STATUS_MASK = 0xF0
CHAN_MASK = 0x0F
CC_STATUS = 0xb0
NOTEON_STATUS = 0x90
NOTEOFF_STATUS = 0x80
STATUS_ON = 0x7f
STATUS_OFF = 0x00
STATUS_OFF2 = 0x40
# possible CC modes (always 7bit); RELATIVE: <increment> / <decrement>
ABSOLUTE = Live.MidiMap.MapMode.absolute # 0 - 127
RELATIVE_BINARY_OFFSET = Live.MidiMap.MapMode.relative_binary_offset # 065 - 127 / 063 - 001
RELATIVE_SIGNED_BIT = Live.MidiMap.MapMode.relative_signed_bit # 001 - 064 / 065 - 127
RELATIVE_SIGNED_BIT2 = Live.MidiMap.MapMode.relative_signed_bit2 # 065 - 127 / 001 - 064
RELATIVE_TWO_COMPLIMENT = Live.MidiMap.MapMode.relative_two_compliment # 001 - 064 / 127 - 65
# Dispatch table: Live MapMode -> converter turning a raw 7-bit CC value into
# a signed relative increment (ABSOLUTE passes the value through unchanged).
relative_to_signed_int = {
	ABSOLUTE: lambda value: value,
	RELATIVE_BINARY_OFFSET: relativebinary_offset_to_signed_int,
	RELATIVE_SIGNED_BIT: relative_signed_bit_to_signed_int,
	RELATIVE_SIGNED_BIT2: relative_signed_bit2_to_signed_int,
	RELATIVE_TWO_COMPLIMENT: relative_two_complement_to_signed_int
}
| 30.205882 | 93 | 0.790166 | # this file stores some constants regarding MIDI-handling, etc.
# for settings which MIDI-notes trigger what functionality see settings.py
import Live
#from Live import MidiMap
#from Live.MidiMap import MapMode
DEFAULT_CHANNEL = 0
STATUS_MASK = 0xF0
CHAN_MASK = 0x0F
CC_STATUS = 0xb0
NOTEON_STATUS = 0x90
NOTEOFF_STATUS = 0x80
STATUS_ON = 0x7f
STATUS_OFF = 0x00
STATUS_OFF2 = 0x40
# possible CC modes (always 7bit); RELATIVE: <increment> / <decrement>
ABSOLUTE = Live.MidiMap.MapMode.absolute # 0 - 127
RELATIVE_BINARY_OFFSET = Live.MidiMap.MapMode.relative_binary_offset # 065 - 127 / 063 - 001
RELATIVE_SIGNED_BIT = Live.MidiMap.MapMode.relative_signed_bit # 001 - 064 / 065 - 127
RELATIVE_SIGNED_BIT2 = Live.MidiMap.MapMode.relative_signed_bit2 # 065 - 127 / 001 - 064
RELATIVE_TWO_COMPLIMENT = Live.MidiMap.MapMode.relative_two_compliment # 001 - 064 / 127 - 65
def relativebinary_offset_to_signed_int(value):
	"""Binary-offset mode: 64 is zero; above/below count up/down from it."""
	offset_centre = 64
	return value - offset_centre
def relative_signed_bit_to_signed_int(value):
	"""Signed-bit mode: 1..64 increment; 65..127 decrement by (value - 64)."""
	return 64 - value if value > 64 else value
def relative_signed_bit2_to_signed_int(value):
	"""Signed-bit-2 mode: 65..127 increment by (value - 64); 0..64 decrement."""
	if value <= 64:
		return -value
	return value - 64
def relative_two_complement_to_signed_int(value):
	"""Two's-complement mode: 1..64 increment; 65..127 wrap to -63..-1."""
	return value - 128 if value > 64 else value
# Dispatch table: Live MapMode -> converter turning a raw 7-bit CC value into
# a signed relative increment (ABSOLUTE passes the value through unchanged).
relative_to_signed_int = {
	ABSOLUTE: lambda value: value,
	RELATIVE_BINARY_OFFSET: relativebinary_offset_to_signed_int,
	RELATIVE_SIGNED_BIT: relative_signed_bit_to_signed_int,
	RELATIVE_SIGNED_BIT2: relative_signed_bit2_to_signed_int,
	RELATIVE_TWO_COMPLIMENT: relative_two_complement_to_signed_int
}
class MIDICommand:
	"""Describes a MIDI trigger: key/CC number, CC mode, status byte, channel."""
	def __init__(self, key, mode = ABSOLUTE, status = NOTEON_STATUS, channel = DEFAULT_CHANNEL):
		# Plain data holder; store each constructor argument as an attribute.
		for attr_name, attr_value in (('key', key), ('mode', mode), ('status', status), ('channel', channel)):
			setattr(self, attr_name, attr_value)
class Note (MIDICommand):
	"""A note trigger: always absolute mode on the note-on status byte."""
	def __init__(self, note, channel = DEFAULT_CHANNEL):
		# Old-style base class (Python 2 era): call the base __init__ directly.
		MIDICommand.__init__(self, note, mode = ABSOLUTE, status = NOTEON_STATUS, channel = channel)
class CC (MIDICommand):
	"""A continuous-controller trigger; defaults to two's-complement relative mode."""
	def __init__(self, cc, mode = RELATIVE_TWO_COMPLIMENT, channel = DEFAULT_CHANNEL):
		# Old-style base class (Python 2 era): call the base __init__ directly.
		MIDICommand.__init__(self, cc, mode = mode, status = CC_STATUS, channel = channel)
1c411633507834ebf441912e41c4aa9c0df844f1 | 311 | py | Python | ocpmodels/datasets/embeddings/__init__.py | Irlirion/ocp | 6fb3e794eef31559db990300198eca20f41d8f37 | [
"MIT",
"BSD-3-Clause"
] | 242 | 2020-10-14T11:10:43.000Z | 2022-03-29T07:50:18.000Z | ocpmodels/datasets/embeddings/__init__.py | Irlirion/ocp | 6fb3e794eef31559db990300198eca20f41d8f37 | [
"MIT",
"BSD-3-Clause"
] | 100 | 2020-10-13T23:27:04.000Z | 2022-03-23T16:50:26.000Z | ocpmodels/datasets/embeddings/__init__.py | Irlirion/ocp | 6fb3e794eef31559db990300198eca20f41d8f37 | [
"MIT",
"BSD-3-Clause"
] | 86 | 2020-10-15T05:56:28.000Z | 2022-03-16T16:11:45.000Z | __all__ = [
"ATOMIC_RADII",
"KHOT_EMBEDDINGS",
"CONTINUOUS_EMBEDDINGS",
"QMOF_KHOT_EMBEDDINGS",
]
from .atomic_radii import ATOMIC_RADII
from .continuous_embeddings import CONTINUOUS_EMBEDDINGS
from .khot_embeddings import KHOT_EMBEDDINGS
from .qmof_khot_embeddings import QMOF_KHOT_EMBEDDINGS
| 25.916667 | 56 | 0.807074 | __all__ = [
"ATOMIC_RADII",
"KHOT_EMBEDDINGS",
"CONTINUOUS_EMBEDDINGS",
"QMOF_KHOT_EMBEDDINGS",
]
from .atomic_radii import ATOMIC_RADII
from .continuous_embeddings import CONTINUOUS_EMBEDDINGS
from .khot_embeddings import KHOT_EMBEDDINGS
from .qmof_khot_embeddings import QMOF_KHOT_EMBEDDINGS
| 0 | 0 | 0 |
173aa722f3fceaee5162a6ddc0eb2356e22c0545 | 127 | py | Python | nomnom/forms.py | storyandstructure/django-nomnom | 2fdb3712eae04f6811e66aa5848c3d7a3d6aed6f | [
"MIT"
] | null | null | null | nomnom/forms.py | storyandstructure/django-nomnom | 2fdb3712eae04f6811e66aa5848c3d7a3d6aed6f | [
"MIT"
] | 3 | 2020-02-11T23:31:18.000Z | 2021-06-10T18:54:31.000Z | nomnom/forms.py | storyandstructure/django-nomnom | 2fdb3712eae04f6811e66aa5848c3d7a3d6aed6f | [
"MIT"
] | null | null | null | from django import forms
| 15.875 | 33 | 0.661417 | from django import forms
class ImportFileForm(forms.Form):
file = forms.FileField(
label='File to import',
)
| 0 | 78 | 23 |
fc7941c072ec0bec271e41453e35eb6a8d9ed6da | 4,047 | py | Python | tap/tap_object.py | obytes/tap-python | bc119cb785dc353f0c67241a64d9fcb30f21fbf7 | [
"MIT"
] | 3 | 2019-05-28T16:19:57.000Z | 2022-03-05T13:16:43.000Z | tap/tap_object.py | obytes/tap-python | bc119cb785dc353f0c67241a64d9fcb30f21fbf7 | [
"MIT"
] | 2 | 2019-07-02T21:45:09.000Z | 2019-07-03T13:55:54.000Z | tap/tap_object.py | obytes/tap-python | bc119cb785dc353f0c67241a64d9fcb30f21fbf7 | [
"MIT"
] | null | null | null | from builtins import hex
from builtins import str
from . import six
from . import api_requestor
from . import util
import json
| 30.659091 | 96 | 0.562145 | from builtins import hex
from builtins import str
from . import six
from . import api_requestor
from . import util
import json
class TapObject(dict):
    """Dict subclass exposing API resource fields as both items and attributes.

    Metadata (api_key, tap_version, tap_account, params) is stored on the
    instance __dict__ via object.__setattr__ so it bypasses the custom
    attribute handling below; everything else lives in the dict payload.
    """
    def __init__(self, id=None, api_key=None, tap_version=None,
                 tap_account=None, **params):
        # Bypass our own __setattr__ so metadata stays out of the dict payload.
        object.__setattr__(self, 'api_key', api_key)
        object.__setattr__(self, 'tap_version', tap_version)
        object.__setattr__(self, 'tap_account', tap_account)
        object.__setattr__(self, 'params', params)
        if id:
            self['id'] = id
        super(TapObject, self).__init__()
    def __setattr__(self, k, v):
        """Route attribute writes to the dict unless private or already real."""
        # Names starting with '_' and names already in __dict__ stay real
        # attributes; everything else becomes a dict item.
        if k[0] == '_' or k in self.__dict__:
            return super(TapObject, self).__setattr__(k, v)
        self[k] = v
        return None
    def __getattr__(self, k):
        """Resolve unknown attributes from the dict payload."""
        if k[0] == '_':
            raise AttributeError(k)
        try:
            return self[k]
        except KeyError as err:
            # Surface missing keys as AttributeError, per attribute protocol.
            raise AttributeError(*err.args)
    def __delattr__(self, k):
        """Delete a real attribute if private/known, otherwise the dict item."""
        if k[0] == '_' or k in self.__dict__:
            return super(TapObject, self).__delattr__(k)
        else:
            del self[k]
    def __setitem__(self, k, v):
        """Store an item; empty strings are rejected (they mean None in requests)."""
        if v == "":
            raise ValueError(
                "You cannot set %s to an empty string. "
                "We interpret empty strings as None in requests."
                "You may set %s.%s = None to delete the property" % (
                    k, str(self), k))
        super(TapObject, self).__setitem__(k, v)
    def __getitem__(self, k):
        """Plain dict lookup (re-raises KeyError unchanged)."""
        try:
            return super(TapObject, self).__getitem__(k)
        except KeyError as err:
            raise err
    def __repr__(self):
        """Human-readable identity: class, object type, id and JSON payload."""
        ident_parts = [type(self).__name__]
        if isinstance(self.get('object'), six.string_types):
            ident_parts.append(self.get('object'))
        if isinstance(self.get('id'), six.string_types):
            ident_parts.append('id=%s' % (self.get('id'),))
        unicode_repr = '<%s at %s> JSON: %s' % (
            ' '.join(ident_parts), hex(id(self)), str(self))
        if six.PY2:
            # Python 2 repr must be bytes.
            return unicode_repr.encode('utf-8')
        else:
            return unicode_repr
    def __str__(self):
        """Pretty-printed JSON of the (recursively converted) payload."""
        return json.dumps(self.to_dict_recursive(), sort_keys=True,
                          indent=2)
    def to_dict(self):
        """Shallow plain-dict view of the payload."""
        return dict(self)
    def to_dict_recursive(self):
        """Plain-dict view with nested TapObjects converted as well.

        NOTE(review): TapObjects nested inside lists are not converted.
        """
        d = dict(self)
        for k, v in six.iteritems(d):
            if isinstance(v, TapObject):
                d[k] = v.to_dict_recursive()
        return d
    def request(self, method, url, params=None, headers=None):
        """Issue an API request with this object's credentials; wrap the response."""
        requestor = api_requestor.APIRequestor(
            key=self.api_key, api_version=self.tap_version, account=self.tap_account)
        response, api_key = requestor.request(method, url, params, headers)
        return util.convert_to_tap_object(response, api_key,
                                          self.tap_version,
                                          self.tap_account)
    @classmethod
    def construct_from(cls, values, api_key=None, tap_version=None,
                       tap_account=None):
        """Alternate constructor: build an instance and populate it from values."""
        instance = cls(values.get('id'), api_key=api_key,
                       tap_version=tap_version,
                       tap_account=tap_account)
        instance.refresh_from(values, api_key=api_key,
                              tap_version=tap_version,
                              tap_account=tap_account)
        return instance
    def refresh_from(self, values, api_key=None, tap_version=None,
                     tap_account=None):
        """Update metadata and copy all key/value pairs from values into the dict."""
        self.api_key = \
            api_key or getattr(values, 'api_key', None)
        self.tap_version = \
            tap_version or getattr(values, 'tap_version', None)
        self.tap_account = \
            tap_account or getattr(values, 'tap_account', None)
        for k, v in six.iteritems(values):
            # Bypass our __setitem__ (and its empty-string check) on refresh.
            super(TapObject, self).\
                __setitem__(k, util.convert_to_tap_object(v, api_key, tap_version, tap_account))
| 3,526 | 369 | 23 |
08ebeb940dc3243e6c6b8623fbee91b4d294e8e1 | 1,818 | py | Python | lines/libs/facades.py | betfund/betfund-lines | 9fe3e2aa69bb493df3efc2a3923889f444f2c66d | [
"MIT"
] | null | null | null | lines/libs/facades.py | betfund/betfund-lines | 9fe3e2aa69bb493df3efc2a3923889f444f2c66d | [
"MIT"
] | 1 | 2020-04-01T00:35:21.000Z | 2020-04-01T00:35:21.000Z | lines/libs/facades.py | betfund/betfund-lines | 9fe3e2aa69bb493df3efc2a3923889f444f2c66d | [
"MIT"
] | null | null | null | """Rundown API Response Facades."""
from typing import Union
class LinesResponseFacade(dict):
    """TheRundown `GET events` response accessor."""
    @property
    def meta(self) -> dict:
        """Fetch `meta` key from LineResponse."""
        # dict.get: missing key reads as None rather than raising.
        return self.get("meta")
    @property
    def events(self) -> Union[list, None]:
        """Fetch `events` key from LineResponse."""
        raw_events = self.get("events")
        # Missing OR empty events both collapse to None.
        if not raw_events:
            return None
        events = []
        for event in raw_events:
            # Wrap each raw event dict in its accessor facade.
            events.append(LinesEventFacade(event))
        return events
class LinesEventFacade(dict):
    """TheRundown `GET events.line` response accessor."""
    # All properties use dict.get, so missing keys read as None.
    @property
    def event_id(self) -> str:
        """Fetch `event_id` from LineEvent."""
        return self.get("event_id")
    @property
    def sport_id(self) -> int:
        """Fetch `sport_id` from LineEvent."""
        return self.get("sport_id")
    @property
    def event_date(self) -> str:
        """Fetch `event_date` from LineEvent."""
        return self.get("event_date")
    @property
    def score(self) -> dict:
        """Fetch `score` from LineEvent."""
        return self.get("score")
    @property
    def teams_normalized(self) -> list:
        """Fetch `teams_normalized` from LineEvent."""
        return self.get("teams_normalized")
    @property
    def schedule(self) -> dict:
        """Fetch `schedule` from LineEvent."""
        return self.get("schedule")
    @property
    def line_periods(self) -> dict:
        """Fetch `line_periods` from LineEvent."""
        return self.get("line_periods")
| 25.971429 | 57 | 0.60341 | """Rundown API Response Facades."""
from typing import Union
class LinesResponseFacade(dict):
    """Dict facade over TheRundown `GET events` response payload."""
    def __init__(self, data: dict):
        super(LinesResponseFacade, self).__init__(data)
    @property
    def meta(self) -> dict:
        """Accessor for the response's `meta` mapping (None when absent)."""
        return self.get("meta")
    @property
    def events(self) -> Union[list, None]:
        """Wrap each raw event in a LinesEventFacade; None when absent/empty."""
        raw_events = self.get("events")
        if raw_events:
            return [LinesEventFacade(raw) for raw in raw_events]
        return None
class LinesEventFacade(dict):
    """Dict facade over a single TheRundown `GET events.line` event."""
    def __init__(self, event: dict):
        super(LinesEventFacade, self).__init__(event)
    def _field(self, key):
        # Central lookup helper: missing keys read as None (dict.get).
        return self.get(key)
    @property
    def event_id(self) -> str:
        """The event's unique identifier."""
        return self._field("event_id")
    @property
    def sport_id(self) -> int:
        """Numeric sport identifier."""
        return self._field("sport_id")
    @property
    def event_date(self) -> str:
        """Scheduled date of the event."""
        return self._field("event_date")
    @property
    def score(self) -> dict:
        """Score payload for the event."""
        return self._field("score")
    @property
    def teams_normalized(self) -> list:
        """Normalised team records."""
        return self._field("teams_normalized")
    @property
    def schedule(self) -> dict:
        """Schedule payload for the event."""
        return self._field("schedule")
    @property
    def line_periods(self) -> dict:
        """Betting-line periods payload."""
        return self._field("line_periods")
| 131 | 0 | 54 |
d2e3854d927ea8e3cbb2c9bb0e4b4dc7ed8d6347 | 4,085 | py | Python | beam_nuggets/io/test/test_base.py | shashanksen/beam-nuggets | accc738801d89329ff55e1716cd42b068e1f5237 | [
"MIT"
] | 60 | 2019-02-11T11:22:26.000Z | 2022-03-14T20:30:44.000Z | beam_nuggets/io/test/test_base.py | shashanksen/beam-nuggets | accc738801d89329ff55e1716cd42b068e1f5237 | [
"MIT"
] | 28 | 2019-01-18T21:43:51.000Z | 2021-09-14T08:22:36.000Z | beam_nuggets/io/test/test_base.py | shashanksen/beam-nuggets | accc738801d89329ff55e1716cd42b068e1f5237 | [
"MIT"
] | 29 | 2019-01-16T15:41:13.000Z | 2022-03-16T11:08:12.000Z | from __future__ import division, print_function
import unittest
import testing.mysqld
import testing.postgresql
from beam_nuggets.io.relational_db import SourceConfiguration
from .database import TestDatabase
| 31.183206 | 94 | 0.589474 | from __future__ import division, print_function
import unittest
import testing.mysqld
import testing.postgresql
from beam_nuggets.io.relational_db import SourceConfiguration
from .database import TestDatabase
class TransformBaseTest(unittest.TestCase):
postgres_instance = None
mysql_instance = None
postgres_source_config = None
mysql_source_config = None
source_config = None # default for all tests
@classmethod
def setUpClass(cls):
cls.postgres_source_config = cls.get_postgres_source_config()
cls.mysql_source_config = cls.get_mysql_source_config()
cls.source_config = (
cls.postgres_source_config or
cls.mysql_source_config or
cls.get_sqlite_source_config()
)
print(
'\nrunning {} tests against temp db instance: {}'
''.format(cls.__name__, cls.source_config.url)
)
@classmethod
def get_sqlite_source_config(cls):
return SourceConfiguration(
drivername='sqlite',
database='/tmp/delete_me_beam_nuggets_unittest.sqlite',
create_if_missing=True,
)
@classmethod
def get_postgres_source_config(cls):
cls.postgres_instance = cls.connect_to_postgresql()
if cls.postgres_instance:
return SourceConfiguration(
drivername='postgresql+pg8000',
host='localhost',
port=cls.postgres_instance.settings['port'],
username='postgres',
database='beam_nuggets_test_db',
create_if_missing=True,
)
@classmethod
def connect_to_postgresql(cls):
postgresql = None
try:
postgresql = testing.postgresql.Postgresql()
except Exception as e:
print(
'\n\nFailed to connect to local postgres instance. Error: {}'
'\nCheck postgresql installed locally to run tests against it '
'.\n{}\n{}'.format(
e,
'https://www.postgresql.org/download/linux/ubuntu/',
'https://github.com/tk0miya/testing.postgresql'
)
)
return postgresql
@classmethod
def get_mysql_source_config(cls):
cls.mysql_instance = cls.connect_to_mysql()
if cls.mysql_instance:
return SourceConfiguration(
drivername='mysql+pymysql',
host='localhost',
port=cls.mysql_instance.my_cnf['port'],
username='root',
database='beam_nuggets_test_db',
create_if_missing=True,
)
@classmethod
def connect_to_mysql(cls):
mysql = None
try:
# mysql = testing.mysqld.Mysqld(my_cnf={'skip-networking': None})
mysql = testing.mysqld.Mysqld()
except Exception as e:
print(
'\n\nFailed to connect to local mysql instance. Error: {}'
'\nCheck mysql installed locally to run tests against it '
'.\n{}\n{}'.format(
e,
'https://support.rackspace.com/how-to/installing-mysql-server-on-ubuntu/',
'https://github.com/tk0miya/testing.mysqld'
)
)
return mysql
@classmethod
def tearDownClass(cls):
if cls.postgres_instance:
cls.postgres_instance.stop()
if cls.mysql_instance:
cls.mysql_instance.stop()
@staticmethod
def configure_db(source_config):
db = TestDatabase(source_config)
db.init_db()
return db
@staticmethod
def destroy_db(db):
# need this since some TC:s will use; I don't want to call tearDown
# from within TC:s
db.destroy_db() # will, as well, delete created sqllite db file
def setUp(self):
self.db = self.configure_db(self.source_config)
def tearDown(self):
# clean DB resource
self.destroy_db(self.db)
| 3,203 | 646 | 23 |
ce85073f7b2fe24ba568646bfd956ffe91d1ac82 | 2,956 | py | Python | cm_modules/inference.py | SergejVolkov/SR_base | 285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0 | [
"MIT"
] | 2 | 2021-06-10T16:29:30.000Z | 2021-06-14T16:59:29.000Z | cm_modules/inference.py | SergejVolkov/SR_base | 285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0 | [
"MIT"
] | null | null | null | cm_modules/inference.py | SergejVolkov/SR_base | 285b40c0bbe9dc46f2bd660dc80ff255b4dc65a0 | [
"MIT"
] | null | null | null | import cv2
import sys
import pyprind
import torch
import numpy as np
import dl_modules.dataset as ds
import dl_modules.transforms as trf
import cm_modules.utils as utils
import skvideo.io as vio
from cm_modules.enhance import correct_colors
from cm_modules.utils import convert_to_cv_8bit
| 32.483516 | 98 | 0.530108 | import cv2
import sys
import pyprind
import torch
import numpy as np
import dl_modules.dataset as ds
import dl_modules.transforms as trf
import cm_modules.utils as utils
import skvideo.io as vio
from cm_modules.enhance import correct_colors
from cm_modules.utils import convert_to_cv_8bit
def inference(name: str, net: torch.nn.Module, device: torch.device,
              length: float=0, start: float=0, batch: int=1,
              cut: bool=False, normalize: bool=False, crf: int=17, tune: str=None) -> None:
    """Run super-resolution inference over a video and encode the result.

    Reads ``SAVE_DIR/data/video/<name>.mp4``, feeds frames through ``net``
    in batches under ``torch.no_grad()``, and writes an H.264 file to
    ``SAVE_DIR/data/output/<name>_sr[.n].mp4`` at the source frame rate.

    Args:
        name: base name of the input video (without extension).
        net: super-resolution model; switched to eval mode here.
        device: torch device the normalised frames are moved to.
        length: seconds of video to process; 0 means the whole file.
        start: offset in seconds at which to begin reading.
        batch: number of frames per forward pass.
        cut: if True, tile each batch via ``utils.cut_image`` /
            ``utils.glue_image`` (presumably to fit GPU memory — TODO confirm).
        normalize: if True, colour-correct output against the input frames
            and use the ``_sr_n.mp4`` output suffix.
        crf: x264 constant-rate-factor passed to the ffmpeg writer.
        tune: optional x264 ``-tune`` preset.
    """
    net.eval()
    norm = ds.get_normalization()
    trn = trf.get_predict_transform(*ds.predict_res)
    cap = cv2.VideoCapture(ds.SAVE_DIR + 'data/video/' + name + '.mp4')
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Seek to the requested start offset, expressed in frames.
    cap.set(cv2.CAP_PROP_POS_FRAMES, int(round(start * fps)))
    # NOTE(review): w/h are scaled but not used below — presumably leftover.
    w, h = ds.predict_res
    w *= ds.scale
    h *= ds.scale
    if normalize:
        path = ds.SAVE_DIR + 'data/output/' + name + '_sr_n.mp4'
    else:
        path = ds.SAVE_DIR + 'data/output/' + name + '_sr.mp4'
    outputdict = {
        '-vcodec': 'libx264',
        '-crf': '%d' % crf,
        '-preset': 'veryslow',
        '-r' : '%g' % fps
    }
    if tune is not None:
        outputdict['-tune'] = tune
    out = vio.FFmpegWriter(
        path, inputdict={
            '-r': '%g' % fps,
        },
        outputdict=outputdict
    )
    i = 0
    if length != 0:
        total = int(round(length * fps))
    else:
        total = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    iter_bar = pyprind.ProgBar(total, title='Inference ' + name, stream=sys.stdout)
    frame_list = []
    end = False
    with torch.no_grad():
        while True:
            ret, frame = cap.read()
            if not ret or (length != 0 and i >= length * fps):
                # First exhausted read only sets the flag so the partially
                # filled batch below still gets flushed; the second breaks.
                if not end:
                    end = True
                else:
                    break
            else:
                # OpenCV decodes BGR; model input is RGB + normalisation.
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                frame = norm(trn(image=frame)["image"]).to(device)
                frame_list.append(frame)
            # Flush a full batch, or the remainder once the stream ended.
            if len(frame_list) == batch or (end and len(frame_list) > 0):
                frames = torch.stack(frame_list)
                if cut:
                    pieces = utils.cut_image(frames)
                    out_pieces = []
                    for piece in pieces:
                        out_pieces.append(net(piece))
                    output = utils.glue_image(out_pieces)
                else:
                    output = net(frames)
                for j in range(len(frame_list)):
                    if normalize:
                        out_frame = correct_colors(output[j, :, :, :], frames[j, :, :, :])
                    else:
                        out_frame = output[j, :, :, :]
                    out.writeFrame(cv2.cvtColor(convert_to_cv_8bit(out_frame), cv2.COLOR_RGB2BGR))
                frame_list.clear()
            i += 1
            iter_bar.update()
    cap.release()
    out.close()
| 2,643 | 0 | 23 |
b77c243193987ff5dbaadc790c384c21c361a077 | 2,375 | py | Python | agileutil/http/transport.py | lycclsltt/agileutil | 306e727a50db0173e4008643a227ecce48873faf | [
"MIT"
] | 22 | 2017-03-08T01:48:58.000Z | 2022-03-09T08:42:02.000Z | agileutil/http/transport.py | lycclsltt/agileutil | 306e727a50db0173e4008643a227ecce48873faf | [
"MIT"
] | 2 | 2021-05-07T01:37:05.000Z | 2021-11-10T05:21:01.000Z | agileutil/http/transport.py | lycclsltt/agileutil | 306e727a50db0173e4008643a227ecce48873faf | [
"MIT"
] | 3 | 2017-02-23T09:32:53.000Z | 2021-04-01T03:04:00.000Z | from socket import * | 28.963415 | 84 | 0.552421 | from socket import *
class TcpTransport(object):
    """Minimal blocking TCP transport used by the HTTP helpers.

    Wraps a single ``socket`` object and provides helpers to listen for /
    accept connections and to read and write framed HTTP data.
    """
    __slots__ = ('host', 'port', 'timeout', 'socket', 'keepaliveTimeout', 'backlog')
    def __init__(self, host, port, timeout = 30, keepaliveTimeout=30):
        self.host = host
        self.port = port
        self.timeout = timeout
        self.socket = None
        self.keepaliveTimeout = keepaliveTimeout
        self.backlog = 100  # listen() backlog used by bind()
    def bind(self):
        """Create a listening server socket on (host, port)."""
        self.socket = socket(AF_INET, SOCK_STREAM)
        self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.socket.bind( (self.host, self.port) )
        self.socket.listen(self.backlog)
    def setKeepaliveTimeout(self, keepaliveTimeout: int):
        """Set the idle timeout applied to sockets returned by accept()."""
        self.keepaliveTimeout = keepaliveTimeout
    def connect(self):
        """Open the client connection if one is not already open."""
        if self.socket is None:  # idiom fix: identity test against None
            self.socket = socket(AF_INET, SOCK_STREAM)
            self.socket.settimeout(self.timeout)
            self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            self.socket.connect( (self.host, self.port) )
    def accept(self):
        """Accept one client connection; returns (connection, address)."""
        cli, addr = self.socket.accept()
        cli.settimeout(self.keepaliveTimeout)
        return cli, addr
    def close(self):
        """Close and forget the underlying socket, if any."""
        if self.socket:
            self.socket.close()
            self.socket = None
    def reconnect(self):
        """Drop the current connection and dial again."""
        self.close()
        self.connect()
        print('tcp client reconnect')
    def recvUntil(self, conn, until):
        """Read from *conn* byte by byte until the byte string *until* has
        been seen (or EOF); returns everything read, terminator included."""
        retbytearr = b''
        index = 0 - len(until)
        while True:
            rbytes = conn.recv(1)
            if rbytes == b'':
                break  # peer closed the connection
            retbytearr += rbytes
            if retbytearr[index:] == until:
                break
        return retbytearr
    def recvFullHeader(self, conn):
        """Read CRLF-terminated lines until the blank line that ends an HTTP
        header block (or EOF); returns the raw header bytes."""
        fullbytes = b''
        while True:
            rbytes = self.recvUntil(conn, b'\r\n')
            fullbytes += rbytes
            if rbytes == b'\r\n' or rbytes == b'':
                break
        return fullbytes
    def recvn(self, conn, n):
        """Read exactly *n* bytes from *conn*; may return fewer on EOF.

        BUG FIX: the old loop did ``conn.recv(1)`` and incremented the
        received count unconditionally, so once the peer closed it kept
        counting empty reads as data and silently returned short after *n*
        wasted syscalls.  Read in chunks and stop cleanly at EOF instead.
        """
        toRecv = n
        hasRecv = 0
        bytearr = b''
        while hasRecv < toRecv:
            chunk = conn.recv(toRecv - hasRecv)
            if chunk == b'':
                break  # EOF before n bytes arrived
            bytearr += chunk
            hasRecv += len(chunk)
        return bytearr
    def sendAll(self, conn, bytearr):
        """Send all of *bytearr* on *conn*, retrying after partial sends."""
        toSend = len(bytearr)
        while toSend > 0:
            toSend -= conn.send(bytearr)
bytearr = bytearr[0-toSend:] | 1,942 | 390 | 23 |
fe158762d0c9dd7c3939c2f5f7aeda784d341ed6 | 1,563 | py | Python | tests/viewsets/pandas.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 2 | 2020-03-17T00:53:23.000Z | 2020-07-16T07:00:33.000Z | tests/viewsets/pandas.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 76 | 2019-12-05T01:15:57.000Z | 2021-09-07T16:47:27.000Z | tests/viewsets/pandas.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 1 | 2020-02-05T15:09:47.000Z | 2020-02-05T15:09:47.000Z | from django.db.models import F
from bridger import display as dp
from bridger.pandas import fields as pf
from bridger.pandas.views import PandasAPIView
from bridger.serializers import decorator
from tests.filters import PandasFilterSet
from tests.models import ModelTest
from .display import PandasDisplayConfig
| 31.26 | 100 | 0.648752 | from django.db.models import F
from bridger import display as dp
from bridger.pandas import fields as pf
from bridger.pandas.views import PandasAPIView
from bridger.serializers import decorator
from tests.filters import PandasFilterSet
from tests.models import ModelTest
from .display import PandasDisplayConfig
class MyPandasView(PandasAPIView):
    """Pandas-backed API view over ModelTest used by the bridger test suite.

    Declares searchable/filterable fields, the pandas column mapping, and an
    annotated queryset; aggregates are computed from the resulting DataFrame.
    """
    search_fields = ["char_field"]
    filterset_class = PandasFilterSet
    # INSTANCE_ENDPOINT = "modeltest-list"
    # LIST_ENDPOINT = "pandas_view"
    # LIST_TITLE = "Pandas List"
    display_config_class = PandasDisplayConfig
    def get_filterset_class(self, request):
        # Same filterset regardless of request.
        return PandasFilterSet
    # Column definitions exposed to the pandas serialisation layer.
    pandas_fields = pf.PandasFields(
        fields=[
            pf.PKField(key="id", label="ID"),
            pf.CharField(key="char_field", label="Char"),
            pf.FloatField(
                key="integer_field",
                label="Integer",
                precision=2,
                percent=True,
                decorators=[decorator(position="left", value="@")],
            ),
            pf.FloatField(key="integer_annotated", label="Integer Annotated", precision=2,),
        ]
    )
    queryset = ModelTest.objects.all()
    ordering_fields = ["integer_field", "integer_annotated"]
    def get_queryset(self):
        """Annotate each row with integer_annotated = integer_field - 1."""
        qs = super().get_queryset()
        qs = qs.annotate(integer_annotated=F("integer_field") - 1)
        return qs
    def get_aggregates(self, request, df):
        """Return sum and mean of integer_field over the filtered DataFrame."""
        return {"integer_field": {"Σ": df["integer_field"].sum(), "μ": df["integer_field"].mean(),}}
| 292 | 937 | 23 |
93db70d21a4cc525bbe64c89e40a25836016478f | 35 | py | Python | melior_transformers/version.py | MeliorAI/meliorTransformers | b2936e1aac23e63e0b737d03975124c31a960812 | [
"Apache-2.0"
] | 1 | 2020-08-06T10:48:49.000Z | 2020-08-06T10:48:49.000Z | melior_transformers/version.py | MeliorAI/meliorTransformers | b2936e1aac23e63e0b737d03975124c31a960812 | [
"Apache-2.0"
] | 2 | 2020-02-13T12:45:57.000Z | 2020-04-14T11:30:33.000Z | melior_transformers/version.py | MeliorAI/meliorTransformers | b2936e1aac23e63e0b737d03975124c31a960812 | [
"Apache-2.0"
] | 2 | 2020-07-21T12:43:51.000Z | 2021-08-13T15:21:22.000Z | __version__ = "0.18.2+melior1.1.6"
| 17.5 | 34 | 0.685714 | __version__ = "0.18.2+melior1.1.6"
| 0 | 0 | 0 |
e98da541cdab2e7468b7676608a99d1391d48b0c | 337 | py | Python | src/com/python/io/StringIO_Demo.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | [
"Apache-2.0"
] | null | null | null | src/com/python/io/StringIO_Demo.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | [
"Apache-2.0"
] | null | null | null | src/com/python/io/StringIO_Demo.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | [
"Apache-2.0"
] | null | null | null | '''
Created on 2016年8月4日
@author: Administrator
'''
from io import StringIO
# write to StringIO:
f = StringIO()  # in-memory text buffer
f.write('hello')
f.write(' ')
f.write('world!')
print(f.getvalue())  # getvalue() returns everything written so far
# read from StringIO:
f = StringIO('水面细风生,\n菱歌慢慢声。\n客亭临小市,\n灯火夜妆明。')
while True:
    s = f.readline()  # '' signals end of the buffer
    if s == '':
        break
    print(s.strip())
| 14.652174 | 46 | 0.620178 | '''
Created on 2016年8月4日
@author: Administrator
'''
from io import StringIO
# write to StringIO:
f = StringIO()  # in-memory text buffer
f.write('hello')
f.write(' ')
f.write('world!')
print(f.getvalue())  # getvalue() returns everything written so far
# read from StringIO:
f = StringIO('水面细风生,\n菱歌慢慢声。\n客亭临小市,\n灯火夜妆明。')
while True:
    s = f.readline()  # '' signals end of the buffer
    if s == '':
        break
    print(s.strip())
| 0 | 0 | 0 |
a945437d7d6aae26b12be28155368cc0254fe281 | 4,024 | py | Python | tests/unit/test_remote_runner.py | pyl1b/enhterm | b4eacc25ef1bdfecab9a662b5269d016070d4e6b | [
"MIT"
] | null | null | null | tests/unit/test_remote_runner.py | pyl1b/enhterm | b4eacc25ef1bdfecab9a662b5269d016070d4e6b | [
"MIT"
] | null | null | null | tests/unit/test_remote_runner.py | pyl1b/enhterm | b4eacc25ef1bdfecab9a662b5269d016070d4e6b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
from unittest import TestCase
from unittest.mock import MagicMock
from mock import call
from p2p0mq.app.local_peer import LocalPeer
from p2p0mq.peer import Peer
from enhterm import EnhTerm
from enhterm.command import Command
from enhterm.command.text import TextCommand
from enhterm.impl.p2p.p2p_concern import RemoteConcern
from enhterm.impl.p2p.p2p_runner import RemoteRunner
from enhterm.message import Message
logger = logging.getLogger('')
| 35.298246 | 77 | 0.686382 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals
from __future__ import print_function
import logging
from unittest import TestCase
from unittest.mock import MagicMock
from mock import call
from p2p0mq.app.local_peer import LocalPeer
from p2p0mq.peer import Peer
from enhterm import EnhTerm
from enhterm.command import Command
from enhterm.command.text import TextCommand
from enhterm.impl.p2p.p2p_concern import RemoteConcern
from enhterm.impl.p2p.p2p_runner import RemoteRunner
from enhterm.message import Message
logger = logging.getLogger('')
class TestRemoteRunner(TestCase):
    """Unit tests for RemoteRunner with the p2p0mq application mocked out."""
    def setUp(self):
        # Fake zmq application exposing the sender queue the runner
        # enqueues outgoing messages into.
        self.app = MagicMock(spec=LocalPeer)
        self.app.sender = MagicMock()
        self.app.sender.medium_queue = MagicMock()
        self.term = MagicMock(spec=EnhTerm)
        self.concern = MagicMock(spec=RemoteConcern)
        # Register the mocked concern under the real concern's command id.
        rc = RemoteConcern(MagicMock(), MagicMock())
        self.app.concerns = {rc.command_id: self.concern}
        self.peer = MagicMock(spec=Peer)
        self.peer.uuid = '123456'
        self.testee = RemoteRunner(
            zmq_app=self.app, timeout=5, peer=self.peer,
            term=self.term
        )
    def tearDown(self):
        self.testee = None
    def test_init(self):
        """Constructor defaults and peer lookup by object or by uuid."""
        testee = RemoteRunner(zmq_app=self.app)
        self.assertIsNone(testee.peer)
        self.assertIsNone(testee.term)
        self.assertEqual(testee.timeout, 4)
        testee = RemoteRunner(zmq_app=self.app, peer=self.peer)
        self.assertEqual(testee.peer, self.peer)
        self.app.peers = {'123456': self.peer}
        testee = RemoteRunner(zmq_app=self.app, peer='123456')
        self.assertEqual(testee.peer, self.peer)
    def test_str(self):
        """Both str() and repr() mention the class name."""
        self.assertIn('RemoteRunner', '%s' % self.testee)
        self.assertIn('RemoteRunner', '%r' % self.testee)
    def test_concern(self):
        # BUG FIX: assertEquals is a long-deprecated alias; use assertEqual.
        self.assertEqual(self.testee.concern, self.concern)
    def test_timed_out(self):
        with self.assertRaises(TimeoutError):
            self.testee.timed_out()
    def test_call(self):
        """Plain commands execute locally; text commands are sent remotely."""
        command = MagicMock(spec=Command)
        self.testee(command)
        command.execute.assert_called_once()
        command = MagicMock(spec=TextCommand)
        command.result = 998
        message = MagicMock(spec=Message)
        reply = command, None
        self.testee.concern.get_reply = MagicMock(return_value=reply)
        self.concern.compose = MagicMock(return_value=message)
        self.assertEqual(self.testee(command), 998)
        command.execute.assert_not_called()
        self.app.sender.medium_queue.enqueue.assert_called_once_with(message)
    def test_call_messages(self):
        """Messages attached to the reply are forwarded to the terminal."""
        self.testee.timed_out = MagicMock()
        command = MagicMock(spec=TextCommand)
        command.result = 998
        message = MagicMock(spec=Message)
        messages = [1, 2]
        reply = command, messages
        self.testee.concern.get_reply = MagicMock(return_value=reply)
        self.concern.compose = MagicMock(return_value=message)
        self.assertEqual(self.testee(command), 998)
        command.execute.assert_not_called()
        self.app.sender.medium_queue.enqueue.assert_called_once_with(message)
        self.testee.term.issue_message.assert_has_calls([call(1), call(2)])
        self.testee.timed_out.assert_not_called()
    def test_call_timeout(self):
        """With no reply the runner gives up and reports the timeout."""
        self.testee.timeout = 0.5
        self.testee.timed_out = MagicMock()
        command = MagicMock(spec=TextCommand)
        command.result = 998
        message = MagicMock(spec=Message)
        # The reply never comes so it will timeout
        self.testee.concern.get_reply = MagicMock(return_value=None)
        self.concern.compose = MagicMock(return_value=message)
        self.assertIsNone(self.testee(command))
        command.execute.assert_not_called()
        self.app.sender.medium_queue.enqueue.assert_called_once_with(message)
        self.testee.term.issue_message.assert_not_called()
        self.testee.timed_out.assert_called_once()
| 3,171 | 12 | 265 |
8968aa977cf258c4f59799a1b589d142566eb0f7 | 1,350 | py | Python | portal/urls.py | SlapBass/nx-portal | ee262079db1e5230a24ebbc205e44926f11f8da9 | [
"Apache-2.0"
] | 5 | 2019-10-04T04:46:44.000Z | 2019-10-09T10:02:01.000Z | portal/urls.py | SlapBass/nx-portal | ee262079db1e5230a24ebbc205e44926f11f8da9 | [
"Apache-2.0"
] | 9 | 2019-10-06T07:15:09.000Z | 2020-09-24T02:19:40.000Z | portal/urls.py | SlapBass/nx-portal | ee262079db1e5230a24ebbc205e44926f11f8da9 | [
"Apache-2.0"
] | 1 | 2020-06-19T13:26:08.000Z | 2020-06-19T13:26:08.000Z | """portal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from portal.views import (
HomeView,
)
# Top-level URL routing for the portal project.
urlpatterns = [
    path('', include('webprovider.urls')),
    path('', HomeView.as_view()),
    path('account/', include('account.urls')),
    path('blog/', include('blog.urls')),
    path('upload/', include('upload.urls')),
    path('sys_console/', include('setup.urls')),
    # Provided application url
    path('admin/', admin.site.urls),
    path('markdownx/', include('markdownx.urls')),
    path('', include('first_setup.urls')),
]
# Mount the debug toolbar only when DEBUG is on (development servers).
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
| 30 | 77 | 0.675556 | """portal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from portal.views import (
HomeView,
)
# Top-level URL routing for the portal project.
urlpatterns = [
    path('', include('webprovider.urls')),
    path('', HomeView.as_view()),
    path('account/', include('account.urls')),
    path('blog/', include('blog.urls')),
    path('upload/', include('upload.urls')),
    path('sys_console/', include('setup.urls')),
    # Provided application url
    path('admin/', admin.site.urls),
    path('markdownx/', include('markdownx.urls')),
    path('', include('first_setup.urls')),
]
# Mount the debug toolbar only when DEBUG is on (development servers).
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        path('__debug__/', include(debug_toolbar.urls)),
    ]
| 0 | 0 | 0 |
46a440c9cda585a39f13d22ac47a7b77f98ad2a4 | 4,054 | py | Python | python/fedml/core/mlops/mlops_configs.py | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd | [
"Apache-2.0"
] | null | null | null | python/fedml/core/mlops/mlops_configs.py | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd | [
"Apache-2.0"
] | null | null | null | python/fedml/core/mlops/mlops_configs.py | ray-ruisun/FedML | 24ff30d636bb70f64e94e9ca205375033597d3dd | [
"Apache-2.0"
] | null | null | null | import logging
import os
import requests
if __name__ == "__main__":
pass
| 37.88785 | 102 | 0.593488 | import logging
import os
import requests
class Singleton(object):
    """Mixin base class that makes every subclass a lazily created singleton.

    The first instantiation builds the object and caches it on the class as
    ``_instance``; every later call returns that same cached object.
    """
    def __new__(cls):
        # Create-and-cache on first use; hasattr() preserves the original
        # lazy-initialisation semantics, including attribute inheritance.
        if not hasattr(cls, "_instance"):
            cls._instance = super(Singleton, cls).__new__(cls)
        return cls._instance
class MLOpsConfigs(Singleton):
    """Fetches MQTT/S3/MLOps/docker runtime configurations from the FedML
    configuration server selected by ``args.config_version``."""
    _config_instance = None
    def __init__(self):
        pass
    @staticmethod
    def get_instance(args):
        """Return the process-wide instance, binding *args* on first creation."""
        if MLOpsConfigs._config_instance is None:
            MLOpsConfigs._config_instance = MLOpsConfigs()
            MLOpsConfigs._config_instance.args = args
        return MLOpsConfigs._config_instance
    def get_request_params(self):
        """Resolve the config-fetch URL and the matching CA bundle path.

        Returns:
            tuple: ``(url, cert_path)``; ``cert_path`` is ``None`` for
            plain-HTTP (local) servers.
        """
        url = "https://open.fedml.ai/fedmlOpsServer/configs/fetch"
        config_version = "release"
        if (
            hasattr(self.args, "config_version")
            and self.args.config_version is not None
        ):
            # Setup config url based on selected version.
            config_version = self.args.config_version
            if self.args.config_version == "release":
                url = "https://open.fedml.ai/fedmlOpsServer/configs/fetch"
            elif self.args.config_version == "test":
                url = "https://open-test.fedml.ai/fedmlOpsServer/configs/fetch"
            elif self.args.config_version == "dev":
                url = "https://open-dev.fedml.ai/fedmlOpsServer/configs/fetch"
            elif self.args.config_version == "local":
                if hasattr(self.args, "local_server") and self.args.local_server is not None:
                    url = "http://{}:9000/fedmlOpsServer/configs/fetch".format(self.args.local_server)
                else:
                    url = "http://localhost:9000/fedmlOpsServer/configs/fetch"
        cert_path = None
        if str(url).startswith("https://"):
            # Pin the bundled CA certificate shipped next to this module.
            cur_source_dir = os.path.dirname(__file__)
            cert_path = os.path.join(
                cur_source_dir, "ssl", "open-" + config_version + ".fedml.ai_bundle.crt"
            )
        return url, cert_path
    def _request_configs(self, config_names):
        """POST a config request and return the "data" part of the reply.

        Raises:
            Exception: when the server does not answer with code "SUCCESS".
        """
        url, cert_path = self.get_request_params()
        logging.info(url)
        json_params = {"config_name": config_names}
        # BUG FIX: the old code set ``verify`` on a throwaway
        # ``requests.session()`` object and then posted with ``verify=True``,
        # so the bundled CA certificate was never actually used.  Passing the
        # bundle path directly to ``requests.post`` makes TLS verification
        # use the pinned certificate.
        if cert_path is not None:
            response = requests.post(
                url, json=json_params, verify=cert_path, headers={"Connection": "close"}
            )
        else:
            response = requests.post(
                url, json=json_params, headers={"Connection": "close"}
            )
        payload = response.json()
        if payload.get("code") != "SUCCESS":
            raise Exception("failed to fetch device configurations!")
        return payload.get("data")
    def fetch_configs(self):
        """Fetch and return ``(mqtt_config, s3_config)``."""
        data = self._request_configs(["mqtt_config", "s3_config"])
        return data.get("mqtt_config"), data.get("s3_config")
    def fetch_all_configs(self):
        """Fetch and return ``(mqtt_config, s3_config, mlops_config, docker_config)``."""
        data = self._request_configs(
            ["mqtt_config", "s3_config", "ml_ops_config", "docker_config"]
        )
        return (
            data.get("mqtt_config"),
            data.get("s3_config"),
            data.get("ml_ops_config"),
            data.get("docker_config"),
        )
if __name__ == "__main__":
    # Module is import-only; nothing happens when executed directly.
    pass
| 3,707 | 193 | 72 |
d4a9c553c6e34514fe4c244fea127998e69ae7c6 | 388 | py | Python | apps/shop/migrations/0005_remove_shop_stock.py | lirixiang123/recipe | e78deb5e67c2a4faf140fd183c1a716a23b8a508 | [
"Apache-2.0"
] | null | null | null | apps/shop/migrations/0005_remove_shop_stock.py | lirixiang123/recipe | e78deb5e67c2a4faf140fd183c1a716a23b8a508 | [
"Apache-2.0"
] | null | null | null | apps/shop/migrations/0005_remove_shop_stock.py | lirixiang123/recipe | e78deb5e67c2a4faf140fd183c1a716a23b8a508 | [
"Apache-2.0"
] | 1 | 2021-07-10T16:46:25.000Z | 2021-07-10T16:46:25.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-04-19 07:57
from __future__ import unicode_literals
from django.db import migrations
| 19.4 | 49 | 0.608247 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2020-04-19 07:57
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the ``stock`` field from ``shop.Shop``."""
    dependencies = [
        ('shop', '0004_auto_20200419_1551'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='shop',
            name='stock',
        ),
    ]
| 0 | 216 | 23 |
16e4cad44a1ed9a73f7ef3bad62ac39980aa6c7c | 502 | py | Python | sponge-jython/examples/script/py/processor_inheritance.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 9 | 2017-12-16T21:48:57.000Z | 2022-01-06T12:22:24.000Z | sponge-jython/examples/script/py/processor_inheritance.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 3 | 2020-12-18T11:56:46.000Z | 2022-03-31T18:37:10.000Z | sponge-jython/examples/script/py/processor_inheritance.py | mnpas/sponge | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | [
"Apache-2.0"
] | 2 | 2019-12-29T16:08:32.000Z | 2020-06-15T14:05:34.000Z | """
Sponge Knowledge Base
Processor inheritance
"""
| 22.818182 | 55 | 0.667331 | """
Sponge Knowledge Base
Processor inheritance
"""
def onInit():
    """Sponge callback: runs once when the knowledge base is initialised."""
    # Variables for assertions only
    sponge.setVariable("result", None)
class AbstractEchoAction(Action):
    """Base action supplying the value echoed by concrete subclasses."""
    def calculateResult(self):
        return 1
class EchoAction(AbstractEchoAction):
    """Action whose call result doubles the inherited base value."""
    def onCall(self):
        return self.calculateResult() * 2
def onStartup():
    """Sponge callback: invoke EchoAction once and record its result."""
    result = sponge.call("EchoAction")
    sponge.setVariable("result", result)
    sponge.logger.debug("Action returned: {}", result)
| 264 | 28 | 154 |
5959d094a79362b441bf3e998bd2b1e8d878a593 | 553 | py | Python | python_modules/dagster/dagster_tests/core_tests/types_tests/test_materialization_schema.py | vishvananda/dagster | f6aa44714246bc770fe05a9c986fe8b7d848956b | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/core_tests/types_tests/test_materialization_schema.py | vishvananda/dagster | f6aa44714246bc770fe05a9c986fe8b7d848956b | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster_tests/core_tests/types_tests/test_materialization_schema.py | vishvananda/dagster | f6aa44714246bc770fe05a9c986fe8b7d848956b | [
"Apache-2.0"
] | null | null | null | from dagster import PipelineDefinition, OutputDefinition, lambda_solid, types
| 34.5625 | 100 | 0.804702 | from dagster import PipelineDefinition, OutputDefinition, lambda_solid, types
def test_materialization_schema_types():
    """The String materialization schema's 'json' field has a config type
    that is registered (keyed) on the pipeline's config-type registry."""
    @lambda_solid(output=OutputDefinition(types.String))
    def return_one():
        return 1
    pipeline_def = PipelineDefinition(name='test_materialization_schema_types', solids=[return_one])
    string_mat_schema = pipeline_def.config_type_named('String.MaterializationSchema')
    string_json_mat_schema = string_mat_schema.fields['json'].config_type
    assert pipeline_def.config_type_keyed(string_json_mat_schema.key)
| 451 | 0 | 23 |
67e421e5593e25e525a9cd5688eeb2dc838c4133 | 1,449 | py | Python | source/example.py | momokchung/HIPPO | 7367981eb1b6004f975ab21e594f220c78aa55bc | [
"CC0-1.0"
] | null | null | null | source/example.py | momokchung/HIPPO | 7367981eb1b6004f975ab21e594f220c78aa55bc | [
"CC0-1.0"
] | null | null | null | source/example.py | momokchung/HIPPO | 7367981eb1b6004f975ab21e594f220c78aa55bc | [
"CC0-1.0"
] | 1 | 2021-06-29T19:08:27.000Z | 2021-06-29T19:08:27.000Z | # import os
# import numpy as np
# import readprm
# import readxyz
# import katom
# # I use examples.py to play with code.
# path = '/Users/moseschung/Documents/Ponder/HIPPO/test/testitems'
# atom_dict, bond_dict, angle_dict, repel_dict, disp_dict, mpole_dict, chgpen_dict, polarize_dict, chgtrn_dict = readprm.readkeyfile('/Users/moseschung/Documents/Ponder/HIPPO/test/testitems/water21test.key')
# ATOM = katom.returnatom(atom_dict)
# BOND = katom.returnbond(bond_dict)
# ANGLE = katom.returnangle(angle_dict)
# REPEL = katom.returnrepulsion(repel_dict)
# DISP = katom.returndispersion(disp_dict)
# MPOLE = katom.returnmultipole(mpole_dict)
# CHGPEN = katom.returnchargepenetration(chgpen_dict)
# POLARIZE = katom.returnpolarization(polarize_dict)
# CHGTRN = katom.returnchargetransfer(chgtrn_dict)
# # print(CHGPEN)
# coordinates, atomtype, connectivity = readxyz.readxyz(f'{path}/waterwater.xyz')
# # print(coordinates[0])
# # print(atomtype[0])
# # print(connectivity[0])
# # print(atom_dict)
# # print(ATOM)
# ATOMID = katom.returnatomid(atomtype[0], coordinates[0], connectivity[0], ATOM, MPOLE, CHGPEN)
# for i in ATOMID:
# i.rotmat(ATOMID)
# i.rotsite()
# print(i.returnmscale(ATOMID))
# # coordinates, atomtype, connectivity = readxyz.readxyz(f'{path}/Nawater.xyz')
# # print(coordinates[0])
# # print(atomtype[0])
# # print(connectivity[0])
# print(3.3535788241877126E-002)
# Tuple-unpacking demo: bind three names in one assignment.
one, two, three = 1, 2, 3
print(one)  # prints: 1
| 27.865385 | 207 | 0.73568 | # import os
# import numpy as np
# import readprm
# import readxyz
# import katom
# # I use examples.py to play with code.
# path = '/Users/moseschung/Documents/Ponder/HIPPO/test/testitems'
# atom_dict, bond_dict, angle_dict, repel_dict, disp_dict, mpole_dict, chgpen_dict, polarize_dict, chgtrn_dict = readprm.readkeyfile('/Users/moseschung/Documents/Ponder/HIPPO/test/testitems/water21test.key')
# ATOM = katom.returnatom(atom_dict)
# BOND = katom.returnbond(bond_dict)
# ANGLE = katom.returnangle(angle_dict)
# REPEL = katom.returnrepulsion(repel_dict)
# DISP = katom.returndispersion(disp_dict)
# MPOLE = katom.returnmultipole(mpole_dict)
# CHGPEN = katom.returnchargepenetration(chgpen_dict)
# POLARIZE = katom.returnpolarization(polarize_dict)
# CHGTRN = katom.returnchargetransfer(chgtrn_dict)
# # print(CHGPEN)
# coordinates, atomtype, connectivity = readxyz.readxyz(f'{path}/waterwater.xyz')
# # print(coordinates[0])
# # print(atomtype[0])
# # print(connectivity[0])
# # print(atom_dict)
# # print(ATOM)
# ATOMID = katom.returnatomid(atomtype[0], coordinates[0], connectivity[0], ATOM, MPOLE, CHGPEN)
# for i in ATOMID:
# i.rotmat(ATOMID)
# i.rotsite()
# print(i.returnmscale(ATOMID))
# # coordinates, atomtype, connectivity = readxyz.readxyz(f'{path}/Nawater.xyz')
# # print(coordinates[0])
# # print(atomtype[0])
# # print(connectivity[0])
# print(3.3535788241877126E-002)
# Tuple-unpacking demo: bind three names in one assignment.
one, two, three = 1, 2, 3
print(one)  # prints: 1
| 0 | 0 | 0 |
01103fb5e3adf207d1893caa0cf335a11bc0a0fe | 812 | py | Python | .idea/VirtualEnvironment/Lib/site-packages/hstest/outcomes/unexpected_error_outcome.py | Vladpetr/WeatherApp | 4ce4762775f5942e0df9add7d381b678f19745e1 | [
"Apache-2.0"
] | null | null | null | .idea/VirtualEnvironment/Lib/site-packages/hstest/outcomes/unexpected_error_outcome.py | Vladpetr/WeatherApp | 4ce4762775f5942e0df9add7d381b678f19745e1 | [
"Apache-2.0"
] | null | null | null | .idea/VirtualEnvironment/Lib/site-packages/hstest/outcomes/unexpected_error_outcome.py | Vladpetr/WeatherApp | 4ce4762775f5942e0df9add7d381b678f19745e1 | [
"Apache-2.0"
] | null | null | null | from hstest.common.reflection_utils import get_stacktrace
from hstest.exception.failure_handler import get_report
from hstest.exception.outcomes import UnexpectedError
from hstest.outcomes.outcome import Outcome
| 42.736842 | 92 | 0.711823 | from hstest.common.reflection_utils import get_stacktrace
from hstest.exception.failure_handler import get_report
from hstest.exception.outcomes import UnexpectedError
from hstest.outcomes.outcome import Outcome
class UnexpectedErrorOutcome(Outcome):
    """Outcome reported when the test framework itself fails unexpectedly."""
    def __init__(self, test_num: int, cause: BaseException):
        super().__init__()
        self.test_number = test_num
        self.error_text = 'We have recorded this bug ' \
                          'and will fix it soon.\n\n' + get_report()
        # Full (non-truncated) traceback of the failure.
        self.stack_trace = get_stacktrace(cause, hide_internals=False)
        # Append the wrapped original exception carried by UnexpectedError.
        if isinstance(cause, UnexpectedError) and cause.exception is not None:
            self.stack_trace += '\n' + get_stacktrace(cause.exception, hide_internals=False)
    def get_type(self) -> str:
        return 'Unexpected error'
| 506 | 17 | 76 |
8ecdf074a386d21d74e99703c37cbeba36e8c3dc | 1,057 | py | Python | test/test_silhouette.py | apblair/project5 | 9ca266596895fe2bcef50ec7c3ef2ef4d8e8655e | [
"MIT"
] | null | null | null | test/test_silhouette.py | apblair/project5 | 9ca266596895fe2bcef50ec7c3ef2ef4d8e8655e | [
"MIT"
] | null | null | null | test/test_silhouette.py | apblair/project5 | 9ca266596895fe2bcef50ec7c3ef2ef4d8e8655e | [
"MIT"
] | null | null | null | # write your silhouette score unit tests here
import numpy as np
from scipy.spatial.distance import cdist
import cluster
import pytest
from sklearn.metrics import silhouette_samples, silhouette_score
def test_silhouette():
    """
    Unit test of silhouette scores for each of the observations.
    NOTE: Using sklearn.metrics.silhouette_samples as ground truth. (NOT sklearn.metrics.silhouette_score)
    References
    ----------
    https://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_samples.html
    """
    # Generate cluster data
    mat, labels = cluster.utils.make_clusters()
    # Fit model and predict labels
    k_model = cluster.KMeans(k=3)
    k_model.fit(mat)
    predicted_labels = k_model.predict(mat)
    # Calculate silhouette scores and check against sklearn
    # (per-sample scores, compared element-wise in the final np.allclose)
    bmi203_proj5_sihouette_values = cluster.Silhouette().score(mat, predicted_labels)
    sklearn_silhouette_values = silhouette_samples(mat, predicted_labels)
assert np.allclose(bmi203_proj5_sihouette_values, sklearn_silhouette_values) | 34.096774 | 106 | 0.762535 | # write your silhouette score unit tests here
import numpy as np
from scipy.spatial.distance import cdist
import cluster
import pytest
from sklearn.metrics import silhouette_samples, silhouette_score
def test_silhouette():
"""
Unit test of silhouette scores for each of the observations.
NOTE: Using sklearn.metrics.silhouette_samples as ground truth. (NOT sklearn.metrics.silhouette_score)
References
----------
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_samples.html
"""
# Generate cluster data
mat, labels = cluster.utils.make_clusters()
# Fit model and predict labels
k_model = cluster.KMeans(k=3)
k_model.fit(mat)
predicted_labels = k_model.predict(mat)
# Calculate silhouette scores and check against sklearn
bmi203_proj5_sihouette_values = cluster.Silhouette().score(mat, predicted_labels)
sklearn_silhouette_values = silhouette_samples(mat, predicted_labels)
assert np.allclose(bmi203_proj5_sihouette_values, sklearn_silhouette_values) | 0 | 0 | 0 |
5831af5b51f9a063116d94b4702b2af91a6cc4b6 | 2,722 | py | Python | setup.py | westurner/MyST-NB | 2bc0c11cedbad6206f70546819fad85d779ce449 | [
"BSD-3-Clause"
] | null | null | null | setup.py | westurner/MyST-NB | 2bc0c11cedbad6206f70546819fad85d779ce449 | [
"BSD-3-Clause"
] | null | null | null | setup.py | westurner/MyST-NB | 2bc0c11cedbad6206f70546819fad85d779ce449 | [
"BSD-3-Clause"
] | null | null | null | """MyST-NB package setup."""
from setuptools import find_packages, setup
# Manually finding the version so we don't need to import our module
# (importing myst_nb at build time would require its runtime dependencies).
text = open("./myst_nb/__init__.py").read()
version = None
for line in text.split("\n"):
    if "__version__" in line:
        # e.g.  __version__ = "0.1.0"  ->  0.1.0
        version = line.split("= ")[-1].strip('"')
        break
# Fail loudly instead of silently packaging a garbage version string
# (the original fell through with `line` bound to the file's last line).
if version is None:
    raise RuntimeError("__version__ not found in myst_nb/__init__.py")
# Package metadata. Fixes two internal inconsistencies:
#  * the license classifier said "MIT" while license="BSD-3" — now BSD;
#  * classifiers advertised Python 3.3-3.5 while python_requires is >=3.6.
setup(
    name="myst-nb",
    version=version,
    description=(
        "A Jupyter Notebook Sphinx reader built on top of the MyST markdown parser."
    ),
    long_description=open("README.md", encoding="utf8").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/executablebooks/myst_nb",
    author="ExecutableBookProject",
    author_email="choldgraf@berkeley.edu",
    license="BSD-3",
    packages=find_packages(),
    entry_points={"console_scripts": []},
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Text Processing :: Markup",
        "Framework :: Sphinx :: Extension",
    ],
    keywords="markdown lexer parser development docutils sphinx",
    python_requires=">=3.6",
    package_data={"myst_nb": ["_static/*"]},
    install_requires=[
        "myst-parser~=0.12.0",
        "docutils>=0.15",
        "sphinx>=2,<4",
        "jupyter_sphinx~=0.2.4",
        "jupyter-cache~=0.4.0",
        "ipython",
        "nbformat~=5.0",
        "nbconvert~=5.6",
        "pyyaml",
        "sphinx-togglebutton~=0.2.2",
    ],
    extras_require={
        "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
        "testing": [
            "pytest~=5.4",
            "pytest-cov~=2.8",
            "coverage<5.0",
            "pytest-regressions",
            "matplotlib",
            "numpy",
            "sympy",
            "pandas",
        ],
        "rtd": [
            "coconut~=1.4.3",
            "sphinxcontrib-bibtex",
            "ipywidgets",
            "pandas",
            "numpy",
            "sympy",
            "altair",
            "alabaster",
            "bokeh",
            "plotly",
            "matplotlib",
            "sphinx-copybutton",
            "sphinx-book-theme",
            "sphinx-panels~=0.4.1",
        ],
    },
    zip_safe=True,
)
| 31.651163 | 84 | 0.539677 | """MyST-NB package setup."""
from setuptools import find_packages, setup
# Manually finding the version so we don't need to import our module
text = open("./myst_nb/__init__.py").read()
for line in text.split("\n"):
if "__version__" in line:
break
version = line.split("= ")[-1].strip('"')
setup(
name="myst-nb",
version=version,
description=(
"A Jupyter Notebook Sphinx reader built on top of the MyST markdown parser."
),
long_description=open("README.md", encoding="utf8").read(),
long_description_content_type="text/markdown",
url="https://github.com/executablebooks/myst_nb",
author="ExecutableBookProject",
author_email="choldgraf@berkeley.edu",
license="BSD-3",
packages=find_packages(),
entry_points={"console_scripts": []},
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
"Framework :: Sphinx :: Extension",
],
keywords="markdown lexer parser development docutils sphinx",
python_requires=">=3.6",
package_data={"myst_nb": ["_static/*"]},
install_requires=[
"myst-parser~=0.12.0",
"docutils>=0.15",
"sphinx>=2,<4",
"jupyter_sphinx~=0.2.4",
"jupyter-cache~=0.4.0",
"ipython",
"nbformat~=5.0",
"nbconvert~=5.6",
"pyyaml",
"sphinx-togglebutton~=0.2.2",
],
extras_require={
"code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"],
"testing": [
"pytest~=5.4",
"pytest-cov~=2.8",
"coverage<5.0",
"pytest-regressions",
"matplotlib",
"numpy",
"sympy",
"pandas",
],
"rtd": [
"coconut~=1.4.3",
"sphinxcontrib-bibtex",
"ipywidgets",
"pandas",
"numpy",
"sympy",
"altair",
"alabaster",
"bokeh",
"plotly",
"matplotlib",
"sphinx-copybutton",
"sphinx-book-theme",
"sphinx-panels~=0.4.1",
],
},
zip_safe=True,
)
| 0 | 0 | 0 |
3aedbd4190eca1c860d28695d2fa278fe8070791 | 1,152 | py | Python | solutions/solution30.py | ag-ds-bubble/projEuler | eac7fc0159f1324065c471ef814c88f38284934a | [
"MIT"
] | null | null | null | solutions/solution30.py | ag-ds-bubble/projEuler | eac7fc0159f1324065c471ef814c88f38284934a | [
"MIT"
] | null | null | null | solutions/solution30.py | ag-ds-bubble/projEuler | eac7fc0159f1324065c471ef814c88f38284934a | [
"MIT"
] | null | null | null | """
Author : Achintya Gupta
Date Created : 22-09-2020
"""
"""
Problem Statement
------------------------------------------------
Q) Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:
1634 = 14 + 64 + 34 + 44
8208 = 84 + 24 + 04 + 84
9474 = 94 + 44 + 74 + 44
As 1 = 14 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
"""
from utils import timing_decorator
import numpy as np
@timing_decorator
def digit_n_power(power=5):
    """
    Print the sum of all numbers equal to the sum of the ``power``-th
    powers of their digits (Project Euler problem 30, generalised).

    The function body was missing here (only the bare decorator remained,
    making the module unloadable); restored from the original source.
    """
    # Upper bound: once a d-digit number can't be reached by d * 9**power,
    # no larger number can qualify, so cap the search at d nines.
    llimit, ulimit = 0, int(1e9)
    for i in range(1, 100):
        maxpossible = i * (9 ** power)
        if i >= len(str(maxpossible)):
            ulimit = int("".join(['9'] * i))
            print(llimit, ulimit)
            break
    nums = []
    for i in range(llimit, ulimit):
        # 0 and 1 are trivial single-digit identities, not sums -> excluded.
        if sum(int(k) ** power for k in str(i)) == i and i not in [0, 1]:
            nums.append(i)
    print(f'Sum : ', sum(nums))


digit_n_power()
| 28.097561 | 109 | 0.55816 | """
Author : Achintya Gupta
Date Created : 22-09-2020
"""
"""
Problem Statement
------------------------------------------------
Q) Surprisingly there are only three numbers that can be written as the sum of fourth powers of their digits:
1634 = 14 + 64 + 34 + 44
8208 = 84 + 24 + 04 + 84
9474 = 94 + 44 + 74 + 44
As 1 = 14 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.
"""
from utils import timing_decorator
import numpy as np
@timing_decorator
def digit_n_power(power=5):
# Figure out the upper limit
llimit,ulimit=0,int(1e9)
for i in range(1,100):
maxpossible = i*(9**power)
if i>=len(str(maxpossible)):
ulimit = int("".join(['9']*i))
print(llimit, ulimit)
break
nums = []
for i in range(llimit, ulimit):
if sum([int(k)**power for k in list(str(i))]) == i and i not in [0,1]:
nums.append(i)
print(f'Sum : ', sum(nums))
digit_n_power()
| 451 | 0 | 22 |
366546df064984b3051a9703a0f17c0d0ee7bb3f | 10,264 | py | Python | AadhaarAuth/command.py | SugumaranPoornima/Sample1 | b5c60e68caac608ff394ec10d5c4d7139fec55d3 | [
"MIT"
] | 42 | 2015-01-03T18:00:55.000Z | 2021-09-04T05:04:24.000Z | AadhaarAuth/command.py | SugumaranPoornima/Sample1 | b5c60e68caac608ff394ec10d5c4d7139fec55d3 | [
"MIT"
] | 6 | 2016-07-07T09:33:03.000Z | 2017-07-20T10:50:18.000Z | AadhaarAuth/command.py | SugumaranPoornima/Sample1 | b5c60e68caac608ff394ec10d5c4d7139fec55d3 | [
"MIT"
] | 27 | 2015-01-05T11:01:21.000Z | 2020-11-04T04:27:42.000Z | #!/usr/bin/env python
#
#Copyright (C) 2011 by Venkata Pingali (pingali@gmail.com) & TCS
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import os.path, sys
from config import Config, ConfigMerger
from optparse import OptionParser, SUPPRESS_HELP
import logging
import traceback
log=logging.getLogger('AuthConfig')
__author__ = "Venkata Pingali"
__copyright__ = "Copyright 2011,Venkata Pingali and TCS"
__credits__ = ["UIDAI", "MindTree", "GeoDesic", "Viral Shah"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Venkata Pingali"
__email__ = "pingali@gmail.com"
__status__ = "Pre-release"
class AuthConfig():
"""
Parse the command line
"""
def __init__(self,name="unknown", summary="Unknown commands",
cfg=None):
"""
Initialize command object with information about the target
configuration (e.g., 'request') and appropriate help text
(e.g., 'Processes authentication requests'
"""
self._name = name
self._summary = summary
self._cfg = cfg
    def update_paths(self, cfg):
        """
        Take relative paths of various files and make it absolute

        Each dotted config entry listed in ``paths`` is rewritten in
        place on ``cfg``: values not starting with '/' are prefixed with
        ``cfg.common.dir``.  A missing file is only logged as a warning.
        The two XSD paths are always rebased onto this package's own
        installation directory, regardless of their configured value.
        """
        # first introduce a dir element
        paths = ['common.private_key', 'common.public_cert',
                 'common.pkcs_path', 'common.uid_cert_path',
                 #'common.logfile',
                 #'request_demo.xml', 'request_demo.signedxml',
                 #'request_bio.xml', 'request_bio.signedxml',
                 'response_validate.xml',
                 'signature_default.xml', 'signature_default.signedxml',
                 'signature_verify.signedxml', 'validate_xml_only.xml',
                 'batch_default.json']
        basedir = cfg.common.dir
        for p in paths:
            try:
                # NOTE(review): eval/exec on dotted names — safe only while
                # `paths` stays a hard-coded list; never feed user input here.
                old_path = eval("cfg.%s" % p)
                if not old_path.startswith("/"):
                    new_path = basedir + "/" + old_path
                else:
                    new_path = old_path
                if not os.path.isfile(new_path):
                    log.warn("File %s does not exist" % new_path)
                exec("cfg.%s = '%s'" % (p, new_path))
                #log.debug("Updated path from %s to %s " % \
                #              (old_path, eval("cfg.%s" % p)))
            except:
                # Best-effort: a bad entry is logged and skipped, not fatal.
                traceback.print_exc()
                log.error("Could not update the path for cfg.%s" % p)
                pass
        # Treat the xsd paths specially. They are relative to the
        # package
        xsd_paths = ['common.request_xsd', 'common.response_xsd']
        this_dir = os.path.dirname(os.path.realpath(__file__))
        exec("cfg.common.request_xsd='%s/xsd/uid-auth-request.xsd'" % \
                 this_dir)
        exec("cfg.common.response_xsd='%s/xsd/uid-auth-response.xsd'" % \
                 this_dir)
        #log.debug("request_xsd path is %s " % cfg.common.request_xsd)
        #log.debug("response_xsd path is %s " % cfg.common.response_xsd)
        return
    def update_config(self):
        """
        Process each element of the command line and generate a target
        configuration.

        Builds an OptionParser, reads the config file named by --config
        (default: fixtures/auth.cfg or the path given at construction),
        selects the config section for this command (self._name), makes
        its paths absolute, applies `attrib=value` overrides from the
        positional arguments, and returns the resulting cfg object.
        """
        logging.basicConfig()
        usage = "usage: %prog [options] [<attrib=value> <attrib=value>...]\n" \
            + self._summary
        parser = OptionParser(usage=usage)
        if self._cfg == None:
            default_config_file = "fixtures/auth.cfg"
        else:
            default_config_file = self._cfg
        #=> Set the help text and other command line options.
        parser.add_option("-c", "--config",
                          action="store", type="string", dest="config_file",
                          default=default_config_file,
                          help="Specify the input configuration file. " +
                          "(default: %s)" % default_config_file,
                          metavar="FILE")
        # NOTE(review): self.show_example_config is not defined in this
        # copy of the class — confirm it exists elsewhere, otherwise this
        # raises AttributeError at parser construction time.
        parser.add_option("--show-example-config",
                          action="callback", callback=self.show_example_config,
                          help="Sample configuration file")
        # Default config-section name for every known command.
        defaults = {
            'data': 'request_demo',
            'request': 'request_demo',
            'response': 'response_validate',
            'crypt': 'crypt_test',
            'signature': 'signature_default',
            'validate': 'validate_xml_only',
            'batch': 'batch_default',
            'unknown': 'unknown_default'
            }
        # => For a given command (e.g., response.py), enable the help
        # text for that config element. For everything else, suppress
        # help. Make the options secret.
        for k, v in defaults.items():
            if (k == self._name):
                help_text=\
"""Specify the configuration instance to use for %s. For example, %s. See
available choices in config file. (default: %s)""" % (k, v, v)
            else:
                help_text=SUPPRESS_HELP
            parser.add_option("--" + k,
                              action="store", type="string",
                              dest=k, default=v,
                              metavar="NAME",
                              help=help_text)
        # parse the command line
        (options, args) = parser.parse_args()
        # Check if the configuration file exists
        if (not os.path.isfile(options.config_file)):
            raise Exception("Unknown config file %s " % (options.config_file))
        # Read the configuration file
        cfg = Config(options.config_file)
        # => For the target configuration element (e.g., request or
        # response) check whether the target configuration is valid
        cmd = "cfg[options.%s]" % self._name
        try:
            target_config = eval(cmd)
        except:
            raise Exception("Invalid setting for parameter \'%s\'. Please check the configuration file." % self._name)
        # => Update the configuration for the particular service
        log.debug("Setting request configuration to %s " % \
                      (eval("options.%s" % self._name)))
        cmd = "cfg.%s=cfg[options.%s]" % (self._name, self._name)
        exec(cmd)
        # => Update the paths
        if options.config_file.startswith('/'):
            config_path = options.config_file
        else:
            config_path = os.path.realpath(options.config_file)
        cfg['common']['dir'] = os.path.dirname(config_path)
        self.update_paths(cfg)
        # python <command> --conf=auth.cfg a=x c.d=y Over ride
        # individual parameters of the config file. Note that you can
        # override pretty much any config element. If there is a '.'
        # in the variable name, then it is assumed to refer to full
        # path of the config element (e.g., batch.json). If there is
        # no '.', it is assumed to refer to only the configuration
        # element corresponding to the command (e.g., request).
        param_hash = {}
        for idx, arg in enumerate(args):
            parts = arg.split('=', 1)
            if len(parts) < 2:
                # End of options, don't translate the rest.
                # newargs.extend(sys.argv[idx+1:])
                break
            argname, argvalue = parts
            param_hash[argname] = argvalue
        log.debug("Command line parameter options = " + param_hash.__str__())
        # Print the updated configuration element
        log.debug("Configuration of target element '%s':\n%s " % (self._name, cfg[self._name]))
        # Update the
        for k,v in param_hash.items():
            if "." not in k:
                # here we are updating only the config element
                # corresponding to the command.
                cfg[self._name][k] = v
            else:
                # NOTE(review): exec/eval on command-line-supplied names —
                # only run this tool with trusted argv.
                cmd = "cfg.%s=\'%s\'" % (k, v)
                exec(cmd)
                cmd = "cfg.%s" % k
                log.debug("Updated conf var %s to %s \n" % (cmd, eval(cmd)))
        # Turn some strings in objects
        cfg.common.loglevel = eval("logging.%s" % cfg.common.loglevel)
        if (cfg.validate.signed):
            cfg.validate.signed = eval("%s" % cfg.validate.signed)
        log.debug("Final configuration:\n%s" % cfg)
        return cfg
if __name__ == "__main__":
    # Manual smoke test: parse the CLI and load the 'request' configuration.
    name = "request"
    summary = "Issues authentication requests to the server"
    logging.getLogger().setLevel(logging.DEBUG)
    logging.basicConfig()  # filename="execution.log"
    command = AuthConfig(name, summary)
    cfg = command.update_config()
| 39.937743 | 118 | 0.564984 | #!/usr/bin/env python
#
#Copyright (C) 2011 by Venkata Pingali (pingali@gmail.com) & TCS
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import os.path, sys
from config import Config, ConfigMerger
from optparse import OptionParser, SUPPRESS_HELP
import logging
import traceback
log=logging.getLogger('AuthConfig')
__author__ = "Venkata Pingali"
__copyright__ = "Copyright 2011,Venkata Pingali and TCS"
__credits__ = ["UIDAI", "MindTree", "GeoDesic", "Viral Shah"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Venkata Pingali"
__email__ = "pingali@gmail.com"
__status__ = "Pre-release"
class AuthConfig():
"""
Parse the command line
"""
def __init__(self,name="unknown", summary="Unknown commands",
cfg=None):
"""
Initialize command object with information about the target
configuration (e.g., 'request') and appropriate help text
(e.g., 'Processes authentication requests'
"""
self._name = name
self._summary = summary
self._cfg = cfg
def get_path(cfg, path):
try:
if path.startswith("/"):
# dont do anything
return path
basedir = cfg.common.dir
return basedir + "/" + path
except:
return None
def show_example_config(self, option, opt_str, value, parser):
current_directory = os.path.dirname(__file__)
cfg = file(current_directory + '/fixtures/auth.cfg').read()
print cfg
sys.exit(0)
def update_paths(self, cfg):
"""
Take relative paths of various files and make it absolute
"""
# first introduce a dir element
paths = ['common.private_key', 'common.public_cert',
'common.pkcs_path', 'common.uid_cert_path',
#'common.logfile',
#'request_demo.xml', 'request_demo.signedxml',
#'request_bio.xml', 'request_bio.signedxml',
'response_validate.xml',
'signature_default.xml', 'signature_default.signedxml',
'signature_verify.signedxml', 'validate_xml_only.xml',
'batch_default.json']
basedir = cfg.common.dir
for p in paths:
try:
old_path = eval("cfg.%s" % p)
if not old_path.startswith("/"):
new_path = basedir + "/" + old_path
else:
new_path = old_path
if not os.path.isfile(new_path):
log.warn("File %s does not exist" % new_path)
exec("cfg.%s = '%s'" % (p, new_path))
#log.debug("Updated path from %s to %s " % \
# (old_path, eval("cfg.%s" % p)))
except:
traceback.print_exc()
log.error("Could not update the path for cfg.%s" % p)
pass
# Treat the xsd paths specially. They are relative to the
# package
xsd_paths = ['common.request_xsd', 'common.response_xsd']
this_dir = os.path.dirname(os.path.realpath(__file__))
exec("cfg.common.request_xsd='%s/xsd/uid-auth-request.xsd'" % \
this_dir)
exec("cfg.common.response_xsd='%s/xsd/uid-auth-response.xsd'" % \
this_dir)
#log.debug("request_xsd path is %s " % cfg.common.request_xsd)
#log.debug("response_xsd path is %s " % cfg.common.response_xsd)
return
def update_config(self):
"""
Process each element of the command line and generate a target
configuration.
"""
logging.basicConfig()
usage = "usage: %prog [options] [<attrib=value> <attrib=value>...]\n" \
+ self._summary
parser = OptionParser(usage=usage)
if self._cfg == None:
default_config_file = "fixtures/auth.cfg"
else:
default_config_file = self._cfg
#=> Set the help text and other command line options.
parser.add_option("-c", "--config",
action="store", type="string", dest="config_file",
default=default_config_file,
help="Specify the input configuration file. " +
"(default: %s)" % default_config_file,
metavar="FILE")
parser.add_option("--show-example-config",
action="callback", callback=self.show_example_config,
help="Sample configuration file")
defaults = {
'data': 'request_demo',
'request': 'request_demo',
'response': 'response_validate',
'crypt': 'crypt_test',
'signature': 'signature_default',
'validate': 'validate_xml_only',
'batch': 'batch_default',
'unknown': 'unknown_default'
}
# => For a given command (e.g., response.py), enable the help
# text for that config element. For everything else, suppress
# help. Make the options secret.
for k, v in defaults.items():
if (k == self._name):
help_text=\
"""Specify the configuration instance to use for %s. For example, %s. See
available choices in config file. (default: %s)""" % (k, v, v)
else:
help_text=SUPPRESS_HELP
parser.add_option("--" + k,
action="store", type="string",
dest=k, default=v,
metavar="NAME",
help=help_text)
# parse the command line
(options, args) = parser.parse_args()
# Check if the configuration file exists
if (not os.path.isfile(options.config_file)):
raise Exception("Unknown config file %s " % (options.config_file))
# Read the configuration file
cfg = Config(options.config_file)
# => For the target configuration element (e.g., request or
# response) check whether the target configuration is valid
cmd = "cfg[options.%s]" % self._name
try:
target_config = eval(cmd)
except:
raise Exception("Invalid setting for parameter \'%s\'. Please check the configuration file." % self._name)
# => Update the configuration for the particular service
log.debug("Setting request configuration to %s " % \
(eval("options.%s" % self._name)))
cmd = "cfg.%s=cfg[options.%s]" % (self._name, self._name)
exec(cmd)
# => Update the paths
if options.config_file.startswith('/'):
config_path = options.config_file
else:
config_path = os.path.realpath(options.config_file)
cfg['common']['dir'] = os.path.dirname(config_path)
self.update_paths(cfg)
# python <command> --conf=auth.cfg a=x c.d=y Over ride
# individual parameters of the config file. Note that you can
# override pretty much any config element. If there is a '.'
# in the variable name, then it is assumed to refer to full
# path of the config element (e.g., batch.json). If there is
# no '.', it is assumed to refer to only the configuration
# element corresponding to the command (e.g., request).
param_hash = {}
for idx, arg in enumerate(args):
parts = arg.split('=', 1)
if len(parts) < 2:
# End of options, don't translate the rest.
# newargs.extend(sys.argv[idx+1:])
break
argname, argvalue = parts
param_hash[argname] = argvalue
log.debug("Command line parameter options = " + param_hash.__str__())
# Print the updated configuration element
log.debug("Configuration of target element '%s':\n%s " % (self._name, cfg[self._name]))
# Update the
for k,v in param_hash.items():
if "." not in k:
# here we are updating only the config element
# corresponding to the command.
cfg[self._name][k] = v
else:
cmd = "cfg.%s=\'%s\'" % (k, v)
exec(cmd)
cmd = "cfg.%s" % k
log.debug("Updated conf var %s to %s \n" % (cmd, eval(cmd)))
# Turn some strings in objects
cfg.common.loglevel = eval("logging.%s" % cfg.common.loglevel)
if (cfg.validate.signed):
cfg.validate.signed = eval("%s" % cfg.validate.signed)
log.debug("Final configuration:\n%s" % cfg)
return cfg
if __name__ == "__main__":
name = "request"
summary = "Issues authentication requests to the server"
logging.getLogger().setLevel(logging.DEBUG)
logging.basicConfig()#filename="execution.log")
c = AuthConfig(name, summary)
cfg = c.update_config()
| 441 | 0 | 64 |
dcd5529ef811350b2281f9ccd6e0fe4401edc16a | 1,630 | py | Python | tests/example_project/adam/adam_2.py | XD-DENG/cosmic-ray | d265dd0c7bf65484ee2ff1503129b2b16d0c7f55 | [
"MIT"
] | 1 | 2020-10-18T11:29:03.000Z | 2020-10-18T11:29:03.000Z | tests/example_project/adam/adam_2.py | XD-DENG/cosmic-ray | d265dd0c7bf65484ee2ff1503129b2b16d0c7f55 | [
"MIT"
] | 4 | 2020-11-21T07:36:24.000Z | 2020-11-22T03:09:39.000Z | tests/example_project/adam/adam_2.py | XD-DENG/cosmic-ray | d265dd0c7bf65484ee2ff1503129b2b16d0c7f55 | [
"MIT"
] | 1 | 2021-11-10T10:42:57.000Z | 2021-11-10T10:42:57.000Z | """adam.adam_2
"""
# pylint: disable=C0111
import ctypes
import functools
import operator
def decorator(func):
    """Tag *func* with a ``cosmic_ray`` attribute (mutation-testing target)."""
    func.cosmic_ray = True
    return func


# The decorator and its target function were stripped from this copy,
# leaving a dangling ``@decorator`` (a SyntaxError); restored from the
# original source.
@decorator
def decorated_func():
    """Return True when the decorator-set attribute is present."""
    result = None
    if decorated_func.cosmic_ray:
        result = True
    return result
| 22.027027 | 79 | 0.660123 | """adam.adam_2
"""
# pylint: disable=C0111
import ctypes
import functools
import operator
def trigger_infinite_loop():
result = None
# When `break` becomes `continue`, this should enter an infinite loop. This
# helps us test timeouts.
# Any object which isn't None passes the truth value testing so here
# we use `while object()` instead of `while True` b/c the later becomes
# `while False` when ReplaceTrueFalse is applied and we don't trigger an
# infinite loop.
while object():
result = object()
break
# when `while object()` becomes `while not object()`
# the code below will be triggered
return result
def single_iteration():
result = None
iterable = [object()]
for i in iterable: # pylint: disable=W0612
result = True
return result
def handle_exception():
result = None
try:
raise IOError
except IOError:
result = True
return result
def decorator(func):
func.cosmic_ray = True
return func
@decorator
def decorated_func():
result = None
if decorated_func.cosmic_ray:
result = True
return result
def use_ctypes(size):
array_type = ctypes.c_char * size
chars_a = array_type(*(b"a" * size))
chars_b = array_type(*(b"b" * size))
# This odd construct ensures that, under number mutation to increase number
# values, `size` varies by amounts big enough to trigger a segfault on the
# subsequent memmove.
size = functools.reduce(operator.mul, [10, 10, 10, 10, 10, 10])
ctypes.memmove(chars_a, chars_b, size)
return chars_a.value
| 1,383 | 0 | 137 |
0203a5e3f6313034f06f197a6b5da04a3671f705 | 3,940 | py | Python | convert_video.py | tlalexander/rover_video_scripts | ba930a943585c7b571c8eb63e0a9a97b4213f4ba | [
"Apache-2.0"
] | 3 | 2021-04-12T20:08:05.000Z | 2022-02-23T18:37:18.000Z | convert_video.py | tlalexander/rover_video_scripts | ba930a943585c7b571c8eb63e0a9a97b4213f4ba | [
"Apache-2.0"
] | null | null | null | convert_video.py | tlalexander/rover_video_scripts | ba930a943585c7b571c8eb63e0a9a97b4213f4ba | [
"Apache-2.0"
] | null | null | null | import os
import subprocess as sp
"""batch convert videos to a 4x4 720p version"""
#VIDEO_DIRECTORY = '/media/taylor/external/robot/Rover/trail'
#VIDEO_DIRECTORY = '/media/taylor/feynman/rover_videos/trail_scan/Videos'
VIDEO_DIRECTORY = '/media/taylor/external/robot/Rover/trail/unprocessed'
# Group recordings by their shared 21-char timestamp prefix,
# e.g. "{01_20_2020_20:49:06}_", then merge each group's four camera
# files into one 4x4 720p mosaic with ffmpeg (NVIDIA hw accel).
video_groups = []
files = os.listdir(VIDEO_DIRECTORY)
print(files)
for file in files:
    match = False
    for video in video_groups:
        if video[:21] == file[:21]:
            match = True
            print('Match: {} {}'.format(video[:21], file[:21]))
    if not match:
        # Fixed: append the current file directly instead of indexing
        # `files` with a manually tracked counter.
        video_groups.append(file)
print(video_groups)

for video in video_groups:
    filename = os.path.join(VIDEO_DIRECTORY, video[:21])
    # Fixed: keep the output *path* separate from the subprocess result,
    # which previously clobbered it (and was printed even on failure).
    merged_base = os.path.join(VIDEO_DIRECTORY, "{}_merged".format(video[:21]))
    if os.path.isfile("{}_720.mp4".format(merged_base)):
        print("Output file already exists and will be skipped: {}".format(merged_base))
        continue
    # NOTE: shell=True with interpolated file names — only safe because the
    # directory contents are trusted; do not point this at untrusted paths.
    command = ("ffmpeg -vsync 0 -hwaccel cuvid -c:v h264_cuvid -i {}_2.mkv -hwaccel cuvid -c:v h264_cuvid -i {}_4.mkv -hwaccel cuvid -c:v h264_cuvid -i {}_3.mkv -hwaccel cuvid -c:v h264_cuvid -i {}_1.mkv ".format(filename, filename, filename, filename) +
        "-filter_complex '[0:v] scale_npp=1280:720, hwdownload, format=nv12 [upperleft]; [1:v] scale_npp=1280:720, hwdownload, format=nv12 [lowerleft]; [2:v] scale_npp=1280:720, hwdownload, format=nv12 [upperright]; " +
        "[3:v] scale_npp=1280:720, hwdownload, format=nv12 [lowerright]; [upperleft][upperright][lowerleft][lowerright]xstack=inputs=4:layout=0_0|0_h0|w0_0|w0_h0[mosaic]; [mosaic] hwupload_cuda' " +
        "-c:v hevc_nvenc -preset slow -rc vbr_hq -b:v 20M -maxrate:v 30M -c:a aac -b:a 240k {}_720.mp4".format(merged_base))
    print("Running command: {}".format(command))
    try:
        result = sp.check_output(command, shell=True)
    except sp.CalledProcessError as e:
        # Fixed: the placeholder was previously printed literally
        # (missing .format call).
        print("Error with video {}. Proceeding to the next. Actual error was:".format(video))
        print(e)
    else:
        print("Output was: {}".format(result))
| 54.722222 | 268 | 0.670558 | import os
import os
import subprocess as sp

"""Batch-convert groups of 4 rover camera recordings into one 4x4 720p mosaic.

Recordings in VIDEO_DIRECTORY share a fixed-width timestamp prefix such as
"{01_20_2020_20:49:06}_" followed by a camera index (1-4).  For each unique
prefix, the four streams are stacked into a single grid with ffmpeg using
NVIDIA hardware decode (cuvid) and encode (nvenc).
"""

# Previously-used source directories, kept for reference:
# VIDEO_DIRECTORY = '/media/taylor/external/robot/Rover/trail'
# VIDEO_DIRECTORY = '/media/taylor/feynman/rover_videos/trail_scan/Videos'
VIDEO_DIRECTORY = '/media/taylor/external/robot/Rover/trail/unprocessed'

# Filenames begin with a fixed-width timestamp prefix, e.g. "{01_20_2020_20:49:06}_"
PREFIX_LEN = 21


def group_videos(files, prefix_len=PREFIX_LEN):
    """Return one representative filename per unique timestamp prefix.

    Keeps the first file seen for each distinct ``name[:prefix_len]``,
    preserving the order of ``files``.  Handles an empty list gracefully.
    """
    representatives = []
    seen = set()
    for name in files:
        key = name[:prefix_len]
        if key in seen:
            print('Match: {} {}'.format(key, key))
        else:
            seen.add(key)
            representatives.append(name)
    return representatives


def build_command(filename, output):
    """Build the ffmpeg command that stacks the four camera files for one group.

    ``filename`` is the directory-qualified timestamp prefix (without the
    "_N.mkv" suffix); ``output`` is the directory-qualified output basename.
    The input order (_2, _4, _3, _1) maps each camera to its quadrant of the
    xstack grid (upperleft, lowerleft, upperright, lowerright).
    """
    return ("ffmpeg -vsync 0 -hwaccel cuvid -c:v h264_cuvid -i {}_2.mkv -hwaccel cuvid -c:v h264_cuvid -i {}_4.mkv -hwaccel cuvid -c:v h264_cuvid -i {}_3.mkv -hwaccel cuvid -c:v h264_cuvid -i {}_1.mkv ".format(filename, filename, filename, filename) +
            "-filter_complex '[0:v] scale_npp=1280:720, hwdownload, format=nv12 [upperleft]; [1:v] scale_npp=1280:720, hwdownload, format=nv12 [lowerleft]; [2:v] scale_npp=1280:720, hwdownload, format=nv12 [upperright]; " +
            "[3:v] scale_npp=1280:720, hwdownload, format=nv12 [lowerright]; [upperleft][upperright][lowerleft][lowerright]xstack=inputs=4:layout=0_0|0_h0|w0_0|w0_h0[mosaic]; [mosaic] hwupload_cuda' " +
            "-c:v hevc_nvenc -preset slow -rc vbr_hq -b:v 20M -maxrate:v 30M -c:a aac -b:a 240k {}_720.mp4".format(output))


def main():
    """Merge every unprocessed group of recordings found in VIDEO_DIRECTORY."""
    files = os.listdir(VIDEO_DIRECTORY)
    print(files)
    video_groups = group_videos(files)
    print(video_groups)
    for video in video_groups:
        prefix = os.path.join(VIDEO_DIRECTORY, video[:PREFIX_LEN])
        output = os.path.join(VIDEO_DIRECTORY,
                              "{}_merged".format(video[:PREFIX_LEN]))
        if os.path.isfile("{}_720.mp4".format(output)):
            print("Output file already exists and will be skipped: {}".format(output))
            continue
        command = build_command(prefix, output)
        print("Running command: {}".format(command))
        try:
            # Use a dedicated name for the captured ffmpeg output; the
            # original reused `output` here, clobbering the output path.
            result = sp.check_output(command, shell=True)
        except sp.CalledProcessError as e:
            # BUG FIX: the original message contained a {} placeholder but
            # never called .format(), so the literal "{}" was printed.
            print("Error with video {}. Proceeding to the next. Actual error was:".format(video))
            print(e)
        else:
            print("Output was: {}".format(result))


if __name__ == '__main__':
    main()
| 0 | 0 | 0 |
a2daa5db42366859b5b2ed437252897f3257d420 | 5,583 | py | Python | amazon_kclpy/v3/processor.py | rconroy293/amazon-kinesis-client-python | 66659655e31cec25ca0cc76c397478bdd5bcfcc8 | [
"Apache-2.0"
] | 338 | 2015-01-08T00:39:31.000Z | 2022-03-28T07:17:27.000Z | amazon_kclpy/v3/processor.py | rconroy293/amazon-kinesis-client-python | 66659655e31cec25ca0cc76c397478bdd5bcfcc8 | [
"Apache-2.0"
] | 110 | 2015-01-06T01:22:16.000Z | 2022-03-28T07:26:07.000Z | amazon_kclpy/v3/processor.py | rconroy293/amazon-kinesis-client-python | 66659655e31cec25ca0cc76c397478bdd5bcfcc8 | [
"Apache-2.0"
] | 221 | 2015-01-05T10:56:45.000Z | 2022-02-23T15:40:21.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import abc
from amazon_kclpy.messages import ShutdownInput
class RecordProcessorBase(object):
    """Abstract interface for per-shard record processors.

    A concrete subclass handles exactly one shard of a stream.  The KCL
    drives it through a fixed lifecycle: ``initialize`` once, any number of
    ``process_records`` calls, and finally one of the terminal notifications
    (``lease_lost`` or ``shard_ended``) or a ``shutdown_requested`` call
    before the worker exits.
    """

    # Python 2 style ABC declaration; on Python 3 this attribute has no
    # metaclass effect, so the abstract markers are advisory there.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def initialize(self, initialize_input):
        """Configure the processor before any records are delivered.

        :param amazon_kclpy.messages.InitializeInput initialize_input:
            details of the initialization request for this record processor
        """
        raise NotImplementedError

    @abc.abstractmethod
    def process_records(self, process_records_input):
        """Handle a batch of records read from the shard.

        :param amazon_kclpy.messages.ProcessRecordsInput process_records_input:
            the records, their metadata, and a checkpointer for recording
            progress within the shard
        """
        raise NotImplementedError

    @abc.abstractmethod
    def lease_lost(self, lease_lost_input):
        """React to losing the shard lease.

        After this returns the processor will be shut down; checkpointing is
        no longer possible once the lease has been lost.

        :param amazon_kclpy.messages.LeaseLostInput lease_lost_input:
            details of the lease loss (currently carries no data)
        """
        raise NotImplementedError

    @abc.abstractmethod
    def shard_ended(self, shard_ended_input):
        """React to reaching the end of the shard.

        The processor must checkpoint here so the KCL knows it may start
        processing the child shard(s); failing to checkpoint causes the
        shard end to be retried.

        :param amazon_kclpy.messages.ShardEndedInput shard_ended_input:
            details of the shard end, including a checkpointer
        """
        raise NotImplementedError

    @abc.abstractmethod
    def shutdown_requested(self, shutdown_requested_input):
        """Take one final checkpoint opportunity before worker shutdown.

        Called while the parent process is preparing to shut down, before
        this processor's lease is released.

        :param amazon_kclpy.messages.ShutdownRequestedInput shutdown_requested_input:
            shutdown details, including the checkpointer
        """
        raise NotImplementedError


version = 3
class V2toV3Processor(RecordProcessorBase):
    """Adapter exposing a v2 record processor through the v3 interface.

    Every v3 lifecycle call is forwarded to the wrapped v2 processor; the
    v3 lease-lost and shard-ended notifications are translated back into
    the older ``shutdown`` call with the matching reason.  This is a
    compatibility shim and normally should not be used directly by record
    processors.

    The delegate should be a :py:class:`amazon_kclpy.kcl.RecordProcessorBase`:
    """

    def __init__(self, delegate):
        """Wrap ``delegate`` so it can be driven by the v3 lifecycle.

        :param amazon_kclpy.kcl.v2.RecordProcessorBase delegate: the
            processor that receives every forwarded request
        """
        self.delegate = delegate

    def initialize(self, initialize_input):
        """Forward the initialization request to the delegate.

        :param amazon_kclpy.messages.InitializeInput initialize_input:
            details of the initialization request
        :return: None
        """
        self.delegate.initialize(initialize_input)

    def process_records(self, process_records_input):
        """Forward a batch of records to the delegate for processing.

        :param amazon_kclpy.messages.ProcessRecordsInput process_records_input:
            the records, their metadata, and a checkpointer
        :return: None
        """
        self.delegate.process_records(process_records_input)

    def lease_lost(self, lease_lost_input):
        """Translate a lease loss into a zombie-reason ``shutdown`` call.

        The legacy shutdown input carries no checkpointer in this case,
        which is acceptable because checkpointing after a lost lease would
        fail anyway.

        :param amazon_kclpy.messages.LeaseLostInput lease_lost_input:
            details of the lease loss (currently carries no data)
        :return: None
        """
        self.delegate.shutdown(ShutdownInput.zombie())

    def shard_ended(self, shard_ended_input):
        """Translate a shard end into a terminate-reason ``shutdown`` call.

        :param amazon_kclpy.messages.ShardEndedInput shard_ended_input:
            shard-end details; its checkpointer is passed through to the
            legacy shutdown input
        :return: None
        """
        self.delegate.shutdown(ShutdownInput.terminate(shard_ended_input.checkpointer))

    def shutdown_requested(self, shutdown_requested_input):
        """Forward the shutdown-requested notification to the delegate.

        :param amazon_kclpy.messages.ShutdownRequested shutdown_requested_input:
            information related to the record processor shutdown
        :return: None
        """
        self.delegate.shutdown_requested(shutdown_requested_input)
| 39.595745 | 133 | 0.713057 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import abc
from amazon_kclpy.messages import ShutdownInput
class RecordProcessorBase(object):
    """Abstract interface for per-shard record processors.

    A concrete subclass handles exactly one shard of a stream.  The KCL
    drives it through a fixed lifecycle: ``initialize`` once, any number of
    ``process_records`` calls, and finally one of the terminal notifications
    (``lease_lost`` or ``shard_ended``) or a ``shutdown_requested`` call
    before the worker exits.
    """

    # Python 2 style ABC declaration; on Python 3 this attribute has no
    # metaclass effect, so the abstract markers are advisory there.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def initialize(self, initialize_input):
        """Configure the processor before any records are delivered.

        :param amazon_kclpy.messages.InitializeInput initialize_input:
            details of the initialization request for this record processor
        """
        raise NotImplementedError

    @abc.abstractmethod
    def process_records(self, process_records_input):
        """Handle a batch of records read from the shard.

        :param amazon_kclpy.messages.ProcessRecordsInput process_records_input:
            the records, their metadata, and a checkpointer for recording
            progress within the shard
        """
        raise NotImplementedError

    @abc.abstractmethod
    def lease_lost(self, lease_lost_input):
        """React to losing the shard lease.

        After this returns the processor will be shut down; checkpointing is
        no longer possible once the lease has been lost.

        :param amazon_kclpy.messages.LeaseLostInput lease_lost_input:
            details of the lease loss (currently carries no data)
        """
        raise NotImplementedError

    @abc.abstractmethod
    def shard_ended(self, shard_ended_input):
        """React to reaching the end of the shard.

        The processor must checkpoint here so the KCL knows it may start
        processing the child shard(s); failing to checkpoint causes the
        shard end to be retried.

        :param amazon_kclpy.messages.ShardEndedInput shard_ended_input:
            details of the shard end, including a checkpointer
        """
        raise NotImplementedError

    @abc.abstractmethod
    def shutdown_requested(self, shutdown_requested_input):
        """Take one final checkpoint opportunity before worker shutdown.

        Called while the parent process is preparing to shut down, before
        this processor's lease is released.

        :param amazon_kclpy.messages.ShutdownRequestedInput shutdown_requested_input:
            shutdown details, including the checkpointer
        """
        raise NotImplementedError


version = 3
class V2toV3Processor(RecordProcessorBase):
    """Adapter exposing a v2 record processor through the v3 interface.

    Every v3 lifecycle call is forwarded to the wrapped v2 processor; the
    v3 lease-lost and shard-ended notifications are translated back into
    the older ``shutdown`` call with the matching reason.  This is a
    compatibility shim and normally should not be used directly by record
    processors.

    The delegate should be a :py:class:`amazon_kclpy.kcl.RecordProcessorBase`:
    """

    def __init__(self, delegate):
        """Wrap ``delegate`` so it can be driven by the v3 lifecycle.

        :param amazon_kclpy.kcl.v2.RecordProcessorBase delegate: the
            processor that receives every forwarded request
        """
        self.delegate = delegate

    def initialize(self, initialize_input):
        """Forward the initialization request to the delegate.

        :param amazon_kclpy.messages.InitializeInput initialize_input:
            details of the initialization request
        :return: None
        """
        self.delegate.initialize(initialize_input)

    def process_records(self, process_records_input):
        """Forward a batch of records to the delegate for processing.

        :param amazon_kclpy.messages.ProcessRecordsInput process_records_input:
            the records, their metadata, and a checkpointer
        :return: None
        """
        self.delegate.process_records(process_records_input)

    def lease_lost(self, lease_lost_input):
        """Translate a lease loss into a zombie-reason ``shutdown`` call.

        The legacy shutdown input carries no checkpointer in this case,
        which is acceptable because checkpointing after a lost lease would
        fail anyway.

        :param amazon_kclpy.messages.LeaseLostInput lease_lost_input:
            details of the lease loss (currently carries no data)
        :return: None
        """
        self.delegate.shutdown(ShutdownInput.zombie())

    def shard_ended(self, shard_ended_input):
        """Translate a shard end into a terminate-reason ``shutdown`` call.

        :param amazon_kclpy.messages.ShardEndedInput shard_ended_input:
            shard-end details; its checkpointer is passed through to the
            legacy shutdown input
        :return: None
        """
        self.delegate.shutdown(ShutdownInput.terminate(shard_ended_input.checkpointer))

    def shutdown_requested(self, shutdown_requested_input):
        """Forward the shutdown-requested notification to the delegate.

        :param amazon_kclpy.messages.ShutdownRequested shutdown_requested_input:
            information related to the record processor shutdown
        :return: None
        """
        self.delegate.shutdown_requested(shutdown_requested_input)
| 0 | 0 | 0 |
b7164864904264ede69eac75cce43c9e8b2f5c2e | 15,534 | py | Python | switch_mod/trans_build.py | mseaborn/switch_py | a1cbf924209268e0397ae457227b24fac3f29779 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | switch_mod/trans_build.py | mseaborn/switch_py | a1cbf924209268e0397ae457227b24fac3f29779 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | switch_mod/trans_build.py | mseaborn/switch_py | a1cbf924209268e0397ae457227b24fac3f29779 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Defines model components to describe transmission build-outs for the
SWITCH-Pyomo model.
SYNOPSIS
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel(
... 'timescales', 'financials', 'load_zones', 'trans_build')
>>> instance = model.load_inputs(inputs_dir='test_dat')
"""
import os
from pyomo.environ import *
from financials import capital_recovery_factor as crf
def define_components(mod):
    """
    Adds components to a Pyomo abstract model object to describe bulk
    transmission of an electric grid. This includes parameters, build
    decisions and constraints. Unless otherwise stated, all power
    capacity is specified in units of MW and all sets and parameters are
    mandatory.

    TRANSMISSION_LINES is the complete set of transmission pathways
    connecting load zones. Each member of this set is a one dimensional
    identifier such as "A-B". This set has no regard for directionality
    of transmission lines and will generate an error if you specify two
    lines that move in opposite directions such as (A to B) and (B to
    A). Another derived set - TRANS_LINES_DIRECTIONAL - stores
    directional information. Transmission may be abbreviated as trans or
    tx in parameter names or indexes.

    trans_lz1[tx] and trans_lz2[tx] specify the load zones at either end
    of a transmission line. The order of 1 and 2 is unimportant, but you
    are encouraged to be consistent to simplify merging information back
    into external databases.

    trans_dbid[tx in TRANSMISSION_LINES] is an external database
    identifier for each transmission line. This is an optional parameter
    that defaults to the identifier of the transmission line.

    trans_length_km[tx in TRANSMISSION_LINES] is the length of each
    transmission line in kilometers.

    trans_efficiency[tx in TRANSMISSION_LINES] is the proportion of
    energy sent down a line that is delivered. If 2 percent of energy
    sent down a line is lost, this value would be set to 0.98.

    trans_new_build_allowed[tx in TRANSMISSION_LINES] is a binary value
    indicating whether new transmission build-outs are allowed along a
    transmission line. This optional parameter defaults to True.

    TRANS_BUILD_YEARS is the set of transmission lines and years in
    which they have been or could be built. This set includes past and
    potential future builds. All future builds must come online in the
    first year of an investment period. This set is composed of two
    elements with members: (tx, build_year). For existing transmission
    where the build years are not known, build_year is set to 'Legacy'.

    EXISTING_TRANS_BLD_YRS is a subset of TRANS_BUILD_YEARS that lists
    builds that happened before the first investment period. For most
    datasets the build year is unknown, so it is always set to 'Legacy'.

    existing_trans_cap[tx in TRANSMISSION_LINES] is a parameter that
    describes how many MW of capacity has been installed before the
    start of the study.

    NEW_TRANS_BLD_YRS is a subset of TRANS_BUILD_YEARS that describes
    potential builds.

    BuildTrans[(tx, bld_yr) in TRANS_BUILD_YEARS] is a decision variable
    that describes the transfer capacity in MW installed on a corridor
    in a given build year. For existing builds, this variable is locked
    to the existing capacity.

    TransCapacity[(tx, bld_yr) in TRANS_BUILD_YEARS] is an expression
    that returns the total nameplate transfer capacity of a transmission
    line in a given period. This is the sum of existing and newly-built
    capacity.

    trans_derating_factor[tx in TRANSMISSION_LINES] is an overall
    derating factor for each transmission line that can reflect forced
    outage rates, stability or contingency limitations. This parameter
    is optional and defaults to 0.

    TransCapacityAvailable[(tx, bld_yr) in TRANS_BUILD_YEARS] is an
    expression that returns the available transfer capacity of a
    transmission line in a given period, taking into account the
    nameplate capacity and derating factor.

    trans_terrain_multiplier[tx in TRANSMISSION_LINES] is
    a cost adjuster applied to each transmission line that reflects the
    additional costs that may be incurred for traversing that specific
    terrain. Crossing mountains or cities will be more expensive than
    crossing plains. This parameter is optional and defaults to 1. This
    parameter should be in the range of 0.5 to 3.

    trans_capital_cost_per_mw_km describes the generic costs of building
    new transmission in units of $BASE_YEAR per MW transfer capacity per
    km. This is optional and defaults to 1000.

    trans_lifetime_yrs is the number of years in which a capital
    construction loan for a new transmission line is repaid. This
    optional parameter defaults to 20 years based on 2009 WREZ
    transmission model transmission data. At the end of this time,
    we assume transmission lines will be rebuilt at the same cost.

    trans_fixed_o_m_fraction describes the fixed Operations and
    Maintenance costs as a fraction of capital costs. This optional
    parameter defaults to 0.03 based on 2009 WREZ transmission model
    transmission data costs for existing transmission maintenance.

    trans_cost_annual[tx in TRANSMISSION_LINES] is the cost of building
    transmission lines in units of $BASE_YEAR / MW-transfer-capacity /
    year. This derived parameter combines the annualized capital payment
    (capital recovery factor applied over trans_lifetime_yrs) and the
    annual fixed O&M expressed as a fraction of capital costs.

    TRANS_DIRECTIONAL is a derived set of directional paths that
    electricity can flow along transmission lines. Each element of this
    set is a two-dimensional entry that describes the origin and
    destination of the flow: (load_zone_from, load_zone_to). Every
    transmission line will generate two entries in this set. Members of
    this set are abbreviated as trans_d where possible, but may be
    abbreviated as tx in situations where brevity is important and it is
    unlikely to be confused with the overall transmission line.

    trans_d_line[trans_d] is the transmission line associated with this
    directional path.

    PERIOD_RELEVANT_TRANS_BUILDS[p in PERIODS] is an indexed set that
    describes which transmission builds will be operational in a given
    period. Currently, transmission lines are kept online indefinitely,
    with parts being replaced as they wear out.
    PERIOD_RELEVANT_TRANS_BUILDS[p] will return a subset of (tx, bld_yr)
    in TRANS_BUILD_YEARS.

    --- Delayed implementation ---

    is_dc_line ... Do I even need to implement this?

    --- NOTES ---

    The cost stream over time for transmission lines differs from the
    SWITCH-WECC model. The SWITCH-WECC model assumed new transmission
    had a financial lifetime of 20 years, which was the length of the
    loan term. During this time, fixed operations & maintenance costs
    were also incurred annually and these were estimated to be 3 percent
    of the initial capital costs. These fixed O&M costs were obtained
    from the 2009 WREZ transmission model transmission data costs for
    existing transmission maintenance .. most of those lines were old
    and their capital loans had been paid off, so the O&M were the costs
    of keeping them operational. SWITCH-WECC basically assumed the lines
    could be kept online indefinitely with that O&M budget, with
    components of the lines being replaced as needed. This payment
    schedule and lifetimes was assumed to hold for both existing and new
    lines. This made the annual costs change over time, which could
    create edge effects near the end of the study period. SWITCH-WECC
    had different cost assumptions for local T&D; capital expenses and
    fixed O&M expenses were rolled in together, and those were assumed
    to continue indefinitely. This basically assumed that local T&D would
    be replaced at the end of its financial lifetime.

    SWITCH-Pyomo treats all transmission and distribution (long-
    distance or local) the same. Any capacity that is built will be kept
    online indefinitely. At the end of its financial lifetime, existing
    capacity will be retired and rebuilt, so the annual cost of a line
    upgrade will remain constant in every future year.

    """

    # --- Core topology: lines and their endpoint load zones ---
    mod.TRANSMISSION_LINES = Set()
    mod.trans_lz1 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES)
    mod.trans_lz2 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES)
    mod.min_data_check('TRANSMISSION_LINES', 'trans_lz1', 'trans_lz2')
    # External database id defaults to the line's own identifier.
    mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx)
    mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=PositiveReals)
    # Delivered fraction of energy sent down the line; must be <= 1.
    mod.trans_efficiency = Param(
        mod.TRANSMISSION_LINES,
        within=PositiveReals,
        validate=lambda m, val, tx: val <= 1)
    # All pre-study builds are tagged with the 'Legacy' build year.
    mod.EXISTING_TRANS_BLD_YRS = Set(
        dimen=2,
        initialize=lambda m: set(
            (tx, 'Legacy') for tx in m.TRANSMISSION_LINES))
    mod.existing_trans_cap = Param(
        mod.TRANSMISSION_LINES,
        within=PositiveReals)
    mod.min_data_check(
        'trans_length_km', 'trans_efficiency', 'EXISTING_TRANS_BLD_YRS',
        'existing_trans_cap')
    mod.trans_new_build_allowed = Param(
        mod.TRANSMISSION_LINES, within=Boolean, default=True)
    # Candidate future builds: one (line, period) pair per investment
    # period, filtered to lines where new construction is permitted.
    mod.NEW_TRANS_BLD_YRS = Set(
        dimen=2,
        initialize=lambda m: m.TRANSMISSION_LINES * m.PERIODS,
        filter=lambda m, tx, p: m.trans_new_build_allowed[tx])
    mod.TRANS_BUILD_YEARS = Set(
        dimen=2,
        initialize=lambda m: m.EXISTING_TRANS_BLD_YRS | m.NEW_TRANS_BLD_YRS)
    # NOTE(review): comparing bld_yr <= p mixes the string 'Legacy' with
    # numeric periods; this relies on Python 2 mixed-type ordering (where
    # any string compares greater than any int, so Legacy builds are
    # excluded here) -- confirm before porting to Python 3.
    mod.PERIOD_RELEVANT_TRANS_BUILDS = Set(
        mod.PERIODS,
        within=mod.TRANS_BUILD_YEARS,
        initialize=lambda m, p: set(
            (tx, bld_yr) for (tx, bld_yr) in m.TRANS_BUILD_YEARS
            if bld_yr <= p))
    # bounds_BuildTrans is defined elsewhere in this module; presumably it
    # pins 'Legacy' build years to existing_trans_cap as the docstring says.
    mod.BuildTrans = Var(
        mod.TRANS_BUILD_YEARS,
        within=NonNegativeReals,
        bounds=bounds_BuildTrans)
    # Nameplate capacity in each period: all Legacy capacity plus every
    # new build that came online in or before the period.
    mod.TransCapacity = Expression(
        mod.TRANSMISSION_LINES, mod.PERIODS,
        initialize=lambda m, tx, period: sum(
            m.BuildTrans[tx, bld_yr]
            for (tx2, bld_yr) in m.TRANS_BUILD_YEARS
            if tx2 == tx and (bld_yr == 'Legacy' or bld_yr <= period)))
    mod.trans_derating_factor = Param(
        mod.TRANSMISSION_LINES,
        within=NonNegativeReals,
        default=0,
        validate=lambda m, val, tx: val <= 1)
    # Usable transfer capacity = nameplate * derating factor.
    mod.TransCapacityAvailable = Expression(
        mod.TRANSMISSION_LINES, mod.PERIODS,
        initialize=lambda m, tx, period: (
            m.TransCapacity[tx, period] * m.trans_derating_factor[tx]))
    mod.trans_terrain_multiplier = Param(
        mod.TRANSMISSION_LINES,
        within=Reals,
        default=1,
        validate=lambda m, val, tx: val >= 0.5 and val <= 3)
    mod.trans_capital_cost_per_mw_km = Param(
        within=PositiveReals,
        default=1000)
    mod.trans_lifetime_yrs = Param(
        within=PositiveReals,
        default=20)
    mod.trans_fixed_o_m_fraction = Param(
        within=PositiveReals,
        default=0.03)
    # Total annual fixed costs for building new transmission lines...
    # Multiply capital costs by capital recovery factor to get annual
    # payments. Add annual fixed O&M that are expressed as a fraction of
    # overnight costs.
    # NOTE(review): trans_length_km does not appear in this product even
    # though the capital cost is per MW-km -- confirm this is intended.
    mod.trans_cost_annual = Param(
        mod.TRANSMISSION_LINES,
        within=PositiveReals,
        initialize=lambda m, tx: (
            m.trans_capital_cost_per_mw_km * m.trans_terrain_multiplier[tx] *
            (crf(m.interest_rate, m.trans_lifetime_yrs) +
             m.trans_fixed_o_m_fraction)))
    # An expression to summarize annual costs for the objective
    # function. Units should be total annual future costs in $base_year
    # real dollars. The objective function will convert these to
    # base_year Net Present Value in $base_year real dollars.
    mod.Trans_Fixed_Costs_Annual = Expression(
        mod.PERIODS,
        initialize=lambda m, p: sum(
            m.BuildTrans[tx, bld_yr] * m.trans_cost_annual[tx]
            for (tx, bld_yr) in m.PERIOD_RELEVANT_TRANS_BUILDS[p]))
    # Register this cost stream with the financials module's objective.
    mod.cost_components_annual.append('Trans_Fixed_Costs_Annual')
    # init_TRANS_DIRECTIONAL and init_trans_d_line are defined elsewhere
    # in this module; per the docstring each line yields two directional
    # (zone_from, zone_to) entries mapped back to the underlying line.
    mod.TRANS_DIRECTIONAL = Set(
        dimen=2,
        initialize=init_TRANS_DIRECTIONAL)
    mod.trans_d_line = Param(
        mod.TRANS_DIRECTIONAL,
        within=mod.TRANSMISSION_LINES,
        initialize=init_trans_d_line)
def load_inputs(mod, switch_data, inputs_dir):
    """Read transmission build data files from ``inputs_dir``.

    Required file:

    transmission_lines.tab
        TRANSMISSION_LINE, trans_lz1, trans_lz2, trans_length_km,
        trans_efficiency, existing_trans_cap

    The remaining files are optional; if a file is absent or rows are
    missing, the affected parameters keep the defaults described in the
    component documentation. To override only some columns of
    trans_optional_params.tab, put a dot . in the columns that should
    keep their defaults.

    trans_optional_params.tab
        TRANSMISSION_LINE, trans_dbid, trans_derating_factor,
        trans_terrain_multiplier, trans_new_build_allowed

    trans_params.dat (note: .dat format, not .tab)
        trans_capital_cost_per_mw_km, trans_lifetime_yrs,
        trans_fixed_o_m_fraction, distribution_losses
    """
    # Mandatory line-level data: indexes TRANSMISSION_LINES and fills the
    # required per-line parameters.
    switch_data.load(
        filename=os.path.join(inputs_dir, 'transmission_lines.tab'),
        select=('TRANSMISSION_LINE', 'trans_lz1', 'trans_lz2',
                'trans_length_km', 'trans_efficiency', 'existing_trans_cap'),
        index=mod.TRANSMISSION_LINES,
        param=(mod.trans_lz1, mod.trans_lz2, mod.trans_length_km,
               mod.trans_efficiency, mod.existing_trans_cap))
    # Optional per-line overrides.
    optional_path = os.path.join(inputs_dir, 'trans_optional_params.tab')
    if os.path.isfile(optional_path):
        switch_data.load(
            filename=optional_path,
            select=('TRANSMISSION_LINE', 'trans_dbid', 'trans_derating_factor',
                    'trans_terrain_multiplier', 'trans_new_build_allowed'),
            param=(mod.trans_dbid, mod.trans_derating_factor,
                   mod.trans_terrain_multiplier, mod.trans_new_build_allowed))
    # Optional scalar cost parameters (AMPL-style .dat file).
    scalar_path = os.path.join(inputs_dir, 'trans_params.dat')
    if os.path.isfile(scalar_path):
        switch_data.load(filename=scalar_path)
| 45.026087 | 79 | 0.723574 | # Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Defines model components to describe transmission build-outs for the
SWITCH-Pyomo model.
SYNOPSIS
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel(
... 'timescales', 'financials', 'load_zones', 'trans_build')
>>> instance = model.load_inputs(inputs_dir='test_dat')
"""
import os
from pyomo.environ import *
from financials import capital_recovery_factor as crf
def define_components(mod):
"""
Adds components to a Pyomo abstract model object to describe bulk
transmission of an electric grid. This includes parameters, build
decisions and constraints. Unless otherwise stated, all power
capacity is specified in units of MW and all sets and parameters are
mandatory.
TRANSMISSION_LINES is the complete set of transmission pathways
connecting load zones. Each member of this set is a one dimensional
identifier such as "A-B". This set has no regard for directionality
of transmisison lines and will generate an error if you specify two
lines that move in opposite directions such as (A to B) and (B to
A). Another derived set - TRANS_LINES_DIRECTIONAL - stores
directional information. Transmission may be abbreviated as trans or
tx in parameter names or indexes.
trans_lz1[tx] and trans_lz2[tx] specify the load zones at either end
of a transmission line. The order of 1 and 2 is unimportant, but you
are encouraged to be consistent to simplify merging information back
into external databases.
trans_dbid[tx in TRANSMISSION_LINES] is an external database
identifier for each transmission line. This is an optional parameter
than defaults to the identifier of the transmission line.
trans_length_km[tx in TRANSMISSION_LINES] is the length of each
transmission line in kilometers.
trans_efficiency[tx in TRANSMISSION_LINES] is the proportion of
energy sent down a line that is delivered. If 2 percent of energy
sent down a line is lost, this value would be set to 0.98.
trans_new_build_allowed[tx in TRANSMISSION_LINES] is a binary value
indicating whether new transmission build-outs are allowed along a
transmission line. This optional parameter defaults to True.
TRANS_BUILD_YEARS is the set of transmission lines and years in
which they have been or could be built. This set includes past and
potential future builds. All future builds must come online in the
first year of an investment period. This set is composed of two
elements with members: (tx, build_year). For existing transmission
where the build years are not known, build_year is set to 'Legacy'.
EXISTING_TRANS_BLD_YRS is a subset of TRANS_BUILD_YEARS that lists
builds that happened before the first investment period. For most
datasets the build year is unknown, so is it always set to 'Legacy'.
existing_trans_cap[tx in TRANSMISSION_LINES] is a parameter that
describes how many MW of capacity has been installed before the
start of the study.
NEW_TRANS_BLD_YRS is a subset of TRANS_BUILD_YEARS that describes
potential builds.
BuildTrans[(tx, bld_yr) in TRANS_BUILD_YEARS] is a decision variable
that describes the transfer capacity in MW installed on a cooridor
in a given build year. For existing builds, this variable is locked
to the existing capacity.
TransCapacity[(tx, bld_yr) in TRANS_BUILD_YEARS] is an expression
that returns the total nameplate transfer capacity of a transmission
line in a given period. This is the sum of existing and newly-build
capacity.
trans_derating_factor[tx in TRANSMISSION_LINES] is an overall
derating factor for each transmission line that can reflect forced
outage rates, stability or contingency limitations. This parameter
is optional and defaults to 0.
TransCapacityAvailable[(tx, bld_yr) in TRANS_BUILD_YEARS] is an
expression that returns the available transfer capacity of a
transmission line in a given period, taking into account the
nameplate capacity and derating factor.
trans_terrain_multiplier[tx in TRANSMISSION_LINES] is
a cost adjuster applied to each transmission line that reflects the
additional costs that may be incurred for traversing that specific
terrain. Crossing mountains or cities will be more expensive than
crossing plains. This parameter is optional and defaults to 1. This
parameter should be in the range of 0.5 to 3.
trans_capital_cost_per_mw_km describes the generic costs of building
new transmission in units of $BASE_YEAR per MW transfer capacity per
km. This is optional and defaults to 1000.
trans_lifetime_yrs is the number of years in which a capital
construction loan for a new transmission line is repaid. This
optional parameter defaults to 20 years based on 2009 WREZ
transmission model transmission data. At the end of this time,
we assume transmission lines will be rebuilt at the same cost.
trans_fixed_o_m_fraction is describes the fixed Operations and
Maintenance costs as a fraction of capital costs. This optional
parameter defaults to 0.03 based on 2009 WREZ transmission model
transmission data costs for existing transmission maintenance.
trans_cost_hourly[tx TRANSMISSION_LINES] is the cost of building
transmission lines in units of $BASE_YEAR / MW- transfer-capacity /
hour. This derived parameter is based on the total annualized
capital and fixed O&M costs, then divides that by hours per year to
determine the portion of costs incurred hourly.
TRANS_DIRECTIONAL is a derived set of directional paths that
electricity can flow along transmission lines. Each element of this
set is a two-dimensional entry that describes the origin and
destination of the flow: (load_zone_from, load_zone_to). Every
transmission line will generate two entries in this set. Members of
this set are abbreviated as trans_d where possible, but may be
abbreviated as tx in situations where brevity is important and it is
unlikely to be confused with the overall transmission line.
trans_d_line[trans_d] is the transmission line associated with this
directional path.
PERIOD_RELEVANT_TRANS_BUILDS[p in PERIODS] is an indexed set that
describes which transmission builds will be operational in a given
period. Currently, transmission lines are kept online indefinitely,
with parts being replaced as they wear out.
PERIOD_RELEVANT_TRANS_BUILDS[p] will return a subset of (tx, bld_yr)
in TRANS_BUILD_YEARS.
--- Delayed implementation ---
is_dc_line ... Do I even need to implement this?
--- NOTES ---
The cost stream over time for transmission lines differs from the
SWITCH-WECC model. The SWITCH-WECC model assumed new transmission
had a financial lifetime of 20 years, which was the length of the
loan term. During this time, fixed operations & maintenance costs
were also incurred annually and these were estimated to be 3 percent
of the initial capital costs. These fixed O&M costs were obtained
from the 2009 WREZ transmission model transmission data costs for
existing transmission maintenance .. most of those lines were old
and their capital loans had been paid off, so the O&M were the costs
of keeping them operational. SWITCH-WECC basically assumed the lines
could be kept online indefinitely with that O&M budget, with
components of the lines being replaced as needed. This payment
schedule and lifetimes was assumed to hold for both existing and new
lines. This made the annual costs change over time, which could
create edge effects near the end of the study period. SWITCH-WECC
had different cost assumptions for local T&D; capital expenses and
fixed O&M expenses were rolled in together, and those were assumed
to continue indefinitely. This basically assumed that local T&D would
be replaced at the end of its financial lifetime.
SWITCH-Pyomo treats all transmission and distribution (long-
distance or local) the same. Any capacity that is built will be kept
online indefinitely. At the end of its financial lifetime, existing
capacity will be retired and rebuilt, so the annual cost of a line
upgrade will remain constant in every future year.
"""
mod.TRANSMISSION_LINES = Set()
mod.trans_lz1 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES)
mod.trans_lz2 = Param(mod.TRANSMISSION_LINES, within=mod.LOAD_ZONES)
mod.min_data_check('TRANSMISSION_LINES', 'trans_lz1', 'trans_lz2')
mod.trans_dbid = Param(mod.TRANSMISSION_LINES, default=lambda m, tx: tx)
mod.trans_length_km = Param(mod.TRANSMISSION_LINES, within=PositiveReals)
mod.trans_efficiency = Param(
mod.TRANSMISSION_LINES,
within=PositiveReals,
validate=lambda m, val, tx: val <= 1)
mod.EXISTING_TRANS_BLD_YRS = Set(
dimen=2,
initialize=lambda m: set(
(tx, 'Legacy') for tx in m.TRANSMISSION_LINES))
mod.existing_trans_cap = Param(
mod.TRANSMISSION_LINES,
within=PositiveReals)
mod.min_data_check(
'trans_length_km', 'trans_efficiency', 'EXISTING_TRANS_BLD_YRS',
'existing_trans_cap')
mod.trans_new_build_allowed = Param(
mod.TRANSMISSION_LINES, within=Boolean, default=True)
mod.NEW_TRANS_BLD_YRS = Set(
dimen=2,
initialize=lambda m: m.TRANSMISSION_LINES * m.PERIODS,
filter=lambda m, tx, p: m.trans_new_build_allowed[tx])
mod.TRANS_BUILD_YEARS = Set(
dimen=2,
initialize=lambda m: m.EXISTING_TRANS_BLD_YRS | m.NEW_TRANS_BLD_YRS)
mod.PERIOD_RELEVANT_TRANS_BUILDS = Set(
mod.PERIODS,
within=mod.TRANS_BUILD_YEARS,
initialize=lambda m, p: set(
(tx, bld_yr) for (tx, bld_yr) in m.TRANS_BUILD_YEARS
if bld_yr <= p))
def bounds_BuildTrans(model, tx, bld_yr):
    """Bounds rule for the BuildTrans decision variable.

    Legacy (pre-existing) builds are pinned to the known existing
    capacity; prospective builds are non-negative and unbounded above.
    """
    if (tx, bld_yr) not in model.EXISTING_TRANS_BLD_YRS:
        return (0, None)
    legacy_cap = model.existing_trans_cap[tx]
    return (legacy_cap, legacy_cap)
mod.BuildTrans = Var(
mod.TRANS_BUILD_YEARS,
within=NonNegativeReals,
bounds=bounds_BuildTrans)
mod.TransCapacity = Expression(
mod.TRANSMISSION_LINES, mod.PERIODS,
initialize=lambda m, tx, period: sum(
m.BuildTrans[tx, bld_yr]
for (tx2, bld_yr) in m.TRANS_BUILD_YEARS
if tx2 == tx and (bld_yr == 'Legacy' or bld_yr <= period)))
mod.trans_derating_factor = Param(
mod.TRANSMISSION_LINES,
within=NonNegativeReals,
default=0,
validate=lambda m, val, tx: val <= 1)
mod.TransCapacityAvailable = Expression(
mod.TRANSMISSION_LINES, mod.PERIODS,
initialize=lambda m, tx, period: (
m.TransCapacity[tx, period] * m.trans_derating_factor[tx]))
mod.trans_terrain_multiplier = Param(
mod.TRANSMISSION_LINES,
within=Reals,
default=1,
validate=lambda m, val, tx: val >= 0.5 and val <= 3)
mod.trans_capital_cost_per_mw_km = Param(
within=PositiveReals,
default=1000)
mod.trans_lifetime_yrs = Param(
within=PositiveReals,
default=20)
mod.trans_fixed_o_m_fraction = Param(
within=PositiveReals,
default=0.03)
# Total annual fixed costs for building new transmission lines...
# Multiply capital costs by capital recover factor to get annual
# payments. Add annual fixed O&M that are expressed as a fraction of
# overnight costs.
mod.trans_cost_annual = Param(
mod.TRANSMISSION_LINES,
within=PositiveReals,
initialize=lambda m, tx: (
m.trans_capital_cost_per_mw_km * m.trans_terrain_multiplier[tx] *
(crf(m.interest_rate, m.trans_lifetime_yrs) +
m.trans_fixed_o_m_fraction)))
# An expression to summarize annual costs for the objective
# function. Units should be total annual future costs in $base_year
# real dollars. The objective function will convert these to
# base_year Net Present Value in $base_year real dollars.
mod.Trans_Fixed_Costs_Annual = Expression(
mod.PERIODS,
initialize=lambda m, p: sum(
m.BuildTrans[tx, bld_yr] * m.trans_cost_annual[tx]
for (tx, bld_yr) in m.PERIOD_RELEVANT_TRANS_BUILDS[p]))
mod.cost_components_annual.append('Trans_Fixed_Costs_Annual')
def init_TRANS_DIRECTIONAL(model):
    """Build the set of directional (load_zone_from, load_zone_to) paths.

    Each undirected transmission line contributes flow paths in both
    directions.
    """
    directional = set()
    for line in model.TRANSMISSION_LINES:
        lz_a = model.trans_lz1[line]
        lz_b = model.trans_lz2[line]
        directional.update(((lz_a, lz_b), (lz_b, lz_a)))
    return directional
mod.TRANS_DIRECTIONAL = Set(
dimen=2,
initialize=init_TRANS_DIRECTIONAL)
def init_trans_d_line(m, lz_from, lz_to):
    """Look up the transmission line that joins the two load zones.

    Matching is direction-insensitive.  Implicitly returns None when no
    line joins the pair (should not occur for members of
    TRANS_DIRECTIONAL).
    """
    for line in m.TRANSMISSION_LINES:
        endpoints = (m.trans_lz1[line], m.trans_lz2[line])
        if endpoints in ((lz_from, lz_to), (lz_to, lz_from)):
            return line
mod.trans_d_line = Param(
mod.TRANS_DIRECTIONAL,
within=mod.TRANSMISSION_LINES,
initialize=init_trans_d_line)
def load_inputs(mod, switch_data, inputs_dir):
    """
    Import data related to transmission builds. The following files are
    expected in the input directory:

    transmission_lines.tab
        TRANSMISSION_LINE, trans_lz1, trans_lz2, trans_length_km,
        trans_efficiency, existing_trans_cap

    The next files are optional. Missing files or rows fall back to the
    defaults described in the parameter documentation. To override only
    some columns of trans_optional_params.tab, put a dot . in the
    columns that you don't want to override.

    trans_optional_params.tab
        TRANSMISSION_LINE, trans_dbid, trans_derating_factor,
        trans_terrain_multiplier, trans_new_build_allowed

    Note that the next file is formatted as .dat, not as .tab.

    trans_params.dat
        trans_capital_cost_per_mw_km, trans_lifetime_yrs,
        trans_fixed_o_m_fraction, distribution_losses
    """
    # Required core data: one row per transmission line.
    switch_data.load(
        filename=os.path.join(inputs_dir, 'transmission_lines.tab'),
        select=('TRANSMISSION_LINE', 'trans_lz1', 'trans_lz2',
                'trans_length_km', 'trans_efficiency', 'existing_trans_cap'),
        index=mod.TRANSMISSION_LINES,
        param=(mod.trans_lz1, mod.trans_lz2, mod.trans_length_km,
               mod.trans_efficiency, mod.existing_trans_cap))
    # Optional per-line overrides of defaulted parameters.
    optional_path = os.path.join(inputs_dir, 'trans_optional_params.tab')
    if os.path.isfile(optional_path):
        switch_data.load(
            filename=optional_path,
            select=('TRANSMISSION_LINE', 'trans_dbid', 'trans_derating_factor',
                    'trans_terrain_multiplier', 'trans_new_build_allowed'),
            param=(mod.trans_dbid, mod.trans_derating_factor,
                   mod.trans_terrain_multiplier, mod.trans_new_build_allowed))
    # Optional scalar parameters in AMPL .dat format.
    scalar_path = os.path.join(inputs_dir, 'trans_params.dat')
    if os.path.isfile(scalar_path):
        switch_data.load(filename=scalar_path)
| 694 | 0 | 81 |
dfea48865d3a2ee9dcce9adeaac49ca447c94ea2 | 2,241 | py | Python | apt/tags/releasefile.py | javajawa/debian-repo-remux | b6626b268acd1743208d8a399f8c975316cfbc80 | [
"BSD-2-Clause"
] | 1 | 2019-10-31T08:36:29.000Z | 2019-10-31T08:36:29.000Z | apt/tags/releasefile.py | javajawa/debian-repo-remux | b6626b268acd1743208d8a399f8c975316cfbc80 | [
"BSD-2-Clause"
] | null | null | null | apt/tags/releasefile.py | javajawa/debian-repo-remux | b6626b268acd1743208d8a399f8c975316cfbc80 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Release File wrapper.
The ReleaseFile is the overall meta data file of a :class:apt.repo.Distribution
"""
from typing import Dict, Optional, List
from .tagblock import TagBlock
from .filehash import FileHash
class ReleaseFile(TagBlock):
    """
    Release File wrapper.
    The ReleaseFile is the overall meta data file of a :class:apt.repo.Distribution
    """

    # Hash/size records for the files listed in the release, keyed by name.
    files: Dict[str, FileHash]

    def components(self) -> List[str]:
        """Return the release's components as a list of names.

        :return List[str]:
        """
        components_field = self['Components']
        return components_field.split(' ')

    def architectures(self) -> List[str]:
        """Return the release's architectures as a list of names.

        :return List[str]:
        """
        architectures_field = self['Architectures']
        return architectures_field.split(' ')
| 26.678571 | 99 | 0.586345 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Release File wrapper.
The ReleaseFile is the overall meta data file of a :class:apt.repo.Distribution
"""
from typing import Dict, Optional, List
from .tagblock import TagBlock
from .filehash import FileHash
class ReleaseFile(TagBlock):
    """
    Release File wrapper.
    The ReleaseFile is the overall meta data file of a :class:apt.repo.Distribution
    """
    # Per-file hash/size records, keyed by file name within the release.
    files: Dict[str, FileHash]

    def __init__(self):
        """Set up an empty release with the four checksum 'magic' keys."""
        super(ReleaseFile, self).__init__()
        # These keys get special list-of-files handling in
        # __setitem__/__getitem__ rather than plain tag storage.
        self.magic.append('MD5Sum')
        self.magic.append('SHA1')
        self.magic.append('SHA256')
        self.magic.append('SHA512')
        self.files = {}

    def __setitem__(self, key: str, value: str) -> None:
        """Store a tag value; checksum blocks are parsed into FileHash records.

        For the magic checksum keys, *value* is expected to be a multi-line
        block of "<checksum> <size> <filename>" triples (one per line), as
        found in a Debian Release file.
        """
        if key not in self.magic:
            super(ReleaseFile, self).__setitem__(key, value)
            return
        # One "<checksum> <size> <filename>" triple per line.
        for [checksum, size_s, filename] in [x.split() for x in value.split('\n')]:
            size: int = int(size_s.strip(), 10)
            checksum: str = checksum.strip()
            filename: str = filename.strip()
            if filename not in self.files:
                self.files[filename] = FileHash(filename)
            self.files[filename].size = size
            # The attribute name matches the magic key, e.g. fh.SHA256 = ...
            self.files[filename].__setattr__(key, checksum)

    def __getitem__(self, key: str) -> Optional[str]:
        """Return a tag value; magic keys are re-rendered as file lists.

        Returns None for a magic key when no stored file carries that
        checksum type.
        """
        if key not in self.magic:
            return super(ReleaseFile, self).__getitem__(key)
        output = []
        file_list = self.files.values()
        # NOTE(review): sorting accesses f['filename'] while formatting uses
        # info.filename -- assumes FileHash supports both item and attribute
        # access; confirm against the FileHash class.
        file_list = sorted(file_list, key=lambda f: f['filename'])
        for info in file_list:
            if key not in info:
                continue
            output.append('{0} {1.size:>12} {1.filename}'.format(info.__getattribute__(key), info))
        if not output:
            return None
        return '\n'.join(output)

    def components(self) -> List[str]:
        """
        Returns the list of components as a python List
        :return List[str]:
        """
        return self['Components'].split(' ')

    def architectures(self) -> List[str]:
        """
        Returns the list of architectures as a python List
        :return List[str]:
        """
        return self['Architectures'].split(' ')
| 1,313 | 0 | 81 |
6f702f2b39955f36a87608d00daaf7bead43074c | 2,357 | py | Python | examples/offboard_velocity_ned.py | thomas-watters-skydio/MAVSDK-Python | e0f9db072e802a06a792a4ed6c64ce75f900167f | [
"BSD-3-Clause"
] | null | null | null | examples/offboard_velocity_ned.py | thomas-watters-skydio/MAVSDK-Python | e0f9db072e802a06a792a4ed6c64ce75f900167f | [
"BSD-3-Clause"
] | null | null | null | examples/offboard_velocity_ned.py | thomas-watters-skydio/MAVSDK-Python | e0f9db072e802a06a792a4ed6c64ce75f900167f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import asyncio
from mavsdk import System
from mavsdk.offboard import OffboardError, VelocityNedYaw
async def run():
    """ Does Offboard control using velocity NED coordinates. """

    drone = System()
    # Connect to an autopilot (e.g. a SITL simulator) broadcasting on UDP 14540.
    await drone.connect(system_address="udp://:14540")

    print("Waiting for drone to connect...")
    async for state in drone.core.connection_state():
        if state.is_connected:
            print(f"Drone discovered!")
            break

    print("-- Arming")
    await drone.action.arm()

    # Stream a neutral (zero velocity) setpoint before starting offboard mode.
    print("-- Setting initial setpoint")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(0.0, 0.0, 0.0, 0.0))

    print("-- Starting offboard")
    try:
        await drone.offboard.start()
    except OffboardError as error:
        print(
            f"Starting offboard mode failed with error code: \
            {error._result.result}"
        )
        print("-- Disarming")
        await drone.action.disarm()
        return

    # VelocityNedYaw arguments are (north m/s, east m/s, down m/s, yaw deg);
    # NED "down" is positive, so -2.0 below climbs at 2 m/s.
    print("-- Go up 2 m/s")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(0.0, 0.0, -2.0, 0.0))
    await asyncio.sleep(4)

    print("-- Go North 2 m/s, turn to face East")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(2.0, 0.0, 0.0, 90.0))
    await asyncio.sleep(4)

    print("-- Go South 2 m/s, turn to face West")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(-2.0, 0.0, 0.0, 270.0))
    await asyncio.sleep(4)

    print("-- Go West 2 m/s, turn to face East")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(0.0, -2.0, 0.0, 90.0))
    await asyncio.sleep(4)

    print("-- Go East 2 m/s")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(0.0, 2.0, 0.0, 90.0))
    await asyncio.sleep(4)

    print("-- Turn to face South")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(0.0, 0.0, 0.0, 180.0))
    await asyncio.sleep(2)

    print("-- Go down 1 m/s, turn to face North")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(0.0, 0.0, 1.0, 0.0))
    await asyncio.sleep(4)

    print("-- Stopping offboard")
    try:
        await drone.offboard.stop()
    except OffboardError as error:
        print(
            f"Stopping offboard mode failed with error code: \
            {error._result.result}"
        )
if __name__ == "__main__":
    # asyncio.run() (Python 3.7+) creates, runs and closes the event loop,
    # replacing the deprecated get_event_loop()/run_until_complete pattern.
    asyncio.run(run())
| 29.098765 | 80 | 0.637675 | #!/usr/bin/env python3
import asyncio
from mavsdk import System
from mavsdk.offboard import OffboardError, VelocityNedYaw
async def run():
    """Fly a short maneuver sequence using Offboard velocity NED setpoints."""

    drone = System()
    await drone.connect(system_address="udp://:14540")

    print("Waiting for drone to connect...")
    async for state in drone.core.connection_state():
        if state.is_connected:
            print(f"Drone discovered!")
            break

    print("-- Arming")
    await drone.action.arm()

    print("-- Setting initial setpoint")
    await drone.offboard.set_velocity_ned(VelocityNedYaw(0.0, 0.0, 0.0, 0.0))

    print("-- Starting offboard")
    try:
        await drone.offboard.start()
    except OffboardError as error:
        print(
            f"Starting offboard mode failed with error code: \
            {error._result.result}"
        )
        print("-- Disarming")
        await drone.action.disarm()
        return

    # (message, (north, east, down, yaw), seconds to hold the setpoint)
    maneuvers = (
        ("-- Go up 2 m/s", (0.0, 0.0, -2.0, 0.0), 4),
        ("-- Go North 2 m/s, turn to face East", (2.0, 0.0, 0.0, 90.0), 4),
        ("-- Go South 2 m/s, turn to face West", (-2.0, 0.0, 0.0, 270.0), 4),
        ("-- Go West 2 m/s, turn to face East", (0.0, -2.0, 0.0, 90.0), 4),
        ("-- Go East 2 m/s", (0.0, 2.0, 0.0, 90.0), 4),
        ("-- Turn to face South", (0.0, 0.0, 0.0, 180.0), 2),
        ("-- Go down 1 m/s, turn to face North", (0.0, 0.0, 1.0, 0.0), 4),
    )
    for message, (north, east, down, yaw), hold in maneuvers:
        print(message)
        await drone.offboard.set_velocity_ned(
            VelocityNedYaw(north, east, down, yaw))
        await asyncio.sleep(hold)

    print("-- Stopping offboard")
    try:
        await drone.offboard.stop()
    except OffboardError as error:
        print(
            f"Stopping offboard mode failed with error code: \
            {error._result.result}"
        )
if __name__ == "__main__":
    # asyncio.run() (Python 3.7+) creates, runs and closes the event loop,
    # replacing the deprecated get_event_loop()/run_until_complete pattern.
    asyncio.run(run())
| 0 | 0 | 0 |
e58aabb445f12ba0189a44e8748879f37afd7e73 | 908 | py | Python | task021.py | lessunc/python-guanabara | 5c4c41eb46cc1742fdf36e3dc3c830a189344fad | [
"MIT"
] | 32 | 2018-12-09T00:44:20.000Z | 2022-03-11T19:28:53.000Z | task021.py | lessunc/python-guanabara | 5c4c41eb46cc1742fdf36e3dc3c830a189344fad | [
"MIT"
] | null | null | null | task021.py | lessunc/python-guanabara | 5c4c41eb46cc1742fdf36e3dc3c830a189344fad | [
"MIT"
] | 4 | 2019-01-21T08:04:29.000Z | 2020-06-01T14:27:15.000Z | #coding: utf-8
#----------------------------------------------------------------
# A program that opens and plays the audio of an MP3 file.
# (comment translated from Portuguese)
#----------------------------------------------------------------
# Playing an MP3 - Exercise #021
#----------------------------------------------------------------
import pygame, mutagen.mp3

# Prompt (Portuguese): type "bora" to enjoy some Green Day; only the
# first character of the lowered answer is kept.
opc = str(input('\n\033[35mDigite bora, para curtir um GreenDayzinho <3: \033[m')).lower()[0]
if opc == 'b':
    # colored banner line with a centered phrase (not important)
    print('\033[2;35;45m>>>{:^60}<<<\033[m'.format('BOM SOM'))
    # stop instruction, with color codes (optional)
    print('\033[35m>> Qualquer botão para parar <<\033[m\n')
    mp3file = 'task021guns.mp3'
    # Initialize the mixer at the file's own sample rate so playback
    # speed is correct.
    pygame.mixer.init(frequency=mutagen.mp3.MP3(mp3file).info.sample_rate)
    pygame.mixer.music.load(mp3file)
    pygame.mixer.music.play()
    pygame.init()
    # Block until any pygame event (key/mouse) arrives, then fall through.
    pygame.event.wait()
    print('Thanks for listening.')
# NOTE(review): leading indentation was lost in extraction; the playback
# block above is assumed to run only when the user typed 'b' -- confirm
# against the original file.
| 32.428571 | 93 | 0.552863 | #coding: utf-8
#----------------------------------------------------------------
# A program that opens and plays the audio of an MP3 file.
# (comment translated from Portuguese)
#----------------------------------------------------------------
# Playing an MP3 - Exercise #021
#----------------------------------------------------------------
import pygame, mutagen.mp3

# Prompt (Portuguese): type "bora" to enjoy some Green Day; only the
# first character of the lowered answer is kept.
opc = str(input('\n\033[35mDigite bora, para curtir um GreenDayzinho <3: \033[m')).lower()[0]
if opc == 'b':
    # colored banner line with a centered phrase (not important)
    print('\033[2;35;45m>>>{:^60}<<<\033[m'.format('BOM SOM'))
    # stop instruction, with color codes (optional)
    print('\033[35m>> Qualquer botão para parar <<\033[m\n')
    mp3file = 'task021guns.mp3'
    # Initialize the mixer at the file's own sample rate so playback
    # speed is correct.
    pygame.mixer.init(frequency=mutagen.mp3.MP3(mp3file).info.sample_rate)
    pygame.mixer.music.load(mp3file)
    pygame.mixer.music.play()
    pygame.init()
    # Block until any pygame event (key/mouse) arrives, then fall through.
    pygame.event.wait()
    print('Thanks for listening.')
# NOTE(review): leading indentation was lost in extraction; the playback
# block above is assumed to run only when the user typed 'b' -- confirm
# against the original file.
| 0 | 0 | 0 |
ecbf59aa4aedba19d0e29b87aaa47f03a9e44068 | 2,335 | py | Python | src/CircleClasses/CircleWithCentre.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 41 | 2021-11-24T05:54:08.000Z | 2022-03-26T10:19:30.000Z | src/CircleClasses/CircleWithCentre.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 1 | 2022-02-28T04:34:51.000Z | 2022-03-07T10:49:27.000Z | src/CircleClasses/CircleWithCentre.py | Lovely-XPP/tkzgeom | bf68e139dc05f759542d6611f4dc07f4f2727b92 | [
"MIT"
] | 10 | 2021-11-24T07:35:17.000Z | 2022-03-25T18:42:14.000Z | from Circle import Circle
from Item import Item
import Constant as c
from GeometryMath import sub, norm
| 37.66129 | 147 | 0.631263 | from Circle import Circle
from Item import Item
import Constant as c
from GeometryMath import sub, norm
class CircleWithCentre(Circle):
def __init__(self, item):
"""Construct CircleWithCentre."""
Circle.__init__(self, item)
self.item["sub_type"] = c.Circle.Definition.WITH_CENTRE
def tikzify(self):
return '\\tkzDrawCircle[%s](%s,%s)' % (self.tikzify_options(), self.item["definition"]["O"], self.item["definition"]["P"])
def recompute_canvas(self, items, window, width, height):
O = items[self.depends_on()[0]].get_canvas_coordinates()
P = items[self.depends_on()[1]].get_canvas_coordinates()
self.set_canvas_centre_xy(*O)
self.set_canvas_radius(norm(sub(O, P)))
def recompute_canvas_with_mouse(self, scene, x, y):
O = scene.project_data.items[scene.select_history.id_history[0]].get_canvas_coordinates()
P = x, y
return O, norm(sub(O, P))
def __str__(self):
return "Circle (%s) with centre %s and perimetric point %s" % (self.item["id"], self.item["definition"]["O"], self.item["definition"]["P"])
def definition_builder(self, data, items=None):
return dict(zip(["O", "P"], data))
def parse_into_definition(self, arguments, items):
# arguments length condition
if len(arguments) != 2:
return None
# all arguments are members of the regular expression for argument name
if not all(map(lambda x: self.name_pattern(x), arguments)):
return None
# all arguments are items that already exist
if not all(map(lambda x: x in items, arguments)):
return None
# the type of all arguments is of a certain type
if not all(map(lambda x: items[x].item["type"] == 'point', arguments)):
return None
# self-reference condition (self-reference is not permitted)
if self.get_id() in arguments:
return None
# condition for cross reference
for id in arguments:
deep_depends = items[id].deep_depends_on(items)
if self.get_id() in deep_depends:
return None
return self.definition_builder(arguments)
@staticmethod
def static_patterns():
return ["pp"]
def patterns(self):
return ["pp"]
| 1,791 | 416 | 23 |
ef9536dd514639f1215577b8b7f3ad663346be72 | 330 | py | Python | examples/effects/lens/barrel.py | SimLeek/displayarray | 64fe1e2094448d86d743536eedae0039ca339063 | [
"MIT"
] | 8 | 2019-11-01T19:14:36.000Z | 2021-08-18T17:55:43.000Z | examples/effects/lens/barrel.py | SimLeek/displayarray | 64fe1e2094448d86d743536eedae0039ca339063 | [
"MIT"
] | 12 | 2019-10-01T06:06:48.000Z | 2020-04-29T23:05:58.000Z | examples/effects/lens/barrel.py | SimLeek/displayarray | 64fe1e2094448d86d743536eedae0039ca339063 | [
"MIT"
] | 3 | 2018-04-03T01:29:21.000Z | 2019-06-27T02:52:34.000Z | from displayarray.effects import lens
from displayarray import display
from examples.videos import test_video
# Move the mouse to center the image, scroll to increase/decrease barrel, ctrl+scroll to increase/decrease zoom
m = lens.Barrel(use_bleed=False)
m.enable_mouse_control()
display(test_video, callbacks=m, blocking=True)
| 33 | 111 | 0.821212 | from displayarray.effects import lens
from displayarray import display
from examples.videos import test_video
# Move the mouse to center the image, scroll to increase/decrease barrel, ctrl+scroll to increase/decrease zoom
m = lens.Barrel(use_bleed=False)
m.enable_mouse_control()
display(test_video, callbacks=m, blocking=True)
| 0 | 0 | 0 |
4523ae03626671b16b64a7377c63efa8848ee712 | 4,164 | py | Python | pipda/expression.py | pwwang/dpipe | 4efafbb1b13f8a70cc692943473d716b66e9e947 | [
"MIT"
] | null | null | null | pipda/expression.py | pwwang/dpipe | 4efafbb1b13f8a70cc692943473d716b66e9e947 | [
"MIT"
] | null | null | null | pipda/expression.py | pwwang/dpipe | 4efafbb1b13f8a70cc692943473d716b66e9e947 | [
"MIT"
] | null | null | null | """Provides the abstract class Expression"""
from abc import ABC, abstractmethod
from functools import partialmethod
from typing import Any
from .context import ContextBase
class Expression(ABC):
    """The abstract Expression class

    Operators, attribute access and item access on an Expression build new
    Expression nodes instead of evaluating anything, so whole expression
    trees can be constructed lazily and evaluated later via `_pipda_eval`.
    """

    def __hash__(self) -> int:
        """Make it hashable"""
        # Identity-based hash: __eq__ is overloaded below to construct an
        # "eq" expression rather than compare, so the default eq/hash
        # contract cannot be relied on.
        return hash(id(self))

    def __getattr__(self, name: str) -> "Expression":
        """Whenever `expr.attr` is encountered,
        return a ReferenceAttr object"""
        # for dispatch
        from .symbolic import ReferenceAttr

        return ReferenceAttr(self, name)

    def __getitem__(self, item: Any) -> "Expression":
        """Whenever `expr[item]` is encountered,
        return a ReferenceItem object"""
        from .symbolic import ReferenceItem

        return ReferenceItem(self, item)

    def _op_handler(self, op: str, *args: Any, **kwargs: Any) -> "Expression":
        """Handle the operators"""
        # Operator.REGISTERED is the currently-registered operator class;
        # imported lazily to avoid a circular import.
        from .operator import Operator

        return Operator.REGISTERED(op, (self, *args), kwargs)

    def __rshift__(self, other):
        """Allow to use the right shift operator

        `expr >> verb(...)` re-creates the Verb with this expression
        prepended as its first argument; any other operand falls back to
        a plain "rshift" operator expression.
        """
        from .verb import Verb

        if isinstance(other, Verb):
            return Verb(
                other._pipda_func,
                (self, *other._pipda_args),
                other._pipda_kwargs,
                dataarg=False,
            )
        return self._op_handler("rshift", other)

    # Make sure the operators connect all expressions into one
    __add__ = partialmethod(_op_handler, "add")
    __radd__ = partialmethod(_op_handler, "radd")
    __sub__ = partialmethod(_op_handler, "sub")
    __rsub__ = partialmethod(_op_handler, "rsub")
    __mul__ = partialmethod(_op_handler, "mul")
    __rmul__ = partialmethod(_op_handler, "rmul")
    __matmul__ = partialmethod(_op_handler, "matmul")
    __rmatmul__ = partialmethod(_op_handler, "rmatmul")
    __truediv__ = partialmethod(_op_handler, "truediv")
    __rtruediv__ = partialmethod(_op_handler, "rtruediv")
    __floordiv__ = partialmethod(_op_handler, "floordiv")
    __rfloordiv__ = partialmethod(_op_handler, "rfloordiv")
    __mod__ = partialmethod(_op_handler, "mod")
    __rmod__ = partialmethod(_op_handler, "rmod")
    __lshift__ = partialmethod(_op_handler, "lshift")
    __rlshift__ = partialmethod(_op_handler, "rlshift")
    # __rshift__ = partialmethod(_op_handler, "rshift")
    __rrshift__ = partialmethod(_op_handler, "rrshift")
    __and__ = partialmethod(_op_handler, "and_")
    __rand__ = partialmethod(_op_handler, "rand_")
    __xor__ = partialmethod(_op_handler, "xor")
    __rxor__ = partialmethod(_op_handler, "rxor")
    __or__ = partialmethod(_op_handler, "or_")
    __ror__ = partialmethod(_op_handler, "ror_")
    __pow__ = partialmethod(_op_handler, "pow")
    __rpow__ = partialmethod(_op_handler, "rpow")
    # __contains__() is forced into bool
    # __contains__ = partialmethod(_op_handler, 'contains')
    __lt__ = partialmethod(_op_handler, "lt")  # type: ignore
    __le__ = partialmethod(_op_handler, "le")
    __eq__ = partialmethod(_op_handler, "eq")  # type: ignore
    __ne__ = partialmethod(_op_handler, "ne")  # type: ignore
    __gt__ = partialmethod(_op_handler, "gt")
    __ge__ = partialmethod(_op_handler, "ge")
    __neg__ = partialmethod(_op_handler, "neg")
    __pos__ = partialmethod(_op_handler, "pos")
    __invert__ = partialmethod(_op_handler, "invert")

    def __index__(self):
        """Allow Expression object to work as indexes"""
        # NOTE(review): returning None from __index__ actually makes Python
        # raise a TypeError when the object is used as an index; presumably
        # the intent is to fail fast there -- confirm before changing.
        return None

    def __iter__(self):
        """Forbidden iterating on Expression objects

        If it is happening, probably wrong usage of functions/verbs
        """
        raise TypeError(
            "Expression object is not iterable.\n"
            "If you are expecting the evaluated results of the object, try "
            "using the piping syntax or writing it in a independent statement, "
            "instead of an argument of a regular function call."
        )

    @abstractmethod
    def _pipda_eval(
        self,
        data: Any,
        context: ContextBase = None,
    ) -> Any:
        """Evaluate the expression using given data"""
| 36.208696 | 80 | 0.662104 | """Provides the abstract class Expression"""
from abc import ABC, abstractmethod
from functools import partialmethod
from typing import Any
from .context import ContextBase
class Expression(ABC):
    """The abstract Expression class

    Operators, attribute access and item access on an Expression build new
    Expression nodes instead of evaluating anything, so whole expression
    trees can be constructed lazily and evaluated later via `_pipda_eval`.
    """

    def __hash__(self) -> int:
        """Make it hashable"""
        # Identity-based hash: __eq__ is overloaded below to construct an
        # "eq" expression rather than compare, so the default eq/hash
        # contract cannot be relied on.
        return hash(id(self))

    def __getattr__(self, name: str) -> "Expression":
        """Whenever `expr.attr` is encountered,
        return a ReferenceAttr object"""
        # for dispatch
        from .symbolic import ReferenceAttr

        return ReferenceAttr(self, name)

    def __getitem__(self, item: Any) -> "Expression":
        """Whenever `expr[item]` is encountered,
        return a ReferenceItem object"""
        from .symbolic import ReferenceItem

        return ReferenceItem(self, item)

    def _op_handler(self, op: str, *args: Any, **kwargs: Any) -> "Expression":
        """Handle the operators"""
        # Operator.REGISTERED is the currently-registered operator class;
        # imported lazily to avoid a circular import.
        from .operator import Operator

        return Operator.REGISTERED(op, (self, *args), kwargs)

    def __rshift__(self, other):
        """Allow to use the right shift operator

        `expr >> verb(...)` re-creates the Verb with this expression
        prepended as its first argument; any other operand falls back to
        a plain "rshift" operator expression.
        """
        from .verb import Verb

        if isinstance(other, Verb):
            return Verb(
                other._pipda_func,
                (self, *other._pipda_args),
                other._pipda_kwargs,
                dataarg=False,
            )
        return self._op_handler("rshift", other)

    # Make sure the operators connect all expressions into one
    __add__ = partialmethod(_op_handler, "add")
    __radd__ = partialmethod(_op_handler, "radd")
    __sub__ = partialmethod(_op_handler, "sub")
    __rsub__ = partialmethod(_op_handler, "rsub")
    __mul__ = partialmethod(_op_handler, "mul")
    __rmul__ = partialmethod(_op_handler, "rmul")
    __matmul__ = partialmethod(_op_handler, "matmul")
    __rmatmul__ = partialmethod(_op_handler, "rmatmul")
    __truediv__ = partialmethod(_op_handler, "truediv")
    __rtruediv__ = partialmethod(_op_handler, "rtruediv")
    __floordiv__ = partialmethod(_op_handler, "floordiv")
    __rfloordiv__ = partialmethod(_op_handler, "rfloordiv")
    __mod__ = partialmethod(_op_handler, "mod")
    __rmod__ = partialmethod(_op_handler, "rmod")
    __lshift__ = partialmethod(_op_handler, "lshift")
    __rlshift__ = partialmethod(_op_handler, "rlshift")
    # __rshift__ = partialmethod(_op_handler, "rshift")
    __rrshift__ = partialmethod(_op_handler, "rrshift")
    __and__ = partialmethod(_op_handler, "and_")
    __rand__ = partialmethod(_op_handler, "rand_")
    __xor__ = partialmethod(_op_handler, "xor")
    __rxor__ = partialmethod(_op_handler, "rxor")
    __or__ = partialmethod(_op_handler, "or_")
    __ror__ = partialmethod(_op_handler, "ror_")
    __pow__ = partialmethod(_op_handler, "pow")
    __rpow__ = partialmethod(_op_handler, "rpow")
    # __contains__() is forced into bool
    # __contains__ = partialmethod(_op_handler, 'contains')
    __lt__ = partialmethod(_op_handler, "lt")  # type: ignore
    __le__ = partialmethod(_op_handler, "le")
    __eq__ = partialmethod(_op_handler, "eq")  # type: ignore
    __ne__ = partialmethod(_op_handler, "ne")  # type: ignore
    __gt__ = partialmethod(_op_handler, "gt")
    __ge__ = partialmethod(_op_handler, "ge")
    __neg__ = partialmethod(_op_handler, "neg")
    __pos__ = partialmethod(_op_handler, "pos")
    __invert__ = partialmethod(_op_handler, "invert")

    def __index__(self):
        """Allow Expression object to work as indexes"""
        # NOTE(review): returning None from __index__ actually makes Python
        # raise a TypeError when the object is used as an index; presumably
        # the intent is to fail fast there -- confirm before changing.
        return None

    def __iter__(self):
        """Forbidden iterating on Expression objects

        If it is happening, probably wrong usage of functions/verbs
        """
        raise TypeError(
            "Expression object is not iterable.\n"
            "If you are expecting the evaluated results of the object, try "
            "using the piping syntax or writing it in a independent statement, "
            "instead of an argument of a regular function call."
        )

    @abstractmethod
    def _pipda_eval(
        self,
        data: Any,
        context: ContextBase = None,
    ) -> Any:
        """Evaluate the expression using given data"""
| 0 | 0 | 0 |
69dff3a5f30dcf9c6e9ef143818675faacfafe6b | 179,701 | py | Python | ioflo/base/building.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 128 | 2015-01-14T12:26:56.000Z | 2021-11-06T07:09:29.000Z | ioflo/base/building.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17 | 2015-01-28T18:26:50.000Z | 2020-11-19T22:08:06.000Z | ioflo/base/building.py | BradyHammond/ioflo | 177ac656d7c4ff801aebb0d8b401db365a5248ce | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2015-01-27T23:28:31.000Z | 2021-05-04T16:37:30.000Z | """building.py build frameworks from mission files
"""
from __future__ import division
#print("module {0}".format(__name__))
import time
import re
import importlib
import os
from collections import deque
try:
from itertools import izip
except ImportError: #python 3 zip is same as izip
izip = zip
from ..aid.sixing import *
from ..aid.odicting import odict
from .globaling import *
from . import excepting
from . import registering
from . import storing
from . import housing
from . import acting
from . import poking
from . import needing
from . import goaling
from . import doing
from . import traiting
from . import fiating
from . import wanting
from . import completing
from . import tasking
from . import framing
from . import logging
from . import serving
from .. import trim
from ..aid.consoling import getConsole
console = getConsole()
from ..trim import exterior
def Convert2Num(text):
    """converts text to python type in order
    Int, hex, Float, Complex
    ValueError if can't
    """
    # Try each converter in turn; order matters (e.g. "1e3" parses as
    # hex 0x1e3 before float is attempted).
    converters = (
        lambda s: int(s, 10),
        lambda s: int(s, 16),
        float,
        complex,
    )
    for convert in converters:
        try:
            return convert(text)
        except ValueError:
            pass
    raise ValueError("Expected Number got '{0}'".format(text))
def Convert2CoordNum(text):
    """converts text to python type in order
    FracDeg, Int, hex, Float, Complex
    ValueError if can't
    """
    # convert to FracDeg Coord if possible: "D Mm" north/east forms are
    # positive fractional degrees
    dm = REO_LatLonNE.findall(text)  # returns list of tuples of groups [(deg,min)]
    if dm:
        deg = float(dm[0][0])
        min_ = float(dm[0][1])
        return (deg + min_/60.0)
    # south/west forms are negative fractional degrees
    dm = REO_LatLonSW.findall(text)  # returns list of tuples of groups [(deg,min)]
    if dm:
        deg = float(dm[0][0])
        min_ = float(dm[0][1])
        return (-(deg + min_/60.0))
    try:
        return (Convert2Num(text))
    except ValueError:
        # Message previously said "CoordPointNum" (copied from the
        # point-aware variant); this function does not accept points.
        raise ValueError("Expected CoordNum got '{0}'".format(text))
def Convert2BoolCoordNum(text):
    """converts text to python type in order
    None, Boolean, FracDeg coord, Int, Float, Complex
    ValueError if can't
    """
    lowered = text.lower()
    # convert to None if possible
    if lowered == 'none':
        return None
    # convert to boolean if possible
    if lowered in ('true', 'yes'):
        return True
    if lowered in ('false', 'no'):
        return False
    try:
        return Convert2CoordNum(text)
    except ValueError:
        # Fixed message (previously said "BoolCoordPointNum", copied from
        # the point-aware variant); also removed an unreachable
        # `return None` that followed this raise.
        raise ValueError("Expected BoolCoordNum got '{0}'".format(text))
def Convert2StrBoolCoordNum(text):
    """converts text to python type in order
    quoted String, None, Boolean, FracDeg coord, Int, Float, Complex
    ValueError if can't
    Need goal wants unitary type not path or point
    """
    if REO_Quoted.match(text):  # text is double quoted string
        return text.strip('"')  # strip off quotes
    if REO_QuotedSingle.match(text):  # text is single quoted string
        return text.strip("'")  # strip off quotes
    try:
        return Convert2BoolCoordNum(text)
    except ValueError:
        raise ValueError("Expected StrBoolCoordNum got '{0}'".format(text))
    # (an unreachable `return None` after the raise has been removed)
def Convert2PointNum(text):
    """
    Convert text to a python type, trying in order:
    Pxy, Pne, Pfs, Pxyz, Pned, Pfsb point classes, then Int / hex /
    Float / Complex via Convert2Num.
    Raises ValueError if no conversion applies.
    """
    # two dimensional point forms
    found = REO_PointXY.findall(text)
    if found:
        x, y = (float(part) for part in found[0])
        return Pxy(x=x, y=y)

    found = REO_PointNE.findall(text)
    if found:
        n, e = (float(part) for part in found[0])
        return Pne(n=n, e=e)

    found = REO_PointFS.findall(text)
    if found:
        f, s = (float(part) for part in found[0])
        return Pfs(f=f, s=s)

    # three dimensional point forms
    found = REO_PointXYZ.findall(text)
    if found:
        x, y, z = (float(part) for part in found[0])
        return Pxyz(x=x, y=y, z=z)

    found = REO_PointNED.findall(text)
    if found:
        n, e, d = (float(part) for part in found[0])
        return Pned(n=n, e=e, d=d)

    found = REO_PointFSB.findall(text)
    if found:
        f, s, b = (float(part) for part in found[0])
        return Pfsb(f=f, s=s, b=b)

    try:
        return Convert2Num(text)
    except ValueError:
        raise ValueError("Expected PointNum got '{0}'".format(text))
def Convert2CoordPointNum(text):
    """
    Convert text to a python type, trying in order:
    fractional-degree coordinate (N/E positive, S/W negative), then
    point / Int / hex / Float / Complex via Convert2PointNum.
    Raises ValueError if no conversion applies.
    """
    # compass style coordinate first; hemisphere determines the sign
    for rexp, sign in ((REO_LatLonNE, 1.0), (REO_LatLonSW, -1.0)):
        found = rexp.findall(text)  # list of (degrees, minutes) group tuples
        if found:
            degrees, minutes = found[0]
            return sign * (float(degrees) + float(minutes) / 60.0)

    try:
        return Convert2PointNum(text)
    except ValueError:
        raise ValueError("Expected CoordPointNum got '{0}'".format(text))
def Convert2BoolCoordPointNum(text):
    """
    Convert text to a python type, trying in order:
    None, Boolean, then coordinate / point / Int / Float / Complex via
    Convert2CoordPointNum.
    Raises ValueError if no conversion applies.
    """
    #convert to None if possible
    if text.lower() == 'none':
        return None

    #convert to boolean if possible
    if text.lower() in ['true', 'yes']:
        return True
    if text.lower() in ['false', 'no']:
        return False

    try:
        return Convert2CoordPointNum(text)
    except ValueError:
        raise ValueError("Expected BoolCoordPointNum got '{0}'".format(text))
    # removed unreachable trailing "return None": the except clause always raises
def Convert2PathCoordPointNum(text):
    """
    Convert text to a python type, trying in order:
    store path string, then coordinate / point / Int / Float / Complex
    via Convert2CoordPointNum.
    Raises ValueError if no conversion applies.
    (Docstring fixed: it previously omitted the path case.)
    """
    #convert to path string if possible
    if REO_PathNode.match(text):
        return (text)

    try:
        return (Convert2CoordPointNum(text))
    except ValueError:
        raise ValueError("Expected PathCoordPointNum got '{0}'".format(text))
    # removed unreachable trailing "return None": the except clause always raises
def Convert2BoolPathCoordPointNum(text):
    """
    Convert text to a python type, trying in order:
    None, Boolean, then path / coordinate / point / Int / Float / Complex
    via Convert2PathCoordPointNum.
    Raises ValueError if no conversion applies.
    """
    #convert to None if possible
    if text.lower() == 'none':
        return None

    #convert to boolean if possible
    if text.lower() in ['true', 'yes']:
        return True
    if text.lower() in ['false', 'no']:
        return False

    try:
        return Convert2PathCoordPointNum(text)
    except ValueError:
        raise ValueError("Expected PathBoolCoordPointNum got '{0}'".format(text))
    # removed unreachable trailing "return None": the except clause always raises
def Convert2StrBoolPathCoordPointNum(text):
    """
    Convert text to a python type, trying in order:
    double or single quoted string (quotes stripped), then None / Boolean /
    path / coordinate / point / Int / Float / Complex via
    Convert2BoolPathCoordPointNum.
    Raises ValueError if no conversion applies.
    """
    if REO_Quoted.match(text): #text is double quoted string
        return text.strip('"') #strip off quotes
    if REO_QuotedSingle.match(text): #text is single quoted string
        return text.strip("'") #strip off quotes

    try:
        return (Convert2BoolPathCoordPointNum(text))
    except ValueError:
        raise ValueError("Expected StrBoolPathCoordPointNum got '{0}'".format(text))
    # removed unreachable trailing "return None": the except clause always raises
def StripQuotes(text):
    """
    Return text with matching leading and trailing quotes (single or
    double) stripped off if present; otherwise return text unchanged.
    """
    for rexp, quote in ((REO_Quoted, '"'), (REO_QuotedSingle, "'")):
        if rexp.match(text):  # text is a quoted string of this flavor
            return text.strip(quote)
    return text
# Declaration verbs recognized by Builder.dispatch; each verb 'x' is routed
# to a buildX method on Builder (buildGeneric raises for any verb without one)
VerbList = ['load', 'house', 'init',
            'server',
            'logger', 'log', 'loggee',
            'framer', 'first',
            'frame', 'over', 'under', 'next', 'done', 'timeout', 'repeat',
            'native', 'benter', 'enter', 'recur', 'exit', 'precur', 'renter', 'rexit',
            'print', 'put', 'inc', 'copy', 'set',
            'aux', 'rear', 'raze',
            'go', 'let',
            'do',
            'bid', 'ready', 'start', 'stop', 'run', 'abort',
            'use', 'flo', 'give', 'take' ]

#reserved tokens
# comparison operators usable in script conditions
Comparisons = ['==', '<', '<=', '>=', '>', '!=']
# connective words that join clauses; a line starting with one of these is
# treated as a continuation of the previous command (see Builder.build)
Connectives = ['to', 'by', 'with', 'from', 'per', 'for', 'cum', 'qua', 'via',
               'as', 'at', 'in', 'of', 'on', 're', 'is',
               'if', 'be', 'into', 'and', 'not', '+-', ]

Reserved = Connectives + Comparisons #concatenate to get reserved words

ReservedFrameNames = ['next', 'prev'] # frame names with special meaning as target of goto
class Builder(object):
"""
"""
    def __init__(self, fileName='', mode=None, metas=None, preloads=None, behaviors=None):
        """
        Initialize Builder state.

        fileName is the path of the initial mission file to build from.
        mode, metas, preloads, behaviors default to fresh lists when not
        provided (avoids the shared-mutable-default pitfall).
        """
        self.fileName = fileName #initial name of file to start building from
        self.mode = mode or []  # build mode options
        self.metas = metas or []  # list of (name, path, data) meta triples
        self.preloads = preloads or []  # list of (path, data) preload pairs
        self.behaviors = behaviors or []  # behavior module names to import
        self.files = [] #list of open file objects, appended to by load commands
        self.counts = [] #list of linectr s for open file objects
        self.houses = [] #list of houses
        self.currentFile = None  # file object currently being parsed
        self.currentCount = 0  # line counter for current file
        self.currentHuman = '' # human friendly version of current line
        self.currentMode = None # None is any
        self.currentHouse = None  # House most recently created by buildHouse
        self.currentStore = None  # data store of current house
        self.currentLogger = None  # Logger most recently created
        self.currentLog = None  # Log most recently created
        self.currentFramer = None  # Framer most recently created
        self.currentFrame = None # current frame
        self.currentContext = NATIVE  # action context; NATIVE presumably from globaling — confirm
def tokenize(self, line):
"""
Parse line and read and parse continuation lines if any and return tokens list.
"""
saveLines = []
saveLineViews = []
while line.endswith('\\\n'): # escaped newline continuation
line = line.rstrip()
saveLineViews.append("%04d %s" % (self.currentCount, line))
saveLines.append(line.rstrip('\\').strip())
line = self.currentFile.readline() #empty if end of file
self.currentCount += 1 #inc line counter
# process last line read as either only line or continuation line
line = line.rstrip()
saveLineViews.append("%04d %s" % (self.currentCount, line))
saveLines.append(line)
# join all saved into one line
lineView = "\n".join(saveLineViews)
line = " ".join(saveLines)
console.concise(lineView + '\n')
line = line.strip() #strips white space both ends
chunks = REO_Chunks.findall(line) # also chunks trailing comments
tokens = []
for chunk in chunks:
if chunk[0] == '#': # throw away chunk as comment
break
else:
tokens.append(chunk)
return tokens
def build(self, fileName='', mode=None, metas=None, preloads=None, behaviors=None):
"""
Allows building from multiple files. Essentially files list is stack of files
fileName is name of first file. Load commands in any files push (append) file onto files
until file completed loaded and then popped off
Each house's store is inited with the meta data in metas
"""
#overwrite default if truthy argument
if fileName:
self.fileName = fileName
if mode:
self.mode.extend[mode]
if metas:
self.metas.extend[metas]
if preloads:
self.preloads.extend[preloads]
if behaviors:
self.behaviors.extend[behaviors]
if self.behaviors: #import behavior package/module
for behavior in self.behaviors:
mod = importlib.import_module(behavior)
housing.House.Clear() #clear house registry
housing.ClearRegistries() #clear all the other registries
try: #IOError
self.fileName = os.path.abspath(self.fileName)
self.currentFile = open(self.fileName,"r")
self.currentCount = 0
try: #ResolveError
while self.currentFile:
line = self.currentFile.readline() # empty if end of file
self.currentCount += 1 # inc line counter
nextTokens = [] # for connective continuation
while (line):
if nextTokens: # parsed ahead but not continuation
tokens = nextTokens
nextTokens = []
else:
tokens = self.tokenize(line) # line and any continuations
if (not tokens): #empty line or comment only
line = self.currentFile.readline() # empty if end of file
self.currentCount += 1 # inc line counter
continue # guarantees at least 1 token
# verbs like load which change file context can not be continued
if tokens[0] not in ('load'): # verb allows connective continuation
while True: # iteratively attempt connective continuation
# Connective continuation
# adds lines that start with connective
# skips empty or comment lines
# stops on line starting with non connective verb
line = self.currentFile.readline() # empty if end of file
self.currentCount += 1 # inc line counter
if not line: # end of file
break
nextTokens = self.tokenize(line) # parse ahead
if nextTokens and nextTokens[0] not in Reserved: # not connective
break # do not continue
if nextTokens:
tokens.extend(nextTokens) # add continuation
nextTokens = []
self.currentHuman = ' '.join(tokens)
try: #ParseError ParseWarning
if not self.dispatch(tokens): # catches dispatches the return unexpectedly
console.terse("Script Parsing stopped at line {0} in file {1}\n".format(
self.currentCount, self.currentFile.name))
console.terse(self.currentHuman + '\n')
return False
except excepting.ParseError as ex:
console.terse("\n{0}\n\n".format(ex))
console.terse("Script line {0} in file {1}\n".format(
self.currentCount, self.currentFile.name))
console.terse(self.currentHuman + '\n')
raise
#dispatch evals commands. self.currentFile may be changed by load command
if not nextTokens:
line = self.currentFile.readline() #empty if end of file
self.currentCount += 1 #inc line counter
self.currentFile.close()
if self.files:
self.currentFile = self.files.pop()
self.currentCount = self.counts.pop()
console.terse("Resume loading from file {0}.\n".format(self.currentFile.name))
else:
self.currentFile = None
#building done so now resolve links and collect actives inactives
for house in self.houses:
house.orderTaskables()
house.resolve()
if console._verbosity >= console.Wordage.concise:
house.showAllTaskers()
#show framework hierarchiy
for framer in house.framers:
framer.showHierarchy()
#show hierarchy of each house's store
console.concise( "\nData Store for {0}\n".format(house.name))
house.store.expose(valued=(console._verbosity >= console.Wordage.terse))
return True
except excepting.ResolveError as ex:
console.terse("{0}\n".format(ex))
return False
except IOError as ex:
console.terse("Error opening mission file {0}\n".format(ex))
return False
finally:
for f in self.files:
if not f.closed:
f.close()
def dispatch(self, tokens):
"""
Converts declaration verb into build method name and calls it
"""
verb = tokens[0]
index = 1
if verb not in VerbList:
msg = "ParseError: Building {0}. Unknown verb {1}, index = {2} tokens = {3}".format(
verb, verb, index, tokens)
raise excepting.ParseError(msg, tokens, index)
verbMethod = 'build' + verb.capitalize()
if hasattr(self, verbMethod):
return(getattr(self, verbMethod )(verb, tokens, index))
else:
return self.buildGeneric(verb, tokens, index)
def buildGeneric(self, verb, tokens, index):
"""
Called when no build method exists for a verb
"""
msg = "ParseError: No build method for verb {0}.".format(verb)
raise excepting.ParseError(msg, tokens, index)
    def buildLoad(self, command, tokens, index):
        """
        Build 'load' verb: switch parsing to another mission file.

        load filepathname

        Pushes the current file object and its line counter onto the stacks,
        resolves filepathname relative to the directory of the current file,
        and makes the newly opened file current.
        Raises ParseError on missing or unused tokens.
        """
        try:
            name = tokens[index]
            index +=1

            self.files.append(self.currentFile) #push currentFile
            self.counts.append(self.currentCount) #push current line ct

            cwd = os.getcwd() #save current working directory
            os.chdir(os.path.split(self.currentFile.name)[0]) # set cwd to current file
            name = os.path.abspath(os.path.expanduser(name)) # resolve name if relpath to cwd
            os.chdir(cwd) #restore old cwd

            self.currentFile = open(name,"r")
            self.currentCount = 0
            console.terse("Loading from file {0}.\n".format(self.currentFile.name))

        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        return True
#House specific builders
    def buildHouse(self, command, tokens, index):
        """Create a new house and make it the current one
           house dreams

           Creates the House (and its store), assigns registries, resets the
           current framer/frame/logger/log, then installs the meta triples
           and preload pairs as shares in the new store.
           Raises ParseError on missing or unused tokens.
        """
        try:
            name = tokens[index]
            index +=1
            self.verifyName(name, command, tokens, index)

            self.currentHouse = housing.House(name = name) #also creates .store
            self.houses.append(self.currentHouse)
            self.currentStore = self.currentHouse.store
            console.terse(" Created House '{0}'. Assigning registries and "
                          "creating instances ...\n".format(name))
            self.currentHouse.assignRegistries()

            console.profuse(" Clearing current Framer, Frame, Log etc.\n")
            #changed store so need to make new frameworks and frames
            self.currentFramer = None #current framer
            self.currentFrame = None #current frame
            self.currentLogger = None #current logger
            self.currentLog = None #current log

            #meta data in metas is list of triples of (name, path, data)
            # NOTE: loop variable 'name' intentionally reused; house name no longer needed
            for name, path, data in self.metas:
                self.currentHouse.metas[name] = self.initPathToData(path, data)

            # set .meta.house to house.name
            self.currentHouse.metas['house'] = self.initPathToData('.meta.house',
                                                                   odict(value=self.currentHouse.name))

            for path, data in self.preloads:
                self.initPathToData(path, data)

        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)

        msg = " Built House '{0}' with meta:\n".format(self.currentHouse.name)
        for name, share in self.currentHouse.metas.items():
            msg += " {0}: {1!r}\n".format(name, share)
        console.terse(msg)

        msg = " Built House '{0}' with preload:\n".format(self.currentHouse.name)
        for path, data in self.preloads:
            share = self.currentHouse.store.fetch(path)
            msg += " {0}: {1!r}\n".format(path, share)
        console.terse(msg)

        return True
# Convenience Functions
    def initPathToData(self, path, data):
        """Convenience support function to preload meta data.
           Initialize share given by path with data.
           Assumes self.currentStore is valid
           path is share path string
           data is ordered dict of data

           Returns the created (or fetched) share after updating it with data.
        """
        share = self.currentStore.create(path)
        # tokens/index are None since this is not driven by a script line
        self.verifyShareFields(share, data.keys(), None, None)
        share.update(data)
        return share
#Store specific builders
    def buildInit(self, command, tokens, index):
        """Initialize share in current store
           init destination with data
           init indirect from source

           destination:
              absolute
              path

           data:
              direct

           indirect:
              [(value, fields) in] absolute
              [(value, fields) in] path

           source:
              [(value, fields) in] absolute
              [(value, fields) in] path

           Raises ParseError when there is no current store, the connective is
           unknown, the source share does not exist, or tokens are missing/unused.
        """
        if not self.currentStore:
            msg = "ParseError: Building verb '%s'. No current store" % (command)
            raise excepting.ParseError(msg, tokens, index)

        try:
            destinationFields, index = self.parseFields(tokens, index)
            destinationPath, index = self.parsePath(tokens, index)
            if self.currentStore.fetchShare(destinationPath) is None:
                console.terse(" Warning: Init of non-preexistent share {0} ..."
                              " creating anyway\n".format(destinationPath))
            destination = self.currentStore.create(destinationPath)

            connective = tokens[index]
            index += 1

            if connective in ('with', 'to'): # to form deprecated eventually remove
                if connective == 'to':
                    console.terse("Warning: Connective 'to' in 'init' verb depricated. Use 'with' instead.\n")

                if destinationFields: #fields not allowed so error
                    msg = "ParseError: Building verb '%s'. Unexpected fields '%s in' clause " %\
                        (command, destinationFields)
                    raise excepting.ParseError(msg, tokens, index)

                data, index = self.parseDirect(tokens, index)

                #prevent init value and non value fields in same share
                self.verifyShareFields(destination, data.keys(), tokens, index)
                destination.update(data)
                console.profuse(" Inited share {0} to data = {1}\n".format(destination.name, data))

            elif connective in ('from', ):
                sourceFields, index = self.parseFields(tokens, index)
                sourcePath, index = self.parsePath(tokens, index)

                source = self.currentStore.fetchShare(sourcePath)
                if source is None:
                    msg = "ParseError: Building verb '%s'. Nonexistent source share '%s'" %\
                        (command, sourcePath)
                    raise excepting.ParseError(msg, tokens, index)

                # reconcile field lists so source and destination pair up one-to-one
                sourceFields, destinationFields = self.prepareSrcDstFields(source,
                                                                           sourceFields,
                                                                           destination,
                                                                           destinationFields,
                                                                           tokens,
                                                                           index)
                data = odict()
                # NOTE(review): izip presumably a py2/3 compat alias for zip — confirm at file top
                for sf, df in izip(sourceFields, destinationFields):
                    data[df] = source[sf]
                destination.update(data)
                msg = " Inited share {0} from source {1} with data = {2}\n".format(
                    destination.name, source.name, data)
                console.profuse(msg)

            else:
                msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)

        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        return True
    def buildServer(self, command, tokens, index):
        """create server tasker in current house
           server has to have name so can ask stop

           server name [at period] [be scheduled]
                  [rx shost:sport] [tx dhost:dport] [in order] [to prefix]
                  [per data] [for source]

           scheduled: (active, inactive, slave)
           rx: (host:port, :port, host:, host, :)
           tx: (host:port, :port, host:, host, :)
           order: (front, mid, back)
           prefix: filepath
           data: direct
           source: [(value, fields) in] indirect

           Raises ParseError on bad options or missing/unused tokens.
        """
        if not self.currentHouse:
            msg = "ParseError: Building verb '%s'. No current house" % (command)
            raise excepting.ParseError(msg, tokens, index)

        if not self.currentStore:
            msg = "ParseError: Building verb '%s'. No current store" % (command)
            raise excepting.ParseError(msg, tokens, index)

        try:
            parms = {}
            init = {}  # keyword overrides gathered from 'per' and 'for' clauses
            name = ''
            connective = None
            period = 0.0
            prefix = './'
            schedule = ACTIVE #globaling.py
            order = MID #globaling.py
            rxa = ''  # raw rx host:port text
            txa = ''  # raw tx host:port text
            sha = ('', 54321) #empty host means any interface on local host
            dha = ('localhost', 54321)

            name = tokens[index]
            index +=1

            while index < len(tokens): #options
                connective = tokens[index]
                index += 1

                if connective == 'at':
                    period = abs(Convert2Num(tokens[index]))
                    index +=1

                elif connective == 'to':
                    prefix = tokens[index]
                    index +=1

                elif connective == 'be':
                    option = tokens[index]
                    index +=1

                    if option not in ['active', 'inactive', 'slave']:
                        msg = "ParseError: Building verb '%s'. Bad server scheduled option got %s" % \
                            (command, option)
                        raise excepting.ParseError(msg, tokens, index)

                    schedule = ScheduleValues[option] #replace text with value

                elif connective == 'in':
                    order = tokens[index]
                    index +=1

                    if order not in OrderValues:
                        msg = "ParseError: Building verb '%s'. Bad order option got %s" % \
                            (command, order)
                        raise excepting.ParseError(msg, tokens, index)

                    order = OrderValues[order] #convert to order value

                elif connective == 'rx':
                    rxa = tokens[index]
                    index += 1

                elif connective == 'tx':
                    txa = tokens[index]
                    index += 1

                elif connective == 'per':
                    data, index = self.parseDirect(tokens, index)
                    init.update(data)

                elif connective == 'for':
                    srcFields, index = self.parseFields(tokens, index)
                    srcPath, index = self.parsePath(tokens, index)
                    if self.currentStore.fetchShare(srcPath) is None:
                        console.terse(" Warning: Init 'with' non-existent share {0}"
                                      " ... creating anyway".format(srcPath))
                    src = self.currentStore.create(srcPath)
                    #assumes src share inited before this line parsed
                    for field in srcFields:
                        init[field] = src[field]

                else:
                    msg = "ParseError: Building verb '%s'. Bad connective got %s" % \
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)

        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        prefix += '/' + self.currentHouse.name #extra slashes are ignored

        # apply rx/tx address overrides, keeping defaults for omitted halves
        if rxa:
            if ':' in rxa:
                host, port = rxa.split(':')
                sha = (host, int(port))
            else:
                sha = (rxa, sha[1])

        if txa:
            if ':' in txa:
                host, port = txa.split(':')
                dha = (host, int(port))
            else:
                dha = (txa, dha[1])

        server = serving.Server(name=name, store = self.currentStore,)
        kw = dict(period=period, schedule=schedule, sha=sha, dha=dha, prefix=prefix,)
        kw.update(init)
        server.reinit(**kw)

        self.currentHouse.taskers.append(server)
        if schedule == SLAVE:
            self.currentHouse.slaves.append(server)
        else: #taskable active or inactive
            if order == FRONT:
                self.currentHouse.fronts.append(server)
            elif order == BACK:
                self.currentHouse.backs.append(server)
            else:
                self.currentHouse.mids.append(server)

        # NOTE(review): format string uses {0},{2},{3}; positional arg {1} (name)
        # is deliberately skipped (duplicate of server.name) — harmless but odd
        msg = " Created server named {0} at period {2:0.4f} be {3}\n".format(
            server.name, name, server.period, ScheduleNames[server.schedule])
        console.profuse(msg)

        return True
#Logger specific builders
    def buildLogger(self, command, tokens, index):
        """
        Create logger in current house
        logger logname [to prefix] [at period] [be scheduled]
                       [flush interval] [keep copies] [cycle term] [size bytes]

        scheduled: (active, inactive, slave)
        period   seconds
        interval seconds
        term     seconds
        copies   integer
        bytes    bytes

        logger basic at 0.125
        logger basic

        Makes the created logger current. Raises ParseError on bad options,
        duplicate logger name, or missing/unused tokens.
        """
        if not self.currentHouse:
            # extra format args beyond {0} are ignored by str.format
            msg = "ParseError: Building verb '{0}'. No current house.".format(
                command, index, tokens)
            raise excepting.ParseError(msg, tokens, index)

        if not self.currentStore:
            msg = "ParseError: Building verb '{0}'. No current store.".format(
                command, index, tokens)
            raise excepting.ParseError(msg, tokens, index)

        try:
            name = tokens[index]
            index +=1

            period = 0.0 #default
            schedule = ACTIVE #globaling.py
            order = MID #globaling.py
            interval = 30.0  # default flush period seconds
            prefix = './'
            keep = 0  # number of rotated copies to keep
            term = 3600.0  # default cycle period seconds
            size = 1024 # default rotate size is 1024 bytes = 1KB
            reuse = False # non-unique logger directory name if True

            while index < len(tokens): #options
                connective = tokens[index]
                index += 1

                if connective == 'at':
                    period = abs(Convert2Num(tokens[index]))
                    index +=1

                elif connective == 'to': # base directory path for log files
                    prefix = tokens[index] # house name is post pended as sub directory
                    index +=1

                elif connective == 'be':
                    option = tokens[index]
                    index +=1

                    if option not in ['active', 'inactive', 'slave']:
                        msg = "Error building %s. Bad logger scheduled option got %s." %\
                            (command, option)
                        raise excepting.ParseError(msg, tokens, index)

                    schedule = ScheduleValues[option] #replace text with value

                elif connective == 'in':
                    order = tokens[index]
                    index +=1

                    if order not in OrderValues:
                        msg = "Error building %s. Bad order got %s." %\
                            (command, order)
                        raise excepting.ParseError(msg, tokens, index)

                    order = OrderValues[order] #convert to order value

                elif connective == 'flush':
                    interval = max(1.0, abs(Convert2Num(tokens[index])))
                    index +=1

                elif connective == 'keep':
                    keep = max(0, int(Convert2Num(tokens[index])))
                    index +=1

                elif connective == 'cycle':
                    term = max(0.0, abs(Convert2Num(tokens[index])))
                    index +=1

                elif connective == 'size':
                    size = max(0, abs(Convert2Num(tokens[index])))
                    index +=1

                elif connective == 'reuse':
                    reuse = True

                else:
                    msg = "Error building %s. Bad connective got %s." %\
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)

            if name in logging.Logger.Names:  # logger names must be unique in registry
                msg = "Error building %s. Task %s already exists." %\
                    (command, name)
                raise excepting.ParseError(msg, tokens, index)

            logger = logging.Logger(name=name,
                                    store=self.currentStore,
                                    period=period,
                                    flushPeriod=interval,
                                    prefix=prefix,
                                    keep=keep,
                                    cyclePeriod=term,
                                    fileSize=size,
                                    reuse=reuse)
            logger.schedule = schedule

            self.currentHouse.taskers.append(logger)
            if schedule == SLAVE:
                self.currentHouse.slaves.append(logger)
            else: #taskable active or inactive
                if order == FRONT:
                    self.currentHouse.fronts.append(logger)
                elif order == BACK:
                    self.currentHouse.backs.append(logger)
                else:
                    self.currentHouse.mids.append(logger)

            self.currentLogger = logger

            console.profuse(" Created logger named {0} at period {1:0.4f} be {2}\n".format(
                logger.name, logger.period, ScheduleNames[logger.schedule]))

        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        return True
    def buildLog(self, command, tokens, index):
        """
        Create log in current logger
        log name [to fileName] [as (text, binary)] [on rule]

        rule: (once, never, always, update, change, streak, deck)
        default fileName is log's name
        default type is text
        default rule is never
        for manual logging use tout command with rule once or never

        log autopilot text to './logs/' on update

        Makes the created log current. Raises ParseError on bad options,
        duplicate log or file name, or missing/unused tokens.
        """
        if not self.currentLogger:
            msg = "Error building %s. No current logger." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        if not self.currentStore:
            msg = "Error building %s. No current store." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        try:
            kind = 'text'
            fileName = ''
            rule = NEVER  # default log rule

            name = tokens[index]
            index +=1

            while index < len(tokens): #options
                connective = tokens[index]
                index += 1

                if connective == 'as':
                    kind = tokens[index]
                    index +=1

                    if kind not in ['text', 'binary']:
                        msg = "Error building %s. Bad kind = %s." %\
                            (command, kind)
                        raise excepting.ParseError(msg, tokens, index)

                elif connective == 'to':
                    fileName = tokens[index]
                    index +=1

                elif connective == 'on':
                    rule = tokens[index].capitalize()  # rule keys are capitalized
                    index +=1

                    if rule not in LogRuleValues:
                        msg = "Error building %s. Bad rule = %s." %\
                            (command, rule)
                        raise excepting.ParseError(msg, tokens, index)

                    rule = LogRuleValues[rule]

                else:
                    msg = "Error building %s. Bad connective got %s." %\
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)

            if name in logging.Log.Names: # check if instance name in Registrar
                msg = "Error building %s. Log named %s already exists." %\
                    (command, name)
                raise excepting.ParseError(msg, tokens, index)

            if fileName:  # explicit file names must be unique within the logger
                for log in self.currentLogger.logs:
                    if fileName == log.baseFilename:
                        msg = ("Error building {0}. Log named {1} file named {2} "
                               "already exists.".format(command, name, fileName))
                        raise excepting.ParseError(msg, tokens, index)

            log = logging.Log(name=name,
                              store=self.currentStore,
                              kind=kind,
                              baseFilename=fileName,
                              rule=rule)

            self.currentLogger.addLog(log)
            self.currentLog = log

            console.profuse(" Created log named {0} kind {1} file {2} rule {3}\n".format(
                name, kind, fileName, LogRuleNames[rule]))

        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)

        return True
    def buildLoggee(self, command, tokens, index):
        """
        Add loggee(s) to current log
        Syntax:
           loggee [fields in] path [as tag] [[fields in] path [as tag]] ...

        path: share path
        fields: field list
        If fields not provided use all fields
        If tag not provide use last segment of path as tag

        If log rule is streak then only one loggee per log is allowed and only
        the first field from fields clause is used.
        Syntax:
           log name on streak
           loggee [fields in] path [as tag]

        If log rule is deck then only one loggee per log is allowed and
        fields clause is required.
        Syntax:
           log name on deck
           loggee fields in path [as tag]

        Raises ParseError on invalid path/tag, duplicate tag, or
        missing/unused tokens.
        """
        if not self.currentLog:
            msg = "Error building %s. No current log." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        if not self.currentStore:
            msg = "Error building %s. No current store." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        try:
            while index < len(tokens): # each iteration consumes one loggee clause
                tag = ""
                fields, index = self.parseFields(tokens, index)
                path = tokens[index]
                index +=1

                if path in Reserved:
                    msg = "ParseError: Invalid path '{0}' using reserved".format(path)
                    raise excepting.ParseError(msg, tokens, index)

                if not (REO_DotPath.match(path) or REO_RelPath.match(path)):
                    #valid absolute or relative path segment without relation clause
                    msg = "ParseError: Invalid path format'{0}'".format(path)
                    raise excepting.ParseError(msg, tokens, index)

                parts = path.split(".")
                if "me" in parts:  # 'me' framer-relative paths have no meaning here
                    msg = "ParseError: Invalid path format'{0}', 'me' undefined".format(path)
                    raise excepting.ParseError(msg, tokens, index)

                if index < len(tokens):
                    connective = tokens[index]
                    if connective == 'as':
                        index += 1 # eat token
                        tag = tokens[index]
                        if tag in Reserved:
                            msg = "ParseError: Invalid tag '{0}' using reserved".format(tag)
                            raise excepting.ParseError(msg, tokens, index)
                        tag = StripQuotes(tag)
                        index += 1

                if not tag:  # default tag is last path segment
                    tag = parts[-1]

                share = self.currentStore.create(path) #create so no errors at runtime

                if not isinstance(share, storing.Share): #verify path ends in share not node
                    msg = "Error building %s. Loggee path %s not Share." % (command, path)
                    raise excepting.ParseError(msg, tokens, index)

                if tag in self.currentLog.loggees:
                    msg = "Error building %s. Loggee %s already exists in Log %s." %\
                        (command, tag, self.currentLog.name)
                    raise excepting.ParseError(msg, tokens, index)

                if self.currentLog.rule in (STREAK, DECK) and self.currentLog.loggees:
                    # only one loggee allowed when rule is streak or deck
                    msg = ("Error building {0}. Only one loggee allowed when "
                           "rule is streak or deck.".format(command))
                    raise excepting.ParseError(msg, tokens, index)

                self.currentLog.addLoggee(tag=tag, loggee=share, fields=fields)
                console.profuse(" Added loggee {0} with tag {1} fields {2}\n".format(
                    share.name, tag, fields))

        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        return True
#Framework specific builders
    def buildFramer(self, command, tokens, index):
        """Create a new framer and make it the current one
           framer framername [be (active, inactive, aux, slave)] [at period]
                             [first frame] [via inode]

           framer framername be active at 0.0
           framer framername

           Registers the framer with the current house by schedule and order,
           assigns the frame registry, and clears the current frame.
           Raises ParseError on bad options, duplicate name, or
           missing/unused tokens.
        """
        if not self.currentHouse:
            msg = "Error building %s. No current house." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        if not self.currentStore:
            msg = "Error building %s. No current store." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        try:
            name = tokens[index]
            index +=1
            self.verifyName(name, command, tokens, index)

            schedule = INACTIVE #globaling.py
            order = MID #globaling.py
            period = 0.0
            frame = ''  # name of first frame, resolved later
            inode = ''  # data-store node prefix for this framer

            while index < len(tokens): #options
                connective = tokens[index]
                index += 1

                if connective == 'at':
                    period = max(0.0, Convert2Num(tokens[index]))
                    index +=1

                elif connective == 'be':
                    option = tokens[index]
                    index +=1

                    if option not in ScheduleValues:
                        msg = "Error building %s. Bad scheduled option got %s." %\
                            (command, option)
                        raise excepting.ParseError(msg, tokens, index)

                    schedule = ScheduleValues[option] #replace text with value

                elif connective == 'in':
                    order = tokens[index]
                    index +=1

                    if order not in OrderValues:
                        msg = "Error building %s. Bad order got %s." %\
                            (command, order,)
                        raise excepting.ParseError(msg, tokens, index)

                    order = OrderValues[order] #convert to order value

                elif connective == 'first':
                    frame = tokens[index]
                    index +=1
                    self.verifyName(frame, command, tokens, index)

                elif connective == 'via':
                    inode, index = self.parseIndirect(tokens, index, node=True)

                else:
                    msg = "Error building %s. Bad connective got %s." %\
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)

            if name in framing.Framer.Names:  # framer names must be unique in registry
                msg = "Error building %s. Framer or Task %s already exists." %\
                    (command, name)
                raise excepting.ParseError(msg, tokens, index)
            else:
                framer = framing.Framer(name = name,
                                        store = self.currentStore,
                                        period = period)
                framer.schedule = schedule
                framer.first = frame #need to resolve later
                framer.inode = inode

                self.currentHouse.taskers.append(framer)
                self.currentHouse.framers.append(framer)
                if schedule == SLAVE:
                    self.currentHouse.slaves.append(framer)
                elif schedule == AUX:
                    self.currentHouse.auxes.append(framer)
                elif schedule == MOOT:
                    self.currentHouse.moots.append(framer)
                else: #taskable active or inactive
                    if order == FRONT:
                        self.currentHouse.fronts.append(framer)
                    elif order == BACK:
                        self.currentHouse.backs.append(framer)
                    else:
                        self.currentHouse.mids.append(framer)

                self.currentFramer = framer
                self.currentFramer.assignFrameRegistry()
                self.currentFrame = None #changed current Framer so no current Frame

                console.profuse(" Created Framer named '{0}' at period {1:0.4f} be {2} first {3}\n".format(
                    framer.name, framer.period, ScheduleNames[framer.schedule], framer.first))
                console.profuse(" Added Framer '{0}' to House '{1}', Assigned frame registry\n".format(
                    framer.name, self.currentHouse.name))

        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)

        return True
def buildFirst(self, command, tokens, index):
    """Set the first (starting) frame for the current framer.

       Syntax:
           first framename
    """
    if not self.currentFramer:
        emsg = "Error building %s. No current framer." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    try:
        frameName = tokens[index]
        index += 1
        self.verifyName(frameName, command, tokens, index)
        self.currentFramer.first = frameName  # name only, resolved to Frame later
        console.profuse("     Assigned first frame {0} for framework {1}\n".format(
            frameName, self.currentFramer.name))
    except IndexError:
        emsg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    if index != len(tokens):
        emsg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    return True
#Frame specific builders
def buildFrame(self, command, tokens, index):
    """Create frame and attach to over frame if indicated

       Syntax:
           frame framename [in over] [via inode]

       framename cannot be "next" which is reserved

       Side effects on success:
           links previous lexical frame's next_ to this frame if unset,
           sets framer.first to this frame if unset,
           makes this frame the current frame and resets context to NATIVE.

       Returns True on success. Raises excepting.ParseError on failure.
    """
    if not self.currentStore:
        msg = "Error building %s. No current store." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if not self.currentFramer:
        msg = "Error building %s. No current framer." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    inode = ''  # default data node path when no 'via' clause given
    try:
        name = tokens[index]
        index +=1
        self.verifyName(name, command, tokens, index)
        over = None
        while index < len(tokens): #options
            connective = tokens[index]
            index += 1
            if connective == 'in':  # parent (over) frame clause
                over = tokens[index]
                index +=1
            elif connective == 'via':  # inode clause
                inode, index = self.parseIndirect(tokens, index, node=True)
            else:
                msg = "Error building %s. Bad connective got %s." % (command, connective)
                raise excepting.ParseError(msg, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if name in ReservedFrameNames:
        msg = "Error building %s in Framer %s. Frame name %s reserved." %\
            (command, self.currentFramer.name, name)
        raise excepting.ParseError(msg, tokens, index)
    elif name in framing.Frame.Names: #could use Registry Retrieve function
        msg = "Error building %s in Framer %s. Frame %s already exists." %\
            (command, self.currentFramer.name, name)
        raise excepting.ParseError(msg, tokens, index)
    else:
        frame = framing.Frame(name=name, store = self.currentStore,
                              framer=self.currentFramer.name,
                              inode=inode)
        if over:
            frame.over = over #need to resolve later
        #if previous frame did not have explicit next frame then use this new frame
        # ad next lexically
        if self.currentFrame and not self.currentFrame.next_:
            self.currentFrame.next_ = frame.name
        #default first frame is first lexical frame if not assigned otherwise
        #so if startFrame is none then we must be first lexical frame
        if not self.currentFramer.first: #frame.framer.first:
            self.currentFramer.first = frame.name #frame.framer.first = frame
        self.currentFrame = frame
        self.currentContext = NATIVE  # new frame starts in native action context
        console.profuse("     Created frame {0} with over {1}\n".format(frame.name, over))
    return True
def buildOver(self, command, tokens, index):
    """Make the named frame the over (parent) frame of the current frame.

       Syntax:
           over frame
    """
    self.verifyCurrentContext(tokens, index)  # need current store, framer, frame
    try:
        overName = tokens[index]
        index += 1
        self.verifyName(overName, command, tokens, index)
    except IndexError:
        emsg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    if index != len(tokens):
        emsg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    self.currentFrame.over = overName  # name only; resolved and attached later
    console.profuse("     Assigned over {0} to frame {1}\n".format(
        overName, self.currentFrame.name))
    return True
def buildUnder(self, command, tokens, index):
    """Makes frame the primary under frame of the current frame

       Syntax:
           under frame

       The primary under is unders[0]. The named frame is moved to (or
       placed at) position 0 of the current frame's unders list, removing
       any duplicate entries first.

       Returns True on success. Raises excepting.ParseError on failure.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        under = tokens[index]
        index +=1
        self.verifyName(under, command, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    unders = self.currentFrame.unders  # mutated in place below
    if not unders: #empty so just append
        unders.append(under)
    elif under != unders[0]: #not already primary
        while under in unders: #remove under (in case multiple copies shouldnt be)
            unders.remove(under)
        # unders[0] survives the removal loop since unders[0] != under
        if isinstance(unders[0], framing.Frame): #should not be but if valid don't overwrite
            unders.insert(0, under)
        else: #just name so overwrite
            unders[0] = under
    else: #under == unders[0] already so do nothing
        pass
    console.profuse("     Assigned primary under {0} for frame {1}\n".format(
        under,self.currentFrame.name))
    return True
def buildNext(self, command, tokens, index):
    """Explicitly assign the next frame, used for timeouts and as the
       target of 'go next'.

       Syntax:
           next frameName
           next

       A bare 'next' (no frame name) means use the lexically next frame;
       this allows overriding back to the default after earlier next
       commands.
    """
    self.verifyCurrentContext(tokens, index)  # need current store, framer, frame
    try:
        if index < len(tokens):  # frame name is optional
            nxt = tokens[index]
            index += 1
            self.verifyName(nxt, command, tokens, index)
        else:
            nxt = None
    except IndexError:
        emsg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    if index != len(tokens):
        emsg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    self.currentFrame.next_ = nxt
    console.profuse("     Assigned next frame {0} for frame {1}\n".format(
        nxt, self.currentFrame.name))
    return True
def buildAux(self, command, tokens, index):
    """Parse 'aux' command for simple, cloned, or conditional aux of forms
       Simple Auxiliary:
          aux framername
       Cloned Auxiliary:
          aux framername as (mine, clonedauxname) [via (main, mine, inode)]
       Simple Conditional Auxiliary:
          aux framername if [not] need
          aux framername if [not] need [and [not] need ...]
       Cloned Conditional Auxiliary:
          aux framername as (mine, clonedauxname) [via inode]
              if [not] need
          aux framername as (mine, clonedauxname) [via inode]
              if [not] need [and [not] need ...]

       Returns True on success, False when makeNeed fails.
       Raises excepting.ParseError on bad syntax or when a conditional
       aux is combined with a clone (not allowed).
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        needs = []  # need acts for conditional aux
        aux = None # original
        connective = None
        clone = None  # clone tag name when 'as' clause given
        inode = ''
        insular = False  # True when clone tag auto-generated via 'as mine'
        aux = tokens[index]
        index +=1 #eat token
        self.verifyName(aux, command, tokens, index)
        while index < len(tokens): #options
            connective = tokens[index]
            index += 1
            if connective == 'as':
                clone = tokens[index]
                index += 1
                self.verifyName(clone, command, tokens, index)
            elif connective == 'via':
                inode, index = self.parseIndirect(tokens, index, node=True)
            elif connective == 'if':
                while (index < len(tokens)):
                    act, index = self.makeNeed(tokens, index)
                    if not act:
                        return False # something wrong do not know what
                    needs.append(act)
                    if index < len(tokens):
                        connective = tokens[index]
                        if connective not in ['and']:
                            msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                                (command, connective)
                            raise excepting.ParseError(msg, tokens, index)
                        index += 1 #otherwise eat token
            else:
                msg = ("Error building {0}. Invalid connective"
                       " '{1}'.".format(command, connective))
                raise excepting.ParseError(msg, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if clone and needs:
        msg = "Error building %s. Conditional auxilary may not be clone." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if clone:
        if clone == 'mine':  # generate unique insular (private) clone tag
            clone = self.currentFramer.newMootTag(base=aux)
            insular = True
        if clone in self.currentFramer.moots:
            msg = ("Error building {0}. Aux/Clone tag '{1}' "
                   "already in use.".format(command, clone))
            raise excepting.ParseError(msg, tokens, index)
        data = odict(original=aux,
                     clone=clone,
                     schedule=AUX,
                     human=self.currentHuman,
                     count=self.currentCount,
                     inode=inode,
                     insular=insular)
        self.currentFramer.moots[clone] = data # need to resolve early
        aux = odict(tag=clone) # mapping indicates that its a clone
        # assign aux to mapping with clone tag name as original aux is to be cloned
        # named clone create clone when resolve framer.moots so may be referenced
        # named clones must be resolved before any frames get resolved
        # and are added to the class Framer.names so they can be referenced
        # resolved by house.resolve -> house.presolvePresolvables
        #     -> framer.presolve -> framer.resolveMoots
        # resolveMoots adds new resolveable framers to house.presolvables
        # self.store.house.presolvables.append(clone)
    if needs: # conditional auxiliary suspender preact
        human = ' '.join(tokens) #recreate transition command string for debugging
        #resolve aux link later
        parms = dict(needs = needs, main = 'me', aux = aux, human = human)
        act = acting.Act( actor='Suspender',
                          registrar=acting.Actor,
                          parms=parms,
                          human=self.currentHuman,
                          count=self.currentCount)
        self.currentFrame.addPreact(act)
        console.profuse("     Added suspender preact, '{0}', with aux"
                        " {1} needs:\n".format(command, aux))
        for need in needs:
            console.profuse("    {0} with parms = {1}\n".format(need.actor, need.parms))
    else: # simple auxiliary if aux is string then regular auz if aux is mapping then clone
        self.currentFrame.addAux(aux) #need to resolve later
        console.profuse("     Added aux framer {0}\n".format(aux))
    return True
def buildRear(self, command, tokens, index):
    """
       Parse 'rear' verb
       Two Forms:  only first form is currently supported
       rear original [as mine] [be aux] in frame framename
           framename cannot be me or in outline of me
       rear original as clonename be schedule
           schedule cannot be aux
           clonename cannot be mine

       Builds a 'Rearer' act whose actor clones the original framer at
       runtime. Native context is enter.

       Returns True on success. Raises excepting.ParseError on failure.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        original = None
        connective = None
        clone = 'mine' # default is insular clone
        schedule = 'aux' # default schedule is aux
        frame = 'me' # default frame is current
        original = tokens[index]
        index +=1 # eat token
        self.verifyName(original, command, tokens, index)
        while index < len(tokens): #options
            connective = tokens[index]
            index += 1
            if connective == 'as':
                clone = tokens[index]
                index += 1
                self.verifyName(clone, command, tokens, index)
            elif connective == 'be':
                schedule = tokens[index]
                index += 1
            elif connective == 'in': #optional in frame or in framer clause
                place = tokens[index] #need to resolve
                index += 1 # eat token
                if place != 'frame':
                    msg = ("ParseError: Building verb '{0}'. Invalid "
                           " '{1}' clause. Expected 'frame' got "
                           "'{2}'".format(command, connective, place))
                    raise excepting.ParseError(msg, tokens, index)
                if index < len(tokens):
                    frame = tokens[index]
                    index += 1
            else:
                msg = ("Error building {0}. Invalid connective"
                       " '{1}'.".format(command, connective))
                raise excepting.ParseError(msg, tokens, index)
    except IndexError:
        msg = "Error building {0}. Not enough tokens.".format(command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building {0}. Unused tokens.".format(command,)
        raise excepting.ParseError(msg, tokens, index)
    # only allow schedule of aux for now
    if schedule not in ScheduleValues or schedule not in ['aux']:
        msg = "Error building {0}. Bad scheduled option got '{1}'.".format(command, schedule)
        raise excepting.ParseError(msg, tokens, index)
    schedule = ScheduleValues[schedule] #replace text with value
    # when clone is insular and schedule is aux then frame cannot be
    # current frames outline. This is validated in the actor resolve
    if schedule == AUX:
        if clone != 'mine':
            msg = ("Error building {0}. Only insular clonename of"
                   " 'mine' allowed. Got '{1}'.".format(command, clone))
            raise excepting.ParseError(msg, tokens, index)
        if frame == 'me':
            # NOTE(review): the extra 'clone' argument to format below is unused
            msg = ("Error building {0}. Frame clause required.".format(command, clone))
            raise excepting.ParseError(msg, tokens, index)
    parms = dict(original=original,
                 clone=clone,
                 schedule=schedule,
                 frame=frame)
    actorName = 'Rearer'
    if actorName not in acting.Actor.Registry:
        msg = "Error building '{0}'. No actor named '{1}'.".format(command, actorName)
        raise excepting.ParseError(msg, tokens, index)
    act = acting.Act(actor=actorName,
                     registrar=acting.Actor,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    context = self.currentContext
    if context == NATIVE:
        context = ENTER # what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildRaze(self, command, tokens, index):
    """
       Parse 'raze' verb
       raze (all, last, first) [in frame [(me, framename)]]

       Builds a 'Razer' act that destroys reared clone framer(s).
       Native context is exit.

       Returns True on success. Raises excepting.ParseError on failure.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        connective = None
        who = None # which clone(s) to raze: 'all', 'first', or 'last'
        frame = 'me' # default frame is current
        who = tokens[index]
        index +=1 # eat token
        if who not in ['all', 'first', 'last']:
            # NOTE(review): format skips '{1}' so connective is unused here;
            # '{0}' is command and '{2}' is who
            msg = ("ParseError: Building verb '{0}'. Invalid target of"
                   " raze. Expected one of ['all', 'first', 'last'] but got "
                   "'{2}'".format(command, connective, who))
            raise excepting.ParseError(msg, tokens, index)
        while index < len(tokens): #options
            connective = tokens[index]
            index += 1
            if connective == 'in': #optional in frame or in framer clause
                place = tokens[index] #need to resolve
                index += 1 # eat token
                if place != 'frame':
                    msg = ("ParseError: Building verb '{0}'. Invalid "
                           " '{1}' clause. Expected 'frame' got "
                           "'{2}'".format(command, connective, place))
                    raise excepting.ParseError(msg, tokens, index)
                if index < len(tokens):
                    frame = tokens[index]
                    index += 1
            else:
                msg = ("Error building {0}. Invalid connective"
                       " '{1}'.".format(command, connective))
                raise excepting.ParseError(msg, tokens, index)
    except IndexError:
        msg = "Error building {0}. Not enough tokens.".format(command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building {0}. Unused tokens.".format(command,)
        raise excepting.ParseError(msg, tokens, index)
    parms = dict(who=who,
                 frame=frame)
    actorName = 'Razer'
    if actorName not in acting.Actor.Registry:
        msg = "Error building '{0}'. No actor named '{1}'.".format(command, actorName)
        raise excepting.ParseError(msg, tokens, index)
    act = acting.Act(actor=actorName,
                     registrar=acting.Actor,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    context = self.currentContext
    if context == NATIVE:
        context = EXIT # what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildDone(self, command, tokens, index):
    """
       Create a complete action that marks tasker(s) as completed
       by setting their .done state to True.
       Native context is enter.

       Syntax:
           done tasker [tasker ...]
           done [me]
       tasker:
           (taskername, me)
    """
    self.verifyCurrentContext(tokens, index)  # need current store, framer, frame
    try:
        kind = 'Done'
        taskers = []
        while index < len(tokens):
            tasker = tokens[index]
            index += 1
            self.verifyName(tasker, command, tokens, index)
            taskers.append(tasker)  # names only; resolved later
        if not taskers:
            taskers.append('me')  # default to current framer
    except IndexError:
        emsg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    if index != len(tokens):
        emsg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    actorName = 'Complete' + kind.capitalize()
    if actorName not in completing.Complete.Registry:
        emsg = "Error building complete %s. No actor named %s." %\
            (kind, actorName)
        raise excepting.ParseError(emsg, tokens, index)
    parms = dict(taskers=taskers)  # resolve later
    act = acting.Act(actor=actorName,
                     registrar=completing.Complete,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    context = self.currentContext
    if context == NATIVE:
        context = ENTER  # native context for this command
    if not self.currentFrame.addByContext(act, context):
        emsg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(emsg, tokens, index)
    console.profuse("     Created done complete {0} with {1}\n".format(act.actor, act.parms))
    return True
def buildTimeout(self, command, tokens, index):
    """Create implicit transition to next frame on elapsed >= value.

       Syntax:
           timeout 5.0

       Equivalent to:  go next if elapsed >= value

       Raises excepting.ParseError when the value token is missing or
       not a valid number, or when unused tokens remain.
    """
    self.verifyCurrentContext(tokens, index)
    try:
        value = Convert2Num(tokens[index]) #convert text to number if valid format
        index +=1
        # Check for a failed conversion BEFORE applying abs(): the
        # isinstance check shows Convert2Num yields a str on failure, and
        # abs(str) would raise TypeError, masking the intended ParseError.
        if isinstance(value, str):
            msg = "Error building %s. invalid timeout %s." %\
                (command, value)
            raise excepting.ParseError(msg, tokens, index)
        value = abs(value)  # timeout duration must be non-negative
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    # build need act comparing framer-relative elapsed time against goal
    need = self.makeImplicitDirectFramerNeed( name="elapsed",
                                              comparison='>=',
                                              goal=float(value),
                                              tolerance=0)
    needs = [need]
    # build transact
    human = ' '.join(tokens) #recreate transition command string for debugging
    far = 'next' #resolve far link later
    parms = dict(needs = needs, near = 'me', far = far, human = human)
    act = acting.Act(actor='Transiter',
                     registrar=acting.Actor,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    self.currentFrame.addPreact(act) #add transact as preact
    console.profuse("     Added timeout transition preact, '{0}', with far {1} needs:\n".format(
        command, far))
    for act in needs:
        console.profuse("    {0} with parms = {1}\n".format(act.actor, act.parms))
    return True
def buildRepeat(self, command, tokens, index):
    """Create implicit transition to next frame on recurred >= value.

       Syntax:
           repeat 2

       Equivalent to:  go next if recurred >= 2

       Raises excepting.ParseError when the value token is missing or
       not a valid number, or when unused tokens remain.
    """
    self.verifyCurrentContext(tokens, index)
    try:
        value = Convert2Num(tokens[index]) #convert text to number if valid format
        index +=1
        # Check for a failed conversion BEFORE applying abs(): the
        # isinstance check shows Convert2Num yields a str on failure, and
        # abs(str) would raise TypeError, masking the intended ParseError.
        if isinstance(value, str):
            msg = "Error building %s. invalid repeat %s." %\
                (command, value)
            raise excepting.ParseError(msg, tokens, index)
        value = abs(value)  # repeat count must be non-negative
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    # build need act comparing framer-relative recurrence count against goal
    need = self.makeImplicitDirectFramerNeed( name="recurred",
                                              comparison='>=',
                                              goal=int(value),
                                              tolerance=0)
    needs = [need]
    # build transact
    human = ' '.join(tokens) #recreate transition command string for debugging
    far = 'next' #resolve far link later
    parms = dict(needs = needs, near = 'me', far = far, human = human)
    act = acting.Act( actor='Transiter',
                      registrar=acting.Actor,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    self.currentFrame.addPreact(act) #add transact as preact
    console.profuse("     Added repeat transition preact, '{0}', with far {1} needs:\n".format(
        command, far))
    for act in needs:
        console.profuse("    {0} with parms = {1}\n".format(act.actor, act.parms))
    return True
def buildNative(self, command, tokens, index):
    """Switch the action context for the current frame to native."""
    context = NATIVE
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
def buildBenter(self, command, tokens, index):
    """Switch the action context for the current frame to benter."""
    context = BENTER
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
def buildEnter(self, command, tokens, index):
    """Switch the action context for the current frame to enter."""
    context = ENTER
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
def buildRenter(self, command, tokens, index):
    """Switch the action context for the current frame to renter."""
    context = RENTER
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
def buildPrecur(self, command, tokens, index):
    """Switch the action context for the current frame to precur."""
    context = PRECUR
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
def buildRecur(self, command, tokens, index):
    """Switch the action context for the current frame to recur."""
    context = RECUR
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
def buildExit(self, command, tokens, index):
    """Switch the action context for the current frame to exit."""
    context = EXIT
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
def buildRexit(self, command, tokens, index):
    """Switch the action context for the current frame to rexit."""
    context = REXIT
    self.currentContext = context
    console.profuse("     Changed context to {0}\n".format(ActionContextNames[context]))
    return True
#Frame Action specific builders
def buildPrint(self, command, tokens, index):
    """Build a Printer action that prints the space separated tokens.

       Syntax:
           print message
           print hello world

       Native context is enter. Returns True on success.
       Raises excepting.ParseError on bad context.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    # Slicing and str.join never raise IndexError, so the original
    # try/except IndexError around this was dead code; an empty or
    # one-token command simply yields the empty message.
    message = ' '.join(tokens[1:])
    parms = dict(message = message)
    act = acting.Act( actor='Printer',
                      registrar=acting.Actor,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    context = self.currentContext
    if context == NATIVE:
        context = ENTER #what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildPut(self, command, tokens, index):
    """Build put command to put data into share

       Syntax:
           put data into destination

       data:
          direct
       destination:
          [(value, fields) in] indirect

       Creates a PokeDirect act. Native context is enter.
       Returns True on success. Raises excepting.ParseError on failure.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        srcData, index = self.parseDirect(tokens, index)
        connective = tokens[index]
        index += 1
        if connective != 'into':
            msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                (command, connective)
            raise excepting.ParseError(msg, tokens, index)
        dstFields, index = self.parseFields(tokens, index)
        dstPath, index = self.parseIndirect(tokens, index)
    except IndexError:
        msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    actorName = 'Poke' + 'Direct' #capitalize second word
    if actorName not in poking.Poke.Registry:
        msg = "ParseError: Can't find actor named '%s'" % (actorName)
        raise excepting.ParseError(msg, tokens, index)
    parms = {}
    parms['sourceData'] = srcData # this is dict
    parms['destination'] = dstPath # this is a share path
    parms['destinationFields'] = dstFields # this is a list
    act = acting.Act( actor=actorName,
                      registrar=poking.Poke,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    msg = "     Created Actor {0} parms: data = {1} destination = {2} fields = {3} ".format(
        actorName, srcData, dstPath, dstFields)
    console.profuse(msg)
    context = self.currentContext
    if context == NATIVE:
        context = ENTER #what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildInc(self, command, tokens, index):
    """Build inc command to inc share by data or from source

       Syntax:
           inc destination with data
           inc destination from source

       destination:
          [(value, field) in] indirect
       data:
          directone
       source:
          [(value, field) in] indirect

       'with' increments by a literal numeric value; 'from' increments
       by the value read from another share. Native context is enter.
       Returns True on success. Raises excepting.ParseError on failure.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        dstFields, index = self.parseFields(tokens, index)
        dstPath, index = self.parseIndirect(tokens, index)
        connective = tokens[index]
        index += 1
        if connective in ('with', ):
            srcData, index = self.parseDirect(tokens, index)
            # literal increments must be numeric; reject string values
            for field, value in srcData.items():
                if isinstance(value, str):
                    msg = "ParseError: Building verb '%s'. " % (command)
                    msg += "Data value = '%s' in field '%s' not a number" %\
                        (value, field)
                    raise excepting.ParseError(msg, tokens, index)
            act = self.makeIncDirect(dstPath, dstFields, srcData)
        elif connective in ('from', ):
            srcFields, index = self.parseFields(tokens, index)
            srcPath, index = self.parseIndirect(tokens, index)
            act = self.makeIncIndirect(dstPath, dstFields, srcPath, srcFields)
        else:
            msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                (command, connective)
            raise excepting.ParseError(msg, tokens, index)
    except IndexError:
        msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    context = self.currentContext
    if context == NATIVE:
        context = ENTER #what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildCopy(self, command, tokens, index):
    """Build copy command to copy fields from one share to another.

       Syntax:
           copy source into destination

       source:
          [(value, fields) in] indirect
       destination:
          [(value, fields) in] indirect
    """
    self.verifyCurrentContext(tokens, index)  # need current store, framer, frame
    try:
        srcFields, index = self.parseFields(tokens, index)
        srcPath, index = self.parseIndirect(tokens, index)
        connective = tokens[index]
        index += 1
        if connective != 'into':
            emsg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                (command, connective)
            raise excepting.ParseError(emsg, tokens, index)
        dstFields, index = self.parseFields(tokens, index)
        dstPath, index = self.parseIndirect(tokens, index)
    except IndexError:
        emsg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    if index != len(tokens):
        emsg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
        raise excepting.ParseError(emsg, tokens, index)
    actorName = 'Poke' + 'Indirect'  # capitalize second word
    if actorName not in poking.Poke.Registry:
        emsg = "ParseError: Can't find actor named '%s'" % (actorName)
        raise excepting.ParseError(emsg, tokens, index)
    parms = {}
    parms['source'] = srcPath  # share path string
    parms['sourceFields'] = srcFields  # list of field names
    parms['destination'] = dstPath  # share path string
    parms['destinationFields'] = dstFields  # list of field names
    act = acting.Act(actor=actorName,
                     registrar=poking.Poke,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    context = self.currentContext
    if context == NATIVE:
        context = ENTER  # native context for this command
    if not self.currentFrame.addByContext(act, context):
        emsg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(emsg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildSet(self, command, tokens, index):
    """Build set command to generate goal actions

       Syntax:
           set goal with data
           set goal from source

       goal:
          elapsed
          recurred
          [(value, fields) in] absolute
          [(value, fields) in] relativegoal
       data:
          direct
       source:
          indirect

       Native context is enter. Returns True on success, False when a
       goal act could not be made. Raises excepting.ParseError on failure.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        kind = tokens[index]
        if kind in ['elapsed', 'recurred']: #simple implicit framer relative goals, direct and indirect,
            index +=1 #eat token
            act, index = self.makeFramerGoal(kind, tokens, index)
        else: #basic goals
            #goal is destination dst
            dstFields, index = self.parseFields(tokens, index)
            dstPath, index = self.parseIndirect(tokens, index)
            #required connective
            connective = tokens[index]
            index += 1
            if connective in ('with', ): #data direct
                srcData, index = self.parseDirect(tokens, index)
                act = self.makeGoalDirect(dstPath, dstFields, srcData)
            elif connective in ('from', ): #source indirect
                srcFields, index = self.parseFields(tokens, index)
                srcPath, index = self.parseIndirect(tokens, index)
                act = self.makeGoalIndirect(dstPath, dstFields, srcPath, srcFields)
            else:
                msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)
        if not act:
            return False
    except IndexError:
        msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    context = self.currentContext
    if context == NATIVE:
        context = ENTER #what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildGo(self, command, tokens, index):
    """Parse 'go' command transition with
       transition conditions of forms

       Transitions:
          go far
          go far if [not] need
          go far if [not] need [and [not] need ...]
       Far:
          next
          me
          frame

       Builds a Transiter preact on the current frame. An unconditional
       'go far' produces an empty needs list which always fires.

       Returns True on success, False when makeNeed fails.
       Raises excepting.ParseError on failure.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        needs = []
        far = None
        connective = None
        far = tokens[index] #get target
        index +=1 #eat token
        self.verifyName(far, command, tokens, index)
        if index < len(tokens): #check for optional if connective
            connective = tokens[index]
            if connective not in ['if']: #invalid connective
                msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)
            index += 1 #otherwise eat token
            while (index < len(tokens)):
                act, index = self.makeNeed(tokens, index)
                if not act:
                    return False #something wrong do not know what
                needs.append(act)
                if index < len(tokens):
                    connective = tokens[index]
                    if connective not in ['and']:
                        msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                            (command, connective)
                        raise excepting.ParseError(msg, tokens, index)
                    index += 1 #otherwise eat token
    except IndexError:
        msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if not needs and connective: #if but no needs
        msg = "ParseError: Building verb '%s'. Connective %s but missing need(s)" %\
            (command, connective)
        raise excepting.ParseError(msg, tokens, index)
    # build transact
    human = ' '.join(tokens) #recreate transition command string for debugging
    #resolve far link later
    parms = dict(needs = needs, near = 'me', far = far, human = human)
    act = acting.Act( actor='Transiter',
                      registrar=acting.Actor,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    self.currentFrame.addPreact(act)
    console.profuse("     Added transition preact, '{0}', with far {1} needs:\n".format(
        command, far))
    for act in needs:
        console.profuse("    {0} with parms = {1}\n".format(act.actor, act.parms))
    return True
    def buildLet(self, command, tokens, index):
        """Parse 'let' verb which adds benter (before-enter) condition
           acts to the current frame.

           Syntax:
              let [me] if [not] need
              let [me] if [not] need [and [not] need ...]

           Parameters:
              command = verb name ('let'), used in error messages
              tokens  = list of token strings for the command line
              index   = position in tokens of next unconsumed token

           Returns True on success. Raises excepting.ParseError on bad
           syntax, missing needs, or leftover tokens.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            needs = []
            connective = None
            connective = tokens[index] #get optional 'me' or required 'if'
            if connective not in ['me', 'if']: #invalid connective
                msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)
            index += 1 #otherwise eat token
            if connective == 'me': # 'me' is optional; 'if' must follow it
                connective = tokens[index] #check for if connective
                if connective not in ['if']: #invalid connective
                    msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
                index += 1 #otherwise eat token
            while (index < len(tokens)): # parse one or more 'and'-joined needs
                act, index = self.makeNeed(tokens, index)
                if not act:
                    return False # something wrong, do not know what
                needs.append(act)
                if index < len(tokens): # multiple needs must be joined by 'and'
                    connective = tokens[index]
                    if connective not in ['and']:
                        msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                            (command, connective)
                        raise excepting.ParseError(msg, tokens, index)
                    index += 1 #otherwise eat token
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if not needs: # at least one need is required
            msg = "ParseError: Building verb '%s'. Missing need(s)" %\
                (command)
            raise excepting.ParseError(msg, tokens, index)
        # build beact: each need act becomes a before-enter condition
        for act in needs:
            self.currentFrame.addBeact(act)
        console.profuse("    Added beact, '{0}', with needs:\n".format(command))
        for act in needs:
            console.profuse("      {0} with {1}\n".format(act.actor, act.parms))
        return True
def buildDo(self, command, tokens, index):
"""
Syntax:
do kind [part ...] [as name [part ...]] [at context] [via inode]
[with data]
[from source]
[per data]
[for source]
[cum data]
[qua source]
deed:
name [part ...]
kind:
name [part ...]
context:
(native, benter, enter, recur, exit, precur, renter, rexit)
inode:
indirect
data:
direct
source:
[(value, fields) in] indirect
do controller pid depth --> controllerPIDDepth
do arbiter switch heading --> arbiterSwitchHeading
do controller pid depth with foobar 1
do controller pid depth from value in .max.depth
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
kind = "" # deed class key in registry
name = "" #specific name of deed instance
inode = None
parts = []
parms = odict()
inits = odict()
ioinits = odict()
prerefs = odict([('inits', odict()),
('ioinits', odict()),
('parms', odict()) ])
connective = None
context = self.currentContext
while index < len(tokens):
if (tokens[index] in ['as', 'at', 'via', 'with', 'from',
'per', 'for', 'cum', 'qua' ]): # end of parts
break
parts.append(tokens[index])
index += 1 #eat token
if parts:
kind = "".join([part.capitalize() for part in parts]) #camel case
while index < len(tokens): #options
connective = tokens[index]
index += 1
if connective in ('as', ):
parts = []
while index < len(tokens): # kind parts end when connective
if tokens[index] in ['as', 'at', 'with', 'from' 'per',
'for', 'cum', 'qua' ]: # end of parts
break
parts.append(tokens[index])
index += 1 #eat token
name = "".join([part.capitalize() for part in parts]) #camel case
if not name:
msg = "ParseError: Building verb '%s'. Missing name for connective 'as'" % (command)
raise excepting.ParseError(msg, tokens, index)
elif connective in ('at', ):
context = tokens[index]
index += 1
if context not in ActionContextValues:
msg = ("ParseError: Building verb '{0}'. Invalid context"
" '{1} for connective 'as'".format(command, context))
raise excepting.ParseError(msg, tokens, index)
context = ActionContextValues[context]
elif connective in ('via', ):
inode, index = self.parseIndirect(tokens, index, node=True)
elif connective in ('with', ):
data, index = self.parseDirect(tokens, index)
parms.update(data)
elif connective in ('from', ):
srcFields, index = self.parseFields(tokens, index)
srcPath, index = self.parseIndirect(tokens, index)
prerefs['parms'][srcPath] = srcFields
elif connective in ('per', ):
data, index = self.parseDirect(tokens, index)
ioinits.update(data)
elif connective in ('for', ):
srcFields, index = self.parseFields(tokens, index)
srcPath, index = self.parseIndirect(tokens, index)
prerefs['ioinits'][srcPath] = srcFields
elif connective in ('cum', ):
data, index = self.parseDirect(tokens, index)
inits.update(data)
elif connective in ('qua', ):
srcFields, index = self.parseFields(tokens, index)
srcPath, index = self.parseIndirect(tokens, index)
prerefs['inits'][srcPath] = srcFields
else:
msg = ("Error building {0}. Invalid connective"
" '{1}'.".format(command, connective))
raise excepting.ParseError(msg, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if not kind:
msg = "ParseError: Building verb '%s'. Missing kind for Doer." %\
(command)
raise excepting.ParseError(msg, tokens, index)
if kind not in doing.Doer.Registry: # class registration not exist
msg = "ParseError: Building verb '%s'. No Deed of kind '%s' in registry" %\
(command, kind)
raise excepting.ParseError(msg, tokens, index)
if inode:
ioinits.update(inode=inode) # via argument takes precedence over others
if name:
inits['name'] = name
act = acting.Act( actor=kind,
registrar=doing.Doer,
inits=inits,
ioinits=ioinits,
parms=parms,
prerefs=prerefs,
human=self.currentHuman,
count=self.currentCount)
#context = self.currentContext
if context == NATIVE:
context = RECUR #what is native for this command
if not self.currentFrame.addByContext(act, context):
msg = "Error building %s. Bad context '%s'." % (command, context)
raise excepting.ParseError(msg, tokens, index)
console.profuse(" Added {0} '{1}' with parms '{2}'\n".format(
ActionContextNames[context], act.actor, act.parms))
return True
    def buildBid(self, command, tokens, index):
        """
        Build a 'bid' verb into a Want act that commands tasker(s).

        Syntax:
           bid control tasker [tasker ...] [at period]
           bid control [me] [at period]
           bid control all [at period]

           control: (stop, start, run, abort, ready)
           tasker:  (taskername, me, all); defaults to 'me' when omitted
           period:  number (direct) or indirect share reference

        Returns True on success, raises excepting.ParseError otherwise.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            period = None # no period provided
            sourcePath = None
            sourceField = None
            parms = odict([('taskers', []), ('period', None), ('sources', odict())])
            control = tokens[index]
            index +=1
            if control not in ['start', 'run', 'stop', 'abort', 'ready']:
                msg = "Error building {0}. Bad control = {1}.".format(command, control)
                raise excepting.ParseError(msg, tokens, index)
            taskers = []
            while index < len(tokens): # gather tasker names until optional 'at'
                if (tokens[index] in ['at']):
                    break # end of taskers so do not eat yet
                tasker = tokens[index]
                index +=1
                self.verifyName(tasker, command, tokens, index)
                taskers.append(tasker) #resolve later
            if not taskers: # default to current tasker
                taskers.append('me')
            while index < len(tokens): # at option
                connective = tokens[index]
                index += 1
                if connective in ['at']:
                    # parse period direct or indirect
                    try: #parse direct
                        period = max(0.0, Convert2Num(tokens[index])) # period is number
                        index += 1 # eat token
                    except ValueError: # parse indirect
                        sourceField, index = self.parseField(tokens, index)
                        sourcePath, index = self.parseIndirect(tokens, index)
                else:
                    msg = ("Error building {0}. Invalid connective"
                           " '{1}'.".format(command, connective))
                    raise excepting.ParseError(msg, tokens, index)
            actorName = 'Want' + control.capitalize()
            if actorName not in wanting.Want.Registry:
                msg = "Error building %s. No actor named %s." % (command, actorName)
                raise excepting.ParseError(msg, tokens, index)
            parms['taskers'] = taskers #resolve later
            parms['period'] = period
            parms['source'] = sourcePath
            parms['sourceField'] = sourceField
            act = acting.Act( actor=actorName,
                              registrar=wanting.Want,
                              parms=parms,
                              human=self.currentHuman,
                              count=self.currentCount)
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        context = self.currentContext
        if context == NATIVE:
            context = ENTER #what is native for this command
        if not self.currentFrame.addByContext(act, context):
            msg = "Error building %s. Bad context '%s'." % (command, context)
            raise excepting.ParseError(msg, tokens, index)
        console.profuse("    Added {0} want '{1}' with parms '{2}'\n".format(
            ActionContextNames[context], act.actor, act.parms))
        return True
def buildReady(self, command, tokens, index):
"""
ready taskName
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
tasker = tokens[index]
index +=1
self.verifyName(tasker, command, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
native = BENTER
self.makeFiat(tasker, 'ready', native, command, tokens, index)
return True
def buildStart(self, command, tokens, index):
"""
start taskName
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
tasker = tokens[index]
index +=1
self.verifyName(tasker, command, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
native = ENTER
self.makeFiat(tasker, 'start', native, command, tokens, index)
return True
def buildStop(self, command, tokens, index):
"""
stop taskName
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
tasker = tokens[index]
index +=1
self.verifyName(tasker, command, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
native = EXIT
self.makeFiat(tasker, 'stop', native, command, tokens, index)
return True
def buildRun(self, command, tokens, index):
"""
run taskName
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
tasker = tokens[index]
index +=1
self.verifyName(tasker, command, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
native = RECUR
self.makeFiat(tasker, 'run', native, command, tokens, index)
return True
def buildAbort(self, command, tokens, index):
"""
abort taskName
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
tasker = tokens[index]
index +=1
self.verifyName(tasker, command, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
native = ENTER
self.makeFiat(tasker, 'abort', native, command, tokens, index)
return True
def buildUse(self, command, tokens, index):
"""
Not implemented yet
"""
msg = " ".join(tokens)
console.concise("{0}\n")
return True
def buildFlo(self, command, tokens, index):
"""
Not implemented yet
"""
msg = " ".join(tokens)
console.concise("{0}\n")
return True
def buildTake(self, command, tokens, index):
"""
Not implemented yet
"""
msg = " ".join(tokens)
console.concise("{0}\n")
return True
def buildGive(self, command, tokens, index):
"""
Not implemented yet
"""
msg = " ".join(tokens)
console.concise("{0}\n")
return True
#------------------
def makeIncDirect(self, dstPath, dstFields, srcData):
"""Make IncDirect act
method must be wrapped in appropriate try excepts
"""
actorName = 'Inc' + 'Direct' #capitalize second word
if actorName not in poking.Poke.Registry:
msg = "ParseError: Can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['destination'] = dstPath #this is string
parms['destinationFields'] = dstFields # this is a list
parms['sourceData'] = srcData #this is an ordered dictionary
act = acting.Act( actor=actorName,
registrar=poking.Poke,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
msg = " Created Actor {0} parms: ".format(actorName)
for key, value in parms.items():
msg += " {0} = {1}".format(key, value)
console.profuse("{0}\n".format(msg))
return act
def makeIncIndirect(self, dstPath, dstFields, srcPath, srcFields):
"""Make IncIndirect act
method must be wrapped in appropriate try excepts
"""
actorName = 'Inc' + 'Indirect' #capitalize second word
if actorName not in poking.Poke.Registry:
msg = "ParseError: Goal can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
#actor = poking.Poke.Names[actorName]
parms = {}
parms['destination'] = dstPath #this is a share
parms['destinationFields'] = dstFields #this is a list
parms['source'] = srcPath #this is a share
parms['sourceFields'] = srcFields #this is a list
act = acting.Act( actor=actorName,
registrar=poking.Poke,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
msg = " Created Actor {0} parms: ".format(actorName)
for key, value in parms.items():
msg += " {0} = {1}".format(key, value)
console.profuse("{0}\n".format(msg))
return act
    def makeFramerGoal(self, name, tokens, index):
        """
        Make a goal act whose destination is the goal named name relative
        to the current framer, i.e. share framer.me.goal.<name>, field 'value'.
        Must be wrapped in appropriate try/excepts by the caller.

        Syntax:
           goal to data        (direct)
           goal from source    (indirect)

           data:   [value] value | field value [field value ...]
           source: [(value, fields) in] indirect

        Returns (act, index) where index points at the next unconsumed token.
        """
        #name is used as name of goal relative to current framer
        #create goal relative to current framer destination is goal
        dstPath = 'framer.' + 'me' + '.goal.' + name
        dstField = 'value'
        dstFields = [dstField]
        #required connective
        connective = tokens[index]
        index += 1
        if connective in ['to', 'with']: #data direct
            srcData, index = self.parseDirect(tokens, index)
            act = self.makeGoalDirect(dstPath, dstFields, srcData )
        elif connective in ['by', 'from']: #source indirect
            srcFields, index = self.parseFields(tokens, index)
            srcPath, index = self.parseIndirect(tokens, index)
            act = self.makeGoalIndirect(dstPath, dstFields, srcPath, srcFields)
        else:
            msg = "ParseError: Unexpected connective '%s'" %\
                (connective)
            raise excepting.ParseError(msg, tokens, index)
        return act, index
def makeGoalDirect(self, dstPath, dstFields, srcData):
"""Make GoalDirect act
method must be wrapped in appropriate try excepts
"""
actorName = 'Goal' + 'Direct' #capitalize second word
if actorName not in goaling.Goal.Registry:
msg = "ParseError: Goal can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['destination'] = dstPath #this is string
parms['destinationFields'] = dstFields #this is list
parms['sourceData'] = srcData #this is a dictionary
act = acting.Act( actor=actorName,
registrar=goaling.Goal,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
msg = " Created Actor {0} parms: ".format(actorName)
for key, value in parms.items():
msg += " {0} = {1}".format(key, value)
console.profuse("{0}\n".format(msg))
return act
def makeGoalIndirect(self, dstPath, dstFields, srcPath, srcFields):
"""Make GoalIndirect act
method must be wrapped in appropriate try excepts
"""
actorName = 'Goal' + 'Indirect' #capitalize second word
if actorName not in goaling.Goal.Registry:
msg = "ParseError: Goal can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['destination'] = dstPath #this is string
parms['destinationFields'] = dstFields #this is a list
parms['source'] = srcPath #this is a string
parms['sourceFields'] = srcFields #this is a list
act = acting.Act( actor=actorName,
registrar=goaling.Goal,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
msg = " Created Actor {0} parms: ".format(actorName)
for key, value in parms.items():
msg += " {0} = {1}".format(key, value)
console.profuse("{0}\n".format(msg))
return act
    def makeNeed(self, tokens, index):
        """
        Parse one need clause and dispatch to the appropriate need maker.
        Must be wrapped in try/except IndexError by the caller.
        Assumes currentStore, currentFramer, currentFrame already checked.

        Need forms:
           [not] need
           need:
              basic need:
                 if state [comparison goal [+- tolerance]]
              simple need:
                 if framerstate [re [(me, framername)]] comparison framergoal
                    [+- tolerance]
              special need:
                 if indirect is updated [in frame (me, framename)]
                 if taskername is (readied, started, running, stopped, aborted)
                 if taskername is done
                 if (aux auxname, any, all) [in frame ...][in framer ...] is done
           state: [(value, field) in] indirect
           goal:  value | [(value, field) in] indirect
           comparison: (==, !=, <, <=, >=, >)
           tolerance:  number (absolute value is used)
           framerstate: (elapsed, recurred)

        Returns (act, index); act may be wrapped in a Nact when negated.
        """
        kind = None
        negate = False
        if tokens[index] == 'not': # optional negation wraps act in Nact below
            negate = True
            index += 1 #eat token
        # find back end of current clause
        if 'and' in tokens[index:]: # conjunction
            back = tokens[index:].index('and') + index + 1
        else:
            back = len(tokens)
        if 'is' in tokens[index:back]: # check for 'is participle' form, special needs
            place = tokens[index:back].index('is') # is
            participle = tokens[index + place + 1] # participle modifier to is
            if participle in ('done', ):
                kind = 'done'
                act, index = self.makeDoneNeed(kind, tokens, index)
            elif participle in ('readied', 'started', 'running', 'stopped', 'aborted'):
                kind = 'status'
                act, index = self.makeStatusNeed(kind, tokens, index)
            elif participle in ('updated', 'changed'):
                kind = participle[:-1] # remove 'd' suffix
                act, index = self.makeMarkerNeed(kind, tokens, index)
            else:
                msg = "ParseError: Unexpected 'is' participle '%s' for need" %\
                    (participle)
                raise excepting.ParseError(msg, tokens, index)
        else: # either simple need or basic need
            state, framer, index = self.parseFramerState(tokens, index)
            if state is not None: # 're' clause present, simple need
                if state not in ('elapsed', 'recurred'):
                    msg = "ParseError: Unsupported framer state '%s'" %\
                        (state)
                    raise excepting.ParseError(msg, tokens, index)
                kind = state
                act, index = self.makeFramerNeed(kind, tokens, index)
                # in the future we could support framer need for a different framer
                # not me or current framer
                # currently ignoring framer, because only allow 'me' or currentFramer
            else: # basic need with support for deprecated form of simple need
                simple = False # found deprecated simple need form
                stateField, index = self.parseField(tokens, index)
                if stateField is None: # no 'in' clause
                    state = tokens[index] # look for bare framer state
                    if state in ('elapsed', 'recurred'): # deprecated
                        index += 1
                        kind = state
                        simple = True
                        act, index = self.makeFramerNeed(kind, tokens, index)
                if not simple: # basic need either path not elapsed,recurred or 'in' clause
                    statePath, index = self.parseIndirect(tokens, index)
                    #parse optional comparison
                    comparison, index = self.parseComparisonOpt(tokens,index)
                    if not comparison: #no comparison so make a boolean need
                        act = self.makeBoolenNeed(statePath, stateField)
                    else: #valid comparison so required goal
                        #parse required goal
                        direct, goal, goalPath, goalField, index = \
                            self.parseNeedGoal(statePath, stateField, tokens, index)
                        #parse optional tolerance
                        tolerance, index = self.parseTolerance(tokens, index)
                        if direct: #make a direct need
                            act = self.makeDirectNeed(statePath,
                                                      stateField,
                                                      comparison,
                                                      goal,
                                                      tolerance)
                        else: #make an indirect need
                            act = self.makeIndirectNeed(statePath,
                                                        stateField,
                                                        comparison,
                                                        goalPath,
                                                        goalField,
                                                        tolerance)
        if negate: # wrap in negating act preserving actor and parms
            act = acting.Nact(actor=act.actor,
                              registrar=act.registrar,
                              parms=act.parms,
                              human=self.currentHuman,
                              count=self.currentCount)
        return (act, index)
    def makeDoneNeed(self, kind, tokens, index):
        """
        Make a need act that checks if a tasker or aux has completed,
        that is, its .done is truthy.
        Must be wrapped in appropriate try/excepts by the caller.

        Syntax:
           if taskername is done
           if (aux auxname, any, all)
              [in frame [(me, framename)] [in framer [(me, framername)]]] is done
           if (aux auxname, any, all)
              [in framer [(me, framername)]] is done

        Returns (act, index).
        """
        frame = "" # name of frame where aux resides if applicable
        framer = "" # name of framer where aux resides if applicable
        auxed = False # True when one of the auxiliary forms is used
        tasker = tokens[index]
        if tasker in ('any', 'all'): # auxilary case applicable so default
            index += 1
            auxed = True
            framer = 'me'
            frame = 'me'
        elif tasker == "aux": # explicit aux form: 'aux auxname'
            index += 1
            auxed = True
            framer = 'me'
            tasker = tokens[index]
            self.verifyName(tasker, kind, tokens, index)
            index += 1
        else: # plain tasker name form
            self.verifyName(tasker, kind, tokens, index)
            index += 1
        # in clause existence means auxilary case
        # optional in clauses followed by is clause
        connective = tokens[index]
        if connective == 'in': # optional 'in frame [(me, framename)]' clause
            index += 1 # eat 'in' connective
            auxed = True
            framer = 'me'
            place = tokens[index] # required place frame or framer
            index += 1 # eat place token
            if place == 'framer':
                connective = tokens[index]
                if connective not in Reserved: # assume must be name
                    framer = connective
                    self.verifyName(framer, kind, tokens, index)
                    index += 1
                    connective = tokens[index] # set up for next clause
            elif place == 'frame':
                frame = 'me'
                connective = tokens[index]
                if connective not in Reserved: # assume must be name
                    frame = connective
                    self.verifyName(frame, kind, tokens, index)
                    index += 1
                    connective = tokens[index] # setup for next clause
                if connective == 'in': # optional 'in framer [(me, framername)]' clause
                    index += 1 # eat 'in' connective
                    place = tokens[index] # required place framer
                    index += 1 # eat place token
                    if place != 'framer':
                        msg = ("ParseError: Expected 'framer' got "
                               "'{0}'".format(place))
                        raise excepting.ParseError(msg, tokens, index)
                    connective = tokens[index]
                    if connective not in Reserved: # assume must be name
                        framer = connective
                        self.verifyName(framer, kind, tokens, index)
                        index += 1
                        connective = tokens[index] # setup for next clause
            else:
                msg = ("ParseError: Expected 'framer' or frame' got "
                       "'{0}'".format(place))
                raise excepting.ParseError(msg, tokens, index)
        if connective not in ('is', ): # missing 'is'
            msg = ("ParseError: Expected 'is' connective got "
                   "'{0}'".format(connective))
            raise excepting.ParseError(msg, tokens, index)
        index += 1 # eat 'is' connective token
        participle = tokens[index]
        index += 1
        if participle not in ('done', ): # wrong 'participle'
            msg = ("ParseError: Expected 'done' participle got "
                   "'{0}'".format(participle))
            raise excepting.ParseError(msg, tokens, index)
        # a frame of me is nonsensical if framer is not current framer
        if (frame == 'me' and
            not (framer == 'me' or framer == self.currentFramer.name)):
            msg = ("Error: Frame '{0}' nonsensical given"
                   " Framer '{1}'.".format(frame, framer))
            raise excepting.ParseError(msg, tokens, index)
        actorName = 'Need' + kind.capitalize()
        if auxed: # auxiliary forms use the Aux variant of the actor
            actorName += 'Aux'
        if actorName not in needing.Need.Registry:
            msg = "ParseError: Need '%s' can't find actor named '%s'" %\
                ( kind, actorName)
            raise excepting.ParseError(msg, tokens, index)
        parms = {}
        parms['tasker'] = tasker
        parms['framer'] = framer
        parms['frame'] = frame
        act = acting.Act(actor=actorName,
                         registrar=needing.Need,
                         parms=parms,
                         human=self.currentHuman,
                         count=self.currentCount)
        return (act, index)
def makeStatusNeed(self, kind, tokens, index):
"""
Need to check if tasker named tasker status' is status
method must be wrapped in appropriate try excepts
Syntax:
if taskername is (readied, started, running, stopped, aborted)
"""
tasker = tokens[index]
if not REO_IdentPub.match(tasker):
msg = "ParseError: Invalid format of tasker name '%s'" % (tasker)
raise excepting.ParseError(msg, tokens, index)
index += 1
connective = tokens[index]
index += 1
if connective not in ('is', ):
msg = "ParseError: Need status invalid connective '%s'" %\
(kind, connective)
raise excepting.ParseError(msg, tokens, index)
status = tokens[index] # participle
index += 1
if status.capitalize() not in StatusValues:
msg = "ParseError: Need status invalid status '%s'" %\
(kind, status)
raise excepting.ParseError(msg, tokens, index)
status = StatusValues[status.capitalize()] #replace name with value
actorName = 'Need' + kind.capitalize()
if actorName not in needing.Need.Registry:
msg = "ParseError: Need '%s' can't find actor named '%s'" %\
(kind, actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['tasker'] = tasker #need to resolve this
parms['status'] = status
act = acting.Act( actor=actorName,
registrar=needing.Need,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
return (act, index)
def makeUpdateNeed(self, kind, tokens, index):
"""
Need to check if share updated in frame
method must be wrapped in appropriate try excepts
Syntax:
if path [[of relation] ...] is updated [in frame [(me, framename)]]
[by marker]
"""
return (self.makeMarkerNeed(kind, tokens, index))
def makeChangeNeed(self, kind, tokens, index):
"""
Need to check if share updated in frame
method must be wrapped in appropriate try excepts
Syntax:
if path [[of relation] ...] is changed [in frame [(me, framename)]]
[by marker]
"""
return (self.makeMarkerNeed(kind, tokens, index))
    def makeMarkerNeed(self, kind, tokens, index):
        """
        Support method to make either a NeedUpdate or NeedChange act as
        determined by kind ('update' or 'change'). The actual Marker act
        is created later during the need's resolve.

        Syntax:
           if path [[of relation] ...] is (updated, changed)
              [in frame [(me, framename)]] [by marker]

           sharepath: path [[of relation] ...]
           marker:    string

        Returns (act, index).
        """
        frame = "" # name of marked frame; when empty resolve uses "me" but no enact
        marker = ""
        sharePath, index = self.parseIndirect(tokens, index)
        connective = tokens[index]
        if connective not in ('is', ):
            msg = ("ParseError: Unexpected connective '{0}' not 'is', "
                   "while building need".format(connective))
            raise excepting.ParseError(msg, tokens, index)
        index += 1
        participle = tokens[index]
        if participle not in ('updated', 'changed' ):
            msg = ("ParseError: Unexpected 'is' participle '{0}', "
                   " not 'updated' or 'changed', "
                   "while building need".format(participle))
            raise excepting.ParseError(msg, tokens, index)
        index += 1
        # ensure kind and participle match
        if participle[:-1] != kind: # remove 'd' suffix
            msg = ("ParseError: Mismatching participle. Expected '{0}' got "
                   "'{1}'".format(kind + 'd', participle))
            raise excepting.ParseError(msg, tokens, index)
        while index < len(tokens): # optional 'in frame' and 'by marker' clauses
            connective = tokens[index]
            if connective not in ('in', 'by'): # next need clause started
                break
            index += 1 # eat token for connective
            if connective == 'in':
                place = tokens[index] #need to resolve
                index += 1 # eat place token
                if place != 'frame':
                    msg = ("ParseError: Invalid "
                           " '{0}' clause. Expected 'frame' got "
                           "'{1}'".format(connective, place))
                    raise excepting.ParseError(msg, tokens, index)
                frame = "me" # default if just frame but no framename
                if index < len(tokens): # frame name is optional
                    connective = tokens[index] #need to resolve
                    if connective not in Reserved: # assume must be name
                        frame = connective # only
                        if not REO_IdentPub.match(frame):
                            msg = "ParseError: Invalid format of frame name '%s'" % (frame)
                            raise excepting.ParseError(msg, tokens, index)
                        index += 1 # consume frame name token
            elif connective == 'by':
                marker = tokens[index]
                index += 1 # eat marker token
                marker = StripQuotes(marker)
        # assign marker type; actual marker Act created in need's resolve
        markerKind = 'Marker' + kind.capitalize()
        actorName = 'Need' + kind.capitalize()
        if actorName not in needing.Need.Registry:
            msg = "ParseError: Need '%s' can't find actor named '%s'" %\
                (kind, actorName)
            raise excepting.ParseError(msg, tokens, index)
        parms = {}
        parms['share'] = sharePath
        parms['frame'] = frame # marked frame name resolved in resolvelinks
        parms['kind'] = markerKind # marker kind resolved in resolvelinks
        parms['marker'] = marker
        act = acting.Act( actor=actorName,
                          registrar=needing.Need,
                          parms=parms,
                          human=self.currentHuman,
                          count=self.currentCount)
        return (act, index)
def makeImplicitDirectFramerNeed(self, name, comparison, goal, tolerance):
"""Make implicit need, ie the need is not parsed but implied by the command
such as timeout
method must be wrapped in appropriate try excepts
state comparison goal [+- tolerance]
goal:
value (direct number or string)
state:
name
implied state is framer.currentframer.state.name value
"""
console.profuse(" Making implicit direct framer need {0}\n".format(name))
#name is used as name of state relative to current framer
# and if implicit goal the name of goal relative to current framer
#create state relative to framer
statePath = 'framer.' + 'me' + '.state.' + name
stateField = 'value'
act = self.makeDirectNeed(statePath, stateField, comparison, goal, tolerance)
return act
    def makeFramerNeed(self, name, tokens, index):
        """
        Make a need act that checks if framer state name for the current
        framer satisfies a comparison against a goal.
        Must be wrapped in appropriate try/excepts by the caller.

        Syntax:
           state comparison goal [+- tolerance]

           state: name; implied share is framer.me.state.<name> field 'value'
           goal:  value or [(value, field) in] indirect

        Examples:
           elapsed >= 25.0
           elapsed >= goal
           elapsed == goal +- 0.1

        Returns (act, index).
        """
        console.profuse("    Making framer need {0}\n".format(name))
        #name is used as name of state relative to current framer
        # and if implicit goal the name of goal relative to current framer
        #create state relative to framer
        statePath = 'framer.' + 'me' + '.state.' + name
        stateField = 'value'
        #parse required comparison
        comparison, index = self.parseComparisonReq(tokens,index)
        #parse required goal
        direct, goal, goalPath, goalField, index = \
            self.parseFramerNeedGoal(statePath, stateField, tokens, index)
        #parse optional tolerance
        tolerance, index = self.parseTolerance(tokens, index)
        if direct: #make a direct need
            act = self.makeDirectNeed(statePath,
                                      stateField,
                                      comparison,
                                      goal,
                                      tolerance)
        else: #make an indirect need
            act = self.makeIndirectNeed(statePath,
                                        stateField,
                                        comparison,
                                        goalPath,
                                        goalField,
                                        tolerance)
        return (act, index)
def makeBoolenNeed(self, statePath, stateField):
"""Make booleanNeed act
method must be wrapped in appropriate try excepts
"""
actorName = 'Need' + 'Boolean' #capitalize second word
if actorName not in needing.Need.Registry:
msg = "ParseError: Need can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['state'] = statePath #this is a string
parms['stateField'] = stateField #this is string
act = acting.Act( actor=actorName,
registrar=needing.Need,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
msg = " Created Actor {0} parms: ".format(actorName)
for key, value in parms.items():
msg += " {0} = {1}".format(key, value)
console.profuse("{0}\n".format(msg))
return act
def makeDirectNeed(self, statePath, stateField, comparison, goal, tolerance):
    """Build and return a direct Need act that compares a state share field
       against a literal goal value.

       statePath  -- dotpath string of the state share
       stateField -- field name string within that share
       comparison -- comparison operator string
       goal       -- literal goal value (boolean, number, or string)
       tolerance  -- numeric tolerance for the comparison

       Returns the created acting.Act.
       Raises excepting.ParseError if the 'NeedDirect' actor is not registered.
       Method must be wrapped in appropriate try excepts.
    """
    actorName = 'Need' + 'Direct'  # capitalize second word
    if actorName not in needing.Need.Registry:
        msg = "ParseError: Need can't find actor named '%s'" % (actorName)
        # This helper has no tokens/index parameters; the original raised
        # with undefined names here which produced a NameError instead of
        # the intended ParseError. Pass empty parse context instead.
        raise excepting.ParseError(msg, [], 0)
    parms = {}
    parms['state'] = statePath  # this is a string
    parms['stateField'] = stateField  # this is a string
    parms['comparison'] = comparison  # this is a string
    parms['goal'] = goal  # this is a value: boolean number or string
    parms['tolerance'] = tolerance  # this is a number
    act = acting.Act(actor=actorName,
                     registrar=needing.Need,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    return act
def makeIndirectNeed(self,
                     statePath,
                     stateField,
                     comparison,
                     goalPath,
                     goalField,
                     tolerance):
    """Build and return an indirect Need act that compares a state share
       field against a goal share field.

       statePath  -- dotpath string of the state share
       stateField -- field name string within the state share
       comparison -- comparison operator string
       goalPath   -- dotpath string of the goal share
       goalField  -- field name string within the goal share
       tolerance  -- numeric tolerance for the comparison

       Returns the created acting.Act.
       Raises excepting.ParseError if the 'NeedIndirect' actor is not registered.
       Method must be wrapped in appropriate try excepts.
    """
    actorName = 'Need' + 'Indirect'  # capitalize second word
    if actorName not in needing.Need.Registry:
        msg = "ParseError: Need can't find actor named '%s'" % (actorName)
        # This helper has no tokens/index parameters; the original raised
        # with undefined names here which produced a NameError instead of
        # the intended ParseError. Pass empty parse context instead.
        raise excepting.ParseError(msg, [], 0)
    parms = {}
    parms['state'] = statePath  # this is string
    parms['stateField'] = stateField  # this is a string
    parms['comparison'] = comparison  # this is a string
    parms['goal'] = goalPath  # this is a string
    parms['goalField'] = goalField  # this is a string
    parms['tolerance'] = tolerance  # this is a number
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    act = acting.Act(actor=actorName,
                     registrar=needing.Need,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    return act
def makeFiat(self, name, kind, native, command, tokens, index):
    """
    Assumes wrapped in currentFrame etc checks
    make a fiat action given the tasker name and fiat kind

    name    -- tasker name string (resolved later at link time)
    kind    -- fiat verb kind; capitalized to select the actor class
    native  -- default action context used when current context is NATIVE
    command -- verb name, used only in error messages
    tokens, index -- parse position for error reporting

    Returns True on success; raises excepting.ParseError otherwise.
    """
    actorName = 'Fiat' + kind.capitalize()
    if actorName not in fiating.Fiat.Registry:
        msg = "Error building fiat %s. No actor named %s." % (kind, actorName)
        raise excepting.ParseError(msg, tokens, index)
    parms = {}
    parms['tasker'] = name  # resolve later
    act = acting.Act(actor=actorName,
                     registrar=fiating.Fiat,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    context = self.currentContext
    if context == NATIVE:
        context = native  # the native context for this command
    # attach act to the current frame under the resolved context
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} fiat '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
#----------------------------
def parseDirect(self, tokens, index):
    """Parse Direct data address

       returns ordered dictionary of fields (keys) and values
       if no field provided then uses default field = 'value'

       parms:
          tokens = list of tokens for command
          index = current index into tokens

       returns:
          (data, index) where data is an ordered dict

       method must be wrapped in appropriate try excepts

       Syntax:
          data:
             [value] value
             field value [field value ...]

       possible parsing end conditions:
          no more tokens (init, set)
          token 'into' (put)

       Fix: the three reserved-token error messages used '{0}' placeholders
       with the '%' operator, which raised TypeError instead of the intended
       ParseError; they now use str.format. The multi-field error also
       formatted the bound method data.keys instead of the key list.
    """
    data = odict()
    if index == (len(tokens) - 1):  # only one more token so it must be value
        value = tokens[index]
        if value in Reserved:  # ending token not valid value
            msg = "ParseError: Encountered reserved '{0}' instead of value.".format(value)
            raise excepting.ParseError(msg, tokens, index)
        index += 1  # eat token
        field = 'value'  # default field
    else:  # more than one so first may be field and second token may be value
        field = tokens[index]
        if field in Reserved:  # ending token not valid field
            msg = "ParseError: Encountered reserved '{0}' instead of field.".format(field)
            raise excepting.ParseError(msg, tokens, index)
        index += 1
        value = tokens[index]
        if value in Reserved:  # second reserved token so first token was value
            value = field
            field = 'value'  # default field
        else:  # first token was field and second value
            field = StripQuotes(field)
            index += 1  # eat token
    data[field] = Convert2StrBoolPathCoordPointNum(value)  # convert, load data
    # parse rest if any
    while index < len(tokens):  # must be in pairs unless first is ending token
        field = tokens[index]
        if field in Reserved:  # ending token so break
            break
        field = StripQuotes(field)
        index += 1  # eat token
        value = tokens[index]
        if value in Reserved:  # ending token before valid value
            msg = "ParseError: Encountered reserved '{0}' instead of value.".format(value)
            raise excepting.ParseError(msg, tokens, index)
        index += 1
        data[field] = Convert2StrBoolPathCoordPointNum(value)  # convert, load data
    # prevent using multiple fields if one of them is 'value'
    if (len(data) > 1) and ('value' in data):
        msg = "ParseError: Direct data field = 'value' must be only field '%s'" % (list(data.keys()))
        raise excepting.ParseError(msg, tokens, index)
    # prevent using incorrect format for fields
    for field in data:  # keys
        if not REO_IdentPub.match(field):  # invalid format
            msg = "ParseError: Invalid field = '%s'" % (field)
            raise excepting.ParseError(msg, tokens, index)
    return (data, index)
def parseFields(self, tokens, index):
    """
    Parse optional field list for Indirect address

    parms:
       tokens = list of tokens for command
       index = current index into tokens

    returns:
       (fields, index)

    method must be wrapped in appropriate try excepts

    Syntax:
       [(value, fields) in] indirect

       fields:
          field [field ...]

    valid fields only when encounter token 'in' after fields
    consumes fields and the 'in' so subsequent parsePath starts with indirect path
    parsing end conditions that signify no fields
       if encounter before 'in':
          no more tokens
          reserved token
    """
    indexSave = index  # save it since we lookahead to see if "in"
    fields = []
    found = False  # flag to indicate found 'in' which indicates fields clause
    while index < len(tokens):  # provisionally parse for fields
        field = tokens[index]
        if field == 'in':  # field list present and completed now we know
            index += 1
            found = True
            break
        if field in Reserved:  # field list not present
            break
        index += 1  # eat token
        field = StripQuotes(field)
        fields.append(field)  # provisional
    if not found:  # no fields clause so we ignore
        index = indexSave  # so restore index
        fields = []  # empty fields list
    # prevent using multiple fields if one of them is 'value'
    if (len(fields) > 1) and ('value' in fields):
        msg = "ParseError: Field = 'value' with multiple fields = '%s'" % (fields)
        raise excepting.ParseError(msg, tokens, index)
    for i, field in enumerate(fields):  # now we check if valid format
        if not REO_IdentPub.match(field):
            msg = "ParseError: Invalid format of field '%s'" % (field)
            raise excepting.ParseError(msg, tokens, index)
    return (fields, index)
def parseField(self, tokens, index):
    """
    Parse optional field for Indirect address

    parms:
       tokens = list of tokens for command
       index = current index into tokens

    returns:
       (field, index) where field is the single field name or None

    method must be wrapped in appropriate try excepts

    Syntax:
       [(value, field) in] indirect

    valid field only when encounter token 'in' after first field
    consumes field and the 'in' so subsequent parsePath starts with indirect path
    parsing end conditions that signify no fields
       if encounter before 'in':
          no more tokens
          reserved token

    Unlike parseFields, at most ONE field is allowed here.
    """
    indexSave = index  # save it since we lookahead to see if "in"
    fields = []
    found = False  # flag to indicate found 'in' which indicates fields clause
    while index < len(tokens):
        field = tokens[index]
        if field == 'in':  # field list present and completed
            index += 1
            found = True
            break
        if field in Reserved:  # field list not present
            break
        index += 1  # eat token
        field = StripQuotes(field)
        fields.append(field)
    if not found:  # no fields clause
        index = indexSave  # so restore index
        fields = []  # empty fields list
    # prevent using multiple fields
    if (len(fields) > 1):
        msg = "ParseError: More than one field = '%s'" % (fields)
        raise excepting.ParseError(msg, tokens, index)
    if fields:
        field = fields[0]
        if not REO_IdentPub.match(field):
            msg = "ParseError: Invalid format of field '%s'" % (field)
            raise excepting.ParseError(msg, tokens, index)
    else:
        field = None
    return (field, index)
def parsePath(self, tokens, index):
    """Parse required (path or dotpath) path

       Does not support relative path processing for verbs such as init or
       server which are not inside a framer context

       Returns (path, index) with the token consumed.
       Raises excepting.ParseError when the token is not a valid path.
       Method must be wrapped in appropriate try excepts.
    """
    path = tokens[index]
    index += 1
    if not REO_Path.match(path):  # check if valid path
        msg = "ParseError: Invalid path '%s'" % (path)
        raise excepting.ParseError(msg, tokens, index)
    #path = path.lstrip('.') #remove leading dot if any
    return (path, index)
def parseIndirect(self, tokens, index, node=False):
    """
    Parse Indirect data address
    If node then allow trailing dot in path

    parms:
       tokens = list of tokens for command
       index = current index into tokens

    returns:
       (path, index) -- fully resolved dotpath string and new index

    method must be wrapped in appropriate try excepts

    Syntax:
       indirect:
          absolute
          relative

       absolute:
          dotpath

       relative:
          root
          inode
          framer
          frame
          actor

       root:
          path [of root]

       inode:
          path of me

       framer:
          path of framer [name]

       frame:
          path of frame [name]

       actor:
          path of actor [name]
    """
    # choose regexes: node variants permit a trailing dot
    if node:
        reoDotPath = REO_DotPathNode
        reoRelPath = REO_RelPathNode
    else:
        reoDotPath = REO_DotPath
        reoRelPath = REO_RelPath
    path = tokens[index]
    index += 1
    if path in Reserved:
        msg = "ParseError: Invalid path '%s' using reserved" % (path)
        raise excepting.ParseError(msg, tokens, index)
    if reoDotPath.match(path):  # valid absolute path segment
        # check for optional relation clause
        # if 'of relation' clause then allows relative but no
        # implied relation clauses
        relation, index = self.parseRelation(tokens, index)
        # dotpath starts with '.' no need to add separator dot
    elif reoRelPath.match(path):  # valid relative path segment
        # get optional relation clause, default is root
        relation, index = self.parseRelation(tokens, index)
        chunks = path.split('.')
        if relation:  # check for relation conflict
            # an explicit 'of ...' clause may not repeat a relation that the
            # path itself already begins with
            if chunks[0] in ['framer', 'frame', 'actor']:
                if (chunks[0] == 'framer' or
                        (chunks[0] == 'frame' and '.frame.' in relation) or
                        (chunks[0] == 'actor' and '.actor.' in relation)):
                    msg = ("ParseError: Relation conflict in path '{0}'"
                           " with relation '{1}'".format(path, relation))
                    raise excepting.ParseError(msg, tokens, index)
            if relation == 'me':
                msg = ("ParseError: Relation conflict in path '{0}'"
                       " with relation '{1}'".format(path, relation))
                raise excepting.ParseError(msg, tokens, index)
        else:  # prepend missing relations if partial relation in path
            if chunks[0] == 'actor':
                if len(chunks) < 3:  # actor name or share name missing
                    msg = ("ParseError: Incomplete path '{0}'. Actor name"
                           " or Share name missing given inline actor "
                           "relation".format(path))
                    raise excepting.ParseError(msg, tokens, index)
                relation = 'framer.me.frame.me'
            elif chunks[0] == 'frame':
                if len(chunks) < 3:  # frame name or share name missing
                    msg = ("ParseError: Incomplete path '{0}'. Frame name"
                           " or Share name missing given inline frame "
                           "relation".format(path))
                    raise excepting.ParseError(msg, tokens, index)
                framername = 'me'
                if chunks[1] == 'main':
                    framername = 'main'
                relation = 'framer.' + framername
        if relation:
            relation += '.'  # add dot since not dotpath
    else:  # invalid path format
        msg = "ParseError: Invalid path '{0}'".format(path)
        raise excepting.ParseError(msg, tokens, index)
    path = relation + path
    return (path, index)
def parseRelation(self, tokens, index, framername=''):
    """
    Parse optional relation clause of relative data address

    parms:
       tokens = list of tokens for command
       index = current index into tokens
       framername = default framer name if not provided such as 'main'

    returns:
       (relation, index) -- relation prefix string ('' when none given)

    method must be wrapped in appropriate try excepts

    Syntax:
       relative:
          root
          inode
          framer
          frame
          actor

       root:
          path [of root]

       inode:
          path of me

       framer:
          path of framer [(me, main, name)]

       frame:
          path of frame [(me, main, name)]

       actor:
          path of actor [(me, name)]

    Recurses to pick up chained relations, e.g. 'of actor X of frame Y
    of framer Z', validating that the chain is well ordered.
    """
    relation = ''  # default relation if none given
    if index < len(tokens):  # are there more tokens
        connective = tokens[index]
        if connective == 'of':  # of means relation given
            index += 1  # eat token
            relation = tokens[index]
            index += 1
            if relation not in ['root', 'me', 'framer', 'frame', 'actor']:
                msg = "ParseError: Invalid relation '%s'" % (relation)
                raise excepting.ParseError(msg, tokens, index)
            if relation == 'root':
                relation = ''  # nothing gets prepended for root relative
            elif relation == 'me':
                pass  # do nothing
            if relation in ['framer']:  # may be optional name for framer
                name = ''  # default name is empty
                if index < len(tokens):  # more tokens to check for optional name
                    name = tokens[index]
                    if name not in Reserved:  # name given
                        index += 1  # eat token
                        if not REO_IdentPub.match(name):  # check if valid name
                            msg = "ParseError: Invalid relation %s name '%s'" %\
                                (relation, name)
                            raise excepting.ParseError(msg, tokens, index)
                    else:
                        name = ''
                if not name:  # no name given so substitute default
                    name = framername or 'me'
                relation += '.' + name  # append name
            if relation in ['frame']:  # may be optional name of frame
                name = ''  # default name is empty
                if index < len(tokens):  # more tokens to check for optional name
                    name = tokens[index]
                    if name not in Reserved:  # name given
                        index += 1  # eat token
                        if not REO_IdentPub.match(name):  # check if valid name
                            msg = "ParseError: Invalid relation %s name '%s'" %\
                                (relation, name)
                            raise excepting.ParseError(msg, tokens, index)
                    else:
                        name = ''
                if not name:  # no name given so substitute default
                    name = 'me'
                relation += '.' + name  # append name
                # parse optional of framer relation
                framername = ''
                if name == 'main':  # default framer for frame main is framer main
                    framername = 'main'
                framerRelation, index = self.parseRelation(tokens,
                                                           index,
                                                           framername=framername)
                # check if spurious, of frame or, of actor
                if (framerRelation and
                        ('.frame.' in framerRelation or
                         '.actor.' in framerRelation)):
                    msg = "ParseError: Invalid relation '%s' following frame relation" %\
                        (framerRelation)
                    raise excepting.ParseError(msg, tokens, index)
                if framerRelation:
                    relation = framerRelation + '.' + relation
                else:  # use default framer
                    framername = framername or 'me'
                    relation = ('framer.' + framername + '.' + relation)
            if relation in ['actor']:  # may be optional name of actor
                name = ''  # default name is empty
                if index < len(tokens):  # more tokens to check for optional name
                    name = tokens[index]
                    if name not in Reserved:  # name given
                        index += 1  # eat token
                        if not REO_IdentPub.match(name):  # check if valid name
                            msg = "ParseError: Invalid relation %s name '%s'" %\
                                (relation, name)
                            raise excepting.ParseError(msg, tokens, index)
                    else:
                        name = ''
                if not name:  # no name given so substitute default
                    name = 'me'
                relation += '.' + name  # append name
                # parse optional of frame and hence framer relation
                frameRelation, index = self.parseRelation(tokens, index)
                # check if spurious, of framer or, of actor
                if (frameRelation and
                        '.actor.' in frameRelation):
                    msg = "ParseError: Invalid relation '%s' following actor relation" %\
                        (frameRelation)
                    raise excepting.ParseError(msg, tokens, index)
                if frameRelation:
                    relation = frameRelation + '.' + relation
                else:  # use default frame and framer
                    relation = ('framer.' + 'me.' + 'frame.' + 'me' + '.' + relation)
    return (relation, index)
def parseComparisonOpt(self, tokens, index):
    """Parse an optional comparison token.

       Returns (comparison, index): the token (consumed) when it is a
       member of Comparisons, otherwise (None, index) untouched.
       Method must be wrapped in appropriate try excepts.
    """
    if index >= len(tokens):
        return (None, index)  # nothing left to examine
    candidate = tokens[index]
    if candidate not in Comparisons:
        return (None, index)  # next token is something else; leave it
    return (candidate, index + 1)  # consume the comparison token
def parseComparisonReq(self, tokens, index):
    """Parse a required comparison token.

       Returns (comparison, index) with the token consumed.
       Raises excepting.ParseError when the token is not in Comparisons.
       Method must be wrapped in appropriate try excepts.
    """
    candidate = tokens[index]
    index += 1  # consume the token before validating, as callers expect
    if candidate not in Comparisons:
        raise excepting.ParseError(
            "ParseError: Need has invalid comparison '%s'" % (candidate,),
            tokens, index)
    return (candidate, index)
def parseFramerState(self, tokens, index):
    """Parse framer state expression

       parms:
          tokens = list of tokens for command
          index = current index into tokens

       returns:
          (state, framer, index) -- state name (or None), framer name
          (or None), and new index

       method must be wrapped in appropriate try excepts

       Syntax:
          state re [(me, framername)]

       valid state only when encounter token 're' after first state
       parsing end conditions that signify no state
          if encounter before 're':
             no more tokens
             reserved token
             multiple states
    """
    indexSave = index  # save it since we lookahead to see if "re"
    states = []
    found = False  # tag to indicate found 're'
    framer = None
    while index < len(tokens):
        connective = tokens[index]
        if connective == 're':  # state list completed
            index += 1  # eat 're' token
            found = True
            break  # do not append state == 're' to states
        if connective in Reserved:  # field list not present
            break  # do not append state == reserved to states
        index += 1  # eat last state token
        state = StripQuotes(connective)  # candidate state since re of quotes ok
        states.append(state)  # save it
    if not found:  # no state clause 're'
        index = indexSave  # so restore index
        states = []  # empty states list
    # prevent using multiple states
    if (len(states) > 1):
        msg = "ParseError: More than one state = '%s'" % (states)
        raise excepting.ParseError(msg, tokens, index)
    if states:
        state = states[0]
        if not REO_IdentPub.match(state):
            msg = "ParseError: Invalid format of state '%s'" % (state)
            raise excepting.ParseError(msg, tokens, index)
    else:
        state = None
    if state is not None:  # get optional framer, default is current ('me')
        framer = 'me'
        while index < len(tokens):
            connective = tokens[index]
            if connective in Reserved:  # framer not present
                break
            framer = connective
            if not REO_IdentPub.match(framer):
                msg = "ParseError: Invalid format of framer name '%s'" % (framer)
                raise excepting.ParseError(msg, tokens, index)
            # only the current framer may be named for a state need
            if framer != 'me' and framer != self.currentFramer.name:
                msg = "ParseError: Framer name '%s' for state need not current framer" % (framer)
                raise excepting.ParseError(msg, tokens, index)
            index += 1
    return (state, framer, index)
def parseNeedState(self, tokens, index):
    """Parse required need state: optional '[field in]' clause followed by
       an indirect share path.

       Returns (statePath, stateField, index); stateField is None when no
       field clause was given.
       Method must be wrapped in appropriate try excepts.
    """
    stateField, index = self.parseField(tokens, index)
    statePath, index = self.parseIndirect(tokens, index)
    return (statePath, stateField, index)
def parseNeedGoal(self, statePath, stateField, tokens, index):
    """Parse required goal

       Returns (direct, goal, goalPath, goalField, index):
          direct    -- True when goal is a literal value token
          goal      -- the literal value (meaningful only when direct)
          goalPath  -- goal share dotpath (when indirect, else None)
          goalField -- goal share field (when indirect, else None)

       Method must be wrapped in appropriate try excepts.
    """
    goalPath = None  # default
    goalField = None  # default
    direct = False
    goal = tokens[index]
    # parse required goal
    try:
        goal = Convert2StrBoolCoordNum(tokens[index])  # quoted string, boolean, or number
        index += 1  # eat token
        direct = True
    except ValueError:  # text is not (quoted string, bool, or number) so indirect
        goalField, index = self.parseField(tokens, index)
        goalPath, index = self.parseIndirect(tokens, index)
    return (direct, goal, goalPath, goalField, index)
def parseFramerNeedGoal(self, statePath, stateField, tokens, index):
    """
    Parse required goal for special framer need such as
    elapsed or recurred

    Returns (direct, goal, goalPath, goalField, index) as parseNeedGoal
    does, with one extra form: the literal token 'goal' infers the goal
    share path from the framer state path (.state. becomes .goal.).

    method must be wrapped in appropriate try excepts
    """
    goalPath = None  # default
    goalField = None  # default
    direct = False
    goal = tokens[index]
    # parse required goal
    try:
        goal = Convert2StrBoolCoordNum(tokens[index])  # quoted string, boolean, or number
        index += 1  # eat token
        direct = True
    except ValueError:  # text is not (quoted string, bool, or number) so indirect
        if goal == 'goal':  # means goal inferred by relative statePath
            index += 1  # eat token
            # now create goal path as inferred from state path
            # check if statePath can be interpreted as framer state relative
            chunks = statePath.strip('.').split('.')
            try:
                if ((chunks[0] == 'framer') and
                        (chunks[2] == 'state')):  # framer relative
                    chunks[2] = 'goal'  # .framer.me.state becomes .framer.me.goal
                else:
                    msg = "ParseError: Goal = 'goal' without framer state path '%s'" %\
                        (statePath)
                    raise excepting.ParseError(msg, tokens, index)
            except IndexError:  # statePath too short to be framer relative
                msg = "ParseError: Goal = 'goal' without framer state path '%s'" %\
                    (statePath)
                raise excepting.ParseError(msg, tokens, index)
            goalPath = ".".join(chunks)
            goalField = stateField  # goal field is the same as the given state field
        else:  # not 'goal' so parse as indirect
            # is 'field in' clause present
            goalField, index = self.parseField(tokens, index)
            goalPath, index = self.parseIndirect(tokens, index)
    return (direct, goal, goalPath, goalField, index)
def parseTolerance(self, tokens, index):
    """Parse an optional tolerance clause: '+- number'.

       Returns (tolerance, index); tolerance defaults to 0 when no '+-'
       connective is present.
       Raises excepting.ParseError when '+-' is not followed by a value
       (previously this raised a bare IndexError) or the value is not
       numeric (Convert2Num raises ValueError for non-numbers).
       Method must be wrapped in appropriate try excepts.
    """
    tolerance = 0
    if index < len(tokens):  # at least one more token
        connective = tokens[index]
        if connective == '+-':  # valid tolerance connective
            index += 1  # eat connective token
            if index >= len(tokens):  # guard: '+-' with nothing after it
                msg = "ParseError: Need tolerance connective '+-' missing value"
                raise excepting.ParseError(msg, tokens, index)
            tolerance = tokens[index]  # get tolerance
            index += 1
            tolerance = Convert2Num(tolerance)  # convert to value; raises ValueError if not numeric
            if isinstance(tolerance, str):  # defensive; Convert2Num never returns str
                msg = "ParseError: Need has invalid tolerance '%s'" % (tolerance)
                raise excepting.ParseError(msg, tokens, index)
    return (tolerance, index)
def prepareSrcDstFields(self, src, srcFields, dst, dstFields, tokens, index):
    """
    Prepares and verifys a transfer of data
       from sourceFields in source
       to dstFields in dst
    Handles default conditions when fields are empty

    src and dst are shares
    fields are lists

    Returns (srcFields, dstFields) with defaults filled in; creates any
    missing fields (set to None) in both shares, warning when it does.
    Raises excepting.ParseError when field counts or the 'value'-only
    invariant would be violated.

    Ensure Actor._prepareSrcDstFields is the same
    """
    if not srcFields:  # no source fields so assign defaults
        if src:
            if 'value' in src:
                srcFields = ['value']  # use value field
            elif dstFields:  # use destination fields for source fields
                srcFields = dstFields
            else:  # use pre-existing source fields
                srcFields = src.keys()
            #else: #ambiguous multiple source fields
                #msg = "ParseError: Can't determine source field"
                #raise excepting.ParseError(msg, tokens, index)
        else:
            srcFields = ['value']  # use value field
    self.verifyShareFields(src, srcFields, tokens, index)
    if not dstFields:  # no destination fields so assign defaults
        if 'value' in dst:
            dstFields = ['value']  # use value field
        else:  # use source fields for destination fields
            dstFields = srcFields
    self.verifyShareFields(dst, dstFields, tokens, index)
    if len(srcFields) != len(dstFields):
        msg = "ParseError: Unequal number of source %s and destination %s fields" %\
            (srcFields, dstFields)
        raise excepting.ParseError(msg, tokens, index)
    # warn (not error) on pairwise name mismatches other than 'value'
    for dstField, srcField in izip(dstFields, srcFields):
        if (dstField != srcField) and (srcField != 'value'):
            console.profuse("     Warning: Field names mismatch. '{0}' in {1} "
                            "from '{2}' in {3}  ... creating anyway".format(
                                dstField, dst.name, srcField, src.name))
    # create any non existent source or destination fields
    for field in srcFields:  # use source fields for source data
        if field not in src:
            console.profuse("     Warning: Transfer from non-existent field '{0}' "
                            "in share {1} ... creating anyway".format(field, src.name))
            src[field] = None  # create
    for field in dstFields:  # use destination fields for destination data
        if field not in dst:
            console.profuse("     Warning: Transfer into non-existent field '{0}' "
                            "in share {1} ... creating anyway\n".format(field, dst.name))
            dst[field] = None  # create
    return (srcFields, dstFields)
def prepareDataDstFields(self, data, dataFields, dst, dstFields, tokens, index):
    """
    Prepares and verifys a transfer of data
       from dataFields in data
       to dstFields in dst
    Handles default conditions when fields are empty

    data is dict
    dst is share
    fields are lists

    Returns (dataFields, dstFields) with destination defaults filled in;
    creates any missing destination fields (set to None), warning when
    it does. Raises excepting.ParseError on unequal field counts or a
    'value'-only invariant violation.

    Ensure Actor._prepareDstFields is similar
    """
    if not dstFields:  # no destinationField so use default rules
        if 'value' in dst:
            dstFields = ['value']  # use value field
        else:  # use dataField
            dstFields = dataFields
    self.verifyShareFields(dst, dstFields, tokens, index)
    if len(dataFields) != len(dstFields):
        msg = "ParseError: Unequal number of source %s and destination %s fields" %\
            (dataFields, dstFields)
        raise excepting.ParseError(msg, tokens, index)
    # warn (not error) on pairwise name mismatches other than 'value'
    for dstField, dataField in izip(dstFields, dataFields):
        if (dstField != dataField) and (dataField != 'value'):
            console.profuse("     Warning: Field names mismatch. '{0}' in {1} "
                            "from '{2}'  ... creating anyway".format(
                                dstField, dst.name, dataField))
    # create any non existent destination fields
    for field in dstFields:  # use destination fields for destination data
        if field not in dst:
            console.profuse("     Warning: Transfer into non-existent field '{0}' in "
                            "share {1} ... creating anyway\n".format(field, dst.name))
            dst[field] = None  # create
    return (dataFields, dstFields)
def verifyShareFields(self, share, fields, tokens, index):
    """
    Verify that updating fields in share won't violate the
    condition that when a share has field == 'value'
    it will be the only field

    fields is list of field names
    share is share

    raises excepting.ParseError if condition would be violated;
    returns None otherwise

    Ensure Actor._verifyShareFields is same
    """
    if (len(fields) > 1) and ('value' in fields):
        msg = "ParseError: Field = 'value' within fields = '%s'" % (fields)
        raise excepting.ParseError(msg, tokens, index)
    if share:  # does share have fields already
        for field in fields:
            if field not in share:  # so this field could be added to share
                # adding is illegal if share already holds 'value' or the
                # newcomer itself is 'value' next to existing fields
                if ('value' in share) or (field == 'value'):
                    msg = "ParseError: Candidate field '%s' when fields = '%s' exist" %\
                        (field, share.keys())
                    raise excepting.ParseError(msg, tokens, index)
    return
def validShareFields(self, share, fields):
    """Check whether writing fields into share keeps the invariant that
       a 'value' field is always a share's only field.

       share  -- share (mapping) whose existing fields are inspected
       fields -- list of candidate field names

       Returns True when the update is permissible, False otherwise.
    """
    if ('value' in fields) and (len(fields) > 1):
        return False  # 'value' may never appear alongside other fields
    if not share:
        return True  # an empty share accepts any valid field list
    for candidate in fields:
        if candidate in share:
            continue  # already present; invariant unchanged
        # adding a new field is illegal if the share already holds
        # 'value', or the newcomer itself is 'value' next to others
        if ('value' in share) or (candidate == 'value'):
            return False
    return True
def verifyCurrentContext(self, tokens, index):
    """Verify that the parse context has a current store, framer, and
       frame; raise excepting.ParseError naming the missing one otherwise.
    """
    requirements = (
        (self.currentStore, "store"),
        (self.currentFramer, "framer"),
        (self.currentFrame, "frame"),
    )
    for present, label in requirements:
        if not present:
            msg = "ParseError: Building verb '%s'. No current %s" % (tokens, label)
            raise excepting.ParseError(msg, tokens, index)
    return
def verifyName(self, name, command, tokens, index):
    """Verify that name is a valid public identifyer and not reserved.
       Used for Tasker, Framer, and Frame names

       Raises excepting.ParseError on an invalid name; returns None.
    """
    if not REO_IdentPub.match(name) or name in Reserved:  # bad name
        msg = "ParseError: Building verb '%s'. Invalid entity name '%s'" %\
            (command, name)
        raise excepting.ParseError(msg, tokens, index)
#------------------------
def DebugShareFields(store, name):
    """ prints out fields of share named name from store for debugging """
    share = store.fetch(name)
    if share is not None:
        # Fix: call items() -- formatting the bare 'share.items' printed the
        # bound method's repr instead of the field pairs.
        # NOTE(review): assumes Share.items is a dict-like method -- confirm.
        console.terse("++++++++ Debug share fields++++++++\n{0} = {1}\n".format(
            share.name, share.items()))
def Test(fileName = None, verbose = False):
    """Module self test

       Builds the houses from fileName (default "mission.txt"), starts all
       active framers and taskers, then runs framers to completion in
       0.125s store-time steps. Returns the Builder instance.

       NOTE(review): uses legacy flat absolute imports (Python 2 era); the
       local 'import logging' intentionally refers to the package's logging
       module, not the stdlib one, when run from within the package --
       confirm before running standalone. 'serving' in allModules comes
       from the module-level import.
    """
    import globaling
    import aiding
    import excepting
    import registering
    import storing
    import skedding
    import tasking
    import acting
    import poking
    import needing
    import goaling
    import traiting
    import fiating
    import wanting
    import completing
    import doing
    import arbiting
    import controlling
    import framing
    import logging
    import interfacing
    import housing
    #import building
    import monitoring
    import testing
    allModules = [globaling, aiding, excepting, registering, storing, skedding,
                  acting, poking, goaling, needing, traiting,
                  fiating, wanting, completing,
                  doing, arbiting, controlling,
                  tasking, framing, logging, interfacing, serving,
                  housing, monitoring, testing]
    if not fileName:
        fileName = "mission.txt"
    b = Builder()
    if b.build(fileName = fileName):
        houses = b.houses
        for house in houses:
            house.store.changeStamp(0.0)
            for framer in house.actives:
                status = framer.runner.send(START)
            for tasker in house.taskers:
                status = tasker.runner.send(START)  # prepares logs and reopens files
        done = False
        while not done:
            done = True
            for house in houses:
                actives = []
                for framer in house.actives:
                    #status = framer.status
                    # a framer's desire (e.g. STOP) overrides the default RUN
                    desire = framer.desire
                    if desire is not None:
                        control = desire
                    else:
                        control = RUN
                    status = framer.runner.send(control)
                    console.terse("Framer {0} control {1} resulting status = {2}\n".format(
                        framer.name, ControlNames[control], StatusNames[status]))
                    if not (status == STOPPED or status == ABORTED):
                        actives.append(framer)
                        done = False  # keep looping while any framer is live
                house.actives = actives
                for tasker in house.taskers:
                    status = tasker.runner.send(RUN)
                house.store.advanceStamp(0.125)
        for house in houses:
            for tasker in house.taskers:
                status = tasker.runner.send(STOP)  # closes files
    return b
if __name__ == "__main__":  # run module self test when executed directly
    Test()
"""building.py build frameworks from mission files
"""
from __future__ import division
#print("module {0}".format(__name__))
import time
import re
import importlib
import os
from collections import deque
try:
from itertools import izip
except ImportError: #python 3 zip is same as izip
izip = zip
from ..aid.sixing import *
from ..aid.odicting import odict
from .globaling import *
from . import excepting
from . import registering
from . import storing
from . import housing
from . import acting
from . import poking
from . import needing
from . import goaling
from . import doing
from . import traiting
from . import fiating
from . import wanting
from . import completing
from . import tasking
from . import framing
from . import logging
from . import serving
from .. import trim
from ..aid.consoling import getConsole
console = getConsole()
from ..trim import exterior
def Convert2Num(text):
    """Convert text to a Python number, trying in order:
       decimal int, hex int, float, complex.

       Raises ValueError when no conversion succeeds.
    """
    converters = (
        lambda t: int(t, 10),  # decimal integer
        lambda t: int(t, 16),  # hexadecimal integer (with or without 0x)
        float,
        complex,
    )
    for convert in converters:
        try:
            return convert(text)
        except ValueError:
            continue  # try the next, more permissive conversion
    raise ValueError("Expected Number got '{0}'".format(text))
def Convert2CoordNum(text):
    """Convert text to a fractional-degree coordinate or a number.

       Degree-minute north/east forms yield positive fractional degrees,
       south/west forms negative; anything else falls back to Convert2Num.
       Raises ValueError when nothing matches.
    """
    for reo, sign in ((REO_LatLonNE, 1.0), (REO_LatLonSW, -1.0)):
        groups = reo.findall(text)  # list of (degrees, minutes) tuples
        if groups:
            degrees, minutes = groups[0]
            return sign * (float(degrees) + float(minutes) / 60.0)
    try:
        return (Convert2Num(text))
    except ValueError:
        raise ValueError("Expected CoordPointNum got '{0}'".format(text))
def Convert2BoolCoordNum(text):
    """Convert text (case-insensitive) to None, a boolean, a fractional
       degree coordinate, or a number, in that order.

       'none' -> None; 'true'/'yes' -> True; 'false'/'no' -> False;
       otherwise delegates to Convert2CoordNum.
       Raises ValueError when nothing matches.

       Fix: removed the unreachable trailing 'return None' -- the try/except
       above it always returns or raises.
    """
    lowered = text.lower()
    if lowered == 'none':
        return None
    if lowered in ('true', 'yes'):
        return True
    if lowered in ('false', 'no'):
        return False
    try:
        return (Convert2CoordNum(text))
    except ValueError:
        raise ValueError("Expected BoolCoordPointNum got '{0}'".format(text))
def Convert2StrBoolCoordNum(text):
    """Convert text to the contents of a quoted string, a boolean, a
       coordinate, or a number, in that order.

       Need goal wants a unitary type, not a path or point, hence no
       point/path forms here.
       Raises ValueError when nothing matches.

       Fix: removed the unreachable trailing 'return None' -- the try/except
       above it always returns or raises.
    """
    if REO_Quoted.match(text):  # text is double quoted string
        return text.strip('"')  # strip off quotes
    if REO_QuotedSingle.match(text):  # text is single quoted string
        return text.strip("'")  # strip off quotes
    try:
        return (Convert2BoolCoordNum(text))
    except ValueError:
        raise ValueError("Expected StrBoolCoordNum got '{0}'".format(text))
def Convert2PointNum(text):
    """
    Convert text to a python value, trying in order:
    Pxy, Pne, Pfs, Pxyz, Pned, Pfsb point forms, then int, hex, float,
    complex via Convert2Num.

    Raises ValueError if text matches none of these.
    """
    # (regex, point class, keyword field names) for each point form,
    # tried in the same order as before
    pointForms = (
        (REO_PointXY, Pxy, ('x', 'y')),
        (REO_PointNE, Pne, ('n', 'e')),
        (REO_PointFS, Pfs, ('f', 's')),
        (REO_PointXYZ, Pxyz, ('x', 'y', 'z')),
        (REO_PointNED, Pned, ('n', 'e', 'd')),
        (REO_PointFSB, Pfsb, ('f', 's', 'b')),
    )
    for reo, pointClass, fieldNames in pointForms:
        found = reo.findall(text)
        if found:
            kwa = dict(zip(fieldNames, (float(part) for part in found[0])))
            return pointClass(**kwa)
    try:
        return Convert2Num(text)
    except ValueError:
        raise ValueError("Expected PointNum got '{0}'".format(text))
def Convert2CoordPointNum(text):
    """
    Convert text to a python value, trying in order:
    fractional-degree coordinate (N/E positive, S/W negative), then the
    point and number forms of Convert2PointNum.

    Raises ValueError if text matches none of these.
    """
    # try each hemisphere pattern with its sign; matches yield
    # [(deg, min)] group tuples
    for reo, sign in ((REO_LatLonNE, 1.0), (REO_LatLonSW, -1.0)):
        dm = reo.findall(text)
        if dm:
            degrees = float(dm[0][0])
            minutes = float(dm[0][1])
            return sign * (degrees + minutes / 60.0)
    try:
        return Convert2PointNum(text)
    except ValueError:
        raise ValueError("Expected CoordPointNum got '{0}'".format(text))
def Convert2BoolCoordPointNum(text):
    """
    Convert text to a python value, trying in order:
    None, boolean (true/yes, false/no, case-insensitive), then the
    coordinate, point, and number forms of Convert2CoordPointNum.

    Raises ValueError if text matches none of these.
    """
    low = text.lower()
    #convert to None if possible
    if low == 'none':
        return None
    #convert to boolean if possible
    if low in ('true', 'yes'):
        return True
    if low in ('false', 'no'):
        return False
    try:
        return Convert2CoordPointNum(text)
    except ValueError:
        # removed unreachable trailing 'return None' after this raise
        raise ValueError("Expected BoolCoordPointNum got '{0}'".format(text))
def Convert2PathCoordPointNum(text):
    """
    Convert text to a python value, trying in order:
    store path string (returned unchanged), then the coordinate, point,
    and number forms of Convert2CoordPointNum.

    Raises ValueError if text matches none of these.
    """
    #convert to path string if possible
    if REO_PathNode.match(text):
        return text
    try:
        return Convert2CoordPointNum(text)
    except ValueError:
        # removed unreachable trailing 'return None' after this raise;
        # docstring fixed (it previously omitted the path form)
        raise ValueError("Expected PathCoordPointNum got '{0}'".format(text))
def Convert2BoolPathCoordPointNum(text):
    """
    Convert text to a python value, trying in order:
    None, boolean (true/yes, false/no, case-insensitive), then the path,
    coordinate, point, and number forms of Convert2PathCoordPointNum.

    Raises ValueError if text matches none of these.
    """
    low = text.lower()
    #convert to None if possible
    if low == 'none':
        return None
    #convert to boolean if possible
    if low in ('true', 'yes'):
        return True
    if low in ('false', 'no'):
        return False
    try:
        return Convert2PathCoordPointNum(text)
    except ValueError:
        # fixed message: previously "PathBoolCoordPointNum", inconsistent
        # with the function name; removed unreachable trailing return
        raise ValueError("Expected BoolPathCoordPointNum got '{0}'".format(text))
def Convert2StrBoolPathCoordPointNum(text):
    """
    Convert text to a python value, trying in order:
    quoted string (quotes stripped), then None/boolean/path/coordinate/
    point/number forms via Convert2BoolPathCoordPointNum.

    Raises ValueError if text matches none of these.
    """
    if REO_Quoted.match(text): #text is double quoted string
        return text.strip('"') #strip off quotes
    if REO_QuotedSingle.match(text): #text is single quoted string
        return text.strip("'") #strip off quotes
    try:
        return Convert2BoolPathCoordPointNum(text)
    except ValueError:
        # removed unreachable trailing 'return None' after this raise
        raise ValueError("Expected StrBoolPathCoordPointNum got '{0}'".format(text))
def StripQuotes(text):
    """
    Return text with its surrounding quotes (single or double) stripped
    off when it is a quoted string, otherwise return text unchanged.
    """
    for reo, quote in ((REO_Quoted, '"'), (REO_QuotedSingle, "'")):
        if reo.match(text):  # fully quoted with this quote character
            return text.strip(quote)
    return text
# Command verbs recognized by the mission-script parser; each verb 'xxx'
# is dispatched to a Builder.buildXxx method (see Builder.dispatch),
# falling back to buildGeneric when no such method exists
VerbList = ['load', 'house', 'init',
            'server',
            'logger', 'log', 'loggee',
            'framer', 'first',
            'frame', 'over', 'under', 'next', 'done', 'timeout', 'repeat',
            'native', 'benter', 'enter', 'recur', 'exit', 'precur', 'renter', 'rexit',
            'print', 'put', 'inc', 'copy', 'set',
            'aux', 'rear', 'raze',
            'go', 'let',
            'do',
            'bid', 'ready', 'start', 'stop', 'run', 'abort',
            'use', 'flo', 'give', 'take' ]
#reserved tokens
# comparison operator tokens used in conditional clauses
Comparisons = ['==', '<', '<=', '>=', '>', '!=']
# connective (preposition-like) tokens that join clauses within a command;
# a line starting with one of these continues the previous command
Connectives = ['to', 'by', 'with', 'from', 'per', 'for', 'cum', 'qua', 'via',
               'as', 'at', 'in', 'of', 'on', 're', 'is',
               'if', 'be', 'into', 'and', 'not', '+-', ]
Reserved = Connectives + Comparisons #concatenate to get reserved words
ReservedFrameNames = ['next', 'prev'] # frame names with special meaning as target of goto
class Builder(object):
"""
"""
    def __init__(self, fileName='', mode=None, metas=None, preloads=None, behaviors=None):
        """
        Initialize builder parsing state.

        Parameters:
            fileName -- path of the first mission script file to build from
            mode -- list of mode strings
            metas -- list of (name, path, data) triples of meta data to init
                     into each house's store
            preloads -- list of (path, data) pairs preloaded into each
                        house's store
            behaviors -- list of behavior package/module names to import
                         before building
        """
        self.fileName = fileName #initial name of file to start building from
        self.mode = mode or []
        self.metas = metas or []
        self.preloads = preloads or []
        self.behaviors = behaviors or []
        self.files = [] #list of open file objects, appended to by load commands
        self.counts = [] #list of linectr s for open file objects
        self.houses = [] #list of houses
        self.currentFile = None # file object currently being parsed
        self.currentCount = 0 # line counter for current file
        self.currentHuman = '' # human friendly version of current line
        self.currentMode = None # None is any
        self.currentHouse = None # house that new taskers/framers attach to
        self.currentStore = None # data store of current house
        self.currentLogger = None # logger that new logs attach to
        self.currentLog = None # log that new loggees attach to
        self.currentFramer = None # framer that new frames attach to
        self.currentFrame = None # current frame
        self.currentContext = NATIVE
    def tokenize(self, line):
        """
        Parse line and read and parse continuation lines if any and return tokens list.

        Joins backslash-escaped continuation lines into one logical line,
        echoes the joined view to the console, and splits it into chunks,
        discarding everything from the first '#' comment chunk onward.

        Side effects: reads further lines from self.currentFile and
        increments self.currentCount while consuming continuations.
        """
        saveLines = []
        saveLineViews = []
        while line.endswith('\\\n'): # escaped newline continuation
            line = line.rstrip()
            saveLineViews.append("%04d %s" % (self.currentCount, line))
            saveLines.append(line.rstrip('\\').strip())
            line = self.currentFile.readline() #empty if end of file
            self.currentCount += 1 #inc line counter
        # process last line read as either only line or continuation line
        line = line.rstrip()
        saveLineViews.append("%04d %s" % (self.currentCount, line))
        saveLines.append(line)
        # join all saved into one line
        lineView = "\n".join(saveLineViews)
        line = " ".join(saveLines)
        console.concise(lineView + '\n')
        line = line.strip() #strips white space both ends
        chunks = REO_Chunks.findall(line) # also chunks trailing comments
        tokens = []
        for chunk in chunks:
            if chunk[0] == '#': # throw away chunk as comment
                break # everything after '#' is comment too
            else:
                tokens.append(chunk)
        return tokens
def build(self, fileName='', mode=None, metas=None, preloads=None, behaviors=None):
"""
Allows building from multiple files. Essentially files list is stack of files
fileName is name of first file. Load commands in any files push (append) file onto files
until file completed loaded and then popped off
Each house's store is inited with the meta data in metas
"""
#overwrite default if truthy argument
if fileName:
self.fileName = fileName
if mode:
self.mode.extend[mode]
if metas:
self.metas.extend[metas]
if preloads:
self.preloads.extend[preloads]
if behaviors:
self.behaviors.extend[behaviors]
if self.behaviors: #import behavior package/module
for behavior in self.behaviors:
mod = importlib.import_module(behavior)
housing.House.Clear() #clear house registry
housing.ClearRegistries() #clear all the other registries
try: #IOError
self.fileName = os.path.abspath(self.fileName)
self.currentFile = open(self.fileName,"r")
self.currentCount = 0
try: #ResolveError
while self.currentFile:
line = self.currentFile.readline() # empty if end of file
self.currentCount += 1 # inc line counter
nextTokens = [] # for connective continuation
while (line):
if nextTokens: # parsed ahead but not continuation
tokens = nextTokens
nextTokens = []
else:
tokens = self.tokenize(line) # line and any continuations
if (not tokens): #empty line or comment only
line = self.currentFile.readline() # empty if end of file
self.currentCount += 1 # inc line counter
continue # guarantees at least 1 token
# verbs like load which change file context can not be continued
if tokens[0] not in ('load'): # verb allows connective continuation
while True: # iteratively attempt connective continuation
# Connective continuation
# adds lines that start with connective
# skips empty or comment lines
# stops on line starting with non connective verb
line = self.currentFile.readline() # empty if end of file
self.currentCount += 1 # inc line counter
if not line: # end of file
break
nextTokens = self.tokenize(line) # parse ahead
if nextTokens and nextTokens[0] not in Reserved: # not connective
break # do not continue
if nextTokens:
tokens.extend(nextTokens) # add continuation
nextTokens = []
self.currentHuman = ' '.join(tokens)
try: #ParseError ParseWarning
if not self.dispatch(tokens): # catches dispatches the return unexpectedly
console.terse("Script Parsing stopped at line {0} in file {1}\n".format(
self.currentCount, self.currentFile.name))
console.terse(self.currentHuman + '\n')
return False
except excepting.ParseError as ex:
console.terse("\n{0}\n\n".format(ex))
console.terse("Script line {0} in file {1}\n".format(
self.currentCount, self.currentFile.name))
console.terse(self.currentHuman + '\n')
raise
#dispatch evals commands. self.currentFile may be changed by load command
if not nextTokens:
line = self.currentFile.readline() #empty if end of file
self.currentCount += 1 #inc line counter
self.currentFile.close()
if self.files:
self.currentFile = self.files.pop()
self.currentCount = self.counts.pop()
console.terse("Resume loading from file {0}.\n".format(self.currentFile.name))
else:
self.currentFile = None
#building done so now resolve links and collect actives inactives
for house in self.houses:
house.orderTaskables()
house.resolve()
if console._verbosity >= console.Wordage.concise:
house.showAllTaskers()
#show framework hierarchiy
for framer in house.framers:
framer.showHierarchy()
#show hierarchy of each house's store
console.concise( "\nData Store for {0}\n".format(house.name))
house.store.expose(valued=(console._verbosity >= console.Wordage.terse))
return True
except excepting.ResolveError as ex:
console.terse("{0}\n".format(ex))
return False
except IOError as ex:
console.terse("Error opening mission file {0}\n".format(ex))
return False
finally:
for f in self.files:
if not f.closed:
f.close()
def dispatch(self, tokens):
"""
Converts declaration verb into build method name and calls it
"""
verb = tokens[0]
index = 1
if verb not in VerbList:
msg = "ParseError: Building {0}. Unknown verb {1}, index = {2} tokens = {3}".format(
verb, verb, index, tokens)
raise excepting.ParseError(msg, tokens, index)
verbMethod = 'build' + verb.capitalize()
if hasattr(self, verbMethod):
return(getattr(self, verbMethod )(verb, tokens, index))
else:
return self.buildGeneric(verb, tokens, index)
def buildGeneric(self, verb, tokens, index):
"""
Called when no build method exists for a verb
"""
msg = "ParseError: No build method for verb {0}.".format(verb)
raise excepting.ParseError(msg, tokens, index)
    def buildLoad(self, command, tokens, index):
        """
        load filepathname

        Push the current file and its line count onto the file stack and
        switch parsing to the named file. A relative filepathname is
        resolved relative to the directory of the current file.
        """
        try:
            name = tokens[index]
            index +=1
            self.files.append(self.currentFile) #push currentFile
            self.counts.append(self.currentCount) #push current line ct
            cwd = os.getcwd() #save current working directory
            os.chdir(os.path.split(self.currentFile.name)[0]) # set cwd to current file
            name = os.path.abspath(os.path.expanduser(name)) # resolve name if relpath to cwd
            os.chdir(cwd) #restore old cwd
            self.currentFile = open(name,"r")
            self.currentCount = 0 # restart line count for the new file
            console.terse("Loading from file {0}.\n".format(self.currentFile.name))
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        return True
#House specific builders
    def buildHouse(self, command, tokens, index):
        """Create a new house and make it the current one
           house dreams

           Also creates the house's store, assigns registries, resets the
           current framer/frame/logger/log, and inits the store with the
           builder's metas and preloads.
        """
        try:
            name = tokens[index]
            index +=1
            self.verifyName(name, command, tokens, index)
            self.currentHouse = housing.House(name = name) #also creates .store
            self.houses.append(self.currentHouse)
            self.currentStore = self.currentHouse.store
            console.terse("    Created House '{0}'. Assigning registries and "
                          "creating instances ...\n".format(name))
            self.currentHouse.assignRegistries()
            console.profuse("    Clearing current Framer, Frame, Log etc.\n")
            #changed store so need to make new frameworks and frames
            self.currentFramer = None #current framer
            self.currentFrame = None #current frame
            self.currentLogger = None #current logger
            self.currentLog = None #current log
            #meta data in metas is list of triples of (name, path, data)
            for name, path, data in self.metas:
                self.currentHouse.metas[name] = self.initPathToData(path, data)
            # set .meta.house to house.name
            self.currentHouse.metas['house'] = self.initPathToData('.meta.house',
                                                                   odict(value=self.currentHouse.name))
            for path, data in self.preloads:
                self.initPathToData(path, data)
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)
        # report the meta and preload shares actually built
        msg = "    Built House '{0}' with meta:\n".format(self.currentHouse.name)
        for name, share in self.currentHouse.metas.items():
            msg += "        {0}: {1!r}\n".format(name, share)
        console.terse(msg)
        msg = "    Built House '{0}' with preload:\n".format(self.currentHouse.name)
        for path, data in self.preloads:
            share = self.currentHouse.store.fetch(path)
            msg += "        {0}: {1!r}\n".format(path, share)
        console.terse(msg)
        return True
# Convenience Functions
def initPathToData(self, path, data):
"""Convenience support function to preload meta data.
Initialize share given by path with data.
Assumes self.currentStore is valid
path is share path string
data is ordered dict of data
"""
share = self.currentStore.create(path)
self.verifyShareFields(share, data.keys(), None, None)
share.update(data)
return share
#Store specific builders
    def buildInit(self, command, tokens, index):
        """Initialize share in current store

           init destination with data
           init indirect from source

           destination:
              absolute
              path

           data:
              direct

           indirect:
              [(value, fields) in] absolute
              [(value, fields) in] path

           source:
              [(value, fields) in] absolute
              [(value, fields) in] path
        """
        if not self.currentStore:
            msg = "ParseError: Building verb '%s'. No current store" % (command)
            raise excepting.ParseError(msg, tokens, index)
        try:
            destinationFields, index = self.parseFields(tokens, index)
            destinationPath, index = self.parsePath(tokens, index)
            if self.currentStore.fetchShare(destinationPath) is None:
                console.terse("     Warning: Init of non-preexistent share {0} ..."
                              " creating anyway\n".format(destinationPath))
            destination = self.currentStore.create(destinationPath)
            connective = tokens[index]
            index += 1
            if connective in ('with', 'to'): # to form deprecated eventually remove
                if connective == 'to':
                    console.terse("Warning: Connective 'to' in 'init' verb depricated. Use 'with' instead.\n")
                if destinationFields: #fields not allowed so error
                    msg = "ParseError: Building verb '%s'. Unexpected fields '%s in' clause " %\
                        (command, destinationFields)
                    raise excepting.ParseError(msg, tokens, index)
                data, index = self.parseDirect(tokens, index)
                #prevent init value and non value fields in same share
                self.verifyShareFields(destination, data.keys(), tokens, index)
                destination.update(data)
                console.profuse("     Inited share {0} to data = {1}\n".format(destination.name, data))
            elif connective in ('from', ):
                # indirect form: copy selected fields from a source share
                sourceFields, index = self.parseFields(tokens, index)
                sourcePath, index = self.parsePath(tokens, index)
                source = self.currentStore.fetchShare(sourcePath)
                if source is None:
                    msg = "ParseError: Building verb '%s'. Nonexistent source share '%s'" %\
                        (command, sourcePath)
                    raise excepting.ParseError(msg, tokens, index)
                # reconcile explicit/implicit field lists on both sides
                sourceFields, destinationFields = self.prepareSrcDstFields(source,
                                                                           sourceFields,
                                                                           destination,
                                                                           destinationFields,
                                                                           tokens,
                                                                           index)
                data = odict()
                for sf, df in izip(sourceFields, destinationFields):
                    data[df] = source[sf]
                destination.update(data)
                msg = "     Inited share {0} from source {1} with data = {2}\n".format(
                    destination.name, source.name, data)
                console.profuse(msg)
            else:
                msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        return True
    def buildServer(self, command, tokens, index):
        """create server tasker in current house
           server has to have name so can ask stop

           server name [at period] [be scheduled]
                  [rx shost:sport] [tx dhost:dport] [in order] [to prefix] [per data]
                  [for source]

           scheduled: (active, inactive, slave)
           rx:
              (host:port, :port, host:, host, :)
           tx:
              (host:port, :port, host:, host, :)
           order:
              (front, mid, back)
           prefix
              filepath
           data:
              direct
           source:
              [(value, fields) in] indirect
        """
        if not self.currentHouse:
            msg = "ParseError: Building verb '%s'. No current house" % (command)
            raise excepting.ParseError(msg, tokens, index)
        if not self.currentStore:
            msg = "ParseError: Building verb '%s'. No current store" % (command)
            raise excepting.ParseError(msg, tokens, index)
        try:
            # option defaults, overridden by connective clauses below
            parms = {}
            init = {}
            name = ''
            connective = None
            period = 0.0
            prefix = './'
            schedule = ACTIVE #globaling.py
            order = MID #globaling.py
            rxa = ''
            txa = ''
            sha = ('', 54321) #empty host means any interface on local host
            dha = ('localhost', 54321)
            name = tokens[index]
            index +=1
            while index < len(tokens): #options
                connective = tokens[index]
                index += 1
                if connective == 'at':
                    period = abs(Convert2Num(tokens[index]))
                    index +=1
                elif connective == 'to':
                    prefix = tokens[index]
                    index +=1
                elif connective == 'be':
                    option = tokens[index]
                    index +=1
                    if option not in ['active', 'inactive', 'slave']:
                        msg = "ParseError: Building verb '%s'. Bad server scheduled option got %s" % \
                            (command, option)
                        raise excepting.ParseError(msg, tokens, index)
                    schedule = ScheduleValues[option] #replace text with value
                elif connective == 'in':
                    order = tokens[index]
                    index +=1
                    if order not in OrderValues:
                        msg = "ParseError: Building verb '%s'. Bad order option got %s" % \
                            (command, order)
                        raise excepting.ParseError(msg, tokens, index)
                    order = OrderValues[order] #convert to order value
                elif connective == 'rx':
                    rxa = tokens[index] # receive address text, split below
                    index += 1
                elif connective == 'tx':
                    txa = tokens[index] # transmit address text, split below
                    index += 1
                elif connective == 'per':
                    data, index = self.parseDirect(tokens, index)
                    init.update(data)
                elif connective == 'for':
                    srcFields, index = self.parseFields(tokens, index)
                    srcPath, index = self.parsePath(tokens, index)
                    if self.currentStore.fetchShare(srcPath) is None:
                        console.terse("     Warning: Init 'with' non-existent share {0}"
                                      " ... creating anyway".format(srcPath))
                    src = self.currentStore.create(srcPath)
                    #assumes src share inited before this line parsed
                    for field in srcFields:
                        init[field] = src[field]
                else:
                    msg = "ParseError: Building verb '%s'. Bad connective got %s" % \
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        prefix += '/' + self.currentHouse.name #extra slashes are ignored
        # split "host:port" style addresses; bare host keeps default port
        if rxa:
            if ':' in rxa:
                host, port = rxa.split(':')
                sha = (host, int(port))
            else:
                sha = (rxa, sha[1])
        if txa:
            if ':' in txa:
                host, port = txa.split(':')
                dha = (host, int(port))
            else:
                dha = (txa, dha[1])
        server = serving.Server(name=name, store = self.currentStore,)
        kw = dict(period=period, schedule=schedule, sha=sha, dha=dha, prefix=prefix,)
        kw.update(init)
        server.reinit(**kw)
        self.currentHouse.taskers.append(server)
        if schedule == SLAVE:
            self.currentHouse.slaves.append(server)
        else: #taskable active or inactive
            if order == FRONT:
                self.currentHouse.fronts.append(server)
            elif order == BACK:
                self.currentHouse.backs.append(server)
            else:
                self.currentHouse.mids.append(server)
        # NOTE(review): format skips field {1}; name == server.name here
        msg = "     Created server named {0} at period {2:0.4f} be {3}\n".format(
            server.name, name, server.period, ScheduleNames[server.schedule])
        console.profuse(msg)
        return True
#Logger specific builders
    def buildLogger(self, command, tokens, index):
        """
        Create logger in current house

        logger logname [to prefix] [at period] [be scheduled]
               [flush interval] [keep copies] [cycle term] [size bytes]

        scheduled: (active, inactive, slave)
        period seconds
        interval seconds
        term seconds
        copies integer
        bytes bytes

        logger basic at 0.125
        logger basic

        The new logger becomes self.currentLogger so following 'log'
        commands attach to it.
        """
        if not self.currentHouse:
            msg = "ParseError: Building verb '{0}'. No current house.".format(
                command, index, tokens)
            raise excepting.ParseError(msg, tokens, index)
        if not self.currentStore:
            msg = "ParseError: Building verb '{0}'. No current store.".format(
                command, index, tokens)
            raise excepting.ParseError(msg, tokens, index)
        try:
            name = tokens[index]
            index +=1
            # option defaults, overridden by connective clauses below
            period = 0.0 #default
            schedule = ACTIVE #globaling.py
            order = MID #globaling.py
            interval = 30.0
            prefix = './'
            keep = 0
            term = 3600.0
            size = 1024 # default rotate size is 1024 bytes = 1KB
            reuse = False # non-unique logger directory name if True
            while index < len(tokens): #options
                connective = tokens[index]
                index += 1
                if connective == 'at':
                    period = abs(Convert2Num(tokens[index]))
                    index +=1
                elif connective == 'to': # base directory path for log files
                    prefix = tokens[index] # house name is post pended as sub directory
                    index +=1
                elif connective == 'be':
                    option = tokens[index]
                    index +=1
                    if option not in ['active', 'inactive', 'slave']:
                        msg = "Error building %s. Bad logger scheduled option got %s." %\
                            (command, option)
                        raise excepting.ParseError(msg, tokens, index)
                    schedule = ScheduleValues[option] #replace text with value
                elif connective == 'in':
                    order = tokens[index]
                    index +=1
                    if order not in OrderValues:
                        msg = "Error building %s. Bad order got %s." %\
                            (command, order)
                        raise excepting.ParseError(msg, tokens, index)
                    order = OrderValues[order] #convert to order value
                elif connective == 'flush':
                    interval = max(1.0, abs(Convert2Num(tokens[index]))) # at least 1 second
                    index +=1
                elif connective == 'keep':
                    keep = max(0, int(Convert2Num(tokens[index])))
                    index +=1
                elif connective == 'cycle':
                    term = max(0.0, abs(Convert2Num(tokens[index])))
                    index +=1
                elif connective == 'size':
                    size = max(0, abs(Convert2Num(tokens[index])))
                    index +=1
                elif connective == 'reuse': # flag option, takes no argument
                    reuse = True
                else:
                    msg = "Error building %s. Bad connective got %s." %\
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
            if name in logging.Logger.Names: # logger names must be unique
                msg = "Error building %s. Task %s already exists." %\
                    (command, name)
                raise excepting.ParseError(msg, tokens, index)
            logger = logging.Logger(name=name,
                                    store=self.currentStore,
                                    period=period,
                                    flushPeriod=interval,
                                    prefix=prefix,
                                    keep=keep,
                                    cyclePeriod=term,
                                    fileSize=size,
                                    reuse=reuse)
            logger.schedule = schedule
            self.currentHouse.taskers.append(logger)
            if schedule == SLAVE:
                self.currentHouse.slaves.append(logger)
            else: #taskable active or inactive
                if order == FRONT:
                    self.currentHouse.fronts.append(logger)
                elif order == BACK:
                    self.currentHouse.backs.append(logger)
                else:
                    self.currentHouse.mids.append(logger)
            self.currentLogger = logger
            console.profuse("     Created logger named {0} at period {1:0.4f} be {2}\n".format(
                logger.name, logger.period, ScheduleNames[logger.schedule]))
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        return True
    def buildLog(self, command, tokens, index):
        """
        Create log in current logger

        log name [to fileName] [as (text, binary)] [on rule]

        rule: (once, never, always, update, change, streak, deck)

        default fileName is log's name
        default type is text
        default rule is never

        for manual logging use tout command with rule once or never

        log autopilot text to './logs/' on update

        The new log becomes self.currentLog so following 'loggee'
        commands attach to it.
        """
        if not self.currentLogger:
            msg = "Error building %s. No current logger." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if not self.currentStore:
            msg = "Error building %s. No current store." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        try:
            # option defaults
            kind = 'text'
            fileName = ''
            rule = NEVER
            name = tokens[index]
            index +=1
            while index < len(tokens): #options
                connective = tokens[index]
                index += 1
                if connective == 'as':
                    kind = tokens[index]
                    index +=1
                    if kind not in ['text', 'binary']:
                        msg = "Error building %s. Bad kind = %s." %\
                            (command, kind)
                        raise excepting.ParseError(msg, tokens, index)
                elif connective == 'to':
                    fileName = tokens[index]
                    index +=1
                elif connective == 'on':
                    # rule names are stored capitalized in LogRuleValues
                    rule = tokens[index].capitalize()
                    index +=1
                    if rule not in LogRuleValues:
                        msg = "Error building %s. Bad rule = %s." %\
                            (command, rule)
                        raise excepting.ParseError(msg, tokens, index)
                    rule = LogRuleValues[rule]
                else:
                    msg = "Error building %s. Bad connective got %s." %\
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
            if name in logging.Log.Names: # check if instance name in Registrar
                msg = "Error building %s. Log named %s already exists." %\
                    (command, name)
                raise excepting.ParseError(msg, tokens, index)
            if fileName: # file names must be unique within the logger
                for log in self.currentLogger.logs:
                    if fileName == log.baseFilename:
                        msg = ("Error building {0}. Log named {1} file named {2} "
                               "already exists.".format(command, name, fileName))
                        raise excepting.ParseError(msg, tokens, index)
            log = logging.Log(name=name,
                              store=self.currentStore,
                              kind=kind,
                              baseFilename=fileName,
                              rule=rule)
            self.currentLogger.addLog(log)
            self.currentLog = log
            console.profuse("     Created log named {0} kind {1} file {2} rule {3}\n".format(
                name, kind, fileName, LogRuleNames[rule]))
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)
        return True
    def buildLoggee(self, command, tokens, index):
        """
        Add loggee(s) to current log
        Syntax:
           loggee [fields in] path [as tag] [[fields in] path [as tag]] ...

        path: share path
        fields: field list

        If fields not provided use all fields
        If tag not provide use last segment of path as tag

        If log rule is streak then only one loggee per log is allowed and only
        the first field from fields clause is used.
        Syntax:
           log name on streak
           loggee [fields in] path [as tag]

        If log rule is deck then only one loggee per log is allowed and
        fields clause is required.
        Syntax:
           log name on deck
           loggee fields in path [as tag]
        """
        if not self.currentLog:
            msg = "Error building %s. No current log." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if not self.currentStore:
            msg = "Error building %s. No current store." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        try:
            while index < len(tokens): # each iteration consumes one loggee clause
                tag = ""
                fields, index = self.parseFields(tokens, index)
                path = tokens[index]
                index +=1
                if path in Reserved:
                    msg = "ParseError: Invalid path '{0}' using reserved".format(path)
                    raise excepting.ParseError(msg, tokens, index)
                if not (REO_DotPath.match(path) or REO_RelPath.match(path)):
                    #valid absolute or relative path segment without relation clause
                    msg = "ParseError: Invalid path format'{0}'".format(path)
                    raise excepting.ParseError(msg, tokens, index)
                parts = path.split(".")
                if "me" in parts: # 'me' relation has no meaning outside a frame
                    msg = "ParseError: Invalid path format'{0}', 'me' undefined".format(path)
                    raise excepting.ParseError(msg, tokens, index)
                if index < len(tokens): # optional 'as tag' clause
                    connective = tokens[index]
                    if connective == 'as':
                        index += 1 # eat token
                        tag = tokens[index]
                        if tag in Reserved:
                            msg = "ParseError: Invalid tag '{0}' using reserved".format(tag)
                            raise excepting.ParseError(msg, tokens, index)
                        tag = StripQuotes(tag)
                        index += 1
                if not tag: # default tag is last path segment
                    tag = parts[-1]
                share = self.currentStore.create(path) #create so no errors at runtime
                if not isinstance(share, storing.Share): #verify path ends in share not node
                    msg = "Error building %s. Loggee path %s not Share." % (command, path)
                    raise excepting.ParseError(msg, tokens, index)
                if tag in self.currentLog.loggees:
                    msg = "Error building %s. Loggee %s already exists in Log %s." %\
                        (command, tag, self.currentLog.name)
                    raise excepting.ParseError(msg, tokens, index)
                if self.currentLog.rule in (STREAK, DECK) and self.currentLog.loggees:
                    # only one loggee allowed when rule is streak or deck
                    msg = ("Error building {0}. Only one loggee allowed when "
                           "rule is streak or deck.".format(command))
                    raise excepting.ParseError(msg, tokens, index)
                self.currentLog.addLoggee(tag=tag, loggee=share, fields=fields)
                console.profuse("    Added loggee {0} with tag {1} fields {2}\n".format(
                    share.name, tag, fields))
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        return True
#Framework specific builders
    def buildFramer(self, command, tokens, index):
        """Create a new framer and make it the current one

           framer framername [be (active, inactive, aux, slave)] [at period]
                             [first frame] [via inode]

           framer framername be active at 0.0
           framer framername

           The new framer becomes self.currentFramer; its frame registry is
           assigned and the current frame is cleared.
        """
        if not self.currentHouse:
            msg = "Error building %s. No current house." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if not self.currentStore:
            msg = "Error building %s. No current store." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        try:
            name = tokens[index]
            index +=1
            self.verifyName(name, command, tokens, index)
            # option defaults, overridden by connective clauses below
            schedule = INACTIVE #globaling.py
            order = MID #globaling.py
            period = 0.0
            frame = ''
            inode = ''
            while index < len(tokens): #options
                connective = tokens[index]
                index += 1
                if connective == 'at':
                    period = max(0.0, Convert2Num(tokens[index]))
                    index +=1
                elif connective == 'be':
                    option = tokens[index]
                    index +=1
                    if option not in ScheduleValues:
                        msg = "Error building %s. Bad scheduled option got %s." %\
                            (command, option)
                        raise excepting.ParseError(msg, tokens, index)
                    schedule = ScheduleValues[option] #replace text with value
                elif connective == 'in':
                    order = tokens[index]
                    index +=1
                    if order not in OrderValues:
                        msg = "Error building %s. Bad order got %s." %\
                            (command, order,)
                        raise excepting.ParseError(msg, tokens, index)
                    order = OrderValues[order] #convert to order value
                elif connective == 'first':
                    frame = tokens[index] # starting frame, resolved later
                    index +=1
                    self.verifyName(frame, command, tokens, index)
                elif connective == 'via':
                    inode, index = self.parseIndirect(tokens, index, node=True)
                else:
                    msg = "Error building %s. Bad connective got %s." %\
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
            if name in framing.Framer.Names: # framer names must be unique
                msg = "Error building %s. Framer or Task %s already exists." %\
                    (command, name)
                raise excepting.ParseError(msg, tokens, index)
            else:
                framer = framing.Framer(name = name,
                                        store = self.currentStore,
                                        period = period)
                framer.schedule = schedule
                framer.first = frame #need to resolve later
                framer.inode = inode
                self.currentHouse.taskers.append(framer)
                self.currentHouse.framers.append(framer)
                # place in the scheduling list that matches its schedule
                if schedule == SLAVE:
                    self.currentHouse.slaves.append(framer)
                elif schedule == AUX:
                    self.currentHouse.auxes.append(framer)
                elif schedule == MOOT:
                    self.currentHouse.moots.append(framer)
                else: #taskable active or inactive
                    if order == FRONT:
                        self.currentHouse.fronts.append(framer)
                    elif order == BACK:
                        self.currentHouse.backs.append(framer)
                    else:
                        self.currentHouse.mids.append(framer)
                self.currentFramer = framer
                self.currentFramer.assignFrameRegistry()
                self.currentFrame = None #changed current Framer so no current Frame
                console.profuse("     Created Framer named '{0}' at period {1:0.4f} be {2} first {3}\n".format(
                    framer.name, framer.period, ScheduleNames[framer.schedule], framer.first))
                console.profuse("     Added Framer '{0}' to House '{1}', Assigned frame registry\n".format(
                    framer.name, self.currentHouse.name))
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        return True
def buildFirst(self, command, tokens, index):
"""set first (starting) frame for current framer
first framename
"""
if not self.currentFramer:
msg = "Error building %s. No current framer." % (command,)
raise excepting.ParseError(msg, tokens, index)
try:
name = tokens[index]
index +=1
self.verifyName(name, command, tokens, index)
self.currentFramer.first = name #need to resolve later
console.profuse(" Assigned first frame {0} for framework {1}\n".format(
name, self.currentFramer.name))
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
return True
#Frame specific builders
    def buildFrame(self, command, tokens, index):
        """Create frame and attach to over frame if indicated

           Syntax:
              frame framename [in over] [via inode]

           framename cannot be "next" which is reserved

           Parameters:
              command is the verb string ('frame')
              tokens is the list of token strings for this command line
              index is the position of the next unconsumed token

           Returns True on success. Raises excepting.ParseError on bad
           syntax, reserved or duplicate frame name, or missing context.
        """
        if not self.currentStore:
            msg = "Error building %s. No current store." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if not self.currentFramer:
            msg = "Error building %s. No current framer." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        inode = ''
        try:
            name = tokens[index]
            index +=1
            self.verifyName(name, command, tokens, index)
            over = None
            while index < len(tokens): #optional clauses in any order
                connective = tokens[index]
                index += 1
                if connective == 'in': #parent (over) frame clause
                    over = tokens[index]
                    index +=1
                elif connective == 'via': #inode pathname clause
                    inode, index = self.parseIndirect(tokens, index, node=True)
                else:
                    msg = "Error building %s. Bad connective got %s." % (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if name in ReservedFrameNames:
            msg = "Error building %s in Framer %s. Frame name %s reserved." %\
                (command, self.currentFramer.name, name)
            raise excepting.ParseError(msg, tokens, index)
        elif name in framing.Frame.Names: #could use Registry Retrieve function
            msg = "Error building %s in Framer %s. Frame %s already exists." %\
                (command, self.currentFramer.name, name)
            raise excepting.ParseError(msg, tokens, index)
        else:
            frame = framing.Frame(name=name, store = self.currentStore,
                                  framer=self.currentFramer.name,
                                  inode=inode)
            if over:
                frame.over = over #name only; resolved to frame object later
        #if previous frame did not have explicit next frame then use this new frame
        # as next lexically
        if self.currentFrame and not self.currentFrame.next_:
            self.currentFrame.next_ = frame.name
        #default first frame is first lexical frame if not assigned otherwise
        #so if first is empty then this must be the first lexical frame
        if not self.currentFramer.first: #frame.framer.first:
            self.currentFramer.first = frame.name #frame.framer.first = frame
        self.currentFrame = frame
        # new frame resets the action context back to native
        self.currentContext = NATIVE
        console.profuse("    Created frame {0} with over {1}\n".format(frame.name, over))
        return True
def buildOver(self, command, tokens, index):
"""Makes frame the over frame of the current frame
over frame
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
over = tokens[index]
index +=1
self.verifyName(over, command, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
self.currentFrame.over = over #need to resolve and attach later
console.profuse(" Assigned over {0} to frame {1}\n".format(
over,self.currentFrame.name))
return True
    def buildUnder(self, command, tokens, index):
        """Makes frame the primary under frame of the current frame

           Syntax:
              under frame

           The primary under frame is unders[0]. The given name is moved
           (or inserted) into that slot, removing any duplicate entries.

           Returns True on success. Raises excepting.ParseError on bad syntax.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            under = tokens[index]
            index +=1
            self.verifyName(under, command, tokens, index)
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        unders = self.currentFrame.unders
        if not unders: #empty so just append
            unders.append(under)
        elif under != unders[0]: #not already primary
            while under in unders: #remove all copies of under (should not be multiples)
                unders.remove(under)
            if isinstance(unders[0], framing.Frame): #already resolved Frame; don't overwrite it
                unders.insert(0, under)
            else: #just an unresolved name so overwrite
                unders[0] = under
        else: #under == unders[0] already primary so do nothing
            pass
        console.profuse("    Assigned primary under {0} for frame {1}\n".format(
            under,self.currentFrame.name))
        return True
def buildNext(self, command, tokens, index):
"""Explicitly assign next frame for timeouts and as target of go next
next frameName
next
blank frameName means use lexically next allows override if multiple
next commands to default of lexical
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
if index < len(tokens): #next frame optional
next_ = tokens[index]
index += 1
self.verifyName(next_, command, tokens, index)
else:
next_ = None
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
self.currentFrame.next_ = next_
console.profuse(" Assigned next frame {0} for frame {1}\n".format(
next_, self.currentFrame.name))
return True
    def buildAux(self, command, tokens, index):
        """Parse 'aux' command for simple, cloned, or conditional aux of forms
           Simple Auxiliary:
              aux framername
           Cloned Auxiliary:
              aux framername as (mine, clonedauxname) [via (main, mine, inode)]
           Simple Conditional Auxiliary:
              aux framername if [not] need
              aux framername if [not] need [and [not] need ...]

           NOTE: combining 'as' (clone) with 'if' (conditional) is NOT
           supported; the code below raises a ParseError when both a clone
           tag and needs are present.

           Returns True on success, False if makeNeed fails.
           Raises excepting.ParseError on bad syntax.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            needs = []
            aux = None # original auxiliary framer name
            connective = None
            clone = None
            inode = ''
            insular = False
            aux = tokens[index]
            index +=1 #eat token
            self.verifyName(aux, command, tokens, index)
            while index < len(tokens): #optional clauses
                connective = tokens[index]
                index += 1
                if connective == 'as': #clone tag clause
                    clone = tokens[index]
                    index += 1
                    self.verifyName(clone, command, tokens, index)
                elif connective == 'via': #inode pathname clause
                    inode, index = self.parseIndirect(tokens, index, node=True)
                elif connective == 'if': #conditional needs clause
                    while (index < len(tokens)):
                        act, index = self.makeNeed(tokens, index)
                        if not act:
                            return False # something wrong do not know what
                        needs.append(act)
                        if index < len(tokens):
                            connective = tokens[index]
                            if connective not in ['and']:
                                msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                                    (command, connective)
                                raise excepting.ParseError(msg, tokens, index)
                            index += 1 #otherwise eat token
                else:
                    msg = ("Error building {0}. Invalid connective"
                           " '{1}'.".format(command, connective))
                    raise excepting.ParseError(msg, tokens, index)
        except IndexError:
            msg = "Error building %s. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building %s. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        # clone + needs combination is rejected (see docstring note)
        if clone and needs:
            msg = "Error building %s. Conditional auxilary may not be clone." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if clone:
            if clone == 'mine': #generate unique insular (private) clone tag
                clone = self.currentFramer.newMootTag(base=aux)
                insular = True
            if clone in self.currentFramer.moots: #tag must be unique per framer
                msg = ("Error building {0}. Aux/Clone tag '{1}' "
                       "already in use.".format(command, clone))
                raise excepting.ParseError(msg, tokens, index)
            data = odict(original=aux,
                         clone=clone,
                         schedule=AUX,
                         human=self.currentHuman,
                         count=self.currentCount,
                         inode=inode,
                         insular=insular)
            self.currentFramer.moots[clone] = data # need to resolve early
            aux = odict(tag=clone) # mapping indicates that its a clone
            # assign aux to mapping with clone tag name as original aux is to be cloned
            # named clone create clone when resolve framer.moots so may be referenced
            # named clones must be resolved before any frames get resolved
            # and are added to the class Framer.names so they can be referenced
            # resolved by house.resolve -> house.presolvePresolvables
            #    -> framer.presolve -> framer.resolveMoots
            # resolveMoots adds new resolveable framers to house.presolvables
            # self.store.house.presolvables.append(clone)
        if needs: # conditional auxiliary suspender preact
            human = ' '.join(tokens) #recreate transition command string for debugging
            #resolve aux link later
            parms = dict(needs = needs, main = 'me', aux = aux, human = human)
            act = acting.Act( actor='Suspender',
                              registrar=acting.Actor,
                              parms=parms,
                              human=self.currentHuman,
                              count=self.currentCount)
            self.currentFrame.addPreact(act)
            console.profuse("    Added suspender preact, '{0}', with aux"
                            " {1} needs:\n".format(command, aux))
            for need in needs:
                console.profuse("    {0} with parms = {1}\n".format(need.actor, need.parms))
        else: # simple auxiliary: if aux is string then regular aux, if aux is mapping then clone
            self.currentFrame.addAux(aux) #need to resolve later
            console.profuse("    Added aux framer {0}\n".format(aux))
        return True
    def buildRear(self, command, tokens, index):
        """
        Parse 'rear' verb

        Two Forms: only first form is currently supported
           rear original [as mine] [be aux] in frame framename
              framename cannot be me or in outline of me
           rear original as clonename be schedule
              schedule cannot be aux
              clonename cannot be mine

        Creates a Rearer act that clones framer 'original' at runtime.
        Returns True on success. Raises excepting.ParseError on bad syntax.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            original = None
            connective = None
            clone = 'mine' # default is insular clone
            schedule = 'aux' # default schedule is aux
            frame = 'me' # default frame is current
            original = tokens[index]
            index +=1 # eat token
            self.verifyName(original, command, tokens, index)
            while index < len(tokens): #optional clauses
                connective = tokens[index]
                index += 1
                if connective == 'as': #clone name clause
                    clone = tokens[index]
                    index += 1
                    self.verifyName(clone, command, tokens, index)
                elif connective == 'be': #schedule clause
                    schedule = tokens[index]
                    index += 1
                elif connective == 'in': #optional in frame or in framer clause
                    place = tokens[index] #need to resolve
                    index += 1 # eat token
                    if place != 'frame':
                        msg = ("ParseError: Building verb '{0}'. Invalid "
                               " '{1}' clause. Expected 'frame' got "
                               "'{2}'".format(command, connective, place))
                        raise excepting.ParseError(msg, tokens, index)
                    if index < len(tokens):
                        frame = tokens[index]
                        index += 1
                else:
                    msg = ("Error building {0}. Invalid connective"
                           " '{1}'.".format(command, connective))
                    raise excepting.ParseError(msg, tokens, index)
        except IndexError:
            msg = "Error building {0}. Not enough tokens.".format(command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "Error building {0}. Unused tokens.".format(command,)
            raise excepting.ParseError(msg, tokens, index)
        # only allow schedule of aux for now
        if schedule not in ScheduleValues or schedule not in ['aux']:
            msg = "Error building {0}. Bad scheduled option got '{1}'.".format(command, schedule)
            raise excepting.ParseError(msg, tokens, index)
        schedule = ScheduleValues[schedule] #replace text with value
        # when clone is insular and schedule is aux then frame cannot be in
        # current frame's outline. This is validated in the actor resolve
        if schedule == AUX:
            if clone != 'mine':
                msg = ("Error building {0}. Only insular clonename of"
                       " 'mine' allowed. Got '{1}'.".format(command, clone))
                raise excepting.ParseError(msg, tokens, index)
            if frame == 'me':
                # NOTE(review): extra 'clone' argument to format is unused here
                msg = ("Error building {0}. Frame clause required.".format(command, clone))
                raise excepting.ParseError(msg, tokens, index)
        parms = dict(original=original,
                     clone=clone,
                     schedule=schedule,
                     frame=frame)
        actorName = 'Rearer'
        if actorName not in acting.Actor.Registry:
            msg = "Error building '{0}'. No actor named '{1}'.".format(command, actorName)
            raise excepting.ParseError(msg, tokens, index)
        act = acting.Act(actor=actorName,
                         registrar=acting.Actor,
                         parms=parms,
                         human=self.currentHuman,
                         count=self.currentCount)
        context = self.currentContext
        if context == NATIVE:
            context = ENTER # what is native for this command
        if not self.currentFrame.addByContext(act, context):
            msg = "Error building %s. Bad context '%s'." % (command, context)
            raise excepting.ParseError(msg, tokens, index)
        console.profuse("    Added {0} '{1}' with parms '{2}'\n".format(
            ActionContextNames[context], act.actor, act.parms))
        return True
def buildRaze(self, command, tokens, index):
"""
Parse 'raze' verb
raze (all, last, first) [in frame [(me, framename)]]
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
connective = None
who = None # default is insular clone
frame = 'me' # default frame is current
who = tokens[index]
index +=1 # eat token
if who not in ['all', 'first', 'last']:
msg = ("ParseError: Building verb '{0}'. Invalid target of"
" raze. Expected one of ['all', 'first', 'last'] but got "
"'{2}'".format(command, connective, who))
raise excepting.ParseError(msg, tokens, index)
while index < len(tokens): #options
connective = tokens[index]
index += 1
if connective == 'in': #optional in frame or in framer clause
place = tokens[index] #need to resolve
index += 1 # eat token
if place != 'frame':
msg = ("ParseError: Building verb '{0}'. Invalid "
" '{1}' clause. Expected 'frame' got "
"'{2}'".format(command, connective, place))
raise excepting.ParseError(msg, tokens, index)
if index < len(tokens):
frame = tokens[index]
index += 1
else:
msg = ("Error building {0}. Invalid connective"
" '{1}'.".format(command, connective))
raise excepting.ParseError(msg, tokens, index)
except IndexError:
msg = "Error building {0}. Not enough tokens.".format(command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building {0}. Unused tokens.".format(command,)
raise excepting.ParseError(msg, tokens, index)
parms = dict(who=who,
frame=frame)
actorName = 'Razer'
if actorName not in acting.Actor.Registry:
msg = "Error building '{0}'. No actor named '{1}'.".format(command, actorName)
raise excepting.ParseError(msg, tokens, index)
act = acting.Act(actor=actorName,
registrar=acting.Actor,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
context = self.currentContext
if context == NATIVE:
context = EXIT # what is native for this command
if not self.currentFrame.addByContext(act, context):
msg = "Error building %s. Bad context '%s'." % (command, context)
raise excepting.ParseError(msg, tokens, index)
console.profuse(" Added {0} '{1}' with parms '{2}'\n".format(
ActionContextNames[context], act.actor, act.parms))
return True
def buildDone(self, command, tokens, index):
"""
Creates complete action that indicates tasker(s) completed
by setting .done state to True
native context is enter
done tasker [tasker ...]
done [me]
tasker:
(taskername, me)
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
kind = 'Done'
taskers = []
while index < len(tokens):
tasker = tokens[index]
index +=1
self.verifyName(tasker, command, tokens, index)
taskers.append(tasker) #resolve later
if not taskers:
taskers.append('me')
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
actorName = 'Complete' + kind.capitalize()
if actorName not in completing.Complete.Registry:
msg = "Error building complete %s. No actor named %s." %\
(kind, actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['taskers'] = taskers #resolve later
act = acting.Act(actor=actorName,
registrar=completing.Complete,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
context = self.currentContext
if context == NATIVE:
context = ENTER #what is native for this command
if not self.currentFrame.addByContext(act, context):
msg = "Error building %s. Bad context '%s'." % (command, context)
raise excepting.ParseError(msg, tokens, index)
console.profuse(" Created done complete {0} with {1}\n".format(act.actor, act.parms))
return True
def buildTimeout(self, command, tokens, index):
"""creates implicit transition to next on elapsed >= value
timeout 5.0
"""
self.verifyCurrentContext(tokens, index)
try:
value = abs(Convert2Num(tokens[index])) #convert text to number if valid format
index +=1
if isinstance(value, str):
msg = "Error building %s. invalid timeout %s." %\
(command, value)
raise excepting.ParseError(msg, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
# build need act for transact
need = self.makeImplicitDirectFramerNeed( name="elapsed",
comparison='>=',
goal=float(value),
tolerance=0)
needs = []
needs.append(need)
# build transact
human = ' '.join(tokens) #recreate transition command string for debugging
far = 'next' #resolve far link later
parms = dict(needs = needs, near = 'me', far = far, human = human)
act = acting.Act(actor='Transiter',
registrar=acting.Actor,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
self.currentFrame.addPreact(act) #add transact as preact
console.profuse(" Added timeout transition preact, '{0}', with far {1} needs:\n".format(
command, far))
for act in needs:
console.profuse(" {0} with parms = {1}\n".format(act.actor, act.parms))
return True
def buildRepeat(self, command, tokens, index):
"""creates implicit transition to next on recurred >= value
repeat 2
go next if recurred >= 2
"""
self.verifyCurrentContext(tokens, index)
try:
value = abs(Convert2Num(tokens[index])) #convert text to number if valid format
index +=1
if isinstance(value, str):
msg = "Error building %s. invalid repeat %s." %\
(command, value)
raise excepting.ParseError(msg, tokens, index)
except IndexError:
msg = "Error building %s. Not enough tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "Error building %s. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
# build need act for transact
need = self.makeImplicitDirectFramerNeed( name="recurred",
comparison='>=',
goal=int(value),
tolerance=0)
needs = []
needs.append(need)
# build transact
human = ' '.join(tokens) #recreate transition command string for debugging
far = 'next' #resolve far link later
parms = dict(needs = needs, near = 'me', far = far, human = human)
act = acting.Act( actor='Transiter',
registrar=acting.Actor,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
self.currentFrame.addPreact(act) #add transact as preact
console.profuse(" Added repeat transition preact, '{0}', with far {1} needs:\n".format(
command, far))
for act in needs:
console.profuse(" {0} with parms = {1}\n".format(act.actor, act.parms))
return True
def buildNative(self, command, tokens, index):
""" sets context for current frame to
native
"""
self.currentContext = NATIVE
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
def buildBenter(self, command, tokens, index):
""" sets context for current frame to
benter
"""
self.currentContext = BENTER
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
def buildEnter(self, command, tokens, index):
""" sets context for current frame to
enter
"""
self.currentContext = ENTER
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
def buildRenter(self, command, tokens, index):
""" sets context for current frame to
renter
"""
self.currentContext = RENTER
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
def buildPrecur(self, command, tokens, index):
""" sets context for current frame to
precur
"""
self.currentContext = PRECUR
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
def buildRecur(self, command, tokens, index):
""" sets context for current frame to
recur
"""
self.currentContext = RECUR
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
def buildExit(self, command, tokens, index):
""" sets context for current frame to
exit
"""
self.currentContext = EXIT
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
def buildRexit(self, command, tokens, index):
""" sets context for current frame to
rexit
"""
self.currentContext = REXIT
console.profuse(" Changed context to {0}\n".format(
ActionContextNames[self.currentContext]))
return True
#Frame Action specific builders
def buildPrint(self, command, tokens, index):
"""prints a string consisting of space separated tokens
print message
print hello world
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
message = ' '.join(tokens[1:])
except IndexError:
message = ''
parms = dict(message = message)
act = acting.Act( actor='Printer',
registrar=acting.Actor,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
context = self.currentContext
if context == NATIVE:
context = ENTER #what is native for this command
if not self.currentFrame.addByContext(act, context):
msg = "Error building %s. Bad context '%s'." % (command, context)
raise excepting.ParseError(msg, tokens, index)
console.profuse(" Added {0} '{1}' with parms '{2}'\n".format(
ActionContextNames[context], act.actor, act.parms))
return True
def buildPut(self, command, tokens, index):
"""Build put command to put data into share
put data into destination
data:
direct
destination:
[(value, fields) in] indirect
"""
self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
try:
srcData, index = self.parseDirect(tokens, index)
connective = tokens[index]
index += 1
if connective != 'into':
msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
(command, connective)
raise excepting.ParseError(msg, tokens, index)
dstFields, index = self.parseFields(tokens, index)
dstPath, index = self.parseIndirect(tokens, index)
except IndexError:
msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
raise excepting.ParseError(msg, tokens, index)
if index != len(tokens):
msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
raise excepting.ParseError(msg, tokens, index)
actorName = 'Poke' + 'Direct' #capitalize second word
if actorName not in poking.Poke.Registry:
msg = "ParseError: Can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['sourceData'] = srcData # this is dict
parms['destination'] = dstPath # this is a share path
parms['destinationFields'] = dstFields # this is a list
act = acting.Act( actor=actorName,
registrar=poking.Poke,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
msg = " Created Actor {0} parms: data = {1} destination = {2} fields = {3} ".format(
actorName, srcData, dstPath, dstFields)
console.profuse(msg)
context = self.currentContext
if context == NATIVE:
context = ENTER #what is native for this command
if not self.currentFrame.addByContext(act, context):
msg = "Error building %s. Bad context '%s'." % (command, context)
raise excepting.ParseError(msg, tokens, index)
console.profuse(" Added {0} '{1}' with parms '{2}'\n".format(
ActionContextNames[context], act.actor, act.parms))
return True
    def buildInc(self, command, tokens, index):
        """Build inc command to increment a share by direct data or from a source share.

           Syntax:
              inc destination with data
              inc destination from source

           destination:
              [(value, field) in] indirect
           data:
              directone
           source:
              [(value, field) in] indirect

           Returns True on success. Raises excepting.ParseError on bad syntax
           or non-numeric direct data.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            dstFields, index = self.parseFields(tokens, index)
            dstPath, index = self.parseIndirect(tokens, index)
            connective = tokens[index]
            index += 1
            if connective in ('with', ): #direct data increment
                srcData, index = self.parseDirect(tokens, index)
                # direct increment values must all be numeric
                for field, value in srcData.items():
                    if isinstance(value, str):
                        msg = "ParseError: Building verb '%s'. " % (command)
                        msg += "Data value = '%s' in field '%s' not a number" %\
                            (value, field)
                        raise excepting.ParseError(msg, tokens, index)
                act = self.makeIncDirect(dstPath, dstFields, srcData)
            elif connective in ('from', ): #indirect increment from source share
                srcFields, index = self.parseFields(tokens, index)
                srcPath, index = self.parseIndirect(tokens, index)
                act = self.makeIncIndirect(dstPath, dstFields, srcPath, srcFields)
            else:
                msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        context = self.currentContext
        if context == NATIVE:
            context = ENTER #what is native for this command
        if not self.currentFrame.addByContext(act, context):
            msg = "Error building %s. Bad context '%s'." % (command, context)
            raise excepting.ParseError(msg, tokens, index)
        console.profuse("    Added {0} '{1}' with parms '{2}'\n".format(
            ActionContextNames[context], act.actor, act.parms))
        return True
    def buildCopy(self, command, tokens, index):
        """Build copy command to copy from one share to another.

           Syntax:
              copy source into destination

           source:
              [(value, fields) in] indirect
           destination:
              [(value, fields) in] indirect

           Returns True on success. Raises excepting.ParseError on bad syntax.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            srcFields, index = self.parseFields(tokens, index)
            srcPath, index = self.parseIndirect(tokens, index)
            connective = tokens[index]
            index += 1
            if connective != 'into':
                msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)
            dstFields, index = self.parseFields(tokens, index)
            dstPath, index = self.parseIndirect(tokens, index)
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        actorName = 'Poke' + 'Indirect' #capitalize second word
        if actorName not in poking.Poke.Registry:
            msg = "ParseError: Can't find actor named '%s'" % (actorName)
            raise excepting.ParseError(msg, tokens, index)
        parms = {}
        parms['source'] = srcPath #this is string
        parms['sourceFields'] = srcFields #this is a list
        parms['destination'] = dstPath #this is a string
        parms['destinationFields'] = dstFields #this is a list
        act = acting.Act( actor=actorName,
                          registrar=poking.Poke,
                          parms=parms,
                          human=self.currentHuman,
                          count=self.currentCount)
        msg = "    Created Actor {0} parms: ".format(actorName)
        for key, value in parms.items():
            msg += " {0} = {1}".format(key, value)
        console.profuse("{0}\n".format(msg))
        context = self.currentContext
        if context == NATIVE:
            context = ENTER #what is native for this command
        if not self.currentFrame.addByContext(act, context):
            msg = "Error building %s. Bad context '%s'." % (command, context)
            raise excepting.ParseError(msg, tokens, index)
        console.profuse("    Added {0} '{1}' with parms '{2}'\n".format(
            ActionContextNames[context], act.actor, act.parms))
        return True
    def buildSet(self, command, tokens, index):
        """Build set command to generate goal actions.

           Syntax:
              set goal with data
              set goal from source

           goal:
              elapsed
              recurred
              [(value, fields) in] absolute
              [(value, fields) in] relativegoal
           data:
              direct
           source:
              indirect

           Returns True on success, False if a goal act could not be made.
           Raises excepting.ParseError on bad syntax.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            kind = tokens[index]
            if kind in ['elapsed', 'recurred']: #simple implicit framer relative goals, direct and indirect,
                index +=1 #eat token
                act, index = self.makeFramerGoal(kind, tokens, index)
            else: #basic goals
                #goal is destination dst
                dstFields, index = self.parseFields(tokens, index)
                dstPath, index = self.parseIndirect(tokens, index)
                #required connective
                connective = tokens[index]
                index += 1
                if connective in ('with', ): #data direct
                    srcData, index = self.parseDirect(tokens, index)
                    act = self.makeGoalDirect(dstPath, dstFields, srcData)
                elif connective in ('from', ): #source indirect
                    srcFields, index = self.parseFields(tokens, index)
                    srcPath, index = self.parseIndirect(tokens, index)
                    act = self.makeGoalIndirect(dstPath, dstFields, srcPath, srcFields)
                else:
                    msg = "ParseError: Building verb '%s'. Unexpected connective '%s'" %\
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
            if not act:
                return False
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        context = self.currentContext
        if context == NATIVE:
            context = ENTER #what is native for this command
        if not self.currentFrame.addByContext(act, context):
            msg = "Error building %s. Bad context '%s'." % (command, context)
            raise excepting.ParseError(msg, tokens, index)
        console.profuse("    Added {0} '{1}' with parms '{2}'\n".format(
            ActionContextNames[context], act.actor, act.parms))
        return True
    def buildGo(self, command, tokens, index):
        """Parse 'go' command transition with transition conditions of forms

           Transitions:
              go far
              go far if [not] need
              go far if [not] need [and [not] need ...]

           Far:
              next
              me
              frame

           Builds a Transiter preact on the current frame whose needs gate
           the transition. Returns True on success, False if makeNeed fails.
           Raises excepting.ParseError on bad syntax.
        """
        self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
        try:
            needs = []
            far = None
            connective = None
            far = tokens[index] #get target
            index +=1 #eat token
            self.verifyName(far, command, tokens, index)
            if index < len(tokens): #check for optional if connective
                connective = tokens[index]
                if connective not in ['if']: #invalid connective
                    msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
                index += 1 #otherwise eat token
                while (index < len(tokens)): #one or more 'and'-joined needs
                    act, index = self.makeNeed(tokens, index)
                    if not act:
                        return False #something wrong do not know what
                    needs.append(act)
                    if index < len(tokens):
                        connective = tokens[index]
                        if connective not in ['and']:
                            msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                                (command, connective)
                            raise excepting.ParseError(msg, tokens, index)
                        index += 1 #otherwise eat token
        except IndexError:
            msg = "ParseError: Building verb '%s'. Not enough tokens." % (command, )
            raise excepting.ParseError(msg, tokens, index)
        if index != len(tokens):
            msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
            raise excepting.ParseError(msg, tokens, index)
        if not needs and connective: #if but no needs
            msg = "ParseError: Building verb '%s'. Connective %s but missing need(s)" %\
                (command, connective)
            raise excepting.ParseError(msg, tokens, index)
        # build transact
        human = ' '.join(tokens) #recreate transition command string for debugging
        #resolve far link later
        parms = dict(needs = needs, near = 'me', far = far, human = human)
        act = acting.Act( actor='Transiter',
                          registrar=acting.Actor,
                          parms=parms,
                          human=self.currentHuman,
                          count=self.currentCount)
        self.currentFrame.addPreact(act)
        console.profuse("    Added transition preact, '{0}', with far {1} needs:\n".format(
            command, far))
        for act in needs:
            console.profuse("    {0} with parms = {1}\n".format(act.actor, act.parms))
        return True
def buildLet(self, command, tokens, index):
    """Parse 'let' verb which adds benter (before-enter) condition acts
       to the current frame. Forms:
       Before Enter:
          let [me] if [not] need
          let [me] if [not] need [and [not] need ...]
       Far:
          next
          me
          frame

       Returns True on success, False if makeNeed fails to build an act.
       Raises excepting.ParseError on any syntax error.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        needs = []  # list of need acts to attach as beacts
        connective = None
        connective = tokens[index] #get me or if
        if connective not in ['me', 'if']: #invalid connective
            msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                (command, connective)
            raise excepting.ParseError(msg, tokens, index)
        index += 1 #otherwise eat token
        if connective == 'me':
            # optional 'me' must be followed by required 'if'
            connective = tokens[index] #check for if connective
            if connective not in ['if']: #invalid connective
                msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                    (command, connective)
                raise excepting.ParseError(msg, tokens, index)
            index += 1 #otherwise eat token
        # parse one or more needs separated by 'and'
        while (index < len(tokens)):
            act, index = self.makeNeed(tokens, index)
            if not act:
                return False # something wrong do know what
            needs.append(act)
            if index < len(tokens):
                # remaining tokens must start with 'and' conjunction
                connective = tokens[index]
                if connective not in ['and']:
                    msg = "ParseError: Building verb '%s'. Bad connective '%s'" % \
                        (command, connective)
                    raise excepting.ParseError(msg, tokens, index)
                index += 1 #otherwise eat token
    except IndexError:
        msg = "ParseError: Building verb '%s'. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "ParseError: Building verb '%s'. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if not needs: # no needs
        msg = "ParseError: Building verb '%s'. Missing need(s)" %\
            (command)
        raise excepting.ParseError(msg, tokens, index)
    # build beact: each need act becomes a before-enter act on the frame
    for act in needs:
        self.currentFrame.addBeact(act)
    console.profuse("  Added beact, '{0}', with needs:\n".format(command))
    for act in needs:
        console.profuse("    {0} with {1}\n".format(act.actor, act.parms))
    return True
def buildDo(self, command, tokens, index):
    """Parse 'do' verb which creates a Doer (Deed) act and adds it to the
       current frame in the selected action context.

    Syntax:
       do kind [part ...] [as name [part ...]] [at context] [via inode]
                                   [with data]
                                   [from source]
                                   [per data]
                                   [for source]
                                   [cum data]
                                   [qua source]
       deed:
          name [part ...]
       kind:
          name [part ...]
       context:
          (native, benter, enter, recur, exit, precur, renter, rexit)
       inode:
          indirect
       data:
          direct
       source:
          [(value, fields) in] indirect

       do controller pid depth --> controllerPIDDepth
       do arbiter switch heading --> arbiterSwitchHeading
       do controller pid depth with foobar 1
       do controller pid depth from value in .max.depth

    Returns True on success.
    Raises excepting.ParseError on any syntax error or unknown Deed kind.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        kind = "" # deed class key in registry
        name = "" #specific name of deed instance
        inode = None
        parts = []
        parms = odict()
        inits = odict()
        ioinits = odict()
        prerefs = odict([('inits', odict()),
                         ('ioinits', odict()),
                         ('parms', odict()) ])
        connective = None
        context = self.currentContext
        while index < len(tokens):
            if (tokens[index] in ['as', 'at', 'via', 'with', 'from',
                                  'per', 'for', 'cum', 'qua' ]): # end of parts
                break
            parts.append(tokens[index])
            index += 1 #eat token
        if parts:
            kind = "".join([part.capitalize() for part in parts]) #camel case
        while index < len(tokens): #options
            connective = tokens[index]
            index += 1
            if connective in ('as', ):
                parts = []
                while index < len(tokens): # name parts end when connective
                    # Bug fix: original list read `'from' 'per'` (missing
                    # comma) which Python concatenates into 'fromper', so
                    # neither 'from' nor 'per' ended the name parts; also
                    # added 'via' for consistency with the kind-parts list.
                    if tokens[index] in ['as', 'at', 'via', 'with', 'from',
                                         'per', 'for', 'cum', 'qua' ]: # end of parts
                        break
                    parts.append(tokens[index])
                    index += 1 #eat token
                name = "".join([part.capitalize() for part in parts]) #camel case
                if not name:
                    msg = "ParseError: Building verb '%s'. Missing name for connective 'as'" % (command)
                    raise excepting.ParseError(msg, tokens, index)
            elif connective in ('at', ):
                context = tokens[index]
                index += 1
                if context not in ActionContextValues:
                    msg = ("ParseError: Building verb '{0}'. Invalid context"
                           " '{1} for connective 'as'".format(command, context))
                    raise excepting.ParseError(msg, tokens, index)
                context = ActionContextValues[context]
            elif connective in ('via', ):
                inode, index = self.parseIndirect(tokens, index, node=True)
            elif connective in ('with', ):
                data, index = self.parseDirect(tokens, index)
                parms.update(data)
            elif connective in ('from', ):
                srcFields, index = self.parseFields(tokens, index)
                srcPath, index = self.parseIndirect(tokens, index)
                prerefs['parms'][srcPath] = srcFields
            elif connective in ('per', ):
                data, index = self.parseDirect(tokens, index)
                ioinits.update(data)
            elif connective in ('for', ):
                srcFields, index = self.parseFields(tokens, index)
                srcPath, index = self.parseIndirect(tokens, index)
                prerefs['ioinits'][srcPath] = srcFields
            elif connective in ('cum', ):
                data, index = self.parseDirect(tokens, index)
                inits.update(data)
            elif connective in ('qua', ):
                srcFields, index = self.parseFields(tokens, index)
                srcPath, index = self.parseIndirect(tokens, index)
                prerefs['inits'][srcPath] = srcFields
            else:
                msg = ("Error building {0}. Invalid connective"
                       " '{1}'.".format(command, connective))
                raise excepting.ParseError(msg, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if not kind:
        msg = "ParseError: Building verb '%s'. Missing kind for Doer." %\
            (command)
        raise excepting.ParseError(msg, tokens, index)
    if kind not in doing.Doer.Registry: # class registration not exist
        msg = "ParseError: Building verb '%s'. No Deed of kind '%s' in registry" %\
            (command, kind)
        raise excepting.ParseError(msg, tokens, index)
    if inode:
        ioinits.update(inode=inode) # via argument takes precedence over others
    if name:
        inits['name'] = name
    act = acting.Act( actor=kind,
                      registrar=doing.Doer,
                      inits=inits,
                      ioinits=ioinits,
                      parms=parms,
                      prerefs=prerefs,
                      human=self.currentHuman,
                      count=self.currentCount)
    if context == NATIVE:
        context = RECUR #what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildBid(self, command, tokens, index):
    """Parse 'bid' verb which creates a Want act to control other taskers.

       bid control tasker [tasker ...] [at period]
       bid control [me] [at period]
       bid control all [at period]

       control:
          (stop, start, run, abort, ready)
       tasker:
          (tasker, me, all)
       period:
          number
          indirectOne
       indirectOne:
          sharepath [of relative]
          (field, value) in sharepath [of relative]

       Returns True on success.
       Raises excepting.ParseError on any syntax error or unknown control.
    """
    self.verifyCurrentContext(tokens, index) #currentStore, currentFramer, currentFrame exist
    try:
        period = None # no period provided
        sourcePath = None
        sourceField = None
        # NOTE(review): the 'sources' key initialized here appears unused;
        # only 'source'/'sourceField' are assigned below — confirm intent.
        parms = odict([('taskers', []), ('period', None), ('sources', odict())])
        control = tokens[index]
        index +=1
        if control not in ['start', 'run', 'stop', 'abort', 'ready']:
            msg = "Error building {0}. Bad control = {1}.".format(command, control)
            raise excepting.ParseError(msg, tokens, index)
        taskers = []
        # collect tasker names until optional 'at' clause
        while index < len(tokens):
            if (tokens[index] in ['at']):
                break # end of taskers so do not eat yet
            tasker = tokens[index]
            index +=1
            self.verifyName(tasker, command, tokens, index)
            taskers.append(tasker) #resolve later
        if not taskers:
            taskers.append('me')  # default to current framer
        while index < len(tokens): # at option
            connective = tokens[index]
            index += 1
            if connective in ['at']:
                # parse period direct or indirect
                try: #parse direct
                    period = max(0.0, Convert2Num(tokens[index])) # period is number
                    index += 1 # eat token
                except ValueError: # parse indirect
                    sourceField, index = self.parseField(tokens, index)
                    sourcePath, index = self.parseIndirect(tokens, index)
            else:
                msg = ("Error building {0}. Invalid connective"
                       " '{1}'.".format(command, connective))
                raise excepting.ParseError(msg, tokens, index)
        # actor class is named after the control, e.g. 'WantStart'
        actorName = 'Want' + control.capitalize()
        if actorName not in wanting.Want.Registry:
            msg = "Error building %s. No actor named %s." % (command, actorName)
            raise excepting.ParseError(msg, tokens, index)
        parms['taskers'] = taskers #resolve later
        parms['period'] = period
        parms['source'] = sourcePath
        parms['sourceField'] = sourceField
        act = acting.Act( actor=actorName,
                          registrar=wanting.Want,
                          parms=parms,
                          human=self.currentHuman,
                          count=self.currentCount)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    context = self.currentContext
    if context == NATIVE:
        context = ENTER #what is native for this command
    if not self.currentFrame.addByContext(act, context):
        msg = "Error building %s. Bad context '%s'." % (command, context)
        raise excepting.ParseError(msg, tokens, index)
    console.profuse("     Added {0} want '{1}' with parms '{2}'\n".format(
        ActionContextNames[context], act.actor, act.parms))
    return True
def buildReady(self, command, tokens, index):
    """Parse the 'ready' verb.

       Syntax:
          ready taskName

       Builds a 'ready' fiat for the named tasker with native context BENTER.
       Returns True on success; raises excepting.ParseError on syntax error.
    """
    # ensure currentStore, currentFramer, currentFrame all exist
    self.verifyCurrentContext(tokens, index)
    try:
        taskerName = tokens[index]
        index += 1  # consume the tasker name token
        self.verifyName(taskerName, command, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):  # leftover tokens are a syntax error
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    self.makeFiat(taskerName, 'ready', BENTER, command, tokens, index)
    return True
def buildStart(self, command, tokens, index):
    """Parse the 'start' verb.

       Syntax:
          start taskName

       Builds a 'start' fiat for the named tasker with native context ENTER.
       Returns True on success; raises excepting.ParseError on syntax error.
    """
    # ensure currentStore, currentFramer, currentFrame all exist
    self.verifyCurrentContext(tokens, index)
    try:
        taskerName = tokens[index]
        index += 1  # consume the tasker name token
        self.verifyName(taskerName, command, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):  # leftover tokens are a syntax error
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    self.makeFiat(taskerName, 'start', ENTER, command, tokens, index)
    return True
def buildStop(self, command, tokens, index):
    """Parse the 'stop' verb.

       Syntax:
          stop taskName

       Builds a 'stop' fiat for the named tasker with native context EXIT.
       Returns True on success; raises excepting.ParseError on syntax error.
    """
    # ensure currentStore, currentFramer, currentFrame all exist
    self.verifyCurrentContext(tokens, index)
    try:
        taskerName = tokens[index]
        index += 1  # consume the tasker name token
        self.verifyName(taskerName, command, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):  # leftover tokens are a syntax error
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    self.makeFiat(taskerName, 'stop', EXIT, command, tokens, index)
    return True
def buildRun(self, command, tokens, index):
    """Parse the 'run' verb.

       Syntax:
          run taskName

       Builds a 'run' fiat for the named tasker with native context RECUR.
       Returns True on success; raises excepting.ParseError on syntax error.
    """
    # ensure currentStore, currentFramer, currentFrame all exist
    self.verifyCurrentContext(tokens, index)
    try:
        taskerName = tokens[index]
        index += 1  # consume the tasker name token
        self.verifyName(taskerName, command, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):  # leftover tokens are a syntax error
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    self.makeFiat(taskerName, 'run', RECUR, command, tokens, index)
    return True
def buildAbort(self, command, tokens, index):
    """Parse the 'abort' verb.

       Syntax:
          abort taskName

       Builds an 'abort' fiat for the named tasker with native context ENTER.
       Returns True on success; raises excepting.ParseError on syntax error.
    """
    # ensure currentStore, currentFramer, currentFrame all exist
    self.verifyCurrentContext(tokens, index)
    try:
        taskerName = tokens[index]
        index += 1  # consume the tasker name token
        self.verifyName(taskerName, command, tokens, index)
    except IndexError:
        msg = "Error building %s. Not enough tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    if index != len(tokens):  # leftover tokens are a syntax error
        msg = "Error building %s. Unused tokens." % (command,)
        raise excepting.ParseError(msg, tokens, index)
    self.makeFiat(taskerName, 'abort', ENTER, command, tokens, index)
    return True
def buildUse(self, command, tokens, index):
    """'use' verb. Not implemented yet.

       Logs the raw command line at concise verbosity and returns True.
    """
    msg = " ".join(tokens)
    # Bug fix: previously logged the literal "{0}" because .format(msg)
    # was missing, so the command text never appeared in the log.
    console.concise("{0}\n".format(msg))
    return True
def buildFlo(self, command, tokens, index):
    """'flo' verb. Not implemented yet.

       Logs the raw command line at concise verbosity and returns True.
    """
    msg = " ".join(tokens)
    # Bug fix: previously logged the literal "{0}" because .format(msg)
    # was missing, so the command text never appeared in the log.
    console.concise("{0}\n".format(msg))
    return True
def buildTake(self, command, tokens, index):
    """'take' verb. Not implemented yet.

       Logs the raw command line at concise verbosity and returns True.
    """
    msg = " ".join(tokens)
    # Bug fix: previously logged the literal "{0}" because .format(msg)
    # was missing, so the command text never appeared in the log.
    console.concise("{0}\n".format(msg))
    return True
def buildGive(self, command, tokens, index):
    """'give' verb. Not implemented yet.

       Logs the raw command line at concise verbosity and returns True.
    """
    msg = " ".join(tokens)
    # Bug fix: previously logged the literal "{0}" because .format(msg)
    # was missing, so the command text never appeared in the log.
    console.concise("{0}\n".format(msg))
    return True
#------------------
def makeIncDirect(self, dstPath, dstFields, srcData):
    """Make IncDirect act that increments destination fields by direct data.

       dstPath is the destination share path string.
       dstFields is a list of destination field names.
       srcData is an ordered dict of source data.

       Method must be wrapped in appropriate try excepts.
       Returns the created act.
    """
    actorName = 'Inc' + 'Direct' #capitalize second word
    if actorName not in poking.Poke.Registry:
        msg = "ParseError: Can't find actor named '%s'" % (actorName)
        # Bug fix: previously raised with undefined names 'tokens' and
        # 'index' (NameError); this method has no token context so pass
        # an empty token list and zero index instead.
        raise excepting.ParseError(msg, [], 0)
    parms = {}
    parms['destination'] = dstPath #this is string
    parms['destinationFields'] = dstFields # this is a list
    parms['sourceData'] = srcData #this is an ordered dictionary
    act = acting.Act( actor=actorName,
                      registrar=poking.Poke,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    return act
def makeIncIndirect(self, dstPath, dstFields, srcPath, srcFields):
    """Make IncIndirect act that increments destination fields from a
       source share.

       dstPath / dstFields give the destination share path and field list.
       srcPath / srcFields give the source share path and field list.

       Method must be wrapped in appropriate try excepts.
       Returns the created act.
    """
    actorName = 'Inc' + 'Indirect' #capitalize second word
    if actorName not in poking.Poke.Registry:
        msg = "ParseError: Goal can't find actor named '%s'" % (actorName)
        # Bug fix: previously raised with undefined names 'tokens' and
        # 'index' (NameError); this method has no token context so pass
        # an empty token list and zero index instead.
        raise excepting.ParseError(msg, [], 0)
    parms = {}
    parms['destination'] = dstPath #this is a share
    parms['destinationFields'] = dstFields #this is a list
    parms['source'] = srcPath #this is a share
    parms['sourceFields'] = srcFields #this is a list
    act = acting.Act( actor=actorName,
                      registrar=poking.Poke,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    return act
def makeFramerGoal(self, name, tokens, index):
    """Build a goal act whose destination is a goal share relative to the
       current framer.

       Method must be wrapped in appropriate try excepts.

       Forms:
          goal to data
          goal from source
       goal:
          name
          (implied goal is framer.currentframer.goal.name value)
       data:
          [value] value
          field value [field value ...]
       source:
          [(value, fields) in] indirect

       Returns (act, index) tuple.
    """
    # destination is the named goal share on the current framer
    destPath = 'framer.' + 'me' + '.goal.' + name
    destFields = ['value']
    # required connective selects direct vs indirect form
    connective = tokens[index]
    index += 1
    if connective in ('to', 'with'):
        # direct form: literal data follows
        sourceData, index = self.parseDirect(tokens, index)
        act = self.makeGoalDirect(destPath, destFields, sourceData)
    elif connective in ('by', 'from'):
        # indirect form: optional fields then a share path follow
        sourceFields, index = self.parseFields(tokens, index)
        sourcePath, index = self.parseIndirect(tokens, index)
        act = self.makeGoalIndirect(destPath, destFields, sourcePath, sourceFields)
    else:
        msg = "ParseError: Unexpected connective '%s'" %\
            (connective)
        raise excepting.ParseError(msg, tokens, index)
    return act, index
def makeGoalDirect(self, dstPath, dstFields, srcData):
    """Make GoalDirect act that sets destination fields from direct data.

       dstPath is the destination share path string.
       dstFields is a list of destination field names.
       srcData is a dict of source data.

       Method must be wrapped in appropriate try excepts.
       Returns the created act.
    """
    actorName = 'Goal' + 'Direct' #capitalize second word
    if actorName not in goaling.Goal.Registry:
        msg = "ParseError: Goal can't find actor named '%s'" % (actorName)
        # Bug fix: previously raised with undefined names 'tokens' and
        # 'index' (NameError); this method has no token context so pass
        # an empty token list and zero index instead.
        raise excepting.ParseError(msg, [], 0)
    parms = {}
    parms['destination'] = dstPath #this is string
    parms['destinationFields'] = dstFields #this is list
    parms['sourceData'] = srcData #this is a dictionary
    act = acting.Act( actor=actorName,
                      registrar=goaling.Goal,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    return act
def makeGoalIndirect(self, dstPath, dstFields, srcPath, srcFields):
    """Make GoalIndirect act that sets destination fields from a source
       share.

       dstPath / dstFields give the destination share path and field list.
       srcPath / srcFields give the source share path and field list.

       Method must be wrapped in appropriate try excepts.
       Returns the created act.
    """
    actorName = 'Goal' + 'Indirect' #capitalize second word
    if actorName not in goaling.Goal.Registry:
        msg = "ParseError: Goal can't find actor named '%s'" % (actorName)
        # Bug fix: previously raised with undefined names 'tokens' and
        # 'index' (NameError); this method has no token context so pass
        # an empty token list and zero index instead.
        raise excepting.ParseError(msg, [], 0)
    parms = {}
    parms['destination'] = dstPath #this is string
    parms['destinationFields'] = dstFields #this is a list
    parms['source'] = srcPath #this is a string
    parms['sourceFields'] = srcFields #this is a list
    act = acting.Act( actor=actorName,
                      registrar=goaling.Goal,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    return act
def makeNeed(self, tokens, index):
    """
    Parse a need and dispatch to the specific need maker.

    Method must be wrapped in try except IndexError.
    Method assumes already checked for currentStore.
    Method assumes already checked for currentFramer.
    Method assumes already checked for currentFrame.

    Returns (act, index) where act may be wrapped in a Nact for negation.

    Need forms:
       [not] need
    need:
       basic need:
         if state [comparison goal [+- tolerance]]
       simple need:
         if framerstate [re [(me, framername)]] comparison framergoal [+- tolerance]
         if framerstate re [me] is TBD # not supported yet
       special need:
          if indirect is updated [in frame (me, framename)]
          if taskername is (readied, started, running, stopped, aborted)
          if taskername is done
          if (aux auxname, any, all)
              [in frame [(me, framename)][in framer [(me, framername)]]] is done
          if (aux auxname, any, all)
              [in framer [(me, framername)]] is done
          if ([aux] auxname, any, all)
              in frame [(me, framename)][in framer [(me, framername)]] is done
          if ([aux] auxname, any, all)
              in framer [(me, framername)] is done
    state:
       [(value, field) in] indirect
    goal:
       value
       [(value, field) in] indirect
    indirect:
       path [[of relation] ...]
    comparison:
       (==, !=, <, <=, >=, >)
    tolerance:
       number (the absolute value is used)
    framerstate:
       (elapsed, recurred)
    framergoal:
       goal
       value
       [(value, field) in] indirect
    """
    kind = None
    negate = False
    if tokens[index] == 'not':
        negate = True
        index += 1 #eat token
    # find back end of current clause
    # only scan tokens up to the next 'and' so one need is parsed at a time
    if 'and' in tokens[index:]: # conjunction
        back = tokens[index:].index('and') + index + 1
    else:
        back = len(tokens)
    if 'is' in tokens[index:back]: # check for 'is participle' form, special needs
        place = tokens[index:back].index('is') # is
        participle = tokens[index + place + 1] # participle modifier to is
        # the participle after 'is' decides which special need to build
        if participle in ('done', ):
            kind = 'done'
            act, index = self.makeDoneNeed(kind, tokens, index)
        elif participle in ('readied', 'started', 'running', 'stopped', 'aborted'):
            kind = 'status'
            act, index = self.makeStatusNeed(kind, tokens, index)
        elif participle in ('updated', 'changed'):
            kind = participle[:-1] # remove 'd' suffix
            act, index = self.makeMarkerNeed(kind, tokens, index)
        else:
            msg = "ParseError: Unexpected 'is' participle '%s' for need" %\
                (participle)
            raise excepting.ParseError(msg, tokens, index)
    else: # either simple need or basic need
        state, framer, index = self.parseFramerState(tokens, index)
        if state is not None: # 're' clause present, simple need
            if state not in ('elapsed', 'recurred'):
                msg = "ParseError: Unsupported framer state '%s'" %\
                    (state)
                raise excepting.ParseError(msg, tokens, index)
            kind = state
            act, index = self.makeFramerNeed(kind, tokens, index)
            # in the future we could support framer need for a different framer
            #   not me or current framer
            # currently ignoring framer, because only allow 'me' or currentFramer
        else: # basic need with support for deprecated form of simple need
            simple = False # found deprecated simple need form
            stateField, index = self.parseField(tokens, index)
            if stateField is None: # no 'in' clause
                state = tokens[index] # look for bare framer state
                if state in ('elapsed', 'recurred'): # deprecated
                    index += 1
                    kind = state
                    simple = True
                    act, index = self.makeFramerNeed(kind, tokens, index)
            if not simple: # basic need either path not elapsed,recurred or 'in' clause
                statePath, index = self.parseIndirect(tokens, index)
                #parse optional comparison
                comparison, index = self.parseComparisonOpt(tokens,index)
                if not comparison: #no comparison so make a boolean need
                    act = self.makeBoolenNeed(statePath, stateField)
                else: #valid comparison so required goal
                    #parse required goal
                    direct, goal, goalPath, goalField, index = \
                        self.parseNeedGoal(statePath, stateField, tokens, index)
                    #parse optional tolerance
                    tolerance, index = self.parseTolerance(tokens, index)
                    if direct: #make a direct need
                        act = self.makeDirectNeed(statePath,
                                                  stateField,
                                                  comparison,
                                                  goal,
                                                  tolerance)
                    else: #make an indirect need
                        act = self.makeIndirectNeed(statePath,
                                                    stateField,
                                                    comparison,
                                                    goalPath,
                                                    goalField,
                                                    tolerance)
    if negate:
        # wrap in Nact which inverts the truth of the underlying need act
        act = acting.Nact(actor=act.actor,
                          registrar=act.registrar,
                          parms=act.parms,
                          human=self.currentHuman,
                          count=self.currentCount)
    return (act, index)
def makeDoneNeed(self, kind, tokens, index):
    """
    Need to check if tasker completed by .done truthy.
    Method must be wrapped in appropriate try excepts.

    Returns (act, index) tuple.

    Syntax:
       if taskername is done
       if (aux auxname, any, all)
           [in frame [(me, framename)][in framer [(me, framername)]]] is done
       if (aux auxname, any, all)
           [in framer [(me, framername)]] is done
       if ([aux] auxname, any, all)
           in frame [(me, framename)][in framer [(me, framername)]] is done
       if ([aux] auxname, any, all)
           in framer [(me, framername)] is done
    """
    frame = "" # name of frame where aux resides if applicable
    framer = "" # name of framer where aux resides if applicable
    auxed = False # one of the auxiliary forms
    tasker = tokens[index]
    if tasker in ('any', 'all'): # auxilary case applicable so default
        index += 1
        auxed = True
        framer = 'me'
        frame = 'me'
    elif tasker == "aux":
        # explicit 'aux auxname' form
        index += 1
        auxed = True
        framer = 'me'
        tasker = tokens[index]
        self.verifyName(tasker, kind, tokens, index)
        index += 1
    else:
        # plain taskername; may still become auxed via an 'in' clause below
        self.verifyName(tasker, kind, tokens, index)
        index += 1
    # in clause existence means auxilary case
    # optional in clauses followed by is clause
    connective = tokens[index]
    if connective == 'in': # optional 'in frame [(me, framename)]' clause
        index += 1 # eat 'in' connective
        auxed = True
        framer = 'me'
        place = tokens[index] # required place frame or framer
        index += 1 # eat place token
        if place == 'framer':
            connective = tokens[index]
            if connective not in Reserved: # assume must be name
                framer = connective
                self.verifyName(framer, kind, tokens, index)
                index += 1
                connective = tokens[index] # set up for next clause
        elif place == 'frame':
            frame = 'me'
            connective = tokens[index]
            if connective not in Reserved: # assume must be name
                frame = connective
                self.verifyName(frame, kind, tokens, index)
                index += 1
                connective = tokens[index] # setup for next clause
            if connective == 'in': # optional 'in framer [(me, framername)]' clause
                index += 1 # eat 'in' connective
                place = tokens[index] # required place framer
                index += 1 # eat place token
                if place != 'framer':
                    msg = ("ParseError: Expected 'framer' got "
                           "'{0}'".format(place))
                    raise excepting.ParseError(msg, tokens, index)
                connective = tokens[index]
                if connective not in Reserved: # assume must be name
                    framer = connective
                    self.verifyName(framer, kind, tokens, index)
                    index += 1
                    connective = tokens[index] # setup for next clause
        else:
            msg = ("ParseError: Expected 'framer' or frame' got "
                   "'{0}'".format(place))
            raise excepting.ParseError(msg, tokens, index)
    # required terminating "is done" clause
    if connective not in ('is', ): # missing 'is'
        msg = ("ParseError: Expected 'is' connective got "
               "'{0}'".format(connective))
        raise excepting.ParseError(msg, tokens, index)
    index += 1 # eat 'is' connective token
    participle = tokens[index]
    index += 1
    if participle not in ('done', ): # wrong 'participle'
        msg = ("ParseError: Expected 'done' participle got "
               "'{0}'".format(participle))
        raise excepting.ParseError(msg, tokens, index)
    # a frame of me is nonsensical if framer is not current framer
    if (frame == 'me' and
            not (framer == 'me' or framer == self.currentFramer.name)):
        msg = ("Error: Frame '{0}' nonsensical given"
               " Framer '{1}'.".format(frame, framer))
        raise excepting.ParseError(msg, tokens, index)
    # 'NeedDone' for plain tasker, 'NeedDoneAux' for auxiliary forms
    actorName = 'Need' + kind.capitalize()
    if auxed:
        actorName += 'Aux'
    if actorName not in needing.Need.Registry:
        msg = "ParseError: Need '%s' can't find actor named '%s'" %\
            ( kind, actorName)
        raise excepting.ParseError(msg, tokens, index)
    parms = {}
    parms['tasker'] = tasker
    parms['framer'] = framer
    parms['frame'] = frame
    act = acting.Act(actor=actorName,
                     registrar=needing.Need,
                     parms=parms,
                     human=self.currentHuman,
                     count=self.currentCount)
    return (act, index)
def makeStatusNeed(self, kind, tokens, index):
    """
    Need to check if tasker named tasker status' is status.
    Method must be wrapped in appropriate try excepts.

    Returns (act, index) tuple.

    Syntax:
       if taskername is (readied, started, running, stopped, aborted)
    """
    tasker = tokens[index]
    if not REO_IdentPub.match(tasker):
        msg = "ParseError: Invalid format of tasker name '%s'" % (tasker)
        raise excepting.ParseError(msg, tokens, index)
    index += 1
    connective = tokens[index]
    index += 1
    if connective not in ('is', ):
        # Bug fix: message previously had one %s but two args (kind,
        # connective) which raised TypeError instead of ParseError.
        msg = "ParseError: Need '%s' invalid connective '%s'" %\
            (kind, connective)
        raise excepting.ParseError(msg, tokens, index)
    status = tokens[index] # participle
    index += 1
    if status.capitalize() not in StatusValues:
        # Bug fix: same one-%s-two-args TypeError as above.
        msg = "ParseError: Need '%s' invalid status '%s'" %\
            (kind, status)
        raise excepting.ParseError(msg, tokens, index)
    status = StatusValues[status.capitalize()] #replace name with value
    actorName = 'Need' + kind.capitalize()
    if actorName not in needing.Need.Registry:
        msg = "ParseError: Need '%s' can't find actor named '%s'" %\
            (kind, actorName)
        raise excepting.ParseError(msg, tokens, index)
    parms = {}
    parms['tasker'] = tasker #need to resolve this
    parms['status'] = status
    act = acting.Act( actor=actorName,
                      registrar=needing.Need,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    return (act, index)
def makeUpdateNeed(self, kind, tokens, index):
    """
    Need to check if share updated in frame.
    Method must be wrapped in appropriate try excepts.
    Syntax:
       if path [[of relation] ...] is updated [in frame [(me, framename)]]
                                              [by marker]
    """
    # 'updated' and 'changed' needs share the same marker-need parsing
    return self.makeMarkerNeed(kind, tokens, index)
def makeChangeNeed(self, kind, tokens, index):
    """
    Need to check if share changed in frame.
    Method must be wrapped in appropriate try excepts.
    Syntax:
       if path [[of relation] ...] is changed [in frame [(me, framename)]]
                                              [by marker]
    """
    # 'updated' and 'changed' needs share the same marker-need parsing
    return self.makeMarkerNeed(kind, tokens, index)
def makeMarkerNeed(self, kind, tokens, index):
    """
    Support method to make either NeedUpdate or NeedChange
    as determined by kind ('update' or 'change').
    Returns (act, index) tuple.

    Syntax:
       if path [[of relation] ...] is (updated, changed)
                [in frame [(me, framename)]] [by marker]
    sharepath:
       path [[of relation] ...]
    marker:
       string
    """
    frame = "" # name of marked frame when empty resolve uses "me"" but no enact
    marker = ""
    sharePath, index = self.parseIndirect(tokens, index)
    # required 'is (updated|changed)' clause
    connective = tokens[index]
    if connective not in ('is', ):
        msg = ("ParseError: Unexpected connective '{0}' not 'is', "
               "while building need".format(connective))
        raise excepting.ParseError(msg, tokens, index)
    index += 1
    participle = tokens[index]
    if participle not in ('updated', 'changed' ):
        msg = ("ParseError: Unexpected 'is' participle '{0}', "
               " not 'updated' or 'changed', "
               "while building need".format(participle))
        raise excepting.ParseError(msg, tokens, index)
    index += 1
    # ensure kind and participle match
    if participle[:-1] != kind: # remove 'd' suffix
        msg = ("ParseError: Mismatching participle. Expected '{0}' got "
               "'{1}'".format(kind + 'd', participle))
        raise excepting.ParseError(msg, tokens, index)
    # optional 'in frame [name]' and 'by marker' clauses in any order
    while index < len(tokens): # optional 'in frame' clause
        connective = tokens[index]
        if connective not in ('in', 'by'): # next need clause started
            break
        index += 1 # eat token for connective
        if connective == 'in':
            place = tokens[index] #need to resolve
            index += 1 # eat place token
            if place != 'frame':
                msg = ("ParseError: Invalid "
                       " '{0}' clause. Expected 'frame' got "
                       "'{1}'".format(connective, place))
                raise excepting.ParseError(msg, tokens, index)
            frame = "me" # default if just frame but no framename
            if index < len(tokens): # frame name is optional
                connective = tokens[index] #need to resolve
                if connective not in Reserved: # assume must be name
                    frame = connective # only
                    if not REO_IdentPub.match(frame):
                        msg = "ParseError: Invalid format of frame name '%s'" % (frame)
                        raise excepting.ParseError(msg, tokens, index)
                    index += 1 # consume frame name token
        elif connective == 'by':
            marker = tokens[index]
            index += 1 # eat marker token
            marker = StripQuotes(marker)
    # assign marker type actual marker Act created in need's resolve
    markerKind = 'Marker' + kind.capitalize()
    actorName = 'Need' + kind.capitalize()
    if actorName not in needing.Need.Registry:
        msg = "ParseError: Need '%s' can't find actor named '%s'" %\
            (kind, actorName)
        raise excepting.ParseError(msg, tokens, index)
    parms = {}
    parms['share'] = sharePath
    parms['frame'] = frame # marked frame name resolved in resolvelinks
    parms['kind'] = markerKind # marker kind resolved in resolvelinks
    parms['marker'] = marker
    act = acting.Act( actor=actorName,
                      registrar=needing.Need,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    return (act, index)
def makeImplicitDirectFramerNeed(self, name, comparison, goal, tolerance):
    """Build an implicit need, i.e. a need implied by the verb itself
       (such as timeout) rather than parsed from tokens.

       Method must be wrapped in appropriate try excepts.

       Compares:
          state comparison goal [+- tolerance]
       goal:
          value (direct number or string)
       state:
          name (implied state is framer.currentframer.state.name value)

       Returns the created direct need act.
    """
    console.profuse("  Making implicit direct framer need {0}\n".format(name))
    # name addresses a state share relative to the current framer;
    # always compare its 'value' field
    return self.makeDirectNeed('framer.' + 'me' + '.state.' + name,
                               'value',
                               comparison,
                               goal,
                               tolerance)
def makeFramerNeed(self, name, tokens, index):
    """Build a need that checks if the framer state named by name for the
       current framer satisfies a comparison.

       Method must be wrapped in appropriate try excepts.

       Compares:
          state comparison goal [+- tolerance]
       state:
          name (implied state is framer.currentframer.state.name value)
       goal:
          value or [(value, field) in] indirect

       Examples:
          elapsed >= 25.0
          elapsed >= goal
          elapsed == goal +- 0.1

       Returns (act, index) tuple.
    """
    console.profuse("  Making framer need {0}\n".format(name))
    # state is the named share relative to the current framer;
    # always compare its 'value' field
    framerStatePath = 'framer.' + 'me' + '.state.' + name
    framerStateField = 'value'
    # comparison operator is required for framer needs
    comparison, index = self.parseComparisonReq(tokens,index)
    # goal may be a literal (direct) or a share reference (indirect)
    direct, goal, goalPath, goalField, index = \
        self.parseFramerNeedGoal(framerStatePath, framerStateField, tokens, index)
    # tolerance is optional
    tolerance, index = self.parseTolerance(tokens, index)
    if direct:
        act = self.makeDirectNeed(framerStatePath,
                                  framerStateField,
                                  comparison,
                                  goal,
                                  tolerance)
    else:
        act = self.makeIndirectNeed(framerStatePath,
                                    framerStateField,
                                    comparison,
                                    goalPath,
                                    goalField,
                                    tolerance)
    return (act, index)
def makeBoolenNeed(self, statePath, stateField):
    """Make boolean need act that is satisfied when the state field is
       truthy.

       statePath is the state share path string.
       stateField is the state field name string.

       Method must be wrapped in appropriate try excepts.
       Returns the created act.

       NOTE(review): method name keeps the historical 'Boolen' spelling;
       the registry key is 'NeedBoolean'.
    """
    actorName = 'Need' + 'Boolean' #capitalize second word
    if actorName not in needing.Need.Registry:
        msg = "ParseError: Need can't find actor named '%s'" % (actorName)
        # Bug fix: previously raised with undefined names 'tokens' and
        # 'index' (NameError); this method has no token context so pass
        # an empty token list and zero index instead.
        raise excepting.ParseError(msg, [], 0)
    parms = {}
    parms['state'] = statePath #this is a string
    parms['stateField'] = stateField #this is string
    act = acting.Act( actor=actorName,
                      registrar=needing.Need,
                      parms=parms,
                      human=self.currentHuman,
                      count=self.currentCount)
    msg = "     Created Actor {0} parms: ".format(actorName)
    for key, value in parms.items():
        msg += " {0} = {1}".format(key, value)
    console.profuse("{0}\n".format(msg))
    return act
def makeDirectNeed(self, statePath, stateField, comparison, goal, tolerance):
"""Make directNeed act
method must be wrapped in appropriate try excepts
"""
actorName = 'Need' + 'Direct' #capitalize second word
if actorName not in needing.Need.Registry:
msg = "ParseError: Need can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['state'] = statePath #this is a string
parms['stateField'] = stateField #this is a string
parms['comparison'] = comparison #this is a string
parms['goal'] = goal #this is a value: boolean number or string
parms['tolerance'] = tolerance #this is a number
act = acting.Act( actor=actorName,
registrar=needing.Need,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
msg = " Created Actor {0} parms: ".format(actorName)
for key, value in parms.items():
msg += " {0} = {1}".format(key, value)
console.profuse("{0}\n".format(msg))
return act
def makeIndirectNeed(self,
statePath,
stateField,
comparison,
goalPath,
goalField,
tolerance):
"""Make indirectNeed act
method must be wrapped in appropriate try excepts
"""
actorName = 'Need' + 'Indirect' #capitalize second word
if actorName not in needing.Need.Registry:
msg = "ParseError: Need can't find actor named '%s'" % (actorName)
raise excepting.ParseError(msg, tokens, index)
parms = {}
parms['state'] = statePath #this is string
parms['stateField'] = stateField #this is a string
parms['comparison'] = comparison #this is a string
parms['goal'] = goalPath #this is a string
parms['goalField'] = goalField #this is a string
parms['tolerance'] = tolerance #this is a number
msg = " Created Actor {0} parms: ".format(actorName)
for key, value in parms.items():
msg += " {0} = {1}".format(key, value)
console.profuse("{0}\n".format(msg))
act = acting.Act( actor=actorName,
registrar=needing.Need,
parms=parms,
human=self.currentHuman,
count=self.currentCount)
return act
    def makeFiat(self, name, kind, native, command, tokens, index):
        """
           Make a fiat act of the given kind for tasker name and add it to the
           current frame under the current (or native) action context.
           Assumes wrapped in currentFrame etc checks.
           Raises ParseError when no Fiat actor exists for kind or the context
           is not valid for this frame. Returns True on success.
        """
        actorName = 'Fiat' + kind.capitalize()
        if actorName not in fiating.Fiat.Registry:
            msg = "Error building fiat %s. No actor named %s." % (kind, actorName)
            raise excepting.ParseError(msg, tokens, index)
        parms = {}
        parms['tasker'] = name #resolve later
        act = acting.Act( actor=actorName,
                          registrar=fiating.Fiat,
                          parms=parms,
                          human=self.currentHuman,
                          count=self.currentCount)
        context = self.currentContext
        if context == NATIVE:
            context = native #The native context for this command
        if not self.currentFrame.addByContext(act, context):
            msg = "Error building %s. Bad context '%s'." % (command, context)
            raise excepting.ParseError(msg, tokens, index)
        console.profuse("  Added {0} fiat '{1}' with parms '{2}'\n".format(
            ActionContextNames[context], act.actor, act.parms))
        return True
#----------------------------
def parseDirect(self, tokens, index):
"""Parse Direct data address
returns ordered dictionary of fields (keys) and values
if no field provided then uses default field = 'value'
parms:
tokens = list of tokens for command
index = current index into tokens
returns:
data ordered dict
index
method must be wrapped in appropriate try excepts
Syntax:
data:
[value] value
field value [field value ...]
possible parsing end conditions:
no more tokens (init, set)
token 'into' (put)
"""
data = odict()
if index == (len(tokens) - 1): #only one more token so it must be value
value = tokens[index]
if value in Reserved: # ending token not valid value
msg = "ParseError: Encountered reserved '{0}' instead of value." % (value)
raise excepting.ParseError(msg, tokens, index)
index +=1 #eat token
field = 'value' #default field
else: #more than one so first may be field and second token may be value
field = tokens[index]
if field in Reserved: # ending token not valid field
msg = "ParseError: Encountered reserved '{0}' instead of field." % (field)
raise excepting.ParseError(msg, tokens, index)
index += 1
value = tokens[index]
if value in Reserved: #second reserved token so first token was value
value = field
field = 'value' #default field
else: #first token was field and second value
field = StripQuotes(field)
index += 1 #eat token
data[field] = Convert2StrBoolPathCoordPointNum(value) #convert to BoolNumStr, load data
#parse rest if any
while index < len(tokens): #must be in pairs unless first is ending token
field = tokens[index]
if field in Reserved: #ending token so break
break
field = StripQuotes(field)
index += 1 #eat token
value = tokens[index]
if value in Reserved: # ending token before valid value
msg = "ParseError: Encountered reserved '{0}' instead of value." % (value)
raise excepting.ParseError(msg, tokens, index)
index += 1
data[field] = Convert2StrBoolPathCoordPointNum(value) #convert to BoolNumStr, load data
#prevent using multiple fields if one of them is 'value'
if (len(data) > 1) and ('value' in data):
msg = "ParseError: Direct data field = 'value' must be only field '%s'" % (data.keys)
raise excepting.ParseError(msg, tokens, index)
#prevent using incorrect format for fields
for field in data: # keys
if not REO_IdentPub.match(field): #invalid format
msg = "ParseError: Invalid field = '%s'" % (field)
raise excepting.ParseError(msg, tokens, index)
return (data, index)
    def parseFields(self, tokens, index):
        """
           Parse optional field list for Indirect address
           parms:
              tokens = list of tokens for command
              index = current index into tokens
           returns:
              (fields,index)
           method must be wrapped in appropriate try excepts
           Syntax:
              [(value, fields) in] indirect
              fields:
                 field [field ...]
           valid fields only when encounter token 'in' after fields
           consumes fields and the 'in' so subsequent parsePath starts with indirect path
           parsing end conditions that signify no fields
              if encounter before 'in':
                 no more tokens
                 reserved token
        """
        indexSave = index #save it since we lookahead to see if "in"
        fields = []
        found = False #flag to indicate found 'in' which indicates fields clause
        while index < len(tokens): # provisional parse for fields
            field = tokens[index]
            if field == 'in': #field list present and completed now we know
                index +=1
                found = True
                break
            if field in Reserved: #field list not present
                break
            index += 1 #eat token
            field = StripQuotes(field)
            fields.append(field) # provisional
        if not found: # no fields clause so we ignore
            index = indexSave #so restore index
            fields = [] #empty fields list
        #prevent using multiple fields if one of them is 'value'
        if (len(fields) > 1) and ('value' in fields):
            msg = "ParseError: Field = 'value' with multiple fields = '%s'" % (fields)
            raise excepting.ParseError(msg, tokens, index)
        for i, field in enumerate(fields): # now we check if valid format
            if not REO_IdentPub.match(field):
                msg = "ParseError: Invalid format of field '%s'" % (field)
                raise excepting.ParseError(msg, tokens, index)
        return (fields, index)
    def parseField(self, tokens, index):
        """
           Parse optional field for Indirect address
           parms:
              tokens = list of tokens for command
              index = current index into tokens
           returns:
              (field, index)  where field is None when no field clause given
           method must be wrapped in appropriate try excepts
           Syntax:
              [(value, field) in] indirect
           valid field only when encounter token 'in' after first field
           consumes field and the 'in' so subsequent parsePath starts with indirect path
           parsing end conditions that signify no fields
              if encounter before 'in':
                 no more tokens
                 reserved token
        """
        indexSave = index #save it since we lookahead to see if "in"
        fields = []
        found = False #flag to indicate found 'in' which indicates fields clause
        while index < len(tokens):
            field = tokens[index]
            if field == 'in': #field list present and completed
                index +=1
                found = True
                break
            if field in Reserved: #field list not present
                break
            index += 1 #eat token
            field = StripQuotes(field)
            fields.append(field)
        if not found: #no fields clause
            index = indexSave #so restore index
            fields = [] #empty fields list
        #prevent using multiple fields
        if (len(fields) > 1):
            msg = "ParseError: More than one field = '%s'" % (fields)
            raise excepting.ParseError(msg, tokens, index)
        if fields:
            field = fields[0]
            if not REO_IdentPub.match(field):
                msg = "ParseError: Invalid format of field '%s'" % (field)
                raise excepting.ParseError(msg, tokens, index)
        else:
            field = None
        return (field, index)
def parsePath(self, tokens, index):
"""Parse required (path or dotpath) path
Does not support relative path processing for verbs such as init or
server which are not inside a framer context
method must be wrapped in appropriate try excepts
"""
path = tokens[index]
index +=1
if not REO_Path.match(path): #check if valid path
msg = "ParseError: Invalid path '%s'" % (path)
raise excepting.ParseError(msg, tokens, index)
#path = path.lstrip('.') #remove leading dot if any
return (path, index)
    def parseIndirect(self, tokens, index, node=False):
        """
           Parse Indirect data address
           If node then allow trailing dot in path
           parms:
              tokens = list of tokens for command
              index = current index into tokens
           returns:
              path  (fully resolved path string, relation prepended as needed)
              index
           method must be wrapped in appropriate try excepts
           Syntax:
              indirect:
                 absolute
                 relative
              absolute:
                 dotpath
              relative:
                 root
                 inode
                 framer
                 frame
                 actor
              root:
                 path [of root]
              inode:
                 path of me
              framer:
                 path of framer [name]
              frame:
                 path of frame [name]
              actor:
                 path of actor [name]
        """
        if node:
            reoDotPath = REO_DotPathNode
            reoRelPath = REO_RelPathNode
        else:
            reoDotPath = REO_DotPath
            reoRelPath = REO_RelPath
        path = tokens[index]
        index +=1
        if path in Reserved:
            msg = "ParseError: Invalid path '%s' using reserved" % (path)
            raise excepting.ParseError(msg, tokens, index)
        if reoDotPath.match(path): #valid absolute path segment
            #check for optional relation clause
            #if 'of relation' clause then allows relative but no
            #implied relation clauses
            relation, index = self.parseRelation(tokens, index)
            # dotpath starts with '.' no need to add
        elif reoRelPath.match(path): #valid relative path segment
            #get optional relation clause, default is root
            relation, index = self.parseRelation(tokens, index)
            chunks = path.split('.')
            if relation: # check for relation conflict
                if chunks[0] in ['framer', 'frame', 'actor']:
                    #an explicit relation clause may not repeat a relation
                    #already spelled inline at the front of the path
                    if (chunks[0] == 'framer' or
                            (chunks[0] == 'frame' and '.frame.' in relation) or
                            (chunks[0] == 'actor' and '.actor.' in relation)):
                        msg = ("ParseError: Relation conflict in path '{0}'"
                               " with relation '{1}'".format(path, relation))
                        raise excepting.ParseError(msg, tokens, index)
                    if relation == 'me':
                        msg = ("ParseError: Relation conflict in path '{0}'"
                               " with relation '{1}'".format(path, relation))
                        raise excepting.ParseError(msg, tokens, index)
            else: # prepend missing relations if partial relation in path
                if chunks[0] == 'actor':
                    if len(chunks) < 3: # actor name or share name missing
                        msg = ("ParseError: Incomplete path '{0}'. Actor name"
                               " or Share name missing given inline actor "
                               "relation".format(path))
                        raise excepting.ParseError(msg, tokens, index)
                    relation = 'framer.me.frame.me'
                elif chunks[0] == 'frame':
                    if len(chunks) < 3: # frame name or share name missing
                        msg = ("ParseError: Incomplete path '{0}'. Frame name"
                               " or Share name missing given inline frame "
                               "relation".format(path))
                        raise excepting.ParseError(msg, tokens, index)
                    framername = 'me'
                    if chunks[1] == 'main':
                        framername = 'main'
                    relation = 'framer.' + framername
            if relation:
                relation += '.' # add dot since not dotpath
        else: #invalid path format
            msg = "ParseError: Invalid path '{0}'".format(path)
            raise excepting.ParseError(msg, tokens, index)
        path = relation + path
        return (path, index)
    def parseRelation(self, tokens, index, framername=''):
        """
           Parse optional relation clause of relative data address
           parms:
              tokens = list of tokens for command
              index = current index into tokens
              framername = default framer name if not provided such as 'main'
           returns:
              relation  (prefix string, possibly empty, to prepend to a path)
              index
           method must be wrapped in appropriate try excepts
           Syntax:
              relative:
                 root
                 inode
                 framer
                 frame
                 actor
              root:
                 path [of root]
              inode:
                 path of me
              framer:
                 path of framer [(me, main, name)]
              frame:
                 path of frame [(me, main, name)]
              actor:
                 path of actor [(me, name)]
        """
        relation = '' #default relation if none given
        if index < len(tokens): #are there more tokens
            connective = tokens[index]
            if connective == 'of': #of means relation given
                index += 1 #eat token
                relation = tokens[index]
                index +=1
                if relation not in ['root', 'me', 'framer', 'frame', 'actor']:
                    msg = "ParseError: Invalid relation '%s'" % (relation)
                    raise excepting.ParseError(msg, tokens, index)
                if relation == 'root':
                    relation = '' #nothing gets prepended for root relative
                elif relation == 'me':
                    pass # do nothing
                if relation in ['framer']: #may be optional name for framer
                    name = '' #default name is empty
                    if index < len(tokens): #more tokens to check for optional name
                        name = tokens[index]
                        if name not in Reserved: #name given
                            index += 1 #eat token
                            if not REO_IdentPub.match(name): #check if valid name
                                msg = "ParseError: Invalid relation %s name '%s'" %\
                                    (relation, name)
                                raise excepting.ParseError(msg, tokens, index)
                        else:
                            name = ''
                    if not name: #no name given so substitute default
                        name = framername or 'me'
                    relation += '.' + name #append name
                if relation in ['frame']: #may be optional name of frame
                    name = '' #default name is empty
                    if index < len(tokens): #more tokens to check for optional name
                        name = tokens[index]
                        if name not in Reserved: #name given
                            index += 1 #eat token
                            if not REO_IdentPub.match(name): #check if valid name
                                msg = "ParseError: Invalid relation %s name '%s'" %\
                                    (relation, name)
                                raise excepting.ParseError(msg, tokens, index)
                        else:
                            name = ''
                    if not name: #no name given so substitute default
                        name = 'me'
                    relation += '.' + name #append name
                    # parse optional of framer relation
                    framername = ''
                    if name == 'main': # default framer for frame main is framer main
                        framername = 'main'
                    framerRelation, index = self.parseRelation(tokens,
                                                               index,
                                                               framername=framername)
                    # check if spurious, of frame or, of actor
                    if (framerRelation and
                            ('.frame.' in framerRelation or
                             '.actor.' in framerRelation )):
                        msg = "ParseError: Invalid relation '%s' following frame relation" %\
                            (framerRelation)
                        raise excepting.ParseError(msg, tokens, index)
                    if framerRelation:
                        relation = framerRelation + '.' + relation
                    else: #use default framer
                        framername = framername or 'me'
                        relation = ('framer.' + framername + '.' + relation)
                if relation in ['actor']: #may be optional name of actor
                    name = '' #default name is empty
                    if index < len(tokens): #more tokens to check for optional name
                        name = tokens[index]
                        if name not in Reserved: #name given
                            index += 1 #eat token
                            if not REO_IdentPub.match(name): #check if valid name
                                msg = "ParseError: Invalid relation %s name '%s'" %\
                                    (relation, name)
                                raise excepting.ParseError(msg, tokens, index)
                        else:
                            name = ''
                    if not name: #no name given so substitute default
                        name = 'me'
                    relation += '.' + name #append name
                    # parse optional of frame and hence framer relation
                    frameRelation, index = self.parseRelation(tokens, index)
                    # check if spurious, of framer or, of actor
                    if (frameRelation and
                            '.actor.' in frameRelation ):
                        msg = "ParseError: Invalid relation '%s' following actor relation" %\
                            (frameRelation)
                        raise excepting.ParseError(msg, tokens, index)
                    if frameRelation:
                        relation = frameRelation + '.' + relation
                    else: #use default frame and framer
                        relation = ('framer.' + 'me.' + 'frame.' + 'me' + '.' + relation)
        return (relation, index)
def parseComparisonOpt(self, tokens, index):
"""Parse a optional comparison
method must be wrapped in appropriate try excepts
"""
comparison = None
if index < len(tokens): #at least one more token
#if at least one more token could be connective or comparision
comparison = tokens[index]
if comparison in Comparisons: #
index +=1 #so eat token
else:
comparison = None
return (comparison, index)
def parseComparisonReq(self, tokens, index):
"""Parse a required comparison
method must be wrapped in appropriate try excepts
"""
comparison = tokens[index]
index +=1 #so eat token
if comparison not in Comparisons: #
msg = "ParseError: Need has invalid comparison '%s'" % (comparison)
raise excepting.ParseError(msg, tokens, index)
return (comparison, index)
    def parseFramerState(self, tokens, index):
        """Parse framer state expression
           parms:
              tokens = list of tokens for command
              index = current index into tokens
           returns:
              (state, framer, index)  state and framer are None when no
              're' clause is found
           method must be wrapped in appropriate try excepts
           Syntax:
              state re [(me, framername)]
           valid state only when encounter token 're' after first state
           parsing end conditions that signify no state
              if encounter before 're':
                 no more tokens
                 reserved token
                 multiple states
        """
        indexSave = index # save it since we lookahead to see if "re"
        states = []
        found = False # tag to indicate found 're'
        framer = None
        while index < len(tokens):
            connective = tokens[index]
            if connective == 're': # state list completed
                index += 1 # eat 're' token
                found = True
                break # do not append state == 're' to states
            if connective in Reserved: #field list not present
                break # do not append state == reserved to states
            index += 1 # eat last state token
            state = StripQuotes(connective) # candidate state, strip any quotes
            states.append(state) # save it
        if not found: # no state clause 're'
            index = indexSave #so restore index
            states = [] #empty states list
        #prevent using multiple fields
        if (len(states) > 1):
            msg = "ParseError: More than one state = '%s'" % (states)
            raise excepting.ParseError(msg, tokens, index)
        if states:
            state = states[0]
            if not REO_IdentPub.match(state):
                msg = "ParseError: Invalid format of state '%s'" % (state)
                raise excepting.ParseError(msg, tokens, index)
        else:
            state = None
        if state is not None: # get optional framer
            framer = 'me'
            while index < len(tokens):
                connective = tokens[index]
                if connective in Reserved: # framer not present
                    break
                framer = connective
                if not REO_IdentPub.match(framer):
                    msg = "ParseError: Invalid format of framer name '%s'" % (framer)
                    raise excepting.ParseError(msg, tokens, index)
                #only the current framer (or 'me') may be named here
                if framer != 'me' and framer != self.currentFramer.name:
                    msg = "ParseError: Framer name '%s' for state need not current framer" % (framer)
                    raise excepting.ParseError(msg, tokens, index)
                index += 1
        return (state, framer, index)
    def parseNeedState(self, tokens, index):
        """Parse required need state: optional 'field in' clause followed by
           the indirect state path.
           Returns (statePath, stateField, index) where stateField is None
           when no field clause was given.
           method must be wrapped in appropriate try excepts
        """
        stateField, index = self.parseField(tokens, index) #optional field clause
        statePath, index = self.parseIndirect(tokens, index) #required path
        return (statePath, stateField, index)
    def parseNeedGoal(self, statePath, stateField, tokens, index):
        """Parse required goal.
           Returns (direct, goal, goalPath, goalField, index):
              direct is True when the goal token converted to a literal
              (quoted string, boolean, coord, or number); otherwise the goal
              is an indirect share reference in goalPath/goalField.
           method must be wrapped in appropriate try excepts
        """
        goalPath = None #default
        goalField = None #default
        direct = False
        goal = tokens[index]
        #parse required goal
        try:
            goal = Convert2StrBoolCoordNum(tokens[index]) #goal is quoted string, boolean, or number
            index += 1 #eat token
            direct = True
        except ValueError: #means text is not (quoted string, bool, or number) so indirect
            goalField, index = self.parseField(tokens, index)
            goalPath, index = self.parseIndirect(tokens, index)
        return (direct, goal, goalPath, goalField, index)
    def parseFramerNeedGoal(self, statePath, stateField, tokens, index):
        """
           Parse required goal for special framer need such as
           elapsed or recurred.
           Returns (direct, goal, goalPath, goalField, index). The literal
           token 'goal' means the goal path is inferred from statePath by
           replacing its 'state' segment with 'goal'.
           method must be wrapped in appropriate try excepts
        """
        goalPath = None #default
        goalField = None #default
        direct = False
        goal = tokens[index]
        #parse required goal
        try:
            goal = Convert2StrBoolCoordNum(tokens[index]) #goal is quoted string, boolean, or number
            index += 1 #eat token
            direct = True
        except ValueError: #means text is not (quoted string, bool, or number) so indirect
            if goal == 'goal': #means goal inferred by relative statePath
                index += 1 #eat token
                #now create goal path as inferred from state path
                #check if statePath can be interpreted as framer state relative
                chunks = statePath.strip('.').split('.')
                try:
                    if ((chunks[0] == 'framer') and
                            (chunks[2] == 'state')): #framer relative
                        chunks[2] = 'goal' # .framer.me.state becomes .framer.me.goal
                    else:
                        msg = "ParseError: Goal = 'goal' without framer state path '%s'" %\
                            (statePath)
                        raise excepting.ParseError(msg, tokens, index)
                except IndexError: #statePath too short to be framer relative
                    msg = "ParseError: Goal = 'goal' without framer state path '%s'" %\
                        (statePath)
                    raise excepting.ParseError(msg, tokens, index)
                goalPath = ".".join(chunks)
                goalField = stateField #goal field is the same as the given state field
            else: #not 'goal' so parse as indirect
                #is 'field in' clause present
                goalField, index = self.parseField(tokens, index)
                goalPath, index = self.parseIndirect(tokens, index)
        return (direct, goal, goalPath, goalField, index)
def parseTolerance(self, tokens, index):
"""Parse a optional tolerance
method must be wrapped in appropriate try excepts
"""
tolerance = 0
if index < len(tokens): #at least one more token
#if at least one more token could be connective
connective = tokens[index]
if connective == '+-': #valid tolerance connective
index +=1 #so eat token
tolerance = tokens[index] #get tolerance
index += 1
tolerance = Convert2Num(tolerance) #convert to value
if isinstance(tolerance, str):
msg = "ParseError: Need has invalid tolerance '%s'" % (tolerance)
raise excepting.ParseError(msg, tokens, index)
return (tolerance, index)
    def prepareSrcDstFields(self, src, srcFields, dst, dstFields, tokens, index):
        """
           Prepares and verifys a transfer of data
              from sourceFields in source
              to dstFields in dst
           Handles default conditions when fields are empty
           src and dst are shares
           fields are lists
           Returns (srcFields, dstFields) with defaults resolved and any
           missing fields created (as None) in src and dst.
           Ensure Actor._prepareSrcDstFields is the same
        """
        if not srcFields: #no source fields so assign defaults
            if src:
                if 'value' in src:
                    srcFields = ['value'] #use value field
                elif dstFields: #use destination fields for source fields
                    srcFields = dstFields
                else: #use pre-existing source fields
                    # NOTE(review): src.keys() may be a live view depending on
                    # the share implementation -- confirm callers do not mutate
                    srcFields = src.keys()
                #else: #ambiguous multiple source fields
                    #msg = "ParseError: Can't determine source field"
                    #raise excepting.ParseError(msg, tokens, index)
            else:
                srcFields = ['value'] #use value field
        self.verifyShareFields(src, srcFields, tokens, index)
        if not dstFields: #no destination fields so assign defaults
            if 'value' in dst:
                dstFields = ['value'] #use value field
            else: #use source fields for destination fields
                dstFields = srcFields
        self.verifyShareFields(dst, dstFields, tokens, index)
        if len(srcFields) != len(dstFields):
            msg = "ParseError: Unequal number of source %s and destination %s fields" %\
                (srcFields, dstFields)
            raise excepting.ParseError(msg, tokens, index)
        # NOTE(review): izip is the Python 2 itertools name; under Python 3
        # this requires izip to be aliased to zip at module level -- confirm
        for dstField, srcField in izip(dstFields, srcFields):
            if (dstField != srcField) and (srcField != 'value'):
                console.profuse("  Warning: Field names mismatch. '{0}' in {1} "
                                "from '{2}' in {3} ... creating anyway".format(
                                    dstField, dst.name, srcField, src.name))
        #create any non existent source or destination fields
        for field in srcFields: #use source fields for source data
            if field not in src:
                console.profuse("  Warning: Transfer from non-existent field '{0}' "
                                "in share {1} ... creating anyway".format(field, src.name))
                src[field] = None #create
        for field in dstFields: #use destination fields for destination data
            if field not in dst:
                console.profuse("  Warning: Transfer into non-existent field '{0}' "
                                "in share {1} ... creating anyway\n".format(field, dst.name))
                dst[field] = None #create
        return (srcFields, dstFields)
    def prepareDataDstFields(self, data, dataFields, dst, dstFields, tokens, index):
        """
           Prepares and verifys a transfer of data
              from dataFields in data
              to dstFields in dst
           Handles default conditions when fields are empty
           data is dict
           dst is share
           fields are lists
           Returns (dataFields, dstFields) with defaults resolved and any
           missing destination fields created (as None) in dst.
           Ensure Actor._prepareDstFields is similar
        """
        if not dstFields: #no destinationField so use default rules
            if 'value' in dst:
                dstFields = ['value'] #use value field
            else: #use dataField
                dstFields = dataFields
        self.verifyShareFields(dst, dstFields, tokens, index)
        if len(dataFields) != len(dstFields):
            msg = "ParseError: Unequal number of source %s and destination %s fields" %\
                (dataFields, dstFields)
            raise excepting.ParseError(msg, tokens, index)
        # NOTE(review): izip is the Python 2 itertools name; under Python 3
        # this requires izip to be aliased to zip at module level -- confirm
        for dstField, dataField in izip(dstFields, dataFields):
            if (dstField != dataField) and (dataField != 'value'):
                console.profuse("  Warning: Field names mismatch. '{0}' in {1} "
                                "from '{2}' ... creating anyway".format(
                                    dstField, dst.name, dataField))
        #create any non existent destination fields
        for field in dstFields: #use destination fields for destination data
            if field not in dst:
                console.profuse("  Warning: Transfer into non-existent field '{0}' in "
                                "share {1} ... creating anyway\n".format(field, dst.name))
                dst[field] = None #create
        return (dataFields, dstFields)
def verifyShareFields(self, share, fields, tokens, index):
"""
Verify that updating fields in share won't violate the
condition that when a share has field == 'value'
it will be the only field
fields is list of field names
share is share
raises exception if condition would be violated
Ensure Actor._verifyShareFields is same
"""
if (len(fields) > 1) and ('value' in fields):
msg = "ParseError: Field = 'value' within fields = '%s'" % (fields)
raise excepting.ParseError(msg, tokens, index)
if share: #does share have fields already
for field in fields:
if field not in share: #so this field could be added to share
if ('value' in share) or (field == 'value'):
msg = "ParseError: Candidate field '%s' when fields = '%s' exist" %\
(field, share.keys())
raise excepting.ParseError(msg, tokens, index)
return
def validShareFields(self, share, fields):
"""Validates that updating fields in share won't violate the
condition that when a share has field = 'value'
it will be the only field
fields is list of field names
share is share
returns False if condition would be violated
return True otherwise
"""
if (len(fields) > 1) and ('value' in fields):
return False
if share: #does share have fields already
for field in fields:
if field not in share: #so this field could be added to share
if ('value' in share) or (field == 'value'):
return False
return True
def verifyCurrentContext(self, tokens, index):
"""Verify that parse context has
currentStore
currentFramer
currentFrame
If not raises ParseError
"""
if not self.currentStore:
msg = "ParseError: Building verb '%s'. No current store" % (tokens)
raise excepting.ParseError(msg, tokens, index)
if not self.currentFramer:
msg = "ParseError: Building verb '%s'. No current framer" % (tokens)
raise excepting.ParseError(msg, tokens, index)
if not self.currentFrame:
msg = "ParseError: Building verb '%s'. No current frame" % (tokens)
raise excepting.ParseError(msg, tokens, index)
return
def verifyName(self, name, command, tokens, index):
"""Verify that name is a valid public identifyer
Used for Tasker, Framer, and Frame names
"""
if not REO_IdentPub.match(name) or name in Reserved: #bad name
msg = "ParseError: Building verb '%s'. Invalid entity name '%s'" %\
(command, name)
raise excepting.ParseError(msg, tokens, index)
#------------------------
def DebugShareFields(store, name):
    """ prints out fields of share named name from store for debugging """
    share = store.fetch(name)
    if share is not None:
        # NOTE(review): share.items is interpolated without being called; if
        # Share.items is a method (not a property) this prints a bound-method
        # repr rather than the field items -- confirm against storing.Share
        console.terse("++++++++ Debug share fields++++++++\n{0} = {1}\n".format(
            share.name, share.items))
def Test(fileName = None, verbose = False):
    """Module self test

       Builds the mission from fileName (default "mission.txt"), then runs
       every house's active framers and taskers until all framers stop or
       abort, advancing the store stamp each pass. Returns the Builder used.
    """
    import globaling
    import aiding
    import excepting
    import registering
    import storing
    import skedding
    import tasking
    import acting
    import poking
    import needing
    import goaling
    import traiting
    import fiating
    import wanting
    import completing
    import doing
    import arbiting
    import controlling
    import framing
    import logging
    import interfacing
    import serving  # fix: serving is referenced in allModules below but was never imported (NameError)
    import housing
    #import building
    import monitoring
    import testing

    allModules = [globaling, aiding, excepting, registering, storing, skedding,
                  acting, poking, goaling, needing, traiting,
                  fiating, wanting, completing,
                  doing, arbiting, controlling,
                  tasking, framing, logging, interfacing, serving,
                  housing, monitoring, testing]

    if not fileName:
        fileName = "mission.txt"

    b = Builder()
    if b.build(fileName = fileName):
        houses = b.houses
        for house in houses:
            house.store.changeStamp(0.0)
            for framer in house.actives:
                status = framer.runner.send(START)
            for tasker in house.taskers:
                status = tasker.runner.send(START) #prepares logs and reopens files
        done = False
        while not done: #run until every framer is STOPPED or ABORTED
            done = True
            for house in houses:
                actives = []
                for framer in house.actives:
                    #status = framer.status
                    desire = framer.desire
                    if desire is not None:
                        control = desire
                    else:
                        control = RUN
                    status = framer.runner.send(control)
                    console.terse("Framer {0} control {1} resulting status = {2}\n".format(
                        framer.name, ControlNames[control], StatusNames[status]))
                    if not (status == STOPPED or status == ABORTED):
                        actives.append(framer)
                        done = False
                house.actives = actives
                for tasker in house.taskers:
                    status = tasker.runner.send(RUN)
                house.store.advanceStamp(0.125)
        for house in houses:
            for tasker in house.taskers:
                status = tasker.runner.send(STOP) # closes files
    return b
if __name__ == "__main__":
    Test() #run the module self test when executed as a script
| 0 | 0 | 0 |
6dfd4825fe2253911d71db94c4f5370df9abe10e | 599 | py | Python | registrations/migrations/0003_auto_20200206_0926.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | 11 | 2018-06-11T06:05:12.000Z | 2022-03-25T09:31:44.000Z | registrations/migrations/0003_auto_20200206_0926.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | 498 | 2017-11-07T21:20:13.000Z | 2022-03-31T14:37:18.000Z | registrations/migrations/0003_auto_20200206_0926.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | 6 | 2018-04-11T13:29:50.000Z | 2020-07-16T16:52:11.000Z | # Generated by Django 2.2.9 on 2020-02-06 09:26
from django.db import migrations, models
| 24.958333 | 62 | 0.60601 | # Generated by Django 2.2.9 on 2020-02-06 09:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable admin-validation timestamp fields to the Pending model."""

    dependencies = [
        ('registrations', '0002_auto_20180626_1808'),
    ]

    operations = [
        migrations.AddField(
            model_name='pending',
            name='admin_1_validated_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='pending',
            name='admin_2_validated_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| 0 | 485 | 23 |
3b3e9ad48fbbc5de739f4d602118cafcc599a65e | 5,549 | py | Python | paddles/paddle_0402/whiteboard/from_scratch/nmist/simple_ann.py | dkeefe3773/pong-rl | e0d548267d7e0ca1b8690224cfd827af8ff571ab | [
"MIT"
] | null | null | null | paddles/paddle_0402/whiteboard/from_scratch/nmist/simple_ann.py | dkeefe3773/pong-rl | e0d548267d7e0ca1b8690224cfd827af8ff571ab | [
"MIT"
] | null | null | null | paddles/paddle_0402/whiteboard/from_scratch/nmist/simple_ann.py | dkeefe3773/pong-rl | e0d548267d7e0ca1b8690224cfd827af8ff571ab | [
"MIT"
] | null | null | null | from typing import Dict
import numpy
from config import logging_configurator
from paddles.paddle_0402.config.paddle_configurator import nmist_raw_ann_config
from paddles.paddle_0402.whiteboard.from_scratch.nmist import Ann, output_training, \
initialize_gradient_weight_accumulation_matrices, initialize_bias_accumulation_vectors, input_training, \
brute_force_feed_forward, calculate_outer_layer_gradient, calculate_previous_layer_gradient, plot_cost_function
logger = logging_configurator.get_logger(__name__)
class SimpleNeuralNetwork(Ann):
    """
    With 3000 iterations, this gives 86% prediction accuracy.
    This is as simple as it gets.  Back propogation is based upon minimizing the overall cost across all samples:
    COST = (1/2) * (1/sample_count) SUM_OVER_SAMPES_Z[(abs(label - predicted)^2
    """

    def _adjust_weights(self, gradient_for_weights: Dict[int, numpy.ndarray],
                        gradient_for_bias: Dict[int, numpy.ndarray]) -> None:
        """
        This adjusts the weights according to the optimization function to minimize the quadrature of
        residuals from predicted to labeled, normalized by the sample data count
        :param gradient_for_weights: a dict whose key is the network layer and whose value is a weight matrix
        :param gradient_for_bias:    a dict whose key is the network layer and whose value is vector of bias gradients
        :return:
        """
        # gradient descent step, walking from the output layer back to layer 1:
        #   w -= step_size * (1/m) * dC/dw  (m = number of training samples)
        for layer_index in range(self.num_layers - 1, 0, -1):
            self.weight_matrix_by_layer[layer_index] += -self.step_size * (
                    1.0 / len(output_training) * gradient_for_weights[layer_index])
            self.bias_by_layer[layer_index] += -self.step_size * (
                    1.0 / len(output_training) * gradient_for_bias[layer_index])
if __name__ == "__main__":
    # train the network, evaluate prediction accuracy, then plot the
    # per-iteration average cost
    neural_network = SimpleNeuralNetwork()
    neural_network.train()
    neural_network.evalulate_network()  # NOTE(review): spelling matches the base-class method name -- confirm
    plot_cost_function(neural_network.avg_cost_for_iterations)
| 58.410526 | 141 | 0.649667 | from typing import Dict
import numpy
from config import logging_configurator
from paddles.paddle_0402.config.paddle_configurator import nmist_raw_ann_config
from paddles.paddle_0402.whiteboard.from_scratch.nmist import Ann, output_training, \
initialize_gradient_weight_accumulation_matrices, initialize_bias_accumulation_vectors, input_training, \
brute_force_feed_forward, calculate_outer_layer_gradient, calculate_previous_layer_gradient, plot_cost_function
logger = logging_configurator.get_logger(__name__)
class SimpleNeuralNetwork(Ann):
    """
    With 3000 iterations, this gives 86% prediction accuracy.
    This is as simple as it gets. Back propogation is based upon minimizing the overall cost across all samples:
    COST = (1/2) * (1/sample_count) SUM_OVER_SAMPES_Z[(abs(label - predicted)^2
    """
    def __init__(self) -> None:
        # Hyper-parameters come from the shared nmist configuration module.
        super().__init__()
        self.num_training_iterations: int = nmist_raw_ann_config.training_iterations
        self.step_size: float = nmist_raw_ann_config.gradient_step_size
    def train(self) -> None:
        """Run full-batch gradient descent for the configured number of iterations.

        Each iteration feeds every training sample forward, back-propagates the
        error, accumulates the gradients over the whole batch, and only then
        adjusts the weights once via _adjust_weights.
        """
        num_samples = len(output_training)
        logger.info(
            f"Starting training with {self.num_training_iterations} iterations. Each iteration has {num_samples} samples")
        for training_index in range(self.num_training_iterations):
            avg_cost_for_iteration: float = 0
            # Progress log every 50 iterations (skipping iteration 0, where no
            # average cost has been recorded yet).
            if training_index % 50 == 0 and training_index != 0:
                logger.info(
                    f"Done with {training_index} / {self.num_training_iterations} iterations. Avg cost: {self.avg_cost_for_iterations[-1]}")
            # Fresh zeroed accumulators for this iteration's batch gradients.
            gradient_for_weights = initialize_gradient_weight_accumulation_matrices(self.network_layer_sizes)
            gradient_for_bias = initialize_bias_accumulation_vectors(self.network_layer_sizes)
            # now loop thru every sample
            for sample_index in range(len(output_training)):
                # first do a feed forward and collect the aggregates into each layer and the output of each layer
                # the chained partial derivatives use these
                sample = input_training[sample_index, :]
                layer_outputs, layer_aggregates = brute_force_feed_forward(sample, self.weight_matrix_by_layer,
                                                                           self.bias_by_layer)
                # keep a running total of our sample costs
                sample_cost = numpy.linalg.norm(output_training[sample_index, :] - layer_outputs[self.num_layers])
                avg_cost_for_iteration += sample_cost
                # lets perform back propagation, starting from the output layer and working backwards to
                # distribute the loss across the weights and biases in the network
                outer_layer_index = self.num_layers
                outer_layer_gradient = calculate_outer_layer_gradient(output_training[sample_index, :],
                                                                      layer_outputs[outer_layer_index],
                                                                      layer_aggregates[outer_layer_index])
                # gradients_by_layer: layer index -> error gradient for that layer.
                gradients_by_layer = {outer_layer_index: outer_layer_gradient}
                for layer_index in range(self.num_layers - 1, 0, -1):
                    if layer_index > 1:
                        # Hidden layers: propagate the downstream error backwards
                        # through this layer's weights.
                        inner_layer_gradient = \
                            calculate_previous_layer_gradient(gradients_by_layer[layer_index + 1],
                                                              self.weight_matrix_by_layer[layer_index],
                                                              layer_aggregates[layer_index])
                        gradients_by_layer[layer_index] = inner_layer_gradient
                    # Outer product of the downstream gradient with this layer's
                    # output accumulates the weight gradient for the batch.
                    gradient_for_weights[layer_index] += numpy.dot(
                        gradients_by_layer[layer_index + 1][:, numpy.newaxis],
                        numpy.transpose(
                            layer_outputs[layer_index][:, numpy.newaxis]))
                    gradient_for_bias[layer_index] += gradients_by_layer[layer_index + 1]
            # adjust the weights along their gradients by the step size
            self._adjust_weights(gradient_for_weights, gradient_for_bias)
            avg_cost_for_iteration /= num_samples
            self.avg_cost_for_iterations.append(avg_cost_for_iteration)
    def _adjust_weights(self, gradient_for_weights: Dict[int, numpy.ndarray],
                        gradient_for_bias: Dict[int, numpy.ndarray]) -> None:
        """
        This adjusts the weights according to the optimization function to minimize the quadrature of
        residuals from predicted to labeled, normalized by the sample data count
        :param gradient_for_weights: a dict whose key is the network layer and whose value is a weight matrix
        :param gradient_for_bias: a dict whose key is the network layer and whose value is vector of bias gradients
        :return:
        """
        # Plain gradient-descent step; the 1/sample_count factor matches the
        # cost definition in the class docstring.
        for layer_index in range(self.num_layers - 1, 0, -1):
            self.weight_matrix_by_layer[layer_index] += -self.step_size * (
                    1.0 / len(output_training) * gradient_for_weights[layer_index])
            self.bias_by_layer[layer_index] += -self.step_size * (
                    1.0 / len(output_training) * gradient_for_bias[layer_index])
if __name__ == "__main__":
    # Train from scratch, evaluate the trained network, then plot the
    # average cost recorded for each training iteration.
    neural_network = SimpleNeuralNetwork()
    neural_network.train()
    neural_network.evalulate_network()
    plot_cost_function(neural_network.avg_cost_for_iterations)
| 3,486 | 0 | 53 |
3ec9171172f97212f0af77549383a8c76fb44a73 | 1,291 | py | Python | logger.py | tex0l/JukeBox | f6a811ddb1ee98983e7ec42cfbde1f0d11b04d2d | [
"Apache-2.0"
] | null | null | null | logger.py | tex0l/JukeBox | f6a811ddb1ee98983e7ec42cfbde1f0d11b04d2d | [
"Apache-2.0"
] | null | null | null | logger.py | tex0l/JukeBox | f6a811ddb1ee98983e7ec42cfbde1f0d11b04d2d | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
class Logger:
    """
    Wires the root logger and the 'mpd' logger to one shared file handler.
    It is only of use in main.py, where it is instantiated once.
    Elsewhere, simply ``import logging`` and call the usual debug(),
    info(), warning(), error(), critical() methods; numeric levels run
    from 10 (debug) to 50 (critical).
    """
    def __init__(self, log_format, path, level):
        """
        :param log_format: format string handed to logging.Formatter
        :param path: log-file path, relative to this module's directory
        :param level: minimum required level; messages below it are
            neither stored nor displayed
        """
        self.log_formatter = logging.Formatter(log_format)
        # Append mode keeps the log history across restarts.
        log_file = os.path.join(os.path.dirname(__file__), path)
        self.file_handler = logging.FileHandler(log_file, mode='a')
        self.file_handler.setFormatter(self.log_formatter)
        self.root_logger = logging.getLogger()
        self.mpd_logger = logging.getLogger('mpd')
        # Keep 'mpd' records from bubbling up to the root logger, which
        # shares the same handler (they would be written twice otherwise).
        self.mpd_logger.propagate = False
        for current_logger in (self.root_logger, self.mpd_logger):
            current_logger.setLevel(level)
            current_logger.addHandler(self.file_handler)
| 33.973684 | 104 | 0.675445 | from __future__ import unicode_literals
# !/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
class Logger:
    """
    Wires the root logger and the 'mpd' logger to a single shared file
    handler. It is only of use in main.py, where it is initialized once.
    To log elsewhere, simply ``import logging`` in the file and call the
    debug(), info(), warning(), error(), critical() methods.
    Numeric levels run from 10 (debug) to 50 (critical).
    """
    def __init__(self, log_format, path, level):
        """
        :param log_format: format string passed to logging.Formatter
        :param path: log-file path, relative to this module's directory
        :param level: minimum required level; messages below it are
            neither stored nor displayed
        """
        self.log_formatter = logging.Formatter(log_format)
        self.root_logger = logging.getLogger()
        self.mpd_logger = logging.getLogger('mpd')
        # Keep 'mpd' records from bubbling up to the root logger, which
        # shares the same handler (they would be written twice otherwise).
        self.mpd_logger.propagate = False
        self.root_logger.setLevel(level)
        self.mpd_logger.setLevel(level)
        # Append mode keeps the log history across restarts.
        self.file_handler = logging.FileHandler(os.path.join(os.path.dirname(__file__), path), mode='a')
        self.file_handler.setFormatter(self.log_formatter)
        self.root_logger.addHandler(self.file_handler)
        self.mpd_logger.addHandler(self.file_handler)
| 0 | 0 | 0 |
d3b92a5606413c2aa6ad4c84bb904572fdc7d826 | 4,853 | py | Python | terraform-modules/lambda-slack/code/notify/notify.py | ovotech/domain-protect | 56e255912852ea624d710307a0d3fa5e962df195 | [
"Apache-2.0"
] | 185 | 2021-06-22T08:55:37.000Z | 2022-03-31T10:13:28.000Z | terraform-modules/lambda-slack/code/notify/notify.py | ovotech/domain-protect | 56e255912852ea624d710307a0d3fa5e962df195 | [
"Apache-2.0"
] | 7 | 2021-07-05T16:02:03.000Z | 2021-12-21T11:38:17.000Z | terraform-modules/lambda-slack/code/notify/notify.py | ovotech/domain-protect | 56e255912852ea624d710307a0d3fa5e962df195 | [
"Apache-2.0"
] | 27 | 2021-06-28T16:17:23.000Z | 2022-03-28T08:45:19.000Z | from __future__ import print_function
import json
import os
from urllib import request, parse
| 29.95679 | 154 | 0.561508 | from __future__ import print_function
import json
import os
from urllib import request, parse
def findings_message(json_data):
    """Build a Slack attachment listing vulnerable domains.

    Returns None when the payload has no "Findings" key (or an entry is
    missing "Domain"/"Account"), signalling the caller to try the next
    message type.
    """
    try:
        fields = [{"title": "Vulnerable domains"}]
        for finding in json_data["Findings"]:
            line = f"{finding['Domain']} in {finding['Account']} AWS Account"
            print(line)
            fields.append({"value": line, "short": False})
        return {"fallback": "A new message", "fields": fields}
    except KeyError:
        return None
def takeovers_message(json_data):
    """Build a Slack attachment describing domain-takeover attempts.

    Returns None when the payload carries no "Takeovers" key or an entry
    is missing one of the expected fields, so the caller can fall through
    to the next message type.
    """
    try:
        message = {"fallback": "A new message", "fields": [{"title": "Domain takeover status"}]}
        for takeover in json_data["Takeovers"]:
            # Render both texts up front so a missing field is always caught
            # as KeyError, no matter which status the entry carries.
            success_text = (
                f"{takeover['ResourceType']} {takeover['TakeoverDomain']} \n"
                f"successfully created in {takeover['TakeoverAccount']} AWS account \n"
                f"to protect {takeover['VulnerableDomain']} domain in {takeover['VulnerableAccount']} account"
            )
            failure_text = (
                f"{takeover['ResourceType']} {takeover['TakeoverDomain']} creation \n"
                f"failed in {takeover['TakeoverAccount']} AWS account to protect {takeover['VulnerableDomain']} \n"
                f"domain in {takeover['VulnerableAccount']} account"
            )
            status = takeover["TakeoverStatus"]
            if status == "success":
                print(success_text)
                message["fields"].append({"value": success_text, "short": False})
            elif status == "failure":
                print(failure_text)
                message["fields"].append({"value": failure_text, "short": False})
        return message
    except KeyError:
        return None
def resources_message(json_data):
    """Build a Slack attachment listing resources that prevent hostile takeover.

    Each entry of json_data["Resources"] is a list of CloudFormation-style
    tags ({"Key": ..., "Value": ...}). Returns None when "Resources" is
    absent or a tag set lacks one of the expected keys, so the caller can
    fall through to the next message type.
    """
    try:
        stacks = json_data["Resources"]
        slack_message = {"fallback": "A new message", "fields": [{"title": "Resources preventing hostile takeover"}]}
        for tags in stacks:
            # Collect this stack's tags into a dict. Indexing below raises
            # KeyError for a missing tag (caught -> None). Previously a
            # missing tag either crashed with NameError on the first stack or
            # silently reused the previous stack's value on later stacks.
            tag_values = {tag["Key"]: tag["Value"] for tag in tags}
            resource_name = tag_values["ResourceName"]
            resource_type = tag_values["ResourceType"]
            takeover_account = tag_values["TakeoverAccount"]
            vulnerable_account = tag_values["VulnerableAccount"]
            vulnerable_domain = tag_values["VulnerableDomain"]
            print(
                f"{resource_type} {resource_name} in {takeover_account} AWS account protecting {vulnerable_domain} domain in {vulnerable_account} Account"
            )
            slack_message["fields"].append(
                {
                    "value": f"{resource_type} {resource_name} protecting {vulnerable_domain} domain in {vulnerable_account} Account",
                    "short": False,
                }
            )
        # Appended once, after all stacks, as a general reminder.
        slack_message["fields"].append(
            {
                "value": "After fixing DNS issues, delete resources and CloudFormation stacks",
                "short": False,
            }
        )
        return slack_message
    except KeyError:
        return None
def lambda_handler(event, context):  # pylint:disable=unused-argument
    """Lambda entry point: forward a domain-protect SNS notification to Slack.

    Builds the Slack payload from environment configuration, renders the SNS
    message with the first builder that recognises it (findings, takeovers,
    or resources), and POSTs the result to the configured webhook URL.

    :param event: SNS event; Records[0].Sns.Message is a JSON document
    :param context: unused Lambda context object
    """
    slack_url = os.environ["SLACK_WEBHOOK_URL"]
    slack_channel = os.environ["SLACK_CHANNEL"]
    slack_username = os.environ["SLACK_USERNAME"]
    slack_emoji = os.environ["SLACK_EMOJI"]
    subject = event["Records"][0]["Sns"]["Subject"]
    payload = {
        "channel": slack_channel,
        "username": slack_username,
        "icon_emoji": slack_emoji,
        "attachments": [],
        "text": subject,
    }
    message = event["Records"][0]["Sns"]["Message"]
    json_data = json.loads(message)
    # Call each builder once, in priority order, and keep the first match.
    # (Previously every builder ran twice, and an unrecognised payload left
    # slack_message unbound, crashing with NameError before the POST.)
    slack_message = None
    for builder in (findings_message, takeovers_message, resources_message):
        slack_message = builder(json_data)
        if slack_message is not None:
            break
    if slack_message is not None:
        payload["attachments"].append(slack_message)
    data = parse.urlencode({"payload": json.dumps(payload)}).encode("utf-8")
    req = request.Request(slack_url)
    with request.urlopen(req, data):
        print(f"Message sent to {slack_channel} Slack channel")
| 4,663 | 0 | 92 |
55e2e13c4d813fa7a677490779ed341922d896c4 | 6,116 | py | Python | heuteinma.py | s1lvester/heuteinmannheim | c60ec7ff8f453dd8d3cb3446a99f7d82901c0c3c | [
"MIT"
] | null | null | null | heuteinma.py | s1lvester/heuteinmannheim | c60ec7ff8f453dd8d3cb3446a99f7d82901c0c3c | [
"MIT"
] | null | null | null | heuteinma.py | s1lvester/heuteinmannheim | c60ec7ff8f453dd8d3cb3446a99f7d82901c0c3c | [
"MIT"
] | null | null | null | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import facebook
import websites
import feeds
#import beachstatus
from event import EventVault
import logging
import datetime
import time
import locale
# Use the system locale so strftime produces German day/month names and umlauts.
locale.setlocale(locale.LC_TIME, '') # locale for date, time an the infamous german "Umalaute"
# Errors are logged to log.log next to this script.
LOG_FILENAME = os.path.join(os.path.dirname(__file__), 'log.log')
logging.basicConfig(filename=LOG_FILENAME, level=logging.ERROR)
# Gooo !!!!11einself -- instantiating runs the whole scrape-and-render pipeline
main_obj = HeuteInMannheim()
| 43.375887 | 204 | 0.493296 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import facebook
import websites
import feeds
#import beachstatus
from event import EventVault
import logging
import datetime
import time
import locale
# Use the system locale so strftime produces German day/month names and umlauts.
locale.setlocale(locale.LC_TIME, '') # locale for date, time an the infamous german "Umalaute"
# Errors are logged to log.log next to this script.
LOG_FILENAME = os.path.join(os.path.dirname(__file__), 'log.log')
logging.basicConfig(filename=LOG_FILENAME, level=logging.ERROR)
class HeuteInMannheim:
    """Scrape all configured event sources and render the static index.html.

    Instantiating this class runs the whole pipeline: the scrapers fill the
    EventVault, today's events are rendered to HTML, and the result is
    written to static/index.html.
    """
    def __init__(self):
        super(HeuteInMannheim, self).__init__()
        self.vault = EventVault()  # Initialize main Storage Object
        # Initialize Scrapers (each one registers its events in the vault)
        self.facebook_scraper = facebook.FacebookScraper(self.vault)
        self.website_scraper = websites.WebsiteScraper(self.vault)
        self.feed_scraper = feeds.FeedScraper(self.vault)
        self.events = self.vault.get_events_for_date(datetime.date.today())
        #self.events = self.vault.get_all_events() # Only for testing/debugging
        #self.beach_status = beachstatus.BeachStatus()
        #self.beach_status = self.beach_status.get_status()
        self.state_output = self.make_html()
        self.write_html()  # Make initial index.html
        logging.info("Total amount of Events: " + str(len(self.vault.get_all_events())))
    def make_html(self):
        """Generate HTML output from collected events"""
        output = """<!DOCTYPE html>
<html>
<head>
    <title>Heute in Mannheim</title>
    <link href="style.css" media="all" rel="stylesheet" type="text/css">
    <meta http-equiv="content-type" content="text/html; charset=utf-8">
    <meta name="description" content="Heute in Mannheim ist eine simple Website, die dir Events in Mannheim anzeigt. Unabhängig, werbefrei, unkommerziell, free as in freedom and free as in beer.">
    <meta name="apple-mobile-web-app-capable" content="yes">
</head>
<body>
<table>\n"""
        if not self.events:  # Guess we're staying home tonight...
            output += """<tr><td><p><span class=\"title\">Heute keine
            Events.<br> Guess we're staying home tonight...
            :-(</span></p></td></tr>\n"""
        else:
            eo = 0  # Even/Odd table-rows
            for event in self.events:
                if eo == 0:
                    output += " <tr class=\"even\">"
                    eo = 1
                else:
                    output += " <tr class=\"odd\">"
                    eo = 0
                # Facebook Icon by http://shimmi1.deviantart.com/ to warn Users from evil Facebook links
                if event.get("event_url").find("facebook") > -1:
                    output_fb = "<img src=\"img/fb_ico.png\" alt=\"Achtung: Facebook Link!\">"
                else:
                    output_fb = ""
                output += """
        <td><p><span class=\"title\"><a href=\"{}\">{} {}</a></span></p>
        <span class=\"location\"><a href=\"{}\">{}</a></span><br>
        <span class=\"adresse\">{} {} | {} {}</span></td>
        <td><span class=\"zeit\">{}</span><br>
        </tr>\n""".format(event.get("event_url"),
                          event.get("title"),
                          output_fb,
                          event.get("url"),
                          event.get("name"),
                          event.get("strasse"),
                          event.get("hausnr"),
                          event.get("plz"),
                          event.get("ort"),
                          event.get("uhrzeit"))
        # Beach-status section, disabled for now (see beachstatus import above).
        # output += """
        #     </table>
        #     <hr>
        #     <p><b>Status der Mannheimer Strände:</b></p>
        #     <table>"""
        # for beach in self.beach_status:
        #     hours = ""
        #     if beach["status"] == "open":
        #         hours = str("<b>" + beach["hours_open"] + " - " + beach["hours_closed"] + "</b><br>")
        #     output += """
        #     <tr class=\"beach\">
        #         <td class=\"{}\">
        #             <span class=\"adresse"><a href=\"{}\">{}: {}</a></span><br>
        #             {}
        #             {} {} | {} {}
        #         </td>
        #     </tr>""".format(beach["status"],
        #                     beach["event_obj"].get("url"),
        #                     beach["event_obj"].get("name"),
        #                     beach["status"],
        #                     hours,
        #                     beach["event_obj"].get("strasse"),
        #                     beach["event_obj"].get("hausnr"),
        #                     beach["event_obj"].get("plz"),
        #                     beach["event_obj"].get("ort"))
        output += """
    </table>
    <hr>
    <p>Last update: {}</p>
    <p><b><a href=\"imprint.html\">Contact, Impressum und Datenschutz</a></b></p>
    <p class=\"footer\">Heute in Mannheim ist eine automatisch generierte
    Website und wurde nach bestem Wissen und Gewissen erstellt. Die
    Einträge wurden nicht redaktionell bearbeitet und ich übernehme
    keinerlei Haftung für die Inhalte hinter den links</p>
    <p class=\"footer\"><a href=\"https://github.com/s1lvester/heuteinmannheim\">Fork me on GitHub</a><br>Danke an die Jungs von <a href=\"http://heuteinstuttgart.de/\">heuteinstuttgart.de</a></p>
</body>
</html>""".format(time.strftime("%d.%m.%Y %H:%M", time.localtime()))
        # Encoded here because write_html opens the file in binary mode.
        return output.encode("utf-8")
    def write_html(self):
        """Write the index.html file. Requires self.state_output to be set"""
        f = open(os.path.join(os.path.dirname(__file__), "static/index.html"),
                 "wb")
        f.write(self.state_output)
        f.close()
# Gooo !!!!11einself -- instantiating runs the whole scrape-and-render pipeline
main_obj = HeuteInMannheim()
| 802 | 4,779 | 23 |
86715c2149b581cb79e32e65aaacbdc77c87cee0 | 7,986 | py | Python | example/examplechecker.py | DanielHabenicht/enochecker | a771c0bdbcfc4be9cc948d80d7105d8c5303922b | [
"MIT"
] | null | null | null | example/examplechecker.py | DanielHabenicht/enochecker | a771c0bdbcfc4be9cc948d80d7105d8c5303922b | [
"MIT"
] | null | null | null | example/examplechecker.py | DanielHabenicht/enochecker | a771c0bdbcfc4be9cc948d80d7105d8c5303922b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# from src.enochecker import *
import json
import secrets
from typing import Dict
from enochecker import BaseChecker, BrokenServiceException, assert_equals, run
class ExampleChecker(BaseChecker):
    """
    Change the methods given here, then simply create the class and .run() it.
    A few convenient methods and helpers are provided in the BaseChecker.
    When using an HTTP client (requests) or a plain TCP connection (telnetlib) use the
    built-in functions of the BaseChecker that include some basic error-handling.
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.connect
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.http
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.http_get
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.http_post
    The full documentation is available at https://enowars.github.io/enochecker/
    """
    # NOTE(review): the credential helpers used below (generate_credentials,
    # register_and_login, login) are not part of this excerpt -- presumably
    # defined on this class elsewhere; confirm before editing.
    # how many flags does this service deploy per round? each flag should be stored at a different location in the service
    flag_variants = 2
    # how many noises does this service deploy per round?
    noise_variants = 1
    # how many different havoc methods does this service use per round?
    havoc_variants = 1
    # The port will automatically be picked up as default by self.connect and self.http methods.
    port = 80
    def putflag(self) -> None:
        """
        This method stores a flag in the service.
        In case the service has multiple flag stores, self.variant_id gives the appropriate index.
        The flag itself can be retrieved from self.flag.
        On error, raise an Eno Exception.
        :raises EnoException on error
        """
        if self.variant_id == 0:
            credentials = self.generate_credentials()
            # Persisted via chain_db so getflag can log in with the same account.
            self.chain_db = credentials
            self.register_and_login(credentials)
            res = self.http_post("/notes", json={"note": self.flag})
            assert_equals(res.status_code, 200)
        elif self.variant_id == 1:
            credentials = self.generate_credentials()
            self.chain_db = credentials
            self.register_and_login(credentials)
            res = self.http_post("/profile/status", json={"status": self.flag})
            assert_equals(res.status_code, 200)
        else:
            raise ValueError(
                "variant_id {} exceeds the amount of flag variants. Not supported.".format(
                    self.variant_id
                )
            )
    def getflag(self) -> None:
        """
        This method retrieves a flag from the service.
        Use self.flag to get the flag that needs to be recovered and self.round to get the round the flag was placed in.
        On error, raise an EnoException.
        :raises EnoException on error
        """
        if self.variant_id == 0:
            # Credentials stored by the matching putflag call.
            credentials = self.chain_db
            self.login(credentials)
            res = self.http_get("/notes")
            assert_equals(res.status_code, 200)
            try:
                if self.flag not in res.json()["notes"]:
                    raise BrokenServiceException("flag is missing from /notes")
            except (KeyError, json.JSONDecodeError):
                raise BrokenServiceException(
                    "received invalid response on /notes endpoint"
                )
        elif self.variant_id == 1:
            credentials = self.chain_db
            self.login(credentials)
            res = self.http_get("/profile")
            assert_equals(res.status_code, 200)
            try:
                if self.flag != res.json()["status"]:
                    raise BrokenServiceException("flag is missing from /profile")
            except (KeyError, json.JSONDecodeError):
                raise BrokenServiceException(
                    "received invalid response on /profile endpoint"
                )
        else:
            raise ValueError(
                "variant_id {} not supported!".format(self.variant_id)
            )  # Internal error.
    def putnoise(self) -> None:
        """
        This method stores noise in the service. The noise should later be recoverable.
        The difference between noise and flag is, tht noise does not have to remain secret for other teams.
        This method can be called many times per round. Check how often using self.variant_id.
        On error, raise an EnoException.
        :raises EnoException on error
        """
        credentials = self.generate_credentials()
        self.register_and_login(credentials)
        category = secrets.choice(
            [
                "Python",
                "NodeJS",
                "C",
                "Rust",
                "Go",
                "C#",
                "C++",
                "Prolog",
                "OCL",
                "Julia",
            ]
        )
        # we are overwriting the credentials on purpose since we don't need them later in this case
        self.chain_db = category
        res = self.http_post(
            "/posts",
            json={"content": self.noise, "category": category, "public": True},
        )
        assert_equals(res.status_code, 200)
    def getnoise(self) -> None:
        """
        This method retrieves noise in the service.
        The noise to be retrieved is inside self.noise
        The difference between noise and flag is, that noise does not have to remain secret for other teams.
        This method can be called many times per round.
        The engine will also trigger different variants, indicated by variant_id.
        On error, raise an EnoException.
        :raises EnoException on error
        """
        # Category stored by the matching putnoise call.
        category = self.chain_db
        res = self.http_get("/posts", json={"category": category})
        assert_equals(res.status_code, 200)
        try:
            for post in res.json()["posts"]:
                if post["content"] == self.noise:
                    return  # returning nothing/raising no exceptions means everything is ok
        except (KeyError, json.JSONDecodeError):
            raise BrokenServiceException("received invalid response on /posts")
        else:
            raise BrokenServiceException("noise is missing from /posts")
    def havoc(self) -> None:
        """
        This method unleashes havoc on the app -> Do whatever you must to prove the service still works. Or not.
        On error, raise an EnoException.
        :raises EnoException on Error
        """
        self.info("I wanted to inform you: I'm running <3")
        res = self.http_get("/")
        assert_equals(res.status_code, 200)
        # You should probably do some more in-depth checks here.
    def exploit(self) -> None:
        """
        This method was added for CI purposes for exploits to be tested.
        Will (hopefully) not be called during actual CTF.
        :raises EnoException on Error
        :return This function can return a result if it wants
        If nothing is returned, the service status is considered okay.
        The preferred way to report Errors in the service is by raising an appropriate EnoException
        """
        pass
# Expose the checker as a WSGI app for gunicorn/uwsgi deployments.
app = ExampleChecker.service  # This can be used for gunicorn/uswgi.
if __name__ == "__main__":
    # Stand-alone entry point via enochecker's run() helper.
    run(ExampleChecker)
| 38.210526 | 122 | 0.617205 | #!/usr/bin/env python3
# from src.enochecker import *
import json
import secrets
from typing import Dict
from enochecker import BaseChecker, BrokenServiceException, assert_equals, run
class ExampleChecker(BaseChecker):
    """
    Change the methods given here, then simply create the class and .run() it.
    A few convenient methods and helpers are provided in the BaseChecker.
    When using an HTTP client (requests) or a plain TCP connection (telnetlib) use the
    built-in functions of the BaseChecker that include some basic error-handling.
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.connect
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.http
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.http_get
    https://enowars.github.io/enochecker/enochecker.html#enochecker.enochecker.BaseChecker.http_post
    The full documentation is available at https://enowars.github.io/enochecker/
    """
    # how many flags does this service deploy per round? each flag should be stored at a different location in the service
    flag_variants = 2
    # how many noises does this service deploy per round?
    noise_variants = 1
    # how many different havoc methods does this service use per round?
    havoc_variants = 1
    # The port will automatically be picked up as default by self.connect and self.http methods.
    port = 80
    def login(self, credentials) -> None:
        """Log in with the given credentials; expects the service to answer 200."""
        res = self.http_post("/login", json=credentials)
        assert_equals(res.status_code, 200)
    def register_and_login(self, credentials) -> None:
        """Register a fresh account and immediately log in with it."""
        res = self.http_post("/register", json=credentials)
        assert_equals(res.status_code, 200)
        self.login(credentials)
    def generate_credentials(self) -> Dict[str, str]:
        """Return a random username/password pair suitable for registration."""
        credentials = {
            "username": secrets.token_urlsafe(12),
            "password": secrets.token_urlsafe(16),
        }
        return credentials
    def putflag(self) -> None:
        """
        This method stores a flag in the service.
        In case the service has multiple flag stores, self.variant_id gives the appropriate index.
        The flag itself can be retrieved from self.flag.
        On error, raise an Eno Exception.
        :raises EnoException on error
        """
        if self.variant_id == 0:
            credentials = self.generate_credentials()
            # Persisted via chain_db so getflag can log in with the same account.
            self.chain_db = credentials
            self.register_and_login(credentials)
            res = self.http_post("/notes", json={"note": self.flag})
            assert_equals(res.status_code, 200)
        elif self.variant_id == 1:
            credentials = self.generate_credentials()
            self.chain_db = credentials
            self.register_and_login(credentials)
            res = self.http_post("/profile/status", json={"status": self.flag})
            assert_equals(res.status_code, 200)
        else:
            raise ValueError(
                "variant_id {} exceeds the amount of flag variants. Not supported.".format(
                    self.variant_id
                )
            )
    def getflag(self) -> None:
        """
        This method retrieves a flag from the service.
        Use self.flag to get the flag that needs to be recovered and self.round to get the round the flag was placed in.
        On error, raise an EnoException.
        :raises EnoException on error
        """
        if self.variant_id == 0:
            # Credentials stored by the matching putflag call.
            credentials = self.chain_db
            self.login(credentials)
            res = self.http_get("/notes")
            assert_equals(res.status_code, 200)
            try:
                if self.flag not in res.json()["notes"]:
                    raise BrokenServiceException("flag is missing from /notes")
            except (KeyError, json.JSONDecodeError):
                raise BrokenServiceException(
                    "received invalid response on /notes endpoint"
                )
        elif self.variant_id == 1:
            credentials = self.chain_db
            self.login(credentials)
            res = self.http_get("/profile")
            assert_equals(res.status_code, 200)
            try:
                if self.flag != res.json()["status"]:
                    raise BrokenServiceException("flag is missing from /profile")
            except (KeyError, json.JSONDecodeError):
                raise BrokenServiceException(
                    "received invalid response on /profile endpoint"
                )
        else:
            raise ValueError(
                "variant_id {} not supported!".format(self.variant_id)
            )  # Internal error.
    def putnoise(self) -> None:
        """
        This method stores noise in the service. The noise should later be recoverable.
        The difference between noise and flag is, tht noise does not have to remain secret for other teams.
        This method can be called many times per round. Check how often using self.variant_id.
        On error, raise an EnoException.
        :raises EnoException on error
        """
        credentials = self.generate_credentials()
        self.register_and_login(credentials)
        category = secrets.choice(
            [
                "Python",
                "NodeJS",
                "C",
                "Rust",
                "Go",
                "C#",
                "C++",
                "Prolog",
                "OCL",
                "Julia",
            ]
        )
        # we are overwriting the credentials on purpose since we don't need them later in this case
        self.chain_db = category
        res = self.http_post(
            "/posts",
            json={"content": self.noise, "category": category, "public": True},
        )
        assert_equals(res.status_code, 200)
    def getnoise(self) -> None:
        """
        This method retrieves noise in the service.
        The noise to be retrieved is inside self.noise
        The difference between noise and flag is, that noise does not have to remain secret for other teams.
        This method can be called many times per round.
        The engine will also trigger different variants, indicated by variant_id.
        On error, raise an EnoException.
        :raises EnoException on error
        """
        # Category stored by the matching putnoise call.
        category = self.chain_db
        res = self.http_get("/posts", json={"category": category})
        assert_equals(res.status_code, 200)
        try:
            for post in res.json()["posts"]:
                if post["content"] == self.noise:
                    return  # returning nothing/raising no exceptions means everything is ok
        except (KeyError, json.JSONDecodeError):
            raise BrokenServiceException("received invalid response on /posts")
        else:
            raise BrokenServiceException("noise is missing from /posts")
    def havoc(self) -> None:
        """
        This method unleashes havoc on the app -> Do whatever you must to prove the service still works. Or not.
        On error, raise an EnoException.
        :raises EnoException on Error
        """
        self.info("I wanted to inform you: I'm running <3")
        res = self.http_get("/")
        assert_equals(res.status_code, 200)
        # You should probably do some more in-depth checks here.
    def exploit(self) -> None:
        """
        This method was added for CI purposes for exploits to be tested.
        Will (hopefully) not be called during actual CTF.
        :raises EnoException on Error
        :return This function can return a result if it wants
        If nothing is returned, the service status is considered okay.
        The preferred way to report Errors in the service is by raising an appropriate EnoException
        """
        pass
# Expose the checker as a WSGI app for gunicorn/uwsgi deployments.
app = ExampleChecker.service  # This can be used for gunicorn/uswgi.
if __name__ == "__main__":
    # Stand-alone entry point via enochecker's run() helper.
    run(ExampleChecker)
| 474 | 0 | 81 |
7c5fbe3e9c6275b73daa73348993fc8d188895e3 | 1,109 | py | Python | frameworks/Java/curacao/setup.py | idlewan/FrameworkBenchmarks | f187ec69752f369d84ef5a262efaef85c3a6a5ab | [
"BSD-3-Clause"
] | 4 | 2015-01-22T02:13:03.000Z | 2018-06-13T12:02:46.000Z | frameworks/Java/curacao/setup.py | ratpack/FrameworkBenchmarks | 81604309e46e382fe2ffb7970a87d728f20c8be6 | [
"BSD-3-Clause"
] | null | null | null | frameworks/Java/curacao/setup.py | ratpack/FrameworkBenchmarks | 81604309e46e382fe2ffb7970a87d728f20c8be6 | [
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import time
import os
##start([], open('log.out','a'), open('error.out','a'))
##stop(open('log.out','a'), open('error.out','a'))
| 30.805556 | 138 | 0.651037 |
import subprocess
import sys
import time
import os
def start(args, logfile, errfile):
    """Build the curacao benchmark with sbt, then launch the standalone jar.

    Always returns 0; a failed sbt build raises CalledProcessError instead.
    All process output is redirected to the supplied log/error files.
    """
    # Pick the platform-appropriate sbt invocation.
    if os.name == 'nt':
        build_cmd = '"..\\sbt\\sbt.bat" assembly'
    else:
        build_cmd = "$FWROOT/sbt/sbt assembly"
    subprocess.check_call(build_cmd, shell=True, cwd="curacao", stderr=errfile, stdout=logfile)
    # The server runs in the background; give it a moment to come up.
    subprocess.Popen("java -jar dist/curacao-standalone.jar", shell=True, cwd="curacao", stderr=errfile, stdout=logfile)
    time.sleep(5)
    return 0
def stop(logfile, errfile):
    """Terminate any running curacao-standalone server process.

    Always returns 0, even when no matching process is found.
    """
    if os.name == 'nt':
        subprocess.check_call("wmic process where \"CommandLine LIKE '%curacao-standalone%'\" call terminate", stderr=errfile, stdout=logfile)
        return 0
    ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
    # NOTE(review): this assumes Python 2 (`out` is str); under Python 3
    # splitlines() yields bytes and the substring test would raise TypeError.
    out, err = ps.communicate()
    for line in out.splitlines():
        if 'curacao-standalone' not in line:
            continue
        try:
            pid = int(line.split(None, 2)[1])
            os.kill(pid, 15)
        except OSError:
            # Process already exited between listing and kill.
            pass
    return 0
##start([], open('log.out','a'), open('error.out','a'))
##stop(open('log.out','a'), open('error.out','a'))
| 903 | 0 | 46 |
2f56ab33ec78758402c86dd59fad3dcc377f14ee | 1,226 | py | Python | django_json_ld/settings.py | denisroldan/django-json-ld | 651fdd43f912dbc869503fb23f5ee9c910608cb7 | [
"MIT"
] | 25 | 2019-01-17T03:30:36.000Z | 2022-03-11T11:30:18.000Z | django_json_ld/settings.py | denisroldan/django-json-ld | 651fdd43f912dbc869503fb23f5ee9c910608cb7 | [
"MIT"
] | 24 | 2019-03-08T11:47:43.000Z | 2022-02-01T15:15:31.000Z | django_json_ld/settings.py | denisroldan/django-json-ld | 651fdd43f912dbc869503fb23f5ee9c910608cb7 | [
"MIT"
] | 6 | 2019-03-06T15:56:58.000Z | 2022-02-01T12:36:01.000Z | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Names of the attributes (on views/models) holding the structured data.
CONTEXT_ATTRIBUTE = getattr(settings, 'JSON_LD_CONTEXT_ATTRIBUTE', 'sd')
MODEL_ATTRIBUTE = getattr(settings, 'JSON_LD_MODEL_ATTRIBUTE', 'sd')
# Defaults applied when the structured data omits @context / @type.
DEFAULT_CONTEXT = getattr(settings, 'JSON_LD_DEFAULT_CONTEXT', 'https://schema.org/')
DEFAULT_TYPE = getattr(settings, 'JSON_LD_DEFAULT_TYPE', 'Thing')
# JSON indent: None (compact output) or a non-negative int (validated below).
JSON_INDENT = getattr(settings, 'JSON_LD_INDENT', None)
GENERATE_URL = getattr(settings, 'JSON_LD_GENERATE_URL', True)
# Allowed behaviours when rendering with no structured-data input.
valid_empty_input_rendering_settings = [
    'strict', 'silent', 'generate_thing'
]
EMPTY_INPUT_RENDERING = getattr(settings, 'JSON_LD_EMPTY_INPUT_RENDERING', 'strict')
# Collect every configuration problem first, then fail once with a combined message.
err = ''
if EMPTY_INPUT_RENDERING not in valid_empty_input_rendering_settings:
    err += 'Invalid value for JSON_LD_EMPTY_INPUT_RENDERING setting. '
    err += 'Expected one of {}, but got "{}". '.format(
        valid_empty_input_rendering_settings,
        EMPTY_INPUT_RENDERING
    )
if not (JSON_INDENT is None or isinstance(JSON_INDENT, int) and JSON_INDENT >= 0) :
    err += 'Invalid value for JSON_LD_INDENT setting. '
    err += 'Expected None or a non-negative integer. '
if err:
    raise ImproperlyConfigured(err.strip())
| 37.151515 | 85 | 0.751223 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
CONTEXT_ATTRIBUTE = getattr(settings, 'JSON_LD_CONTEXT_ATTRIBUTE', 'sd')
MODEL_ATTRIBUTE = getattr(settings, 'JSON_LD_MODEL_ATTRIBUTE', 'sd')
DEFAULT_CONTEXT = getattr(settings, 'JSON_LD_DEFAULT_CONTEXT', 'https://schema.org/')
DEFAULT_TYPE = getattr(settings, 'JSON_LD_DEFAULT_TYPE', 'Thing')
JSON_INDENT = getattr(settings, 'JSON_LD_INDENT', None)
GENERATE_URL = getattr(settings, 'JSON_LD_GENERATE_URL', True)
valid_empty_input_rendering_settings = [
'strict', 'silent', 'generate_thing'
]
EMPTY_INPUT_RENDERING = getattr(settings, 'JSON_LD_EMPTY_INPUT_RENDERING', 'strict')
err = ''
if EMPTY_INPUT_RENDERING not in valid_empty_input_rendering_settings:
err += 'Invalid value for JSON_LD_EMPTY_INPUT_RENDERING setting. '
err += 'Expected one of {}, but got "{}". '.format(
valid_empty_input_rendering_settings,
EMPTY_INPUT_RENDERING
)
if not (JSON_INDENT is None or isinstance(JSON_INDENT, int) and JSON_INDENT >= 0) :
err += 'Invalid value for JSON_LD_INDENT setting. '
err += 'Expected None or a non-negative integer. '
if err:
raise ImproperlyConfigured(err.strip())
| 0 | 0 | 0 |
67218de130aabcee82d4268b9857f3ae44cdae89 | 596 | py | Python | NKUCodingCat/0019/0019.py | saurabh896/python-1 | f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7 | [
"MIT"
] | 3,976 | 2015-01-01T15:49:39.000Z | 2022-03-31T03:47:56.000Z | NKUCodingCat/0019/0019.py | dwh65416396/python | 1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a | [
"MIT"
] | 97 | 2015-01-11T02:59:46.000Z | 2022-03-16T14:01:56.000Z | NKUCodingCat/0019/0019.py | dwh65416396/python | 1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a | [
"MIT"
] | 3,533 | 2015-01-01T06:19:30.000Z | 2022-03-28T13:14:54.000Z | #coding=utf-8
# Python 2 script: dump the rows of numbers.xls into an XML document.
import xlrd, json, os
from lxml import etree

# Directory containing this script, with a trailing slash.
path = os.path.split(os.path.realpath(__file__))[0]+"/"

# Read every row of the first worksheet into a list of row-value lists.
workbook = xlrd.open_workbook(path+"numbers.xls")
sheet = workbook.sheets()[0]
rows = [sheet.row_values(i) for i in range(sheet.nrows)]

# Build <root><numbers>...</numbers></root>, with the rows serialised as JSON.
root = etree.Element("root")
numbers_node = etree.SubElement(root, "numbers")
numbers_node.append(etree.Comment(u"""数字信息"""))  # "numeric information"
numbers_node.text = unicode(json.dumps(rows).decode("utf-8"))

# NOTE: the output filename deliberately matches the original, including the
# trailing space in "numbers.xml ".
etree.ElementTree(root).write(path+"numbers.xml ", pretty_print=True,
                              xml_declaration=True, encoding='utf-8')
| 31.368421 | 94 | 0.711409 | #coding=utf-8
import xlrd, json, os
from lxml import etree
path = os.path.split(os.path.realpath(__file__))[0]+"/"
data = xlrd.open_workbook(path+"numbers.xls")
table = data.sheets()[0]
nrows = table.nrows
Dict = []
for i in range(nrows ):
Arr = table.row_values(i)
Dict.append(Arr)
root = etree.Element("root")
child1 = etree.SubElement(root, "numbers")
comm = etree.Comment(u"""数字信息""")
child1.append(comm)
child1.text =unicode(json.dumps(Dict).decode("utf-8"))
tree = etree.ElementTree(root)
tree.write(path+"numbers.xml ", pretty_print=True, xml_declaration=True, encoding='utf-8')
| 0 | 0 | 0 |
f0027c0975f7f4d7959a1a5711f73c1539d05796 | 25 | py | Python | C45Tree/__init__.py | ManuelFreytag/Algorithm_implementation | 380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8 | [
"MIT"
] | 1 | 2018-07-31T08:29:11.000Z | 2018-07-31T08:29:11.000Z | C45Tree/__init__.py | ManuelFreytag/Algorithm_implementation | 380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8 | [
"MIT"
] | null | null | null | C45Tree/__init__.py | ManuelFreytag/Algorithm_implementation | 380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8 | [
"MIT"
] | null | null | null | __all__ = ["apply","fit"] | 25 | 25 | 0.6 | __all__ = ["apply","fit"] | 0 | 0 | 0 |
2e367445be85ffe4119018eae20af4c01503bddf | 49 | py | Python | torch2cmsis/__init__.py | BCJuan/torch2cmsis | 476555968b3cbc8381f56480413be8957debaa66 | [
"Apache-2.0"
] | 19 | 2020-11-15T09:40:05.000Z | 2022-03-24T15:21:30.000Z | torch2cmsis/__init__.py | BCJuan/torch2cmsis | 476555968b3cbc8381f56480413be8957debaa66 | [
"Apache-2.0"
] | 1 | 2021-07-02T01:01:52.000Z | 2021-07-02T01:01:52.000Z | torch2cmsis/__init__.py | BCJuan/torch2cmsis | 476555968b3cbc8381f56480413be8957debaa66 | [
"Apache-2.0"
] | 4 | 2021-08-25T08:22:10.000Z | 2022-01-11T03:26:13.000Z | from torch2cmsis.converter import CMSISConverter
| 24.5 | 48 | 0.897959 | from torch2cmsis.converter import CMSISConverter
| 0 | 0 | 0 |
fcbb95f8d8cc7e26f00e0fbdea6a2d92f34d2388 | 69 | py | Python | tests/__init__.py | nitesh201/xaitk-saliency | ee86cd3c63bd3b25cad3dc06e5e124ba825190d4 | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | nitesh201/xaitk-saliency | ee86cd3c63bd3b25cad3dc06e5e124ba825190d4 | [
"BSD-3-Clause"
] | null | null | null | tests/__init__.py | nitesh201/xaitk-saliency | ee86cd3c63bd3b25cad3dc06e5e124ba825190d4 | [
"BSD-3-Clause"
] | null | null | null | from pathlib import Path
DATA_DIR = Path(__file__).parent / "data"
| 13.8 | 41 | 0.73913 | from pathlib import Path
DATA_DIR = Path(__file__).parent / "data"
| 0 | 0 | 0 |
b4d282088f0c58f6f9bcfce3af81c5a23ea4fdda | 10,617 | py | Python | models/Densenet.py | HotaekHan/FCOS | 8e3a0438cf1a53f8916d21ea81d892b260c100a9 | [
"Apache-2.0"
] | null | null | null | models/Densenet.py | HotaekHan/FCOS | 8e3a0438cf1a53f8916d21ea81d892b260c100a9 | [
"Apache-2.0"
] | null | null | null | models/Densenet.py | HotaekHan/FCOS | 8e3a0438cf1a53f8916d21ea81d892b260c100a9 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from models.SEBlock import SELayer
if __name__ == '__main__':
test() | 40.678161 | 128 | 0.638222 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from models.SEBlock import SELayer
def _bn_function_factory(norm, conv, relu):
    """Build a closure applying norm -> relu -> conv to its input.

    The returned function is the pre-activation part of a dense layer, kept
    separate so it can be wrapped in ``torch.utils.checkpoint`` when the
    memory-efficient mode is enabled.
    """
    def bn_function(inputs):
        # Pre-activation ordering: normalise, activate, then convolve.
        return conv(relu(norm(inputs)))
    return bn_function
class DenseBlock_B(nn.Module):
    """DenseNet bottleneck layer ("DenseNet-B").

    BN-ReLU-Conv1x1 widens to ``expansion * output_channels`` channels, then
    BN-ReLU-Conv(kernel_size) produces ``output_channels`` new feature maps,
    which are concatenated onto the input along the channel axis.

    efficient -- wrap the bottleneck in torch.utils.checkpoint so its
                 activations are recomputed during backward (saves memory).
    use_se    -- apply a squeeze-and-excitation block to the new features
                 before concatenation.
    """
    expansion = 4  # bottleneck widening factor for the 1x1 conv
    def __init__(self, kernel_size, input_channels, output_channels, efficient, use_se):
        super(DenseBlock_B, self).__init__()
        self.kernel_size = kernel_size
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.efficient = efficient
        # Bottleneck: BN -> ReLU -> 1x1 conv (built as a single closure so it
        # can be checkpointed as one unit in forward()).
        self.batch_norm1 = nn.BatchNorm2d(num_features=self.input_channels, momentum=0.01)
        self.conv1 = nn.Conv2d(in_channels=self.input_channels, out_channels=self.output_channels * self.expansion,
                               kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.relu1 = nn.ReLU(inplace=True)
        self.bn_function = _bn_function_factory(self.batch_norm1, self.conv1, self.relu1)
        # Main conv: BN -> ReLU -> kernel_size conv producing the new features.
        self.batch_norm2 = nn.BatchNorm2d(num_features=self.output_channels * self.expansion, momentum=0.01)
        self.conv2 = nn.Conv2d(in_channels=self.output_channels * self.expansion, out_channels=self.output_channels,
                               kernel_size=self.kernel_size, stride=(1, 1), padding=(1, 1), bias=False)
        self.relu2 = nn.ReLU(inplace=True)
        self.use_se = use_se
        if self.use_se:
            self.se_block = SELayer(n_channel=self.output_channels, reduction=5)
    def forward(self, inputs):
        # Checkpointing trades compute for memory: the bottleneck activations
        # are recomputed in the backward pass instead of being stored.
        if self.efficient:
            out = cp.checkpoint(self.bn_function, inputs)
        else:
            out = self.bn_function(inputs)
        out = self.conv2(self.relu2(self.batch_norm2(out)))
        if self.use_se:
            out = self.se_block(out)
        # Dense connectivity: append the new features to the input channels.
        out = torch.cat((inputs, out), dim=1)
        return out
class TransitionBlock(nn.Module):
    """DenseNet transition layer: BN-ReLU-Conv1x1 channel compression
    (by factor ``theta``) followed by 3x3 average pooling.

    strides/padding configure the pooling, so the same block can either keep
    the spatial size (stride 1) or downsample (stride 2).
    """
    def __init__(self, input_channels, strides, padding, theta=1.0):
        super(TransitionBlock, self).__init__()
        self.input_channels = input_channels
        # Compression: theta < 1 reduces the channel count (DenseNet-C).
        output_channels = int(input_channels * theta)
        self.output_channels = output_channels
        self.strides = strides
        self.padding = padding
        self.batch_norm1 = nn.BatchNorm2d(num_features=self.input_channels, momentum=0.01)
        self.conv1 = nn.Conv2d(in_channels=self.input_channels, out_channels=self.output_channels,
                               kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False)
        self.pool1 = nn.AvgPool2d(kernel_size=(3, 3), stride=self.strides, padding=self.padding)
        self.relu1 = nn.ReLU(inplace=True)
    def forward(self, inputs):
        out = self.conv1(self.relu1(self.batch_norm1(inputs)))
        out = self.pool1(out)
        return out
class DenseNet_BC(nn.Module):
    """DenseNet-BC backbone with an FPN-style top-down pathway.

    Four dense stages (sizes given by ``num_dense_blocks``, growth rate ``k``,
    compression ``theta``) produce bottom-up features c2..c5; lateral 1x1
    convs plus nearest-neighbour upsampling build the pyramid. ``forward``
    returns five maps (p3, p4, p5, p6, p7), each with ``output_dims`` (256)
    channels.
    """
    def __init__(self, num_dense_blocks, k=40, theta=1.0, efficient=False, use_se=False):
        super(DenseNet_BC, self).__init__()
        self.k = k
        self.theta = theta
        self.efficient = efficient
        self.use_se = use_se
        self.output_dims = 256
        # Stem: 7x7/2 conv on RGB input, starting at 2k channels.
        out_channels = 2 * self.k
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=out_channels,
                               kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # Stage 1: dense block + stride-1 transition (no extra downsampling).
        num_blocks = num_dense_blocks[0]
        self.dense1 = self._make_blocks(DenseBlock_B, num_blocks,
                                        kernel_size=(3, 3), input_channels=out_channels, output_channels=self.k,
                                        efficient=self.efficient, use_se=self.use_se)
        block1_out_channels = out_channels + (num_blocks * self.k)
        self.transition1 = TransitionBlock(input_channels=block1_out_channels, strides=(1, 1), padding=(1, 1), theta=self.theta)
        block1_out_channels = int(block1_out_channels * self.theta)
        # Stage 2: dense block + stride-2 transition.
        num_blocks = num_dense_blocks[1]
        self.dense2 = self._make_blocks(DenseBlock_B, num_blocks,
                                        kernel_size=(3, 3), input_channels=block1_out_channels, output_channels=self.k,
                                        efficient=self.efficient, use_se=self.use_se)
        block2_out_channels = block1_out_channels + (num_blocks * self.k)
        self.transition2 = TransitionBlock(input_channels=block2_out_channels, strides=(2, 2), padding=(1, 1), theta=self.theta)
        block2_out_channels = int(block2_out_channels * self.theta)
        # Stage 3: dense block + stride-2 transition.
        num_blocks = num_dense_blocks[2]
        self.dense3 = self._make_blocks(DenseBlock_B, num_blocks,
                                        kernel_size=(3, 3), input_channels=block2_out_channels, output_channels=self.k,
                                        efficient=self.efficient, use_se=self.use_se)
        block3_out_channels = block2_out_channels + (num_blocks * self.k)
        self.transition3 = TransitionBlock(input_channels=block3_out_channels, strides=(2, 2), padding=(1, 1), theta=self.theta)
        block3_out_channels = int(block3_out_channels * self.theta)
        # Stage 4: dense block + stride-2 transition.
        num_blocks = num_dense_blocks[3]
        self.dense4 = self._make_blocks(DenseBlock_B, num_blocks,
                                        kernel_size=(3, 3), input_channels=block3_out_channels, output_channels=self.k,
                                        efficient=self.efficient, use_se=self.use_se)
        block4_out_channels = block3_out_channels + (num_blocks * self.k)
        self.transition4 = TransitionBlock(input_channels=block4_out_channels, strides=(2, 2), padding=(1, 1), theta=self.theta)
        block4_out_channels = int(block4_out_channels * self.theta)
        # Extra pyramid levels p6/p7, obtained by further stride-2 convs.
        self.conv6 = nn.Conv2d(block4_out_channels, self.output_dims, kernel_size=3, stride=2, padding=1)
        self.conv7 = nn.Conv2d(self.output_dims, self.output_dims, kernel_size=3, stride=2, padding=1)
        # Lateral layers
        self.latlayer1 = nn.Conv2d(block4_out_channels, self.output_dims, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(block3_out_channels, self.output_dims, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(block2_out_channels, self.output_dims, kernel_size=1, stride=1, padding=0)
        # Top-down layers
        self.toplayer1 = nn.Conv2d(self.output_dims, self.output_dims, kernel_size=3, stride=1, padding=1)
        self.toplayer2 = nn.Conv2d(self.output_dims, self.output_dims, kernel_size=3, stride=1, padding=1)
    def _make_blocks(self, block, num_block, kernel_size, input_channels, output_channels, efficient, use_se):
        # Chain num_block dense layers; each layer adds output_channels
        # (the growth rate) to the running channel count.
        blocks = list()
        for iter_block in range(1, num_block+1):
            blocks.append(block(kernel_size, input_channels, output_channels, efficient, use_se))
            input_channels = input_channels + output_channels
        return nn.Sequential(*blocks)
    def _upsample_add(self, x, y):
        '''Upsample and add two feature maps.
        Args:
          x: (Variable) top feature map to be upsampled.
          y: (Variable) lateral feature map.
        Returns:
          (Variable) added feature map.
        Note in PyTorch, when input size is odd, the upsampled feature map
        with `F.upsample(..., scale_factor=2, mode='nearest')`
        maybe not equal to the lateral feature map size.
        e.g.
        original input size: [N,_,15,15] ->
        conv2d feature map size: [N,_,8,8] ->
        upsampled feature map size: [N,_,16,16]
        So we choose bilinear upsample which supports arbitrary output sizes.
        '''
        _, _, height, width = y.size()
        return nn.functional.interpolate(input=x, size=(height, width), mode='nearest') + y
    def forward(self, inputs):
        # Bottom-up
        c1 = F.relu(self.bn1(self.conv1(inputs)))
        c1 = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)
        c2 = self.dense1(c1)
        c2 = self.transition1(c2)
        c3 = self.dense2(c2)
        c3 = self.transition2(c3)
        c4 = self.dense3(c3)
        c4 = self.transition3(c4)
        c5 = self.dense4(c4)
        c5 = self.transition4(c5)
        p6 = self.conv6(c5)
        p7 = self.conv7(F.relu(p6))
        # Top-down
        p5 = self.latlayer1(c5)
        p4 = self._upsample_add(p5, self.latlayer2(c4))
        p4 = self.toplayer1(p4)
        p3 = self._upsample_add(p4, self.latlayer3(c3))
        p3 = self.toplayer2(p3)
        return p3, p4, p5, p6, p7
def DenseFPN62(use_se=False, efficient=False):
    """DenseNet-BC FPN backbone with 3/6/12/8 layers per stage (growth 40).

    Every Conv2d weight is re-initialised from N(0, 0.01); biases are zeroed.
    """
    net = DenseNet_BC(num_dense_blocks=[3, 6, 12, 8], k=40, theta=1.0, efficient=efficient, use_se=use_se)
    for module in net.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, mean=0, std=0.01)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
    return net
def DenseFPN102(use_se=False, efficient=False):
    """DenseNet-BC FPN backbone with 3/6/24/16 layers per stage (growth 40).

    Conv2d weights are drawn from N(0, 0.01) and any biases set to zero.
    """
    net = DenseNet_BC(num_dense_blocks=[3, 6, 24, 16], k=40, theta=1.0, efficient=efficient, use_se=use_se)
    convs = (m for m in net.modules() if isinstance(m, nn.Conv2d))
    for conv in convs:
        nn.init.normal_(conv.weight, mean=0, std=0.01)
        if conv.bias is not None:
            nn.init.constant_(conv.bias, 0)
    return net
def DenseFPN201(use_se=False, efficient=False):
    """DenseNet-BC FPN backbone with 6/12/48/32 layers per stage (growth 40).

    Conv2d weights are re-initialised from N(0, 0.01); biases are zeroed.
    """
    net = DenseNet_BC(num_dense_blocks=[6, 12, 48, 32], k=40, theta=1.0, efficient=efficient, use_se=use_se)
    for layer in net.modules():
        if isinstance(layer, nn.Conv2d):
            nn.init.normal_(layer.weight, mean=0, std=0.01)
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0)
    return net
def DenseFPN264(use_se=False, efficient=False):
    """DenseNet-BC FPN backbone with 6/12/64/48 layers per stage (growth 40).

    Conv2d weights are re-initialised from N(0, 0.01); biases are zeroed.
    """
    net = DenseNet_BC(num_dense_blocks=[6, 12, 64, 48], k=40, theta=1.0, efficient=efficient, use_se=use_se)
    conv_modules = [m for m in net.modules() if isinstance(m, nn.Conv2d)]
    for conv in conv_modules:
        nn.init.normal_(conv.weight, mean=0, std=0.01)
        if conv.bias is not None:
            nn.init.constant_(conv.bias, 0)
    return net
def test():
    """Smoke test: build DenseFPN102, count its parameters and run one
    forward pass on a random 640x640 image (requires a CUDA device)."""
    net = DenseFPN102(use_se=True, efficient=True)
    net = net.cuda()
    # Total parameter count = sum over tensors of the product of their dims.
    num_parameters = 0.
    for param in net.parameters():
        sizes = param.size()
        num_layer_param = 1.
        for size in sizes:
            num_layer_param *= size
        num_parameters += num_layer_param
    print(net)
    print("num. of parameters : " + str(num_parameters))
    tmp = torch.randn(1, 3, 640, 640)
    tmp = tmp.cuda()
    fms = net(tmp)
    for fm in fms:  # one feature map per pyramid level (p3..p7)
        print(fm.size())
if __name__ == '__main__':
    test()  # run the forward-pass smoke test when executed directly
0ab15742a4a97501df61f49b81a9828df61a969b | 4,411 | py | Python | xicam/plugins/hiprmc/obsolete/RmcView.py | ronpandolfi/Xi-cam | f6388c7c44b202403194a04ca2b4d8bcca41cc74 | [
"BSD-3-Clause-LBNL"
] | 19 | 2016-11-18T18:20:36.000Z | 2020-03-06T09:04:25.000Z | xicam/plugins/hiprmc/obsolete/RmcView.py | ronpandolfi/Xi-cam | f6388c7c44b202403194a04ca2b4d8bcca41cc74 | [
"BSD-3-Clause-LBNL"
] | 29 | 2016-10-27T17:43:01.000Z | 2018-07-09T03:06:54.000Z | xicam/plugins/hiprmc/obsolete/RmcView.py | ronpandolfi/Xi-cam | f6388c7c44b202403194a04ca2b4d8bcca41cc74 | [
"BSD-3-Clause-LBNL"
] | 10 | 2017-01-27T00:40:27.000Z | 2019-11-12T15:23:51.000Z | import numpy as np # Import important packages
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import glob
from PIL import Image
import os
import re
def calcscale(imv): # Defines calcscale function
"""
"""
image = imv.getProcessedImage()
scale = imv.scalemax / float(image[imv.currentIndex].shape[1])
return scale
if __name__ == '__main__': # Start Qt event loop unless running in interactive mode.
import sys
app = QtGui.QApplication([]) # Launches an app
root = '/Users/holden'
win = QtGui.QMainWindow() # Create window with two ImageView widgets
win.resize(800, 800)
win.setWindowTitle('pyqtgraph example: Hiprmc ')
win.setCentralWidget(rmcView(root,0.111))
win.show()
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 30.42069 | 136 | 0.599637 | import numpy as np # Import important packages
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import glob
from PIL import Image
import os
import re
def calcscale(imv):
    """Return the zoom factor mapping the current frame onto imv's target size.

    The factor is ``imv.scalemax`` divided by the width (``shape[1]``) of the
    frame at ``imv.currentIndex`` in the processed image stack.
    """
    frames = imv.getProcessedImage()
    current_width = float(frames[imv.currentIndex].shape[1])
    return imv.scalemax / current_width
class imagetimeline(list):
    """A list of 2-D frames mimicking enough of the ndarray interface
    (shape/ndim/size/max/min/dtype) for pyqtgraph's ImageView to treat it
    as a 3-D (time, y, x) image stack.
    """

    @property
    def shape(self):
        # Frames are presented as square: both spatial dims come from the
        # last frame's first axis.
        last_frame = self[-1]
        return (len(self), last_frame.shape[0], last_frame.shape[0])

    def __getitem__(self, item):
        return super(imagetimeline, self).__getitem__(item)

    @property
    def ndim(self):
        # Always reported as a 3-D stack.
        return 3

    @property
    def size(self):
        return sum(np.size(frame) for frame in self)

    @property
    def max(self):
        return max(np.max(frame) for frame in self)

    @property
    def min(self):
        return min(np.min(frame) for frame in self)

    @property
    def dtype(self):
        # Scalar type of the first pixel of the first frame.
        return type(self[0][0, 0])
class TimelineView(pg.ImageView): # ImageView that rescales each frame to a fixed display size
    def __init__(self, scalemax, *args, **kwargs):
        super(TimelineView, self).__init__(*args, **kwargs)
        self.scalemax = scalemax  # target width (scene units) used by calcscale()
    def quickMinMax(self, data): # intensity range over the whole stack, not a single frame
        return min(map(np.min, data)), max(map(np.max, data))
    def updateImage(self, autoHistogramRange=True): # redraw current frame with a per-frame zoom
        if self.image is None:
            return
        scale = calcscale(self) # zoom factor for the frame being shown
        image = self.getProcessedImage()
        if autoHistogramRange: # Sets the Y axis intensity bar
            self.ui.histogram.setHistogramRange(self.levelMin, self.levelMax)
        if self.axes['t'] is None:
            self.imageItem.updateImage(image)
        else:
            self.ui.roiPlot.show()
            self.imageItem.updateImage(image[self.currentIndex])
        self.imageItem.resetTransform() # Resets the scale up below
        self.imageItem.scale(scale, scale) # Scales up by the factor of scale
        print 'Image shape' + str(image.shape)
        print 'Scale set to: ' + str(scale)
class rmcView(QtGui.QTabWidget):
    """Tabbed viewer for HipRMC model outputs.

    Scans *root* for tiff files named ####_####_####_####_model.tif, groups
    them by tile index (the second 4-digit field of the name) and shows each
    tile's time-ordered images in its own TimelineView tab.
    """
    def __init__(self, root, loadingfactors=None):
        """
        root           -- directory containing the *_model.tif outputs
        loadingfactors -- optional sequence of per-tile loading factors used
                          as tab labels; when None, tabs are labelled
                          "Tile <n>" instead.
        """
        super(rmcView, self).__init__()
        paths = glob.glob(os.path.join(root,
                                       '[0-9][0-9][0-9][0-9]_[0-9][0-9][0-9][0-9]_[0-9][0-9][0-9][0-9]_[0-9][0-9][0-9][0-9]_model.tif'))
        # Map each path to its four 4-digit index fields from the filename.
        indices = dict(zip(paths, [re.findall(r'\d{4}', os.path.basename(path)) for path in paths]))

        # Group paths by tile number (the second index field).
        tiles = dict()
        for path, ind in indices.items():
            tiles.setdefault(int(ind[1]), []).append(path)

        # BUG FIX: the original passed loadingfactors (possibly None, the
        # default) straight to zip(), raising TypeError and making the
        # "Tile <n>" labelling branch unreachable; pad with None instead.
        if loadingfactors is None:
            loadingfactors = [None] * len(tiles)

        for tile, loadingfactor in zip(tiles, loadingfactors):
            images = []
            paths = sorted(tiles[tile])
            for path in paths:
                img = Image.open(path).convert('L')  # 8-bit greyscale
                img = np.array(img)
                print(path)
                print(img.shape)
                images.append(img)
            data = imagetimeline(images)
            sizemax = max(map(np.shape, data))[0]
            view = TimelineView(sizemax)
            view.setImage(data)
            scale = calcscale(view)  # zoom factor for the first frame
            view.imageItem.resetTransform()
            view.imageItem.scale(scale, scale)
            view.autoRange()
            # Hide the histogram/ROI chrome: this is a read-only viewer.
            view.getHistogramWidget().setHidden(True)
            view.ui.roiBtn.setHidden(True)
            view.ui.menuBtn.setHidden(True)
            if loadingfactor is None:
                self.addTab(view, u"Tile " + str(tile + 1))
            else:
                self.addTab(view, str(loadingfactor))
if __name__ == '__main__': # Start Qt event loop unless running in interactive mode.
    import sys
    app = QtGui.QApplication([]) # Launches an app
    root = '/Users/holden'
    win = QtGui.QMainWindow() # Create window with two ImageView widgets
    win.resize(800, 800)
    win.setWindowTitle('pyqtgraph example: Hiprmc ')
    # NOTE(review): rmcView's loadingfactors parameter looks like it expects a
    # sequence (it is passed to zip()); the scalar 0.111 here would raise a
    # TypeError -- confirm the intended call signature.
    win.setCentralWidget(rmcView(root,0.111))
    win.show()
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| 2,968 | 389 | 175 |
cbc6b7a8d85459b043e4cce6a13ed6449cfa1f37 | 890 | py | Python | backend/users/urls.py | RA-MPR/mpr | a2e6f320af916d318da7c68c0764662c3d146974 | [
"MIT"
] | null | null | null | backend/users/urls.py | RA-MPR/mpr | a2e6f320af916d318da7c68c0764662c3d146974 | [
"MIT"
] | 91 | 2021-02-24T08:25:47.000Z | 2021-05-05T10:14:21.000Z | backend/users/urls.py | RA-MPR/mpr | a2e6f320af916d318da7c68c0764662c3d146974 | [
"MIT"
] | 1 | 2022-01-07T14:56:34.000Z | 2022-01-07T14:56:34.000Z | from django.urls import re_path, path
from rest_framework.authtoken.views import obtain_auth_token
from users.views import UserOrderView, UserCreateView, UserView, UserEventView, UserCompanyView, UserAdminView, UserContactView, UserIdCompany
# URL routes for the users app. Token auth is handled by DRF's built-in
# obtain_auth_token view; all other endpoints map to the class-based views
# imported above.
urlpatterns = [
    re_path(r"^$", UserView.as_view(), name="user"),
    # Note: this pattern has no anchors/converter, so "<id>" is matched
    # literally by path(); it captures the "id" segment as a string.
    path(r"<id>", UserIdCompany.as_view(), name="user_id_company"),
    re_path(r"^register/", UserCreateView.as_view(), name="user_registration"),
    re_path(r"^login/", obtain_auth_token, name="user_login"),
    re_path(r"^admin/", UserAdminView.as_view(), name="user_admin"),
    re_path(r"^events/", UserEventView.as_view(), name="user_event"),
    re_path(r"^companies/", UserCompanyView.as_view(), name="user_company"),
    re_path(r"^contacts/", UserContactView.as_view(), name="user_contacts"),
    re_path(r"^orders/", UserOrderView.as_view(), name="user_orders")
]
| 52.352941 | 142 | 0.734831 | from django.urls import re_path, path
from rest_framework.authtoken.views import obtain_auth_token
from users.views import UserOrderView, UserCreateView, UserView, UserEventView, UserCompanyView, UserAdminView, UserContactView, UserIdCompany
urlpatterns = [
re_path(r"^$", UserView.as_view(), name="user"),
path(r"<id>", UserIdCompany.as_view(), name="user_id_company"),
re_path(r"^register/", UserCreateView.as_view(), name="user_registration"),
re_path(r"^login/", obtain_auth_token, name="user_login"),
re_path(r"^admin/", UserAdminView.as_view(), name="user_admin"),
re_path(r"^events/", UserEventView.as_view(), name="user_event"),
re_path(r"^companies/", UserCompanyView.as_view(), name="user_company"),
re_path(r"^contacts/", UserContactView.as_view(), name="user_contacts"),
re_path(r"^orders/", UserOrderView.as_view(), name="user_orders")
]
| 0 | 0 | 0 |
4a51109b44e90147f8d93e2e7cda32b046f5fb99 | 6,871 | py | Python | xgbo/xgb_callbacks.py | orielkiss/xgbo | bc8fc4be4a3603e284044546d7b5455f02f5a8b2 | [
"MIT"
] | null | null | null | xgbo/xgb_callbacks.py | orielkiss/xgbo | bc8fc4be4a3603e284044546d7b5455f02f5a8b2 | [
"MIT"
] | null | null | null | xgbo/xgb_callbacks.py | orielkiss/xgbo | bc8fc4be4a3603e284044546d7b5455f02f5a8b2 | [
"MIT"
] | null | null | null | import time
import xgboost as xgb
from xgboost import rabit
def _fmt_metric(value, show_stdv=True):
    """Render an xgboost evaluation tuple as 'name:value' or 'name:value+stdv'.

    value     -- (name, mean) or (name, mean, stdv); anything else raises
                 ValueError.
    show_stdv -- include the '+stdv' suffix for 3-tuples.
    """
    if len(value) == 2:
        name, mean = value
        return '%s:%g' % (name, mean)
    if len(value) == 3:
        name, mean, stdv = value
        if show_stdv:
            return '%s:%g+%g' % (name, mean, stdv)
        return '%s:%g' % (name, mean)
    raise ValueError("wrong metric value")
# Modification of the official early_stop callback to only trigger it from the nth round on
def early_stop(stopping_rounds, start_round=0, maximize=False, verbose=True, eval_idx=-1):
    """Create a callback that activates early stoppping.

    Validation error needs to decrease at least
    every **stopping_rounds** round(s) to continue training.
    Requires at least one item in **evals**.
    If there's more than one, will use the last.
    Returns the model from the last iteration (not the best one).
    If early stopping occurs, the model will have three additional fields:
    ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
    (Use ``bst.best_ntree_limit`` to get the correct value if ``num_parallel_tree``
    and/or ``num_class`` appears in the parameters)

    Parameters
    ----------
    stopping_rounds : int
        Number of rounds without improvement before stopping.
    start_round : int
        First boosting round at which early stopping is considered; earlier
        rounds are ignored entirely (this is the local modification vs the
        upstream xgboost callback).
    maximize : bool
        Whether to maximize evaluation metric.
    verbose : optional, bool
        Whether to print message about early stopping information.
    eval_idx : int
        Index into ``env.evaluation_result_list`` of the metric to monitor
        (default -1: the last evaluation set).

    Returns
    -------
    callback : function
        The requested callback function.
    """
    state = {}
    def init(env):
        """Initialise the shared ``state`` dict on the first monitored round."""
        bst = env.model
        if len(env.evaluation_result_list) == 0:
            raise ValueError('For early stopping you need at least one set in evals.')
        if len(env.evaluation_result_list) > 1 and verbose:
            msg = ("Multiple eval metrics have been passed: "
                   "'{0}' will be used for early stopping.\n\n")
            rabit.tracker_print(msg.format(env.evaluation_result_list[eval_idx][0]))
        # These metric families are "higher is better", so the comparison
        # direction is flipped automatically even if maximize was not set.
        maximize_metrics = ('auc', 'map', 'ndcg')
        maximize_at_n_metrics = ('auc@', 'map@', 'ndcg@')
        maximize_score = maximize
        metric_label = env.evaluation_result_list[eval_idx][0]
        metric = metric_label.split('-', 1)[-1]
        if any(metric.startswith(x) for x in maximize_at_n_metrics):
            maximize_score = True
        if any(metric.split(":")[0] == x for x in maximize_metrics):
            maximize_score = True
        if verbose and env.rank == 0:
            msg = "Will train until {} hasn't improved in {} rounds.\n"
            rabit.tracker_print(msg.format(metric_label, stopping_rounds))
        state['maximize_score'] = maximize_score
        state['best_iteration'] = 0
        if maximize_score:
            state['best_score'] = float('-inf')
        else:
            state['best_score'] = float('inf')
        if bst is not None:
            # Resume from checkpointed best-so-far attributes if present.
            if bst.attr('best_score') is not None:
                state['best_score'] = float(bst.attr('best_score'))
                state['best_iteration'] = int(bst.attr('best_iteration'))
                state['best_msg'] = bst.attr('best_msg')
            else:
                bst.set_attr(best_iteration=str(state['best_iteration']))
                bst.set_attr(best_score=str(state['best_score']))
        else:
            assert env.cvfolds is not None
    def callback(env):
        """Per-round check: track the best score, stop after stopping_rounds
        rounds without improvement."""
        # Ignore everything before start_round entirely.
        if env.iteration < start_round:
            return
        score = env.evaluation_result_list[eval_idx][1]
        if len(state) == 0:
            init(env)
        best_score = state['best_score']
        best_iteration = state['best_iteration']
        maximize_score = state['maximize_score']
        if (maximize_score and score > best_score) or \
                (not maximize_score and score < best_score):
            msg = '[%d]\t%s' % (
                env.iteration,
                '\t'.join([_fmt_metric(x) for x in env.evaluation_result_list]))
            state['best_msg'] = msg
            state['best_score'] = score
            state['best_iteration'] = env.iteration
            # save the property to attributes, so they will occur in checkpoint.
            if env.model is not None:
                env.model.set_attr(best_score=str(state['best_score']),
                                   best_iteration=str(state['best_iteration']),
                                   best_msg=state['best_msg'])
        elif env.iteration - best_iteration >= stopping_rounds:
            best_msg = state['best_msg']
            if verbose and env.rank == 0:
                msg = "Stopping. Best iteration:\n{}\n\n"
                rabit.tracker_print(msg.format(best_msg))
            raise xgb.core.EarlyStopException(best_iteration)
    return callback
| 37.140541 | 91 | 0.605589 | import time
import xgboost as xgb
from xgboost import rabit
def callback_overtraining(best_test_auc, callback_status):
    """xgboost callback that aborts training when the train/test AUC gap
    exceeds (1 - best_test_auc), i.e. the model is overfitting.

    Sets callback_status["status"] = 2 before raising EarlyStopException.
    Assumes evaluation_result_list[0] is the train metric and [1] the test
    metric -- TODO confirm against the caller's evals ordering.
    """
    def callback(env):
        train_auc = env.evaluation_result_list[0][1]
        test_auc = env.evaluation_result_list[1][1]
        if train_auc < best_test_auc:
            # Train AUC hasn't reached the reference score yet; too early to judge.
            return
        if train_auc - test_auc > 1 - best_test_auc:
            print("We have an overtraining problem! Stop boosting.")
            callback_status["status"] = 2
            raise xgb.core.EarlyStopException(env.iteration)
    return callback
def callback_timeout(max_time, best_test_auc, callback_status, n_fit=10):
    """xgboost callback enforcing a wall-clock training budget.

    Stops immediately once max_time seconds have elapsed (status 3), or when
    a linear fit through the last n_fit (time, test-AUC) samples keeps
    predicting, n_fit times in a row, that best_test_auc will not be beaten
    by max_time (status 2). max_time=None disables the callback.
    """
    start_time = time.time()
    last_n_times = []
    last_n_test_auc = []
    status = {'counter': 0}
    def callback(env):
        if max_time == None:
            return
        run_time = time.time() - start_time
        if run_time > max_time:
            # Hard timeout.
            callback_status["status"] = 3
            raise xgb.core.EarlyStopException(env.iteration)
            # NOTE(review): the two statements below are unreachable -- the
            # raise above exits first.
            print("Xgboost training took too long. Stop boosting.")
            raise xgb.core.EarlyStopException(env.iteration)
        # Sliding window of the last n_fit (elapsed-time, test-AUC) samples.
        last_n_test_auc.append(env.evaluation_result_list[1][1])
        if len(last_n_test_auc) > n_fit:
            del last_n_test_auc[0]
        last_n_times.append(run_time)
        if len(last_n_times) > n_fit:
            del last_n_times[0]
        if len(last_n_test_auc) < n_fit:
            return
        # Linear extrapolation of test AUC to the time budget.
        # NOTE(review): `np` is not among this module's visible imports
        # (time/xgboost/rabit only) -- this would raise NameError; confirm a
        # numpy import exists upstream.
        poly = np.polyfit(last_n_times, last_n_test_auc, deg=1)
        guessed_test_auc_at_max_time = np.polyval(poly, max_time)
        if guessed_test_auc_at_max_time < best_test_auc and best_test_auc > 0.0:
            status['counter'] = status['counter'] + 1
        else:
            status['counter'] = 0
        if status['counter'] == n_fit:
            # Projection has been pessimistic n_fit times in a row.
            callback_status["status"] = 2
            raise xgb.core.EarlyStopException(env.iteration)
            # NOTE(review): unreachable -- the raise above exits first.
            print("Test AUC does not converge well. Stop boosting.")
            raise xgb.core.EarlyStopException(env.iteration)
    return callback
def _fmt_metric(value, show_stdv=True):
"""format metric string"""
if len(value) == 2:
return '%s:%g' % (value[0], value[1])
elif len(value) == 3:
if show_stdv:
return '%s:%g+%g' % (value[0], value[1], value[2])
else:
return '%s:%g' % (value[0], value[1])
else:
raise ValueError("wrong metric value")
# Modification of the official early_stop callback to only trigger it from the nth round on
def early_stop(stopping_rounds, start_round=0, maximize=False, verbose=True, eval_idx=-1):
"""Create a callback that activates early stoppping.
Validation error needs to decrease at least
every **stopping_rounds** round(s) to continue training.
Requires at least one item in **evals**.
If there's more than one, will use the last.
Returns the model from the last iteration (not the best one).
If early stopping occurs, the model will have three additional fields:
``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
(Use ``bst.best_ntree_limit`` to get the correct value if ``num_parallel_tree``
and/or ``num_class`` appears in the parameters)
Parameters
----------
stopp_rounds : int
The stopping rounds before the trend occur.
maximize : bool
Whether to maximize evaluation metric.
verbose : optional, bool
Whether to print message about early stopping information.
Returns
-------
callback : function
The requested callback function.
"""
state = {}
def init(env):
"""internal function"""
bst = env.model
if len(env.evaluation_result_list) == 0:
raise ValueError('For early stopping you need at least one set in evals.')
if len(env.evaluation_result_list) > 1 and verbose:
msg = ("Multiple eval metrics have been passed: "
"'{0}' will be used for early stopping.\n\n")
rabit.tracker_print(msg.format(env.evaluation_result_list[eval_idx][0]))
maximize_metrics = ('auc', 'map', 'ndcg')
maximize_at_n_metrics = ('auc@', 'map@', 'ndcg@')
maximize_score = maximize
metric_label = env.evaluation_result_list[eval_idx][0]
metric = metric_label.split('-', 1)[-1]
if any(metric.startswith(x) for x in maximize_at_n_metrics):
maximize_score = True
if any(metric.split(":")[0] == x for x in maximize_metrics):
maximize_score = True
if verbose and env.rank == 0:
msg = "Will train until {} hasn't improved in {} rounds.\n"
rabit.tracker_print(msg.format(metric_label, stopping_rounds))
state['maximize_score'] = maximize_score
state['best_iteration'] = 0
if maximize_score:
state['best_score'] = float('-inf')
else:
state['best_score'] = float('inf')
if bst is not None:
if bst.attr('best_score') is not None:
state['best_score'] = float(bst.attr('best_score'))
state['best_iteration'] = int(bst.attr('best_iteration'))
state['best_msg'] = bst.attr('best_msg')
else:
bst.set_attr(best_iteration=str(state['best_iteration']))
bst.set_attr(best_score=str(state['best_score']))
else:
assert env.cvfolds is not None
    def callback(env):
        """Per-iteration hook: record the best score, raise to stop early.

        Raises ``xgb.core.EarlyStopException`` once ``stopping_rounds``
        iterations pass without improvement.

        NOTE(review): if no improvement ever happens before ``stopping_rounds``
        in CV mode, ``state['best_msg']`` may be unset when read below --
        confirm against upstream xgboost behaviour.
        """
        if env.iteration < start_round:
            return
        score = env.evaluation_result_list[eval_idx][1]
        if len(state) == 0:
            # First invocation: build the tracking state lazily.
            init(env)
        best_score = state['best_score']
        best_iteration = state['best_iteration']
        maximize_score = state['maximize_score']
        if (maximize_score and score > best_score) or \
                (not maximize_score and score < best_score):
            msg = '[%d]\t%s' % (
                env.iteration,
                '\t'.join([_fmt_metric(x) for x in env.evaluation_result_list]))
            state['best_msg'] = msg
            state['best_score'] = score
            state['best_iteration'] = env.iteration
            # save the property to attributes, so they will occur in checkpoint.
            if env.model is not None:
                env.model.set_attr(best_score=str(state['best_score']),
                                   best_iteration=str(state['best_iteration']),
                                   best_msg=state['best_msg'])
        elif env.iteration - best_iteration >= stopping_rounds:
            # No improvement for `stopping_rounds` iterations: stop training.
            best_msg = state['best_msg']
            if verbose and env.rank == 0:
                msg = "Stopping. Best iteration:\n{}\n\n"
                rabit.tracker_print(msg.format(best_msg))
            raise xgb.core.EarlyStopException(best_iteration)
return callback
| 1,935 | 0 | 46 |
40643836f25d226dbb0944acf6e0241474e172eb | 822 | py | Python | temp-snn/snn/rl.py | Pandinosaurus/Spiking-Neural-Network | de3e24da2806f0a7006b37f395ed055497727ae6 | [
"Apache-2.0"
] | 755 | 2016-09-12T13:42:51.000Z | 2022-03-29T19:11:44.000Z | temp-snn/snn/rl.py | mariusionescu/Spiking-Neural-Network | de3e24da2806f0a7006b37f395ed055497727ae6 | [
"Apache-2.0"
] | 19 | 2017-11-03T06:03:39.000Z | 2022-03-08T08:33:05.000Z | temp-snn/snn/rl.py | Smirenost/Spiking-Neural-Network | de3e24da2806f0a7006b37f395ed055497727ae6 | [
"Apache-2.0"
] | 258 | 2017-03-30T13:40:31.000Z | 2022-03-15T20:32:14.000Z | ########################################################## README ###########################################################
# This file implements STDP curve and weight update rule
##############################################################################################################################
import numpy as np
from matplotlib import pyplot as plt
from parameters import param as par
#STDP reinforcement learning curve
#STDP weight update rule
if __name__ == '__main__':
print(rl(-20)*par.sigma)
| 24.909091 | 126 | 0.502433 | ########################################################## README ###########################################################
# This file implements STDP curve and weight update rule
##############################################################################################################################
import numpy as np
from matplotlib import pyplot as plt
from parameters import param as par
#STDP reinforcement learning curve
def rl(t):
    """Return the piecewise-exponential STDP curve value for time delta *t*.

    For ``t > 0`` the result is ``-par.A_plus * exp(-t / par.tau_plus)``;
    for ``t <= 0`` it is ``par.A_minus * exp(t / par.tau_minus)``.
    """
    dt = float(t)
    if dt > 0:
        return -par.A_plus * np.exp(-dt / par.tau_plus)
    return par.A_minus * np.exp(dt / par.tau_minus)
#STDP weight update rule
def update(w, del_w):
    """Apply the STDP weight-update rule and return the new weight.

    Negative ``del_w`` moves the weight relative to ``par.w_min``; positive
    ``del_w`` moves it towards ``par.w_max``; both updates are scaled by
    ``par.sigma`` and ``par.scale``.

    Bug fix: the original fell through and returned ``None`` when
    ``del_w == 0``; the weight is now returned unchanged in that case.
    """
    if del_w < 0:
        return w + par.sigma*del_w*(w-abs(par.w_min))*par.scale
    if del_w > 0:
        return w + par.sigma*del_w*(par.w_max-w)*par.scale
    return w  # del_w == 0: no change
# Quick manual sanity check: print one scaled sample of the learning curve.
if __name__ == '__main__':
    print(rl(-20)*par.sigma)
| 252 | 0 | 44 |
514b82b6b15aa1634c62e6ea3b8fa866dd59c1ab | 2,235 | py | Python | test/sink/test_sink_config_validator.py | daisuke-fujita/monsaca-analytics_20181107 | 5809e66874d76bd9f102e7694197bd849210fa3b | [
"Apache-2.0"
] | 1 | 2021-03-19T04:09:04.000Z | 2021-03-19T04:09:04.000Z | test/sink/test_sink_config_validator.py | daisuke-fujita/monsaca-analytics_20181107 | 5809e66874d76bd9f102e7694197bd849210fa3b | [
"Apache-2.0"
] | 1 | 2019-01-21T09:44:29.000Z | 2019-01-21T09:44:29.000Z | test/sink/test_sink_config_validator.py | daisuke-fujita/monsaca-analytics_20181107 | 5809e66874d76bd9f102e7694197bd849210fa3b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import voluptuous
import monasca_analytics.sink.sink_config_validator as validator
kafka = validator.validate_kafka_sink_config
if __name__ == "__main__":
unittest.main()
| 33.863636 | 75 | 0.710962 | #!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import voluptuous
import monasca_analytics.sink.sink_config_validator as validator
kafka = validator.validate_kafka_sink_config
class SinkConfigValidatorTest(unittest.TestCase):
    """Unit tests for ``validate_kafka_sink_config`` (aliased as ``kafka``).

    Each invalid-config test overrides exactly one field of a known-good
    configuration and expects ``voluptuous.Invalid`` to be raised.
    """

    def setUp(self):
        unittest.TestCase.setUp(self)
        # Baseline configuration that the validator must accept.
        self._valid_config = {
            "module": "KafkaSink",
            "host": "127.0.0.1",
            "port": 9092,
            "topic": "transformed_data"
        }

    def _config_with(self, **overrides):
        """Return a *copy* of the valid config with the given fields overridden.

        The original tests aliased and mutated ``self._valid_config``
        directly; copying keeps the shared fixture pristine within a test.
        """
        config = dict(self._valid_config)
        config.update(overrides)
        return config

    def test_validate_kafka_sink_valid_config(self):
        try:
            kafka(self._valid_config)
        except voluptuous.Invalid as e:
            self.fail(str(e))

    def test_validate_kafka_sink_invalid_module(self):
        self.assertRaises(voluptuous.Invalid, kafka,
                          self._config_with(module="invalid_module"))

    def test_validate_kafka_sink_invalid_host(self):
        self.assertRaises(voluptuous.Invalid, kafka,
                          self._config_with(host="invalid host"))

    def test_validate_kafka_sink_invalid_port(self):
        self.assertRaises(voluptuous.Invalid, kafka,
                          self._config_with(port="invalid_port"))

    def test_validate_kafka_sink_invalid_topic(self):
        self.assertRaises(voluptuous.Invalid, kafka,
                          self._config_with(topic="invalid topic"))

    def tearDown(self):
        unittest.TestCase.tearDown(self)
if __name__ == "__main__":
unittest.main()
| 1,161 | 28 | 211 |
c51c43cb9b957f13e05a979b07c4f245cd5c7e86 | 16,462 | py | Python | unit_05/design4.py | janusnic/21v-pyqt | 8ee3828e1c6e6259367d6cedbd63b9057cf52c24 | [
"MIT"
] | null | null | null | unit_05/design4.py | janusnic/21v-pyqt | 8ee3828e1c6e6259367d6cedbd63b9057cf52c24 | [
"MIT"
] | null | null | null | unit_05/design4.py | janusnic/21v-pyqt | 8ee3828e1c6e6259367d6cedbd63b9057cf52c24 | [
"MIT"
] | 2 | 2019-11-14T15:04:22.000Z | 2021-10-31T07:34:46.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'notepad4.ui'
#
# Created: Wed Dec 9 14:01:42 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
try:
_encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
import pad_rc
| 57.559441 | 114 | 0.701373 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'notepad4.ui'
#
# Created: Wed Dec 9 14:01:42 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # Use Qt's own UTF-8 conversion when the QString API is available.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString.fromUtf8 is unavailable on this PyQt4 build: identity passthrough.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        # translate() variant that takes an explicit encoding argument.
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Fallback when UnicodeUTF8 is absent: no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """pyuic4-generated UI definition for the notepad main window.

    Auto-generated from ``notepad4.ui`` -- manual edits are lost on
    regeneration (see the file header).  ``setupUi`` builds the widget
    tree, menus, toolbars and actions; ``retranslateUi`` installs every
    user-visible (translatable) string.
    """

    def setupUi(self, MainWindow):
        """Create all widgets, menus, toolbars and actions on *MainWindow*."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(1022, 544)
        # Central widget: a single large text editor.
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.textEdit = QtGui.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(10, 0, 1001, 441))
        self.textEdit.setObjectName(_fromUtf8("textEdit"))
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and its top-level menus.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1022, 27))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menu_File = QtGui.QMenu(self.menubar)
        self.menu_File.setObjectName(_fromUtf8("menu_File"))
        self.menuEdit = QtGui.QMenu(self.menubar)
        self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
        self.menuView = QtGui.QMenu(self.menubar)
        self.menuView.setObjectName(_fromUtf8("menuView"))
        self.menuHelp = QtGui.QMenu(self.menubar)
        self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
        self.menuFormat = QtGui.QMenu(self.menubar)
        self.menuFormat.setObjectName(_fromUtf8("menuFormat"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        # Two toolbars: file/edit operations and text formatting.
        self.toolbar = QtGui.QToolBar(MainWindow)
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Noto Sans [unknown]"))
        font.setPointSize(14)
        self.toolbar.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("icons/new.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.toolbar.setProperty("new", icon)
        self.toolbar.setObjectName(_fromUtf8("toolbar"))
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolbar)
        MainWindow.insertToolBarBreak(self.toolbar)
        self.formatbar = QtGui.QToolBar(MainWindow)
        self.formatbar.setObjectName(_fromUtf8("formatbar"))
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.formatbar)
        # Actions (icons loaded from the pad_rc resource module, ':/icons/...').
        self.closeAction = QtGui.QAction(MainWindow)
        self.closeAction.setObjectName(_fromUtf8("closeAction"))
        self.newAction = QtGui.QAction(MainWindow)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/new.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.newAction.setIcon(icon1)
        self.newAction.setObjectName(_fromUtf8("newAction"))
        self.openAction = QtGui.QAction(MainWindow)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/open.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.openAction.setIcon(icon2)
        self.openAction.setObjectName(_fromUtf8("openAction"))
        self.saveAction = QtGui.QAction(MainWindow)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.saveAction.setIcon(icon3)
        self.saveAction.setObjectName(_fromUtf8("saveAction"))
        self.printAction = QtGui.QAction(MainWindow)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/print.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.printAction.setIcon(icon4)
        self.printAction.setObjectName(_fromUtf8("printAction"))
        self.previewAction = QtGui.QAction(MainWindow)
        # NOTE(review): preview reuses the "save" icon (icon3) -- looks
        # unintentional in the .ui design; confirm before changing.
        self.previewAction.setIcon(icon3)
        self.previewAction.setObjectName(_fromUtf8("previewAction"))
        self.cutAction = QtGui.QAction(MainWindow)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/cut.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.cutAction.setIcon(icon5)
        self.cutAction.setObjectName(_fromUtf8("cutAction"))
        self.copyAction = QtGui.QAction(MainWindow)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/copy.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.copyAction.setIcon(icon6)
        self.copyAction.setObjectName(_fromUtf8("copyAction"))
        self.pasteAction = QtGui.QAction(MainWindow)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/paste.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pasteAction.setIcon(icon7)
        self.pasteAction.setObjectName(_fromUtf8("pasteAction"))
        self.undoAction = QtGui.QAction(MainWindow)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/undo.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.undoAction.setIcon(icon8)
        self.undoAction.setObjectName(_fromUtf8("undoAction"))
        self.redoAction = QtGui.QAction(MainWindow)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/redo.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.redoAction.setIcon(icon9)
        self.redoAction.setObjectName(_fromUtf8("redoAction"))
        self.bulletAction = QtGui.QAction(MainWindow)
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/bullet.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.bulletAction.setIcon(icon10)
        self.bulletAction.setObjectName(_fromUtf8("bulletAction"))
        self.numberedAction = QtGui.QAction(MainWindow)
        icon11 = QtGui.QIcon()
        icon11.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/number.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.numberedAction.setIcon(icon11)
        self.numberedAction.setObjectName(_fromUtf8("numberedAction"))
        self.fontColor = QtGui.QAction(MainWindow)
        icon12 = QtGui.QIcon()
        icon12.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/font-color.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.fontColor.setIcon(icon12)
        self.fontColor.setObjectName(_fromUtf8("fontColor"))
        self.backColor = QtGui.QAction(MainWindow)
        icon13 = QtGui.QIcon()
        icon13.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/highlight.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.backColor.setIcon(icon13)
        self.backColor.setObjectName(_fromUtf8("backColor"))
        self.boldAction = QtGui.QAction(MainWindow)
        icon14 = QtGui.QIcon()
        icon14.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/bold.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.boldAction.setIcon(icon14)
        self.boldAction.setObjectName(_fromUtf8("boldAction"))
        self.italicAction = QtGui.QAction(MainWindow)
        icon15 = QtGui.QIcon()
        icon15.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/italic.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.italicAction.setIcon(icon15)
        self.italicAction.setObjectName(_fromUtf8("italicAction"))
        self.underlAction = QtGui.QAction(MainWindow)
        icon16 = QtGui.QIcon()
        icon16.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/underline.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.underlAction.setIcon(icon16)
        self.underlAction.setObjectName(_fromUtf8("underlAction"))
        self.strikeAction = QtGui.QAction(MainWindow)
        icon17 = QtGui.QIcon()
        icon17.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/strike.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.strikeAction.setIcon(icon17)
        self.strikeAction.setObjectName(_fromUtf8("strikeAction"))
        self.superAction = QtGui.QAction(MainWindow)
        icon18 = QtGui.QIcon()
        icon18.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/superscript.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.superAction.setIcon(icon18)
        self.superAction.setObjectName(_fromUtf8("superAction"))
        self.subAction = QtGui.QAction(MainWindow)
        icon19 = QtGui.QIcon()
        icon19.addPixmap(QtGui.QPixmap(_fromUtf8(":/icons/subscript.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.subAction.setIcon(icon19)
        self.subAction.setObjectName(_fromUtf8("subAction"))
        # Populate menus (addAction order defines on-screen order).
        self.menu_File.addAction(self.newAction)
        self.menu_File.addAction(self.openAction)
        self.menu_File.addAction(self.saveAction)
        self.menu_File.addSeparator()
        self.menu_File.addAction(self.printAction)
        self.menu_File.addAction(self.previewAction)
        self.menu_File.addAction(self.closeAction)
        self.menuEdit.addAction(self.cutAction)
        self.menuEdit.addAction(self.copyAction)
        self.menuEdit.addAction(self.pasteAction)
        self.menuEdit.addAction(self.undoAction)
        self.menuEdit.addAction(self.redoAction)
        self.menuFormat.addAction(self.bulletAction)
        self.menuFormat.addAction(self.numberedAction)
        self.menuFormat.addSeparator()
        self.menuFormat.addAction(self.fontColor)
        self.menuFormat.addAction(self.backColor)
        self.menuFormat.addSeparator()
        self.menuFormat.addAction(self.boldAction)
        self.menuFormat.addAction(self.italicAction)
        self.menuFormat.addAction(self.underlAction)
        self.menuFormat.addAction(self.strikeAction)
        self.menuFormat.addAction(self.superAction)
        self.menuFormat.addAction(self.subAction)
        self.menuFormat.addSeparator()
        self.menubar.addAction(self.menu_File.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuFormat.menuAction())
        self.menubar.addAction(self.menuView.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        # Populate toolbars.
        self.toolbar.addSeparator()
        self.toolbar.addAction(self.newAction)
        self.toolbar.addAction(self.openAction)
        self.toolbar.addAction(self.saveAction)
        self.toolbar.addSeparator()
        self.toolbar.addAction(self.printAction)
        self.toolbar.addAction(self.previewAction)
        self.toolbar.addSeparator()
        self.toolbar.addAction(self.cutAction)
        self.toolbar.addAction(self.copyAction)
        self.toolbar.addAction(self.pasteAction)
        self.toolbar.addAction(self.undoAction)
        self.toolbar.addAction(self.redoAction)
        self.formatbar.addAction(self.bulletAction)
        self.formatbar.addAction(self.numberedAction)
        self.formatbar.addAction(self.fontColor)
        self.formatbar.addAction(self.backColor)
        self.formatbar.addAction(self.boldAction)
        self.formatbar.addAction(self.italicAction)
        self.formatbar.addAction(self.underlAction)
        self.formatbar.addAction(self.strikeAction)
        self.formatbar.addAction(self.superAction)
        self.formatbar.addAction(self.subAction)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install all translatable UI strings (titles, tooltips, shortcuts)."""
        MainWindow.setWindowTitle(_translate("MainWindow", "Блокнот", None))
        self.menu_File.setTitle(_translate("MainWindow", "&File", None))
        self.menuEdit.setTitle(_translate("MainWindow", "Edit", None))
        self.menuView.setTitle(_translate("MainWindow", "View", None))
        self.menuHelp.setTitle(_translate("MainWindow", "Help", None))
        self.menuFormat.setTitle(_translate("MainWindow", "Format", None))
        self.toolbar.setWindowTitle(_translate("MainWindow", "Options", None))
        self.formatbar.setWindowTitle(_translate("MainWindow", "toolBar", None))
        self.closeAction.setText(_translate("MainWindow", "Close", None))
        self.closeAction.setToolTip(_translate("MainWindow", "Close Notepad", None))
        self.closeAction.setStatusTip(_translate("MainWindow", "Close app", None))
        self.closeAction.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
        self.newAction.setText(_translate("MainWindow", "New", None))
        self.newAction.setToolTip(_translate("MainWindow", "Create New File", None))
        self.newAction.setStatusTip(_translate("MainWindow", "Create New File", None))
        self.newAction.setShortcut(_translate("MainWindow", "Ctrl+N", None))
        self.openAction.setText(_translate("MainWindow", "Open", None))
        self.openAction.setToolTip(_translate("MainWindow", "Open a File", None))
        self.openAction.setStatusTip(_translate("MainWindow", "Open a File", None))
        self.openAction.setShortcut(_translate("MainWindow", "Ctrl+O", None))
        self.saveAction.setText(_translate("MainWindow", "Save", None))
        self.saveAction.setToolTip(_translate("MainWindow", "Save a File", None))
        self.saveAction.setStatusTip(_translate("MainWindow", "Save a File", None))
        self.saveAction.setShortcut(_translate("MainWindow", "Ctrl+S", None))
        self.printAction.setText(_translate("MainWindow", "Print document", None))
        self.printAction.setStatusTip(_translate("MainWindow", "Print document", None))
        self.printAction.setShortcut(_translate("MainWindow", "Ctrl+P", None))
        self.previewAction.setText(_translate("MainWindow", "Page View", None))
        self.previewAction.setStatusTip(_translate("MainWindow", "Preview page before printing", None))
        self.previewAction.setShortcut(_translate("MainWindow", "Ctrl+Shift+P", None))
        self.cutAction.setText(_translate("MainWindow", "Cut to clipboard", None))
        self.cutAction.setStatusTip(_translate("MainWindow", "Delete and copy text to clipboard", None))
        self.cutAction.setShortcut(_translate("MainWindow", "Ctrl+X", None))
        self.copyAction.setText(_translate("MainWindow", "Copy text to clipboard", None))
        self.copyAction.setStatusTip(_translate("MainWindow", "Copy text to clipboard", None))
        self.copyAction.setShortcut(_translate("MainWindow", "Ctrl+C", None))
        self.pasteAction.setText(_translate("MainWindow", "Paste from clipboard", None))
        self.pasteAction.setStatusTip(_translate("MainWindow", "Paste text from clipboard", None))
        self.pasteAction.setShortcut(_translate("MainWindow", "Ctrl+V", None))
        self.undoAction.setText(_translate("MainWindow", "Undo last action", None))
        self.undoAction.setStatusTip(_translate("MainWindow", "Undo last action", None))
        self.undoAction.setShortcut(_translate("MainWindow", "Ctrl+Z", None))
        self.redoAction.setText(_translate("MainWindow", "Redo last undone thing", None))
        self.redoAction.setStatusTip(_translate("MainWindow", "Redo last undone thing", None))
        self.redoAction.setShortcut(_translate("MainWindow", "Ctrl+Y", None))
        self.bulletAction.setText(_translate("MainWindow", "Insert bullet List", None))
        self.bulletAction.setStatusTip(_translate("MainWindow", "Insert bullet list", None))
        self.bulletAction.setShortcut(_translate("MainWindow", "Ctrl+Shift+B", None))
        self.numberedAction.setText(_translate("MainWindow", "Insert numbered List", None))
        self.numberedAction.setStatusTip(_translate("MainWindow", "Insert numbered list", None))
        self.numberedAction.setShortcut(_translate("MainWindow", "Ctrl+Shift+L", None))
        self.fontColor.setText(_translate("MainWindow", "Change font color", None))
        self.backColor.setText(_translate("MainWindow", "Change background color", None))
        self.boldAction.setText(_translate("MainWindow", "Bold", None))
        self.italicAction.setText(_translate("MainWindow", "Italic", None))
        self.underlAction.setText(_translate("MainWindow", "Underline", None))
        self.strikeAction.setText(_translate("MainWindow", "Strike-out", None))
        self.superAction.setText(_translate("MainWindow", "Superscript", None))
        self.subAction.setText(_translate("MainWindow", "Subscript", None))
import pad_rc
| 15,886 | 7 | 154 |
f795a73c4e4bf0ccc8db0a819c8392bf9e1cec50 | 90 | py | Python | responses.py | SEPalmiere/Desafio_Carrefour | eb0e960fba0a5d0e91c75b89c017ae9757b3a845 | [
"MIT"
] | null | null | null | responses.py | SEPalmiere/Desafio_Carrefour | eb0e960fba0a5d0e91c75b89c017ae9757b3a845 | [
"MIT"
] | null | null | null | responses.py | SEPalmiere/Desafio_Carrefour | eb0e960fba0a5d0e91c75b89c017ae9757b3a845 | [
"MIT"
] | null | null | null | from pydantic import BaseModel
| 18 | 31 | 0.7 | from pydantic import BaseModel
class TrendItem(BaseModel):
name: str
url: str | 0 | 35 | 24 |
fc8211a17e47355bd126d2ba86d0f527d04250ed | 6,025 | py | Python | delve/hooks.py | rivol/delve | 38036ea7f0de6a95f172e8a93fc5248fdea89d94 | [
"MIT"
] | null | null | null | delve/hooks.py | rivol/delve | 38036ea7f0de6a95f172e8a93fc5248fdea89d94 | [
"MIT"
] | null | null | null | delve/hooks.py | rivol/delve | 38036ea7f0de6a95f172e8a93fc5248fdea89d94 | [
"MIT"
] | null | null | null | from typing import Optional
import numpy as np
import torch
from delve.metrics import get_explained_variance, get_layer_saturation, get_eigenval_diversity_index, batch_cov, \
batch_mean
from delve.utils import get_layer_prop, get_training_state
def add_param_eigenvals(layer,
eig_vals: Optional[torch.Tensor] = None,
top_eigvals: int = 5,
n_iter: int = None):
"""Add layer parameter eigenvalues to writer."""
if eig_vals is None:
param_eig_vals = get_layer_prop(layer, 'param_eig_vals')
raise NotImplementedError("Not yet implemented.")
if n_iter is None:
n_iter = layer.forward_iter
param_eig_vals = eig_vals.cpu().detach().numpy()
top_eigvals = min(top_eigvals, len(param_eig_vals))
layer.writer.add_scalars('{}-parameter_spectrum'.format(layer.name), {
"param_eig_val{}".format(i): param_eig_vals[i]
for i in range(top_eigvals)
}, n_iter)
def add_spectrum(layer: torch.nn.Module,
eig_vals: Optional[list] = None,
top_eigvals: int = 5,
n_iter: int = None):
"""Add layer input eigenvalues to writer."""
training_state = get_training_state(layer)
if eig_vals is None:
eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
if n_iter is None:
n_iter = layer.forward_iter
layer.writer.add_scalars(
'{}-spectrum'.format(layer.name),
{"eig_val{}".format(i): eig_vals[i]
for i in range(top_eigvals)},
n_iter,
)
return eig_vals
def add_spectral_analysis(layer: torch.nn.Module,
eig_vals: np.ndarray,
n_iter: int,
top_eigvals: int = 5):
"""Add spectral analysis `layer` writer and display `top_eigvals`."""
training_state = get_training_state(layer)
if eig_vals is None:
eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
if n_iter is None:
n_iter = layer.forward_iter
add_eigen_dist(layer, eig_vals, n_iter)
add_neigen_dist(layer, eig_vals, n_iter)
if top_eigvals is not None:
add_spectrum(layer, eig_vals, top_eigvals, n_iter)
return eig_vals
| 36.515152 | 114 | 0.632697 | from typing import Optional
import numpy as np
import torch
from delve.metrics import get_explained_variance, get_layer_saturation, get_eigenval_diversity_index, batch_cov, \
batch_mean
from delve.utils import get_layer_prop, get_training_state
def add_eigen_dist(layer: torch.nn.Module,
eig_vals: Optional[np.ndarray] = None,
n_iter: Optional[int] = None):
if eig_vals is None:
eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
if n_iter is None:
n_iter = layer.forward_iter
layer.writer.add_histogram(
'{}-eigenvalue_distribution'.format(layer.name),
eig_vals,
global_step=n_iter,
bins=10,
)
return eig_vals
def add_neigen_dist(layer: torch.nn.Module,
eig_vals: Optional[np.ndarray] = None,
n_iter: Optional[int] = None):
if eig_vals is None:
eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
if n_iter is None:
n_iter = layer.forward_iter
eigval_total = sum(eig_vals)
normalized_eigval_dist = np.array(
[eigval / eigval_total for eigval in eig_vals])
layer.writer.add_histogram(
'{}-normalized_eigenvalue_distribution'.format(layer.name),
normalized_eigval_dist,
global_step=n_iter,
bins=10,
)
return eig_vals
def add_saturation_collection(base, layer: torch.nn.Module,
saturation_logs: dict):
base.writer.add_scalars('saturation',
saturation_logs,
global_step=layer.forward_iter)
def add_layer_saturation(layer: torch.nn.Module,
eig_vals: Optional[np.ndarray] = None,
n_iter: Optional[int] = None,
method='cumvar99'):
training_state = get_training_state(layer)
layer_type = layer._get_name().lower()
if eig_vals is None:
eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
if n_iter is None:
n_iter = layer.forward_iter
nr_eig_vals = get_explained_variance(eig_vals)
layer_name = layer.name + (f'_{layer.conv_method}'
if layer_type == 'conv2d' else '')
if method == 'cumvar99':
saturation = get_layer_saturation(nr_eig_vals, layer.out_features)
layer.writer.add_scalar(
f'{training_state}-{layer_name}-percent_saturation-{method}',
saturation, n_iter)
elif method == 'simpson_di':
saturation = get_eigenval_diversity_index(eig_vals)
layer.writer.add_scalar(
f'{training_state}-{layer_name}-percent_saturation-{method}',
saturation, n_iter)
elif method == 'all':
cumvar99_saturation = get_layer_saturation(nr_eig_vals,
layer.out_features)
layer.writer.add_scalar(
f'{training_state}-{layer_name}-percent_saturation-cumvar99',
cumvar99_saturation, n_iter)
simpson_di_saturation = get_eigenval_diversity_index(eig_vals)
saturation = simpson_di_saturation
layer.writer.add_scalar(
f'{training_state}-{layer_name}-percent_saturation-simpson_di',
simpson_di_saturation, n_iter)
layer.writer.add_scalar(
f'{training_state}-{layer_name}-intrinsic_dimensionality', nr_eig_vals,
n_iter)
return eig_vals, saturation
def add_param_eigenvals(layer,
eig_vals: Optional[torch.Tensor] = None,
top_eigvals: int = 5,
n_iter: int = None):
"""Add layer parameter eigenvalues to writer."""
if eig_vals is None:
param_eig_vals = get_layer_prop(layer, 'param_eig_vals')
raise NotImplementedError("Not yet implemented.")
if n_iter is None:
n_iter = layer.forward_iter
param_eig_vals = eig_vals.cpu().detach().numpy()
top_eigvals = min(top_eigvals, len(param_eig_vals))
layer.writer.add_scalars('{}-parameter_spectrum'.format(layer.name), {
"param_eig_val{}".format(i): param_eig_vals[i]
for i in range(top_eigvals)
}, n_iter)
def add_spectrum(layer: torch.nn.Module,
eig_vals: Optional[list] = None,
top_eigvals: int = 5,
n_iter: int = None):
"""Add layer input eigenvalues to writer."""
training_state = get_training_state(layer)
if eig_vals is None:
eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
if n_iter is None:
n_iter = layer.forward_iter
layer.writer.add_scalars(
'{}-spectrum'.format(layer.name),
{"eig_val{}".format(i): eig_vals[i]
for i in range(top_eigvals)},
n_iter,
)
return eig_vals
def add_covariance(layer: torch.nn.Module, activation_batch: np.ndarray,
n_iter: int):
layer.writer.add_scalar(
'{}-latent_representation_covariance'.format(layer.name),
batch_cov(activation_batch),
n_iter,
)
def add_mean(layer: torch.nn.Module, activations_batch: np.ndarray,
n_iter: int):
layer.writer.add_scalar(
'{}-latent_representation_mean'.format(layer.name),
batch_mean(activations_batch),
layer.forward_iter,
)
def add_spectral_analysis(layer: torch.nn.Module,
eig_vals: np.ndarray,
n_iter: int,
top_eigvals: int = 5):
"""Add spectral analysis `layer` writer and display `top_eigvals`."""
training_state = get_training_state(layer)
if eig_vals is None:
eig_vals = get_layer_prop(layer, f'{training_state}_eig_vals')
if n_iter is None:
n_iter = layer.forward_iter
add_eigen_dist(layer, eig_vals, n_iter)
add_neigen_dist(layer, eig_vals, n_iter)
if top_eigvals is not None:
add_spectrum(layer, eig_vals, top_eigvals, n_iter)
return eig_vals
| 3,609 | 0 | 138 |
ef96b6343425e150ff3bbeefd50cf26d2df42cea | 5,738 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/junos/junos_lldp_interfaces.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 17 | 2017-06-07T23:15:01.000Z | 2021-08-30T14:32:36.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/junos/junos_lldp_interfaces.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 9 | 2017-06-25T03:31:52.000Z | 2021-05-17T23:43:12.000Z | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/junos/junos_lldp_interfaces.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | 3 | 2018-05-26T21:31:22.000Z | 2019-09-28T17:00:45.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for junos_lldp_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: junos_lldp_interfaces
version_added: 2.9
short_description: Manage link layer discovery protocol (LLDP) attributes of interfaces on Juniper JUNOS devices
description:
- This module manages link layer discovery protocol (LLDP) attributes of interfaces on Juniper JUNOS devices.
author: Ganesh Nalawade (@ganeshrn)
options:
config:
description: The list of link layer discovery protocol interface attribute configurations
type: list
elements: dict
suboptions:
name:
description:
- Name of the interface LLDP needs to be configured on.
type: str
required: True
enabled:
description:
- This is a boolean value to control disabling of LLDP on the interface C(name)
type: bool
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
# Before state:
# -------------
# user@junos01# # show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
- name: Merge provided configuration with device configuration
junos_lldp_interfaces:
config:
- name: ge-0/0/1
- name: ge-0/0/2
enabled: False
state: merged
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2 {
# disable;
# }
# Using replaced
# Before state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2 {
# disable;
# }
- name: Replace provided configuration with device configuration
junos_lldp_interfaces:
config:
- name: ge-0/0/2
disable: False
- name: ge-0/0/3
enabled: False
state: replaced
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2;
# interface ge-0/0/3 {
# disable;
# }
# Using overridden
# Before state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2 {
# disable;
# }
- name: Override provided configuration with device configuration
junos_lldp_interfaces:
config:
- name: ge-0/0/2
enabled: False
state: overridden
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/2 {
# disable;
# }
# Using deleted
# Before state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2;
# interface ge-0/0/3 {
# disable;
# }
- name: Delete lldp interface configuration (this will not delete other lldp configuration)
junos_lldp_interfaces:
config:
- name: ge-0/0/1
- name: ge-0/0/3
state: deleted
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/2;
# interface ge-0/0/1;
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['xml 1', 'xml 2', 'xml 3']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.argspec.lldp_interfaces.lldp_interfaces import Lldp_interfacesArgs
from ansible.module_utils.network.junos.config.lldp_interfaces.lldp_interfaces import Lldp_interfaces
def main():
    """
    Main entry point for module execution.

    :returns: the result from module invocation
    """
    # Every state except ``deleted`` needs a ``config`` payload.
    states_needing_config = ('merged', 'replaced', 'overridden')
    required_if = [('state', state, ('config',)) for state in states_needing_config]

    module = AnsibleModule(
        argument_spec=Lldp_interfacesArgs.argument_spec,
        required_if=required_if,
        supports_check_mode=True,
    )

    # Delegate all state handling to the resource-module implementation.
    module.exit_json(**Lldp_interfaces(module).execute_module())
if __name__ == '__main__':
main()
| 24.947826 | 112 | 0.651969 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for junos_lldp_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: junos_lldp_interfaces
version_added: 2.9
short_description: Manage link layer discovery protocol (LLDP) attributes of interfaces on Juniper JUNOS devices
description:
- This module manages link layer discovery protocol (LLDP) attributes of interfaces on Juniper JUNOS devices.
author: Ganesh Nalawade (@ganeshrn)
options:
config:
description: The list of link layer discovery protocol interface attribute configurations
type: list
elements: dict
suboptions:
name:
description:
- Name of the interface LLDP needs to be configured on.
type: str
required: True
enabled:
description:
- This is a boolean value to control disabling of LLDP on the interface C(name)
type: bool
state:
description:
- The state of the configuration after module completion.
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
# Before state:
# -------------
# user@junos01# # show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
- name: Merge provided configuration with device configuration
junos_lldp_interfaces:
config:
- name: ge-0/0/1
- name: ge-0/0/2
enabled: False
state: merged
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2 {
# disable;
# }
# Using replaced
# Before state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2 {
# disable;
# }
- name: Replace provided configuration with device configuration
junos_lldp_interfaces:
config:
- name: ge-0/0/2
disable: False
- name: ge-0/0/3
enabled: False
state: replaced
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2;
# interface ge-0/0/3 {
# disable;
# }
# Using overridden
# Before state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2 {
# disable;
# }
- name: Override provided configuration with device configuration
junos_lldp_interfaces:
config:
- name: ge-0/0/2
enabled: False
state: overridden
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/2 {
# disable;
# }
# Using deleted
# Before state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/1;
# interface ge-0/0/2;
# interface ge-0/0/3 {
# disable;
# }
- name: Delete lldp interface configuration (this will not delete other lldp configuration)
junos_lldp_interfaces:
config:
- name: ge-0/0/1
- name: ge-0/0/3
state: deleted
# After state:
# -------------
# user@junos01# show protocols lldp
# management-address 10.1.1.1;
# advertisement-interval 10000;
# interface ge-0/0/2;
# interface ge-0/0/1;
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['xml 1', 'xml 2', 'xml 3']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.argspec.lldp_interfaces.lldp_interfaces import Lldp_interfacesArgs
from ansible.module_utils.network.junos.config.lldp_interfaces.lldp_interfaces import Lldp_interfaces
def main():
    """
    Main entry point for module execution

    :returns: the result from module invocation
    """
    # A ``config`` argument is required for every state except ``deleted``.
    required_if = [('state', 'merged', ('config',)),
                   ('state', 'replaced', ('config',)),
                   ('state', 'overridden', ('config',))]

    # supports_check_mode=True lets this module run under ``--check``.
    module = AnsibleModule(argument_spec=Lldp_interfacesArgs.argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)

    # The resource-module class performs the actual get/compare/push work.
    result = Lldp_interfaces(module).execute_module()
    module.exit_json(**result)
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
1caa42b43eb4a3f7919f42e0dcb2a3df79254087 | 3,250 | py | Python | scripts/perf/perf_kit/python.py | Hartorn/airflow | a79e2d4c4aa105f3fac5ae6a28e29af9cd572407 | [
"Apache-2.0"
] | 3 | 2015-08-25T13:56:44.000Z | 2020-03-21T10:26:58.000Z | scripts/perf/perf_kit/python.py | Hartorn/airflow | a79e2d4c4aa105f3fac5ae6a28e29af9cd572407 | [
"Apache-2.0"
] | 37 | 2020-07-21T07:50:02.000Z | 2022-03-29T22:31:28.000Z | scripts/perf/perf_kit/python.py | Hartorn/airflow | a79e2d4c4aa105f3fac5ae6a28e29af9cd572407 | [
"Apache-2.0"
] | 4 | 2020-07-17T14:02:28.000Z | 2022-02-23T04:29:58.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import cProfile
import datetime
import io
import os
import pstats
import signal
PYSPY_OUTPUT = os.environ.get("PYSPY_OUTPUT", "/files/pyspy/")
@contextlib.contextmanager
def pyspy():
    """
    Context manager that records a py-spy flame graph of the current process
    while the managed block runs, saving it under ``PYSPY_OUTPUT``.

    py-spy needs the ``process_vm_readv`` syscall, which Docker blocks by
    default; start the container with ``--cap-add SYS_PTRACE`` (or add
    ``cap_add: [SYS_PTRACE]`` to the compose service) or the profiler will
    fail with a permissions error even as root.
    """
    current_pid = str(os.getpid())
    timestamp = datetime.datetime.now().isoformat()
    report_path = f"{PYSPY_OUTPUT}/flame-{timestamp}-{current_pid}.html"
    # Launch py-spy in the background (P_NOWAIT) so it samples this process
    # concurrently with the managed block.
    profiler_pid = os.spawnlp(
        os.P_NOWAIT,
        "sudo", "sudo", "py-spy", "record", "--idle", "-o", report_path, "-p", current_pid,
    )
    try:
        yield
    finally:
        # Ask py-spy to stop sampling and flush its report.
        os.kill(profiler_pid, signal.SIGINT)
        print(f"Report saved to: (unknown)")
@contextlib.contextmanager
def profiled(print_callers=False):
    """
    Deterministic profiling context manager built on ``cProfile``.

    On exit, statistics sorted by cumulative time are printed to stdout.
    Set ``print_callers=True`` to list the callers of each function instead
    of the flat statistics table.
    """
    profiler = cProfile.Profile()
    profiler.enable()
    try:
        yield
    finally:
        profiler.disable()
        report = io.StringIO()
        stats = pstats.Stats(profiler, stream=report).sort_stats("cumulative")
        # Choose the report flavour, then render it into the buffer.
        (stats.print_callers if print_callers else stats.print_stats)()
        print(report.getvalue())
if __name__ == "__main__":

    def case():
        """Example workload: parse an Airflow example DAG with DagFileProcessor."""
        import airflow
        from airflow.jobs.scheduler_job import DagFileProcessor

        import logging

        log = logging.getLogger(__name__)
        processor = DagFileProcessor(dag_ids=[], log=log)
        dag_file = os.path.join(os.path.dirname(airflow.__file__), "example_dags", "example_complex.py")
        processor.process_file(file_path=dag_file, failure_callback_requests=[])

    # BUG FIX: ``case`` was called below but never defined, so this script
    # crashed with NameError before profiling anything; the workload function
    # is restored here.
    # Load modules
    case()

    # Example:
    print("PySpy:")
    with pyspy():
        case()

    # Example:
    print("cProfile")
    with profiled():
        case()
| 29.816514 | 110 | 0.678154 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import cProfile
import datetime
import io
import os
import pstats
import signal
PYSPY_OUTPUT = os.environ.get("PYSPY_OUTPUT", "/files/pyspy/")
@contextlib.contextmanager
def pyspy():
    """
    This decorator provide deterministic profiling. It generate and save flame graph to file. It uses``pyspy``
    internally.

    Running py-spy inside of a docker container will also usually bring up a permissions denied error
    even when running as root.

    This error is caused by docker restricting the process_vm_readv system call we are using. This can be
    overridden by setting --cap-add SYS_PTRACE when starting the docker container.

    Alternatively you can edit the docker-compose yaml file

    .. code-block:: yaml

        your_service:
          cap_add:
            - SYS_PTRACE

    In the case of Airflow Breeze, you should modify the ``scripts/perf/perf_kit/python.py`` file.
    """
    pid = str(os.getpid())
    suffix = datetime.datetime.now().isoformat()
    filename = f"{PYSPY_OUTPUT}/flame-{suffix}-{pid}.html"
    # Spawn py-spy under sudo in the background (P_NOWAIT) so it samples this
    # process while the managed block executes (--idle is a py-spy flag that
    # also records idle samples).
    pyspy_pid = os.spawnlp(
        os.P_NOWAIT, "sudo", "sudo", "py-spy", "record", "--idle", "-o", filename, "-p", pid
    )
    try:
        yield
    finally:
        # Stop the profiler; py-spy writes its report when interrupted.
        os.kill(pyspy_pid, signal.SIGINT)
        print(f"Report saved to: (unknown)")
@contextlib.contextmanager
def profiled(print_callers=False):
    """
    Context manager providing deterministic profiling via ``cProfile``.

    Statistics sorted by cumulative time are printed to stdout when the
    managed block exits.  Set ``print_callers=True`` to print the callers
    of each function instead of the flat statistics table.
    """
    pr = cProfile.Profile()
    pr.enable()
    try:
        yield
    finally:
        pr.disable()
        # Render the statistics into an in-memory buffer, then print once.
        s = io.StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats("cumulative")
        if print_callers:
            ps.print_callers()
        else:
            ps.print_stats()
        print(s.getvalue())
if __name__ == "__main__":

    def case():
        """Example workload: parse an Airflow example DAG with DagFileProcessor."""
        import airflow
        from airflow.jobs.scheduler_job import DagFileProcessor

        import logging

        log = logging.getLogger(__name__)
        processor = DagFileProcessor(dag_ids=[], log=log)
        dag_file = os.path.join(os.path.dirname(airflow.__file__), "example_dags", "example_complex.py")
        processor.process_file(file_path=dag_file, failure_callback_requests=[])

    # Load modules
    case()

    # Example:
    print("PySpy:")
    with pyspy():
        case()

    # Example:
    print("cProfile")
    with profiled():
        case()
| 387 | 0 | 27 |
43629a2b7ac06eb7d4e24167a366cd3b74a6202b | 6,747 | py | Python | mlmodels/transformer.py | tfmortie/mlmodels | 8f0401f2d97b30b89b3ee5edb94738c25dc6a1f1 | [
"MIT"
] | null | null | null | mlmodels/transformer.py | tfmortie/mlmodels | 8f0401f2d97b30b89b3ee5edb94738c25dc6a1f1 | [
"MIT"
] | null | null | null | mlmodels/transformer.py | tfmortie/mlmodels | 8f0401f2d97b30b89b3ee5edb94738c25dc6a1f1 | [
"MIT"
] | null | null | null | """
Transformer for machine translation (Attention Is All You Need, Vaswani et al. 2018)
Thomas Mortier
March 2022
"""
import torch
import numpy as np
class Transformer(torch.nn.Module):
""" Represents the main transformer class.
"""
| 35.698413 | 108 | 0.58752 | """
Transformer for machine translation (Attention Is All You Need, Vaswani et al. 2018)
Thomas Mortier
March 2022
"""
import torch
import numpy as np
class Transformer(torch.nn.Module):
""" Represents the main transformer class.
"""
def __init__(self, voc_s_size, voc_t_size, args):
super(Transformer, self).__init__()
self.args = args
# register the encoder and decoder
self.encoder = torch.nn.ModuleList([Encoder(args) for _ in range(args.ns)])
self.decoder = torch.nn.ModuleList([Decoder(args) for _ in range(args.ns)])
# register embedding layers for source->embedding and target->embedding
self.emb_voc_s = torch.nn.Embedding(voc_s_size, args.dm)
self.emb_voc_t = torch.nn.Embedding(voc_t_size, args.dm)
# our final layer which predicts target words
self.emb_dm = torch.nn.Linear(args.dm, voc_t_size)
# and our positional encoder
self.pe = PositionalEncoder(args)
# init all weights
def init_xavier(m):
if type(m) == torch.nn.Linear:
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.01)
self.apply(init_xavier)
def forward(self, s, t, pad_mask_s=None, pad_mask_t=None, mask=True):
# get embeddings for source and target sequence
s = self.emb_voc_s(s)
t = self.emb_voc_t(t)
# pass both through positional encoder
s = self.pe(s)
t = self.pe(t)
# pass through encoder network
e_o = s
for e in self.encoder:
e_o = e(e_o, pad_mask_s)
# and finally pass through decoder
d_o = t
for d in self.decoder:
d_o = d(d_o, e_o, pad_mask_t, mask)
d_o = self.emb_dm(d_o)
return d_o
def save(self):
print("Saving model to {0}...".format(self.args.mo))
torch.save(self, self.args.mo)
class PositionalEncoder(torch.nn.Module):
    """Adds fixed sinusoidal positional encodings to token embeddings.

    A table ``PE`` of shape (args.ml, args.dm) is precomputed once: even
    embedding dimensions hold ``sin(pos / denom)`` and odd dimensions hold
    ``cos(pos / denom)``.  Note: the denominator exponent uses the raw
    dimension index (``10000 ** (2*j/dm)``) rather than the paper's pair
    index (``2*(j//2)/dm``); that original behaviour is deliberately kept.
    """

    def __init__(self, args):
        super(PositionalEncoder, self).__init__()
        self.args = args
        # pos[i, j] = i (token position), broadcast across all dm columns.
        pos = torch.arange(0, args.ml).view(-1, 1).repeat((1, args.dm))
        denum = 10000 ** ((2 * torch.arange(0, args.dm).view(1, -1).repeat((args.ml, 1))) / args.dm)
        PE = pos / denum
        # c masks odd dimensions; s = 1 - c masks even dimensions.
        c = torch.arange(0, args.dm).view(1, -1).repeat((args.ml, 1)) % 2
        s = 1 - c
        # BUG FIX: the original computed cos(c*PE) + sin(s*PE), which equals
        # 1 + sin(PE) on even dimensions because cos(0) == 1, offsetting every
        # even dimension by a constant +1.  Apply the masks multiplicatively so
        # even dims get sin(PE) and odd dims get cos(PE).
        self.PE = c * torch.cos(PE) + s * torch.sin(PE)
        self.PE = self.PE.to(args.device)
        self.dropout = torch.nn.Dropout(p=args.d)

    def forward(self, x):
        # x: (batch, seq_len, dm) embeddings; add the first seq_len rows of PE.
        x = x + self.PE[:x.shape[1], :].unsqueeze(0).repeat(x.shape[0], 1, 1)
        x = self.dropout(x)
        return x
class Encoder(torch.nn.Module):
    """One transformer encoder layer: multi-head self-attention followed by a
    position-wise feed-forward network, each with dropout, a residual
    connection and layer normalisation (post-norm)."""

    def __init__(self, args):
        super(Encoder, self).__init__()
        self.args = args
        # Multi-head self-attention sub-layer.
        self.mha = MultiHeadAttention(args)
        # A single LayerNorm instance is shared by both sub-layers.
        self.norml = torch.nn.LayerNorm((args.dm))
        # Position-wise feed-forward network: dm -> dff -> dm.
        self.linear = torch.nn.Sequential(
            torch.nn.Linear(args.dm, args.dff),
            torch.nn.ReLU(),
            torch.nn.Linear(args.dff, args.dm)
        )
        self.dropout = torch.nn.Dropout(p=args.d)

    def forward(self, x, pad_mask):
        # Self-attention (query = key = value = x), never causally masked.
        attn = self.norml(x + self.dropout(self.mha(x, None, None, pad_mask, False)))
        # Feed-forward sub-layer with its own residual connection and norm.
        return self.norml(attn + self.dropout(self.linear(attn)))
class Decoder(torch.nn.Module):
    """One transformer decoder layer: (optionally causal) self-attention,
    encoder-decoder cross-attention, then a position-wise feed-forward
    network, each with dropout and a residual connection.  The final
    feed-forward residual is returned without a trailing LayerNorm, matching
    the original implementation."""

    def __init__(self, args):
        super(Decoder, self).__init__()
        self.args = args
        self.mha1 = MultiHeadAttention(args)  # self-attention over the target
        self.mha2 = MultiHeadAttention(args)  # cross-attention over encoder output
        self.norml = torch.nn.LayerNorm((args.dm))
        self.linear = torch.nn.Sequential(
            torch.nn.Linear(args.dm, args.dff),
            torch.nn.ReLU(),
            torch.nn.Linear(args.dff, args.dm)
        )
        self.dropout = torch.nn.Dropout(p=args.d)

    def forward(self, x, e, pad_mask, mask):
        # Self-attention over the target sequence (causal when mask=True).
        self_attn = self.norml(x + self.dropout(self.mha1(x, None, None, pad_mask, mask)))
        # Cross-attention: queries from the decoder, keys/values from e.
        cross_attn = self.norml(self_attn + self.dropout(self.mha2(self_attn, e, e, None, None)))
        # Feed-forward sub-layer; the residual sum is returned un-normalised.
        return cross_attn + self.dropout(self.linear(cross_attn))
class MultiHeadAttention(torch.nn.Module):
    """Multi-head attention: project Q/K/V, split the feature dimension into
    ``args.nh`` heads, run scaled dot-product attention per head, then mix
    the concatenated head outputs with a final linear layer."""

    def __init__(self, args):
        super(MultiHeadAttention, self).__init__()
        self.args = args
        # define projection matrices for Q,K and V
        self.w_q = torch.nn.Linear(args.dm,args.dm)
        self.w_k = torch.nn.Linear(args.dm,args.dm)
        self.w_v = torch.nn.Linear(args.dm,args.dm)
        assert (args.dv*args.nh)==args.dm, 'Number of heads must be multiple of dimensionality of K and V!'
        self.linear = torch.nn.Linear(args.dv*args.nh,args.dm)
        self.sldpa = ScaledLinearDotProductAttention(args)

    def forward(self, q, k=None, v=None, pad_mask=None, mask=False):
        # calculate projections
        # When k is None this is self-attention: Q, K and V all come from q.
        if k is None:
            proj_q = self.w_q(q)
            proj_k = self.w_k(q)
            proj_v = self.w_v(q)
        else:
            proj_q = self.w_q(q)
            proj_k = self.w_k(k)
            proj_v = self.w_v(v)
        # get chunks and perform attention
        # Split the last (feature) dimension into nh per-head chunks.
        proj_q_l = torch.chunk(proj_q,self.args.nh,2)
        proj_k_l = torch.chunk(proj_k,self.args.nh,2)
        proj_v_l = torch.chunk(proj_v,self.args.nh,2)
        o_l = []
        for i in range(self.args.nh):
            o_l.append(self.sldpa(proj_q_l[i], proj_k_l[i], proj_v_l[i], pad_mask, mask))
        # Concatenate head outputs and mix them with the output projection.
        o = torch.cat(o_l,dim=-1)
        o = self.linear(o)
        return o
class ScaledLinearDotProductAttention(torch.nn.Module):
    """Scaled dot-product attention for a single head:
    softmax(Q K^T / sqrt(dk)) V, with optional padding and causal masks."""

    def __init__(self, args):
        super(ScaledLinearDotProductAttention, self).__init__()
        self.args = args

    def forward(self, ql, kl, vl, pad_mask=None, mask=False):
        # Attention logits: batched Q @ K^T, scaled by sqrt(dk).
        scores = torch.bmm(ql, kl.transpose(1, 2)) / np.sqrt(self.args.dk)
        if pad_mask is not None:
            # Padded key positions get -inf so softmax assigns them zero weight.
            scores = scores.masked_fill(pad_mask.unsqueeze(-2) == 1, value=-torch.inf)
        if mask:
            # Causal mask: -inf above the diagonal blocks attention to future
            # tokens (additive form reduces memory usage, as in the original).
            scores = scores + torch.triu(torch.zeros_like(scores) - torch.inf, diagonal=1).to(scores.device)
        weights = torch.nn.functional.softmax(scores, dim=-1)
        return torch.bmm(weights, vl)
| 5,940 | 95 | 468 |
c1c9863209b8a41b524fa3d74d996afd2979a7e4 | 8,929 | py | Python | src/final_layer_manfeat.py | arllab123/cmu-ammml-project | a5e78841aea28069b1af88ab0aeacc955d6c14c2 | [
"MIT"
] | 14 | 2017-04-07T20:58:15.000Z | 2021-11-07T11:56:53.000Z | src/final_layer_manfeat.py | arllab123/cmu-ammml-project | a5e78841aea28069b1af88ab0aeacc955d6c14c2 | [
"MIT"
] | 1 | 2017-09-13T07:02:01.000Z | 2017-09-13T07:02:01.000Z | src/final_layer_manfeat.py | arllab123/cmu-ammml-project | a5e78841aea28069b1af88ab0aeacc955d6c14c2 | [
"MIT"
] | 6 | 2016-05-20T06:07:08.000Z | 2019-10-13T00:44:55.000Z | # coding=utf-8
# manfeat.py: classifier based on manually extracted features.
from __future__ import print_function
import argparse
import os
import cPickle
import itertools
from datetime import datetime
import numpy as np
from scipy.stats import mode
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from models.shallow import ShallowNet
SPLIT_DIR = "data/perssplit"
SPLITS = ["train", "val", "test"]
PICKLED_LABEL_FILE = "data/labels.pickle"
PERS_FIELD_NAME = "Answer.q7_persuasive"


# BUG FIX: this script calls ``eval_pred``/``eval_model`` below, but neither
# was defined anywhere, crashing with NameError.  Both helpers are restored
# here; the sklearn metric functions they use are imported at the top of the
# file.
def eval_pred(y, pred):
    """Return accuracy/precision/recall/F1 of ``pred`` against labels ``y`` as a dict."""
    acc = accuracy_score(y, pred)
    prec = precision_score(y, pred)
    rec = recall_score(y, pred)
    f1 = f1_score(y, pred)
    return {"acc": acc, "prec": prec, "rec": rec, "f1": f1}


def eval_model(model, batch_size, X, y):
    """Predict classes for ``X`` with ``model`` and return ``eval_pred``'s metric dict."""
    pred = model.predict_classes(X=X, batch_size=batch_size, verbose=0)
    return eval_pred(y, pred)
# ---- Command-line interface ------------------------------------------------
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--feats-file", type=str, required=True)
arg_parser.add_argument("--names-file", type=str, required=True)
arg_parser.add_argument("--sig-feats-file", type=str, default=None)
arg_parser.add_argument("--last-layer-file", type=str, default=None)
arg_parser.add_argument("--save-path", type=str, required=True)
arg_parser.add_argument("--train", type=str, choices=["true", "false"], required=True)
arg_parser.add_argument("--weights", type=str, default=None)
# The following hyper-parameters accept multiple values; all combinations are
# explored via itertools.product during the grid search below.
arg_parser.add_argument("--lr", type=float, nargs="+", required=True)
arg_parser.add_argument("--epochs", type=int, nargs="+", required=True)
arg_parser.add_argument("--dropout", type=float, nargs="+", required=True)
arg_parser.add_argument("--dense-layers", type=int, nargs="+", required=True)
arg_parser.add_argument("--dense-layer-units", type=int, nargs="+", required=True)
arg_parser.add_argument("--batch-size", type=int, nargs="+", required=True)
arg_parser.add_argument("--ensemble-size", type=int, required=True)
args = arg_parser.parse_args()

# ---- Load labels and per-split features ------------------------------------
# labels_map: video name -> dict of annotation fields (pickled upstream).
with open(PICKLED_LABEL_FILE, "rb") as lf:
    labels_map = cPickle.load(lf)

# name_splits: video name -> "train"/"val"/"test", from the split files.
name_splits = {}
Xs = {}
ys = {}
for split in SPLITS:
    with open(os.path.join(SPLIT_DIR, "{}.txt".format(split))) as split_file:
        for line in split_file:
            name_splits[line.strip()] = split
    Xs[split] = []
    ys[split] = []

# The names file and the features file are parallel: line i of each refers to
# the same video.  NOTE(review): ``map(float, ...)`` is used as a list — this
# file is Python 2 (``cPickle`` import); under Python 3 a list() would be needed.
with open(args.names_file) as man_feats_names_file, open(args.feats_file) as feats_file:
    for name_line, feat_line in zip(man_feats_names_file, feats_file):
        name = name_line.strip()
        feats = map(float, feat_line.strip().split(","))
        split = name_splits[name]
        Xs[split].append(feats)
        score = labels_map[name][PERS_FIELD_NAME]
        # Binarise the persuasiveness score at 5.5.
        if score >= 5.5:
            ys[split].append(1)
        else:
            ys[split].append(0)
for split in SPLITS:
    Xs[split] = np.array(Xs[split])
    ys[split] = np.array(ys[split])

# Optionally keep only the significant features listed (1-indexed) in the file.
if args.sig_feats_file is not None:
    print("Selecting significant features")
    with open(args.sig_feats_file) as sig_feats_file:
        sig_feats = [int(line.strip()) - 1 for line in sig_feats_file]
    for split in SPLITS:
        Xs[split] = Xs[split][:, sig_feats]
# ---- Grid search over hyper-parameters (training mode only) ----------------
if args.train == "true":
    date = str(datetime.now().date())
    base_save_dir = os.path.join(args.save_path, date)
    os.makedirs(base_save_dir)

    final_train_perfs = {}
    final_val_perfs = {}
    for lr, epochs, dropout, dense_layers, dense_layer_units, batch_size in itertools.product(args.lr, args.epochs, args.dropout, args.dense_layers, args.dense_layer_units, args.batch_size):
        params = lr, epochs, dropout, dense_layers, dense_layer_units, batch_size
        print("LR: {}, EPOCHS: {}, DROPOUT: {}, DENSE LAYERS: {}, DENSE_LAYER_UNITS: {}, BATCH_SIZE: {}".format(*params))
        save_path = os.path.join(base_save_dir, "lr{};epochs{};dropout{};dense_layers{};dense_layer_units{};batch_size{}".format(*params))
        os.makedirs(save_path)

        # NOTE(review): these ensemble prediction buffers are allocated but
        # never written to before the mode() calls below — the ensemble loop
        # appears to have been removed, so mode() would run over all-zeros.
        train_preds = np.zeros((Xs["train"].shape[0], args.ensemble_size))
        val_preds = np.zeros((Xs["val"].shape[0], args.ensemble_size))
        print("Building model")
        model = ShallowNet(Xs["train"].shape[1], dropout, dense_layers, dense_layer_units, args.weights)
        model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
        print("Model built")
        history = model.fit(
            X=Xs["train"],
            y=ys["train"],
            batch_size=batch_size,
            nb_epoch=epochs,
            verbose=1,
            validation_data=(Xs["val"], ys["val"]),
            shuffle=True,
            show_accuracy=True,
        )
        # Drop the output layer and recompile so predict() returns the
        # activations of the last hidden layer instead of class scores.
        model.layers.pop()
        model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
        train_pred = model.predict(X=Xs["train"], batch_size=batch_size, verbose=0)
        val_pred = model.predict(X=Xs["val"], batch_size=batch_size, verbose=0)
        # Dump last-hidden-layer features (train then val) as CSV rows.
        cou = 0
        with open(args.last_layer_file, "w") as layer_file:
            for vec in train_pred:
                layer_file.write(",".join([str(i) for i in vec]))
                layer_file.write("\n")
                cou += 1
            for vec in val_pred:
                layer_file.write(",".join([str(i) for i in vec]))
                layer_file.write("\n")
                cou += 1
        print(cou)
        # Matching labels file, one label per line, same row order.
        with open(args.last_layer_file+".labels", "w") as label_file:
            for value in ys["train"]:
                label_file.write(str(value))
                label_file.write("\n")
            for value in ys["val"]:
                label_file.write(str(value))
                label_file.write("\n")
        # NOTE(review): this hard exit makes everything below unreachable —
        # the script currently only extracts last-layer features.  Remove the
        # exit(1) to re-enable the grid search / ensemble / test evaluation.
        exit(1)
        final_train_pred = mode(train_preds, axis=1).mode
        final_val_pred = mode(val_preds, axis=1).mode
        final_train_perfs[params] = eval_pred(ys["train"], final_train_pred)
        final_val_perfs[params] = eval_pred(ys["val"], final_val_pred)
        print("final train perf: acc {}, f1 {}; final val perf: acc {}, f1 {}".format(final_train_perfs[params]["acc"], final_train_perfs[params]["f1"], final_val_perfs[params]["acc"], final_val_perfs[params]["f1"]))

    print("\n".join(map(lambda x: "{}: {}".format(x[0], x[1]), final_train_perfs.items())), file=open(os.path.join(base_save_dir, "final_train_perfs.txt"), "w"))
    print("\n".join(map(lambda x: "{}: {}".format(x[0], x[1]), final_val_perfs.items())), file=open(os.path.join(base_save_dir, "final_val_perfs.txt"), "w"))
    # Best configuration = highest validation F1.
    best_params = max(final_val_perfs, key=lambda x: final_val_perfs[x]["f1"])
else:
    # Evaluation mode: hard-coded best hyper-parameters from a prior search.
    best_params = (0.0001, 100, 0.5, 5, 5, 100)
best_lr, best_epochs, best_dropout, best_dense_layers, best_dense_layer_units, best_batch_size = best_params

# ---- Retrain the best configuration on train+val, evaluate on test ---------
if args.train == "true":
    print("Training ensemble on training and validation set")
    save_path = os.path.join(base_save_dir, "best_params")
    os.makedirs(save_path)
    preds = np.zeros((Xs["test"].shape[0], args.ensemble_size))
    for i in range(args.ensemble_size):
        print("Building model")
        model = ShallowNet(Xs["train"].shape[1], best_dropout, best_dense_layers, best_dense_layer_units, args.weights)
        model.compile(optimizer=Adam(lr=best_lr), loss="binary_crossentropy")
        print("Model built")
        history = model.fit(
            X=np.concatenate((Xs["train"], Xs["val"])),
            y=np.concatenate((ys["train"], ys["val"])),
            batch_size=best_batch_size,
            nb_epoch=best_epochs,
            verbose=1,
            shuffle=True,
            show_accuracy=True,
        )
        model.save_weights(os.path.join(save_path, "weights{}.h5".format(i)), overwrite=True)
        print("\n".join(map(str, history.history["acc"])), file=open(os.path.join(save_path, "train_accs{}.txt".format(i)), "w"))
        print("\n".join(map(str, history.history["loss"])), file=open(os.path.join(save_path, "train_losses{}.txt".format(i)), "w"))
        # NOTE(review): ``batch_size`` here is the leftover loop variable from
        # the grid search above, not ``best_batch_size`` — likely unintended.
        pred = model.predict_classes(X=Xs["test"], batch_size=batch_size, verbose=0)
        preds[:, i] = pred[:, 0]
    # Majority vote across the ensemble members.
    final_pred = mode(preds, axis=1).mode
    test_perf = eval_pred(ys["test"], final_pred)
else:
    print("Building model")
    model = ShallowNet(Xs["train"].shape[1], best_dropout, best_dense_layers, best_dense_layer_units, args.weights)
    model.compile(optimizer=Adam(lr=best_lr), loss="binary_crossentropy")
    print("Model built")
    test_perf = eval_model(model, best_batch_size, Xs["test"], ys["test"])

print("Test perf: {}".format(test_perf))
if args.train == "true":
    # Persist the chosen configuration and test metrics for the run.
    summary = {
        "best_lr": best_lr,
        "best_epochs": best_epochs,
        "best_dropout": best_dropout,
        "best_dense_layers": best_dense_layers,
        "best_dense_layer_units": best_dense_layer_units,
        "best_batch_size": best_batch_size,
        "ensemble_size": args.ensemble_size,
        "test_perf": test_perf
    }
    print("\n".join(map(lambda x: "{}: {}".format(x[0], x[1]), summary.items())), file=open(os.path.join(base_save_dir, "summary.txt"), "w"))
# manfeat.py: classifier based on manually extracted features.
from __future__ import print_function
import argparse
import os
import cPickle
import itertools
from datetime import datetime
import numpy as np
from scipy.stats import mode
from keras.optimizers import Adam
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from models.shallow import ShallowNet
SPLIT_DIR = "data/perssplit"
SPLITS = ["train", "val", "test"]
PICKLED_LABEL_FILE = "data/labels.pickle"
PERS_FIELD_NAME = "Answer.q7_persuasive"
def eval_pred(y, pred):
    """Return accuracy/precision/recall/F1 of predictions ``pred`` against labels ``y`` as a dict."""
    acc = accuracy_score(y, pred)
    prec = precision_score(y, pred)
    rec = recall_score(y, pred)
    f1 = f1_score(y, pred)
    return {"acc": acc, "prec": prec, "rec": rec, "f1": f1}
def eval_model(model, batch_size, X, y):
pred = model.predict_classes(X=X, batch_size=batch_size, verbose=0)
return eval_pred(y, pred)
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--feats-file", type=str, required=True)
arg_parser.add_argument("--names-file", type=str, required=True)
arg_parser.add_argument("--sig-feats-file", type=str, default=None)
arg_parser.add_argument("--last-layer-file", type=str, default=None)
arg_parser.add_argument("--save-path", type=str, required=True)
arg_parser.add_argument("--train", type=str, choices=["true", "false"], required=True)
arg_parser.add_argument("--weights", type=str, default=None)
arg_parser.add_argument("--lr", type=float, nargs="+", required=True)
arg_parser.add_argument("--epochs", type=int, nargs="+", required=True)
arg_parser.add_argument("--dropout", type=float, nargs="+", required=True)
arg_parser.add_argument("--dense-layers", type=int, nargs="+", required=True)
arg_parser.add_argument("--dense-layer-units", type=int, nargs="+", required=True)
arg_parser.add_argument("--batch-size", type=int, nargs="+", required=True)
arg_parser.add_argument("--ensemble-size", type=int, required=True)
args = arg_parser.parse_args()
with open(PICKLED_LABEL_FILE, "rb") as lf:
labels_map = cPickle.load(lf)
name_splits = {}
Xs = {}
ys = {}
for split in SPLITS:
with open(os.path.join(SPLIT_DIR, "{}.txt".format(split))) as split_file:
for line in split_file:
name_splits[line.strip()] = split
Xs[split] = []
ys[split] = []
with open(args.names_file) as man_feats_names_file, open(args.feats_file) as feats_file:
for name_line, feat_line in zip(man_feats_names_file, feats_file):
name = name_line.strip()
feats = map(float, feat_line.strip().split(","))
split = name_splits[name]
Xs[split].append(feats)
score = labels_map[name][PERS_FIELD_NAME]
if score >= 5.5:
ys[split].append(1)
else:
ys[split].append(0)
for split in SPLITS:
Xs[split] = np.array(Xs[split])
ys[split] = np.array(ys[split])
if args.sig_feats_file is not None:
print("Selecting significant features")
with open(args.sig_feats_file) as sig_feats_file:
sig_feats = [int(line.strip()) - 1 for line in sig_feats_file]
for split in SPLITS:
Xs[split] = Xs[split][:, sig_feats]
if args.train == "true":
date = str(datetime.now().date())
base_save_dir = os.path.join(args.save_path, date)
os.makedirs(base_save_dir)
final_train_perfs = {}
final_val_perfs = {}
for lr, epochs, dropout, dense_layers, dense_layer_units, batch_size in itertools.product(args.lr, args.epochs, args.dropout, args.dense_layers, args.dense_layer_units, args.batch_size):
params = lr, epochs, dropout, dense_layers, dense_layer_units, batch_size
print("LR: {}, EPOCHS: {}, DROPOUT: {}, DENSE LAYERS: {}, DENSE_LAYER_UNITS: {}, BATCH_SIZE: {}".format(*params))
save_path = os.path.join(base_save_dir, "lr{};epochs{};dropout{};dense_layers{};dense_layer_units{};batch_size{}".format(*params))
os.makedirs(save_path)
train_preds = np.zeros((Xs["train"].shape[0], args.ensemble_size))
val_preds = np.zeros((Xs["val"].shape[0], args.ensemble_size))
print("Building model")
model = ShallowNet(Xs["train"].shape[1], dropout, dense_layers, dense_layer_units, args.weights)
model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
print("Model built")
history = model.fit(
X=Xs["train"],
y=ys["train"],
batch_size=batch_size,
nb_epoch=epochs,
verbose=1,
validation_data=(Xs["val"], ys["val"]),
shuffle=True,
show_accuracy=True,
)
model.layers.pop()
model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
train_pred = model.predict(X=Xs["train"], batch_size=batch_size, verbose=0)
val_pred = model.predict(X=Xs["val"], batch_size=batch_size, verbose=0)
cou = 0
with open(args.last_layer_file, "w") as layer_file:
for vec in train_pred:
layer_file.write(",".join([str(i) for i in vec]))
layer_file.write("\n")
cou += 1
for vec in val_pred:
layer_file.write(",".join([str(i) for i in vec]))
layer_file.write("\n")
cou += 1
print(cou)
with open(args.last_layer_file+".labels", "w") as label_file:
for value in ys["train"]:
label_file.write(str(value))
label_file.write("\n")
for value in ys["val"]:
label_file.write(str(value))
label_file.write("\n")
exit(1)
final_train_pred = mode(train_preds, axis=1).mode
final_val_pred = mode(val_preds, axis=1).mode
final_train_perfs[params] = eval_pred(ys["train"], final_train_pred)
final_val_perfs[params] = eval_pred(ys["val"], final_val_pred)
print("final train perf: acc {}, f1 {}; final val perf: acc {}, f1 {}".format(final_train_perfs[params]["acc"], final_train_perfs[params]["f1"], final_val_perfs[params]["acc"], final_val_perfs[params]["f1"]))
print("\n".join(map(lambda x: "{}: {}".format(x[0], x[1]), final_train_perfs.items())), file=open(os.path.join(base_save_dir, "final_train_perfs.txt"), "w"))
print("\n".join(map(lambda x: "{}: {}".format(x[0], x[1]), final_val_perfs.items())), file=open(os.path.join(base_save_dir, "final_val_perfs.txt"), "w"))
best_params = max(final_val_perfs, key=lambda x: final_val_perfs[x]["f1"])
else:
best_params = (0.0001, 100, 0.5, 5, 5, 100)
best_lr, best_epochs, best_dropout, best_dense_layers, best_dense_layer_units, best_batch_size = best_params
if args.train == "true":
print("Training ensemble on training and validation set")
save_path = os.path.join(base_save_dir, "best_params")
os.makedirs(save_path)
preds = np.zeros((Xs["test"].shape[0], args.ensemble_size))
for i in range(args.ensemble_size):
print("Building model")
model = ShallowNet(Xs["train"].shape[1], best_dropout, best_dense_layers, best_dense_layer_units, args.weights)
model.compile(optimizer=Adam(lr=best_lr), loss="binary_crossentropy")
print("Model built")
history = model.fit(
X=np.concatenate((Xs["train"], Xs["val"])),
y=np.concatenate((ys["train"], ys["val"])),
batch_size=best_batch_size,
nb_epoch=best_epochs,
verbose=1,
shuffle=True,
show_accuracy=True,
)
model.save_weights(os.path.join(save_path, "weights{}.h5".format(i)), overwrite=True)
print("\n".join(map(str, history.history["acc"])), file=open(os.path.join(save_path, "train_accs{}.txt".format(i)), "w"))
print("\n".join(map(str, history.history["loss"])), file=open(os.path.join(save_path, "train_losses{}.txt".format(i)), "w"))
pred = model.predict_classes(X=Xs["test"], batch_size=batch_size, verbose=0)
preds[:, i] = pred[:, 0]
final_pred = mode(preds, axis=1).mode
test_perf = eval_pred(ys["test"], final_pred)
else:
print("Building model")
model = ShallowNet(Xs["train"].shape[1], best_dropout, best_dense_layers, best_dense_layer_units, args.weights)
model.compile(optimizer=Adam(lr=best_lr), loss="binary_crossentropy")
print("Model built")
test_perf = eval_model(model, best_batch_size, Xs["test"], ys["test"])
print("Test perf: {}".format(test_perf))
if args.train == "true":
summary = {
"best_lr": best_lr,
"best_epochs": best_epochs,
"best_dropout": best_dropout,
"best_dense_layers": best_dense_layers,
"best_dense_layer_units": best_dense_layer_units,
"best_batch_size": best_batch_size,
"ensemble_size": args.ensemble_size,
"test_perf": test_perf
}
print("\n".join(map(lambda x: "{}: {}".format(x[0], x[1]), summary.items())), file=open(os.path.join(base_save_dir, "summary.txt"), "w"))
| 312 | 0 | 46 |
0e12a49e1c1ec998720fbbc6e84c374e8a3914e4 | 2,030 | py | Python | Python/Leaderboard/leaderboard.py | MariaMich/Hacktoberfest2019-2 | a1a1756fa4594ab9965405e0361a5125b1d4dd48 | [
"MIT"
] | null | null | null | Python/Leaderboard/leaderboard.py | MariaMich/Hacktoberfest2019-2 | a1a1756fa4594ab9965405e0361a5125b1d4dd48 | [
"MIT"
] | null | null | null | Python/Leaderboard/leaderboard.py | MariaMich/Hacktoberfest2019-2 | a1a1756fa4594ab9965405e0361a5125b1d4dd48 | [
"MIT"
] | null | null | null | import pandas as pd
from datetime import date,datetime
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
class Leaderboard:
'''
To maintain the leaderboard of the models
'''
def plot(self):
'''
Plot model accuracy vs time according to the rank
in the leaderboard.
'''
df = pd.read_excel('op_lbd.xlsx', index_col=0)
df1 = df.loc[df['SOTA'] == False]
df = df.loc[df['SOTA'] == True]
# df = df.drop_duplicates('Rank')
plt.plot(df['Date'], df['Accuracy'], '.-')
plt.plot(df1['Date'], df1['Accuracy'], 'co')
plt.show()
def leaderboardPush(self, model_name, accuracy, n_params):
'''
This will push a new model to the leaderboard.
@param model_name : Name of the model
@param accuracy : Accuracy of the model
@param n_param : Nos of parameter of the model
@returns : None
'''
assert accuracy > 0.0 , "Accuracy can't be negetive"
assert accuracy < 100.0, "Accuracy can't be greater than 100.0%"
assert n_params > 0, "Number of parameters can't be zero or less"
assert type(n_params) == int, "Number of parameters can't be fraction"
items = {
"Model_name": model_name,
"Accuracy" : accuracy,
"Parameters": n_params,
"Date" : date.today(),
"SOTA" : False
}
df = pd.read_excel('lbd.xlsx', index_col=0)
sota = df.Accuracy.max()
if accuracy > sota:
items["SOTA"] = True
df = df.append(items, ignore_index=True)
df.to_excel('lbd.xlsx')
df['Rank'] = df['Accuracy'].rank(ascending=False, method='min')
df = df.sort_values('Rank', ascending=True)
#Remove this after in the PC
print(df)
df.to_excel('op_lbd.xlsx')
return | 33.833333 | 78 | 0.570443 | import pandas as pd
from datetime import date,datetime
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
class Leaderboard:
'''
To maintain the leaderboard of the models
'''
def __init__(self):
pass
def plot(self):
'''
Plot model accuracy vs time according to the rank
in the leaderboard.
'''
df = pd.read_excel('op_lbd.xlsx', index_col=0)
df1 = df.loc[df['SOTA'] == False]
df = df.loc[df['SOTA'] == True]
# df = df.drop_duplicates('Rank')
plt.plot(df['Date'], df['Accuracy'], '.-')
plt.plot(df1['Date'], df1['Accuracy'], 'co')
plt.show()
def leaderboardPush(self, model_name, accuracy, n_params):
'''
This will push a new model to the leaderboard.
@param model_name : Name of the model
@param accuracy : Accuracy of the model
@param n_param : Nos of parameter of the model
@returns : None
'''
assert accuracy > 0.0 , "Accuracy can't be negetive"
assert accuracy < 100.0, "Accuracy can't be greater than 100.0%"
assert n_params > 0, "Number of parameters can't be zero or less"
assert type(n_params) == int, "Number of parameters can't be fraction"
items = {
"Model_name": model_name,
"Accuracy" : accuracy,
"Parameters": n_params,
"Date" : date.today(),
"SOTA" : False
}
df = pd.read_excel('lbd.xlsx', index_col=0)
sota = df.Accuracy.max()
if accuracy > sota:
items["SOTA"] = True
df = df.append(items, ignore_index=True)
df.to_excel('lbd.xlsx')
df['Rank'] = df['Accuracy'].rank(ascending=False, method='min')
df = df.sort_values('Rank', ascending=True)
#Remove this after in the PC
print(df)
df.to_excel('op_lbd.xlsx')
return | 11 | 0 | 26 |
f9feaaa43e1860ee0319a6cd5189e38dd5c151fc | 3,553 | py | Python | Gluon/trainer.py | osmr/utct | 7cf32823a83812d2eadce66d3effea6bee538109 | [
"MIT"
] | null | null | null | Gluon/trainer.py | osmr/utct | 7cf32823a83812d2eadce66d3effea6bee538109 | [
"MIT"
] | null | null | null | Gluon/trainer.py | osmr/utct | 7cf32823a83812d2eadce66d3effea6bee538109 | [
"MIT"
] | null | null | null | import logging
from utct.common.trainer_template import TrainerTemplate
import mxnet as mx
from mxnet import gluon, autograd
class Trainer(TrainerTemplate):
"""
Class, which provides training process under Gluon/MXNet framework.
Parameters:
----------
model : object
instance of Model class with graph of CNN
optimizer : object
instance of Optimizer class with CNN optimizer
data_source : object
instance of DataSource class with training/validation iterators
saver : object
instance of Saver class with information about stored files
ctx : object
instance of MXNet context
"""
def _hyper_train_target_sub(self, **kwargs):
"""
Calling single training procedure for specific hyper parameters from hyper optimizer.
"""
if self.saver.log_filename:
fh = logging.FileHandler(self.saver.log_filename)
self.logger.addHandler(fh)
self.logger.info("Training with parameters: {}".format(kwargs))
train_loader, val_loader = self.data_source()
net = self.model()
net.initialize(
mx.init.Xavier(magnitude=2.24),
ctx=self.ctx)
trainer = self.optimizer(
params=net.collect_params(),
**kwargs)
metric = mx.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
log_interval = 1
for epoch in range(self.num_epoch):
metric.reset()
for i, (data, label) in enumerate(train_loader):
# Copy data to ctx if necessary
data = data.as_in_context(self.ctx)
label = label.as_in_context(self.ctx)
# Start recording computation graph with record() section.
# Recorded graphs can then be differentiated with backward.
with autograd.record():
output = net(data)
L = loss(output, label)
L.backward()
# take a gradient step with batch_size equal to data.shape[0]
trainer.step(data.shape[0])
# update metric at last.
metric.update([label], [output])
if i % log_interval == 0 and i > 0:
name, acc = metric.get()
print('[Epoch %d Batch %d] Training: %s=%f' % (epoch, i, name, acc))
name, acc = metric.get()
print('[Epoch %d] Training: %s=%f' % (epoch, name, acc))
name, val_acc = self._test(
model=net,
val_data=val_loader,
ctx=self.ctx)
print('[Epoch %d] Validation: %s=%f' % (epoch, name, val_acc))
if self.saver.log_filename:
self.logger.removeHandler(fh)
fh.close()
best_value = 0.0
return best_value
@staticmethod
| 30.62931 | 93 | 0.547425 | import logging
from utct.common.trainer_template import TrainerTemplate
import mxnet as mx
from mxnet import gluon, autograd
class Trainer(TrainerTemplate):
"""
Class, which provides training process under Gluon/MXNet framework.
Parameters:
----------
model : object
instance of Model class with graph of CNN
optimizer : object
instance of Optimizer class with CNN optimizer
data_source : object
instance of DataSource class with training/validation iterators
saver : object
instance of Saver class with information about stored files
ctx : object
instance of MXNet context
"""
def __init__(self,
model,
optimizer,
data_source,
saver,
ctx):
super(Trainer, self).__init__(
model,
optimizer,
data_source,
saver)
self.ctx = ctx
def _hyper_train_target_sub(self, **kwargs):
"""
Calling single training procedure for specific hyper parameters from hyper optimizer.
"""
if self.saver.log_filename:
fh = logging.FileHandler(self.saver.log_filename)
self.logger.addHandler(fh)
self.logger.info("Training with parameters: {}".format(kwargs))
train_loader, val_loader = self.data_source()
net = self.model()
net.initialize(
mx.init.Xavier(magnitude=2.24),
ctx=self.ctx)
trainer = self.optimizer(
params=net.collect_params(),
**kwargs)
metric = mx.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
log_interval = 1
for epoch in range(self.num_epoch):
metric.reset()
for i, (data, label) in enumerate(train_loader):
# Copy data to ctx if necessary
data = data.as_in_context(self.ctx)
label = label.as_in_context(self.ctx)
# Start recording computation graph with record() section.
# Recorded graphs can then be differentiated with backward.
with autograd.record():
output = net(data)
L = loss(output, label)
L.backward()
# take a gradient step with batch_size equal to data.shape[0]
trainer.step(data.shape[0])
# update metric at last.
metric.update([label], [output])
if i % log_interval == 0 and i > 0:
name, acc = metric.get()
print('[Epoch %d Batch %d] Training: %s=%f' % (epoch, i, name, acc))
name, acc = metric.get()
print('[Epoch %d] Training: %s=%f' % (epoch, name, acc))
name, val_acc = self._test(
model=net,
val_data=val_loader,
ctx=self.ctx)
print('[Epoch %d] Validation: %s=%f' % (epoch, name, val_acc))
if self.saver.log_filename:
self.logger.removeHandler(fh)
fh.close()
best_value = 0.0
return best_value
@staticmethod
def _test(model,
val_data,
ctx):
metric = mx.metric.Accuracy()
for data, label in val_data:
data = data.as_in_context(ctx)
label = label.as_in_context(ctx)
output = model(data)
metric.update([label], [output])
return metric.get()
| 583 | 0 | 52 |
26dd0fb076f3664e8d6a8cf3141555a24eecba80 | 1,963 | py | Python | crawlster/helpers/log.py | vladcalin/crawlster | 4cabc4e891a4491a9f06b59cab929c62dc501757 | [
"MIT"
] | 1 | 2017-01-13T23:14:31.000Z | 2017-01-13T23:14:31.000Z | crawlster/helpers/log.py | vladcalin/crawlster | 4cabc4e891a4491a9f06b59cab929c62dc501757 | [
"MIT"
] | 5 | 2017-03-19T09:29:23.000Z | 2017-11-07T11:24:22.000Z | crawlster/helpers/log.py | vladcalin/crawlster | 4cabc4e891a4491a9f06b59cab929c62dc501757 | [
"MIT"
] | 1 | 2017-03-19T13:48:16.000Z | 2017-03-19T13:48:16.000Z | import logging
import colorlog
import sys
from crawlster.config import ChoiceOption
from crawlster.helpers.base import BaseHelper
class LoggingHelper(BaseHelper):
"""Logging helper that handles all the crawling logging.
Must provide the following methods:
- initialize() (inherited from BaseHelper)
- ``debug(*args, **kwargs)`` compatible with the :py:mod:`logging`
interface
- same for ``info``, ``warning``, ``error`` and ``critical``
"""
name = 'log'
valid_log_levels = ('debug', 'info', 'warning', 'error', 'critical')
config_options = {
'log.level': ChoiceOption(valid_log_levels, default='info')
}
DEFAULT_FORMAT = "%(log_color)s%(levelname)s - %(name)s - %(message)s"
def initialize(self):
"""Creates and initializes the logger"""
level = self.parse_level(self.config.get('log.level'))
logger = logging.getLogger('crawlster')
stream_handler = self.make_stream_handler(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
self.logger = logger
def make_stream_handler(self, level):
"""Creates a colored stream handler that writes to STDOUT"""
colored_formatter = colorlog.ColoredFormatter(self.DEFAULT_FORMAT)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(colored_formatter)
stream_handler.setLevel(level)
return stream_handler
def __getattr__(self, item):
"""Delegates method calls to the wrapped logger"""
if item not in ('debug', 'info', 'warning', 'error', 'critical'):
raise AttributeError()
return getattr(self.logger, item)
def parse_level(self, level_name):
"""Converts human readable level name to logging constants"""
return getattr(logging, level_name.upper())
| 32.716667 | 74 | 0.665818 | import logging
import colorlog
import sys
from crawlster.config import ChoiceOption
from crawlster.helpers.base import BaseHelper
class LoggingHelper(BaseHelper):
"""Logging helper that handles all the crawling logging.
Must provide the following methods:
- initialize() (inherited from BaseHelper)
- ``debug(*args, **kwargs)`` compatible with the :py:mod:`logging`
interface
- same for ``info``, ``warning``, ``error`` and ``critical``
"""
name = 'log'
valid_log_levels = ('debug', 'info', 'warning', 'error', 'critical')
config_options = {
'log.level': ChoiceOption(valid_log_levels, default='info')
}
DEFAULT_FORMAT = "%(log_color)s%(levelname)s - %(name)s - %(message)s"
def __init__(self):
super(LoggingHelper, self).__init__()
self.logger = None
def initialize(self):
"""Creates and initializes the logger"""
level = self.parse_level(self.config.get('log.level'))
logger = logging.getLogger('crawlster')
stream_handler = self.make_stream_handler(level)
logger.addHandler(stream_handler)
logger.setLevel(level)
self.logger = logger
def make_stream_handler(self, level):
"""Creates a colored stream handler that writes to STDOUT"""
colored_formatter = colorlog.ColoredFormatter(self.DEFAULT_FORMAT)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(colored_formatter)
stream_handler.setLevel(level)
return stream_handler
def __getattr__(self, item):
"""Delegates method calls to the wrapped logger"""
if item not in ('debug', 'info', 'warning', 'error', 'critical'):
raise AttributeError()
return getattr(self.logger, item)
def parse_level(self, level_name):
"""Converts human readable level name to logging constants"""
return getattr(logging, level_name.upper())
| 71 | 0 | 27 |
60877868e31ababe742144d2265b76b74c6b212b | 8,185 | py | Python | core/models/cnn.py | JeremieMelo/ADEPT | f79f518197798735cb684b373e11cdcc8a80d872 | [
"MIT"
] | 5 | 2022-02-26T09:14:47.000Z | 2022-03-20T22:57:06.000Z | core/models/cnn.py | JeremieMelo/ADEPT | f79f518197798735cb684b373e11cdcc8a80d872 | [
"MIT"
] | null | null | null | core/models/cnn.py | JeremieMelo/ADEPT | f79f518197798735cb684b373e11cdcc8a80d872 | [
"MIT"
] | null | null | null | """
Description:
Author: Jiaqi Gu (jqgu@utexas.edu)
Date: 2021-09-28 04:26:34
LastEditors: Jiaqi Gu (jqgu@utexas.edu)
LastEditTime: 2021-09-28 04:33:35
"""
from collections import OrderedDict
from typing import Dict, List, Union
import torch
from core.models.layers.activation import ReLUN
from core.models.layers.super_conv2d import SuperBlockConv2d
from core.models.layers.super_linear import SuperBlockLinear
from torch import Tensor, nn
from torch.types import Device, _size
from .super_model_base import SuperModel_CLASS_BASE
__all__ = ["SuperOCNN", "LinearBlock", "ConvBlock"]
if __name__ == "__main__":
pass
| 31.848249 | 99 | 0.569334 | """
Description:
Author: Jiaqi Gu (jqgu@utexas.edu)
Date: 2021-09-28 04:26:34
LastEditors: Jiaqi Gu (jqgu@utexas.edu)
LastEditTime: 2021-09-28 04:33:35
"""
from collections import OrderedDict
from typing import Dict, List, Union
import torch
from core.models.layers.activation import ReLUN
from core.models.layers.super_conv2d import SuperBlockConv2d
from core.models.layers.super_linear import SuperBlockLinear
from torch import Tensor, nn
from torch.types import Device, _size
from .super_model_base import SuperModel_CLASS_BASE
__all__ = ["SuperOCNN", "LinearBlock", "ConvBlock"]
class LinearBlock(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
mini_block: int = 4,
bias: bool = False,
w_bit: int = 16,
in_bit: int = 16,
v_max=2.0,
photodetect: bool = True,
device: Device = torch.device("cuda"),
activation: bool = True,
act_thres: float = 6.0,
verbose: bool = False,
) -> None:
super().__init__()
self.linear = SuperBlockLinear(
in_features,
out_features,
mini_block=mini_block,
bias=bias,
w_bit=w_bit,
in_bit=in_bit,
v_max=v_max,
photodetect=photodetect,
device=device,
# verbose=verbose,
)
self.activation = ReLUN(act_thres, inplace=True) if activation else None
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.activation is not None:
x = self.activation(x)
return x
class ConvBlock(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int = 3,
mini_block: int = 8,
bias: bool = False,
stride: Union[int, _size] = 1,
padding: Union[int, _size] = 0,
w_bit: int = 16,
in_bit: int = 16,
v_max=2.0,
photodetect: bool = True,
device: Device = torch.device("cuda"),
activation: bool = True,
act_thres: float = 6.0,
bn_affine: bool = False,
verbose: bool = False,
) -> None:
super().__init__()
self.conv = SuperBlockConv2d(
in_channels,
out_channels,
kernel_size,
mini_block=mini_block,
bias=bias,
stride=stride,
padding=padding,
w_bit=w_bit,
in_bit=in_bit,
v_max=v_max,
photodetect=photodetect,
device=device,
# verbose=verbose,
)
self.bn = nn.BatchNorm2d(out_channels, affine=bn_affine, track_running_stats=bn_affine)
self.activation = ReLUN(act_thres, inplace=True) if activation else None
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
if self.activation is not None:
x = self.activation(x)
return x
class SuperOCNN(SuperModel_CLASS_BASE):
_conv_linear = (SuperBlockConv2d, SuperBlockLinear)
def __init__(
self,
img_height: int,
img_width: int,
in_channels: int,
num_classes: int,
kernel_list: List[int] = [16],
kernel_size_list: List[int] = [3],
stride_list: List[int] = [1],
padding_list: List[int] = [1],
hidden_list: List[int] = [],
block_list: List[int] = [4, 4],
pool_out_size=5,
in_bit: int = 32,
w_bit: int = 32,
norm: str = "bn",
act_thres: float = 6,
bias: bool = False,
v_max=2.0,
photodetect: bool = True,
super_layer_name: str = "ps_dc_cr",
super_layer_config: Dict = {},
bn_affine: bool = False,
device: Device = torch.device("cuda"),
verbose: bool = False,
):
super().__init__(
super_layer_name=super_layer_name, super_layer_config=super_layer_config, device=device
)
self.img_height = img_height
self.img_width = img_width
self.in_channels = in_channels
self.kernel_list = kernel_list
self.kernel_size_list = kernel_size_list
self.block_list = block_list
self.hidden_list = hidden_list
self.stride_list = stride_list
self.padding_list = padding_list
self.pool_out_size = pool_out_size
self.num_classes = num_classes
self.norm = None if norm.lower() == "none" else norm
self.act_thres = act_thres
self.w_bit = w_bit
self.in_bit = in_bit
self.v_max = v_max
self.photodetect = photodetect
self.bn_affine = bn_affine
self.bias = bias
self.device = device
self.verbose = verbose
self.build_layers()
self.reset_parameters()
self.build_super_layer(super_layer_name, arch=super_layer_config, device=device)
def build_layers(self) -> None:
self.features = OrderedDict()
for idx, out_channels in enumerate(self.kernel_list, 0):
layer_name = "conv" + str(idx + 1)
in_channels = self.in_channels if (idx == 0) else self.kernel_list[idx - 1]
self.features[layer_name] = ConvBlock(
in_channels,
out_channels,
self.kernel_size_list[idx],
stride=self.stride_list[idx],
padding=self.padding_list[idx],
mini_block=self.block_list[idx],
bias=self.bias,
in_bit=self.in_bit,
w_bit=self.w_bit,
v_max=self.v_max,
photodetect=self.photodetect,
device=self.device,
activation=True,
act_thres=self.act_thres,
bn_affine=self.bn_affine,
verbose=self.verbose,
)
self.features = nn.Sequential(self.features)
if self.pool_out_size > 0:
self.pool2d = nn.AdaptiveAvgPool2d(self.pool_out_size)
feature_size = self.kernel_list[-1] * self.pool_out_size * self.pool_out_size
else:
self.pool2d = None
img_height, img_width = self.img_height, self.img_width
for layer in self.modules():
if isinstance(layer, self._conv):
img_height, img_width = layer.get_output_dim(img_height, img_width)
feature_size = img_height * img_width * self.kernel_list[-1]
self.classifier = OrderedDict()
for idx, hidden_dim in enumerate(self.hidden_list, 0):
layer_name = "fc" + str(idx + 1)
in_features = feature_size if idx == 0 else self.hidden_list[idx - 1]
out_features = hidden_dim
self.classifier[layer_name] = LinearBlock(
in_features,
out_features,
mini_block=self.block_list[idx],
bias=self.bias,
w_bit=self.w_bit,
in_bit=self.in_bit,
v_max=self.v_max,
photodetect=self.photodetect,
device=self.device,
activation=True,
act_thres=self.act_thres,
verbose=self.verbose,
)
layer_name = "fc" + str(len(self.hidden_list) + 1)
self.classifier[layer_name] = LinearBlock(
self.hidden_list[-1] if len(self.hidden_list) > 0 else feature_size,
self.num_classes,
mini_block=self.block_list[-1],
bias=self.bias,
w_bit=self.w_bit,
in_bit=self.in_bit,
v_max=self.v_max,
photodetect=self.photodetect,
device=self.device,
activation=False,
act_thres=self.act_thres,
verbose=self.verbose,
)
self.classifier = nn.Sequential(self.classifier)
def forward(self, x: Tensor) -> Tensor:
x = self.features(x)
if self.pool2d is not None:
x = self.pool2d(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
if __name__ == "__main__":
pass
| 7,213 | 169 | 175 |
6fac5c8d11237bf08e21d4dd51ad71ec17674c71 | 243 | py | Python | services/user_service.py | yezz123/JeffQL | a03e5838b8f655321955664841a952f785210b37 | [
"MIT"
] | 21 | 2021-06-16T11:07:31.000Z | 2022-01-29T22:12:55.000Z | services/user_service.py | yezz123/JeffQL | a03e5838b8f655321955664841a952f785210b37 | [
"MIT"
] | 2 | 2021-08-30T16:27:01.000Z | 2021-10-03T01:07:27.000Z | services/user_service.py | yezz123/JeffQL | a03e5838b8f655321955664841a952f785210b37 | [
"MIT"
] | 6 | 2021-06-16T13:42:37.000Z | 2022-01-29T22:12:56.000Z | import config
from auth import auth_handler
| 20.25 | 57 | 0.716049 | import config
from auth import auth_handler
def user_login(name):
access_token = auth_handler.create_access_token(
user_id=name, expiretime=config.auth_expiretime
)
return {"id": 1, "name": name, "token": access_token}
| 175 | 0 | 23 |
7f8dc6e74806ba791d61c6665b174c104ccd2850 | 1,646 | py | Python | examples/client.py | bravandi/CS505FinalProject | 5b065b13fcb066943efdb7a92bab13fe5d3c5c77 | [
"MIT"
] | null | null | null | examples/client.py | bravandi/CS505FinalProject | 5b065b13fcb066943efdb7a92bab13fe5d3c5c77 | [
"MIT"
] | null | null | null | examples/client.py | bravandi/CS505FinalProject | 5b065b13fcb066943efdb7a92bab13fe5d3c5c77 | [
"MIT"
] | null | null | null | import sys
import locale
from twisted.internet import reactor, task, threads
from twisted.python import log
from kademlia.network import Server, QuorumServer
log.startLogging(sys.stdout)
port = 5485
if(len(sys.argv) > 1):
port = int(sys.argv[1])
server = QuorumServer(ksize=3)
server.listen(port)
server.bootstrap([("127.0.0.1", 8468)]).addCallback(bootstrapDone, server)
get_input()
reactor.run()
| 18.91954 | 74 | 0.584447 | import sys
import locale
from twisted.internet import reactor, task, threads
from twisted.python import log
from kademlia.network import Server, QuorumServer
log.startLogging(sys.stdout)
def get_done(result, server_ley):
print ">>Got key result: ",result
print ">>Got key server_ley: ",server_ley
def set_done(result, server):
print ">>set_done:", result
def get_input():
def command_result(command_st):
commands = command_st.strip().split()
if(len(commands) > 1):
op = commands[0]
key = str(commands[1])
print "####KEY", key
if(len(commands) > 2):
value = commands[2].encode('ascii', 'ignore')
if op == "q":
reactor.stop()
elif op == "set":
print "adding key:", key, "-->", value
server.set(key, value).addCallback(set_done, server)
elif op == "get":
print "getting key:", key
server.get(key).addCallback(get_done, server)
else:
print "command: ",command_st," is wrong format"
get_input()
def get_command():
print ">>enter command:"
return raw_input()
d = threads.deferToThread(get_command)
d.addCallback(command_result)
def bootstrapDone(found, server):
print("INFO: Bootstrap done")
get_input()
return
port = 5485
if(len(sys.argv) > 1):
port = int(sys.argv[1])
server = QuorumServer(ksize=3)
server.listen(port)
server.bootstrap([("127.0.0.1", 8468)]).addCallback(bootstrapDone, server)
get_input()
reactor.run()
| 1,144 | 0 | 92 |
cfa1078ed7712f8f174569dd9051ed5cf1378a69 | 896 | py | Python | paddle/classification_yesno.py | yongbowin/DuReader_annotation | 138f60558f3a4810c0f83d2e8fcac150220bab60 | [
"Apache-2.0"
] | null | null | null | paddle/classification_yesno.py | yongbowin/DuReader_annotation | 138f60558f3a4810c0f83d2e8fcac150220bab60 | [
"Apache-2.0"
] | null | null | null | paddle/classification_yesno.py | yongbowin/DuReader_annotation | 138f60558f3a4810c0f83d2e8fcac150220bab60 | [
"Apache-2.0"
] | null | null | null | import json
"""
Train:
search, length=136208, yes_no_depends=11476
zhidao, length=135366, yes_no_depends=11273
"""
PATH = '/home/wangyongbo/2019rc/DuReader_test/data/preprocessed/trainset'
# search.train.json zhidao.train.json
def count_yesno():
"""
To count the nums of tes/no/depends in dataset
"""
with open(PATH + "/zhidao.train.json", "r", encoding="utf-8") as f:
res_list = f.readlines()
print("Total nums: ", len(res_list))
cou_t = 0
cou = 0
yes_no = []
for i in res_list:
cou_t += 1
ii = json.loads(i)
# print(ii['question_type'])
if 'yesno_answers' in ii:
if ii['yesno_answers']:
cou += 1
yes_no.append(ii['yesno_answers'])
# print(yes_no)
print(len(yes_no))
print("num of yes_no: ", cou)
print("num of train samples: ", cou_t)
| 22.4 | 73 | 0.583705 | import json
"""
Train:
search, length=136208, yes_no_depends=11476
zhidao, length=135366, yes_no_depends=11273
"""
PATH = '/home/wangyongbo/2019rc/DuReader_test/data/preprocessed/trainset'
# search.train.json zhidao.train.json
def count_yesno():
    """
    Count the yes/no/depends annotations in the zhidao train split.

    Each line of the input file holds one JSON record; a record is counted
    when it carries a non-empty ``yesno_answers`` list.
    """
    with open(PATH + "/zhidao.train.json", "r", encoding="utf-8") as f:
        res_list = f.readlines()
    print("Total nums: ", len(res_list))
    cou = 0
    yes_no = []
    for line in res_list:
        record = json.loads(line)
        # .get() also covers records lacking the key; an empty list is
        # falsy and skipped, matching the original two-step check.
        if record.get('yesno_answers'):
            cou += 1
            yes_no.append(record['yesno_answers'])
    print(len(yes_no))
    print("num of yes_no: ", cou)
    # The original kept a manual line counter (cou_t) that always equalled
    # len(res_list); report the same value without the redundant counter.
    print("num of train samples: ", len(res_list))
| 0 | 0 | 0 |
e64bc20c5d67e16b9a9e2d3ba8bed878f3e801bb | 6,369 | py | Python | nuplan/database/utils/pointclouds/pointcloud.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | nuplan/database/utils/pointclouds/pointcloud.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | nuplan/database/utils/pointclouds/pointcloud.py | MCZhi/nuplan-devkit | 3c4f5b8dcd517b27cfd258915ca5fe5c54e3cb0c | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from io import BytesIO
from typing import IO, Any, List, NamedTuple
import numpy as np
import numpy.typing as npt
class PointCloudHeader(NamedTuple):
    """ Class for Point Cloud header. Field order matches the .pcd header
    line order and must not change (save() emits them in declaration order). """
    version: str          # PCD format version string, e.g. '0.7'
    fields: List[str]     # per-column field names, e.g. ['x', 'y', 'z']
    size: List[int]       # bytes per value, one entry per field
    type: List[str]       # value type code per field: 'I', 'U' or 'F'
    count: List[int]  # type: ignore   # values per field; only 1 is supported
    width: int            # points per row (== points for unorganized clouds)
    height: int           # number of rows (1 for unorganized clouds)
    viewpoint: List[int]  # acquisition viewpoint: tx ty tz qw qx qy qz
    points: int           # total number of points
    data: str             # payload encoding; only 'binary' is supported
class PointCloud:
    """
    Class for raw .pcd file.
    """

    def __init__(self, header: PointCloudHeader, points: npt.NDArray[np.float64]) -> None:
        """
        PointCloud.
        :param header: Pointcloud header.
        :param points: <np.ndarray, X, N>. X columns, N points.
        """
        self._header = header
        self._points = points

    @property
    def header(self) -> PointCloudHeader:
        """
        Returns pointcloud header.
        :return: A PointCloudHeader instance.
        """
        return self._header

    @property
    def points(self) -> npt.NDArray[np.float64]:
        """
        Returns points.
        :return: <np.ndarray, X, N>. X columns, N points.
        """
        return self._points

    def save(self, file_path: str) -> None:
        """
        Saves to .pcd file.
        :param file_path: The path to the .pcd file.
        """
        with open(file_path, 'wb') as fp:
            fp.write('# .PCD v{} - Point Cloud Data file format\n'.format(self._header.version).encode('utf8'))
            # Emit every header field in declaration order, e.g. "FIELDS x y z".
            for field in self._header._fields:
                value = getattr(self._header, field)
                if isinstance(value, list):
                    text = ' '.join(map(str, value))
                else:
                    text = str(value)
                fp.write('{} {}\n'.format(field.upper(), text).encode('utf8'))
            # The binary payload follows the header lines directly.
            fp.write(self._points.tobytes())

    @classmethod
    def parse(cls, pcd_content: bytes) -> PointCloud:
        """
        Parses the pointcloud from byte stream.
        :param pcd_content: The byte stream that holds the pcd content.
        :return: A PointCloud object.
        """
        with BytesIO(pcd_content) as stream:
            header = cls.parse_header(stream)
            points = cls.parse_points(stream, header)

        return cls(header, points)

    @classmethod
    def parse_from_file(cls, pcd_file: str) -> PointCloud:
        """
        Parses the pointcloud from .pcd file on disk.
        :param pcd_file: The path to the .pcd file.
        :return: A PointCloud instance.
        """
        with open(pcd_file, 'rb') as stream:
            header = cls.parse_header(stream)
            points = cls.parse_points(stream, header)

        return cls(header, points)

    @staticmethod
    def parse_header(stream: IO[Any]) -> PointCloudHeader:
        """
        Parses the header of a pointcloud from byte IO stream.
        :param stream: Binary stream.
        :return: A PointCloudHeader instance.
        """
        headers_list = []
        while True:
            line = stream.readline().decode('utf8').strip()
            if line.startswith('#'):
                # Comment lines are ignored.
                continue
            columns = line.split()
            key = columns[0].lower()
            # Fields with several values (FIELDS, SIZE, ...) become lists;
            # single-value fields stay scalar strings.
            val = columns[1:] if len(columns) > 2 else columns[1]
            headers_list.append((key, val))
            if key == 'data':
                # DATA is always the last header line; payload follows.
                break

        headers = dict(headers_list)
        # Normalize numeric fields: everything above was parsed as strings.
        headers['size'] = list(map(int, headers['size']))
        headers['count'] = list(map(int, headers['count']))
        headers['width'] = int(headers['width'])
        headers['height'] = int(headers['height'])
        headers['viewpoint'] = list(map(int, headers['viewpoint']))
        headers['points'] = int(headers['points'])
        header = PointCloudHeader(**headers)
        if any([c != 1 for c in header.count]):
            raise RuntimeError('"count" has to be 1')

        if not len(header.fields) == len(header.size) == len(header.type) == len(header.count):
            raise RuntimeError('fields/size/type/count field number are inconsistent')

        return header

    @staticmethod
    def parse_points(stream: IO[Any], header: PointCloudHeader) -> npt.NDArray[np.float64]:
        """
        Parses points from byte IO stream.
        :param stream: Byte stream that holds the points.
        :param header: <np.ndarray, X, N>. A numpy array that has X columns(features), N points.
        :return: Points of Point Cloud.
        """
        if header.data != 'binary':
            # Typo fixed here: the message used to read "foramt".
            raise RuntimeError('Un-supported data format: {}. "binary" is expected.'.format(header.data))

        # There is garbage data at the end of the stream, usually all b'\x00'.
        row_type = PointCloud.np_type(header)
        length = row_type.itemsize * header.points
        buff = stream.read(length)
        if len(buff) != length:
            raise RuntimeError('Incomplete pointcloud stream: {} bytes expected, {} got'.format(length, len(buff)))

        points = np.frombuffer(buff, row_type)  # type: ignore

        return points  # type: ignore

    @staticmethod
    def np_type(header: PointCloudHeader) -> np.dtype:  # type: ignore
        """
        Helper function that translate column types in pointcloud to np types.
        :param header: A PointCloudHeader object.
        :return: np.dtype that holds the X features.
        """
        type_mapping = {'I': 'int', 'U': 'uint', 'F': 'float'}
        np_types = [type_mapping[t] + str(int(s) * 8) for t, s in zip(header.type, header.size)]

        return np.dtype([(f, getattr(np, nt)) for f, nt in zip(header.fields, np_types)])

    def to_pcd_bin(self) -> npt.NDArray[np.float32]:
        """
        Converts pointcloud to .pcd.bin format.
        :return: <np.float32, 5, N>, the point cloud in .pcd.bin format.
        """
        lidar_fields = ['x', 'y', 'z', 'intensity', 'ring']
        return np.array([np.array(self.points[f], dtype=np.float32) for f in lidar_fields])

    def to_pcd_bin2(self) -> npt.NDArray[np.float32]:
        """
        Converts pointcloud to .pcd.bin2 format.
        :return: <np.float32, 6, N>, the point cloud in .pcd.bin2 format.
        """
        lidar_fields = ['x', 'y', 'z', 'intensity', 'ring', 'lidar_info']
        return np.array([np.array(self.points[f], dtype=np.float32) for f in lidar_fields])
| 35.187845 | 115 | 0.581096 | from __future__ import annotations
from io import BytesIO
from typing import IO, Any, List, NamedTuple
import numpy as np
import numpy.typing as npt
class PointCloudHeader(NamedTuple):
    """ Class for Point Cloud header. Field order matches the .pcd header
    line order and must not change (save() emits them in declaration order). """
    version: str          # PCD format version string, e.g. '0.7'
    fields: List[str]     # per-column field names, e.g. ['x', 'y', 'z']
    size: List[int]       # bytes per value, one entry per field
    type: List[str]       # value type code per field: 'I', 'U' or 'F'
    count: List[int]  # type: ignore   # values per field; only 1 is supported
    width: int            # points per row (== points for unorganized clouds)
    height: int           # number of rows (1 for unorganized clouds)
    viewpoint: List[int]  # acquisition viewpoint: tx ty tz qw qx qy qz
    points: int           # total number of points
    data: str             # payload encoding; only 'binary' is supported
class PointCloud:
    """
    Class for raw .pcd file.
    """

    def __init__(self, header: PointCloudHeader, points: npt.NDArray[np.float64]) -> None:
        """
        PointCloud.
        :param header: Pointcloud header.
        :param points: <np.ndarray, X, N>. X columns, N points.
        """
        self._header = header
        self._points = points

    @property
    def header(self) -> PointCloudHeader:
        """
        Returns pointcloud header.
        :return: A PointCloudHeader instance.
        """
        return self._header

    @property
    def points(self) -> npt.NDArray[np.float64]:
        """
        Returns points.
        :return: <np.ndarray, X, N>. X columns, N points.
        """
        return self._points

    def save(self, file_path: str) -> None:
        """
        Saves to .pcd file.
        :param file_path: The path to the .pcd file.
        """
        with open(file_path, 'wb') as fp:
            fp.write('# .PCD v{} - Point Cloud Data file format\n'.format(self._header.version).encode('utf8'))
            # Emit every header field in declaration order, e.g. "FIELDS x y z".
            for field in self._header._fields:
                value = getattr(self._header, field)
                if isinstance(value, list):
                    text = ' '.join(map(str, value))
                else:
                    text = str(value)
                fp.write('{} {}\n'.format(field.upper(), text).encode('utf8'))
            # The binary payload follows the header lines directly.
            fp.write(self._points.tobytes())

    @classmethod
    def parse(cls, pcd_content: bytes) -> PointCloud:
        """
        Parses the pointcloud from byte stream.
        :param pcd_content: The byte stream that holds the pcd content.
        :return: A PointCloud object.
        """
        with BytesIO(pcd_content) as stream:
            header = cls.parse_header(stream)
            points = cls.parse_points(stream, header)

        return cls(header, points)

    @classmethod
    def parse_from_file(cls, pcd_file: str) -> PointCloud:
        """
        Parses the pointcloud from .pcd file on disk.
        :param pcd_file: The path to the .pcd file.
        :return: A PointCloud instance.
        """
        with open(pcd_file, 'rb') as stream:
            header = cls.parse_header(stream)
            points = cls.parse_points(stream, header)

        return cls(header, points)

    @staticmethod
    def parse_header(stream: IO[Any]) -> PointCloudHeader:
        """
        Parses the header of a pointcloud from byte IO stream.
        :param stream: Binary stream.
        :return: A PointCloudHeader instance.
        """
        headers_list = []
        while True:
            line = stream.readline().decode('utf8').strip()
            if line.startswith('#'):
                # Comment lines are ignored.
                continue
            columns = line.split()
            key = columns[0].lower()
            # Fields with several values (FIELDS, SIZE, ...) become lists;
            # single-value fields stay scalar strings.
            val = columns[1:] if len(columns) > 2 else columns[1]
            headers_list.append((key, val))
            if key == 'data':
                # DATA is always the last header line; payload follows.
                break

        headers = dict(headers_list)
        # Normalize numeric fields: everything above was parsed as strings.
        headers['size'] = list(map(int, headers['size']))
        headers['count'] = list(map(int, headers['count']))
        headers['width'] = int(headers['width'])
        headers['height'] = int(headers['height'])
        headers['viewpoint'] = list(map(int, headers['viewpoint']))
        headers['points'] = int(headers['points'])
        header = PointCloudHeader(**headers)
        if any([c != 1 for c in header.count]):
            raise RuntimeError('"count" has to be 1')

        if not len(header.fields) == len(header.size) == len(header.type) == len(header.count):
            raise RuntimeError('fields/size/type/count field number are inconsistent')

        return header

    @staticmethod
    def parse_points(stream: IO[Any], header: PointCloudHeader) -> npt.NDArray[np.float64]:
        """
        Parses points from byte IO stream.
        :param stream: Byte stream that holds the points.
        :param header: <np.ndarray, X, N>. A numpy array that has X columns(features), N points.
        :return: Points of Point Cloud.
        """
        if header.data != 'binary':
            # Typo fixed here: the message used to read "foramt".
            raise RuntimeError('Un-supported data format: {}. "binary" is expected.'.format(header.data))

        # There is garbage data at the end of the stream, usually all b'\x00'.
        row_type = PointCloud.np_type(header)
        length = row_type.itemsize * header.points
        buff = stream.read(length)
        if len(buff) != length:
            raise RuntimeError('Incomplete pointcloud stream: {} bytes expected, {} got'.format(length, len(buff)))

        points = np.frombuffer(buff, row_type)  # type: ignore

        return points  # type: ignore

    @staticmethod
    def np_type(header: PointCloudHeader) -> np.dtype:  # type: ignore
        """
        Helper function that translate column types in pointcloud to np types.
        :param header: A PointCloudHeader object.
        :return: np.dtype that holds the X features.
        """
        type_mapping = {'I': 'int', 'U': 'uint', 'F': 'float'}
        np_types = [type_mapping[t] + str(int(s) * 8) for t, s in zip(header.type, header.size)]

        return np.dtype([(f, getattr(np, nt)) for f, nt in zip(header.fields, np_types)])

    def to_pcd_bin(self) -> npt.NDArray[np.float32]:
        """
        Converts pointcloud to .pcd.bin format.
        :return: <np.float32, 5, N>, the point cloud in .pcd.bin format.
        """
        lidar_fields = ['x', 'y', 'z', 'intensity', 'ring']
        return np.array([np.array(self.points[f], dtype=np.float32) for f in lidar_fields])

    def to_pcd_bin2(self) -> npt.NDArray[np.float32]:
        """
        Converts pointcloud to .pcd.bin2 format.
        :return: <np.float32, 6, N>, the point cloud in .pcd.bin2 format.
        """
        lidar_fields = ['x', 'y', 'z', 'intensity', 'ring', 'lidar_info']
        return np.array([np.array(self.points[f], dtype=np.float32) for f in lidar_fields])
| 0 | 0 | 0 |
9c093407b2d2f8096204084605761ea48a96bbe0 | 223 | py | Python | week1/wk1.py | kostyahwostochenko/modeling | aab491a749707456ec3689154a42ac0e30e6f57e | [
"Unlicense"
] | null | null | null | week1/wk1.py | kostyahwostochenko/modeling | aab491a749707456ec3689154a42ac0e30e6f57e | [
"Unlicense"
] | null | null | null | week1/wk1.py | kostyahwostochenko/modeling | aab491a749707456ec3689154a42ac0e30e6f57e | [
"Unlicense"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib import rcParams
import numpy as np
# Load the sample values (one number per line) and plot their distribution.
# The unused accumulator list `a` from the original was removed.
data = np.loadtxt("text.txt")
plt.grid(True)
plt.hist(data, bins=100)  # 100 bins for a reasonably fine-grained histogram
plt.show()
| 10.136364 | 32 | 0.569507 | import matplotlib.pyplot as plt
from matplotlib import rcParams
import numpy as np
# Load the sample values (one number per line) and plot their distribution.
# The unused accumulator list `a` from the original was removed.
data = np.loadtxt("text.txt")
plt.grid(True)
plt.hist(data, bins=100)  # 100 bins for a reasonably fine-grained histogram
plt.show()
| 0 | 0 | 0 |
d94804c1428eb0f9adf7febad1cfc9bc73071492 | 2,862 | py | Python | KartAI/TrackManager.py | eritzyg/KartAI | 5c49ca29148aea19f80d6635b0799d0d32b9c053 | [
"MIT"
] | 10 | 2017-10-18T21:20:16.000Z | 2020-04-07T17:02:55.000Z | KartAI/TrackManager.py | Garnica1999/Self-Driving-Car-Reinforcement-Learning | f7f5d2e7ce912f708a1c82fb40bb0d8b645ecb8a | [
"MIT"
] | null | null | null | KartAI/TrackManager.py | Garnica1999/Self-Driving-Car-Reinforcement-Learning | f7f5d2e7ce912f708a1c82fb40bb0d8b645ecb8a | [
"MIT"
] | 5 | 2017-10-18T21:12:27.000Z | 2020-04-17T08:44:57.000Z | # KartAI https://github.com/eritzyg/KartAI/
# Copyright (c) 2017 Eritz Yerga Gutierrez and Iker García Ferrero
# MIT License https://github.com/eritzyg/KartAI/blob/master/LICENSE
import Player as player
import Render as render
import StateManager as sm | 31.108696 | 99 | 0.696716 | # KartAI https://github.com/eritzyg/KartAI/
# Copyright (c) 2017 Eritz Yerga Gutierrez and Iker García Ferrero
# MIT License https://github.com/eritzyg/KartAI/blob/master/LICENSE
import Player as player
import Render as render
import StateManager as sm
def init():
    """Populate the module-level track tables (names, sprites, spawn data).

    All lists are indexed by track number; selectedTrack picks the entry.
    """
    global selectedTrack
    global nTracks
    global tracknames
    global track_sprites
    global trackmap_sprites
    global initial_posx
    global initial_posy
    global initial_dir
    global initial_velocity
    global initial_rotation
    #### EDIT THIS TO ADD MORE TRACKS ####
    selectedTrack = 0
    nTracks = 3
    tracknames = ["Donut Plains", "Donut Plains Reversed", "Rainbow Road"]
    track_sprites = ["Donut_Plains_2.png", "Donut_Plains_2.png", "Rainbow_Road.png"]
    trackmap_sprites = ["Donut_Plains_2-map.png", "Donut_Plains_2-map.png", "Rainbow_Road-map.png"]
    # Per-track spawn state: position, heading vector, speed and rotation.
    initial_posx = [920.0, 920.0, 63.0]
    initial_posy = [619.0, 619.0, 489.0]
    initial_dir = [[0.0, -1.0], [0.0, 1.0], [0.0, -1.0]]
    initial_velocity = [1.0, 1.0, 1.0]
    initial_rotation = [90.0, 270.0, 90.0]
    #######################################
def ldTrack():
    """Load the selected track's sprites and reset player and state manager.

    Falls back to track 0 when selectedTrack is out of range.
    NOTE(review): loadImage is not imported here — presumably a builtin of
    the Processing.py runtime this sketch runs in; confirm.
    """
    global selectedTrack
    global nTracks
    global track_sprites
    global trackmap_sprites
    global initial_posx
    global initial_posy
    global initial_dir
    global initial_velocity
    global initial_rotation
    if (selectedTrack >= nTracks):
        selectedTrack = 0
    render.track = loadImage(track_sprites[selectedTrack])
    render.trackmap = loadImage(trackmap_sprites[selectedTrack])
    # Reset the kart to the track's spawn state.
    player.posx = initial_posx[selectedTrack]
    player.posy = initial_posy[selectedTrack]
    player.dir = initial_dir[selectedTrack]
    player.velocity = initial_velocity[selectedTrack]
    player.rotation = initial_rotation[selectedTrack]
    sm.init()
def initplayer():
    """Reset only the player to the selected track's spawn state.

    Falls back to track 0 when selectedTrack is out of range.
    """
    global selectedTrack
    global nTracks
    global initial_posx
    global initial_posy
    global initial_dir
    global initial_velocity
    global initial_rotation
    if (selectedTrack >= nTracks):
        selectedTrack = 0
    player.posx = initial_posx[selectedTrack]
    player.posy = initial_posy[selectedTrack]
    player.dir = initial_dir[selectedTrack]
    player.velocity = initial_velocity[selectedTrack]
    player.rotation = initial_rotation[selectedTrack]
def initrender():
    """Load only the render sprites for the selected track.

    Falls back to track 0 when selectedTrack is out of range.
    NOTE(review): loadImage is presumably the Processing.py builtin; confirm.
    """
    global selectedTrack
    global tracknames
    global nTracks
    global track_sprites
    global trackmap_sprites
    if (selectedTrack >= nTracks):
        selectedTrack = 0
    render.track = loadImage(track_sprites[selectedTrack])
    render.trackmap = loadImage(trackmap_sprites[selectedTrack])
    print("Loaded track: "+tracknames[selectedTrack])
def setTrack(number):
    """Select track `number`; out-of-range values are silently ignored."""
    global selectedTrack
    global nTracks
    if 0 <= number < nTracks:
        selectedTrack = number
3d949e2c656265410b99db71a3605d909750e2a4 | 607 | py | Python | getsub/constants.py | snhome/GetSubtitles | e83bed8460735a591cfeaef48db1bbd1665d9a71 | [
"MIT"
] | null | null | null | getsub/constants.py | snhome/GetSubtitles | e83bed8460735a591cfeaef48db1bbd1665d9a71 | [
"MIT"
] | null | null | null | getsub/constants.py | snhome/GetSubtitles | e83bed8460735a591cfeaef48db1bbd1665d9a71 | [
"MIT"
] | null | null | null | # coding: utf-8
# Subtitle file extensions the tool recognises.
SUB_FORMATS = [".ass", ".srt", ".ssa", ".sub"]
# Archive containers a subtitle package may arrive in.
ARCHIVE_TYPES = [".zip", ".rar", ".7z"]
# Video container extensions used to detect video files.
# Duplicate entries (".mpg", ".flv" appeared twice) were removed;
# membership and first-occurrence order are preserved.
VIDEO_FORMATS = [
    ".webm",
    ".mkv",
    ".flv",
    ".vob",
    ".ogv",
    ".ogg",
    ".drc",
    ".gif",
    ".gifv",
    ".mng",
    ".avi",
    ".mov",
    ".qt",
    ".wmv",
    ".yuv",
    ".rm",
    ".rmvb",
    ".asf",
    ".amv",
    ".mp4",
    ".m4p",
    ".m4v",
    ".mpg",
    ".mp2",
    ".mpeg",
    ".mpe",
    ".mpv",
    ".m2v",
    ".svi",
    ".3gp",
    ".3g2",
    ".mxf",
    ".roq",
    ".nsv",
    ".f4v",
    ".f4p",
    ".f4a",
    ".f4b",
]
| 12.645833 | 46 | 0.311367 | # coding: utf-8
# Subtitle file extensions the tool recognises.
SUB_FORMATS = [".ass", ".srt", ".ssa", ".sub"]
# Archive containers a subtitle package may arrive in.
ARCHIVE_TYPES = [".zip", ".rar", ".7z"]
# Video container extensions used to detect video files.
# Duplicate entries (".mpg", ".flv" appeared twice) were removed;
# membership and first-occurrence order are preserved.
VIDEO_FORMATS = [
    ".webm",
    ".mkv",
    ".flv",
    ".vob",
    ".ogv",
    ".ogg",
    ".drc",
    ".gif",
    ".gifv",
    ".mng",
    ".avi",
    ".mov",
    ".qt",
    ".wmv",
    ".yuv",
    ".rm",
    ".rmvb",
    ".asf",
    ".amv",
    ".mp4",
    ".m4p",
    ".m4v",
    ".mpg",
    ".mp2",
    ".mpeg",
    ".mpe",
    ".mpv",
    ".m2v",
    ".svi",
    ".3gp",
    ".3g2",
    ".mxf",
    ".roq",
    ".nsv",
    ".f4v",
    ".f4p",
    ".f4a",
    ".f4b",
]
| 0 | 0 | 0 |
43cd677498c6f518210f401aadd0c6c81ded8fcf | 6,406 | py | Python | Source Code/medicalDataLoader.py | nowtryz/SubUnet | 881fdcf1a3e95378cd3ecd2f64aef41258fb62d2 | [
"MIT"
] | null | null | null | Source Code/medicalDataLoader.py | nowtryz/SubUnet | 881fdcf1a3e95378cd3ecd2f64aef41258fb62d2 | [
"MIT"
] | null | null | null | Source Code/medicalDataLoader.py | nowtryz/SubUnet | 881fdcf1a3e95378cd3ecd2f64aef41258fb62d2 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image, ImageOps
from random import random, randint
import pdb
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
class MedicalImageDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, mode, root_dir, augment=False, equalize=False, load_on_gpu=False, load_all_dataset=False):
"""
Args:
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.root_dir = root_dir
self.augmentation = augment
self.equalize = equalize
self.load_on_gpu = load_on_gpu and torch.cuda.is_available()
self.load_all_dataset = load_all_dataset and self.load_on_gpu
self.img_paths = make_dataset(root_dir, mode)
self.mode = mode
if self.load_all_dataset:
self.loaded_imgs = []
for index in range(self.__len__()):
img_path, mask_path = self.img_paths[index]
img = transforms.ToTensor()(Image.open(img_path)).cuda()
mask = transforms.ToTensor()(Image.open(mask_path).convert('L')).cuda()
self.loaded_imgs.append((img, mask))
@staticmethod
| 35.005464 | 113 | 0.605058 | from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image, ImageOps
from random import random, randint
import pdb
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
def make_dataset(root, mode):
    """
    Build the list of (image, ground-truth) path pairs for one split.

    The original repeated the same listing logic in four branches that
    differed only in the split directory name; collapsed into one path.

    :param root: dataset root containing one sub-directory per split,
        each with 'Img' and 'GT' sub-folders
    :param mode: split name: 'train', 'val', 'test' or 'train_small'
    :return: list of (image_path, mask_path) tuples, paired by sorted
        filename order (images and masks are assumed to sort identically)
    """
    assert mode in ['train', 'val', 'test', 'train_small']

    img_dir = os.path.join(root, mode, 'Img')
    mask_dir = os.path.join(root, mode, 'GT')

    # Sort both listings so images and masks are paired positionally.
    images = sorted(os.listdir(img_dir))
    labels = sorted(os.listdir(mask_dir))

    return [
        (os.path.join(img_dir, it_im), os.path.join(mask_dir, it_gt))
        for it_im, it_gt in zip(images, labels)
    ]
class MedicalImageDataset(Dataset):
    """Medical image segmentation dataset yielding [image, mask, image_path].

    (The previous docstring, "Face Landmarks dataset.", was a leftover from
    the PyTorch tutorial this loader was adapted from.)
    Images are loaded with PIL, masks converted to single-channel 'L', both
    turned into tensors; optional histogram equalization, random
    augmentation, and GPU pre-loading of the whole split.
    """
    def __init__(self, mode, root_dir, augment=False, equalize=False, load_on_gpu=False, load_all_dataset=False):
        """
        Args:
            mode: split name passed to make_dataset ('train', 'val', ...).
            root_dir (string): Directory with all the images.
            augment: apply random augmentations in __getitem__.
            equalize: histogram-equalize images in __getitem__.
            load_on_gpu: move tensors to CUDA (only if CUDA is available).
            load_all_dataset: pre-load every pair onto the GPU at init
                (only honoured when load_on_gpu is effective).
        """
        self.root_dir = root_dir
        self.augmentation = augment
        self.equalize = equalize
        self.load_on_gpu = load_on_gpu and torch.cuda.is_available()
        self.load_all_dataset = load_all_dataset and self.load_on_gpu
        self.img_paths = make_dataset(root_dir, mode)
        self.mode = mode
        if self.load_all_dataset:
            # Eagerly decode and move every (image, mask) pair to the GPU.
            self.loaded_imgs = []
            for index in range(self.__len__()):
                img_path, mask_path = self.img_paths[index]
                img = transforms.ToTensor()(Image.open(img_path)).cuda()
                mask = transforms.ToTensor()(Image.open(mask_path).convert('L')).cuda()
                self.loaded_imgs.append((img, mask))

    def __len__(self):
        """Number of (image, mask) pairs in the split."""
        return len(self.img_paths)

    @staticmethod
    def augment(img, mask):
        """Apply each augmentation independently with 20% probability.

        Geometric transforms are applied to img and mask identically (same
        parameters) so the segmentation stays aligned; photometric ones
        (brightness, blur, contrast, sharpness) touch the image only.
        """
        prob = 0.2
        img_size = img[0].size()
        if random() < prob: # Flip
            img = transforms.functional.vflip(img)
            mask = transforms.functional.vflip(mask)
        if random() < prob: # Mirror
            img = transforms.functional.hflip(img)
            mask = transforms.functional.hflip(mask)
        if random() < prob: # Rotate
            angle = random() * 60 - 30
            img = transforms.functional.rotate(img, angle=angle)
            mask = transforms.functional.rotate(mask, angle=angle)
        if random() < prob: # Crop
            # Crop to 50-100% of the original size, then resize back.
            crop_size = tuple(int((random() + 1) * x / 2) for x in img_size)
            params = transforms.RandomCrop.get_params(img, output_size=crop_size)
            img = transforms.functional.crop(img, *params)
            img = transforms.functional.resize(img, size=img_size)
            mask = transforms.functional.crop(mask, *params)
            mask = transforms.functional.resize(mask, size=img_size)
        if random() < prob: # Pad
            pad_size = tuple(int(random() * x) for x in img_size)
            img = transforms.Pad(padding=pad_size)(img)
            img = transforms.Resize(size=img_size)(img)
            mask = transforms.Pad(padding=pad_size)(mask)
            mask = transforms.Resize(size=img_size)(mask)
        if random() < prob: # Z Axe Shift
            scale = random() * 0.25
            params = transforms.RandomPerspective.get_params(*img_size, distortion_scale=scale)
            img = transforms.functional.perspective(img, *params)
            mask = transforms.functional.perspective(mask, *params)
        if random() < prob: # Brightness shift
            img = transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0)(img)
        if random() < prob: # Gaussian Blur
            img = transforms.GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))(img)
        if random() < prob: # Autocontrast
            img = transforms.functional.autocontrast(img)
        if random() < prob: # Sharpness
            img = transforms.functional.adjust_sharpness(img, sharpness_factor=random()*2)
        return img, mask

    def __getitem__(self, index):
        """Return [img_tensor, mask_tensor, img_path] for the given index."""
        if self.load_all_dataset:
            # Pre-loaded path: tensors already decoded and on the GPU.
            img, mask = self.loaded_imgs[index]
            img_path,_ = self.img_paths[index]
        else:
            img_path, mask_path = self.img_paths[index]
            img = transforms.ToTensor()(Image.open(img_path))
            mask = transforms.ToTensor()(Image.open(mask_path).convert('L'))
            if self.load_on_gpu:
                img = img.cuda()
                mask = mask.cuda()
        if self.equalize:
            img = transforms.functional.equalize(img)
        if self.augmentation:
            img, mask = self.augment(img, mask)
        return [img, mask, img_path]
| 4,796 | 0 | 103 |
2a1aded12e63ca7ff611224297f0938f3fc121fd | 129 | py | Python | listings/api/serializers.py | jeacaveo/takehome-be | 9d8f2bb268706441be95250b8ecb6667c34da440 | [
"MIT"
] | 1 | 2021-10-15T03:33:22.000Z | 2021-10-15T03:33:22.000Z | listings/api/serializers.py | jeacaveo/takehome-be | 9d8f2bb268706441be95250b8ecb6667c34da440 | [
"MIT"
] | 1 | 2021-10-02T16:07:17.000Z | 2021-10-02T16:07:17.000Z | listings/api/serializers.py | jeacaveo/takehome-be | 9d8f2bb268706441be95250b8ecb6667c34da440 | [
"MIT"
] | 8 | 2021-07-13T22:25:44.000Z | 2021-10-21T23:46:15.000Z | from django.contrib.auth.models import User, Group
from rest_framework import serializers
# TODO: Create your serializers here.
| 25.8 | 50 | 0.821705 | from django.contrib.auth.models import User, Group
from rest_framework import serializers
# TODO: Create your serializers here.
| 0 | 0 | 0 |
2b7c787a5e56ce8d6f1e104e7fddb509dc550419 | 4,875 | py | Python | tests/test_create_case_lists.py | Sage-Bionetworks/Genie | ce70861b0d3717cd5b57a393a16b4d6fea9500f3 | [
"MIT"
] | 10 | 2017-08-31T21:32:18.000Z | 2022-03-07T21:37:17.000Z | tests/test_create_case_lists.py | Sage-Bionetworks/Genie | ce70861b0d3717cd5b57a393a16b4d6fea9500f3 | [
"MIT"
] | 216 | 2016-10-24T21:30:12.000Z | 2022-03-31T15:04:37.000Z | tests/test_create_case_lists.py | Sage-Bionetworks/Genie | ce70861b0d3717cd5b57a393a16b4d6fea9500f3 | [
"MIT"
] | 12 | 2016-10-21T13:48:06.000Z | 2020-06-04T19:21:23.000Z | import os
import pytest
from genie import create_case_lists
study_id = "test"
clinical_file_map = {'': ['FOOBAR', 'NEED']}
clinical_file_map['Testing2'] = ['test1']
clinical_file_map['Test1 Now, Please/foo'] = ['wow']
sequenced_case_list_files = create_case_lists.write_case_list_sequenced(
['test1', 'test2'], "./", study_id)
case_list_cna_path = create_case_lists.write_case_list_cna(
['test1', 'test2'], "./", study_id)
case_list_cnaseq_path = create_case_lists.write_case_list_cnaseq(
['test1', 'test2'], "./", study_id)
expected_change_text = (
'cancer_study_identifier: test\n'
'stable_id: test_Test1_Now_Please_foo\n'
'case_list_name: Tumor Type: Test1 Now, Please/foo\n'
'case_list_description: All tumors with cancer type '
'Test1 Now, Please/foo\n'
'case_list_ids: wow')
expected_same_text = (
'cancer_study_identifier: test\n'
'stable_id: test_Testing2\n'
'case_list_name: Tumor Type: Testing2\n'
'case_list_description: All tumors with cancer type Testing2\n'
'case_list_ids: test1')
expected_nocode_text = (
'cancer_study_identifier: test\n'
'stable_id: test_no_oncotree_code\n'
'case_list_name: Tumor Type: NA\n'
'case_list_description: All tumors with cancer type NA\n'
'case_list_ids: FOOBAR\tNEED')
@pytest.fixture(params=[
# tuple with (input, expectedOutput)
('', clinical_file_map[''], expected_nocode_text),
('Testing2', clinical_file_map['Testing2'], expected_same_text),
('Test1 Now, Please/foo',
clinical_file_map['Test1 Now, Please/foo'],
expected_change_text)])
| 33.854167 | 72 | 0.716103 | import os
import pytest
from genie import create_case_lists
# Module-level fixtures: these case-list files are generated once at import
# time in the working directory; individual tests assert on their names and
# contents and then delete them.
study_id = "test"
clinical_file_map = {'': ['FOOBAR', 'NEED']}
clinical_file_map['Testing2'] = ['test1']
clinical_file_map['Test1 Now, Please/foo'] = ['wow']
sequenced_case_list_files = create_case_lists.write_case_list_sequenced(
    ['test1', 'test2'], "./", study_id)
case_list_cna_path = create_case_lists.write_case_list_cna(
    ['test1', 'test2'], "./", study_id)
case_list_cnaseq_path = create_case_lists.write_case_list_cnaseq(
    ['test1', 'test2'], "./", study_id)
def test_filenames_write_case_list_files():
    """Oncotree case-list files are named after the sanitized cancer types."""
    case_list_files = create_case_lists.write_case_list_files(
        clinical_file_map, "./", study_id)
    required_files = [
        "cases_Test1_Now_Please_foo.txt",
        "cases_no_oncotree_code.txt",
        "cases_Testing2.txt"]
    basenames = [
        os.path.basename(case_file) for case_file in case_list_files]
    assert all([req_file in basenames for req_file in required_files])
expected_change_text = (
'cancer_study_identifier: test\n'
'stable_id: test_Test1_Now_Please_foo\n'
'case_list_name: Tumor Type: Test1 Now, Please/foo\n'
'case_list_description: All tumors with cancer type '
'Test1 Now, Please/foo\n'
'case_list_ids: wow')
expected_same_text = (
'cancer_study_identifier: test\n'
'stable_id: test_Testing2\n'
'case_list_name: Tumor Type: Testing2\n'
'case_list_description: All tumors with cancer type Testing2\n'
'case_list_ids: test1')
expected_nocode_text = (
'cancer_study_identifier: test\n'
'stable_id: test_no_oncotree_code\n'
'case_list_name: Tumor Type: NA\n'
'case_list_description: All tumors with cancer type NA\n'
'case_list_ids: FOOBAR\tNEED')
@pytest.fixture(params=[
    # tuple with (input, expectedOutput)
    ('', clinical_file_map[''], expected_nocode_text),
    ('Testing2', clinical_file_map['Testing2'], expected_same_text),
    ('Test1 Now, Please/foo',
     clinical_file_map['Test1 Now, Please/foo'],
     expected_change_text)])
def oncotree_write_params(request):
    """Parametrized (cancer_type, sample_ids, expected_file_text) triples."""
    return request.param
def test__write_single_oncotree_case_list(oncotree_write_params):
    """Each oncotree case-list file is written with the expected content."""
    (cancer_type, ids, expected_text) = oncotree_write_params
    caselist_path = \
        create_case_lists._write_single_oncotree_case_list(
            cancer_type, ids, study_id, "./")
    with open(caselist_path, 'r') as case_list:
        caselist_text = case_list.read()
    assert caselist_text == expected_text
    os.remove(caselist_path)
def test_filenames_write_case_list_sequenced():
    """write_case_list_sequenced produces the sequenced and all-samples files."""
    first = os.path.basename(sequenced_case_list_files[0])
    assert first == "cases_sequenced.txt"
    second = os.path.basename(sequenced_case_list_files[1])
    assert second == "cases_all.txt"
def test_sequencetext_write_case_list_sequenced():
    """cases_sequenced.txt contains the expected header and sample ids."""
    expected_text = (
        'cancer_study_identifier: test\n'
        'stable_id: test_sequenced\n'
        'case_list_name: Sequenced Tumors\n'
        'case_list_description: All sequenced samples\n'
        'case_list_ids: test1\ttest2')
    with open(sequenced_case_list_files[0], 'r') as case_list:
        caselist_text = case_list.read()
    assert caselist_text == expected_text
    os.remove(sequenced_case_list_files[0])
def test_all_write_case_list_sequenced():
    """cases_all.txt contains the expected header and sample ids."""
    expected_text = (
        'cancer_study_identifier: test\n'
        'stable_id: test_all\n'
        'case_list_name: All samples\n'
        'case_list_description: All samples\n'
        'case_list_ids: test1\ttest2')
    with open(sequenced_case_list_files[1], 'r') as case_list:
        caselist_text = case_list.read()
    assert caselist_text == expected_text
    os.remove(sequenced_case_list_files[1])
def test_filename_write_case_list_cna():
    """The CNA case-list file is named cases_cna.txt."""
    assert os.path.basename(case_list_cna_path) == "cases_cna.txt"
def test_filename_write_case_list_cnaseq():
    """The CNA+mutation case-list file is named cases_cnaseq.txt."""
    assert os.path.basename(case_list_cnaseq_path) == "cases_cnaseq.txt"
def test_cnatext_write_case_list_cna():
    """cases_cna.txt contains the expected header and sample ids."""
    expected_text = (
        'cancer_study_identifier: test\n'
        'stable_id: test_cna\n'
        'case_list_name: Samples with CNA\n'
        'case_list_description: Samples with CNA\n'
        'case_list_ids: test1\ttest2')
    with open(case_list_cna_path, 'r') as case_list:
        caselist_text = case_list.read()
    assert caselist_text == expected_text
    os.remove(case_list_cna_path)
def test_cnaseq_write_case_list_cnaseq():
    """cases_cnaseq.txt contains the expected header and sample ids."""
    expected_text = (
        'cancer_study_identifier: test\n'
        'stable_id: test_cnaseq\n'
        'case_list_name: Samples with CNA and mutation\n'
        'case_list_description: Samples with CNA and mutation\n'
        'case_list_ids: test1\ttest2')
    with open(case_list_cnaseq_path, 'r') as case_list:
        caselist_text = case_list.read()
    assert caselist_text == expected_text
    os.remove(case_list_cnaseq_path)
| 3,030 | 0 | 229 |
ea472073cf50884329fa9327f9103c958a6e9432 | 1,433 | py | Python | utils/weight_init.py | doronpor/GAN | 374159ea97d09ed5fc3c132133e60182f6600010 | [
"MIT"
] | 1 | 2019-12-04T06:08:13.000Z | 2019-12-04T06:08:13.000Z | utils/weight_init.py | doronpor/GAN | 374159ea97d09ed5fc3c132133e60182f6600010 | [
"MIT"
] | null | null | null | utils/weight_init.py | doronpor/GAN | 374159ea97d09ed5fc3c132133e60182f6600010 | [
"MIT"
] | null | null | null | import torch.nn as nn
from torch.nn.modules.conv import _ConvNd
def basic_weight_init(slop=0, non_linearity='relu', type='kaiming_uniform'):
"""
return a weight_init method for convolution and batchnorm initialization.
convolutional layer are initialized using kaimin_uniform initialization
:param type: type of convolution normalization 'normal', 'kaiming_uniform', 'xavier_normal'
:param slop: slop of the non linearity
:param non_linearity: 'relu' or 'leaky_relu'
:return: weight initialization method
"""
return weight_init
| 43.424242 | 102 | 0.662247 | import torch.nn as nn
from torch.nn.modules.conv import _ConvNd
def basic_weight_init(slop=0, non_linearity='relu', type='kaiming_uniform'):
"""
return a weight_init method for convolution and batchnorm initialization.
convolutional layer are initialized using kaimin_uniform initialization
:param type: type of convolution normalization 'normal', 'kaiming_uniform', 'xavier_normal'
:param slop: slop of the non linearity
:param non_linearity: 'relu' or 'leaky_relu'
:return: weight initialization method
"""
def weight_init(module):
if isinstance(module, _ConvNd):
# both conv and conv transposed inherit from _ConvNd
if type == 'kaiming_uniform':
nn.init.kaiming_uniform_(module.weight.data, a=slop, nonlinearity=non_linearity)
elif type == 'xavier':
nn.init.xavier_normal_(module.weight.data, gain=nn.init.calculate_gain(non_linearity))
elif type == 'noraml':
nn.init.normal_(module.weight.data, 0.0, 0.02)
else:
raise TypeError('the type of convolution normalization is not supported')
if module.bias is not None:
nn.init.zeros_(module.bias.data)
elif isinstance(module, nn.BatchNorm2d):
nn.init.normal_(module.weight.data, 1.0, 0.02)
nn.init.constant_(module.bias.data, 0)
return weight_init
| 840 | 0 | 26 |
b466df893a4e4a3cb1fb1b8e8791cc401f2a509f | 1,261 | py | Python | scripting/urls.py | NicolasKiely/percms | dbfae2406a9ea79c273197d96c5b0e70010ad114 | [
"MIT"
] | null | null | null | scripting/urls.py | NicolasKiely/percms | dbfae2406a9ea79c273197d96c5b0e70010ad114 | [
"MIT"
] | 9 | 2016-09-15T05:12:36.000Z | 2016-10-27T21:38:40.000Z | scripting/urls.py | NicolasKiely/percms | dbfae2406a9ea79c273197d96c5b0e70010ad114 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views, script_views
from . import dashboard
from common.dashboard import dashboard_view_closure
urlpatterns = [
url(r'^dashboard/$', views.dashboard, name='dashboard'),
# Script pages
dashboard.Script_Dashboard.url_view_dashboard(r'script/dashboard/$'),
dashboard.Script_Dashboard.url_view_editor(r'script/editor/(?P<pk>\d+)/\w*$'),
dashboard.Script_Dashboard.url_view_public(r'script/view/(?P<pk>\d+)/\w*$'),
dashboard.Script_Dashboard.url_post_add(r'^script/add/$'),
dashboard.Script_Dashboard.url_post_edit(r'script/edit/$'),
dashboard.Script_Dashboard.url_post_delete(r'^script/delete/$'),
url(r'^script/test_run/$',
dashboard_view_closure(dashboard.Script_Dashboard, script_views.test_run),
name='test_run'
),
# Source pages
url(r'^script/source/commit/$', script_views.commit, name='commit'),
dashboard.Source_Dashboard.url_view_editor(r'^source/editor/(?P<pk>\d+)/'),
dashboard.Source_Dashboard.url_view_public(r'^source/view/(?P<pk>\d+)/'),
dashboard.Source_Dashboard.url_post_edit(r'^source/edit/$'),
dashboard.Source_Dashboard.url_post_delete(r'^source/delete/$')
] + \
dashboard.Log_Dashboard.create_standard_urls()
| 43.482759 | 82 | 0.727994 | from django.conf.urls import url
from . import views, script_views
from . import dashboard
from common.dashboard import dashboard_view_closure
urlpatterns = [
url(r'^dashboard/$', views.dashboard, name='dashboard'),
# Script pages
dashboard.Script_Dashboard.url_view_dashboard(r'script/dashboard/$'),
dashboard.Script_Dashboard.url_view_editor(r'script/editor/(?P<pk>\d+)/\w*$'),
dashboard.Script_Dashboard.url_view_public(r'script/view/(?P<pk>\d+)/\w*$'),
dashboard.Script_Dashboard.url_post_add(r'^script/add/$'),
dashboard.Script_Dashboard.url_post_edit(r'script/edit/$'),
dashboard.Script_Dashboard.url_post_delete(r'^script/delete/$'),
url(r'^script/test_run/$',
dashboard_view_closure(dashboard.Script_Dashboard, script_views.test_run),
name='test_run'
),
# Source pages
url(r'^script/source/commit/$', script_views.commit, name='commit'),
dashboard.Source_Dashboard.url_view_editor(r'^source/editor/(?P<pk>\d+)/'),
dashboard.Source_Dashboard.url_view_public(r'^source/view/(?P<pk>\d+)/'),
dashboard.Source_Dashboard.url_post_edit(r'^source/edit/$'),
dashboard.Source_Dashboard.url_post_delete(r'^source/delete/$')
] + \
dashboard.Log_Dashboard.create_standard_urls()
| 0 | 0 | 0 |
7222c5d03abd02d0230977a7149537014a1cdd54 | 637 | py | Python | my_notes/lesrn_meta_class.py | Zhang-Jinlei/one-python-craftsman | bceee25c8e1b44b54f6cc7a73ee1353aa59299fa | [
"Apache-2.0"
] | null | null | null | my_notes/lesrn_meta_class.py | Zhang-Jinlei/one-python-craftsman | bceee25c8e1b44b54f6cc7a73ee1353aa59299fa | [
"Apache-2.0"
] | null | null | null | my_notes/lesrn_meta_class.py | Zhang-Jinlei/one-python-craftsman | bceee25c8e1b44b54f6cc7a73ee1353aa59299fa | [
"Apache-2.0"
] | null | null | null |
my_object = ObjectCreator()
print(my_object)
print(ObjectCreator)
echo(ObjectCreator)
print(hasattr(ObjectCreator, 'new_attr'))
ObjectCreator.new_attr = 'foo'
print(hasattr(ObjectCreator, 'new_attr'))
print(ObjectCreator.new_attr)
ObjectCreatorMirror = ObjectCreator
print(ObjectCreatorMirror.new_attr)
print(ObjectCreatorMirror())
MyClass = choose_class('foo')
print(MyClass)
print(MyClass())
| 15.536585 | 41 | 0.686028 | class ObjectCreator(object):
pass
my_object = ObjectCreator()
print(my_object)
print(ObjectCreator)
def echo(o):
print(o)
echo(ObjectCreator)
print(hasattr(ObjectCreator, 'new_attr'))
ObjectCreator.new_attr = 'foo'
print(hasattr(ObjectCreator, 'new_attr'))
print(ObjectCreator.new_attr)
ObjectCreatorMirror = ObjectCreator
print(ObjectCreatorMirror.new_attr)
print(ObjectCreatorMirror())
def choose_class(name):
if name == 'foo':
class Foo:
pass
return Foo
else:
class Bar:
pass
return Bar
MyClass = choose_class('foo')
print(MyClass)
print(MyClass())
| 150 | 16 | 68 |
bd6637b77a4c6556d9dc546fd0a4aef9c92b8a41 | 2,579 | py | Python | bot/feed_dao.py | Yunato/extract-feed-bot | aa2b4a5cb135741cef7abd2faa01e56588fc2a4c | [
"MIT"
] | null | null | null | bot/feed_dao.py | Yunato/extract-feed-bot | aa2b4a5cb135741cef7abd2faa01e56588fc2a4c | [
"MIT"
] | null | null | null | bot/feed_dao.py | Yunato/extract-feed-bot | aa2b4a5cb135741cef7abd2faa01e56588fc2a4c | [
"MIT"
] | null | null | null | import psycopg2
import pytz
from datetime import datetime
from bot.dao import Dao
from bot.feed import Feed
| 42.983333 | 175 | 0.564172 | import psycopg2
import pytz
from datetime import datetime
from bot.dao import Dao
from bot.feed import Feed
class FeedDao(Dao):
# TABLE_INFO = {"name": "feed", "param1": "title", "param2": "link", "param3": "source", "param4": "time", "param5": "summary", "param6": "category"}
TABLE_INFO = {"name": "feed", "param1": "title", "param2": "link", "param3": "source", "param4": "time", "param5": "summary"}
def __init__(self):
super().__init__(FeedDao.TABLE_INFO)
print(self._con)
def get_count(self):
return super()._get_count(FeedDao.TABLE_INFO["name"])
def add_feed(self, feed):
keys = list(FeedDao.TABLE_INFO.keys())
param = FeedDao.TABLE_INFO[keys[1]]
for index in range(len(keys) - 2):
param += (", " + FeedDao.TABLE_INFO[keys[index + 2]])
with self._con.cursor() as cur:
# cur.execute(f"INSERT INTO {FeedDao.TABLE_INFO['name']} ({param}) VALUES (%s, %s, %s, %s, %s, %s);",
# (feed.title, feed.link, feed.source, feed.time, feed.summary, feed.category))
cur.execute(f"INSERT INTO {FeedDao.TABLE_INFO['name']} ({param}) VALUES (%s, %s, %s, %s, %s);",
(feed.title, feed.link, feed.source, feed.time, feed.summary))
def get_feeds(self):
with self._con.cursor() as cur:
cur.execute(f"SELECT * FROM {FeedDao.TABLE_INFO['name']};")
feeds = cur.fetchall()
rtn = []
for feed in feeds:
title = feed[1]
link = feed[2]
source = feed[3]
time = datetime.strptime(feed[4], '%Y/%m/%d %H:%M:%S')
summary = feed[5]
# category = feed[6]
# rtn.append(Feed(title, link, source, time, summary, category))
rtn.append(Feed(title, link, source, time, summary))
return rtn
def delete_all(self):
with self._con.cursor() as cur:
cur.execute(f"DELETE FROM {FeedDao.TABLE_INFO['name']}")
def get_latest_time(self, src):
with self._con.cursor() as cur:
cur.execute(f"SELECT {FeedDao.TABLE_INFO['param4']} FROM {FeedDao.TABLE_INFO['name']} WHERE {FeedDao.TABLE_INFO['param3']} = %s ORDER BY id DESC LIMIT 1;", (src,))
times = cur.fetchall()
if(len(times) != 0):
time = datetime.strptime(times[0][0], '%Y/%m/%d %H:%M:%S').replace(tzinfo=pytz.timezone("Asia/Tokyo"))
else:
time = datetime.min.replace(tzinfo=pytz.timezone("Asia/Tokyo"))
return time
| 1,992 | 455 | 23 |
8160911908d965c261f88718994cc4e3308e0ae3 | 401 | py | Python | scripts/jsonWorker.py | Ivanco21/Waves-assets-analytics | 9a27684d5e0a5037dee74f11559c8f72459b8903 | [
"MIT"
] | null | null | null | scripts/jsonWorker.py | Ivanco21/Waves-assets-analytics | 9a27684d5e0a5037dee74f11559c8f72459b8903 | [
"MIT"
] | null | null | null | scripts/jsonWorker.py | Ivanco21/Waves-assets-analytics | 9a27684d5e0a5037dee74f11559c8f72459b8903 | [
"MIT"
] | 1 | 2019-08-25T04:10:46.000Z | 2019-08-25T04:10:46.000Z | # -*- coding: utf-8 -*-
import json
| 20.05 | 58 | 0.548628 | # -*- coding: utf-8 -*-
import json
def jsonRead (jsonPth):
try:
with open(jsonPth, 'r', encoding = 'utf-8') as fh:
information = json.load(fh)
return information
except (IOError, Exception) as e:
print(e)
def getOneTag (json,tag):
allTags= []
for n in json:
oneTag = n[tag]
allTags.append(oneTag)
return allTags
| 310 | 0 | 54 |
f166c2abfa12c500499dfc7be1d1a9ba14d228d0 | 1,935 | py | Python | fileMaster.py | bison--/diskWriter | 14d525874f09892f3b633d51fc337e6be2c52c25 | [
"MIT"
] | 1 | 2019-02-15T09:55:51.000Z | 2019-02-15T09:55:51.000Z | fileMaster.py | bison--/diskWriter | 14d525874f09892f3b633d51fc337e6be2c52c25 | [
"MIT"
] | null | null | null | fileMaster.py | bison--/diskWriter | 14d525874f09892f3b633d51fc337e6be2c52c25 | [
"MIT"
] | null | null | null | import os
__author__ = 'bison'
| 35.833333 | 118 | 0.634625 | import os
__author__ = 'bison'
class FileMaster:
def __init__(self, file_path=''):
self.is_closed = False
self.file_path = file_path
self.file_handle = open(file_path, 'wb+')
self.chunk_size = 4096
self.file_size = 0
def write_bytes(self, bytes_to_write, byte_value=0):
if bytes_to_write > self.file_size:
raise ValueError('Data to write is bigger than file size {0}/{1}'.format(bytes_to_write, self.file_size))
if self.is_closed:
raise IOError('File has been closed!')
written_bytes = 0
fixed_byte_array = [byte_value] * self.chunk_size
while written_bytes < bytes_to_write:
bytes_written = 0
byte_array_to_write = fixed_byte_array
theoretical_new_size = written_bytes + self.chunk_size
if theoretical_new_size > bytes_to_write:
bytes_written = bytes_to_write - written_bytes
byte_array_to_write = [byte_value] * bytes_to_write
else:
bytes_written = self.chunk_size
self.file_handle.write(bytearray(byte_array_to_write))
written_bytes += bytes_written
self.file_handle.flush()
return written_bytes
def create_size(self, file_size, byte_value=0, enforce=False):
if file_size > self.file_size or enforce:
self.file_size = file_size
self.write_bytes(file_size, byte_value)
def write_to_start(self, bytes_to_write, byte_value=0):
self.file_handle.seek(0)
self.write_bytes(bytes_to_write, byte_value)
def write_to_end(self, bytes_to_write, byte_value=0):
self.file_handle.seek(self.file_size - bytes_to_write)
self.write_bytes(bytes_to_write, byte_value)
def close(self):
self.is_closed = True
self.file_handle.close()
| 1,705 | -4 | 199 |
ba97b568e444a0d717c876d3df8c7bd68e600c8a | 300 | py | Python | pyday_alarms/models.py | 6desislava6/PyDay | e43d05ac83297a16c12fef6b55c00df2571e3e81 | [
"MIT"
] | 2 | 2016-07-09T08:23:51.000Z | 2016-07-14T14:50:42.000Z | pyday_alarms/models.py | 6desislava6/PyDay | e43d05ac83297a16c12fef6b55c00df2571e3e81 | [
"MIT"
] | null | null | null | pyday_alarms/models.py | 6desislava6/PyDay | e43d05ac83297a16c12fef6b55c00df2571e3e81 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 25 | 68 | 0.69 | from django.db import models
class Alarm(models.Model):
user = models.ForeignKey(
'pyday_social_network.PyDayUser',
on_delete=models.CASCADE,
)
date = models.DateTimeField()
message = models.TextField(blank=True, default="PyDay :) PyDay")
# Create your models here.
| 0 | 220 | 23 |
8c2e763e6c0dfbd895715e1a102ef9c84c739a12 | 1,438 | py | Python | lambdas/CreateTicketTemplate/index.py | DocFlowIFE/Deployment | 6dd868571f25b1e04db9b46acec80b0a9889311c | [
"MIT"
] | null | null | null | lambdas/CreateTicketTemplate/index.py | DocFlowIFE/Deployment | 6dd868571f25b1e04db9b46acec80b0a9889311c | [
"MIT"
] | null | null | null | lambdas/CreateTicketTemplate/index.py | DocFlowIFE/Deployment | 6dd868571f25b1e04db9b46acec80b0a9889311c | [
"MIT"
] | null | null | null | import json
import logging
import os
import boto3
import database
from database import Ticket, TicketTemplate, User
logger = logging.getLogger(__name__)
BUCKET_NAME = os.environ['BUCKET_NAME']
| 31.26087 | 114 | 0.596662 | import json
import logging
import os
import boto3
import database
from database import Ticket, TicketTemplate, User
logger = logging.getLogger(__name__)
BUCKET_NAME = os.environ['BUCKET_NAME']
def handler(event, context):
parameters = json.loads(event['body'])
dbs = database.initialize_database()
users = []
for username in parameters['users']:
queried_user = dbs.query(User).filter(User.email == username).first()
if not queried_user:
return 400
users.append(queried_user)
ticket_template = TicketTemplate(users=users, filename=parameters['filename'], title=parameters['title'],
description=parameters['description'])
dbs.add(ticket_template)
dbs.commit()
s3_client = boto3.client('s3')
file_link = s3_client.generate_presigned_post(BUCKET_NAME,
f'{ticket_template.ticket_template_id}/{parameters["filename"]}',
ExpiresIn=3600)
print(file_link)
return {
"statusCode": 200,
'headers': {
'Access-Control-Allow-Headers': '*',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': '*'
},
"body": json.dumps({
"ticketTemplateId": ticket_template.ticket_template_id,
"fileUploadLink": file_link,
}),
}
| 1,218 | 0 | 23 |
9bd776f3e576c85cadabfba07291b9d73ad42aa5 | 5,726 | py | Python | migrations/versions/c0732331a3c8_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 4 | 2017-05-11T14:50:32.000Z | 2020-01-10T09:02:27.000Z | migrations/versions/c0732331a3c8_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 145 | 2017-04-07T11:01:58.000Z | 2019-12-11T15:30:23.000Z | migrations/versions/c0732331a3c8_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 3 | 2017-10-25T12:36:16.000Z | 2018-04-26T08:49:34.000Z | """Initial commit.
May be necessary to ghost this in production or do a manual migrate.
SQLite seems to be stupid and require an explicit name for each constraint
and it just won't take an empty name like all real databases.
Revision ID: c0732331a3c8
Revises: None
Create Date: 2016-09-28 15:34:30.135146
"""
# revision identifiers, used by Alembic.
revision = 'c0732331a3c8'
down_revision = None
from alembic import op
import sqlalchemy as sa
| 45.444444 | 116 | 0.685994 | """Initial commit.
May be necessary to ghost this in production or do a manual migrate.
SQLite seems to be stupid and require an explicit name for each constraint
and it just won't take an empty name like all real databases.
Revision ID: c0732331a3c8
Revises: None
Create Date: 2016-09-28 15:34:30.135146
"""
# revision identifiers, used by Alembic.
revision = 'c0732331a3c8'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('locks',
sa.Column('lock_id', sa.String(length=64), nullable=False),
sa.Column('acquired_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('lock_id', name=op.f('pk_locks')),
sa.UniqueConstraint('lock_id', name=op.f('uq_locks_lock_id'))
)
op.create_table('notifications',
sa.Column('id', sa.String(length=32), nullable=False),
sa.Column('broadcasted', sa.DateTime(), nullable=True),
sa.Column('subject', sa.String(length=255), nullable=True),
sa.Column('message', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_notifications'))
)
op.create_table('plugins',
sa.Column('id', sa.String(length=32), nullable=False),
sa.Column('name', sa.String(length=32), nullable=True),
sa.Column('schema', sa.Text(), nullable=True),
sa.Column('form', sa.Text(), nullable=True),
sa.Column('model', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_plugins'))
)
op.create_table('users',
sa.Column('id', sa.String(length=32), nullable=False),
sa.Column('email', sa.String(length=128), nullable=True),
sa.Column('password', sa.String(length=100), nullable=True),
sa.Column('is_admin', sa.Boolean(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('is_deleted', sa.Boolean(), nullable=True),
sa.Column('is_blocked', sa.Boolean(), nullable=True),
sa.Column('credits_quota', sa.Float(), nullable=True),
sa.Column('latest_seen_notification_ts', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_users')),
sa.UniqueConstraint('email', name=op.f('uq_users_email'))
)
op.create_table('variables',
sa.Column('id', sa.String(length=32), nullable=False),
sa.Column('key', sa.String(length=512), nullable=True),
sa.Column('value', sa.String(length=512), nullable=True),
sa.Column('readonly', sa.Boolean(), nullable=True),
sa.Column('t', sa.String(length=16), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_variables')),
sa.UniqueConstraint('key', name=op.f('uq_variables_key'))
)
op.create_table('activation_tokens',
sa.Column('token', sa.String(length=32), nullable=False),
sa.Column('user_id', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_activation_tokens_user_id_users')),
sa.PrimaryKeyConstraint('token', name=op.f('pk_activation_tokens'))
)
op.create_table('blueprints',
sa.Column('id', sa.String(length=32), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('config', sa.Text(), nullable=True),
sa.Column('is_enabled', sa.Boolean(), nullable=True),
sa.Column('plugin', sa.String(length=32), nullable=True),
sa.Column('maximum_lifetime', sa.Integer(), nullable=True),
sa.Column('preallocated_credits', sa.Boolean(), nullable=True),
sa.Column('cost_multiplier', sa.Float(), nullable=True),
sa.ForeignKeyConstraint(['plugin'], ['plugins.id'], name=op.f('fk_blueprints_plugin_plugins')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_blueprints'))
)
op.create_table('keypairs',
sa.Column('id', sa.String(length=32), nullable=False),
sa.Column('user_id', sa.String(length=32), nullable=True),
sa.Column('_public_key', sa.String(length=450), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_keypairs_user_id_users')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_keypairs'))
)
op.create_table('instances',
sa.Column('id', sa.String(length=32), nullable=False),
sa.Column('user_id', sa.String(length=32), nullable=True),
sa.Column('blueprint_id', sa.String(length=32), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('public_ip', sa.String(length=64), nullable=True),
sa.Column('client_ip', sa.String(length=64), nullable=True),
sa.Column('provisioned_at', sa.DateTime(), nullable=True),
sa.Column('deprovisioned_at', sa.DateTime(), nullable=True),
sa.Column('errored', sa.Boolean(), nullable=True),
sa.Column('state', sa.String(length=32), nullable=True),
sa.Column('to_be_deleted', sa.Boolean(), nullable=True),
sa.Column('error_msg', sa.String(length=256), nullable=True),
sa.Column('instance_data', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['blueprint_id'], ['blueprints.id'], name=op.f('fk_instances_blueprint_id_blueprints')),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], name=op.f('fk_instances_user_id_users')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_instances')),
sa.UniqueConstraint('name', name=op.f('uq_instances_name'))
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('instances')
op.drop_table('keypairs')
op.drop_table('blueprints')
op.drop_table('activation_tokens')
op.drop_table('variables')
op.drop_table('users')
op.drop_table('plugins')
op.drop_table('notifications')
op.drop_table('locks')
### end Alembic commands ###
| 5,228 | 0 | 46 |
0d976a29158daf7be606b3d2d4de2d18162f369a | 11,624 | py | Python | seq2seq/bidirectional_dynamic_rnn_LSTM/seq2seq_bidirectional_dynamic_rnn_LSTM_real.py | zlpmichelle/crackingtensorflow | 66c3517b60c3793ef06f904e5d58e4d044628182 | [
"Apache-2.0"
] | 3 | 2017-10-19T23:41:26.000Z | 2019-10-22T08:59:35.000Z | seq2seq/bidirectional_dynamic_rnn_LSTM/seq2seq_bidirectional_dynamic_rnn_LSTM_real.py | zlpmichelle/crackingtensorflow | 66c3517b60c3793ef06f904e5d58e4d044628182 | [
"Apache-2.0"
] | null | null | null | seq2seq/bidirectional_dynamic_rnn_LSTM/seq2seq_bidirectional_dynamic_rnn_LSTM_real.py | zlpmichelle/crackingtensorflow | 66c3517b60c3793ef06f904e5d58e4d044628182 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorflow as tf
import helpers
tf.reset_default_graph()
sess = tf.InteractiveSession()
#Vocabulary size.
PAD = 0
EOS = 1
vocab_size = 10
input_embedding_size = 20 #character length
encoder_hidden_units = 20 #num neurons
decoder_hidden_units = encoder_hidden_units * 2
#in original paper, they used same number of neurons for both encoder and decoder, but we use twice
#as many so decoded output is different, the target value is the original input in this example
#input placeholders
encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
encoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_inputs_length')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
#randomly initialized embedding matrix that can fit input sequence
#used to convert sequences to vectors (embeddings) for both encoder and decoder of the right size
#reshaping is a thing, in TF you gotta make sure you tensors are the right shape (num dimensions)
embeddings = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
#####API LOCATION ERROR HERE
from tensorflow.python.ops.rnn_cell import LSTMCell, LSTMStateTuple
#from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import LSTMCell, LSTMStateTuple
encoder_cell_fw = LSTMCell(encoder_hidden_units)
encoder_cell_bw = LSTMCell(encoder_hidden_units)
((encoder_fw_outputs,encoder_bw_outputs),(encoder_fw_final_state,encoder_bw_final_state)) = (
tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell_fw,
cell_bw=encoder_cell_bw,
inputs=encoder_inputs_embedded,
sequence_length=encoder_inputs_length,
dtype=tf.float32, time_major=True)
)
#Concatenates tensors along one dimension.
##############print("? = ",encoder_bw_outputs)
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
#letters h and c are commonly used to denote "output value" and "cell state".
#http://colah.github.io/posts/2015-08-Understanding-LSTMs/
#Those tensors represent combined internal state of the cell, and should be passed together.
encoder_final_state_c = tf.concat(
(encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat(
(encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
#TF Tuple used by LSTM Cells for state_size, zero_state, and output state.
encoder_final_state = LSTMStateTuple(
c=encoder_final_state_c,
h=encoder_final_state_h
)
decoder_cell = LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_inputs))
decoder_lengths = encoder_inputs_length + 3
# +2 additional steps, +1 leading <EOS> token for decoder inputs
#manually specifying since we are going to implement attention details for the decoder in a sec
#weights
W = tf.Variable(tf.random_uniform([decoder_hidden_units, vocab_size], -1, 1), dtype=tf.float32)
#bias
b = tf.Variable(tf.zeros([vocab_size]), dtype=tf.float32)
#create padded inputs for the decoder from the word embeddings
#were telling the program to tests a condition, and trigger an error if the condition is false.
assert EOS == 1 and PAD == 0
eos_time_slice = tf.ones([batch_size], dtype=tf.int32, name='EOS')
pad_time_slice = tf.zeros([batch_size], dtype=tf.int32, name='PAD')
#retrieves rows of the params tensor. The behavior is similar to using indexing with arrays in numpy
eos_step_embedded = tf.nn.embedding_lookup(embeddings, eos_time_slice)
pad_step_embedded = tf.nn.embedding_lookup(embeddings, pad_time_slice)
#manually specifying loop function through time - to get initial cell state and input to RNN
#normally we'd just use dynamic_rnn, but lets get detailed here with raw_rnn
#we define and return these values, no operations occur here
#attention mechanism --choose which previously generated token to pass as input in the next timestep
#Creates an RNN specified by RNNCell cell and loop function loop_fn.
#This function is a more primitive version of dynamic_rnn that provides more direct access to the
#inputs each iteration. It also provides more control over when to start and finish reading the sequence,
#and what to emit for the output.
#ta = tensor array
decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(decoder_cell, loop_fn)
decoder_outputs = decoder_outputs_ta.stack()
decoder_outputs
#to convert output to human readable prediction
#we will reshape output tensor
#Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
#reduces dimensionality
decoder_max_steps, decoder_batch_size, decoder_dim = tf.unstack(tf.shape(decoder_outputs))
#flettened output tensor
decoder_outputs_flat = tf.reshape(decoder_outputs, (-1, decoder_dim))
#pass flattened tensor through decoder
decoder_logits_flat = tf.add(tf.matmul(decoder_outputs_flat, W), b)
#prediction vals
decoder_logits = tf.reshape(decoder_logits_flat, (decoder_max_steps, decoder_batch_size, vocab_size))
#final prediction
decoder_prediction = tf.argmax(decoder_logits, 2)
#cross entropy loss
#one hot encode the target values so we don't rank just differentiate
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
logits=decoder_logits,
)
#loss function
loss = tf.reduce_mean(stepwise_cross_entropy)
#train it
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
batch_size = 2
# batches = helpers.random_sequences(length_from=3, length_to=8, vocab_lower=2, vocab_upper=10, batch_size=batch_size)
train_features_batches = helpers.loadDataFile("../MNIST_data/train_features.txt")
train_labels_batches = helpers.loadDataFile("../MNIST_data/train_labels.txt")
print("train_features_batches:", train_features_batches)
print('head of the train_features_batches:')
for seq in next(train_features_batches.__iter__())[:10]:
print(seq)
print("train_labels_batches:", train_features_batches)
print('head of the train_labels_batches:')
for seq in next(train_labels_batches.__iter__())[:10]:
print(seq)
#############
loss_track = []
max_batches = 30
batches_in_epoch = 10
import time
try:
start = time.time()
config = tf.ConfigProto()
config.gpu_options.allocator_type = 'BFC'
config.gpu_options.per_process_gpu_memory_fraction = 0.90
linesofar = 0
for batch in range(max_batches):
train_features_batches = helpers.next_batch_k("../MNIST_data/features.txt", batch_size, linesofar)
train_labels_batches = helpers.next_batch_k("../MNIST_data/labels.txt", batch_size, linesofar)
fd = next_feed(train_features_batches, train_labels_batches)
_, l = sess.run([train_op, loss], fd)
loss_track.append(l)
linesofar += batch_size
if batch == 0 or batch % batches_in_epoch == 0:
print('batch {}'.format(batch))
if(batch != 0):
print("Time used: ", time.time() - start)
print(' minibatch loss: {}'.format(sess.run(loss, fd)))
predict_ = sess.run(decoder_prediction, fd)
for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].T, predict_.T)):
print(' sample {}:'.format(i + 1))
print(' input > {}'.format(inp))
print(' predicted > {}'.format(pred))
if i >= 2:
break
print()
start = time.time()
print("Optimization Finished!")
# start prediction
start = time.time()
test_features_batches = helpers.loadDataFile("../MNIST_data/test_features.txt")
test_labels_batches = helpers.loadDataFile("../MNIST_data/test_labels.txt")
test_fd = next_feed(test_features_batches, test_labels_batches)
print("Time used: ", time.time() - start)
print(' tests batch loss: {}'.format(sess.run(loss, test_fd)))
test_predict_ = sess.run(decoder_prediction, test_fd)
for i, (inp, pred) in enumerate(zip(test_fd[encoder_inputs].T, test_predict_.T)):
print(' sample {}:'.format(i + 1))
print(' input > {}'.format(inp))
print(' predicted > {}'.format(pred))
if i >= 2:
break
print()
print("Prediction Finished!")
except KeyboardInterrupt:
print('training interrupted')
| 40.785965 | 118 | 0.721696 | import numpy as np
import tensorflow as tf
import helpers
tf.reset_default_graph()
sess = tf.InteractiveSession()
#Vocabulary size.
PAD = 0
EOS = 1
vocab_size = 10
input_embedding_size = 20 #character length
encoder_hidden_units = 20 #num neurons
decoder_hidden_units = encoder_hidden_units * 2
#in original paper, they used same number of neurons for both encoder and decoder, but we use twice
#as many so decoded output is different, the target value is the original input in this example
#input placeholders
encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
encoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_inputs_length')
# --- Seq2seq graph construction (TF1 static graph): placeholders, shared
# --- embedding table, and a bidirectional LSTM encoder whose forward and
# --- backward outputs/states are concatenated for the decoder.
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
#randomly initialized embedding matrix that can fit input sequence
#used to convert sequences to vectors (embeddings) for both encoder and decoder of the right size
#reshaping is a thing, in TF you gotta make sure you tensors are the right shape (num dimensions)
embeddings = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
#####API LOCATION ERROR HERE
# NOTE(review): LSTMCell/LSTMStateTuple moved between TensorFlow releases;
# this import path must match the installed TF version — confirm.
from tensorflow.python.ops.rnn_cell import LSTMCell, LSTMStateTuple
#from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import LSTMCell, LSTMStateTuple
encoder_cell_fw = LSTMCell(encoder_hidden_units)
encoder_cell_bw = LSTMCell(encoder_hidden_units)
# time_major=True: tensors are laid out [max_time, batch, depth].
((encoder_fw_outputs,encoder_bw_outputs),(encoder_fw_final_state,encoder_bw_final_state)) = (
    tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell_fw,
                                    cell_bw=encoder_cell_bw,
                                    inputs=encoder_inputs_embedded,
                                    sequence_length=encoder_inputs_length,
                                    dtype=tf.float32, time_major=True)
    )
#Concatenates tensors along one dimension.
##############print("? = ",encoder_bw_outputs)
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
#letters h and c are commonly used to denote "output value" and "cell state".
#http://colah.github.io/posts/2015-08-Understanding-LSTMs/
#Those tensors represent combined internal state of the cell, and should be passed together.
encoder_final_state_c = tf.concat(
    (encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat(
    (encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
#TF Tuple used by LSTM Cells for state_size, zero_state, and output state.
encoder_final_state = LSTMStateTuple(
    c=encoder_final_state_c,
    h=encoder_final_state_h
)
# The decoder starts from the concatenated fw+bw encoder state, so
# decoder_hidden_units is presumably 2 * encoder_hidden_units where it is
# defined earlier in the file — TODO confirm.
decoder_cell = LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_inputs))
decoder_lengths = encoder_inputs_length + 3
# +2 additional steps, +1 leading <EOS> token for decoder inputs
#manually specifying since we are going to implement attention details for the decoder in a sec
#weights
W = tf.Variable(tf.random_uniform([decoder_hidden_units, vocab_size], -1, 1), dtype=tf.float32)
#bias
b = tf.Variable(tf.zeros([vocab_size]), dtype=tf.float32)
#create padded inputs for the decoder from the word embeddings
#were telling the program to tests a condition, and trigger an error if the condition is false.
assert EOS == 1 and PAD == 0
eos_time_slice = tf.ones([batch_size], dtype=tf.int32, name='EOS')
pad_time_slice = tf.zeros([batch_size], dtype=tf.int32, name='PAD')
#retrieves rows of the params tensor. The behavior is similar to using indexing with arrays in numpy
eos_step_embedded = tf.nn.embedding_lookup(embeddings, eos_time_slice)
pad_step_embedded = tf.nn.embedding_lookup(embeddings, pad_time_slice)
#manually specifying loop function through time - to get initial cell state and input to RNN
#normally we'd just use dynamic_rnn, but lets get detailed here with raw_rnn
#we define and return these values, no operations occur here
def loop_fn_initial():
    """Build the time-step-0 state for tf.nn.raw_rnn's loop function.

    Returns the 5-tuple (elements_finished, next_input, cell_state,
    emit_output, loop_state) that raw_rnn expects: no sequence is finished
    yet, the first decoder input is the embedded <EOS> token, and the cell
    starts from the encoder's final state.
    """
    # 0 >= decoder_lengths is all-False unless a sequence has length 0.
    finished = (0 >= decoder_lengths)
    # emit_output and loop_state are None at step 0 by raw_rnn convention.
    return (finished, eos_step_embedded, encoder_final_state, None, None)
#attention mechanism --choose which previously generated token to pass as input in the next timestep
def loop_fn_transition(time, previous_output, previous_state, previous_loop_state):
    """raw_rnn loop callback for every step after the first.

    Greedy decoding: projects the previous cell output through (W, b),
    takes the argmax token, embeds it, and feeds it back as the next
    decoder input. Once every sequence in the batch has run for its
    decoder_lengths steps, the embedded <PAD> token is fed instead.
    """
    def get_next_input():
        #dot product between previous ouput and weights, then + biases
        output_logits = tf.add(tf.matmul(previous_output, W), b)
        #Logits simply means that the function operates on the unscaled output of
        #earlier layers and that the relative scale to understand the units is linear.
        #It means, in particular, the sum of the inputs may not equal 1, that the values are not probabilities
        #(you might have an input of 5).
        #prediction value at current time step
        #Returns the index with the largest value across axes of a tensor.
        #This is Attention!!!!
        prediction = tf.argmax(output_logits, axis=1)
        #embed prediction for the next input
        next_input = tf.nn.embedding_lookup(embeddings, prediction)
        return next_input
    elements_finished = (time >= decoder_lengths) # this operation produces boolean tensor of [batch_size]
    # defining if corresponding sequence has ended
    #Computes the "logical and" of elements across dimensions of a tensor.
    finished = tf.reduce_all(elements_finished) # -> boolean scalar
    #Return either fn1() or fn2() based on the boolean predicate pred.
    # NOTE: `input` shadows the builtin; kept as-is to preserve bytes.
    input = tf.cond(finished, lambda: pad_step_embedded, get_next_input)
    #set previous to current
    state = previous_state
    output = previous_output
    loop_state = None
    return (elements_finished,
            input,
            state,
            output,
            loop_state)
def loop_fn(time, previous_output, previous_state, previous_loop_state):
    """Dispatch raw_rnn's loop callback to the initial- or transition-step handler."""
    # raw_rnn signals the very first call (time == 0) with a None state.
    if previous_state is not None:
        return loop_fn_transition(time, previous_output, previous_state, previous_loop_state)
    assert previous_output is None and previous_state is None
    return loop_fn_initial()
#Creates an RNN specified by RNNCell cell and loop function loop_fn.
#This function is a more primitive version of dynamic_rnn that provides more direct access to the
#inputs each iteration. It also provides more control over when to start and finish reading the sequence,
#and what to emit for the output.
#ta = tensor array
decoder_outputs_ta, decoder_final_state, _ = tf.nn.raw_rnn(decoder_cell, loop_fn)
decoder_outputs = decoder_outputs_ta.stack()
# NOTE(review): bare expression below only echoes the tensor in a notebook;
# it is a no-op when run as a script.
decoder_outputs
#to convert output to human readable prediction
#we will reshape output tensor
#Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
#reduces dimensionality
decoder_max_steps, decoder_batch_size, decoder_dim = tf.unstack(tf.shape(decoder_outputs))
#flettened output tensor
decoder_outputs_flat = tf.reshape(decoder_outputs, (-1, decoder_dim))
#pass flattened tensor through decoder
decoder_logits_flat = tf.add(tf.matmul(decoder_outputs_flat, W), b)
#prediction vals
decoder_logits = tf.reshape(decoder_logits_flat, (decoder_max_steps, decoder_batch_size, vocab_size))
#final prediction
decoder_prediction = tf.argmax(decoder_logits, 2)
#cross entropy loss
#one hot encode the target values so we don't rank just differentiate
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
    logits=decoder_logits,
)
#loss function
loss = tf.reduce_mean(stepwise_cross_entropy)
#train it
train_op = tf.train.AdamOptimizer().minimize(loss)
sess.run(tf.global_variables_initializer())
# Rebinds batch_size (a tensor above) to a plain Python int for data loading;
# the graph nodes built earlier keep their reference to the tensor version.
batch_size = 2
# batches = helpers.random_sequences(length_from=3, length_to=8, vocab_lower=2, vocab_upper=10, batch_size=batch_size)
train_features_batches = helpers.loadDataFile("../MNIST_data/train_features.txt")
train_labels_batches = helpers.loadDataFile("../MNIST_data/train_labels.txt")
print("train_features_batches:", train_features_batches)
print('head of the train_features_batches:')
for seq in next(train_features_batches.__iter__())[:10]:
    print(seq)
# Bug fix: this diagnostic previously printed train_features_batches under
# the labels heading; it now prints the labels it claims to show.
print("train_labels_batches:", train_labels_batches)
print('head of the train_labels_batches:')
for seq in next(train_labels_batches.__iter__())[:10]:
    print(seq)
#############
def next_feed(train_features_batches, train_labels_batches):
    """Build a feed_dict for one seq2seq step.

    Features are batched/padded as encoder input; each label sequence is
    extended with one <EOS> and two <PAD> steps so targets line up with
    decoder_lengths (encoder length + 3) before batching.
    """
    batched_inputs, batched_lengths = helpers.batch(train_features_batches)
    padded_labels = [seq + [EOS] + [PAD] * 2 for seq in train_labels_batches]
    batched_targets, _ = helpers.batch(padded_labels)
    return {
        encoder_inputs: batched_inputs,
        encoder_inputs_length: batched_lengths,
        decoder_targets: batched_targets,
    }
# --- Training / evaluation driver: streams batches from disk, runs the
# --- optimizer, and periodically prints loss plus sample predictions.
loss_track = []
max_batches = 30
batches_in_epoch = 10
import time
try:
    start = time.time()
    # NOTE(review): this ConfigProto is built but never passed to a Session,
    # so the BFC allocator / 90% GPU-memory settings have no effect — confirm.
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    config.gpu_options.per_process_gpu_memory_fraction = 0.90
    # Rows consumed so far; advances the file cursor for next_batch_k.
    linesofar = 0
    for batch in range(max_batches):
        train_features_batches = helpers.next_batch_k("../MNIST_data/features.txt", batch_size, linesofar)
        train_labels_batches = helpers.next_batch_k("../MNIST_data/labels.txt", batch_size, linesofar)
        fd = next_feed(train_features_batches, train_labels_batches)
        _, l = sess.run([train_op, loss], fd)
        loss_track.append(l)
        linesofar += batch_size
        if batch == 0 or batch % batches_in_epoch == 0:
            print('batch {}'.format(batch))
            if(batch != 0):
                print("Time used: ", time.time() - start)
            print(' minibatch loss: {}'.format(sess.run(loss, fd)))
            predict_ = sess.run(decoder_prediction, fd)
            # Show at most 3 (input, prediction) pairs; .T flips to batch-major.
            for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].T, predict_.T)):
                print(' sample {}:'.format(i + 1))
                print(' input > {}'.format(inp))
                print(' predicted > {}'.format(pred))
                if i >= 2:
                    break
            print()
            start = time.time()
    print("Optimization Finished!")
    # start prediction
    start = time.time()
    test_features_batches = helpers.loadDataFile("../MNIST_data/test_features.txt")
    test_labels_batches = helpers.loadDataFile("../MNIST_data/test_labels.txt")
    test_fd = next_feed(test_features_batches, test_labels_batches)
    print("Time used: ", time.time() - start)
    print(' tests batch loss: {}'.format(sess.run(loss, test_fd)))
    test_predict_ = sess.run(decoder_prediction, test_fd)
    for i, (inp, pred) in enumerate(zip(test_fd[encoder_inputs].T, test_predict_.T)):
        print(' sample {}:'.format(i + 1))
        print(' input > {}'.format(inp))
        print(' predicted > {}'.format(pred))
        if i >= 2:
            break
    print()
    print("Prediction Finished!")
except KeyboardInterrupt:
    print('training interrupted')
| 2,960 | 0 | 90 |
31b90f1eb9469d0e7691c7f5c6d8176a3aa6f318 | 10,474 | py | Python | Buck-py/src/main.py | Pleasant-tech/Buck | cea392a88568cc8aed3c5a50d9719c017d8fa121 | [
"MIT"
] | 6 | 2021-02-14T15:04:12.000Z | 2021-11-04T09:52:29.000Z | Buck-py/src/main.py | Pleasant-tech/Buck | cea392a88568cc8aed3c5a50d9719c017d8fa121 | [
"MIT"
] | 1 | 2021-11-04T09:55:39.000Z | 2021-11-04T09:58:23.000Z | Buck-py/src/main.py | Pleasant-tech/Buck | cea392a88568cc8aed3c5a50d9719c017d8fa121 | [
"MIT"
] | null | null | null | import sys
import json
import os
import shlex
import pprint
import importlib.resources
import firebase_admin
from firebase_admin import credentials,firestore
with importlib.resources.path("src","serviceAccountKey.json") as fire_resource:
cred = credentials.Certificate(fire_resource)
firebase_admin.initialize_app(cred)
# Checks / Creates a local data.json file to store buckets.
with importlib.resources.path("src","main.py") as haar_resource:
file = os.path.abspath(haar_resource)
file = file[:-11]
file = file + "buck-data/"
if os.path.isdir(file):
i = 0
else:
os.mkdir(file)
data = file + "data.json"
f = open(data,"a+")
# Creates the Bucket class
# Interacts with local db
# Creates a New Bucket
#List out bucketsq
# Check if command is cd
# Runs commands if is_cd == True
#Run Commands From Bucket
#Add bucket from cloud
# deletes a bucket
# Main Function
| 25.484185 | 107 | 0.520145 | import sys
import json
import os
import shlex
import pprint
import importlib.resources
import firebase_admin
from firebase_admin import credentials,firestore
# Initialise Firebase from the bundled service-account key.
with importlib.resources.path("src","serviceAccountKey.json") as fire_resource:
    cred = credentials.Certificate(fire_resource)
    firebase_admin.initialize_app(cred)
# Checks / Creates a local data.json file to store buckets.
with importlib.resources.path("src","main.py") as haar_resource:
    file = os.path.abspath(haar_resource)
    # Strip the trailing "src/main.py" (11 chars) to reach the package root.
    file = file[:-11]
    file = file + "buck-data/"
if os.path.isdir(file):
    # No-op placeholder: directory already exists.
    i = 0
else:
    os.mkdir(file)
data = file + "data.json"
# NOTE(review): opened in "a+" only to ensure data.json exists; the handle is
# never used or closed — consider open(data, "a+").close().
f = open(data,"a+")
class Bucket:
    """A named bundle of shell commands triggered by one CLI token.

    Attributes:
        name: human-readable bucket name.
        executor: the token that triggers this bucket (``buck <executor>``).
        commandList: ordered shell commands; '$' marks an argument slot.
        description: free-form description shown in listings.
    """
    def __init__(self, name, executor, commandList, description):
        self.name = name
        self.executor = executor
        self.commandList = commandList
        self.description = description
    def __str__(self):
        # Bug fix: the previous implementation also formatted self.count, an
        # attribute never set anywhere, so str(bucket) raised AttributeError.
        return "{} {} {} {}".format(self.name, self.executor, self.commandList, self.description)
# Interacts with local db
def middleMan(arg,data):
    """Single gateway for all reads/writes of the local bucket store.

    arg: "r" -> return the raw contents of data.json;
         "a" -> append `data` JSON-encoded (with a trailing ", \\n") to data.json;
         "w" -> overwrite data.json with `data` in the same format;
         anything else -> return the path to data.json.
    Returns None for "a"/"w" and on error (errors are printed, not raised).
    """
    try :
        #Fetches data from data file
        with importlib.resources.path("src","main.py") as haar_resource:
            file = os.path.abspath(haar_resource)
            # Strip the trailing "src/main.py" (11 chars) to reach the package root.
            file = file[:-11]
            dataFilePath = file + "buck-data/data.json"
        if arg == "r":
            with open (dataFilePath, 'r') as f:
                data = f.read()
                # Redundant: the with-block closes the file anyway.
                f.close()
            return data
        elif arg == "a":
            data = json.dumps(data)
            with open(dataFilePath,"a") as f:
                data = '\n'+data+', \n'
                f.write(data)
                f.close()
        elif arg == "w":
            data = json.dumps(data)
            with open(dataFilePath,"w") as f:
                data = '\n'+data+', \n'
                f.write(data)
                f.close()
        else:
            return dataFilePath
    except FileNotFoundError:
        print(">> Cannot locate data file : " + dataFilePath )
    except Exception as e:
        # NOTE(review): broad catch hides the real failure; consider logging `e`.
        print (">> Error")
# Creates a New Bucket
def createBucket():
    """Interactively prompt for a new bucket and append it to the local store.

    Reads name, comma-separated commands, executor token and description from
    stdin, persists them via middleMan("a", ...), then prints a usage hint:
    buckets whose commands contain a '$' placeholder take an extra argument.
    """
    try :
        print(' >> Howdy! Create A New Bucket ')
        name = input("\n Name : ")
        print ('\n >> Seperate commands with a comma')
        preCmds = input (" Commands : ")
        cmds = preCmds.split(',')
        executor = str(input("\n Executor : "))
        detail = str(input("""\n Description : """))
        data = Bucket(name,executor,cmds,detail)
        # Load data object into a new object (spaghetti code❗)
        newData = {
            "name": data.name,
            "executor":data.executor,
            "buck_list":data.commandList,
            "description":data.description
        }
        middleMan("a",newData)
        # Sucess Message
        print('\n >> yay! it is done ')
        # terrible code that needs to be fixed.
        # Heuristic score: any '$' placeholder (+10000) outweighs every
        # plain command (-10), so one placeholder flips the usage hint.
        score = 0
        for i in data.commandList:
            if "$" in i :
                score += 10000
            if "$" not in i :
                score -= 10
            if score > 1000:
                print (f"\n >> Usage : 'buck {data.executor} [extra argument]' ")
                break
            elif score < 1000:
                print (f"\n >> Usage : 'buck {data.executor}' ")
    except KeyboardInterrupt:
        print("\n >> KeyboardInterrupt : Process terminated !")
#List out bucketsq
def listBucket(arg):
    """Print buckets from the local store, optionally filtered by name.

    With a third CLI argument (arg[2]) only buckets whose 'name' contains it
    are shown; otherwise the whole store is dumped. The store file holds
    comma-terminated JSON objects, so the trailing separator is trimmed and
    the text is wrapped into '{ "bucket": [...] }' before json.loads.
    """
    if len(arg) > 2:
        # fetch data from middleMan()
        data = middleMan("r","")
        if data :
            # Trim the trailing ", \n" (4 chars) or " \n" (3 chars) so the
            # spliced JSON array stays valid.
            if data[-4] == ",":
                data = data[:-4]
            else:
                data = data[:-3]
            otherData = '{ "bucket" : [' + data + ' ] } '
            data = json.loads(otherData)
            data = data['bucket']
            # Logic
            for i in data:
                response = i.get('name')
                if arg[2] in response:
                    if i:
                        print (' >> Here you go : \n')
                        print(json.dumps(i,indent=2))
                else:
                    print(">> no data")
        else:
            print(">> no data")
    else:
        # fetch data from middleMan()
        data = middleMan("r","")
        if data:
            # The empty trailing "{}" object absorbs the final comma.
            modifiedData = '{ "bucket" : [' + data + '{} ] } '
            #Coverts Data To Json
            jsonData = json.loads(modifiedData)
            # Renders Data
            print (' >> Here you go : \n')
            print(json.dumps(jsonData,indent=2))
        else:
            print(">> no data")
# Check if command is cd
def is_cd(command: str) -> bool:
    """Return True when the shell command's first token is ``cd``."""
    # shlex honours quoting, so `"cd" dir` and `cd dir` tokenise alike.
    return shlex.split(command)[0] == "cd"
# Runs commands if is_cd == True
def run_command(command: str) -> int:
    """Execute one shell command, handling ``cd`` in-process.

    ``cd`` cannot go through os.system (a child process cannot change this
    process's cwd), so it is translated to os.chdir. Returns 0 for a
    successful ``cd`` and the os.system exit status otherwise — the previous
    version was annotated ``-> int`` but always returned None.

    Raises:
        OSError (e.g. FileNotFoundError): if the ``cd`` target is invalid.
    """
    if is_cd(command):
        # Re-join everything after 'cd' so paths containing spaces survive.
        target = ' '.join(shlex.split(command)[1:])
        os.chdir(target)
        return 0
    return os.system(command)
#Run Commands From Bucket
def run(arg):
    """Execute the bucket whose executor matches arg[1].

    Loads the local store (trimming the trailing separator and wrapping it
    into a JSON array), finds the bucket containing arg[1] in its executor,
    substitutes arg[2] for every '$' placeholder when given, and runs each
    command via run_command. Buckets with a '$' placeholder refuse to run
    without the extra argument.
    """
    # Fetch Data from middleMan()
    data = middleMan("r","")
    data = data[:-3]
    otherData = '{ "bucket" : [' + data + '] } '
    # Coverts modified data to json
    data = json.loads(otherData)
    # Logic
    # NOTE(review): the loop variable `i` is reused for bucket dicts,
    # commands and indices below — intentional but easy to misread.
    for i in data['bucket']:
        response = i.get('executor')
        if arg[1] in response:
            buck = i.get('buck_list')
            if len(arg) > 2 :
                # Replace every command containing '$' with its substituted form.
                for i in buck:
                    # print (cmd)
                    if '$' in i:
                        cmd = i
                        newCmd = cmd.replace('$',arg[2])
                        for i in range(len(buck)):
                            if buck[i] == cmd:
                                buck[i] = newCmd
                for i in buck:
                    run_command(i)
                if len(buck) == 1 :
                    print('>> Done! executed 1 command.')
                else:
                    print('>> Done! executed '+ str(len(buck)) + ' commands.')
            else:
                # No extra argument supplied: bail out if one is required.
                for i in buck:
                    if '$' in i:
                        print(">> This command takes in an extra argument -'" + arg[1] + " <extra argument>'")
                        sys.exit()
                for i in buck:
                    run_command(i)
                if len(buck) == 1 :
                    print('\n >> Done! executed 1 command.')
                else:
                    print('\n >> Done! executed '+ str(len(buck)) + ' commands.')
def eraseBucket():
    """Wipe the whole local bucket store after a y/n confirmation.

    'y' truncates data.json and exits; 'n' aborts; any other answer prints
    an error and exits.
    """
    ans = input('\n>> This would wipe out your bucket data ! , "y" or "n" : ' )
    if ans == "y" or ans == "Y":
        # middleMan with an unknown mode returns the data-file path.
        file = middleMan("","")
        # Write Json to a Json Data Fi
        with open(file,"w") as f:
            f.write("")
            f.close()
        # Sucess Message
        print('\n>> Your bucket is now empty. ')
        # End Process
        sys.exit()
    elif ans == "n" or ans == "N":
        print("\n>> Process Terminated...")
    else:
        print("\n>> error : You did not enter a valid input, try again !")
        sys.exit()
#Add bucket from cloud
def addBucket(arg):
    """Download the bucket named arg[2] from Firestore into the local store.

    Looks up document arg[2] in the 'buckets' collection; if found, maps its
    fields into the local schema (cloud 'commands' -> local 'buck_list'),
    appends it via middleMan("a", ...), and prints the usage hint. Exits when
    the document does not exist.
    """
    print("\n >> Searching for " + arg[2] + " ...\n")
    try :
        exe = arg[2]
        db = firestore.client()
        collection = db.collection('buckets')
        doc = collection.document(exe)
        res = doc.get().to_dict()
        if res is not None:
            print(' >> Fetching ' + arg[2] + " ...\n" )
            name = res.get("name")
            executor = res.get('executor')
            commandList = res.get('commands')
            description = res.get('description')
            newData = {
                "name": name,
                "executor":executor,
                "buck_list":commandList,
                "description":description
            }
            middleMan("a",newData)
            print(' >> yay! it is done ')
            # Same '$'-placeholder heuristic as createBucket: one placeholder
            # flips the usage hint to mention an extra argument.
            score = 0
            for i in commandList:
                if "$" in i :
                    score += 10000
                if "$" not in i :
                    score -= 10
                if score > 1000:
                    print (f"\n >> Usage : 'buck {executor} [extra argument]' ")
                    break
                elif score < 1000:
                    print (f"\n >> Usage : 'buck {executor}' ")
        elif res == None:
            print(" >> No bucket - " + arg[2] + " :(")
            # End Process
            sys.exit()
    except Exception as e:
        # NOTE(review): broad catch also swallows the sys.exit-unrelated
        # failures above; consider logging `e`.
        print(" >> Oops! :( An error occured")
# deletes a bucket
def deleteBucket(arg):
    """Delete the bucket whose executor equals arg[2], after confirmation.

    Loads and re-parses the local store, removes the matching entry, then
    either truncates the file (store now empty) or rewrites it via
    middleMan("w", data). Does nothing when no third CLI argument is given.
    """
    if len(arg) > 2:
        # fetch data from middleMan()
        data = middleMan("r","")
        data = data[:-3]
        otherData = '{ "bucket" : [' + data + '] } '
        # Coverts modified data to json
        data = json.loads(otherData)
        data = data['bucket']
        #Logic
        try:
            for i in range(len(data)):
                response = data[i].get('executor')
                if arg[2] == response:
                    ans = input('\n>> This would delete bucket "' + arg[2] + '" ! , "y" or "n" : ' )
                    if ans == "y" or ans == "Y":
                        # Write new Json to a Json Data file
                        del data[i]
                        if data == [] or data == None:
                            file = middleMan("","")
                            # Write Json to a Json Data Fi
                            with open(file,"w") as f:
                                f.write("")
                                f.close()
                        else:
                            # NOTE(review): this writes the remaining entries as one
                            # JSON list, while "a" appends objects one per line —
                            # confirm the readers tolerate both shapes.
                            middleMan("w",data)
                        # Sucess Message
                        print('\n>> Done ! ')
                        # End Process
                        sys.exit()
                    elif ans == "n" or ans == "N":
                        print("\n>> Process Terminated...")
                    else:
                        print("\n>> error : You did not enter a valid input, try again !")
                        sys.exit()
        except Exception:
            # NOTE(review): silently returning here hides real errors (it also
            # masks the IndexError after `del data[i]` mid-iteration).
            return
def helpGuide():
    """Print the buck CLI usage guide."""
    # Each message keeps its original trailing " \n" so output is unchanged.
    messages = (
        " >> Welcome to buck :) \n",
        " >> Let's tour buck together. Visit https://getbuck.tech/ for more info \n",
        " >> Run 'buck --list' or 'buck -l' to list all your buckets. \n",
        " >> Run 'buck --create' or 'buck -c' to create a new bucket. \n",
        " >> Run 'buck --add <name> ' or 'buck -a <name>' to add a new bucket from the cloud. \n",
        " >> Run 'buck --delete <name> ' or 'buck -d <name>' to delete a bucket . \n",
        " >> Run 'buck --erase' or 'buck -e' to clear all your buckets. \n",
        " >> Run 'buck --help' or 'buck -h' for help. \n",
        " >> Happy hacking, chief :) \n",
    )
    for message in messages:
        print(message)
# Main Function
def main(arg=sys.argv):
    """CLI entry point: route the first argument to the matching action.

    With no arguments a greeting is printed; recognised flags dispatch to
    the bucket operations; any other token is treated as a bucket executor
    and handed to run().
    """
    known_flags = ['--create', '-c', '--list', '-l', '--erase', '-e',
                   '--help', '-h', '--add', '-a', '-d', '--delete']
    if len(arg) == 1:
        print(""" >> Hello, chief :) \n """)
        print(" >> Run 'buck --help' for help \n")
        print(" OR \n")
        print(" >> Visit https://getbuck.tech/")
        return
    flag = arg[1]
    if flag in ('--create', '-c'):
        createBucket()
    elif flag in ('--list', '-l'):
        listBucket(arg)
    elif flag in ('--erase', '-e'):
        eraseBucket()
    elif flag in ('--help', '-h'):
        helpGuide()
    elif flag in ('--delete', '-d'):
        deleteBucket(arg)
    elif flag in ('--add', '-a'):
        addBucket(arg)
    elif flag not in known_flags:
        # Anything unrecognised is assumed to be a bucket executor.
        run(arg)
| 9,213 | -8 | 327 |
68e0afb0e79f353200c47b9e2c0abc34367137fc | 2,077 | py | Python | src/robot/running/runkwregister.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 7 | 2015-02-25T10:55:02.000Z | 2015-11-04T03:20:05.000Z | src/robot/running/runkwregister.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 12 | 2015-02-24T17:00:06.000Z | 2015-07-31T08:32:07.000Z | src/robot/running/runkwregister.py | userzimmermann/robotframework | 7aa16338ce2120cb082605cf548c0794956ec901 | [
"Apache-2.0"
] | 2 | 2015-12-15T11:00:35.000Z | 2018-02-24T18:11:24.000Z | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from robot.utils import NormalizedDict
RUN_KW_REGISTER = _RunKeywordRegister()
| 35.810345 | 75 | 0.674531 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from robot.utils import NormalizedDict
class _RunKeywordRegister:
def __init__(self):
self._libs = {}
def register_run_keyword(self, libname, keyword, args_to_process=None):
if args_to_process is None:
args_to_process = self._get_args_from_method(keyword)
keyword = keyword.__name__
if libname not in self._libs:
self._libs[libname] = NormalizedDict(ignore=['_'])
self._libs[libname][keyword] = int(args_to_process)
def get_args_to_process(self, libname, kwname):
if libname in self._libs and kwname in self._libs[libname]:
return self._libs[libname][kwname]
return -1
def is_run_keyword(self, libname, kwname):
return self.get_args_to_process(libname, kwname) >= 0
def _get_args_from_method(self, method):
# Python 3 has no unbound methods, they are just functions,
# so ismethod won't be True...
if inspect.ismethod(method):
return method.__func__.__code__.co_argcount - 1
elif inspect.isfunction(method):
code = method.__code__
argcount = code.co_argcount
# ...but you can look at the args:
#TODO: Better solution?
if argcount and code.co_varnames[0] == 'self':
argcount -= 1
return argcount
raise ValueError('Needs function or method')
RUN_KW_REGISTER = _RunKeywordRegister()
| 1,208 | 5 | 158 |
88231b72e3fb9904bb15ca351c194582d140459a | 930 | py | Python | apps/admin/serializers/user.py | panla/kesousou | df6751b0a2e8eeec5122418cd29fe1f99e23f39e | [
"MIT"
] | 1 | 2021-04-27T01:18:45.000Z | 2021-04-27T01:18:45.000Z | apps/admin/serializers/user.py | panla/kesousou | df6751b0a2e8eeec5122418cd29fe1f99e23f39e | [
"MIT"
] | 5 | 2020-08-26T14:47:47.000Z | 2021-03-22T08:56:43.000Z | apps/admin/serializers/user.py | panla/kesousou | df6751b0a2e8eeec5122418cd29fe1f99e23f39e | [
"MIT"
] | null | null | null | from django.contrib.auth.views import get_user_model
from rest_framework import serializers
User = get_user_model()
| 32.068966 | 95 | 0.635484 | from django.contrib.auth.views import get_user_model
from rest_framework import serializers
User = get_user_model()
class UserSerializer(serializers.ModelSerializer):
    """DRF serializer for the project user model.

    The password field is write-only and is always stored hashed via
    set_password on both create and update.
    """
    def create(self, validated_data):
        # Create a user only when a password was supplied; store it hashed.
        password = validated_data.get('password')
        if password:
            user = User(**validated_data)
            user.set_password(password)
            user.save()
            return user
        else:
            # NOTE(review): a plain Exception surfaces as a server error —
            # presumably serializers.ValidationError is intended; confirm.
            raise Exception('there is no password')
    def update(self, instance, validated_data):
        # Hash a new password if present, then remove it so the default
        # ModelSerializer update does not overwrite the hash with plain text.
        password = validated_data.get('password')
        if password:
            instance.set_password(password)
            validated_data.pop('password')
        return super().update(instance, validated_data)
    class Meta:
        model = User
        fields = ['id', 'username', 'mobile', 'email', 'password', 'is_superuser', 'is_active']
        extra_kwargs = {'password': {'write_only': True}}
| 515 | 274 | 23 |
1c4e0e153ba7b81f014ecd91d1e5813d19780dea | 4,389 | py | Python | scripts/camera_sync.py | Tarekbouamer/Azure_Kinect_ROS_Driver | 17442bbc0a1ed18e60dfad2a1f6040900aa30d70 | [
"MIT"
] | 1 | 2020-02-15T22:26:58.000Z | 2020-02-15T22:26:58.000Z | scripts/camera_sync.py | Tarekbouamer/Azure_Kinect_ROS_Driver | 17442bbc0a1ed18e60dfad2a1f6040900aa30d70 | [
"MIT"
] | null | null | null | scripts/camera_sync.py | Tarekbouamer/Azure_Kinect_ROS_Driver | 17442bbc0a1ed18e60dfad2a1f6040900aa30d70 | [
"MIT"
] | null | null | null | # !/usr/bin/env python2
import rospy
import rospkg
import yaml
from sensor_msgs.msg import CameraInfo
from std_msgs.msg import String
import roslaunch
import os
import argparse
from Config.config import load_config, DEFAULTS as DEFAULT_CONFIGS
import time
import Camera as cam
parser = argparse.ArgumentParser(description="Multi Azure Python Controller")
parser.add_argument("--mode", type=str, default="driver", help="Write logs to the given directory")
parser.add_argument("--nb", type=int, default="1", help="Enter number of cameras used")
parser.add_argument("--config", metavar="FILE", type=str, help="Path to configuration file")
parser.add_argument("--path", metavar="DIR", type=str, help="Path to save experiment mkv video")
if __name__ == '__main__':
main(parser.parse_args())
| 29.655405 | 141 | 0.637959 | # !/usr/bin/env python2
import rospy
import rospkg
import yaml
from sensor_msgs.msg import CameraInfo
from std_msgs.msg import String
import roslaunch
import os
import argparse
from Config.config import load_config, DEFAULTS as DEFAULT_CONFIGS
import time
import Camera as cam
parser = argparse.ArgumentParser(description="Multi Azure Python Controller")
parser.add_argument("--mode", type=str, default="driver", help="Write logs to the given directory")
parser.add_argument("--nb", type=int, default="1", help="Enter number of cameras used")
parser.add_argument("--config", metavar="FILE", type=str, help="Path to configuration file")
parser.add_argument("--path", metavar="DIR", type=str, help="Path to save experiment mkv video")
def talker():
    """Keep-alive stub: registers the 'talker' node and idles at 10 Hz.

    A 'chatter' publisher is created but all publishing is commented out,
    so this currently only keeps the node spinning until shutdown.
    """
    pub = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker')
    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        hello_str = "spin"
        rate.sleep()
        ##rospy.loginfo(hello_str)
        # pub.publish(hello_str)
        # rate.sleep()
def launchNode():
    """Start the Azure Kinect launch file(s) under one roslaunch parent.

    Resolves record.launch for cam1 (driver.launch for cam2 is prepared but
    currently excluded from launch_files) and starts them.
    NOTE(review): launch-file paths are hard-coded to one machine's workspace.
    """
    uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
    roslaunch.configure_logging(uuid)
    camera1 = ['/home/tarek/workspaces/ros/azure_ws/src/Azure_Kinect_ROS_Driver/launch/record.launch',
               'file_name:=cam1']
    camera2 = ['/home/tarek/workspaces/ros/azure_ws/src/Azure_Kinect_ROS_Driver/launch/driver.launch',
               'file_name:=cam2']
    roslaunch_file1 = roslaunch.rlutil.resolve_launch_arguments(camera1)[0]
    roslaunch_args1 = camera1[1:]
    roslaunch_file2 = roslaunch.rlutil.resolve_launch_arguments(camera2)[0]
    roslaunch_args2 = camera2[1:]
    # launch_files = [(roslaunch_file1, roslaunch_args1), (roslaunch_file2, roslaunch_args2)]
    launch_files = [(roslaunch_file1, roslaunch_args1)]
    launch = roslaunch.parent.ROSLaunchParent(uuid, launch_files)
    launch.start()
    #rospy.loginfo("started")
def go():
    """Launch record.launch directly (simplified variant of launchNode).

    NOTE(review): the launch-file path is hard-coded to one machine.
    """
    #rospy.init_node('en_Mapping', anonymous=True)
    uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)
    roslaunch.configure_logging(uuid)
    launch = roslaunch.parent.ROSLaunchParent(uuid, ["/home/tarek/workspaces/ros/azure_ws/src/Azure_Kinect_ROS_Driver/launch/record.launch"])
    launch.start()
class nodeCamerasController():
    """Thin wrapper that registers this process as the 'controller' ROS node."""
    def __init__(self):
        # init cameras controller node
        rospy.init_node('controller')
def load_setup_config(args):
    """Load and echo the multi-camera setup configuration.

    Reads args.config (falling back to the built-in "setup" defaults),
    prints every option of every section, and returns the parsed config.
    """
    print("\n Loading setup configuration :")
    print(" ")
    cfg = load_config(args.config, DEFAULT_CONFIGS["setup"])
    for section_name in cfg.sections():
        for option_name, _value in cfg.items(section=section_name):
            print(" {:40} {}".format(option_name, cfg.get(section_name, option_name)))
    print(" ")
    return cfg
def main(args):
    """Dispatch on args.mode: 'driver', 'recording' or 'playback'.

    'recording' reads per-camera sections (name, serial, sync role) from the
    setup config and ros-launches one cam.Camera per section; 'playback'
    opens a single cam.Camera for display. An empty --path forces driver mode.
    """
    if args.path == '':
        print(" Recoding folder is not set, K4A switch to mode Driver")
        args.mode = 'driver'
    if args.mode == 'driver':
        #
        print("Mode DRIVER is ON")
    if args.mode == 'recording':
        print("Mode RECORDING is ON")
        cameras_list = []
        if os.path.isdir(args.path):
            cfg_setup = load_setup_config(args)
            # One Camera object per config section (one section per device).
            for x in cfg_setup.sections():
                print(x)
                camera_name = cfg_setup.get(x, 'CAMERA_NAME')
                camera_sn = cfg_setup.get(x, 'SENSOR_SN')
                camera_sync = cfg_setup.getint(x, 'SYNC')
                cameras_list.append(cam.Camera(args, camera_name, camera_sn, camera_sync))
            for x in cameras_list:
                x.ros_launch()
            #talker()
            #for c in args.nb:
            # cameras_list.append(cam.Camera(args))
            #cameras_list[0].display()
            #cam1.ros_launch()
            #talker()
            print("####$$$$$$$$$$$%%%%%%%%%%%%%%%^^^^^^^^^^^^^^^^^^^")
        else:
            print("ERROR ", args.path, " Folder does not exist")
    if args.mode == 'playback':
        print("Mode PLAYBACK is ON")
        #mkv_filepath = os.path.join(args.path, ...)
        if os.path.isdir(args.path):
            camera1 = cam.Camera(args)
            camera1.display()
        else:
            print("ERROR ", args.path, " Folder does not exist")
#try:
# talker()
#except rospy.ROSInterruptException:
# pass
if __name__ == '__main__':
main(parser.parse_args())
| 3,413 | 9 | 164 |